blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c69ea683638698c016e99957ec711ef3150859b
|
91c6539e8f0cdbbe087024d4427c710a97f2baba
|
/Code/undetermind.R
|
72d69a1cb55ffd38c738f5d4fcf39dc42854640e
|
[] |
no_license
|
kevinkr/kaggle-allstate
|
fa3dd5475a1c34387bfe87ac1c4f2351191cac64
|
d1b76b10c7758e02af7dba02dcde6202263a08b1
|
refs/heads/master
| 2021-01-10T23:55:38.630232
| 2016-12-14T13:20:37
| 2016-12-14T13:20:37
| 70,800,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,398
|
r
|
undetermind.R
|
# Allstate Kaggle competition
# Start: 10-13-16
# Expects `train`, `test`, and `data` to already be in the workspace
# (presumably loaded by code pulled in via packages.R) -- TODO confirm.
source("packages.R")
#############################
# Merge data sets
#############################
#
# Create a dummy `loss` column in test so its columns line up with train
test <- mutate(test, loss = "none")
# Tag each row with its origin so the combined data can be split back apart
test <- mutate(test, dataset = "testset")
train <- mutate(train, dataset = "trainset")
# Factorize the categorical columns (those named cat*).
# mutate_each()/funs() are deprecated in modern dplyr; across() is the
# supported replacement and produces the same result here.
data <- data %>% mutate(across(starts_with("cat"), factor))
### Misfit code
######################################################################
# Multiple correspondence analysis -- kept for reference only; not useful
# in this case with so many variables.
# (see http://gastonsanchez.com/how-to/2012/10/13/MCA-in-R/)
# Number of levels for each categorical column in the chosen slice
cats <- apply(train.cat.factored[, 110:116], 2, function(x) nlevels(as.factor(x)))
mca1 <- MCA(train.cat.factored[, 110:116], graph = FALSE)
# data frame with variable coordinates
mca1_vars_df <- data.frame(mca1$var$coord, Variable = rep(names(cats), cats))
# data frame with observation coordinates
mca1_obs_df <- data.frame(mca1$ind$coord)
# plot of variable categories
ggplot(data = mca1_vars_df,
       aes(x = Dim.1, y = Dim.2, label = rownames(mca1_vars_df))) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_text(aes(colour = Variable)) +
  ggtitle("MCA plot of variables using R package FactoMineR")
#####################################################################
|
8c8bc7fc39cc76d211a2270e7ca490d1691e41cb
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.security.identity/man/kms_list_aliases.Rd
|
06a21bceab74144d46ddd9e0b8c9f540face5552
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 3,849
|
rd
|
kms_list_aliases.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kms_operations.R
\name{kms_list_aliases}
\alias{kms_list_aliases}
\title{Gets a list of aliases in the caller's AWS account and region}
\usage{
kms_list_aliases(KeyId, Limit, Marker)
}
\arguments{
\item{KeyId}{Lists only aliases that are associated with the specified CMK. Enter a
CMK in your AWS account.
This parameter is optional. If you omit it,
\code{\link[=kms_list_aliases]{list_aliases}} returns all aliases in the account
and Region.
Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
For example:
\itemize{
\item Key ID: \verb{1234abcd-12ab-34cd-56ef-1234567890ab}
\item Key ARN:
\verb{arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}
}
To get the key ID and key ARN for a CMK, use
\code{\link[=kms_list_keys]{list_keys}} or \code{\link[=kms_describe_key]{describe_key}}.}
\item{Limit}{Use this parameter to specify the maximum number of items to return.
When this value is present, AWS KMS does not return more than the
specified number of items, but it might return fewer.
This value is optional. If you include a value, it must be between 1 and
100, inclusive. If you do not include a value, it defaults to 50.}
\item{Marker}{Use this parameter in a subsequent request after you receive a response
with truncated results. Set it to the value of \code{NextMarker} from the
truncated response you just received.}
}
\value{
A list with the following syntax:\preformatted{list(
Aliases = list(
list(
AliasName = "string",
AliasArn = "string",
TargetKeyId = "string",
CreationDate = as.POSIXct(
"2015-01-01"
),
LastUpdatedDate = as.POSIXct(
"2015-01-01"
)
)
),
NextMarker = "string",
Truncated = TRUE|FALSE
)
}
}
\description{
Gets a list of aliases in the caller's AWS account and region. For more
information about aliases, see \code{\link[=kms_create_alias]{create_alias}}.
By default, the \code{\link[=kms_list_aliases]{list_aliases}} operation returns all
aliases in the account and region. To get only the aliases associated
with a particular customer master key (CMK), use the \code{KeyId} parameter.
The \code{\link[=kms_list_aliases]{list_aliases}} response can include aliases that
you created and associated with your customer managed CMKs, and aliases
that AWS created and associated with AWS managed CMKs in your account.
You can recognize AWS aliases because their names have the format
\verb{aws/<service-name>}, such as \code{aws/dynamodb}.
The response might also include aliases that have no \code{TargetKeyId}
field. These are predefined aliases that AWS has created but has not yet
associated with a CMK. Aliases that AWS creates in your account,
including predefined aliases, do not count against your \href{https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit}{AWS KMS aliases quota}.
\strong{Cross-account use}: No. \code{\link[=kms_list_aliases]{list_aliases}} does not
return aliases in other AWS accounts.
\strong{Required permissions}:
\href{https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html}{kms:ListAliases}
(IAM policy)
For details, see \href{https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access}{Controlling access to aliases}
in the \emph{AWS Key Management Service Developer Guide}.
\strong{Related operations:}
\itemize{
\item \code{\link[=kms_create_alias]{create_alias}}
\item \code{\link[=kms_delete_alias]{delete_alias}}
\item \code{\link[=kms_update_alias]{update_alias}}
}
}
\section{Request syntax}{
\preformatted{svc$list_aliases(
KeyId = "string",
Limit = 123,
Marker = "string"
)
}
}
\examples{
\dontrun{
# The following example lists aliases.
svc$list_aliases()
}
}
\keyword{internal}
|
df200bbe7edd7af69fcbb8e4277bce057f931636
|
1a08f81a8ebee2753b42333e77735f9416f4c396
|
/R/tag_by_regex.R
|
be7f0f164e57451f8a74d38893103d75db38ce8b
|
[] |
no_license
|
gaospecial/biblioreport
|
1404645207c667e71c0bd11c36af2fe9120ff124
|
ab407fad6223c8ab8c5eecd80d21bb7c0471d082
|
refs/heads/master
| 2023-09-05T19:31:05.171230
| 2021-11-09T11:40:29
| 2021-11-09T11:40:29
| 334,099,411
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
r
|
tag_by_regex.R
|
# Classify records by regular-expression search
#' Tag record by regular expression search
#'
#' Each element of `x` is matched against every regular expression in
#' `pattern`; the names of the matching patterns are pasted together with
#' `sep` to build that record's tag string.
#'
#' @param x a character vector (or list of strings) holding the search content
#' @param pattern a list of regular expressions, which can be obtained by
#'   [keywords_from()]
#' @param pattern.names human readable names, one per pattern; defaults to
#'   `names(pattern)`
#' @param sep separator between matched names, default is ";"
#'
#' @return a character vector with one tag string per element of `x`;
#'   an `NA` or non-matching record yields an empty string
#' @export
#'
#' @examples
#' tag_by_regex(c("apple pie", "carrot soup"),
#'              list(fruit = "apple|banana", veg = "carrot"))
tag_by_regex <- function(x, pattern, pattern.names = names(pattern), sep = ";"){
  # Base grepl() replaces the former stringr::str_detect() call, removing
  # the require(stringr) side effect.  perl = TRUE keeps the regex dialect
  # close to stringr's -- assumed sufficient for the simple alternation
  # patterns produced by keywords_from(); confirm for exotic regexes.
  vapply(x, function(record) {
    hits <- vapply(pattern, function(p) {
      # NA records match nothing; && short-circuits before grepl sees NA
      !is.na(record) && grepl(p, record, perl = TRUE)
    }, logical(1), USE.NAMES = FALSE)
    paste0(pattern.names[hits], collapse = sep)
  }, character(1), USE.NAMES = FALSE)
}
# Merge several keyword records into a single keyword pattern
#
# Every record passed through `...` (optionally extended by the `list`
# argument) is expected to carry an element under the name given by `name`;
# those elements are collected and joined with "|" so the result can serve
# as one regular-expression alternation.
keywords_from <- function(..., list = NULL, name = "primary"){
  # c() silently drops a NULL `list`, so no explicit is.null() branch needed
  records <- c(list(...), list)
  terms <- lapply(records, function(rec) rec[[name]])
  paste0(unlist(terms), collapse = "|")
}
|
280a3f13cdc4c1ee4f3be09d4426c63982a1e5ac
|
6e012caa8d23f76498892dd4a5053c618e74a0d5
|
/scripts/exploratory data analysis/L0-descriptive_stats_0.1.R
|
2616d50d53c2f07885a993c2377d92cdf64ce8dd
|
[
"MIT"
] |
permissive
|
duttashi/learnr
|
4bc973fed064fe496673fc53d65cb797db7ae120
|
3d7e17d044dfd2e4ce1996d1a2a5a02dc7e4db45
|
refs/heads/master
| 2023-06-29T01:39:40.940465
| 2023-06-16T05:00:07
| 2023-06-16T05:00:07
| 62,351,661
| 81
| 51
|
MIT
| 2023-06-16T05:00:08
| 2016-07-01T00:56:08
|
R
|
UTF-8
|
R
| false
| false
| 1,469
|
r
|
L0-descriptive_stats_0.1.R
|
# This script is in continuation with L0-descriptive_stats_0.0.R
# Data Formatting
# Expects the `flights` data frame (and `miss_name`) from the previous
# script to be present in the workspace.
# load the required libraries
library(stringr)
library(chron)
# see the help pages for str_pad, substring, paste, chron, head
help("str_pad")
# prints the first five rows of the dataset.
head(flights[miss_name]) # The date format needs to be changed
# Left-pad DepTime to 4 characters with zeros (e.g. "745" -> "0745") so the
# hour and minute can be sliced out by fixed position.
dep_time <- str_pad(flights$DepTime, 4, pad = "0")
# Separate the hour (first two characters) from the DepTime values.
hour <- substring(dep_time, 1, 2)
# ...and the minute (last two characters).
minute <- substring(dep_time, 3, 4)
# Rebuild dep_time in "HH:MM:SS" format; seconds are assumed to be "00" for
# the sake of formatting.  paste() is vectorized, so the former
# element-by-element for loop is unnecessary.
dep_time <- paste(hour, minute, "00", sep = ":")
# Change the class of dep_time from character to times.
dep_time <- chron(times = dep_time)
# Print the first 10 rows and then the 10 last rows of dep_time.
head(dep_time, n = 10)
tail(dep_time, n = 10)
# With the formatting confirmed as "HH:MM:SS", write the result back into
# flights$DepTime.
flights$DepTime <- dep_time
str(flights)
|
fe6dc93ca943b828d83b4a77acbf1bf6553f4f6a
|
68562f46424bf312d5fe070990243ae03ed1454e
|
/man/labels2matrix.Rd
|
fd50b08987af43193692adc67824399f3ec44949
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsR
|
edb12114bc3d143c59ebd3947301de705ec51b63
|
8deb4d897fdb295a0213ca59e3bf1846f62ce99a
|
refs/heads/master
| 2023-06-24T14:48:05.362501
| 2023-06-24T11:15:10
| 2023-06-24T11:15:10
| 5,782,626
| 86
| 32
|
Apache-2.0
| 2023-06-17T12:15:50
| 2012-09-12T16:28:03
|
R
|
UTF-8
|
R
| false
| true
| 1,118
|
rd
|
labels2matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels2matrix.R
\name{labels2matrix}
\alias{labels2matrix}
\title{Convert label image to a matrix}
\usage{
labels2matrix(img, mask, targetLabels = NULL, missingVal = NA)
}
\arguments{
\item{img}{input label image}
\item{mask}{defines domain of interest}
\item{targetLabels}{defines target regions to be returned. if the target
label does not exist in the input label image, then the matrix will contain
a constant value of missingVal (default NA) in that row.}
\item{missingVal}{for missing label values.}
}
\value{
matrix is output
}
\description{
Convert a labeled image to an n x m binary matrix where n = number of voxels
and m = number of labels. Only includes values inside the provided mask while
including background ( img == 0 ) for consistency with timeseries2matrix and
other image to matrix operations.
}
\examples{
fi = antsImageRead(getANTsRData("r16") ,2) \%>\% resampleImage(c(60,60),1,0)
mask = getMask( fi )
labs = kmeansSegmentation( fi, 3 )$segmentation
labmat = labels2matrix( labs, mask )
}
\author{
Avants BB
}
|
3c918eeb90cf54db623d44d85f538ce30c1a200d
|
f3818f31a2452e60e849a76652a013429ff64be7
|
/scripts/TPF_family_dataset.R
|
982936ba88e1421bd156a3538e00fbe66a5a7b49
|
[
"MIT"
] |
permissive
|
tfausten/tfdatalab
|
85d3ab2443fbb644d9e257878b5b58eab9467f32
|
2a3c8c9785c71c1f4828a540e55152fa61e89fa4
|
refs/heads/master
| 2020-03-23T03:24:55.085592
| 2018-11-09T08:15:13
| 2018-11-09T08:15:13
| 141,029,151
| 0
| 0
|
MIT
| 2018-11-06T07:56:59
| 2018-07-15T13:23:10
|
HTML
|
UTF-8
|
R
| false
| false
| 3,774
|
r
|
TPF_family_dataset.R
|
# create a single-entry family-id dataset with relevant values for further analysis
load("./datasource/TPF/201803_TPF_Core.RData")  # provides tpf_core
tpf_families <- subset(tpf_core,
                       select = c(Family_id, USPTO_app_first, EPO_app_first,
                                  JPO_app_first, PCT_app_first))
rm(tpf_core)
# Transform application dates into Date format.  Convert to character first
# to make as.Date work on the raw YYYYMMDD values.
tpf_families[, 2:5] <- lapply(tpf_families[, 2:5], as.character)
tpf_families[, 2:5] <- lapply(tpf_families[, 2:5], as.Date, format = "%Y%m%d")
str(tpf_families)
#### find out which application was first and fill this info into new columns
# Replace NA dates with the sentinel 3000-01-01 so the comparisons below
# work (comparisons involving NA would yield NA, not TRUE/FALSE).
for (i in 2:5) {
  isna <- which(is.na(tpf_families[, i]))
  tpf_families[isna, i] <- "3000-01-01"
}
# initialize new columns for "first" data
tpf_families$first_app <- NA
tpf_families$first_app_year <- as.numeric(NA)
# For each family, record which patent office received the first application
# (first_app) and the year of that first application (first_app_year).
# Explicit column references replace the earlier attach()/detach() pair,
# which risked masking globals and leaving the frame attached on error.
date_cols <- c(USPTO = "USPTO_app_first", EPO = "EPO_app_first",
               JPO = "JPO_app_first", PCT = "PCT_app_first")
earliest <- do.call(pmin, tpf_families[unname(date_cols)])
for (office in names(date_cols)) {
  col <- date_cols[[office]]
  # A date equal to the row-wise minimum is (one of) the earliest filings.
  # On ties, offices later in date_cols overwrite earlier ones, matching the
  # original block-by-block assignment order (USPTO, EPO, JPO, PCT).
  first <- which(tpf_families[[col]] == earliest)
  tpf_families$first_app[first] <- office
  tpf_families$first_app_year[first] <-
    as.numeric(format(tpf_families[[col]][first], "%Y"))
}
# return the 3000-01-01 sentinel back to NA values
for (i in 2:5) {
  isna <- which(tpf_families[, i] == "3000-01-01")
  tpf_families[isna, i] <- NA
}
rm(first, isna)
tpf_families$first_app <- as.factor(tpf_families$first_app)
table(tpf_families$first_app)
####
#### add country columns that show the originating country if all
#### applicants/inventors come from a single country and "INT" if they are
#### international
load("./datasource/TPF/unicountries_app.RData")  # provides unicountries_app
tpf_families$country_app <- as.character(NA)
# indices of patents with applicants from more than one country
# (lengths() is the vectorized replacement for lapply(x, length))
whichint_app <- which(lengths(unicountries_app) > 1)
tpf_families$country_app[whichint_app] <- "INT"
whichcountry_app <- which(lengths(unicountries_app) == 1)
tpf_families$country_app[whichcountry_app] <- unicountries_app[whichcountry_app]
rm(whichint_app, whichcountry_app)
load("./datasource/TPF/unicountries_inv.RData")  # provides unicountries_inv
tpf_families$country_inv <- as.character(NA)
whichint_inv <- which(lengths(unicountries_inv) > 1)
tpf_families$country_inv[whichint_inv] <- "INT"
whichcountry_inv <- which(lengths(unicountries_inv) == 1)
tpf_families$country_inv[whichcountry_inv] <- unicountries_inv[whichcountry_inv]
rm(whichint_inv, whichcountry_inv)
tpf_families$country_app <- as.factor(unlist(tpf_families$country_app))
tpf_families$country_inv <- as.factor(unlist(tpf_families$country_inv))
table(tpf_families$country_app)
# save the dataset
save(tpf_families, file = "./datasource/TPF/tpf_families.RData")
|
e9bf4990657119e808b4c54c5ef0960df09f6a41
|
535d05d170267fe6076945f8f6f9e226b51fe50d
|
/global.R
|
fb6faa4bdb1389cc1f412c8bad0ebacef31cb4fb
|
[] |
no_license
|
djepstein87/menu_scrapy
|
b6f6db16252d5a30eda420ecd5a5d3585f02822f
|
d910713238c31926a25dedc90f1ecd98cce17ba8
|
refs/heads/master
| 2021-01-11T14:16:41.994949
| 2017-04-10T21:50:41
| 2017-04-10T21:50:41
| 81,286,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
global.R
|
#menus_reduced = readRDS('./data/rds_menus_reduced')
|
7ff4ba647d3dc065ad20cae106cb84ac393d69ac
|
28291c2ab8e0ae8b9392dd9ce921ff9d0c727f37
|
/models/analise_modelos.R
|
514cbc65446f1b2fd978576f016c4077039ce5f5
|
[] |
no_license
|
pbizil/predict_ipca
|
009eb3018b5b60dd4e9b4158f55ed82e97351258
|
65665b2bb94310e10d1e3925ded1297dcb09cf13
|
refs/heads/main
| 2023-07-07T00:18:27.265783
| 2021-08-14T19:30:55
| 2021-08-14T19:30:55
| 394,425,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,628
|
r
|
analise_modelos.R
|
library(DBI)
library(ggplot2)
library(rjson)
library(anytime)
library(naniar)
library(Metrics)

# Paths used throughout the script
db_path <- "/Users/pbizil/Desktop/tcc_pos/data/app_db.db"
plots_dir <- "/Users/pbizil/Desktop/tcc_pos/plots/plots_output"
perf_dir <- "/Users/pbizil/Desktop/tcc_pos/plots/plots_desempenho"

# Model keys (column suffixes in the preds_l* tables) and display labels
models <- c(cb = "Catboost", xg = "XGBoost", lgbm = "LGBM")
# Forecast horizons: lag, file-name tag and Portuguese label used in titles
horizons <- data.frame(
  lag = 1:4,
  tag = c("1mes", "2meses", "3meses", "4meses"),
  label = c("um mês", "dois meses", "três meses", "quatro meses"),
  stringsAsFactors = FALSE
)

# Read the predictions table for one horizon and normalize it: the third
# column becomes expectativas_ipca and `date` becomes a Date.  The DB
# connection is closed on exit (the original left one open per read).
read_preds <- function(lag) {
  con <- dbConnect(RSQLite::SQLite(), db_path)
  on.exit(dbDisconnect(con), add = TRUE)
  dados <- dbReadTable(con, paste0("preds_l", lag))
  colnames(dados)[3] <- "expectativas_ipca"
  dados$date <- as.Date(strptime(anytime::anydate(dados$date), "%Y-%m-%d"))
  dados
}

# Plot observed IPCA, market expectation and one model's prediction for a
# given horizon; writes ipca_pred_<tag>_<key>.png into plots_dir.
plot_model_pred <- function(dados, lag, key, label, horizon_label, tag) {
  obs_col <- paste0("y", lag)
  pred_col <- paste0("y_preds_", key, "_", lag)
  pred_label <- paste0("Predição IPCA - ", label)
  png(filename = file.path(plots_dir,
                           paste0("ipca_pred_", tag, "_", key, ".png")))
  p <- ggplot() +
    geom_line(data = dados,
              aes(x = date, y = .data[[obs_col]], colour = "IPCA Mensal")) +
    geom_line(data = dados,
              aes(x = date, y = expectativas_ipca, colour = "Expectativas IPCA")) +
    geom_line(data = dados,
              aes(x = date, y = .data[[pred_col]], colour = pred_label)) +
    labs(title = paste0("IPCA mensal, Expectativa e Predição ", label,
                        " de ", horizon_label, " \n jan/2015 a jun/2021"),
         x = "Meses", y = "Variação percentual") +
    scale_colour_manual("",
                        breaks = c("IPCA Mensal", "Expectativas IPCA", pred_label),
                        values = c("black", "blue", "red")) +
    theme(legend.position = 'bottom')
  print(p)  # explicit print: ggplot objects do not auto-render inside functions
  dev.off()
}

# Bar chart comparing MSE of the market expectation and the three models
# for a given horizon; writes ipca_pred_desemp_<tag>.png into perf_dir.
plot_performance <- function(dados, lag, horizon_label, tag) {
  obs <- dados[[paste0("y", lag)]]
  mse_vals <- c(
    mse(obs, dados$expectativas_ipca),
    mse(obs, dados[[paste0("y_preds_cb_", lag)]]),
    mse(obs, dados[[paste0("y_preds_xg_", lag)]]),
    mse(obs, dados[[paste0("y_preds_lgbm_", lag)]])
  )
  dados_r2 <- data.frame(
    "Predições" = c("Expectativa de mercado", "Catboost", "XGBoost", "LGBM"),
    "Desempenho" = mse_vals,
    "Fonte" = c("Expectativa", "Modelo", "Modelo", "Modelo")
  )
  png(filename = file.path(perf_dir,
                           paste0("ipca_pred_desemp_", tag, ".png")))
  p <- ggplot(data = dados_r2,
              aes(x = reorder(Predições, -Desempenho), y = Desempenho,
                  fill = Fonte)) +
    geom_bar(position = "stack", stat = "identity", width = 0.4) +
    geom_text(aes(label = Desempenho),
              position = position_dodge(width = 1), hjust = 1.2) +
    ggtitle(paste0("Desempenho modelos e expectativa de mercado para \n ",
                   "predição de inflação em ", horizon_label,
                   " - valor do mse")) +
    xlab("Fonte das predições") + ylab("Desempenho (mse)") +
    theme(legend.position = 'bottom') +
    coord_flip()
  print(p)
  dev.off()
}

# target: observed monthly IPCA vs market expectations over the full period
dados <- read_preds(1)
png(filename = file.path(plots_dir, "ipca_serie_target.png"))
print(
  ggplot() +
    geom_line(data = dados, aes(x = date, y = y1, colour = "IPCA Mensal")) +
    geom_line(data = dados,
              aes(x = date, y = expectativas_ipca, colour = "Expectativas IPCA")) +
    labs(title = "IPCA mensal e Expectativa - jan/2015 a jun/2021",
         x = "Meses", y = "Variação percentual") +
    scale_colour_manual("", breaks = c("IPCA Mensal", "Expectativas IPCA"),
                        values = c("black", "blue")) +
    theme(legend.position = 'bottom')
)
dev.off()

# prediction plots: one per horizon x model (replaces twelve near-identical
# copy-pasted blocks in the original)
for (i in seq_len(nrow(horizons))) {
  h <- horizons[i, ]
  dados <- read_preds(h$lag)
  for (key in names(models)) {
    plot_model_pred(dados, h$lag, key, models[[key]], h$label, h$tag)
  }
}

# desempenho dos modelos: one MSE bar chart per horizon
for (i in seq_len(nrow(horizons))) {
  h <- horizons[i, ]
  dados <- read_preds(h$lag)
  plot_performance(dados, h$lag, h$label, h$tag)
}
|
bbc17553a37734f8a181a153172d16361fe57682
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/errint/examples/print.measure.Rd.R
|
a889b385cdae7a75a843e73a6d048e116bdf0a9d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 152
|
r
|
print.measure.Rd.R
|
# Auto-extracted example for errint::print.measure; exercises the package's
# print method on a measure object.
library(errint)
### Name: print.measure
### Title: Printing Measures
### Aliases: print.measure
### ** Examples
# Build a measure object from two numeric values and print it via the
# print.measure S3 method -- argument semantics come from the errint
# package and are not visible here.
res<-measure(0.1,0.7)
print(res)
|
3c31edabb4586f3410510c037e3ca9413f491d86
|
e30bb95df470e1c177b825c7bb16d825be1c4cf0
|
/Building Variable/Type I Variable.R
|
76907011669cc7ba8a0e12f76f8ae6c6a8106aea
|
[] |
no_license
|
Libardo1/Credit-Card-Payment-Supervised-Fraud-Detection
|
316f290dba9776384e6f4a4d9c599c571b4a8b39
|
c9c8f54132f6d1f3cc5245429876170d0f6f6339
|
refs/heads/master
| 2020-03-17T04:41:45.119460
| 2018-03-18T22:12:52
| 2018-03-18T22:12:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,750
|
r
|
Type I Variable.R
|
# Build "Type I" variables: each transaction's amount compared with the same
# card's own history inside a trailing time window.
load("Card Payments_Cleaned.rda")  # provides the cleaned `data` frame
library(dplyr)
############################# Type I Variable ######################
# historical comparison with a 3-day time window
############## Subset trial #############
# January-only slice used to benchmark two window-subsetting approaches
data_Jan <- filter(data, date < "2010-02-01")
current <- data_Jan[6748, ]
row <- 6748
###### time window subsetting method (dplyr pipeline)
ptm <- proc.time()
# subset = filter(data_Jan, date >= current$date - 3, recordnum < row)
for (i in seq_len(nrow(data_Jan))) {
  current_date <- data_Jan[i, "date"]
  current_key <- data_Jan[i, "cardnum"]
  # prior transactions on the same card within the last 3 days
  window <- data_Jan %>%
    filter(date >= current_date - 3, recordnum < i, cardnum == current_key) %>%
    select(cardnum, amount)
}
proc.time() - ptm
# ~20s
###### base-R indexing variant of the same window subset
ptm <- proc.time()
# subset = data_Jan[data_Jan$date >= current$date - 3 & data_Jan$recordnum < row,]
for (i in seq_len(nrow(data_Jan))) {
  current_date <- data_Jan[i, "date"]
  current_key <- data_Jan[i, "cardnum"]
  window <- data_Jan[data_Jan$date >= current_date - 3 &
                       data_Jan$recordnum < i &
                       data_Jan$cardnum == current_key,
                     c("cardnum", "amount")]
}
proc.time() - ptm
# ~11s
###### 3-day trial: ratios of the current amount to the card's windowed
###### average / max / median / total (locals renamed so they no longer
###### shadow base R's subset/max/median)
ptm <- proc.time()
for (i in seq_len(nrow(data_Jan))) {
  current_date <- data_Jan[i, "date"]
  current_key <- data_Jan[i, "cardnum"]
  current_amount <- data_Jan[i, "amount"]
  window <- data_Jan[data_Jan$date >= current_date - 3 &
                       data_Jan$recordnum < i &
                       data_Jan$cardnum == current_key,
                     c("cardnum", "amount")]
  if (nrow(window) != 0) {
    hist_avg <- mean(window$amount)
    hist_max <- max(window$amount)
    hist_median <- median(window$amount)
    hist_total <- sum(window$amount)
    data_Jan[i, paste0("amount_hist_avg_", "3")] <- current_amount / hist_avg
    data_Jan[i, paste0("amount_hist_max_", "3")] <- current_amount / hist_max
    data_Jan[i, paste0("amount_hist_median_", "3")] <- current_amount / hist_median
    data_Jan[i, paste0("amount_hist_total_", "3")] <- current_amount / hist_total
  } else {
    # no history in the window: neutral defaults (ratio 1 for avg/median,
    # 0 for max/total), matching the original encoding
    data_Jan[i, paste0("amount_hist_avg_", "3")] <- 1
    data_Jan[i, paste0("amount_hist_max_", "3")] <- 0
    data_Jan[i, paste0("amount_hist_median_", "3")] <- 1
    data_Jan[i, paste0("amount_hist_total_", "3")] <- 0
  }
}
proc.time() - ptm
# ~15s
############## Function packaging #############
build_var <- function(df, time_window, key) {
  ###########################
  # Adds four Type-I ratio variables comparing each transaction amount to
  # the history of the same entity within a rolling look-back window:
  #   <key>_amount_to_avg_<w>, _max_<w>, _median_<w>, _total_<w>
  #
  # df:          cleaned data frame; must contain columns `date`, `amount`,
  #              `recordnum` (assumed equal to the row index) and
  #              `cardnum` / `merchnum` depending on `key`
  # time_window: look-back window in days (e.g. 3 or 7)
  # key:         "card" or "merchant" -- entity whose history is used
  #
  # Returns df with the four new columns appended. Rows with no history
  # in the window keep the defaults (avg = 1, max = 0, median = 1, total = 0).
  ###########################
  # Fail fast on an unsupported key: the original code only reached an
  # "object not found" error deep inside the loop in that case.
  if (!key %in% c("card", "merchant")) {
    stop("`key` must be \"card\" or \"merchant\"", call. = FALSE)
  }
  key_col <- if (key == "card") "cardnum" else "merchnum"

  # Hoist column-name construction out of the loop (was paste0() per row).
  col_avg    <- paste0(key, "_", "amount_to_avg_", time_window)
  col_max    <- paste0(key, "_", "amount_to_max_", time_window)
  col_median <- paste0(key, "_", "amount_to_median_", time_window)
  col_total  <- paste0(key, "_", "amount_to_total_", time_window)

  # Defaults used when there is no history in the window.
  df[, col_avg]    <- 1
  df[, col_max]    <- 0
  df[, col_median] <- 1
  df[, col_total]  <- 0

  # seq_len() instead of 1:nrow(df): handles an empty data frame safely.
  for (i in seq_len(nrow(df))) {
    current_date   <- df[i, "date"]
    current_amount <- df[i, "amount"]
    current_key    <- df[i, key_col]
    # History: strictly earlier records (recordnum < i) for the same
    # entity whose date falls within the look-back window.
    # (Named `hist_rows` rather than `subset` to avoid shadowing base::subset.)
    hist_rows <- df[df$date >= current_date - time_window &
                      df$recordnum < i &
                      df[[key_col]] == current_key,
                    c(key_col, "amount")]
    if (nrow(hist_rows) != 0) {
      df[i, col_avg]    <- current_amount / mean(hist_rows$amount)
      df[i, col_max]    <- current_amount / max(hist_rows$amount)
      df[i, col_median] <- current_amount / median(hist_rows$amount)
      df[i, col_total]  <- current_amount / sum(hist_rows$amount)
    }
  }
  df
}
# Timed trial runs on the January subset (kept for reference, commented out).
# ptm <- proc.time()
# data_Jan = build_var(data_Jan, 3, "card")
# proc.time() - ptm
# # 14s
#
# ptm <- proc.time()
# data_Jan = build_var(data_Jan, 7, "card")
# proc.time() - ptm
# # 14s
#
# ptm <- proc.time()
# data_Jan = build_var(data_Jan, 3, "merchant")
# proc.time() - ptm
# # 5s
#
# ptm <- proc.time()
# data_Jan = build_var(data_Jan, 7, "merchant")
# proc.time() - ptm
# # 5s
############## Run on the whole dataset #############
# Build all four key/window combinations on the full dataset.
# Each call rewrites `data` in place (adds four columns); timings below
# were recorded on the original machine.
ptm <- proc.time()
data = build_var(data, 3, "card")
proc.time() - ptm
# 1016 s = 17 min
ptm <- proc.time()
data = build_var(data, 3, "merchant")
proc.time() - ptm
# 774 s = 13 min
ptm <- proc.time()
data = build_var(data, 7, "card")
proc.time() - ptm
# 1265 s = 21 min
ptm <- proc.time()
data = build_var(data, 7, "merchant")
proc.time() - ptm
# 866 s = 14 min
# Persist the augmented dataset for the next stage of the pipeline.
save(data, file = "data_type_a.Rdata")
|
b7b768f457a7348129f79f8d6ec039374709224d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ashr/examples/mixcdf.Rd.R
|
4baca8553cec29c8b2c4d7651ade2ade0691e387
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
mixcdf.Rd.R
|
# Example script extracted from the ashr package Rd page for mixcdf.
# Evaluates the CDF of a two-component normal mixture on a grid.
library(ashr)
### Name: mixcdf
### Title: mixcdf
### Aliases: mixcdf
### ** Examples
# Mixture: equal weights, both components centered at 0, sds 1 and 2;
# evaluated at 100 points on [-4, 4].
mixcdf(normalmix(c(0.5,0.5),c(0,0),c(1,2)),seq(-4,4,length=100))
|
3d0db0e357e36a1f1fd19e6e23f37cd20994bb3d
|
73ebcf788041071a87add04bec5b08675573b78f
|
/cuda/hw3.R
|
b9f44a46585202a2c4849c8eaaba0fce278d1710
|
[] |
no_license
|
rachan5/ECS158HW2
|
3c721b23f0dcf6830531ca57b75d71ef32b56919
|
443e21edcb3a3622a2f6ed913ee86d7bf5fdcbe3
|
refs/heads/master
| 2020-04-10T00:08:04.302301
| 2015-03-21T06:51:37
| 2015-03-21T06:51:37
| 30,328,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
hw3.R
|
# Demo data: the integers 1..10 and 11..20, each repeated twice.
x <- c(1, 1,2,2, 3,3, 4,4, 5,5, 6,6, 7,7, 8,8, 9,9, 10,10)
y <- c(11,11, 12,12, 13,13, 14,14, 15,15, 16,16, 17,17, 18,18, 19,19, 20,20)

# Moving-window mean smoother.
# For each point x[i], returns the mean of all y[j] whose x[j] lies
# strictly within distance h of x[i] (the point itself included).
# Returns a numeric vector the same length as x; a window containing no
# points yields NaN (mean of an empty vector).
smoother <- function(x, y, h) {
  mean_close <- function(t) mean(y[abs(x - t) < h])
  # vapply instead of sapply: guarantees a numeric result even for
  # empty input, where sapply would silently return an empty list.
  vapply(x, mean_close, numeric(1))
}

print(smoother(x, y, 2))
|
5e259bd0ca26b1975064087a4b1a3edccd0ff638
|
4ca76a3cef4af592ba8ab121aae35f5eccb59670
|
/man/write_hrc2.Rd
|
36f8cf3b50181b8ef531f6c0665a6d0594377826
|
[
"MIT"
] |
permissive
|
InseeFrLab/rtauargus
|
5e9405d3453a534adc235ec71f5d559de00f8f62
|
f3810aff361d2eb7aa31d47e38fe1943f42733ad
|
refs/heads/master
| 2023-08-18T22:24:22.967560
| 2023-07-20T16:33:36
| 2023-07-20T16:33:36
| 442,119,707
| 4
| 4
|
MIT
| 2023-09-05T11:56:35
| 2021-12-27T09:57:18
|
R
|
UTF-8
|
R
| false
| true
| 11,385
|
rd
|
write_hrc2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writehrc.R
\name{write_hrc2}
\alias{write_hrc2}
\title{Creates a hrc file from correspondence table}
\usage{
write_hrc2(
corr_table,
file_name = NULL,
sort_table = FALSE,
rev = FALSE,
hier_lead_string = getOption("rtauargus.hierleadstring"),
adjust_unique_roots = FALSE,
add_char = "ZZZ"
)
}
\arguments{
\item{corr_table}{Data frame. Correspondence table, from most aggregated level to most detailed one
\cr
Table de correspondance, du niveau le plus agrรฉgรฉ au niveau le plus fin}
\item{file_name}{character string. Name for the output file (with .hrc extension or not).
If NULL (default), file_name is set to the same name as the correspondence table
\cr
Nom du fichier en sortie (avec ou sans l'extension .hrc) ; Si NULL (par dรฉfaut),
le nom du fichier sera identique au nom de la table de correspondance.}
\item{sort_table}{boolean. If TRUE, table will be sorted beforehand.
(default to FALSE)\cr
Si TRUE, la table sera triรฉe avant traitement. (dรฉfaut ร FALSE)}
\item{rev}{boolean. If TRUE, column order is reversed.\cr
Si TRUE, inverse l'ordre des colonnes.}
\item{hier_lead_string}{character. (Single) character indicating the
hierarchy depth in the .hrc file. By default, the value is set to the current
value mentionned in the package options (i.e. "@" at the package startup).
\cr
Caractรจre unique repรฉrant le niveau de profondeur dans le .hrc}
\item{adjust_unique_roots}{boolean. If TRUE will add fictional roots to the
correspondence table, by doing so there will be no unique roots in the hrc file.
With tabular function, unique roots are not handled by Tau-Argus. \cr
Si TRUE la fonction va ajouter des feuilles fictives au fichier .hrc afin
qu'il n'y ait plus de feuilles uniques. Les feuilles uniques peuvent gรฉnรฉrer
des problรจmes dans l'exรฉcution de Tau-Argus}
\item{add_char}{character If adjust_unique_roots is TRUE add_char is the string that will
be used to create fictional roots, be sure that this string does not create
duplicates.The string will be paste at the beginning of a unique root
default = "ZZZ" \cr
character Si adjust_unique_roots est TRUE add_char est l'รฉlรฉment qui sera
utilisรฉ afin de crรฉer des feuilles fictives, il faut รชtre sur que cela
ne crรฉe pas de doublons dans la hiรฉrarchie.La chaine de caractรจre sera
ajoutรฉ au dรฉbut d'une feuille unique. Par defaut :"ZZZ"}
}
\value{
Invisible. Path to the written .hrc file.
\cr
Chemin vers le fichier .hrc.
}
\description{
Creates a .hrc hierarchy from a correspondence table. \cr
Ecrit une hiรฉrarchie .hrc ร partir d'une table de correspondance.
}
\details{
Creates a .hrc hierarchy file adapted to tau-Argus from a
correspondence table fully describing it. By default, lines are sorted
alphabetically so as to regroup identical levels.
Ecrit un fichier de hiรฉrarchie .hrc lisible par tau-Argus ร
partir d'une table de corrrespondance la dรฉcrivant complรจtement. Par dรฉfaut,
les lignes du tableau seront triรฉes afin de regrouper les niveaux de
hiรฉrarchie identiques.
}
\section{Details about correspondence table & .hrc}{
Hierarchy files read by tau-Argus are expected to follow a strict pattern.
This function mimics some of its rigidities.
\cr
1 \strong{Ideal case}
Here is how a correspondence table is assumed to look like:
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
planet \tab gasgiant \cr
star \tab bluestar \cr
star \tab whitedwarf \cr
star \tab reddwarf \cr
other \tab blackhole \cr
other \tab pulsar \cr
}
Columns must be ordered from most aggregated to most detailed.
If they are in reverse order, you may want to use rev = TRUE. In any other
case, please reorder columns by hand.\cr
Hierarchy must be well-nested : fine levels must systematically be nested
into unique higher levels. If this is not compatible with your situation,
you will have to split it in different hierarchies and insure common cells
are correctly protected (seek further documentation or help if needed).
\cr
2 \strong{Dealing with NAs}
The write_hrc2 function has to be preferably used without any NAs in your
correspondence table. In presence of NAs, the \strong{sort} argument
has to be to FALSE. Indeed, NAs would be sorted together and, thus,
be separated from their expected place in the hierarchy.
Below, we introduce two common cases where correspondence tables could have
NAs. The first one is supported by the function, the second one is not.
Please be careful when dealing with NAs and check thoroughly the
resulting .hrc file, or consider filling in NAs beforehand.
2.1 \emph{Sparse hierarchies} \cr
Hierarchy is sparse when NAs are inserted instead of repeating under a given
level.
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
\tab gasgiant \cr
star \tab bluestar \cr
\tab whitedwarf \cr
\tab reddwarf \cr
other \tab blackhole \cr
\tab pulsar \cr
}
Such cases still issue a warning for the presence of NAs, but do not pose
any problem, if \strong{sort=FALSE} is set.
2.2 \emph{Non-uniform hierarchies}\cr
Hierarchies with non-uniform depth happen when some levels are not detailed
to the lowest detail, creating NAs.
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
planet \tab gasgiant \cr
star \tab \cr
other \tab blackhole \cr
other \tab pulsar \cr
}
Processing such a file will generate an error with the following messages:
\emph{Missing values on the last column of the correspondence table is not allowed.
If relevant, you could fill in with the value of the previous column}
}
\section{Dรฉtails sur les tables de correspondance et le .hrc}{
Tau-Argus attend des fichiers รฉcrits avec prรฉcision. Certaines de ses
rigiditรฉs sont reproduites par cette fonction.
\cr
1 \strong{Cas idรฉal}
Voici l'aspect gรฉnรฉral que devrait avoir une table de correspondance :
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
planet \tab gasgiant \cr
star \tab bluestar \cr
star \tab whitedwarf \cr
star \tab reddwarf \cr
other \tab blackhole \cr
other \tab pulsar \cr
}
Les colonnes doivent รชtre ordonnรฉes du niveau le plus agrรฉgรฉ au plus fin.
Si elles sont en sens inverse, l'option rev = TRUE permet de les mettre en
ordre. Dans toute autre situation, vous devrez d'abord les ordonner ร la
main.
\cr
La hiรฉrarchie doit รชtre bien emboรฎtรฉe : un niveau fin doit systรฉmatiquement
correspondre ร un unique niveau agrรฉgรฉ. Si cette exigence n'est pas remplie,
il faudra crรฉer plusieurs hiรฉrarchies et faire en sorte que les cellules
communes soient correctement protรฉgรฉes (au besoin, consultez la documentation
ou chercher de l'aide).
\cr
2 \strong{Valeurs manquantes}
La fonction write_hrc2 doit รชtre utilisรฉe de prรฉfรฉrence sans aucun NA dans votre
table de correspondance. En prรฉsence de NAs, l'argument \strong{sort}
doit รชtre ร FALSE. En effet, les NAs seraient triรฉs ensemble et, donc,
รชtre sรฉparรฉes de leur place attendue dans la hiรฉrarchie.
Ci-dessous, nous prรฉsentons deux cas courants oรน les tables de correspondance
pourraient avoir NAs. Le premier cas est pris en charge par la fonction,
le second ne l'est pas.
Soyez prudent lorsque vous manipulez des NA et vรฉrifiez soigneusement
le fichier .hrc rรฉsultant ou envisagez de remplir les NAs ร l'avance.
2.1 \emph{Hiรฉrarchies creuses} \cr
Une hiรฉrarchie est creuse si des NAs sont insรฉrรฉes au lieu de rรฉpรฉter un
niveau donnรฉ verticalement.
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
\tab gasgiant \cr
star \tab bluestar \cr
\tab whitedwarf \cr
\tab reddwarf \cr
other \tab blackhole \cr
\tab pulsar \cr
}
De tels cas รฉmettent toujours un avertissement du fait de la prรฉsence de NA,
mais ne posent aucun problรจme, si on utilise \strong{sort=FALSE}.
2.2 \emph{Hiรฉrarchies non-uniformes}\cr
Les hiรฉrarchies ร profondeur non-uniforme correspondent aux cas oรน certains
niveaux ne sont pas dรฉtaillรฉs jusqu'au bout, la fin de certaines lignes รฉtant
manquante.
\tabular{lll}{
\strong{type} \tab \strong{details} \cr
\code{-------} \tab \code{------} \cr
planet \tab telluric \cr
planet \tab gasgiant \cr
star \tab \cr
other \tab blackhole \cr
other \tab pulsar \cr
}
Le traitement d'un tel fichier gรฉnรฉrera une erreur avec les messages suivants :
\emph{Missing values on the last column of the correspondence table is not allowed.
If relevant, you could fill in with the value of the previous column}
}
\examples{
# 1. Standard example. Table will be written on your working directory.
# Exemple standard. La table sera รฉcrite dans votre rรฉpertoire de travail.
astral <- data.frame(
type = c("planet", "planet", "star", "star", "star", "other", "other"),
details = c(
"telluric", "gasgiant", "bluestar", "whitedwarf",
"reddwarf", "blackhole", "pulsar")
)
path <- write_hrc2(astral)
\dontrun{read.table(path)}
# Note that line order was changed ('other' comes before 'planet'), to no
# consequence whatsoever for Tau-Argus.
# Remarque : l'ordre des lignes a รฉtรฉ modifiรฉ ('other' arrive avant 'planet'),
# ce qui n'a aucune consรฉquence pour Tau-Argus.
# Wrong column order:
# Mauvais ordonnancement des colonnes :
astral_inv <- data.frame(
details = c(
"telluric", "gasgiant", "bluestar", "whitedwarf",
"reddwarf", "blackhole", "pulsar"),
type = c("planet", "planet", "star", "star", "star", "other", "other")
)
path <- write_hrc2(astral_inv)
\dontrun{read.table(path)}
# Because of the inverted order, everything is written backwards : planet is a
# subtype of gasgiant, etc.
# ร cause de l'inversion des colonnes, tout est รฉcrit ร l'envers : planet est
# devenu une sous-catรฉgorie de gasgiant, par exemple.
# Correction :
path <- write_hrc2(astral_inv, rev = TRUE)
\dontrun{read.table(path)}
# 2.1 Sparse case
# Cas creux
astral_sparse <- data.frame(
type = c("planet", NA, "star", NA, NA, "other", NA),
details = c(
"telluric", "gasgiant", "bluestar", "whitedwarf",
"reddwarf", "blackhole", "pulsar")
)
# NAs in general are risky, but, in this case, the function works well.
# Les valeurs manquantes causent un risque, mais, dans ce genre de cas,
# la fonction a le comportement attendu.
path <- write_hrc2(astral_sparse)
\dontrun{read.table(path)}
# 2.2 Non-uniform depth
# Hiรฉrarchie non-uniforme
astral_nu <- data.frame(
type = c("planet", "planet", "star", "other", "other"),
details = c("telluric", "gasgiant", NA, "blackhole", "pulsar")
)
# The following code will generate an error
# (see section Details about correspondence table & .hrc)
\dontrun{
path <- write_hrc2(astral_nu)
}
#To fix the issue, you have to fill in the NAs beforehand.
astral_nu_fill <- data.frame(
type = c("planet", "planet", "star", "other", "other"),
details = c("telluric", "gasgiant", "star", "blackhole", "pulsar")
)
# The following code will work
path <- write_hrc2(astral_nu_fill)
\dontrun{read.table(path)}
}
|
c225dc14bd82e26f163054b9ff652193f7fd3130
|
74d1d03ce2ec81c7f34d6ee0246a7d41db9568f9
|
/simulate_functions_vsv.R
|
38fe1d03beb61d054a3595f40f7b6efba6dae9ab
|
[] |
no_license
|
ruslana-tymchyk/accuracy_simulations
|
3ad3d441c904b93cfd90480a19c78888fed210a9
|
a9c6bd67f304af02cf49270466c8d198d2ac8e93
|
refs/heads/master
| 2020-07-03T05:41:16.078903
| 2019-08-11T19:56:47
| 2019-08-11T19:56:47
| 201,804,926
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,736
|
r
|
simulate_functions_vsv.R
|
#-------------------
#----DESCRIPTION----
#-------------------
#Distributions: Normal & T (truncated or not)
#Analysis: GLM & ANOVA (optional - Bayesian Anova)
#Design: Between-Subjects
#This set of function can be used to simulate and analyse the datasets using Drift Diffusion Model.
#The data is being sampled from a distribution that best fits the given parameter
#The sampled parameters are then passed to rdiffusion to generate the data
#The data is then analysed using GLM and ANOVA
#-----------------Defining Distributions-------
#This code is required for the t-distribution, as the t-distribution specified in R does not permit to set location and scale parameters
# Density of a location-scale shifted t distribution: evaluate the
# standard t density at the standardized value and rescale by 1/scale.
dmyt <- function(x, location, scale, df) {
  z <- (x - location) / scale
  dt(z, df) / scale
}
# CDF of the location-scale shifted t distribution: probability that a
# standard t variate falls below the standardized quantile.
pmyt <- function(q, location, scale, df) {
  z <- (q - location) / scale
  pt(z, df)
}
# Quantile function of the location-scale shifted t distribution:
# scale the standard t quantile, then shift by the location.
qmyt <- function(p, location, scale, df) {
  location + scale * qt(p, df)
}
# Random draws from the location-scale shifted t distribution:
# sample n standard t variates, scale them, then shift by the location.
rmyt <- function(n, location, scale, df) {
  location + scale * rt(n, df)
}
#Function that defines a truncated normal distribution
# Draws a single value from a normal distribution truncated to [a, b].
#
# par: numeric vector of summary statistics -- par[1] = mean, par[2] = sd
# a, b: lower / upper truncation bounds
#
# Returns one draw. Relies on truncdist::rtrunc, which dispatches on the
# "norm" suffix to qnorm/pnorm internally.
gen_norm <- function(par, a, b) {
rtrunc(
1,
spec = "norm",
a = a, #lower limit
b = b, #upper limit
mean = par[1],
sd = par[2]
)
}
#Function that defines a truncated T-distribution
# Draws a single value from the custom location-scale t distribution
# (the *myt family defined above) truncated to [a, b].
#
# par: numeric vector -- par[1] = df, par[2] = location, par[3] = scale
# a, b: lower / upper truncation bounds
#
# Returns one draw. truncdist::rtrunc resolves spec = "myt" to the
# dmyt/pmyt/qmyt functions defined earlier in this file.
gen_myt <- function(par, a, b) {
rtrunc(
1,
spec = "myt",
a = a, #lower limit
b = b, #upper limit
df = par[1],
location = par[2],
scale = par[3]
)
}
#-----------------Sampling the parameter values-------
#Function accepts the summary statistics of each parameter as an argument and
#returns the vector of parameter values sampled from a best-fit distribution
#
# a, z, st0: numeric vectors c(mean, sd) for the truncated normals
# v, t0, sv: numeric vectors c(df, location, scale) for the truncated t's
#
# Returns a named numeric vector with elements a, z, st0, v, t0, sv --
# one sampled diffusion-model parameter each.
par_from_val <- function(a, z, st0, v, t0, sv) {
c(
#defining limits of truncation and distribution for each parameter
a = gen_norm(par = a, a = 0, b = Inf),
z = gen_norm(par = z, a = 0, b = 1),
st0 = unlist(gen_norm(
par = st0, a = 0, b = Inf
)),
v = unlist(gen_myt(
par = v,
a = -10 + v[2],
b = 10 - v[2]
#addition and subtraction of the location parameter is required
#to ensure that the upper and lower thresholds deviate by 10
#points from the central point of the distribution rather than
#from zero
)),
t0 = unlist(gen_myt(
par = t0, a = 0, b = Inf
)),
sv = unlist(gen_myt(
par = sv, a = 0, b = 10
))
)
}
#-----------------Simulating the data-------
# Simulates diffusion-model data for `pp` participants x `n` trials.
# Each participant gets freshly sampled parameters (par_from_val); data
# is generated with rtdists::rdiffusion. Returns a data frame with one
# row per trial, plus `group` and a "group_participant" `id` column.
simulate_dt <- function(a, z, st0, v, t0, sv, n, pp, group) {
params <- function(a, z, st0, v, t0, sv) {
repeat{
values <- par_from_val(a, z, st0, v, t0, sv)
trials <- rdiffusion(
n = n,
v = values[["v"]],
a = values[["a"]],
t0 = values[["t0"]],
sv = values[["sv"]],
st0 = values[["st0"]],
z = values[["z"]],
stop_on_error = FALSE
) #simulates n trials for 1 participant
# With stop_on_error = FALSE, rdiffusion signals a bad parameter
# combination via degenerate output (all-zero rt) instead of erroring.
if (mean(trials$rt) != 0) {break}
}
#when the combination of sampled values produces an error in rdiffusion,
#the values are resampled and the simulation is run again
return(trials)
}
result <- rerun(pp, params(a, z, st0, v, t0, sv)) %>% #simulating for pp number of participants
rbindlist(., idcol = TRUE) %>% #adds id column
mutate(group = rep(group)) #adds group number
result <- result %>% rename("id" = ".id") %>%
mutate(id = paste0(group, "_", id)) #adding participant identifier
return(result)
}
#-----------------Analysing the simulated data-------
#Analysing 1 dataset with 'pp' participants and 'n' number of trials
# Simulates two groups with IDENTICAL parameters (a null effect), then
# tests for a group difference in response proportions with both an
# ANOVA (afex::aov_ez) and a binomial GLM (car::Anova, type 3).
# Returns a one-row tibble of p-values, proportions and all input
# parameter summaries, for aggregation across simulation runs.
data_analysis <- function(a,z,st0,v,t0,sv,
n,pp) {
s1 <- simulate_dt(
a = a,
z = z,
st0 = st0,
v = v,
t0 = t0,
sv = sv,
n = n,
pp = pp,
group = 1
) #simulating group 1
pp_g1 <- as.numeric(as.character(summarise(s1, n_distinct(id)))) #calculates final number of pp's
s2 <- simulate_dt(
a = a,
z = z,
st0 = st0,
v = v,
t0 = t0,
sv = sv,
n = n,
pp = pp,
group = 2
) #simulating group 2
#Note: parameters for both groups are the same
pp_number <- as.numeric(as.character(summarise(s2, n_distinct(id)))) #number of participants
ss <- rbind(s1, s2) %>% #binds simulated datasets together
mutate(response = ifelse(response == "upper", 1, 0)) #transforms response into numeric
mean_prop <- ss %>%
group_by(group) %>%
summarise(diff_props = mean(response)) #proportion of upper responses by group
g1_prop <- mean_prop$diff_props[1] #proportion of upper for group 1
g2_prop <- mean_prop$diff_props[2] #proportion of upper for group 2
mean_prop_real <- ss %>%
summarise(props = mean(response))
mean_prop_real <- as.numeric(mean_prop_real) #proportion of upper responses
#for both groups combined
diff_props <- g1_prop - g2_prop #difference in proportion
# NOTE(review): only the all-"upper" case (mean == 1) is special-cased;
# an all-zero dataset would still reach aov_ez -- confirm intended.
if (mean_prop_real == 1) {
aov_p = 1} #in cases when all responses are the same, set p-value to 1
#so that anova does not produce an error
#particularly important when number of trials is low
else {
aov_ss <- aov_ez(
id = "id",
dv = "response",
data = ss,
between = "group",
fun_aggregate = mean
) #runs anova
aov_p <- summary(aov_ss)[["Pr(>F)"]][[1]] #extracting p-value
}
# Aggregate to one proportion per participant; n_trials is carried as
# the binomial weight for the GLM below.
ss_for_glm <- ss %>%
group_by(id,group) %>%
summarise(resp_prop = mean(response),
n_trials = n()) %>%
ungroup %>%
mutate(group = factor(group))
glm_ss <- glm(
resp_prop ~ group,
data = ss_for_glm,
weights = n_trials,
family = binomial
) #runs glm
glm_anova <- car::Anova(glm_ss, type = 3)
glm_p <- glm_anova$`Pr(>Chisq)` #extracting p-value
# Optional Bayesian ANOVA (BayesFactor::lmBF), disabled by default:
# bf_ss <- lmBF(response ~ group,
# data = ss)
#runs Bayesian Anova
#bf <- extractBF(bf_ss)$bf #extracts bayes factor
data <-
tibble(
aov_p = aov_p,
glm_p = glm_p,
#bf = bf,
diff_props = diff_props,
mean_prop_real = mean_prop_real,
g1_prop = g1_prop,
g2_prop = g2_prop,
n_g1 = n,
n_g2 = n,
pp_number = pp_number,
a_mean = a[1],
a_sd = a[2],
z_mean = z[1],
z_sd = z[2],
st0_mean = st0[1],
st0_sd = st0[2],
v_df = v[1],
v_loc = v[2],
v_scale = v[3],
t0_df = t0[1],
t0_loc = t0[2],
t0_scale = t0[3],
sv_df = sv[1],
sv_loc = sv[2],
sv_scale = sv[3]
) #produces a one-row tibble with all the values required for further analysis
data
}
# Run the full simulate-and-analyse pipeline `runs` times and stack the
# one-row summaries into a single data frame (one row per simulated
# dataset). All arguments except `runs` are forwarded to data_analysis().
reruns <- function(a, z, st0, v, t0, sv,
                   n, pp, runs) {
  simulations <- rerun(runs,
                       data_analysis(a, z, st0, v, t0, sv, n, pp))
  stacked <- map_dfr(simulations, as_tibble)
  as.data.frame(stacked)
}
|
10c8de2b650330c5d1e6e27882e6445473358397
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/genius/examples/add_genius.Rd.R
|
2aa6b4455a0be5f9f8db39c4b409c25d84fb4c61
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
add_genius.Rd.R
|
# Example script extracted from the genius package Rd page for add_genius,
# which joins song/album lyrics onto a data frame of artists.
library(genius)
### Name: add_genius
### Title: Add lyrics to a data frame
### Aliases: add_genius
### ** Examples
## No test:
# Album-level lookup: one row per (artist, album) pair.
artist_albums <- tribble(
~artist, ~album,
"J. Cole", "KOD",
"Sampha", "Process"
)
artist_albums %>%
add_genius(artist, album)
# Track-level lookup: type = "lyrics" switches to per-song retrieval.
artist_songs <- tribble(
~artist, ~track,
"J. Cole", "Motiv8",
"Andrew Bird", "Anonanimal"
)
artist_songs %>%
add_genius(artist, track, type = "lyrics")
## End(No test)
|
7cb3d3d0c125dde714ceec9b31f9a8ee152cf689
|
ca4cc9c323fe000df7189a448dd59618f70b8c2f
|
/man/imputeMinDiv2.Rd
|
633eb9b3a008f6560378e2430345a7953dc563f3
|
[
"BSD-2-Clause"
] |
permissive
|
PNNL-Comp-Mass-Spec/RomicsProcessor
|
235c338d2192f385d408e55c302868e37ff9dc06
|
72d35c987900febc3e6c6ed416d4d72dc5820075
|
refs/heads/master
| 2023-03-18T08:14:48.098980
| 2023-03-15T16:50:14
| 2023-03-15T16:50:14
| 206,400,976
| 4
| 2
|
BSD-2-Clause
| 2022-12-05T21:55:36
| 2019-09-04T19:49:26
|
HTML
|
UTF-8
|
R
| false
| true
| 889
|
rd
|
imputeMinDiv2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/04_Manage_Missing.R
\name{imputeMinDiv2}
\alias{imputeMinDiv2}
\title{imputeMinDiv2()}
\usage{
imputeMinDiv2(romics_object)
}
\arguments{
\item{romics_object}{has to be an romics_object created using romicsCreateObject()}
}
\value{
The function returns a modified romics_object with imputed data; however, the missing-data layer preserves the locations of the missing values, which can subsequently be restored using the function romicsRestoreMissing().
}
\description{
Imputes the data layer of the romics_object using the minimum value of the table divided by 2. This function will only work if the data contains exclusively positive values.
}
\details{
This function will use the minimum value of the data table divided by 2 to impute the missing values of the data layer
}
\author{
Geremy Clair
}
|
9a0483e7e947510fb15c657c97c22b6cd7f55088
|
7706cfba17c70548436fb190add59e7ed6e14199
|
/man/crossv_kfold.Rd
|
08477bf7bbf135659db6a352891d7bf1c1adf806
|
[] |
no_license
|
jrnold/resamplr
|
0997d9076f9635963bea867559de742592fbd18b
|
72242df726e87fc3bd9e5c6a93bbdb5e4f9851d1
|
refs/heads/master
| 2021-01-19T22:01:15.942589
| 2018-07-22T21:39:32
| 2018-07-22T21:39:32
| 83,233,025
| 36
| 5
| null | 2018-03-15T10:15:21
| 2017-02-26T19:04:07
|
R
|
UTF-8
|
R
| false
| true
| 3,254
|
rd
|
crossv_kfold.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kfold.R
\name{crossv_kfold}
\alias{crossv_kfold}
\alias{crossv_kfold.data.frame}
\alias{crossv_kfold.grouped_df}
\title{Generate cross-validated K-fold test-training pairs}
\usage{
crossv_kfold(data, K, ...)
\method{crossv_kfold}{data.frame}(data, K = 5L, shuffle = TRUE, ...)
\method{crossv_kfold}{grouped_df}(data, K = 5L, shuffle = TRUE,
stratify = FALSE, ...)
}
\arguments{
\item{data}{A data frame}
\item{K}{The number of folds}
\item{...}{Arguments passed to methods}
\item{shuffle}{If \code{TRUE}, randomly assign observations to folds.
Otherwise, observations are sequentially assigned to folds.}
\item{stratify}{If \code{TRUE}, within each group observations are split
into folds, and those folds combined. If \code{FALSE}, groups are assigned
into folds.}
}
\value{
A data frame with \code{K} rows and the following columns:
\describe{
\item{sample}{A list of \code{\link[modelr]{resample}} objects. Training sets.}
\item{.id}{An integer vector of identifiers.}
}
}
\description{
Generate cross-validated K-fold test-training pairs. In addition to ordinary
K-fold cross-validation, this supports stratified K-fold cross validation
if \code{data} is a grouped data frame and \code{stratify = TRUE}, and
Group K-fold if \code{data} is a grouped data frame and \code{stratify = FALSE}.
}
\section{Methods (by class)}{
\itemize{
\item \code{data.frame}: Splits rows in a data frame into folds.
\item \code{grouped_df}: Partitions rows within each group of a grouped data frame
into folds if \code{stratify = FALSE}. This ensures that the test and training
sets will have approximately equal proportions of each group. If \code{stratify = TRUE},
then the groups are partitioned into folds.
}}
\examples{
# Example originally from modelr::crossv_mc
library("purrr")
library("dplyr")
# 5-fold cross-validation
cv1 <- crossv_kfold(mtcars, K = 5)
models <- map(cv1$train, ~ lm(mpg ~ wt, data = .))
summary(map2_dbl(models, cv1$test, modelr::rmse))
# k-fold by group
cv2 <- crossv_kfold(group_by(mtcars, cyl), K = 2)
models <- map(cv2$train, ~ lm(mpg ~ wt, data = .))
summary(map2_dbl(models, cv2$test, modelr::rmse))
# stratified k-fold
cv3 <- crossv_kfold(group_by(mtcars, am), K = 3, stratify = TRUE)
models <- map(cv3$train, ~ lm(mpg ~ wt, data = .))
summary(map2_dbl(models, cv3$test, modelr::rmse))
}
\references{
\itemize{
\item{Breiman, L., Friedman, J.H., Olshen, R.A. and Stone, C.J. (1984) Classification and Regression Trees. Wadsworth.}
\item{Burman, P. (1989) A comparative study of ordinary cross-validation, v-fold cross-validation and repeated learning-testing methods. Biometrika, 76, 503โ514}
\item{Davison, A.C. and Hinkley, D.V. (1997) Bootstrap Methods and Their Application. Cambridge University Press.}
\item{Efron, B. (1986) How biased is the apparent error rate of a prediction rule? Journal of the American Statistical Association, 81, 461โ470.}
\item{Stone, M. (1974) Cross-validation choice and assessment of statistical predictions (with Discussion). Journal of the Royal Statistical Society, B, 36, 111โ147.}
}
}
\seealso{
This function has more features than the \pkg{modelr} function
\code{\link[modelr]{crossv_kfold}}.
}
|
fe68be34da26f20214579c9e941d3f29903fcadf
|
75a635ec3af04c3898867cca8ea5cf11c6409533
|
/Chapter04/glove.R
|
b414e714d9fb59c026fda14bf1a6a78337d85135
|
[
"MIT"
] |
permissive
|
shantanu1402/R-Machine-Learning-Projects
|
dd644f1fff8f0ed3cd29f7c5fba55bfc78e693b2
|
2f5c1a6a2b6a2e75edfcea6d60feb1ca95d24c85
|
refs/heads/master
| 2023-02-17T11:52:03.583511
| 2021-01-15T05:44:26
| 2021-01-15T05:44:26
| 464,884,312
| 1
| 0
|
MIT
| 2022-03-01T12:29:47
| 2022-03-01T12:29:46
| null |
UTF-8
|
R
| false
| false
| 3,051
|
r
|
glove.R
|
# Sentiment-analysis pipeline: train GloVe word embeddings on review text
# (text2vec), average them per review (softmaxreg), then classify
# sentiment with a random forest and report accuracy.
# including the required library
library(text2vec)
# setting the working directory
# NOTE(review): hard-coded absolute path; script only runs on the
# original author's machine as-is.
setwd('/home/sunil/Desktop/sentiment_analysis/')
# reading the dataset (expects columns SentimentText and Sentiment)
text = read.csv(file='Sentiment Analysis Dataset.csv', header = TRUE)
# subsetting only the review text so as to create the GloVe word embedding
wiki = as.character(text$SentimentText)
# Create iterator over tokens
tokens = space_tokenizer(wiki)
# Create vocabulary. Terms will be unigrams (simple words).
it = itoken(tokens, progressbar = FALSE)
vocab = create_vocabulary(it)
# keep a term in the vocabulary only if it appears at least three times in the dataset
vocab = prune_vocabulary(vocab, term_count_min = 3L)
# Use the filtered vocabulary
vectorizer = vocab_vectorizer(vocab)
# use a window of 5 context words to create a term co-occurrence matrix
tcm = create_tcm(it, vectorizer, skip_grams_window = 5L)
# create the GloVe embedding for each term in the vocab;
# the dimension of the word embedding is set to 50;
# x_max is the maximum number of co-occurrences used in the weighting function
glove = GlobalVectors$new(word_vectors_size = 50, vocabulary = vocab, x_max = 100)
wv_main = glove$fit_transform(tcm, n_iter = 10, convergence_tol = 0.01)
# The GloVe model learns two sets of word vectors - main and context.
# Both matrices may be added to get the combined word vector.
wv_context = glove$components
word_vectors = wv_main + t(wv_context)
# converting the word_vector to a dataframe for visualization
word_vectors=data.frame(word_vectors)
# the word for each embedding is set as row name by default;
# tibble::rownames_to_column copies the rownames into the first column,
# which we name "words"
library(tibble)
word_vectors=rownames_to_column(word_vectors, var = "words")
View(word_vectors)
# we make use of the softmaxreg library to obtain the mean word vector for each review
# this is similar to what we did with the word2vec pre-trained embedding in the previous section
# observe that we are passing our own trained word embedding "word_vectors" to the wordEmbed function
library(softmaxreg)
docVectors = function(x)
{
wordEmbed(x, word_vectors, meanVec = TRUE)
}
# applying the docVectors function on the entire reviews dataset;
# this yields one mean embedding vector (row) per review
temp=t(sapply(text$SentimentText, docVectors))
View(temp)
# splitting the dataset into train and test portions
# NOTE(review): a fixed 800/200 split assumes exactly 1000 rows -- confirm.
temp_train=temp[1:800,]
temp_test=temp[801:1000,]
labels_train=as.factor(as.character(text[1:800,]$Sentiment))
labels_test=as.factor(as.character(text[801:1000,]$Sentiment))
# using randomForest to build a model on train data
library(randomForest)
rf_senti_classifier=randomForest(temp_train, labels_train,ntree=20)
print(rf_senti_classifier)
# predicting labels using the random forest model created
rf_predicts<-predict(rf_senti_classifier, temp_test)
# estimating the accuracy from the predictions
library(rminer)
print(mmetric(rf_predicts, labels_test, c("ACC")))
|
7700a32b4bc9596b2dbf49b69a966b50808a1ae9
|
bf943775e1f3d22300fa315fc53a0f1895c72902
|
/makePlots.R
|
198a878e50336efdd42a02a5b5dabd8fdd24b6ba
|
[] |
no_license
|
Rajat-181/Tennis-Analytics-in-R
|
e2da720f7598075fb78090466c69fe92138276e4
|
c7763a7eb9275ffd8c735704cf4d5c036ffe1411
|
refs/heads/master
| 2020-04-01T07:36:57.044899
| 2018-10-16T18:39:42
| 2018-10-16T18:39:42
| 152,996,275
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
makePlots.R
|
# Quick check: number of distinct players appearing on the .x side.
length(unique(data$Player_ID.x))

#' Summarise one player's matches between two dates.
#'
#' Relies on the globals `data` (match-level table with paired .x/.y player
#' columns) and `ratings` (player rating table) being in scope.
#' Also draws an aces-over-time scatter plot as a side effect.
#'
#' @param playername player name as it appears in `data`, e.g. "Federer, Roger"
#' @param datestart1 POSIXct lower bound on match date (exclusive)
#' @param datestart2 POSIXct upper bound on match date (exclusive)
#' @return a one-row data.frame of summary statistics for the player
playerdt <- function(playername, datestart1, datestart2) {
  # FIX: the original body immediately overwrote all three arguments with
  # hard-coded debugging values ("Federer, Roger", 2016-01-08, 2018-01-08),
  # making the parameters dead; those lines have been removed.
  subdata = data[(data$Scheduled > datestart1 & data$Scheduled < datestart2), ]
  player_matches <- subdata[subdata$Player_name.x == playername |
                              subdata$Player_name.y == playername, ]
  pid <- ratings[ratings$Player_Name == playername, 'player_id'][1]

  # Aces for this player, whether they appear on the .x or the .y side.
  aces_1_df <- subdata[subdata$Player_name.x == playername, c('Aces.x', 'Scheduled')]
  aces_2_df <- subdata[subdata$Player_name.y == playername, c('Aces.y', 'Scheduled')]
  colnames(aces_1_df) <- c("Aces", "Date")
  colnames(aces_2_df) <- c("Aces", "Date")
  aces = rbind(aces_1_df, aces_2_df)
  aces$Date <- as.Date(aces$Date, "%m/%d/%Y")
  plot(Aces ~ Date, aces)
  axis(1, aces$Date, cex.axis = .7)

  # FIX: `total_aces` was referenced in the result but never computed.
  total_aces <- sum(aces$Aces, na.rm = TRUE)

  bp_1 <- subdata[subdata$Player_name.x == playername, 'Breakpoints_won.x']
  bp_2 <- subdata[subdata$Player_name.y == playername, 'Breakpoints_won.y']
  total_bp = sum(c(bp_1, bp_2), na.rm = TRUE)

  # NOTE(review): `player_country`, `current_points` and `current_rank` are
  # not defined anywhere in this function; presumably they should be looked
  # up in `ratings` (as `pid` is) -- confirm against the `ratings` schema.
  df <- data.frame(playername, player_country, current_points, current_rank,
                   dim(player_matches)[1], total_aces, total_bp)
  colnames(df) <- c('Player Name', 'Player Country', 'Current Points',
                    'Current Ranking', 'Number of Matches', 'Total Aces',
                    'Total Breakpoints')
  return(df)
}
|
e38759fdca8b55da5e2a1446515839f43a1f4693
|
5c861208fb29b256e6d47ec79c41d6fea7d47310
|
/man/set_zenodo_certificate.Rd
|
efbf3bdf5ae3d1cfd94056a5b495b0cc4a5b207a
|
[
"MIT"
] |
permissive
|
codecheckers/codecheck
|
9f2acf527f5aa3e75619f53aca59873e4ce1ba36
|
b0c6c07cc7d24313809de110c3a04925215ea027
|
refs/heads/master
| 2023-05-22T16:24:38.127552
| 2022-10-11T15:10:08
| 2022-10-11T15:10:08
| 256,862,293
| 6
| 1
|
MIT
| 2020-12-08T20:32:26
| 2020-04-18T22:06:52
|
R
|
UTF-8
|
R
| false
| true
| 734
|
rd
|
set_zenodo_certificate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codecheck.R
\name{set_zenodo_certificate}
\alias{set_zenodo_certificate}
\title{Upload the CODECHECK certificate to Zenodo.}
\usage{
set_zenodo_certificate(zen, record, certificate)
}
\arguments{
\item{zen}{- Object from zen4R to interact with Zenodo}
\item{record}{- string containing the report URL on Zenodo.}
\item{certificate}{name of the PDF file.}
}
\description{
Upload the CODECHECK certificate to Zenodo.
}
\details{
Upload the CODECHECK certificate to Zenodo as a draft. Warning: if
the file has already been uploaded once, you will need to delete it via
the web interface before being able to upload a new version.
}
\author{
Stephen Eglen
}
|
1c8dbd7c94b9537b9c51c3e7a927cc92e26a039c
|
8d81ecafe5095bd5b180d5e1c9d871c66b6a8f76
|
/Rscripts/0112_Rscript_two.R
|
1f566052cf2a84085412d241c21b4709b4ef0a2f
|
[] |
no_license
|
jimrothstein/try_things_here
|
0a3447b5578db293685fb71c7368f1460d057106
|
d2da5ce3698dd21c2fe1a96c30f52cefe6f87de4
|
refs/heads/main
| 2023-08-28T08:55:43.314378
| 2023-08-25T23:47:23
| 2023-08-25T23:47:23
| 98,355,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 337
|
r
|
0112_Rscript_two.R
|
#!/usr/bin/env Rscript
# at command line
#
# =================================
# SELF-CONTAINED
# USAGE: ./095_R_script_execute.R
#
# OTHER CLI:
# - Run R # loads whole thing
# - Rscript -e "8*8" # returns answer only
#
# =================================
print("hello")
# FIX: use FALSE, not the reassignable alias F.
# trailingOnly = FALSE keeps the interpreter/launch arguments in the
# result as well as any user-supplied trailing arguments.
args <- commandArgs(trailingOnly = FALSE)
print(args)
|
2749dd84dceb8b426f9e0c7330b384ee8f0d4c46
|
694286ae6914bc02acbaf1ac983f7ba77ad775f8
|
/R/make_contrasts.R
|
099aacabf570ce8b99fff03791debc4571c11c7e
|
[] |
no_license
|
bcjaeger/graft-loss
|
8c3629fd6a15bec96b3454f8255ff3f33eb6b702
|
058aae6ec33b8d7e041f60dbcc0ea77685bafa24
|
refs/heads/master
| 2023-04-24T10:17:41.838232
| 2021-05-12T18:08:44
| 2021-05-12T18:08:44
| 330,760,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
make_contrasts.R
|
##' Pairwise posterior contrasts between model linear predictors.
##'
##' For every ordered pair of columns in `linpred` (including each column
##' paired with itself), summarises the posterior distribution of the
##' difference with its median, 95% credible interval, and the posterior
##' probability that the difference is positive.
##'
##' @title Pairwise posterior contrasts
##' @param linpred a data frame of posterior draws; one column per model,
##'   one row per draw.
##' @return a tibble with columns `contrast`, `estimate`, `lower`,
##'   `upper`, and `prob_gt_0`.
make_contrasts <- function(linpred) {
  # Zero-row prototype pins the column names/types even when `linpred`
  # has no columns and therefore no contrasts are produced.
  proto <- tibble(contrast = character(),
                  estimate = double(),
                  lower = double(),
                  upper = double(),
                  prob_gt_0 = double())
  grid <- expand.grid(
    m1 = names(linpred),
    m2 = names(linpred),
    stringsAsFactors = FALSE
  ) %>%
    as_tibble()
  # Build one single-row tibble per contrast and bind once at the end;
  # growing a tibble with add_row() inside the loop (as before) is O(n^2).
  rows <- lapply(seq_len(nrow(grid)), function(i) {
    d <- linpred[, grid$m1[i], drop = TRUE] - linpred[, grid$m2[i], drop = TRUE]
    tibble(
      contrast = paste(grid$m1[i], grid$m2[i], sep = '_minus_'),
      estimate = median(d),
      lower = quantile(d, probs = 0.025),
      upper = quantile(d, probs = 0.975),
      prob_gt_0 = mean(d > 0)
    )
  })
  bind_rows(proto, rows)
}
|
1f07f8fd200bbdc30642fe325e633aa6cffc51d5
|
e6237c044b69c29f1c01e2cb7bb435f58a30eeec
|
/R/formula_rdbe.R
|
d4a6d72e7663f32ae2541271394b6db8b1bfcdef
|
[] |
no_license
|
LiChenPU/Formula_manipulation
|
7dbc5c53110b6363991dc58df209f4dd8712bf90
|
79dc992d2de9038035574e9a04e794586b6c452d
|
refs/heads/master
| 2022-01-23T11:03:52.003455
| 2022-01-12T07:44:25
| 2022-01-12T07:44:25
| 177,156,070
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
formula_rdbe.R
|
#' formula_rdbe
#'
#' Computes the ring-and-double-bond equivalent (RDBE) of one or more
#' molecular formulas: 1 plus the sum over elements of (element count x
#' that element's `unsaturation` value from `elem_table`).
#'
#' @param formula character vector of formulas with explicit counts,
#'   e.g. "C2H4O1"; a symbol may carry a bracketed prefix (presumably an
#'   isotope label such as "[2]H" -- see the commented gsub below)
#' @param elem_table a table that records each symbol (`element`) and its
#'   `unsaturation` contribution
#'
#' @return the ring and double bond number, one value per input formula
#' @export
#'
#' @examples formula_rdbe(formula = "C2H4O1", elem_table = lc8::elem_table)
formula_rdbe = function(formula = "C2H4O1", elem_table = lc8::elem_table){
  rdbe = numeric()
  for(i in 1:length(formula)){
    temp_formula = formula[i]
    # temp_formula <- gsub("D", "[2]H", temp_formula)
    # Character-by-character scan state: element2 collects symbols,
    # number2 collects the count that follows each symbol, j is the cursor.
    ende2 <- nchar(temp_formula)
    element2 <- c()
    number2 <- c()
    j <- c(1)
    while (j <= ende2) {
      # Bracketed token: consume "[...]" plus the following letters until
      # a digit (or sign/decimal point) starts, and store it as one symbol.
      if (substr(temp_formula, j, j) == c("[")) {
        b <- j
        while (any(substr(temp_formula, j, j) == c("]")) !=
               TRUE) {
          j <- c(j + 1)
        }
        k <- j
        while (any(substr(temp_formula, j, j) == c("-", ".", "0", "1",
                                                   "2", "3", "4", "5", "6", "7", "8", "9")) !=
               TRUE) {
          j <- c(j + 1)
        }
        m <- c(j - 1)
        element2 <- c(element2, substr(temp_formula, b, m))
      }
      # Plain element symbol: consume consecutive non-digit characters.
      if (any(substr(temp_formula, j, j) == c("-", ".", "0", "1", "2", "3",
                                              "4", "5", "6", "7", "8", "9")) != TRUE) {
        k <- j
        while (any(substr(temp_formula, j, j) == c("-", ".", "0", "1",
                                                   "2", "3", "4", "5", "6", "7", "8", "9")) !=
               TRUE) {
          j <- c(j + 1)
        }
        m <- c(j - 1)
        # Step back one so the j + 1 at the loop bottom lands on the first
        # unconsumed character (the start of the count).
        j <- c(j - 1)
        element2 <- c(element2, substr(temp_formula, k, m))
      }
      # Count: consume digits (sign and decimal point allowed) and parse.
      if (any(substr(temp_formula, j, j) == c("-", ".", "0", "1", "2", "3",
                                              "4", "5", "6", "7", "8", "9")) == TRUE) {
        k <- j
        while (any(substr(temp_formula, j, j) == c("-", ".", "0", "1",
                                                   "2", "3", "4", "5", "6", "7", "8", "9")) ==
               TRUE) {
          j <- c(j + 1)
        }
        m <- c(j - 1)
        j <- c(j - 1)
        number2 <- c(number2, as.numeric(substr(temp_formula,
                                                k, m)))
      }
      j <- j + 1
    }
    # RDBE = 1 + sum(unsaturation * count) over the parsed elements.
    # NOTE(review): if the formula parses to zero elements, 1:length(element2)
    # yields c(1, 0) and this loop misbehaves -- confirm inputs are non-empty.
    rdbe[i]=1
    for (j in 1:length(element2)) {
      rdbe[i] = rdbe[i] + elem_table$unsaturation[element2[j] ==
                                                    elem_table$element] * number2[j]
    }
  }
  return(rdbe)
}
|
95e48929a49ad846f53b0d7cced3954f5296f6c3
|
7067be1932d71266ddaa9a4ecab50e2f26c99539
|
/Submission/Statistical_and_Spatial/Nairobi_Analysis.R
|
146399a1a8efb21cb1e0ad21eb4314427a00ab23
|
[] |
no_license
|
CoryWilliamsGIS/Dissertation
|
d7d793bdde884a2396b4deff8803ca3a9e6c5e5a
|
f182d2eedfd63654dc7ff7fe8b605f1278c0bfb6
|
refs/heads/master
| 2020-03-28T13:55:21.798959
| 2018-09-12T11:44:33
| 2018-09-12T11:44:33
| 148,441,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 125,284
|
r
|
Nairobi_Analysis.R
|
#Global Variations in OpenStreetMap
#Cory Williams
#Greater Manchester Ward Analysis
#Load relevant libraries
library(caret)
library(pls)
library(rgdal)
library(dplyr)
library(ggplot2)
library(ggmap)
library(RColorBrewer)
library(readxl)
library(corrplot)
library(Hmisc)
library(relaimpo)
library(readxl)
library(sp)
library(rgeos)
library(tmap)
library(spatstat)
library(maptools)
library(classInt)
library(dplyr)
library(spdep)
#NAIROBI
#Load data: ward-level census indicators, OSM ward summary, distinct users
Nairobi <-
  read_excel("Case_study_common_data.xlsx", sheet = "Nairobi")
n_sum <-
  read_excel("nairobi_ward_data_summary.xlsx", sheet = "nairobi_ward_data_summary")
#Distinct users per ward
#Read in excel file which you have exported each in
n_uid <-
  read_excel("nairobi_ward_unique_uid.xlsx", sheet = "Sheet5")
#subset: drop columns not used in the analysis
Nairobi <-
  Nairobi[, c(-1,-3,-4,-7,-12:-15, -26:-28, -30, -32, -36, -39)]
#Rename column 8 and derive density variables from the ward summary table
names(n_sum)[8] <- "ft_density_km2"
ft_density <- n_sum$ft_density_km2
n_sum$population_density <- n_sum$population / n_sum$`Area (km2)`
pop_density <- n_sum$population_density
#Dependent variables
#Load data exported manually colated from PostgreSQL
Nairobi_ward_tags <-
  read_excel("casestudy_ward_tags.xlsx", sheet = "Nairobi")
#Extract specific tags
#This is because the imported .csv file includes the wards in which each feature occured
#This extracts the two relevant columns (feature, wards they occurred in)
#This will allow them to be joined in a more appropriate manner later on
n_tag_begin <- Nairobi_ward_tags[, 1]
n_tag_school <- Nairobi_ward_tags[, 2:3]
n_tag_college <- Nairobi_ward_tags[, 4:5]
n_tag_pub <- Nairobi_ward_tags[, 6:7]
n_tag_bar <- Nairobi_ward_tags[, 8:9]
n_tag_pharmacy <- Nairobi_ward_tags[, 10:11]
n_tag_hospital <- Nairobi_ward_tags[, 12:13]
n_tag_dentist <- Nairobi_ward_tags[, 14:15]
n_tag_clinic <- Nairobi_ward_tags[, 16:17]
n_tag_police <- Nairobi_ward_tags[, 18:19]
n_tag_bank <- Nairobi_ward_tags[, 20:21]
n_tag_atm <- Nairobi_ward_tags[, 22:23]
n_tag_restaurant <- Nairobi_ward_tags[, 24:25]
n_tag_fast_food <- Nairobi_ward_tags[, 26:27]
n_tag_toilets <- Nairobi_ward_tags[, 28:29]
n_tag_drinking_water <- Nairobi_ward_tags[, 30:31]
n_tag_place_of_worship <- Nairobi_ward_tags[, 32:33]
n_tag_bus_stop <- Nairobi_ward_tags[, 34:35]
n_tag_street_lamp <- Nairobi_ward_tags[, 36:37]
n_tag_hotel <- Nairobi_ward_tags[, 38:39]
n_tag_industrial <- Nairobi_ward_tags[, 40:41]
n_tag_apartment <- Nairobi_ward_tags[, 42:43]
n_tag_house <- Nairobi_ward_tags[, 44:45]
n_tag_church <- Nairobi_ward_tags[, 46:47]
n_tag_mosque <- Nairobi_ward_tags[, 48:49]
n_tag_footway <- Nairobi_ward_tags[, 50:51]
n_tag_primary <- Nairobi_ward_tags[, 52:53]
n_tag_residential <- Nairobi_ward_tags[, 54:55]
n_tag_unclassified <- Nairobi_ward_tags[, 56:57]
n_tag_unique_users <- n_sum[, 1:6]
#join back together, one left_join per tag so each ward keeps every feature count
n_tag1 <- n_tag_begin
n_tag1 <-
  left_join(n_tag1, n_tag_unique_users, by = c("ward" = "WARD"))
n_tag1 <- left_join(n_tag1, n_tag_school, by = c("ward" = "ward1"))
n_tag1 <- left_join(n_tag1, n_tag_college, by = c("ward" = "ward2"))
n_tag1 <- left_join(n_tag1, n_tag_pub, by = c("ward" = "ward3"))
n_tag1 <- left_join(n_tag1, n_tag_bar, by = c("ward" = "ward4"))
n_tag1 <-
  left_join(n_tag1, n_tag_pharmacy, by = c("ward" = "ward5"))
n_tag1 <-
  left_join(n_tag1, n_tag_hospital, by = c("ward" = "ward6"))
n_tag1 <- left_join(n_tag1, n_tag_dentist, by = c("ward" = "ward7"))
n_tag1 <- left_join(n_tag1, n_tag_clinic, by = c("ward" = "ward8"))
n_tag1 <- left_join(n_tag1, n_tag_police, by = c("ward" = "ward9"))
n_tag1 <- left_join(n_tag1, n_tag_bank, by = c("ward" = "ward10"))
n_tag1 <- left_join(n_tag1, n_tag_atm, by = c("ward" = "ward11"))
n_tag1 <-
  left_join(n_tag1, n_tag_restaurant, by = c("ward" = "ward12"))
n_tag1 <-
  left_join(n_tag1, n_tag_fast_food, by = c("ward" = "ward13"))
n_tag1 <-
  left_join(n_tag1, n_tag_toilets, by = c("ward" = "ward14"))
n_tag1 <-
  left_join(n_tag1, n_tag_drinking_water, by = c("ward" = "ward15"))
n_tag1 <-
  left_join(n_tag1, n_tag_place_of_worship, by = c("ward" = "ward16"))
n_tag1 <-
  left_join(n_tag1, n_tag_bus_stop, by = c("ward" = "ward17"))
n_tag1 <-
  left_join(n_tag1, n_tag_street_lamp, by = c("ward" = "ward18"))
n_tag1 <- left_join(n_tag1, n_tag_hotel, by = c("ward" = "ward19"))
n_tag1 <-
  left_join(n_tag1, n_tag_industrial, by = c("ward" = "ward20"))
n_tag1 <-
  left_join(n_tag1, n_tag_apartment, by = c("ward" = "ward21"))
n_tag1 <- left_join(n_tag1, n_tag_house, by = c("ward" = "ward22"))
n_tag1 <- left_join(n_tag1, n_tag_church, by = c("ward" = "ward23"))
n_tag1 <- left_join(n_tag1, n_tag_mosque, by = c("ward" = "ward24"))
n_tag1 <-
  left_join(n_tag1, n_tag_footway, by = c("ward" = "ward25"))
n_tag1 <-
  left_join(n_tag1, n_tag_primary, by = c("ward" = "ward26"))
n_tag1 <-
  left_join(n_tag1, n_tag_residential, by = c("ward" = "ward27"))
n_tag1 <-
  left_join(n_tag1, n_tag_unclassified, by = c("ward" = "ward28"))
#Backup the original
n_tag2 <- n_tag1
#change na to 0 (a ward with no match for a tag has a zero count)
n_tag2[is.na(n_tag2)] <- 0
#Write dataframe to .csv the first time you do it
#write.csv(n_tag2, file = "Nairobi_wards_whatismapped.csv")
#Create variables for dependent and independent variables
n_depdf <- n_tag2
n_indepdf <- Nairobi
#Join together census (independent) and OSM tag (dependent) tables by ward name
nairobi_join <-
  left_join(n_indepdf, n_depdf, by = c("name" = "ward"))
#Create the pp_hh variable (people per household)
nairobi_join$pp_hh <-
  as.numeric(nairobi_join$`total population`) / as.numeric(nairobi_join$`total households`)
#Reasign
nairobi_join3 <- nairobi_join
#Function to remove the effect of non-scaled variables (Willbrink, 2017)
# Linearly rescale a numeric vector onto [0, 100].
#
# @param x numeric vector.
# @param na.rm drop NAs when computing the range (default FALSE preserves
#   the original behaviour, where any NA makes the whole result NA).
# Returns a vector the same length as x; a constant input yields NaN
# (0 / 0), as in the original.
rescale <- function(x, na.rm = FALSE) {
  # Compute min and max once instead of twice each.
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1]) * 100
}
#Rescale total population onto 0-100 so its coefficient is comparable with
#the percentage-scaled predictors
rs_total_pop <-
  rescale(as.numeric(nairobi_join3$`total population`))
nairobi_join3$`total population` <- rs_total_pop
# Attach the pre-computed density variables from the ward summary table.
# NOTE(review): this assumes n_sum rows are in the same ward order as
# nairobi_join3 -- confirm before trusting these columns.
nairobi_join3$ft_density <- ft_density
nairobi_join3$pop_density <- pop_density
#Change name to avoid issue when performing regression
names(nairobi_join3)[20] <- "education level index"
#Backup (keeps the character ward names, which the numeric coercion below destroys)
nairobi_bkup <- nairobi_join3
#Assign column names to a different variable so they can be restored after
#data.frame() mangles the non-syntactic names
nairobi_names <- colnames(nairobi_join3)
nairobi_join3 <-
  data.frame(sapply(nairobi_join3, function(x)
    as.numeric(as.character(x))))
colnames(nairobi_join3) <- nairobi_names
# Restore the ward name column lost in the all-numeric coercion.
nairobi_join3$name <- nairobi_bkup$name
#Perform linear regression for each OSM variable
# Every regression below used an identical right-hand side, written out
# long-hand 28 times. The predictors are listed once and a small helper
# builds the formula, which removes ~350 lines of duplication. Coefficient
# estimates, fits and R^2 are identical to the long-hand
# `nairobi_join3$...` formulas; only the printed term labels change
# (no `nairobi_join3$` prefix).
n_predictors <- c(
  "total population",
  "general sex ratio (females to males)",
  "% of primary school attendance (6-13)",
  "Secondary School Attendance of 14- to 17-Year-Olds",
  "education level index",
  "% households owning own livestock",
  "% pop 18-64",
  "% households with 1-3 people",
  "% of female headed households",
  "% of households owning house they live in",
  "% Employment Rate",
  "% access to safe water source",
  "% access to improved sanitation",
  "pop_density"
)

# Fit the standard ward-level OLS model for one response column.
# `response` is a column name in `data`; every term is backtick-quoted so
# non-syntactic names (spaces, %, parentheses) work in the formula.
n_fit_ward_lm <- function(response, data = nairobi_join3,
                          predictors = n_predictors) {
  rhs <- paste(sprintf("`%s`", predictors), collapse = " + ")
  f <- as.formula(paste0("`", response, "` ~ ", rhs))
  lm(f, data = data)
}

# One model per OSM contribution / feature-count variable.
n_lm_osmuid         <- n_fit_ward_lm("Distinct osm_users")
n_lm_totaledits     <- n_fit_ward_lm("total edits")
n_lm_point          <- n_fit_ward_lm("Point_Count")
n_lm_line           <- n_fit_ward_lm("Line_Count")
n_lm_polygon        <- n_fit_ward_lm("Polygon_Count")
n_lm_ftdensity      <- n_fit_ward_lm("ft_density")
n_lm_school         <- n_fit_ward_lm("School")
n_lm_college        <- n_fit_ward_lm("College")
n_lm_pub            <- n_fit_ward_lm("Pub")
n_lm_bar            <- n_fit_ward_lm("Bar")
n_lm_pharmacy       <- n_fit_ward_lm("Pharmacy")
n_lm_hospital       <- n_fit_ward_lm("Hospital")
n_lm_dentist        <- n_fit_ward_lm("Dentist")
n_lm_clinic         <- n_fit_ward_lm("Clinic")
n_lm_police         <- n_fit_ward_lm("Police")
n_lm_bank           <- n_fit_ward_lm("Bank")
n_lm_atm            <- n_fit_ward_lm("ATM")
n_lm_restaurant     <- n_fit_ward_lm("Restaurant")
n_lm_fastfood       <- n_fit_ward_lm("Fast Food")
n_lm_toilets        <- n_fit_ward_lm("Toilets")
n_lm_drinkingwater  <- n_fit_ward_lm("Drinking Water")
n_lm_placeofworship <- n_fit_ward_lm("Place of Worship")
n_lm_busstop        <- n_fit_ward_lm("Bus_Stop")
n_lm_streetlamp     <- n_fit_ward_lm("Street Lamp")
n_lm_hotel          <- n_fit_ward_lm("Hotel")
n_lm_industrial     <- n_fit_ward_lm("Industrial")
n_lm_apartments     <- n_fit_ward_lm("Apartments")
n_lm_house <-
lm(
nairobi_join3$House ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_church <-
lm(
nairobi_join3$Church ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_mosque <-
lm(
nairobi_join3$Mosque ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` +
nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` +
nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` +
nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` +
nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_footway <-
lm(
nairobi_join3$Footway ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` +
nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` +
nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` +
nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` +
nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_primary <-
lm(
nairobi_join3$Primary ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` +
nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` +
nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` +
nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` +
nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_residential <-
lm(
nairobi_join3$Residential ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` +
nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` +
nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` +
nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` +
nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
n_lm_unclassified <-
lm(
nairobi_join3$Unclassified ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` +
nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` +
nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` +
nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` +
nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density
)
# Output of the multiple linear regression models: summary() reports the
# coefficient table, adjusted R-squared and overall F-test p-value for each.
# NOTE(review): these rely on interactive auto-printing; if this script is
# run via source(), wrap each call in print() or nothing will be shown.
# Contribution / geometry measures:
summary(n_lm_osmuid)
summary(n_lm_totaledits)
summary(n_lm_point)
summary(n_lm_line)
summary(n_lm_polygon)
summary(n_lm_ftdensity)
# Amenity and land-use feature counts:
summary(n_lm_school)
summary(n_lm_college)
summary(n_lm_pub)
summary(n_lm_bar)
summary(n_lm_pharmacy)
summary(n_lm_hospital)
summary(n_lm_dentist)
summary(n_lm_clinic)
summary(n_lm_police)
summary(n_lm_bank)
summary(n_lm_atm)
summary(n_lm_restaurant)
summary(n_lm_fastfood)
summary(n_lm_toilets)
summary(n_lm_drinkingwater)
summary(n_lm_placeofworship)
summary(n_lm_busstop)
summary(n_lm_streetlamp)
summary(n_lm_hotel)
summary(n_lm_industrial)
summary(n_lm_apartments)
summary(n_lm_house)
summary(n_lm_church)
summary(n_lm_mosque)
# Highway feature counts:
summary(n_lm_footway)
summary(n_lm_primary)
summary(n_lm_residential)
summary(n_lm_unclassified)
# Calculate the AIC value for each regression.
# This allows comparison with the GWR models later, but ONLY when both are
# fitted on the exact same dataset.
# NOTE(review): in the original script the ft_density call below had been
# fused onto the end of a comment line, so it never executed; it is restored
# here on its own line.
AIC(n_lm_ftdensity)
AIC(n_lm_streetlamp)
AIC(n_lm_footway)
AIC(n_lm_primary)
AIC(n_lm_residential)
AIC(n_lm_unclassified)
# Relative importance (LMG metric) of each predictor in the selected models,
# expressed as a percentage of the explained variance.
#
# NOTE(review): the original script overwrote `v.test` on every call and only
# printed the final result; worse, the n_lm_osmuid call had been fused onto
# the end of a comment line and never ran. Each decomposition is now computed
# and printed in turn. After the loop, `v.test` still holds the result for
# the last model (n.lm.group.infra), matching the original final state.
n_relimp_models <- list(
  osmuid      = n_lm_osmuid,
  point       = n_lm_point,
  line        = n_lm_line,
  ftdensity   = n_lm_ftdensity,
  college     = n_lm_college,
  bank        = n_lm_bank,
  restaurant  = n_lm_restaurant,
  fastfood    = n_lm_fastfood,
  busstop     = n_lm_busstop,
  hotel       = n_lm_hotel,
  footway     = n_lm_footway,
  residential = n_lm_residential,
  group_rec   = n.lm.group.rec,
  group_infra = n.lm.group.infra
)
for (model_name in names(n_relimp_models)) {
  v.test <- calc.relimp(n_relimp_models[[model_name]],
                        type = "lmg", rela = TRUE)
  cat("Relative importance (%) for", model_name, "\n")
  # Print the LMG shares as percentages.
  print(v.test@lmg * 100)
}
# Clean up: drop the per-feature OLS model objects now that their summaries,
# AIC values and relative-importance scores have been inspected.
rm(list = c(
  "n_lm_atm", "n_lm_apartments", "n_lm_bank", "n_lm_bar", "n_lm_busstop",
  "n_lm_church", "n_lm_clinic", "n_lm_college", "n_lm_dentist",
  "n_lm_drinkingwater", "n_lm_fastfood", "n_lm_ftdensity", "n_lm_hospital",
  "n_lm_hotel", "n_lm_house", "n_lm_industrial", "n_lm_line", "n_lm_mosque",
  "n_lm_osmuid", "n_lm_pharmacy", "n_lm_placeofworship", "n_lm_point",
  "n_lm_police", "n_lm_polygon", "n_lm_pub", "n_lm_restaurant",
  "n_lm_school", "n_lm_streetlamp", "n_lm_toilets", "n_lm_totaledits",
  "n_lm_footway", "n_lm_primary", "n_lm_residential", "n_lm_unclassified"
))
# Drop the per-tag count objects as well; they were consumed when building
# the joined analysis table.
rm(list = c(
  "n_tag_apartment", "n_tag_atm", "n_tag_bank", "n_tag_bar", "n_tag_begin",
  "n_tag_bus_stop", "n_tag_church", "n_tag_clinic", "n_tag_college",
  "n_tag_dentist", "n_tag_drinking_water", "n_tag_fast_food",
  "n_tag_hospital", "n_tag_hotel", "n_tag_house", "n_tag_industrial",
  "n_tag_mosque", "n_tag_pharmacy", "n_tag_place_of_worship",
  "n_tag_police", "n_tag_pub", "n_tag_restaurant", "n_tag_school",
  "n_tag_street_lamp", "n_tag_toilets", "n_tag_unique_users",
  "n_tag_footway", "n_tag_primary", "n_tag_residential",
  "n_tag_unclassified"
))
# Spatial autocorrelation (Moran's I) setup ----
# Load the Nairobi ward boundaries shapefile from the working directory.
# NOTE(review): readOGR() comes from the retired rgdal package; sf::st_read()
# is the maintained replacement — consider migrating.
# NOTE(review): the layer name "nairobi_wardsshp1" (double "s") differs from
# the variable name — confirm it matches the file on disk.
nairobi_wardshp1 <- readOGR(dsn = ".", layer = "nairobi_wardsshp1")
# Visual sanity check of the ward polygons.
plot(nairobi_wardshp1)
# Attribute columns to strip from the shapefile before merging (internal GIS
# IDs and shape-geometry bookkeeping fields that are not needed here).
drops <-
  c(
    "OBJECTID_2",
    "OBJECTID_1",
    "OBJECTID",
    "OBJECTID_3",
    "CONSTITUEN",
    "COUNTY_COD",
    "Shape_Leng",
    "COUNTY_NAME",
    "Shape_Le_1",
    "Shape_Are",
    "Shape_Len",
    "Shape_Le_2",
    "Shape_Area"
  )
# Keep every attribute column not listed in `drops`.
nairobi_wardshp2 <-
  nairobi_wardshp1[,!(names(nairobi_wardshp1) %in% drops)]
# Rename the ward-name column so merge() can match it against the
# shapefile's NAME attribute.
colnames(nairobi_join3)[1] <- "NAME"
# Merge the analysis table onto the ward geometries (joins on NAME).
n_jointrial <- merge(nairobi_wardshp2, nairobi_join3)
# Build the contiguity (queen) neighbour list from the ward polygons.
# Adapted from Nick Bearman's spatial analysis practical.
n_neighbours <- poly2nb(n_jointrial)
# Convert to a row-standardised spatial weights list for moran.test().
n_listw <- nb2listw(n_neighbours)
# Global Moran's I — independent (socio-demographic) variables.
# Each test reports whether the variable is spatially clustered across wards
# under the row-standardised weights in n_listw; results auto-print when run
# interactively.
moran.test(n_jointrial$`total population`, n_listw)
moran.test(n_jointrial$`general sex ratio (females to males)` , n_listw)
moran.test(n_jointrial$`% of primary school attendance (6-13)` , n_listw)
moran.test(n_jointrial$`Secondary School Attendance of 14- to 17-Year-Olds`,n_listw)
moran.test(n_jointrial$`education level index`, n_listw)
moran.test(n_jointrial$`% households owning own livestock` , n_listw)
moran.test(n_jointrial$`% pop 18-64` , n_listw)
moran.test(n_jointrial$`% households with 1-3 people` , n_listw)
moran.test(n_jointrial$`% of female headed households` , n_listw)
moran.test(n_jointrial$`% of households owning house they live in` , n_listw)
moran.test(n_jointrial$`% Employment Rate`, n_listw)
moran.test(n_jointrial$`% access to safe water source` , n_listw)
moran.test(n_jointrial$`% access to improved sanitation` , n_listw)
moran.test(n_jointrial$pop_density , n_listw)
# Global Moran's I — dependent (OSM contribution and feature-count) variables.
moran.test(n_jointrial$`Distinct osm_users`, n_listw)
moran.test(n_jointrial$`total edits`, n_listw)
moran.test(n_jointrial$Point_Count, n_listw)
moran.test(n_jointrial$Line_Count, n_listw)
moran.test(n_jointrial$Polygon_Count, n_listw)
moran.test(n_jointrial$ft_density, n_listw)
moran.test(n_jointrial$School, n_listw)
moran.test(n_jointrial$College, n_listw)
moran.test(n_jointrial$Pub, n_listw)
moran.test(n_jointrial$Bar, n_listw)
moran.test(n_jointrial$Pharmacy, n_listw)
moran.test(n_jointrial$Hospital, n_listw)
moran.test(n_jointrial$Dentist, n_listw)
moran.test(n_jointrial$Clinic, n_listw)
moran.test(n_jointrial$Police, n_listw)
moran.test(n_jointrial$Bank, n_listw)
moran.test(n_jointrial$ATM, n_listw)
moran.test(n_jointrial$Restaurant, n_listw)
moran.test(n_jointrial$`Fast Food`, n_listw)
moran.test(n_jointrial$Toilets, n_listw)
moran.test(n_jointrial$`Drinking Water`, n_listw)
moran.test(n_jointrial$`Place of Worship`, n_listw)
moran.test(n_jointrial$Bus_Stop, n_listw)
moran.test(n_jointrial$`Street Lamp`, n_listw)
moran.test(n_jointrial$Hotel, n_listw)
moran.test(n_jointrial$Industrial, n_listw)
moran.test(n_jointrial$Apartments, n_listw)
moran.test(n_jointrial$House, n_listw)
moran.test(n_jointrial$Church, n_listw)
moran.test(n_jointrial$Mosque, n_listw)
moran.test(n_jointrial$Footway, n_listw)
moran.test(n_jointrial$Primary, n_listw)
moran.test(n_jointrial$Residential, n_listw)
moran.test(n_jointrial$Unclassified, n_listw)
# Geographically Weighted Regression (GWR) ----
# For every dependent variable, search for an adaptive kernel bandwidth with
# gwr.sel(), using the same 14 socio-demographic predictors as the global
# OLS models. Each result keeps its original gwrbandwidth.n_lm_* name.
#
# NOTE(review): the formulas reference nairobi_join3$... columns while
# `data = n_jointrial` supplies the ward geometries; this relies on the two
# objects sharing the same row order — confirm merge() did not reorder wards.

# Shared predictor columns (identical to the OLS specification).
n_predictor_cols <- c(
  "total population",
  "general sex ratio (females to males)",
  "% of primary school attendance (6-13)",
  "Secondary School Attendance of 14- to 17-Year-Olds",
  "education level index",
  "% households owning own livestock",
  "% pop 18-64",
  "% households with 1-3 people",
  "% of female headed households",
  "% of households owning house they live in",
  "% Employment Rate",
  "% access to safe water source",
  "% access to improved sanitation",
  "pop_density"
)

# Run one adaptive-bandwidth search for the given response column and return
# the bandwidth selected by gwr.sel()'s cross-validation.
select_gwr_bandwidth <- function(response_col) {
  rhs <- paste(sprintf("nairobi_join3$`%s`", n_predictor_cols),
               collapse = " + ")
  fml <- as.formula(sprintf("nairobi_join3$`%s` ~ %s", response_col, rhs))
  gwr.sel(fml, data = n_jointrial, adapt = TRUE)
}

gwrbandwidth.n_lm_osmuid         <- select_gwr_bandwidth("Distinct osm_users")
gwrbandwidth.n_lm_totaledits     <- select_gwr_bandwidth("total edits")
gwrbandwidth.n_lm_point          <- select_gwr_bandwidth("Point_Count")
gwrbandwidth.n_lm_line           <- select_gwr_bandwidth("Line_Count")
gwrbandwidth.n_lm_polygon        <- select_gwr_bandwidth("Polygon_Count")
gwrbandwidth.n_lm_ftdensity      <- select_gwr_bandwidth("ft_density")
gwrbandwidth.n_lm_school         <- select_gwr_bandwidth("School")
gwrbandwidth.n_lm_college        <- select_gwr_bandwidth("College")
gwrbandwidth.n_lm_pub            <- select_gwr_bandwidth("Pub")
gwrbandwidth.n_lm_bar            <- select_gwr_bandwidth("Bar")
gwrbandwidth.n_lm_pharmacy       <- select_gwr_bandwidth("Pharmacy")
gwrbandwidth.n_lm_hospital       <- select_gwr_bandwidth("Hospital")
gwrbandwidth.n_lm_dentist        <- select_gwr_bandwidth("Dentist")
gwrbandwidth.n_lm_clinic         <- select_gwr_bandwidth("Clinic")
gwrbandwidth.n_lm_police         <- select_gwr_bandwidth("Police")
gwrbandwidth.n_lm_bank           <- select_gwr_bandwidth("Bank")
gwrbandwidth.n_lm_atm            <- select_gwr_bandwidth("ATM")
gwrbandwidth.n_lm_restaurant     <- select_gwr_bandwidth("Restaurant")
gwrbandwidth.n_lm_fastfood       <- select_gwr_bandwidth("Fast Food")
gwrbandwidth.n_lm_toilets        <- select_gwr_bandwidth("Toilets")
gwrbandwidth.n_lm_drinkingwater  <- select_gwr_bandwidth("Drinking Water")
gwrbandwidth.n_lm_placeofworship <- select_gwr_bandwidth("Place of Worship")
gwrbandwidth.n_lm_busstop        <- select_gwr_bandwidth("Bus_Stop")
gwrbandwidth.n_lm_streetlamp     <- select_gwr_bandwidth("Street Lamp")
gwrbandwidth.n_lm_hotel          <- select_gwr_bandwidth("Hotel")
gwrbandwidth.n_lm_industrial     <- select_gwr_bandwidth("Industrial")
gwrbandwidth.n_lm_apartments     <- select_gwr_bandwidth("Apartments")
gwrbandwidth.n_lm_house <-
gwr.sel(
nairobi_join3$House ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_church <-
gwr.sel(
nairobi_join3$Church ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_mosque <-
gwr.sel(
nairobi_join3$Mosque ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_footway <-
gwr.sel(
nairobi_join3$Footway ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_primary <-
gwr.sel(
nairobi_join3$Primary ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_residential <-
gwr.sel(
nairobi_join3$Residential ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
gwrbandwidth.n_lm_unclassified <-
gwr.sel(
nairobi_join3$Unclassified ~ nairobi_join3$`total population` +
nairobi_join3$`general sex ratio (females to males)` +
nairobi_join3$`% of primary school attendance (6-13)` +
nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds` + nairobi_join3$`education level index` +
nairobi_join3$`% households owning own livestock` + nairobi_join3$`% pop 18-64` +
nairobi_join3$`% households with 1-3 people` +
nairobi_join3$`% of female headed households` +
nairobi_join3$`% of households owning house they live in` + nairobi_join3$`% Employment Rate` +
nairobi_join3$`% access to safe water source` + nairobi_join3$`% access to improved sanitation` +
nairobi_join3$pop_density,
data = n_jointrial,
adapt = T
)
# GWR Models ----
# Fit one geographically weighted regression per response variable, each
# using the adaptive bandwidth selected by the matching gwr.sel() call
# above.  hatmatrix = TRUE / se.fit = TRUE request local standard errors
# and fit diagnostics in the returned $SDF.
#
# NOTE(review): as in the bandwidth step, the formula terms are
# nairobi_join3$`...` absolute references while data = n_jointrial is also
# passed; the predictors come from nairobi_join3 via the formula
# environment -- confirm the two objects are row-aligned.

# The fourteen shared predictor terms, built once instead of repeating the
# identical right-hand side for all thirty-four models.
n_gwr_rhs <- paste(
  "nairobi_join3$`total population`",
  "nairobi_join3$`general sex ratio (females to males)`",
  "nairobi_join3$`% of primary school attendance (6-13)`",
  "nairobi_join3$`Secondary School Attendance of 14- to 17-Year-Olds`",
  "nairobi_join3$`education level index`",
  "nairobi_join3$`% households owning own livestock`",
  "nairobi_join3$`% pop 18-64`",
  "nairobi_join3$`% households with 1-3 people`",
  "nairobi_join3$`% of female headed households`",
  "nairobi_join3$`% of households owning house they live in`",
  "nairobi_join3$`% Employment Rate`",
  "nairobi_join3$`% access to safe water source`",
  "nairobi_join3$`% access to improved sanitation`",
  "nairobi_join3$pop_density",
  sep = " + "
)

# Fit a GWR for one response column of nairobi_join3.
# response:  column name as a string (backticks added automatically).
# bandwidth: adaptive bandwidth proportion from the matching gwr.sel() call.
# Returns the full gwr object (local results live in its $SDF slot).
fit_n_gwr <- function(response, bandwidth) {
  gwr(
    as.formula(paste0("nairobi_join3$`", response, "` ~ ", n_gwr_rhs),
               env = globalenv()),
    data = n_jointrial,
    adapt = bandwidth,
    hatmatrix = TRUE,
    se.fit = TRUE
  )
}

gwrmodel.n_lm_osmuid         <- fit_n_gwr("Distinct osm_users", gwrbandwidth.n_lm_osmuid)
gwrmodel.n_lm_totaledits     <- fit_n_gwr("total edits", gwrbandwidth.n_lm_totaledits)
gwrmodel.n_lm_point          <- fit_n_gwr("Point_Count", gwrbandwidth.n_lm_point)
gwrmodel.n_lm_line           <- fit_n_gwr("Line_Count", gwrbandwidth.n_lm_line)
gwrmodel.n_lm_polygon        <- fit_n_gwr("Polygon_Count", gwrbandwidth.n_lm_polygon)
gwrmodel.n_lm_ftdensity      <- fit_n_gwr("ft_density", gwrbandwidth.n_lm_ftdensity)
gwrmodel.n_lm_school         <- fit_n_gwr("School", gwrbandwidth.n_lm_school)
gwrmodel.n_lm_college        <- fit_n_gwr("College", gwrbandwidth.n_lm_college)
gwrmodel.n_lm_pub            <- fit_n_gwr("Pub", gwrbandwidth.n_lm_pub)
gwrmodel.n_lm_bar            <- fit_n_gwr("Bar", gwrbandwidth.n_lm_bar)
gwrmodel.n_lm_pharmacy       <- fit_n_gwr("Pharmacy", gwrbandwidth.n_lm_pharmacy)
gwrmodel.n_lm_hospital       <- fit_n_gwr("Hospital", gwrbandwidth.n_lm_hospital)
gwrmodel.n_lm_dentist        <- fit_n_gwr("Dentist", gwrbandwidth.n_lm_dentist)
gwrmodel.n_lm_clinic         <- fit_n_gwr("Clinic", gwrbandwidth.n_lm_clinic)
gwrmodel.n_lm_police         <- fit_n_gwr("Police", gwrbandwidth.n_lm_police)
gwrmodel.n_lm_bank           <- fit_n_gwr("Bank", gwrbandwidth.n_lm_bank)
gwrmodel.n_lm_atm            <- fit_n_gwr("ATM", gwrbandwidth.n_lm_atm)
gwrmodel.n_lm_restaurant     <- fit_n_gwr("Restaurant", gwrbandwidth.n_lm_restaurant)
gwrmodel.n_lm_fastfood       <- fit_n_gwr("Fast Food", gwrbandwidth.n_lm_fastfood)
gwrmodel.n_lm_toilets        <- fit_n_gwr("Toilets", gwrbandwidth.n_lm_toilets)
gwrmodel.n_lm_drinkingwater  <- fit_n_gwr("Drinking Water", gwrbandwidth.n_lm_drinkingwater)
gwrmodel.n_lm_placeofworship <- fit_n_gwr("Place of Worship", gwrbandwidth.n_lm_placeofworship)
gwrmodel.n_lm_busstop        <- fit_n_gwr("Bus_Stop", gwrbandwidth.n_lm_busstop)
gwrmodel.n_lm_streetlamp     <- fit_n_gwr("Street Lamp", gwrbandwidth.n_lm_streetlamp)
gwrmodel.n_lm_hotel          <- fit_n_gwr("Hotel", gwrbandwidth.n_lm_hotel)
gwrmodel.n_lm_industrial     <- fit_n_gwr("Industrial", gwrbandwidth.n_lm_industrial)
gwrmodel.n_lm_apartments     <- fit_n_gwr("Apartments", gwrbandwidth.n_lm_apartments)
gwrmodel.n_lm_house          <- fit_n_gwr("House", gwrbandwidth.n_lm_house)
gwrmodel.n_lm_church         <- fit_n_gwr("Church", gwrbandwidth.n_lm_church)
gwrmodel.n_lm_mosque         <- fit_n_gwr("Mosque", gwrbandwidth.n_lm_mosque)
gwrmodel.n_lm_footway        <- fit_n_gwr("Footway", gwrbandwidth.n_lm_footway)
gwrmodel.n_lm_primary        <- fit_n_gwr("Primary", gwrbandwidth.n_lm_primary)
gwrmodel.n_lm_residential    <- fit_n_gwr("Residential", gwrbandwidth.n_lm_residential)
gwrmodel.n_lm_unclassified   <- fit_n_gwr("Unclassified", gwrbandwidth.n_lm_unclassified)
# Extract the spatial data frame (SDF) of local coefficient estimates
# from each fitted GWR model and store it as a plain data frame named
# results.gwrmodel.n_lm_<suffix>.
# get()/assign() are used deliberately: the 32 result objects must keep
# exactly these global names because later cbind()/qtm() statements
# reference them individually.
for (sfx in c("osmuid", "totaledits", "point", "line", "polygon",
              "ftdensity", "school", "college", "pub", "bar",
              "pharmacy", "hospital", "dentist", "clinic", "police",
              "bank", "atm", "restaurant", "fastfood", "placeofworship",
              "busstop", "streetlamp", "hotel", "industrial",
              "apartments", "house", "church", "mosque", "footway",
              "primary", "residential", "unclassified")) {
  fitted_model <- get(paste0("gwrmodel.n_lm_", sfx))
  assign(paste0("results.gwrmodel.n_lm_", sfx),
         as.data.frame(fitted_model$SDF))
}
# Bind each model's local GWR results back onto the ward polygons
# (n_jointrial) so the coefficient surfaces can be mapped.
# The streetlamp output is stored under the misspelled name
# "gwr.map.n_lm_streetlmap" on purpose: the later qtm() call references
# that exact (typo'd) name, and renaming it here would break it.
for (sfx in c("osmuid", "totaledits", "point", "line", "polygon",
              "ftdensity", "school", "college", "pub", "bar",
              "pharmacy", "hospital", "dentist", "clinic", "police",
              "bank", "atm", "restaurant", "fastfood", "placeofworship",
              "busstop", "streetlamp", "hotel", "industrial",
              "apartments", "house", "church", "mosque", "footway",
              "primary", "residential", "unclassified")) {
  gwr_results <- get(paste0("results.gwrmodel.n_lm_", sfx))
  out_sfx <- if (sfx == "streetlamp") "streetlmap" else sfx
  assign(paste0("gwr.map.n_lm_", out_sfx),
         cbind(n_jointrial, as.matrix(gwr_results)))
}
# Map the local R2 surface of each model for visual inspection; only the
# global R2 values are used in the report. "industrial" and "church" are
# absent from this list (matching the original plotting order), and
# "streetlmap" repeats the earlier typo in the object name. In the
# original review, bank, restaurant, fastfood and hotel stood out as the
# best-fitting surfaces.
for (sfx in c("osmuid", "totaledits", "point", "line", "polygon",
              "ftdensity", "school", "college", "pub", "bar",
              "pharmacy", "hospital", "dentist", "clinic", "police",
              "bank", "atm", "restaurant", "fastfood", "placeofworship",
              "busstop", "streetlmap", "hotel", "apartments", "house",
              "mosque", "footway", "primary", "residential",
              "unclassified")) {
  # print() is needed to render tmap objects from inside a loop
  print(qtm(get(paste0("gwr.map.n_lm_", sfx)), fill = "localR2"))
}
# Write the joined ward data to a shapefile for import into ArcGIS.
writeOGR(n_jointrial, ".", "_jointrial", driver = "ESRI Shapefile")
# Read the per-ward amenity frequency table (Nairobi sheet) and join it
# onto the census/OSM data by ward name; unmatched wards produce NAs,
# which are zero-filled below.
gm_freq <-
read_excel("ward_amenity_frequency.xlsx", sheet = "nairobi")
gm_freqjoin <-
left_join(nairobi_join3, gm_freq, by = c("NAME" = "ward"))
View(gm_freqjoin)
# Replace every NA in the joined frame with 0 (wards with no amenities
# of a given type).
gm_freqjoin[is.na(gm_freqjoin)] <- 0
# Attach the frequencies to the ward polygons and export for ArcGIS.
n_jointrial4 <- merge(nairobi_wardshp2, gm_freqjoin)
writeOGR(n_jointrial4, ".", "n_jointrial4", driver = "ESRI Shapefile")
# NOTE(review): this writes manchester_join3 (defined elsewhere in the
# file) from within the Nairobi section -- confirm it belongs here.
write.csv(manchester_join3, file = "Manc_Dep_Appendix.csv")
# Print each per-amenity GWR fit to the console so the global R2 / AIC
# figures can be transcribed into the report. The "toilets" and
# "drinkingwater" models are fitted elsewhere in this script.
for (sfx in c("osmuid", "totaledits", "point", "line", "polygon",
              "ftdensity", "school", "college", "pub", "bar",
              "pharmacy", "hospital", "dentist", "clinic", "police",
              "bank", "atm", "restaurant", "fastfood", "toilets",
              "drinkingwater", "placeofworship", "busstop", "streetlamp",
              "hotel", "industrial", "apartments", "house", "church",
              "mosque", "footway", "primary", "residential",
              "unclassified")) {
  print(get(paste0("gwrmodel.n_lm_", sfx)))
}
# Grouped variables: load additional grouped dependent variables and
# repeat the regression/GWR workflow used above.
# Grouped dependent variables (counts of OSM features per thematic group
# per ward).
n.group <- read_excel("Nairobi_wards_whatismapped.xlsx", sheet = "Sheet1")
# Independent variables (census covariates).
# NOTE(review): this is read into `Nairobi` but the join below uses
# `n_indepdf` (defined earlier in the script) -- confirm which object is
# intended here.
Nairobi <- read_excel("Case_study_common_data.xlsx", sheet = "Nairobi")
# Join the dependent and independent data frames by ward name.
n.join <- left_join(n_indepdf, n.group, by=c("name"="ward"))
n.name <- colnames(n.join)
# Coerce every column to numeric (non-numeric text becomes NA with a
# warning), then restore names and the first two identifier columns.
n.join2 <- data.frame(sapply(n.join, function(x) as.numeric(as.character(x))))
colnames(n.join2) <- n.name
n.join2[,1:2] <- n.join[,1:2]
n.join2$pop_density <- n_sum$population_density
# Rename by position -- fragile if the column order of the Excel sheet
# changes; column 20 is assumed to be the education level index.
names(n.join2)[20] <- "education level index"
# Multiple linear regressions of each grouped OSM feature class
# (rec = recreation, education, rel = religion, prop = property,
# infra = infrastructure, ps = public services) on the census covariates.
# NOTE(review): unlike the per-amenity GWR models earlier, these
# formulas omit `total population` -- confirm that is intentional.
n.lm.group.rec <- lm(n.join2$rec ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density)
# Education features.
n.lm.group.edu <- lm(n.join2$education ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density
)
# Religious features.
n.lm.group.rel <- lm(n.join2$rel ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density
)
# Property features.
n.lm.group.prop <- lm(n.join2$prop ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density
)
# Infrastructure features.
n.lm.group.infra <- lm(n.join2$infra ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density
)
# Public service features.
n.lm.group.ps <- lm(n.join2$ps ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`+
n.join2$pop_density
)
# Print the MLR summaries for the six grouped feature classes.
summary(n.lm.group.rec)
summary(n.lm.group.edu)
summary(n.lm.group.rel)
summary(n.lm.group.prop)
summary(n.lm.group.infra)
summary(n.lm.group.ps)
# Reload the ward shapefile.
# NOTE(review): the layer name "nairobi_wardsshp1" (double "s") differs
# from the object name -- confirm it matches the file on disk.
nairobi_wardshp1 <- readOGR(dsn=".", layer = "nairobi_wardsshp1")
# Global spatial autocorrelation (Moran's I) of each grouped variable.
# NOTE(review): n_listw is only (re)calculated two statements below, so
# these tests use a listw object defined earlier in the script --
# confirm the ordering is intentional.
moran.test(n.join2$rec, n_listw)
moran.test(n.join2$education, n_listw)
moran.test(n.join2$rel, n_listw)
moran.test(n.join2$prop, n_listw)
moran.test(n.join2$infra, n_listw)
moran.test(n.join2$ps, n_listw)
# Recalculate the spatial weights list from the neighbours object.
n_listw <- nb2listw(n_neighbours)
# Rename the first column to "NAME" to facilitate the merge below.
colnames(n.join2)[1] <- "NAME"
# Join the grouped data frame to the ward polygons.
n.group.join <- merge(nairobi_wardshp1, n.join2)
# Write the shapefile for use in ArcGIS.
writeOGR(n.group.join, ".", "n_group", driver="ESRI Shapefile")
# Calculate the adaptive GWR bandwidth (cross-validated fraction of
# neighbours, adapt = T) for each grouped dependent variable.
# NOTE(review): unlike the lm() fits above, these formulas omit
# pop_density -- confirm that is intentional.
n.rec <- gwr.sel(n.join2$rec ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T)
# Education features.
n.edu <- gwr.sel(n.join2$education ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T
)
# Religious features.
n.rel <- gwr.sel(n.join2$rel ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T
)
# Property features.
n.prop <- gwr.sel(n.join2$prop ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T
)
# Infrastructure features.
n.infra <- gwr.sel(n.join2$infra ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T
)
# Public service features.
n.ps <- gwr.sel(n.join2$ps ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = T
)
# Fit a GWR model for each grouped dependent variable using the
# bandwidth selected above (n.rec, n.edu, ...); hatmatrix/se.fit = TRUE
# return local standard errors and local R2.
n.rec.model <- gwr(n.join2$rec ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.rec, hatmatrix=TRUE, se.fit=TRUE)
# Education features.
n.edu.model <- gwr(n.join2$education ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.edu, hatmatrix=TRUE, se.fit=TRUE)
# Religious features.
n.rel.model <- gwr(n.join2$rel ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.rel, hatmatrix=TRUE, se.fit=TRUE)
# Property features.
n.prop.model <- gwr(n.join2$prop ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.prop, hatmatrix=TRUE, se.fit=TRUE)
# Infrastructure features.
n.infra.model <- gwr(n.join2$infra ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.infra,hatmatrix=TRUE, se.fit=TRUE)
# Public service features.
n.ps.model <- gwr(n.join2$ps ~
n.join2$`general sex ratio (females to males)`+
n.join2$`% of primary school attendance (6-13)`+
n.join2$`Secondary School Attendance of 14- to 17-Year-Olds`+
n.join2$`education level index`+
n.join2$`% households owning own livestock`+
n.join2$`% pop 18-64`+
n.join2$`% households with 1-3 people`+
n.join2$`% of female headed households`+
n.join2$`% of households owning house they live in`+
n.join2$`% Employment Rate`+
n.join2$`% access to safe water source`+
n.join2$`% access to improved sanitation`,
data=n.group.join, adapt = n.ps,hatmatrix=TRUE, se.fit=TRUE)
# Dump each grouped GWR fit to the console (bandwidth, global R2, AIC).
for (fit in list(n.ps.model, n.edu.model, n.rec.model,
                 n.rel.model, n.prop.model, n.infra.model)) {
  print(fit)
}
# AIC of the corresponding plain multiple linear regressions, for
# comparison against the GWR AICs above.
for (fit in list(n.lm.group.ps, n.lm.group.edu, n.lm.group.rec,
                 n.lm.group.rel, n.lm.group.prop, n.lm.group.infra)) {
  print(AIC(fit))
}
|
294459558604bbd341fd57d550327a6a98d26d79
|
2baa8f641eb762c36fb50db7a812998531b96687
|
/run_analysis.R
|
1c42a8d7987510a7f57dfa3d51193f7577af82bf
|
[] |
no_license
|
HariharanJayashankar/Getting-and-Cleaning-Data---Programming-Assignment
|
5da3588cbd95bebd47f69e9911488376183fbc25
|
4dc999be96774925fc2ef7594a81723ae43f4ac5
|
refs/heads/master
| 2021-08-31T18:22:05.122430
| 2017-12-22T10:26:03
| 2017-12-22T10:26:03
| 115,102,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,793
|
r
|
run_analysis.R
|
# run_analysis.R -- Getting & Cleaning Data course project.
# Downloads the UCI HAR dataset (if needed), merges the train/test sets,
# keeps only mean/std measurements, labels activities descriptively, and
# writes the per-(activity, subject) averages to "tidy_averages.txt".
library(dplyr)

# Download and unzip the raw data only when it is not already present.
if (!file.exists("Data.zip")) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
    destfile = "Data.zip"
  )
}
if (!file.exists("UCI HAR Dataset")) {
  unzip("Data.zip")
}

# Activity labels (id -> name) and the 561 feature names.
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])  # so grep() works on the names

# Indices and names of the mean/std features (this pattern intentionally
# also keeps the meanFreq() variables).
features_sub <- grep(".*mean.*|.*std.*", features[, 2])
features_req <- features[features_sub, 2]

# Assemble the training set: activity id, subject id, selected features.
train_x <- read.table("UCI HAR Dataset/train/X_train.txt")
train_x <- train_x[features_sub]
train_y <- read.table("UCI HAR Dataset/train/y_train.txt")
train_subj <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(train_y, train_subj, train_x)

# Same for the test set.
test_x <- read.table("UCI HAR Dataset/test/X_test.txt")
test_x <- test_x[features_sub]
test_y <- read.table("UCI HAR Dataset/test/y_test.txt")
test_subj <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(test_y, test_subj, test_x)

fulldata <- rbind(train, test)

# Clean up the feature names before using them as column names.
features_req <- gsub("-mean", "Mean", features_req)
features_req <- gsub("-std", "Std", features_req)
features_req <- gsub("[-()]", "", features_req)
colnames(fulldata) <- c("activity", "subject", features_req)

# Replace activity ids with their descriptive labels.
fulldata$activity <- factor(fulldata$activity,
                            levels = labels[, 1],
                            labels = labels[, 2])

# Average every measurement per (activity, subject).
# summarise_each()/funs() are deprecated; across() is the supported
# equivalent and automatically excludes the grouping columns.
fulldata_means <- fulldata %>%
  group_by(activity, subject) %>%
  summarise(across(everything(), mean), .groups = "drop")

# Save the tidy result.
write.table(fulldata_means, "tidy_averages.txt",
            row.names = FALSE, quote = FALSE)
|
0e1c039ac3db92b838953c0eba0664dd6730df8e
|
f4105cb1aad7f9110478aa4253a748ee6b585c38
|
/R/Sept2014_ReportFigures.R
|
eb7871aef586ac510bebf8d3c6b0f2b333ffaa8b
|
[] |
no_license
|
kmanlove/SheepBehavior
|
da8611fa81e2a5abfffca7bcf9ec5db696a7bcf2
|
bc54d918212393a9e5d6b0e27364381eef8d3d2e
|
refs/heads/master
| 2021-01-01T20:40:49.984218
| 2015-05-13T16:16:45
| 2015-05-13T16:16:45
| 31,034,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,286
|
r
|
Sept2014_ReportFigures.R
|
#---------------------------------------------------------------------#
#-- Preliminary analysis of bighorn sheep contact patterns from 2014 --#
#-- Black Butte (bb) vs Asotin (aso) study populations.              --#
#---------------------------------------------------------------------#
bb <- read.csv("./data/CleanBlackButteFollows_090214.csv", header = T)
aso <- read.csv("./data/CleanAsotinFollows_090214.csv", header = T)
# Number of follow sessions / individuals per population.
length(levels(bb$SessionID))
length(levels(aso$SessionID))
length(levels(aso$ID))
# Relocation data. NOTE(review): these .csv files are read with
# sep = "\t" (tab-delimited despite the extension) -- confirm.
aso.relocs <- read.csv("./data/Asotin_Locations_Clean.csv", header = T, sep = "\t")
bb.relocs <- read.csv("./data/BlackButte_Locations_Clean.csv", header = T, sep = "\t")
dim(aso.relocs)
dim(bb.relocs)
#--------------------------------------------------------------------------------#
#-- Q0: How do contact patterns and group sizes change over the timescale of a --#
#-- lamb pneumonia event? -------------------------------------------------------#
#--------------------------------------------------------------------------------#
# Group sizes over the season: total, and excluding lambs (ewes + yearling
# ewes), with lowess trend lines per population.
par(mfrow = c(1, 2))
plot(bb$Grpsz ~ bb$JulianDate, ylim = c(0, 40), xlim = c(120, 200), xlab = "Time", ylab = "Total group size", xaxt = "n")
lines(lowess(bb$Grpsz ~ bb$JulianDate), lwd = 2)
points(aso$Grpsz ~ aso$JulianDate, col = "red")
lines(lowess(aso$Grpsz ~ aso$JulianDate), col = "red", lwd = 2)
axis(side = 1, at = c(120, 150, 181), labels = c("May 01", "June 01", "July 01"))
leg.text <- c("Black Butte", "Asotin")
legend("topleft", bty = "n", leg.text, col = c("black", "red"), pch = c(1, 1), lwd = c(2, 2))
plot((bb$Ewes + bb$YrEwes) ~ bb$JulianDate, ylim = c(0, 25), xlim = c(120, 200), xlab = "Time", ylab = "Group size without lambs", xaxt = "n")
lines(lowess((bb$Ewes + bb$YrEwes) ~ bb$JulianDate), lwd = 2)
points((aso$Ewes + aso$YrEwes) ~ aso$JulianDate, col = "red")
lines(lowess((aso$Ewes + aso$YrEwes) ~ aso$JulianDate), col = "red", lwd = 2)
axis(side = 1, at = c(120, 150, 181), labels = c("May 01", "June 01", "July 01"))
# The same comparison as boxplots by Julian date.
par(mfrow = c(2, 2))
boxplot(bb$Grpsz ~ bb$JulianDate, main = "Black Butte", ylab = "Total group size", xlab = "Time (Julian date: 120 = May 01, 150 = June 01, 181 = July 01)")
boxplot(aso$Grpsz ~ aso$JulianDate, main = "Asotin", ylab = "Total group size", xlab = "Time (Julian date: 120 = May 01, 150 = June 01, 181 = July 01)")
boxplot((bb$Ewes + bb$YrEwes) ~ bb$JulianDate, main = "Black Butte", ylab = "Group size without lambs", xlab = "Time (Julian date: 120 = May 01, 150 = June 01, 181 = July 01)")
boxplot((aso$Ewes + aso$YrEwes) ~ aso$JulianDate, main = "Asotin", ylab = "Group size without lambs", xlab = "Time (Julian date: 120 = May 01, 150 = June 01, 181 = July 01)")
# Within-group contacts: ewe-lamb (dams) and lamb-lamb.
# aso.dams drops the listed IDs (non-dam individuals); the *contacts
# subsets keep only rows with a positive contact duration so that
# lowess() is fit on observed contacts only.
aso.dams <- subset(aso, !(ID %in% c("150.02", "150.07", "150.36", "151.21", "151.22", "151.232", "151.871", "Floppy", "Orange15", "Orange18", "Orange21", "Yellow41", "White45", "White51", "White52", "White left")))
aso.elcontacts <- subset(aso.dams, EL.Body.Contact.Duration >= 0.1 | ELNurseDuration >= 0.1)
aso.llcontacts <- subset(aso.dams, LL.Body.Duration >= 0.1)
bb.elcontacts <- subset(bb, EL.Body.Contact.Duration >= 0.1 | ELNurseDuration >= 0.1)
bb.llcontacts <- subset(bb, LL.Body.Duration >= 0.1)
# Log-scale contact durations over the season (log(x + 1) handles zeros).
par(mfrow = c(1, 2))
plot(log(bb$EL.Body.Contact.Duration + bb$ELNurseDuration + 1) ~ bb$JulianDate, ylim = c(0, 5.5), xlim = c(120, 200), pch = 16, xlab = "Time", ylab = "log(Ewe-lamb body + nursing contacts)", xaxt = "n")
lines(lowess(log(bb.elcontacts$EL.Body.Contact.Duration + bb.elcontacts$ELNurseDuration + 1) ~ bb.elcontacts$JulianDate), col = "black", lwd = 2)
points(log(aso.dams$EL.Body.Contact.Duration + aso.dams$ELNurseDuration + 1) ~ aso.dams$JulianDate, col = "red")
lines(lowess(log(aso.elcontacts$EL.Body.Contact.Duration + aso.elcontacts$ELNurseDuration + 1) ~ aso.elcontacts$JulianDate), col = "red", lwd = 2)
axis(side = 1, at = c(120, 150, 181), labels = c("May 01", "June 01", "July 01"))
leg.text <- c("Black Butte", "Asotin")
legend("topright", leg.text, col = c("black", "red"), pch = c(16, 1), bty = "n")
plot(log(bb$LL.Body.Duration + 1) ~ bb$JulianDate, ylim = c(0, 5.5), xlim = c(120, 200), pch = 16, xlab = "time", ylab = "log(Lamb-lamb contacts + 1)", xaxt = "n")
lines(lowess(log(bb.llcontacts$LL.Body.Duration) ~ bb.llcontacts$JulianDate), col = "black", lwd = 2)
points(log(aso.dams$LL.Body.Duration + 1) ~ aso.dams$JulianDate, col = "red")
lines(lowess(log(aso.llcontacts$LL.Body.Duration) ~ aso.llcontacts$JulianDate), col = "red", lwd = 2)
axis(side = 1, at = c(120, 150, 181), labels = c("May 01", "June 01", "July 01"))
# Nursing duration through time, same presentation.
bb.nurse <- subset(bb, ELNurseDuration >= 0.1)
aso.nurse <- subset(aso, ELNurseDuration >= 0.1)
par(mfrow = c(1, 1))
plot(log(bb$ELNurseDuration + 1) ~ bb$JulianDate, ylim = c(0, 5.5), xlim = c(120, 200), pch = 16, xlab = "Time", ylab = "log(nursing time)", xaxt = "n")
points(log(aso$ELNurseDuration + 1) ~ aso$JulianDate, col = "red")
lines(lowess(log(aso.nurse$ELNurseDuration + 1) ~ aso.nurse$JulianDate), col = "red")
lines(lowess(log(bb.nurse$ELNurseDuration + 1) ~ bb.nurse$JulianDate), col = "black")
axis(side = 1, at = c(120, 150, 181), labels = c("May 01", "June 01", "July 01"))
leg.text <- c("Black Butte", "Asotin")
legend("topright", leg.text, col = c("black", "red"), pch = c(16, 1), bty = "n")
|
0e22345ff7e78aa2eac8efa04e18dd2c78e5e853
|
125a18c7eba0ca722425fadbfd5e7c1e1692ae86
|
/man/data_ynorm.Rd
|
378bcce99856f21aa8821905bb0dff668eba7443
|
[] |
no_license
|
cran/bbemkr
|
8a41414f3161d48028bfbaf4480c894d9357e6bf
|
376a966dfd17a52129d4cbaaa092d1684477674a
|
refs/heads/master
| 2020-04-17T07:51:10.950435
| 2014-04-05T00:00:00
| 2014-04-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 470
|
rd
|
data_ynorm.Rd
|
\name{data_ynorm}
\alias{data_ynorm}
\docType{data}
\title{
Simulated response variable
}
\description{
The response variable is simulated from the functional form
\eqn{y = sin(2*pi*x1) + 4*(1-x2)*(1+x2) + 2*x3/(1+0.8*x3*x3) + rnorm(1, 0, 0.9)},
where \code{x1}, \code{x2} and \code{x3} are
simulated from a uniform distribution between 0 and 1.
}
\usage{data(data_ynorm)}
\format{
A numeric data matrix with 100 rows and 1 column.
}
\examples{
data(data_ynorm)
}
\keyword{datasets}
|
9c3bd80043f317dac8a195a2cc27580a2f37bb62
|
c7a7e02bfe49d5195cda8cf973b09c24e3094b15
|
/GH_250m_crop_Cocoa.R
|
c436c08909030d598e45ef47d7e9c52a854e9b14
|
[] |
no_license
|
iSDAgri/AlexVerlinden
|
836af00bae78d90da2e3fb52c442ac5eb8f983d3
|
867fbbf57c06a9a4b0347e2df398fa127c76315a
|
refs/heads/master
| 2021-05-04T13:07:43.265314
| 2017-05-07T13:35:24
| 2017-05-07T13:35:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,012
|
r
|
GH_250m_crop_Cocoa.R
|
# Crop distribution models for Ghana using ensemble regressions.
# Cropland mask is based on a 13,000-point AfSIS survey for Ghana; grids
# come from africasoils.net and field data from GhaSIS 2016. Tests a crop
# distribution model (glmnet) on presence/absence data from the crop
# scout. Alex Verlinden April 2016, based on M. Walsh and J. Chen.
#+ Required packages
# install.packages(c("downloader", "raster", "rgdal", "caret", "ROSE",
#                    "dismo", "spatialEco", "doParallel"), dependencies = TRUE)
# library() rather than require(): a missing package then fails
# immediately with a clear error instead of returning FALSE and failing
# obscurely later.
library(downloader)
library(raster)
library(rgdal)
library(dismo)
library(caret)
library(ROSE)        # for sample imbalances
library(spatialEco)
library(doParallel)

#+ Data downloads ----------------------------------------------------------
# Create a "GH_data" folder in the current working directory.
dir.create("GH_data", showWarnings = FALSE)
dat_dir <- "./GH_data"

# Crop/livestock presence-absence field observations.
# NOTE(review): Dropbox "?dl=0" links serve a preview page; "?dl=1" is
# the direct-download form -- confirm these URLs return the real files.
download.file("https://www.dropbox.com/s/2kouegkbrgaug8t/GH_crops_2016%20Sept.zip?dl=0",
              "./GH_data/GH_crops_2016%20Sept.zip", mode = "wb")
unzip("./GH_data/GH_crops_2016%20Sept.zip", exdir = "./GH_data")
GH_crops <- read.table(paste(dat_dir, "/GH_crops_2016 Sept.csv", sep = ""),
                       header = TRUE, sep = ",")
GH_crops <- na.omit(GH_crops)

# Covariate grids for Ghana (~200 MB download) stacked into one raster.
download.file("https://www.dropbox.com/s/nbpi0l4utm32cgw/GH_250_grids.zip?dl=0",
              "./GH_data/GH_250_grids.zip", mode = "wb")
unzip("./GH_data/GH_250_grids.zip", exdir = "./GH_data", overwrite = TRUE)
glist <- list.files(path = "./GH_data/GH_250_grids", pattern = "tif", full.names = TRUE)

# Land-cover grids for Ghana.
download.file("https://www.dropbox.com/s/ejho3jo3jn171w6/GH_250m_cov.zip?dl=0",
              "./GH_data/GH_250m_cov.zip", mode = "wb")
unzip("./GH_data/GH_250m_cov.zip", exdir = "./GH_data/GH_250m_cov", overwrite = TRUE)
glist2 <- list.files(path = "./GH_data/GH_250m_cov", pattern = "tif", full.names = TRUE)
glist <- c(glist, glist2)
grid <- stack(glist)
# Centre and scale all covariates. Note: `t` shadows base::t() but is
# kept because the rest of the script references it by this name.
t <- scale(grid, center = TRUE, scale = TRUE)

# Cropland mask.
download.file("https://www.dropbox.com/s/0pb5jtlsd6hghet/GH_crp1mask.zip?dl=0",
              "./GH_data/GH_crp1mask.zip", mode = "wb")
unzip("./GH_data/GH_crp1mask.zip", exdir = "./GH_data", overwrite = TRUE)
#+ Data setup for Crops Ghana--------------------------------------------------------------
# Project the crop observation coordinates to the grid CRS (Lambert
# azimuthal equal area, matching the covariate rasters).
ghcrop.proj <- as.data.frame(project(cbind(GH_crops$X_gps_longitude, GH_crops$X_gps_latitude), "+proj=laea +ellps=WGS84 +lon_0=20 +lat_0=5 +units=m +no_defs"))
colnames(ghcrop.proj) <- c("x","y")
coordinates(ghcrop.proj) <- ~x+y #convert to Spatial DataFrame
projection(ghcrop.proj) <- projection(grid)
# Extract the scaled gridded covariates at every observation point.
ghcropex <- data.frame(coordinates(ghcrop.proj), extract(t, ghcrop.proj))
# Drop the two coordinate columns, keeping covariates only.
# NOTE(review): the hard-coded 3:48 slice assumes exactly 46 covariate
# layers in the stack -- confirm if layers are added or removed.
ghcropex= ghcropex[,3:48]#exclude coordinates
# Subset to observations on cropland only (crop_pa == "Y").
GH_cr_agric=GH_crops[GH_crops$crop_pa=="Y",]
# Project the cropland subset the same way.
ghag.proj <- as.data.frame(project(cbind(GH_cr_agric$X_gps_longitude, GH_cr_agric$X_gps_latitude), "+proj=laea +ellps=WGS84 +lon_0=20 +lat_0=5 +units=m +no_defs"))
colnames(ghag.proj) <- c("x","y")
coordinates(ghag.proj) <- ~x+y #convert to Spatial DataFrame
projection(ghag.proj) <- projection(grid)
# Covariates at the cropland-only points.
GH_cr_ex=extract(t, ghag.proj)
###### Regressions for crops
#____________
# Bind the crop species column to the covariates; the response column
# has to change with every new crop (use names(GH_crops) to check the
# available crop columns).
# Cropland presence/absence response.
croppresabs=cbind(GH_crops$crop_pa, ghcropex)
colnames(croppresabs)[1]="crop"
croppresabs$crop=as.factor(croppresabs$crop)
prop.table(table(croppresabs$crop))
# Cocoa presence/absence over all observations.
cocopresabs=cbind(GH_crops$Cocoa, ghcropex)
cocopresabs=na.omit(cocopresabs)
colnames(cocopresabs)[1]="coco"
cocopresabs$coco=as.factor(cocopresabs$coco)
summary(cocopresabs)
# Check class balance: presences well below ~15 % make the crop a rare
# event that is difficult to model.
prop.table(table(cocopresabs$coco))
# Cocoa presence/absence restricted to cropland observations.
cocoagric=data.frame(GH_cr_agric$Cocoa,GH_cr_ex)
cocoagric=na.omit(cocoagric)
colnames(cocoagric)[1]= "coco1"
cocoagric$coco1=as.factor(cocoagric$coco1)
prop.table(table(cocoagric$coco1))
# Load the cropland mask raster unpacked above.
cropmask=raster("./GH_data/GH_crp1mask.tif")
###### Regressions
# set train/test set randomization seed
seed <- 1385321
set.seed(seed)
#parallel processing
mc <- makeCluster(detectCores())
registerDoParallel(mc)
#+ Split data into train and test sets ------------------------------------
# Crop type train/test split
#cocoa
cocoIndex <- createDataPartition(cocopresabs$coco, p = 2/3, list = FALSE, times = 1)
cocoTrain <- cocopresabs[ cocoIndex,]
cocoTest <- cocopresabs[-cocoIndex,]
cocoTest= na.omit(cocoTest)
#cocoa on cropland only
coco1Index=createDataPartition(cocoagric$coco1, p = 2/3, list = FALSE, times = 1)
coco1Train =cocoagric[coco1Index,]
coco1Test=cocoagric[-coco1Index,]
#____________
#set up data for caret
#cocoa on cropland
objControl <- trainControl(method='cv', number=10, classProbs = T,
returnResamp='none', allowParallel = TRUE,
summaryFunction = twoClassSummary)
#glmnet using binomial distribution for coco cropland
coco1.glm=train(coco1 ~ ., data=coco1Train, family= "binomial",method="glmnet",
metric="ROC", trControl=objControl)
confusionMatrix(coco1.glm)
coco1glm.pred=predict(t,coco1.glm, type= "prob")
plot(varImp(coco1.glm,scale=F))
#coco1 rf
coco1.rf=train(coco1 ~ ., data=coco1Train, family= "binomial",method="rf",
metric="ROC", ntree=501, trControl=objControl)
confusionMatrix(coco1.rf)
coco1rf.pred=predict(t,coco1.rf, type= "prob")
plot(varImp(coco1.rf,scale=F))
#coco gbm
coco1.gbm=train(coco1 ~ ., data=coco1Train,method="gbm",
metric="ROC", trControl=objControl)
confusionMatrix(coco1.gbm)
coco1gbm.pred=predict(t,coco1.gbm, type= "prob")
plot(varImp(coco1.gbm,scale=F))
#+ Ensemble predictions <glm> <rf>, <gbm>, -------------------------------
# Ensemble set-up
pred <- stack(1-coco1glm.pred,
1-coco1rf.pred, 1-coco1gbm.pred)
names(pred) <- c("cocoglm",
"cocorf","cocogbm")
geospred <- extract(pred, ghag.proj)
# presence/absence of coco (present = Y, absent = N)
cocoens <- cbind.data.frame(GH_cr_agric$Cocoa, geospred)
cocoens <- na.omit(cocoens)
cocoensTest <- cocoens[-cocoIndex,] ## replicate previous test set
names(cocoensTest)[1]= "coco"
# Regularized ensemble weighting on the test set <glmnet>
# 10-fold CV
ens <- trainControl(method = "cv", number = 10, allowParallel = TRUE )
# presence/absence of coco (present = Y, absent = N)
coco.ens <- train(coco ~. , data = cocoensTest,
family = "binomial",
method = "glmnet",
trControl = ens)
cocoens.pred <- predict(coco.ens, cocoensTest, type="prob") ## predict test-set
confusionMatrix(coco.ens) ## print validation summaries
coco.test <- cbind(cocoensTest, cocoens.pred)
cocop <- subset(coco.test, coco=="Y", select=c(Y))
cocoa <- subset(coco.test, coco=="N", select=c(Y))
coco.eval <- evaluate(p=cocop[,1], a=cocoa[,1]) ## calculate ROC's on test set <dismo>
coco.eval
plot(coco.eval, 'ROC') ## plot ROC curve
coco.thld <- threshold(coco.eval, 'spec_sens') ## TPR+TNR threshold for classification
cocoens.pred <- predict(pred, coco.ens, type="prob") ## spatial prediction
cocoens.pred=(1-cocoens.pred)*cropmask
plot((1-cocoens.pred)*cropmask, axes=F, main ="coco probability ensemble in cropland")
cocoensmask <- 1-cocoens.pred >coco.thld #THLD =0.078
cocoensmask= cocoensmask*cropmask
plot(cocoensmask, axes = F, legend = F, main= "Ensemble distribution prediction of coco")
plot(varImp(coco.ens,scale=F))
dir.create("./GH_results", showWarnings=F)
rf=writeRaster(cocoens.pred, filename="./GH_results/GH_cocoagric_2015_ens.tif", format= "GTiff", overwrite=TRUE)
rf=writeRaster(cocoensmask, filename="./GH_results/GH_cocoagric_2015_mask.tif", format= "GTiff", overwrite=TRUE)
|
0dc1092f74f1291ea7c0adba5370a53dde33e894
|
109734b597c2d760725a1a050174a5d11b3c1a9b
|
/man/stratrand.Rd
|
366f7df8f33714dc272a53608c9be3194389c786
|
[] |
no_license
|
rubak/spatstat
|
c293e16b17cfeba3e1a24cd971b313c47ad89906
|
93e54a8fd8276c9a17123466638c271a8690d12c
|
refs/heads/master
| 2020-12-07T00:54:32.178710
| 2020-11-06T22:51:20
| 2020-11-06T22:51:20
| 44,497,738
| 2
| 0
| null | 2020-11-06T22:51:21
| 2015-10-18T21:40:26
|
R
|
UTF-8
|
R
| false
| false
| 2,073
|
rd
|
stratrand.Rd
|
\name{stratrand}
\alias{stratrand}
\title{Stratified random point pattern}
\description{
Generates a \dQuote{stratified random} pattern of points in a window,
by dividing the window into rectangular tiles and placing
\code{k} random points in each tile.
}
\usage{
stratrand(window, nx, ny, k = 1)
}
\arguments{
\item{window}{A window.
An object of class \code{\link{owin}},
or data in any format acceptable to \code{\link{as.owin}()}.
}
\item{nx}{Number of tiles in each row.
}
\item{ny}{Number of tiles in each column.
}
\item{k}{Number of random points to generate in each tile.
}
}
\value{
A list with two components \code{x} and \code{y}, which are numeric
vectors giving the coordinates of the random points.
}
\details{
The bounding rectangle of \code{window} is divided into
a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles.
In each tile, \code{k} random points are generated independently
with a uniform distribution in that tile.
Note that some of these grid points may lie outside the window,
if \code{window} is not of type \code{"rectangle"}. The function
\code{\link{inside.owin}} can be used to select those grid points
which do lie inside the window. See the examples.
This function is useful in creating dummy points for quadrature
schemes (see \code{\link{quadscheme}}) as well as in simulating
random point patterns.
}
\seealso{
\code{\link{quad.object}},
\code{\link{quadscheme}},
\code{\link{inside.owin}},
\code{\link{gridcentres}}
}
\examples{
w <- unit.square()
xy <- stratrand(w, 10, 10)
\dontrun{
plot(w)
points(xy)
}
# polygonal boundary
bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2),
y=c(0.1,0.1,0.5,0.7,0.3))
w <- owin(c(0,1), c(0,1), poly=bdry)
xy <- stratrand(w, 10, 10, 3)
\dontrun{
plot(w)
points(xy)
}
# determine which grid points are inside polygon
ok <- inside.owin(xy$x, xy$y, w)
\dontrun{
plot(w)
points(xy$x[ok], xy$y[ok])
}
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{datagen}
|
3aa09a229c5eb12cfe533e0c52264418992750a0
|
5f074caca95f3046218031636b62036dded7f56b
|
/src/inverted-index.R
|
63873bf22d1f614680f32cc7f74edbc9077c0870
|
[
"MIT"
] |
permissive
|
xuleisanshi/rhadoop-examples
|
c1471c53966c446746c390494760f1c05640572c
|
7bc197e84bd04147a409fb4b1f287634b7fd3862
|
refs/heads/master
| 2021-05-27T19:43:44.666320
| 2014-03-13T11:32:15
| 2014-03-13T11:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 575
|
r
|
inverted-index.R
|
## Load and initialize libraries
library(rhdfs)
hdfs.init()
library(rmr2)
# Define the wordcount application
# Build an inverted index over a text corpus with rmr2: each distinct word
# maps to the comma-separated set of input files in which it appears.
#   input/output - HDFS paths handed to mapreduce()
#   pattern      - regex used to split lines into word tokens
invertedIndex = function(input, output = NULL, pattern = '[[:punct:][:space:]]+') {
  # Map step: emit one (word, source-file) pair per token on each line.
  emit_tokens <- function(., lines) {
    tokens <- tolower(unlist(strsplit(x = lines, split = pattern)))
    keyval(tokens, Sys.getenv("map_input_file"))
  }
  # Reduce step: collapse the file names observed for a word into one string.
  collapse_files <- function(word, filenames) {
    files <- unique(unlist(filenames, use.names = FALSE))
    keyval(word, toString(files))
  }
  mapreduce(input = input, output = output, input.format = "text",
            map = emit_tokens, reduce = collapse_files, combine = TRUE)
}
|
73625b890905a8a77060fbfee9319d47450e27aa
|
62971ba2128f643d37b85452c293a967c9820b31
|
/plot-violin/violinplot_snr.R
|
c90a7cb960ca176da4b91d658eb6e658b1b48467
|
[
"MIT"
] |
permissive
|
comp-music-lab/lullaby-analysis
|
dd1f1d1d01d18797054d60abdafb85c451b3264c
|
2361eab4b047b7dc2269b42917ab0d8f463af0eb
|
refs/heads/main
| 2023-06-30T11:38:57.255144
| 2021-08-02T00:46:20
| 2021-08-02T00:46:20
| 389,928,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,585
|
r
|
violinplot_snr.R
|
# Violin plot of signal-to-noise ratio (SNR) for lullabies vs non-lullabies,
# with paired observations connected and a mean +/- 95% CI crossbar.
# Clear variables
# NOTE(review): rm(list = ls()) at the top of a script is generally
# discouraged (it wipes the user's workspace); consider removing.
rm(list = ls())
# load libraries
library(ggplot2)
library(Hmisc)
# get data
sr_table <- read.csv("../data/IPL_snr.csv", header = TRUE, sep = ",", quote = "")
# confidence intervals for medians
source("./med_confint_e.R")
# fig : subjective rate violinplots
ylab <- expression(paste("SNR (dB) by Gaussian noise measurement"))
title2a <- expression(bold("b"))
# fixed jitter seed so paired lines and points share identical offsets
pj <- position_jitter(width = .025, seed = 6012)
figobj <- ggplot(
  data = sr_table,
  aes(
    y = snr,
    x = songtype
  )
) +
  geom_violin(aes(fill = songtype),
    trim = FALSE,
    alpha = .8
  ) +
  scale_fill_manual(values = c("blue", "red")) +
  # connect the two observations of each pair across song types
  geom_line(aes(group = pair_id),
    position = pj,
    alpha = .1
  ) +
  geom_point(
    aes(y = snr),
    position = pj,
    size = 1.1,
    pch = 21,
    fill = "white"
  ) +
  # mean with normal-theory 95% CI (median version kept commented for reference)
  stat_summary(
    geom = "crossbar",
    #fun.data = med_confint,
    #fun.args = list(al = 0.95, verbose = FALSE),
    fun.data = mean_cl_normal,
    fun.args = list(conf.int = 0.95),
    fill = "white",
    width = 0.8,
    alpha = 0.2,
    size = 0.4
  ) +
  scale_x_discrete(labels = c("Lullabies", "Non-lullabies")) +
  theme_bw() +
  theme(
    axis.text = element_text(colour = "black", size = 10),
    axis.title.x = element_text(size = 10, color = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    # NOTE(review): "n" is presumably meant to be "none" — confirm ggplot2
    # accepts the abbreviation in the version used here.
    legend.position = "n"
  ) +
  ylab(ylab) +
  xlab("") +
  ggtitle(title2a)
# save figure
plot(figobj)
ggsave("./figure/violinplot_snr.png", plot = figobj, width = 2.8, height = 4)
|
79abe8182b33bc477b4c5a8a92a8bf6b6c9189db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spectacles/examples/ids.Rd.R
|
57650d22520a61bf9ab094bc53311ef805d7e729
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 549
|
r
|
ids.Rd.R
|
# Auto-extracted example code (genthat) exercising spectacles::ids getters
# and setters on the bundled `oz` spectral dataset.
library(spectacles)
### Name: ids
### Title: Retrieves or sets the ids of a 'Spectra*' object.
### Aliases: ids ids<- ids,Spectra-method ids<-,Spectra-method
###   ids<-,SpectraDataFrame-method ids<-,Spectra-method
###   ids<-,SpectraDataFrame-method
### ** Examples
# Loading example data
data(oz)
# promote the data.frame to a Spectra object: id column ~ data ~ wavelengths
spectra(oz) <- sr_no ~ ... ~ 350:2500
# Retrieving ids
ids(oz)
# Setting ids using a vector of values
ids(oz) <- seq_len(nrow(oz))
ids(oz)
# Setting ids using an attribute
oz$new_id <- seq_len(nrow(oz)) + 1000
# formula form: take ids from the named column
ids(oz) <- ~ new_id
ids(oz)
|
f625634adbe295f2d88d99c4e78ac4a264aeb614
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848808-test.R
|
3baa0a8dd0638a1f2897508e70f58672bbf995c6
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
1615848808-test.R
|
# Auto-generated fuzz/valgrind regression input for meteor:::ET0_Makkink.
# Rs and atmp are deliberately length-0, relh is all zeros, and temp holds
# extreme/denormal doubles to probe edge-case handling in the C++ routine.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 2.05810269315294e-312, 2.51947000254159e+93, 2.51947000254151e+93, -1.03644984280403e+156, 3.55262942202735e-157, 2.73593267390447e+59, -4.54883668616495e+277, -1.96893208756045e+208, 1.10818199142729e-09, 3.90082255103332e-221, -1.15261897385911e+41, -8.10849672500667e+229, -8.1647463216501e-277, -3.91881664584645e-291, 1.05447987084428e+254, -1.44288984971022e+71, -7.00882470702786e-295, -4.55414938106482e-200, -6.59203011022338e-83, 179.214603488924))
result <- do.call(meteor:::ET0_Makkink,testlist)
# print the structure of the result; the test only checks it runs cleanly
str(result)
|
1da5969d6306e55ea2ed2857f7c23116dbf87860
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/alphahull/examples/dw_track.Rd.R
|
048219e3775c1719826f32be610600276a8e89cf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,391
|
r
|
dw_track.Rd.R
|
library(alphahull)
### Name: dw_track
### Title: RBM-sausage calculation of tracking data
### Aliases: dw_track
### Keywords: nonparametric
### ** Examples
## Not run:
##D library(move)
##D library(ggmap)
##D # Data from Movebank
##D # Study Name: Dunn Ranch Bison Tracking Project
##D # Principal Investigator: Stephen Blake, Randy Arndt, Doug Ladd
##D # Max Planck Institute for Ornithology Radolfzell Germany
##D study <- "Dunn Ranch Bison Tracking Project"
##D cainfo <- system.file("CurlSSL", "cacert.pem", package = "RCurl")
##D options(RCurlOptions = list(verbose = FALSE, capath = cainfo, ssl.verifypeer = FALSE))
##D # Login to movebank (first create the login object)
##D curl <- movebankLogin(username = "xxx", password = "zzz")
##D # Downloads study stored in Movebank
##D track <- getMovebankData(study = study, login = curl)
##D dat <- track@data[track@data[, "deployment_id"] == 13848432,]
##D # Map of animal locations
##D bbox <- ggmap::make_bbox(dat[,"location_long"], dat[,"location_lat"], f = 0.3)
##D map_loc <- get_map(location = bbox, source = "google", maptype = 'satellite')
##D map <- ggmap(map_loc, extent = 'panel', maprange=FALSE)
##D p <- map + geom_path(data = dat, aes(x = location_long, y = location_lat), col=2, size=0.3)
##D p
##D ah_dw <- dw_track(x = dat[, c("location_long", "location_lat")], eps = 0.001)
##D p + ah_dw
## End(Not run)
|
34f0c89390a485222daddbb37ca2eb863b76fe6b
|
fdf8f6f14a9ea320629c338890f96a907087c03f
|
/utils/format_data.R
|
8ffe4c5702654f90214c2bb1b5c6849d14eccef1
|
[] |
no_license
|
sds-dubois/mltk
|
cb393a9c7640bb2f0bcb965ea2e54cfc4be90767
|
20bb38a8ffdc2a345e593a58812ce98bf8fe3afe
|
refs/heads/master
| 2021-01-17T12:33:42.111887
| 2016-03-31T23:48:56
| 2016-03-31T23:48:56
| 52,417,018
| 1
| 1
| null | 2016-03-15T01:19:31
| 2016-02-24T05:32:02
|
Java
|
UTF-8
|
R
| false
| false
| 3,076
|
r
|
format_data.R
|
# Author: Sebastien Dubois
# Libraries ---------------------------------------------------------------
library(readr)
library(dplyr)
library(stringr)
# Load --------------------------------------------------------------------
# Convert tab-separated train/validation exports into the space-separated,
# headerless files plus an attribute (schema) file expected downstream.
# Switch dataset by editing `dir`, the file-name stems, and `ntrain`.
dir <- "sutter/"
# healogics/
# sutter/
val_name <- "val-12-4-15"
# "test-2013-only-4-3-15" #
train_name <- "train-12-4-15"
# "train-2013-only-4-3-15" #
data.val <- str_c("data/", dir, val_name,".txt", sep = "")
data.train <- str_c("data/", dir, train_name, ".txt", sep = "")
df.val <- read_tsv(file = data.val)
df.train <- read_tsv(file = data.train)
df.val %>% dim()
df.train %>% dim()
# change target name for rbind
names(df.val)[1] <- 'Y'
names(df.train)[1] <- 'Y'
# combined frame used only to derive the attribute file below
df <- rbind(df.val, df.train)
# Create attribute file ---------------------------------------------------
# clean variable names: replace separators/punctuation with underscores
p <- " |-|&|/|\\*|%"
names(df) <- names(df) %>%
  lapply(function(s) str_replace_all(s, pattern = p, replacement = "_")) %>%
  unlist()
# attribute file: one "name: cont" or "name: {0, 1, ...}" line per column
file_str <- ""
# NOTE(review): 1:ncol(df) would misbehave on a 0-column frame; seq_len(ncol(df))
# is the safer idiom (left unchanged here).
for(i in 1:ncol(df)) {
  if(class(df[[i]]) == 'numeric') {
    file_str <- str_c(file_str, names(df)[i], ": cont")
  } else {
    # if integer, check if it's a discrete variable
    # (all values in 0..nval-1 where nval is the number of distinct values)
    uniques <- df[,i] %>% unique()
    nval <- uniques %>% nrow()
    if(sum(uniques < 0 | uniques >= nval) == 0) {
      # discrete variable
      vals <- str_c(c(0:(nval-1)), sep = "", collapse = ", ")
      vals <- str_c(names(df)[i], ": {", vals, "}")
      file_str <- str_c(file_str, vals)
    } else {
      file_str <- str_c(file_str, names(df)[i], ": cont")
    }
  }
  # add 'class' for the target variable (column 1 is the target Y)
  if(i ==1) {
    file_str <- str_c(file_str, " (class)\n")
  } else {
    file_str <- str_c(file_str, "\n")
  }
}
# remove last '\n'
file_str <- substr(file_str, 1, (nchar(file_str)-1))
# save attr file
write(file_str,
      file = str_c("data/", dir, "data_attr.txt", sep= ""))
# Rewrite files -----------------------------------------------------------
# nrow(df.train)
# 0.75 * nrow(df.train)
# nrow(df.val)
# NOTE(review): ntrain must be updated manually per dataset (see the two
# values below); 21568 is the healogics count although dir is "sutter/" —
# confirm this pairing is intentional.
ntrain <- 21568
# sutter : 99081
# healogics : 21568
# shuffle training set
df.train <- df.train[sample(nrow(df.train)),]
test.set <- df.val
valid.set <- df.train %>% slice((ntrain + 1):nrow(df.train))
train.set <- df.train %>% slice(1:ntrain)
# rewrite data without headers and with space separators
# training
write_delim(train.set,
            path = str_c("data/", dir, "train.txt", sep = ""),
            col_names = FALSE,
            delim = " ")
# validation set, a part from original training data
write_delim(valid.set,
            path = str_c("data/", dir, "val.txt", sep = ""),
            col_names = FALSE,
            delim = " ")
# all = training + validation
write_delim(df.train,
            path =str_c("data/", dir, "all.txt", sep = ""),
            col_names = FALSE,
            delim = " ")
# a test set we should never use for training, original 'validation' set
write_delim(test.set,
            path = str_c("data/", dir, "test.txt", sep = ""),
            col_names = FALSE,
            delim = " ")
|
38df17af7dad8a93a94479e57b76f2beada05c5a
|
3b2e52985e8337c9440d696c643459356d105bc6
|
/code/PostToPlotly.R
|
d913b65484d0d81c92ad4d17cbbaaad09d736fb2
|
[] |
no_license
|
jasonboyer/W4150
|
204949d0032dbbf18af86557f8a81f4add04cb96
|
5e314694436c25903ec606d056847ae058aba584
|
refs/heads/master
| 2020-06-19T01:08:39.407020
| 2016-12-13T11:52:54
| 2016-12-13T11:52:54
| 74,928,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
PostToPlotly.R
|
# PostToPlotly.R - Code to post plots to plot.ly for
# public web viewing and interaction
#
# Add the following lines in .Rprofile to enable uploading:
#
# Sys.setenv("plotly_username"="your_plotly_username")
# Sys.setenv("plotly_api_key"="your_api_key")
#
# See https://plot.ly/ggplot2/getting-started/
#
library(plotly)
# PostToPlotly()
#
# arguments
# plots - list of plots to upload
# plotNames - corresponding list or titles for the plots
# PostToPlotly()
#
# Upload each plot in `plots` to plot.ly via plotly_POST().
#
# arguments
#   plots     - list of plots to upload
#   plotNames - optional list/vector of titles, parallel to `plots`;
#               when NULL, names default to "Figure  <i>"
# returns the result of the for-loop (NULL), invisibly via plotly_POST side effects
PostToPlotly <- function(plots, plotNames=NULL) {
  # BUG FIX: the original used 1:length(plots), which iterates over c(1, 0)
  # when `plots` is empty; seq_along() correctly yields an empty sequence.
  for (i in seq_along(plots)) {
    if (is.null(plotNames)) {
      name <- paste("Figure ", i)
    } else {
      # use <- (not =) for consistency with the rest of the function
      name <- plotNames[i]
    }
    plotly_POST(plots[[i]], name)
  }
}
|
37bf39db301509c0a26473ef884562c0eaab0928
|
3b3cfcc6673aec9aeda77884a976ca34095b8690
|
/Ex20.R
|
51c8db899c5b173f11fee0f46582823a110be720
|
[] |
no_license
|
MrTorstein/STK4900
|
2f593cd55e7590e537dc8447f942a23da1f35c07
|
de4e69e86ebda08c44890f169cc1ab218a239e0d
|
refs/heads/main
| 2023-07-20T02:15:40.442590
| 2021-08-24T20:51:25
| 2021-08-24T20:51:25
| 399,600,319
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,208
|
r
|
Ex20.R
|
#Exercise 20:
# In this exercise we will study data from an experiment where one wanted to assess the toxicity of the substance rotenone.
# Groups of about 50 insects were exposed to various doses of rotenone, and the number of insects that died at each dose level was recorded.
# The data are available at the course web-page.
# The variables in the data set are coded as follows:
# - LOGDOSE Logarithm of the concentration of rotenone (base 10 logarithm)
# - NUMBER Number of insects in total
# - DEAD Number of insects that died
# a) Compute the proportion of insects that died at each dose level and make a plot of the proportions versus dose of rotenone.
insects = read.table("http://www.uio.no/studier/emner/matnat/math/STK4900/data/insects.txt", header = T, na.strings = ".")
p = insects$DEAD / insects$NUMBER
plot(p ~ insects$LOGDOSE, xlab = "Log(dose)")
print("-----------------------------------------------------------")
# b) Fit a suitable regression model for the relation between the proportions of insects that died and the doses of rotenone.
# A logistic (binomial GLM) model is appropriate: the response is a bounded
# proportion, so a linear model could predict impossible values (< 0 or > 1).
insects.fit = glm(cbind(DEAD, NUMBER - DEAD) ~ LOGDOSE, data = insects, family = binomial)
summary(insects.fit)
print("-----------------------------------------------------------")
# c) Assess the fit of the model by including the fitted proportions on the plot from question a.
x = seq(0, 2, 0.01)
y = predict(insects.fit, list(LOGDOSE = x), type = "response")
lines(x, y)
# Also give a formal goodness-of-fit test (analysis of deviance).
anova(insects.fit, test = "Chisq")
print("-----------------------------------------------------------")
# d) Use the fitted model to estimate LD50, i.e. the dose required to kill half the members of a tested population.
# Solving logit(p) = beta0 + beta1 * LOGDOSE for LOGDOSE gives
#   LD50 = (logit(p) - beta0) / beta1
beta0 = insects.fit$coef[1]
beta1 = insects.fit$coef[2]
p = 0.5
# BUG FIX: the original computed log(p/(1-p)) - beta0/beta1 (missing
# parentheses). That only coincides with the correct answer at p = 0.5,
# where log(p/(1-p)) = 0; the parenthesised form is correct for any p.
LD50 = (log(p / (1 - p)) - beta0) / beta1
print(paste("LD50 = ", LD50))
# sanity check: the predicted death probability at LD50 should be ~0.5
predict(insects.fit, list(LOGDOSE = LD50), type = "response")
|
d4a75745f540d495823e741f1d60cb4fbed098a0
|
930051798caf4bf826ce6a966b4ba16b28f1d30a
|
/R/signalEarly.R
|
5600a648ede02602705401ff90849c9768cb7af3
|
[] |
no_license
|
jcheng5/future
|
e5891014fd63f52b240e45e31b1b5b6f66506eca
|
f4c170a52fe2333d5b91c0336a74a20056e8de42
|
refs/heads/master
| 2021-06-28T16:19:40.335834
| 2017-09-09T15:12:13
| 2017-09-09T15:12:13
| 103,759,888
| 3
| 0
| null | 2017-09-16T14:51:13
| 2017-09-16T14:51:13
| null |
UTF-8
|
R
| false
| false
| 1,239
|
r
|
signalEarly.R
|
# Signal a condition captured by a `future` as early as possible, rather
# than deferring it until value() is called.
#   future  - a Future object
#   collect - if TRUE, retrieve the value via value(..., signal = FALSE);
#             otherwise use the already-stored future$value as-is
#   ...     - currently unused
# Errors are re-raised as FutureError; warnings/messages are re-emitted;
# other conditions are re-signalled via signalCondition().
# Returns the future itself, invisibly.
signalEarly <- function(future, collect = TRUE, ...) {
  ## Future is not yet launched
  if (future$state == "created") return(future)
  earlySignal <- future$earlySignal
  ## Don't signal early?
  if (!earlySignal) return(future)
  # verbose tracing controlled by the future.debug option
  debug <- getOption("future.debug", FALSE)
  if (debug) mdebug("signalEarly(): Retrieving value ...")
  ## Collect value?
  if (collect) {
    if (debug) mdebug("signalEarly(): v <- value(f, signal = FALSE)")
    value <- value(future, signal = FALSE)
  } else {
    if (debug) mdebug("signalEarly(): v <- f$value")
    value <- future$value
  }
  if (debug) {
    mdebug("signalEarly(): class(v) = c(%s)", paste(sQuote(class(value)), collapse = ", "))
    mdebug("signalEarly(): Retrieving value ... DONE")
  }
  ## Was a condition caught? If not, nothing to signal.
  if (!inherits(value, "condition")) return(future)
  if (debug) mdebug("signalEarly(): signalCondition(v)")
  ## Signal detected condition, dispatching on its class.
  ## Order matters: "error" is checked before "warning"/"message" since a
  ## condition may inherit from several classes.
  if (inherits(value, "error")) {
    stop(FutureError(future))
  } else if (inherits(value, "warning")) {
    warning(value)
  } else if (inherits(value, "message")) {
    message(value)
    # emit a trailing newline after the message
    message("\n")
  } else {
    signalCondition(value)
  }
  if (debug) mdebug("signalEarly() ... DONE")
  invisible(future)
}
|
5c227186107c74bc4efcbcaf640eef3e0d701fbb
|
12a9bea8cfff9e5dcd44651102a5e0adf477164a
|
/R/dollar.R
|
53040c0ada1a783c4dd9ad335541588f07446cc6
|
[] |
no_license
|
duncantl/RLLVMCompile
|
2b98a04f1f7e71f973a281b40457f5730e38f284
|
7fad5bd394a6f74ace0f6053a5d08e4f15cf3a1f
|
refs/heads/master
| 2021-01-19T01:42:02.316459
| 2017-03-07T00:49:31
| 2017-03-07T00:49:31
| 3,894,344
| 32
| 3
| null | 2015-03-03T13:27:54
| 2012-04-01T18:04:28
|
R
|
UTF-8
|
R
| false
| false
| 1,794
|
r
|
dollar.R
|
# Compile an R `$` field access (obj$field) into LLVM IR.
# Only the case where `obj` compiles to a struct (or pointer-to-struct)
# value is implemented; the struct's field names must be registered in
# env$.structInfo since LLVM types do not carry element names.
# Returns the load instruction holding the field's value.
compile.dollar = `compile.$` =
function(call, env, ir, ..., .targetType = NULL)
{
  elName = as.character(call[[3]])   # name of the field being accessed
  obj = call[[2]]
  val = compile(obj, env, ir)        # compile the object expression first
  ty = valType = getType(val)
  # unwrap one level of pointer so we can test the underlying struct type
  pointerToStruct = isPointerType(ty)
  if(pointerToStruct)
     valType = getElementType(ty)
  if(isStructType(valType)) {
    # Should add setAlignment() calls (with 8L) for the local var, store and load instructions
    # make local variable to access the parameter. TEMPORARY. See if it is already present.
    #XXX
    pvar = createLocalVariable(ir, ty, sprintf("%s.addr", as.character(call[[2]]))) # not if call[[2]] is an actual call
    ans = createStore(ir, val, pvar) # ??? should val be compiled or just get the parameter value.
    tmp = createLoad(ir, pvar)
    # now match the name of the field being accessed to its position in the structure.
    # we need the structure info from outside of llvm as it doesn't store names.
    info = findStructInfo(valType, env$.structInfo)
    if(is.null(info))
      stop("need information about the elements of the struct")
    # LLVM GEP indices are 0-based, hence the - 1L
    index = match(elName, info@names) - 1L
    if(is.na(index))
      stop("no such field '", elName, "' in the struct")
    ctx = getContext(env$.module)
    # GEP with indices (0, index): deref the struct, then select the field
    elVal = createGEP(ir, tmp, lapply(c(0L, index), createIntegerConstant, ctx), "getfield")
    ans = createLoad(ir, elVal)
    setAlignment(ans, 4L)
  }
  else
    stop("not implemented yet")
  ans
}
# Locate the struct description in `info` whose LLVM type matches `ty`
# (compared via sameType()). Returns the first matching entry, or NULL
# when nothing matches.
findStructInfo =
function(ty, info)
{
   # vapply() guarantees a logical vector even when `info` is empty;
   # sapply() would return list(), making any(i) error-prone.
   i = vapply(info, sameType, logical(1), ty)
   if(!any(i))
     return(NULL)
   info[[which(i)[1]]]
}
|
d23e9f9e69289b32e8cc174ab753b3168ea572e9
|
ee25547fd3549440d9da5ccd61fa349084562421
|
/man/summary.ctmm.Rd
|
f3bb66bf517a040758a893340553974eb180ad06
|
[] |
no_license
|
ctmm-initiative/ctmm
|
bd63d800261e2ee4d9695473a043fa54157567d2
|
d50c79c1d2a4f72b5cf1d82f2ff4f33e051f8015
|
refs/heads/master
| 2023-08-09T14:05:57.214843
| 2023-07-26T09:37:10
| 2023-07-26T09:37:10
| 81,976,079
| 38
| 8
| null | 2019-07-09T13:34:55
| 2017-02-14T18:26:27
|
R
|
UTF-8
|
R
| false
| false
| 4,357
|
rd
|
summary.ctmm.Rd
|
\name{summary.ctmm}
\alias{summary.ctmm}
\encoding{UTF-8}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Summarize a continuous-time movement model}
\description{ This function returns a list of biologically interesting parameters in human readable format, as derived from a continuous-time movement model.}
\usage{
\S3method{summary}{ctmm}(object,level=0.95,level.UD=0.95,units=TRUE,IC=NULL,MSPE=NULL,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ A \code{ctmm} movement-model object from the output of \code{ctmm.fit}. }
\item{level}{ Confidence level for parameter estimates. }
\item{level.UD}{ Coverage level for the Gaussian home-range area. }
\item{units}{Convert result to natural units.}
\item{IC}{Information criteria for sorting lists of \code{ctmm} objects. Can be \code{"AICc"}, \code{"AIC"}, \code{"BIC"}, \code{"LOOCV"}, \code{"HSCV"}, or none (\code{NA}). AICc is approximate.}
\item{MSPE}{Sort models with the same autocovariance structure by the mean square predictive error of \code{"position"}, \code{"velocity"}, or not (\code{NA}).}
\item{...}{Unused options.}
}
%\details{}
\value{
If summary is called with a single \code{ctmm} object output from \code{\link{ctmm.fit}}, then a list is returned with the effective sample sizes of various parameter estimates (\code{DOF}) and a parameter estimate table \code{CI}, with low, point, and high estimates for the following possible parameters:
\describe{
\item{\code{tau}}{The autocorrelation timescales. \code{tau position} is also the home-range crossing timescale.}
\item{\code{area}}{The Gaussian home-range area, where the point estimate has a significance level of \code{level.UD}. I.e., the core home range is where the animal is located 50\% of the time with \code{level.UD=0.50}.
This point estimate itself is subject to uncertainty, and is given confidence intervals derived from \code{level}.
This Gaussian estimate differs from the kernel density estimate of \code{\link{summary.UD}}. The Gaussian estimate has more statistical efficiency, but is less related to space use for non-Gaussian processes.}
\item{\code{speed}}{The Gaussian root-mean-square (RMS) velocity, which is a convenient measure of average speed but not the conventional measure of average speed (see \code{\link{speed}}).}
}
If summary is called on a list of \code{ctmm} objects output from \code{\link{ctmm.select}}, then a table is returned with the model names and IC differences for comparison across autocovariance structures. The mean square prediction error (MSPE) is also returned for comparison across trend structures (with autocovariance structure fixed). For the model names, "IID" denotes the uncorrelated bi-variate Gaussian model, "OU" denotes the continuous-position Ornstein-Uhlenbeck model, "OUF" denotes the continuous-velocity Ornstein-Uhlenbeck-F model, "OUf" denotes the OUF model where the two autocorrelation timescales cannot be statistically distinguished.
}
%\references{}
\author{ C. H. Fleming. }
\note{ Confidence intervals on the autocorrelation timescales assume they are sufficiently greater than zero and less than infinity.
\code{IC="LOOCV"} can only be attempted if also specified during \code{\link{ctmm.select}}, as this argument requires additional calculations.
Prior to \code{ctmm} v0.6.2, timescale confidence intervals were constructed from normal and inverse-normal sampling distributions, whereas v0.6.2 onward uses gamma and inverse-gamma sampling distributions.
In \code{ctmm} v0.5.1 onward the MSPE is averaged over all possible times instead of over all sampled times.
In \code{ctmm} v0.3.4 the speed estimate was fixed to be the RMS velocity and not \eqn{1/\sqrt{2}} times the RMS velocity.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{ \code{\link{ctmm.fit}}, \code{\link{ctmm.select}}. }
\examples{\donttest{
# Load package and data
library(ctmm)
data(buffalo)
# Extract movement data for a single animal
DATA <- buffalo$Cilla
# fit model
GUESS <- ctmm.guess(DATA,interactive=FALSE)
FIT <- ctmm.fit(DATA,GUESS)
# Tell us something interpretable
summary(FIT)
}}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
ae4a19250451d6edfec06e082b8b4c7dd42da1cb
|
de2f15629bdcf7860da6336745253b088484aa7d
|
/R/app.R
|
a9ee77dfa5b3995694829c8192694d6552707ca0
|
[
"Apache-2.0"
] |
permissive
|
MarkMc1089/snapenium
|
27dcefeacb720ac472ebe55c390a5b3003e0ea8d
|
075b1cdab59233b0fddc806a51ba07e7f3ea62c6
|
refs/heads/master
| 2023-09-05T03:24:36.994548
| 2021-11-12T08:07:33
| 2021-11-12T08:07:33
| 427,276,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 637
|
r
|
app.R
|
#' Open browser to URL
#'
#' Starts a local Chrome session via RSelenium on a free port and navigates
#' to \code{url}. The caller is responsible for closing the session with
#' \code{close_browser()}.
#'
#' @param url URL to open (defaults to an empty string, i.e. a blank page)
#'
#' @return RemoteDriver client
#' @export
#'
#' @examples
#' driver <- Open_browser("http://google.co.uk")
#' close_browser(driver)
Open_browser <- function(url = "") {
  # NOTE(review): the capitalised name is inconsistent with close_browser();
  # renaming would break existing callers, so only the roxygen examples
  # (which referenced a nonexistent `open_browser`) were corrected.
  rD <- RSelenium::rsDriver(port = netstat::free_port(), browser = "chrome", verbose = FALSE)
  driver <- rD[["client"]]
  driver$navigate(url)
  driver
}
#' Close browser associated with RemoteDriver client
#'
#' @param driver A RemoteDriver client, as returned by \code{Open_browser()}.
#'
#' @return \code{NULL}, invisibly.
#' @export
#'
#' @examples
#' driver <- Open_browser("http://google.co.uk")
#' close_browser(driver)
close_browser <- function(driver) {
  driver$close()
  # The original also called rm(driver), which only removed the
  # function-local binding and had no effect in the caller's frame;
  # it has been dropped as a no-op.
  invisible(NULL)
}
|
19659f92364135906f3c65789e4a5dd3d4d93866
|
13dbbbced8d21bf74dacb3d0ac32751a84daf6b1
|
/Scripts/Current Magic Scripts/MUX_PLSR_biplots.R
|
fdace6c1941071e64bceb867bd1777e7037e6b12
|
[
"CC0-1.0"
] |
permissive
|
hammondnw/MUX
|
a5b66a937de06b06c886303278aa4cb54957022f
|
a1bfb96de11cecc67b1a9461e2ab297247fb3d45
|
refs/heads/main
| 2023-04-08T00:02:58.215416
| 2023-02-22T18:18:01
| 2023-02-22T18:18:01
| 567,000,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,514
|
r
|
MUX_PLSR_biplots.R
|
#### Script for making bi-plots for PLSR calibration data ####
### Author: Nick Hammond
### Last Edited: 09/06/2022
# Set wd, load packages
library(lubridate)
library(tidyverse)
library(magrittr)
require(transformr)
library(stringr)
library(readxl)
library(pls)
library(scales)
library(ggpubr)
library(patchwork) # To display 2 charts together
library(hrbrthemes)
library(zoo)
# Build observed-vs-predicted bi-plots for the MUX PLSR calibration.
setwd("./")  # no-op; relative paths below assume the project root
### Turnover Experiment ###
# Read in MUX PLSR predictions
MUX_preds = read_csv(paste0(getwd(),"/MagicData/MUX/Figures Files/MUX20_predictions_boot_111522.csv"))
MUX_preds$DateTime = ymd_hms(MUX_preds$DateTime, tz="America/New_York")
# drop the read_csv row-number column and tag rows as predictions
MUX_preds = MUX_preds %>% select(-c('...1')) %>% mutate(ID = "Pred")
#### Read in FCR WQ data ####
dataWQ <- read_csv(paste0(getwd(),"/MagicData/MUX/Figures Files/MUX20_dataWQ_111522.csv"))
dataWQ$DateTime = ymd_hms(dataWQ$DateTime, tz="America/New_York")
dataWQ = dataWQ %>% select(-c('...1')) %>% mutate(ID = "Obs")
# Keep only predictions at lab-sampling timestamps, drop the uncertainty
# bounds, then stack observed + predicted and widen so each row carries
# both values per constituent (suffixes _Obs / _Pred).
biplot_df = MUX_preds %>% filter(DateTime %in% dataWQ$DateTime) %>%
  select(-c("uncerSMn_max", "uncerSMn_min", "uncerSFe_max", "uncerSFe_min",
            "uncerTMn_max", "uncerTMn_min", "uncerTFe_max", "uncerTFe_min"))
biplot_df = union(biplot_df,dataWQ)
biplot_df = biplot_df %>% pivot_wider(names_from = "ID", values_from = c("TFe_mgL","TMn_mgL",
                                                                         "SFe_mgL","SMn_mgL"))
# Subset to just the hypolimnion/epilimnion (depths in metres)
biplot_df_hypo = biplot_df %>% filter(Depth_m %in% c(6.2,8,9))
biplot_df_epi = biplot_df %>% filter(Depth_m %in% c(0.1,1.6,3.8))
# Make Plots ! #
# Turnover Experiment, Hypolimnion Models #
# Total Fe: observed vs predicted with OLS fit and R^2 annotation (ggpubr)
TFe = ggplot(biplot_df_hypo, aes(x=TFe_mgL_Obs,y=TFe_mgL_Pred)) +
  geom_point() +
  geom_smooth(method='lm', se=FALSE, color='black') +
  theme_minimal() +
  labs(x='Lab Measured Tot. Fe (mg/L)', y='PLSR Predicted Tot. Fe (mg/L)') +
  stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 4.75) +
  theme(axis.title.x = element_text(size=8),
        axis.title.y = element_text(color = "black", size=8))
# Total Mn: same layout as TFe with a different annotation height
TMn = ggplot(biplot_df_hypo, aes(x=TMn_mgL_Obs,y=TMn_mgL_Pred)) +
  geom_point() +
  geom_smooth(method='lm', se=FALSE, color='black') +
  theme_minimal() +
  labs(x='Lab Measured Tot. Mn (mg/L)', y='PLSR Predicted Tot. Mn (mg/L)') +
  stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 2.2) +
  theme(axis.title.x = element_text(size=8),
        axis.title.y = element_text(color = "black", size=8))
SFe = ggplot(biplot_df_hypo, aes(x=SFe_mgL_Obs,y=SFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Fe (mg/L)', y='PLSR Predicted Sol. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.075, digits = 1) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SMn = ggplot(biplot_df_hypo, aes(x=SMn_mgL_Obs,y=SMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Mn (mg/L)', y='PLSR Predicted Sol. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 2, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
# Turnover Experiment, Epilimnion Models #
TFe_epi = ggplot(biplot_df_epi, aes(x=TFe_mgL_Obs,y=TFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Fe (mg/L)', y='PLSR Predicted Tot. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 1.5) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
TMn_epi = ggplot(biplot_df_epi, aes(x=TMn_mgL_Obs,y=TMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Mn (mg/L)', y='PLSR Predicted Tot. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.75) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SFe_epi = ggplot(biplot_df_epi, aes(x=SFe_mgL_Obs,y=SFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Fe (mg/L)', y='PLSR Predicted Sol. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.096, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SMn_epi = ggplot(biplot_df_epi, aes(x=SMn_mgL_Obs,y=SMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Mn (mg/L)', y='PLSR Predicted Sol. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.7, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
# Write plots to jpeg
patchwork_20 = (TFe_epi | TMn_epi | SFe_epi | SMn_epi) / (TFe | TMn | SFe | SMn)
jpeg('MUX20_biplots_111822.jpeg', width = 190, height = 120, units = 'mm', res = 600)
patchwork_20 + plot_annotation(tag_levels = "A") &
theme(plot.tag = element_text(size = 12, hjust = 0, vjust = 0))
dev.off()
### Oxygen ON Experiment ###
# Read in MUX PLSR predictions
MUX_preds = read_csv(paste0(getwd(),"/MagicData/MUX/Figures Files/MUX21_predictions_boot_051322.csv"))
MUX_preds$DateTime = ymd_hms(MUX_preds$DateTime, tz="America/New_York")
MUX_preds = MUX_preds %>% select(-c('...1'))
#### Read in FCR WQ data ####
dataWQ <- read_csv(paste0(getwd(),"/MagicData/MUX/Figures Files/MUX21_dataWQ_051322.csv"))
dataWQ$DateTime = ymd_hms(dataWQ$DateTime, tz="America/New_York")
dataWQ = dataWQ %>% select(-c('...1','ID'))
MUX_preds = MUX_preds %>% select(-c("uncerSMn_max", "uncerSMn_min", "uncerSFe_max", "uncerSFe_min",
"uncerTMn_max", "uncerTMn_min", "uncerTFe_max", "uncerTFe_min"))
biplot_df = left_join(dataWQ,MUX_preds,by = c("DateTime","Depth_m"))
colnames(biplot_df) = c("DateTime","Depth_m","TFe_mgL_Obs","TMn_mgL_Obs",
"SFe_mgL_Obs","SMn_mgL_Obs", "TFe_mgL_Pred","TMn_mgL_Pred",
"SFe_mgL_Pred","SMn_mgL_Pred")
# Subset to just the hypolimnion/epilimnion
biplot_df_hypo = biplot_df %>% filter(Depth_m %in% c(6.2,8,9))
biplot_df_epi = biplot_df %>% filter(Depth_m %in% c(0.1,1.6,3.8))
# Make Plots ! #
# Oxygen On Experiment, Hypolimnion Models #
TFe = ggplot(biplot_df_hypo, aes(x=TFe_mgL_Obs,y=TFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Fe (mg/L)', y='PLSR Predicted Tot. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 7) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
TMn = ggplot(biplot_df_hypo, aes(x=TMn_mgL_Obs,y=TMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Mn (mg/L)', y='PLSR Predicted Tot. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 1) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SFe = ggplot(biplot_df_hypo, aes(x=SFe_mgL_Obs,y=SFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Fe (mg/L)', y='PLSR Predicted Sol. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 7.2, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SMn = ggplot(biplot_df_hypo, aes(x=SMn_mgL_Obs,y=SMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Mn (mg/L)', y='PLSR Predicted Sol. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.95, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
# Oxygen On Experiment, Epilimnion Models #
TFe_epi = ggplot(biplot_df_epi, aes(x=TFe_mgL_Obs,y=TFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Fe (mg/L)', y='PLSR Predicted Tot. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.75) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
TMn_epi = ggplot(biplot_df_epi, aes(x=TMn_mgL_Obs,y=TMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Tot. Mn (mg/L)', y='PLSR Predicted Tot. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.055) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SFe_epi = ggplot(biplot_df_epi, aes(x=SFe_mgL_Obs,y=SFe_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Fe (mg/L)', y='PLSR Predicted Sol. Fe (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.65, digits = 2)+
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8))
SMn_epi = ggplot(biplot_df_epi, aes(x=SMn_mgL_Obs,y=SMn_mgL_Pred)) +
geom_point() +
geom_smooth(method='lm', se=FALSE, color='black') +
theme_minimal() +
labs(x='Lab Measured Sol. Mn (mg/L)', y='PLSR Predicted Sol. Mn (mg/L)') +
stat_cor(aes(label = paste(..rr.label.., sep = "~`,`~")), label.y = 0.0165, digits = 2) +
theme(axis.title.x = element_text(size=8),
axis.title.y = element_text(color = "black", size=8),
axis.text = element_text(size = 6))
# Write plots to jpeg
patchwork_21 = (TFe_epi | TMn_epi | SFe_epi | SMn_epi) / (TFe | TMn | SFe | SMn)
jpeg('MUX21_biplots_111822.jpeg', width = 190, height = 120, units = 'mm', res = 600)
patchwork_21 + plot_annotation(tag_levels = "A") &
theme(plot.tag = element_text(size = 12, hjust = 0, vjust = 0))
dev.off()
|
8954833972206ccc4c8a7fa987677a45fccdd9bd
|
085c1f0d348b6be6eef1917e74cfde2247853d84
|
/tests/testthat/test_S3methods.R
|
f2597a863f99ff8e19f88404ae19e372375303a2
|
[] |
no_license
|
saviviro/gmvarkit
|
657691acd9577c5aacefe01778bb53d8746613c2
|
ad17dd159d0dfa816dcdf9f3ff8be8d633e3b0ef
|
refs/heads/master
| 2023-07-11T19:48:43.851308
| 2023-06-26T08:18:28
| 2023-06-26T08:18:28
| 193,906,969
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,244
|
r
|
test_S3methods.R
|
context("S3 Methods")
library(gmvarkit)
# NOTE that some elements of these tests use random elements obtained from simulation algorithms
## A(M)(p)_(p)(M)(d)
# p=1, M=1, d=2, parametrization="mean"
phi10_112 <- c(0.75, 0.8)
A11_112 <- matrix(c(0.29, 0.02, -0.14, 0.9), nrow=2, byrow=FALSE)
Omega1_112 <- matrix(c(0.60, 0.01, 0.01, 0.07), nrow=2, byrow=FALSE)
theta_112 <- c(phi10_112, vec(A11_112), vech(Omega1_112))
mod_112 <- GSMVAR(gdpdef, p=1, M=1, d=2, params=theta_112, conditional=TRUE, parametrization="mean")
mod_112t <- GSMVAR(gdpdef, p=1, M=1, d=2, params=c(theta_112, 3), model="StMVAR", parametrization="mean")
# p=2, M=2, d=2, no constraints
phi10_222 <- c(0.36, 0.12)
A11_222 <- matrix(c(0.22, 0.06, -0.15, 0.39), nrow=2, byrow=FALSE)
A12_222 <- matrix(c(0.41, -0.01, 0.08, 0.3), nrow=2, byrow=FALSE)
Omega1_222 <- matrix(c(0.21, 0.01, 0.01, 0.03), nrow=2, byrow=FALSE)
phi20_222 <- c(0.48, 0.07)
A21_222 <- matrix(c(0.22, 0.02, -0.12, 0.72), nrow=2, byrow=FALSE)
A22_222 <- matrix(c(0.09, 0.03, 0.04, 0.19), nrow=2, byrow=FALSE)
Omega2_222 <- matrix(c(1.10, 0.01, 0.01, 0.11), nrow=2, byrow=FALSE)
alpha1_222 <- 0.37
upsilon1_222 <- c(phi10_222, vec(A11_222), vec(A12_222), vech(Omega1_222))
upsilon2_222 <- c(phi20_222, vec(A21_222), vec(A22_222), vech(Omega2_222))
theta_222 <- c(upsilon1_222, upsilon2_222, alpha1_222)
mod_222 <- GSMVAR(gdpdef, p=2, M=2, d=2, params=theta_222, conditional=TRUE, parametrization="intercept", constraints=NULL)
mod_222gs <- GSMVAR(gdpdef, p=2, M=c(1, 1), d=2, params=c(theta_222, 20), model="G-StMVAR",
conditional=TRUE, parametrization="intercept")
WL_222 <- diag_Omegas(Omega1_222, Omega2_222)
W_222 <- matrix(WL_222[1:(2^2)], nrow=2, byrow=FALSE)
lambdas_222 <- WL_222[(2^2 + 1):length(WL_222)]
theta_222s <- c(phi10_222, phi20_222, vec(A11_222), vec(A12_222), vec(A21_222),
vec(A22_222), vec(W_222), lambdas_222, alpha1_222) # SGMVAR
mod_222s <- GSMVAR(gdpdef, p=2, M=2, d=2, params=theta_222s, conditional=TRUE, parametrization="intercept", constraints=NULL,
structural_pars=list(W=W_222))
# p=2, M=2, d=2, AR paramars same, non-diagonals zero, intercept
theta_222c <- c(0.33782, 0.183512, 0.472168, 0.095311, 0.201199, 0.600596, 0.237819,
0.23529, 1.077816, -0.016343, 0.112771, 0.22199, 0.005582, 0.028126, 0.492844)
mat0 <- matrix(c(1, rep(0, 10), 1, rep(0, 8), 1, rep(0, 10), 1), nrow=2*2^2, byrow=FALSE)
C_222c <- rbind(mat0, mat0)
mod_222c <- GSMVAR(gdpdef, p=2, M=2, d=2, params=theta_222c, conditional=TRUE, parametrization="intercept", constraints=C_222c)
# p=1, M=2, d=3, no constraints, rand_ind and simulated data
theta_123 <- c(-9.44567, -0.56054, 10.32549, 0.0965, 0.63617, 0.35771, 0.63339,
0.2519, -0.32399, 0.56932, -0.47935, 0.32332, 1.04371, 0.08397,
0.71741, 0.46644, 0.23572, 1.14101, -8.16384, 0.7148, 1.86377,
0.2646, -0.07309, -0.78756, -0.86484, -0.16795, -0.26713,
-0.0035, 0.6088, -0.19626, 0.36186, -0.16349, 0.06036, 0.58441,
1.10884, 2.64874, 0.54711)
mod_123 <- GSMVAR(p=1, M=2, d=3, params=theta_123, conditional=FALSE, parametrization="mean", constraints=NULL)
sim_123 <- simulate.gsmvar(mod_123, nsim=300, seed=2)
data_123 <- sim_123$sample
mod_123 <- GSMVAR(data_123, p=1, M=2, d=3, params=theta_123, conditional=FALSE, parametrization="mean", constraints=NULL)
mod_123t <- GSMVAR(data_123, p=1, M=2, d=3, params=c(theta_123, 20, 30), model="StMVAR",
conditional=FALSE, parametrization="mean")
set.seed(1); pred112t <- predict.gsmvar(mod_112t, n_ahead=2, nsim=10, pi=c(0.80), plot_res=FALSE, pred_type="mean")
set.seed(1); pred222 <- predict.gsmvar(mod_222, n_ahead=2, nsim=10, pi=c(0.95, 0.80), plot_res=FALSE, pred_type="mean")
set.seed(1); pred222gs <- predict.gsmvar(mod_222gs, n_ahead=2, nsim=10, pi=c(0.95, 0.70), plot_res=FALSE, pred_type="mean")
set.seed(2); pred222s <- predict.gsmvar(mod_222s, n_ahead=2, nsim=10, pi=c(0.95, 0.80), plot_res=FALSE, pred_type="mean")
set.seed(3); pred123 <- predict.gsmvar(mod_123, n_ahead=1, nsim=10, pi=0.99, pi_type="upper", pred_type="median", plot_res=FALSE)
set.seed(3); pred123t <- predict.gsmvar(mod_123t, n_ahead=2, nsim=5, pi=0.99, pi_type="lower", pred_type="median", plot_res=FALSE)
tmp222 <- unname(pred222$pred[2,])
# p=2, M=2, d=2, parametrization="mean", constraints=C_mat, same_means=list(1:2)
C_mat <- rbind(diag(2*2^2), diag(2*2^2))
params_222cm <- c(0.811034, 0.578587, 0.212084, 0.020444, -0.193005, 0.624671,
0.235827, 0.013962, 0.053267, 0.262703, 1.06105, -0.013519,
0.114109, 0.229542, 0.003092, 0.027266, 0.424341)
mod_222cm <- GSMVAR(gdpdef, p=2, M=2, params=params_222cm, parametrization="mean", constraints=C_mat, same_means=list(1:2))
set.seed(1); pred222cm <- predict.gsmvar(mod_222cm, n_ahead=2, nsimu=1, pi=0.9, pi_type="two-sided", pred_type="mean", plot_res=FALSE)
test_that("predict works correctly", {
expect_equal(predict.gsmvar(mod_112, n_ahead=1, pred_type="cond_mean", plot_res=FALSE)$pred, c(0.7231782, 0.4431300), tolerance=1e-5)
expect_equal(predict.gsmvar(mod_222c, n_ahead=1, pred_type="cond_mean", plot_res=FALSE)$pred, c(0.7250053, 0.4209626), tolerance=1e-5)
expect_equal(unname(pred112t$pred[2, ]), c(0.9204917, 0.4015286), tolerance=1e-3)
expect_equal(pred112t$pred_ints[, 2, 2], c(0.6106817, 0.5217523), tolerance=1e-3)
expect_equal(pred112t$pred_ints[, 1, 1], c(0.3210093, 0.2488382), tolerance=1e-3)
expect_equal(pred112t$mix_pred_ints[, 1, 1], c(1, 1), tolerance=1e-3)
expect_equal(tmp222, c(0.6709308, 0.4618839), tolerance=1e-5)
expect_equal(pred222$pred_ints[, 1, 1], c(0.07127095, -0.32711717), tolerance=1e-3)
expect_equal(pred222$pred_ints[, 3, 2], c(0.5783407, 0.5995812), tolerance=1e-3)
expect_equal(pred222$mix_pred_ints[, 1, 1], c(0.9352294, 0.8441136), tolerance=1e-3)
expect_equal(pred222s$pred_ints[, 2, 1], c(0.5230737, -0.3738420), tolerance=1e-3)
expect_equal(pred222s$pred_ints[, 1, 2], c(0.09878082, 0.18761453), tolerance=1e-3)
expect_equal(pred222s$mix_pred_ints[, 2, 2], c(0.06477058, 0.05950152), tolerance=1e-3)
expect_equal(unname(pred222gs$pred[2, ]), c(0.7199919, 0.4058923), tolerance=1e-3)
expect_equal(pred222gs$pred_ints[, 4, 2], c(0.7705175, 0.5784606), tolerance=1e-3)
expect_equal(pred222gs$pred_ints[, 2, 2], c(0.1779556, 0.3315794), tolerance=1e-3)
expect_equal(pred222gs$mix_pred_ints[, 2, 1], c(0.9260951, 0.7668288), tolerance=1e-3)
expect_equal(unname(pred123$pred[1,]), c(-8.4121641, -0.3787007, 2.3372331), tolerance=1e-5)
expect_equal(pred123$pred_ints[ , 1, ], c(-7.987103, 1.036073, 4.507460), tolerance=1e-5)
expect_equal(unname(pred123$mix_pred[1 ,]), c(7.987841e-21, 1.000000e+00), tolerance=1e-5)
expect_equal(unname(pred123$mix_pred_ints[1 , 1, ]), c(7.987841e-21, 1.000000e+00), tolerance=1e-5)
expect_equal(unname(pred123t$pred[2,]), c(-7.789771, 1.380211, 2.016605), tolerance=1e-5)
expect_equal(pred123t$pred_ints[2 , 1, ], c(-8.0015443, 0.7553734, 0.1624137), tolerance=1e-5)
expect_equal(unname(pred123t$mix_pred[2,]), c(3.563723e-07, 9.999996e-01), tolerance=1e-5)
expect_equal(unname(pred123t$mix_pred_ints[2 , 1, ]), c(1.457362e-07, 9.999943e-01), tolerance=1e-5)
expect_equal(unname(pred222cm$pred[2,]), c(0.7434035, 0.4107316), tolerance=1e-5)
expect_equal(unname(pred222cm$pred_ints[2, 2, ]), c(1.6821331, 0.7858234), tolerance=1e-5)
})
# p=2, M=2, d=2, parametrization="mean", constraints=C_mat, same_means=list(1:2)
C_mat <- rbind(diag(2*2^2), diag(2*2^2))
params_222cm <- c(0.811034, 0.578587, 0.212084, 0.020444, -0.193005, 0.624671,
0.235827, 0.013962, 0.053267, 0.262703, 1.06105, -0.013519,
0.114109, 0.229542, 0.003092, 0.027266, 0.424341)
mod_222cm <- GSMVAR(gdpdef, p=2, M=2, params=params_222cm, parametrization="mean", constraints=C_mat, same_means=list(1:2))
test_that("summary method works correctly", {
sum222cm <- summary(mod_222cm)
expect_equal(sum222cm$abs_boldA_eigens[3,], c(0.3963773, 0.3963773), tolerance=1e-5)
expect_equal(sum222cm$omega_eigens[1:4], c(1.06124296, 0.11391604, 0.22958925, 0.02721875), tolerance=1e-5)
})
|
bb78286a1b21b40726aa214ee8318fc381fd17dc
|
c5b41136c0d7f9803bbba09023ef56ea42d835fc
|
/3_2.R
|
00bdacb37a0d9b5586d51ee4f3424dbe14f4f416
|
[] |
no_license
|
jaimetbeili/Probability
|
dadc4f6d4624147d661e1232c35e94e3f16b38ba
|
716982bdc804eae8828c91a0d241927f9d19bc66
|
refs/heads/main
| 2023-06-24T09:45:44.707542
| 2021-07-26T03:59:11
| 2021-07-26T03:59:11
| 386,747,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,556
|
r
|
3_2.R
|
#PROBABILIDAD CONTINUA (NO DISCRETA)
library(tidyverse)
library(dslabs)
data(heights)
x <- heights %>% filter(sex=="Male") %>% pull(height)
x
F <- function(a) mean(x <= a)
1 - F(70.5) # probability of male taller than 70 inches
F(70)-F(50)
1 - pnorm(70.5, mean(x), sd(x))
# plot distribution of exact heights in data
plot(prop.table(table(x)), xlab = "a = Height in inches", ylab = "Pr(x = a)")
# probabilities in actual data over length 1 ranges containing an integer
mean(x <= 68.5) - mean(x <= 67.5)
mean(x <= 69.5) - mean(x <= 68.5)
mean(x <= 70.5) - mean(x <= 69.5)
# probabilities in normal approximation match well
pnorm(68.5, mean(x), sd(x)) - pnorm(67.5, mean(x), sd(x))
pnorm(69.5, mean(x), sd(x)) - pnorm(68.5, mean(x), sd(x))
pnorm(70.5, mean(x), sd(x)) - pnorm(69.5, mean(x), sd(x))
# probabilities in actual data over other ranges don't match normal approx as well
mean(x <= 70.9) - mean(x <= 70.1)
pnorm(70.9, mean(x), sd(x)) - pnorm(70.1, mean(x), sd(x))
x <- seq(-4, 4, length = 100)
data.frame(x, f = dnorm(x)) %>%
ggplot(aes(x, f)) +
geom_line()
#Una base de datos que se ve igual a nuestra base de datos.
x <- heights %>% filter(sex == "Male") %>% .$height
n <- length(x)
avg <- mean(x)
s <- sd(x)
simulated_heights <- rnorm(n, avg, s)
ds_theme_set()
data.frame(simulated_heights=simulated_heights) %>% ggplot(aes(simulated_heights)) +
geom_histogram(col = "black", binwidth = 2)
#Probabilidad de que de 800 personas el mas alto mida mas de 7 pies.
B <- 10000
tallest <- replicate(B, {
simulated_data <- rnorm(800, avg, s)
max(simulated_data)
})
mean(tallest >= 7*12)
# R uses a convention that lets us remember the names of these functions.
# Namely, using the letters d for density, q
# for quantile, p for probability density function, and r for random.
#norm is the shorthand for normal distribution.
#t is the shorthand of student's t distribution.
#dt es la funcion de densidad para una distribucion t de student.
#La altura de una persona en el percentil 99 o en el 1 se calcula asi
qnorm(.99, avg, s)
qnorm(.01, avg, s)
# The variable `B` specifies the number of times we want the simulation to run.
B <- 1000
# Use the `set.seed` function to make sure your answer matches the expected result after random number generation.
set.seed(1)
# Create an object called `highestIQ` that contains the highest IQ score from each random distribution of 10,000 people.
highestIQ <- replicate(B, {
grupo <- rnorm(10000, 100, 15)
max(grupo)
})
# Make a histogram of the highest IQ scores.
hist(highestIQ)
|
2773493f7cd85f9e76587c2d5551c21ff6aaff8d
|
1381f990ef0cabf26ed25f481467ba9454d1eaf3
|
/mustache.R
|
380214d3c321721c75dd696bdb447b2b798f6e5b
|
[] |
no_license
|
jimsforks/misc
|
baad41003332bdb74e1c8c2571035c5d4ac6fa3a
|
cc71517f23e86e0f699fa48dc25005996e63dbd4
|
refs/heads/master
| 2022-04-12T10:41:29.302523
| 2020-04-03T13:15:25
| 2020-04-03T13:15:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
mustache.R
|
library(rlang)
eval_tidy_quo <- function(expr, data = NULL, env = caller_env()) {
eval_tidy(enquo(expr), data = data, env = env)
}
b <- 5
a <- sym("b")
# This is not tidy evaluation, a is unquoted eagerly?
eval_tidy({{a}})
eval_tidy(a)
eval_tidy({{a}}, data = list(b = 3))
eval_tidy(a, data = list(b = 3))
# Tidy evaluation without mustache:
eval_tidy_quo(a)
# Not picking up b from .GlobalEnv here
try(eval_tidy_quo({{a}}))
# ...but works with the data mask
eval_tidy_quo({{a}}, data = list(b = 3))
# A quosure works
a <- quo(b)
eval_tidy_quo({{a}})
# Mustache implements quote-unquote in a function:
quoting_fun <- function(expr) {
eval_tidy_quo({{expr}})
}
quoting_fun(a)
quoting_fun({{a}})
|
ae117ed569b4c89d096a48017c03377d011d3f0a
|
34ff74b689a1ec845f7c042f836869dbe0fb3efb
|
/math.R
|
60f1c97da6bd4f2343be12f55745941d362a0c0e
|
[] |
no_license
|
olk/examples_R
|
1d454c3c050b7cf885d91666aeb6b07082b919fe
|
9abdf814e84875ec6bab789bc28d5e29469d7795
|
refs/heads/master
| 2020-03-08T16:16:48.943453
| 2018-08-09T17:37:02
| 2018-08-16T16:53:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,439
|
r
|
math.R
|
# prod()
eo <- function(p) {
np <- 1 - p # compute for idenpendent events `not happen` propability
total <- 0.0 # initialize
for (i in 1:length(p)) {
total <- total + p[i] + prod(np[-i]) # `prod()` producti of the elements of a vector
}
return(total)
}
# cumulative sums and products
x <- c(1, 15, 3)
cumsum(x) # [1] 1 16 19
cumprod(x) # [1] 1 15 45
# minima and maxima
z <- matrix(c(1, 4, 7, 2, 4, 2), ncol=2)
z
min(z[,1], z[,2]) # [1] 1
pmin(z[,1], z[,2]) # [1] 1 4 2
pmin(z[1,], z[2,], z[3,]) # [1] 1 2
# Nullstellen
nlm(function(x) { return(x^2+1) }, 10)
# differentiation
D(expression(1/x), "x")
D(expression(log(x)), "x")
# statisical
# prefix `r` == random number generation
# prefix `d` == density/probability mass function
# prefix `p` == cumulative distrib. function
# prefix `q` == qunatiles
mean(rchisq(1000, df=2)) # [1] 1.895382
# 95th percentile (0.95 quantil) of chi-square distribution with 2 degrees of freedom
qchisq(0.95, 2) # [1] 5.991465
# 0.50 and 0.95 auantil of chi-square distribution with 2 degrees od freedom
qchisq(c(0.50, 0.95), 2) # [1] 1.386294 5.991465
# sorting
x <- c(12, 5, 11, 4)
sort(x) # [1] 4 5 11 12
order(x) # [1] 4 2 3 1, inidces returned
# sort data.frame via o`rder()`
x <- data.frame(V1=c("xyz", "abc", "uvw"), V2=c(2, 8, 0), stringsAsFactors=FALSE)
x
y <- order(x$V2)
x[y,]
x[order(x$V1),]
# rank()
x <- c(14, 4, 13, 4)
rank(x) # [1] 4.0 1.5 3.0 1.5
|
8cdc2b42028348f014ce4eaadfdb634bb5edee58
|
918732e0125a50ad6c85b2bb8eac70d8706f3bb0
|
/old_files/results_figures_20yr_timestd_bytrt_N01.R
|
0e5740ae9972400518c6b28669d6480b4ea2b2f3
|
[] |
no_license
|
klapierre/community_difference_synthesis
|
8373379a0bdd80eef74fcb2fbafda50def9bbf98
|
c510047549d3a57ee6e3720ef710aefa94f8671d
|
refs/heads/master
| 2020-03-09T20:43:37.382314
| 2019-07-31T21:55:36
| 2019-07-31T21:55:36
| 128,992,032
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158,677
|
r
|
results_figures_20yr_timestd_bytrt_N01.R
|
################################################################################
## results_figures_20yr_N01.R: Compiles Bayesian output and makes figures for the primary analysis of richness and compositonal differences between treatment and control plots for datasets cut off at 20 years.
##
## Author: Kimberly La Pierre
## Date created: January 17, 2018
## See https://github.com/klapierre/Converge_Diverge/blob/master/core%20data%20paper_bayesian%20results_figures_sig%20test_expinteractions_20yr.R for full history.
################################################################################
library(grid)
library(tidyverse)
#kim laptop
setwd("C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\converge diverge working group\\converge_diverge\\datasets\\LongForm")
#kim desktop
setwd("C:\\Users\\la pierrek\\Dropbox (Smithsonian)\\working groups\\converge diverge working group\\converge_diverge\\datasets\\LongForm")
theme_set(theme_bw())
theme_update(axis.title.x=element_text(size=40, vjust=-0.35, margin=margin(t=15)), axis.text.x=element_text(size=34, color='black'),
axis.title.y=element_text(size=40, angle=90, vjust=0.5, margin=margin(r=15)), axis.text.y=element_text(size=34, color='black'),
plot.title = element_text(size=40, vjust=2),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title=element_blank(), legend.text=element_text(size=20))
###bar graph summary statistics function
#barGraphStats(data=, variable="", byFactorNames=c(""))
barGraphStats <- function(data, variable, byFactorNames) {
count <- length(byFactorNames)
N <- aggregate(data[[variable]], data[byFactorNames], FUN=length)
names(N)[1:count] <- byFactorNames
names(N) <- sub("^x$", "N", names(N))
mean <- aggregate(data[[variable]], data[byFactorNames], FUN=mean)
names(mean)[1:count] <- byFactorNames
names(mean) <- sub("^x$", "mean", names(mean))
sd <- aggregate(data[[variable]], data[byFactorNames], FUN=sd)
names(sd)[1:count] <- byFactorNames
names(sd) <- sub("^x$", "sd", names(sd))
preSummaryStats <- merge(N, mean, by=byFactorNames)
finalSummaryStats <- merge(preSummaryStats, sd, by=byFactorNames)
finalSummaryStats$se <- finalSummaryStats$sd / sqrt(finalSummaryStats$N)
return(finalSummaryStats)
}
#function to get standard deviations of columns in a dataframe
colSd <- function (x, na.rm=FALSE) apply(X=x, MARGIN=2, FUN=sd, na.rm=na.rm)
##################################################################################
##################################################################################
#import experiment information --------------------------------------------------------
expRaw <- read.csv('ExperimentInformation_March2019.csv')
expInfo <- expRaw%>%
#remove any pre-treatment data for the few experiments that have it -- pre-treatment data for experiments is awesome and we should all strive to collect it!
filter(treatment_year!=0)%>%
#make columns for irrigation and drought from precip column
group_by(site_code, project_name, community_type, treatment)%>%
mutate(irrigation=ifelse(precip>0, 1, 0), drought=ifelse(precip<0, 1, 0))%>%
#calcualte minumum years for each project
summarise(min_year=min(treatment_year), nutrients=mean(nutrients), water=mean(water), carbon=mean(carbon), irrigation=mean(irrigation), drought=mean(drought), experiment_length=max(treatment_year))
#import treatment data
trtInfo1 <- read.csv('ExperimentInformation_March2019.csv')
#import diversity metrics that went into Bayesian analysis
rawData <- read.csv('ForAnalysis_allAnalysisAllDatasets_04082019.csv')
#calculate means and standard deviations across all data for richness and compositonal differences to backtransform
rawData2<- rawData%>%
left_join(trtInfo1)%>%
filter(anpp!='NA', treatment_year!=0)%>%
summarise(mean_mean=mean(composition_diff), std_mean=sd(composition_diff), mean_rich=mean(S_lnRR), std_rich=sd(S_lnRR)) #to backtransform
#select just data in this analysis
expInfo2 <- rawData%>%
left_join(trtInfo1)%>%
filter(anpp!='NA', treatment_year!=0)%>%
group_by(site_code, project_name, community_type, treatment)%>%
summarise(experiment_length=mean(experiment_length))
#for table of experiment summarizing various factors
expInfoSummary <- rawData%>%
left_join(trtInfo1)%>%
filter(anpp!='NA', treatment_year!=0)%>%
group_by(site_code, project_name, community_type, treatment)%>%
summarise(experiment_length=mean(experiment_length), plot_mani=mean(plot_mani), rrich=mean(rrich), anpp=mean(anpp), MAT=mean(MAT), MAP=mean(MAP))%>%
ungroup()%>%
summarise(length_mean=mean(experiment_length), length_min=min(experiment_length), length_max=max(experiment_length),
plot_mani_median=mean(plot_mani), plot_mani_min=min(plot_mani), plot_mani_max=max(plot_mani),
rrich_mean=mean(rrich), rrich_min=min(rrich), rrich_max=max(rrich),
anpp_mean=mean(anpp), anpp_min=min(anpp), anpp_max=max(anpp),
MAP_mean=mean(MAP), MAP_min=min(MAP), MAP_max=max(MAP),
MAT_mean=mean(MAT), MAT_min=min(MAT), MAT_max=max(MAT))%>%
gather(variable, estimate)
#treatment info
trtInfo2 <- trtInfo1%>%
select(site_code, project_name, community_type, treatment, plot_mani, trt_type)%>%
unique()
trtInfo <- rawData%>%
select(site_code, project_name, community_type, treatment, trt_type, experiment_length, rrich, anpp, MAT, MAP)%>%
unique()%>%
left_join(expInfo)
################################################################################
trtShape <- read.csv('treatment_response_shape_classification_stdtimebytrt_N01_04072019.csv')
###main figure (Figure 1)
# compositional response panels
#------------------------
meanPlot0 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(0,1)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
ylim(-10,10) +
xlab('') +
ylab('') +
annotate('text', x=0, y=1, label='(b) 63.0%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.6401008315651 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.4709532317155 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.54089440738607 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.3691754406512 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(1.14471942605 + 0*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.14935338765 + 0*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.59218103903 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.6670398764655 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.74517583654405 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.51024117363095 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.481013538228 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.86695832445 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.6454678548 + 0*((x-6.30769230769231)/4.44193305113542) + 0*((x-6.30769230769231)/4.44193305113542)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(-1.2006242818 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-1.25939339475 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-1.23877545645 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.9472963638 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.4950797928252 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.544712450949425 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.50822456845342 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.642342220266635 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.6636239351495 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.474060350137 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(-0.28186474175064 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(-1.1457828895 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(0 + 0*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.47439841149425 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-0.65130379063355 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.686425937831 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.6046795152315 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.86405550363 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-1.49985274445 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.4307299435 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.43275046585 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.3489631471 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.47789658645 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.3510168526 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.11524441245 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-1.19736416625 + 0*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.68542849555255 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.62992865215155 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.742271309868 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.52384602882725 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.8030773985505 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.8500707111182 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5469972727325 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5357380495301 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.55377901050675 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.699244639513 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.8051895177 + 0*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-0.389713770587245 + 0*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-0.322938759481455 + 0*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-0.48634290153365 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.736343040663 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.87421853279 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.7556074724866 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-1.1089358432 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.0094152744 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.2294250567 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.076016852375 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.061843125 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.13205751525 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.934205493975 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.09618201375 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.472677304087 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.85499895505 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-2.66666666666667)/2.16024689946929) + 0*((x-2.66666666666667)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.755929450413 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.541923989273 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.843333315945 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.947167469930615 + 0*((x-7.75)/6.84957419601151) + 0*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.592063782538 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.849215014145 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5423125806732 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.61184075564005 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5602856434643 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5094238544233 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.77288450305685 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.6601455386855 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.90049744484 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.05087181815 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.6555821337 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(-0.80516677045 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(-0.69189432275 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(-1.21200712045 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.8462310307 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.9865182035 + 0*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.640313556396 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.7892365990405 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.609355438662 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.64460857400375 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.96520561545 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.02877071425 + 0*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.559047531857975 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.45405023881115 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.5408435498817 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.425595296066475 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.4466422569062 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.60018395094 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.7172894933855 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.4139737015519 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.4005145893168 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.742643770155 + 0*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.78401590256 + 0*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.75117609831 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.73932803658 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.01165978295 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.00132111095 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.76316509182 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.6961553882025 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.0499546583 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/3.1091263510296) + 0*((x-2.5)/3.1091263510296)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-1.36029149515 + 0*((x-3.14285714285714)/2.41029537806548) + 0*((x-3.14285714285714)/2.41029537806548)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-0.54167496260135 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.476553870787 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.6061341863038 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-1.5071642933 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-1.31483237225 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-0.90624733605 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,16)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,16)) +
stat_function(fun=function(x){(-0.56625444731615 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.51065151312525 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.524516709192 + 0*((x-4)/2.73861278752583) + 0*((x-4)/2.73861278752583)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(-0.56431890010335 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.567682489938 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.54126033346695 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.8663086466865 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.63329920765335 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.817628941785 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.91451470215 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.647551194977 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.4969145202982 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.76652581053 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.64339563799145 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.5434659686049 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.52698049275652 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2))
#------------------------
# Panel (b): 63.0% of treatments fall in this category (linear trend, no
# quadratic term).  Auto-generated plot: an empty canvas with shared axis
# set-up, the panel label, and one stat_function layer per treatment.
# Each curve has the form
#   (intercept + slope * (x - m)/s + 0 * ((x - m)/s)^2) * 0.1725573 + 0.3215148
# where m and s are that treatment's x mean and SD (standardized fit) and the
# outer constants rescale back to the plotted outcome axis.
# NOTE(review): 0.1725573 / 0.3215148 are presumably the pooled outcome SD and
# mean used for standardization -- confirm against the generating script.
meanPlot1 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(0,1)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
ylim(-10,10) +
xlab('') +
ylab('') +
annotate('text', x=0, y=1, label='(b) 63.0%', size=10, hjust='left') +
#below are the individual treatment lines (one stat_function per treatment)
stat_function(fun=function(x){(0.648460224905 + 0.466609335325*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0.73537874155 + 0.6085903779*((x-5.63636363636364)/3.93122696553448) + 0*((x-5.63636363636364)/3.93122696553448)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.4485647350565 + 0.7643202309*((x-4.55555555555556)/3.39525813124389) + 0*((x-4.55555555555556)/3.39525813124389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(1.029155089385 + 0.65550679989*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.67500792955 + 0.6419503636*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.896664874 + 0.69785951025*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.87926623346 + 0.432226902973*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(1.12990685415 + 0.39773178986667*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.9394478342 + 0.4060182535675*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0.42386840635945*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.706636425547 + 0.44401527942585*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.895724100085 + 0.4471879522911*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.9496811273 + 0.34937672950285*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-1.16380539685 + 0.33856363415695*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0.5218400209935*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.3933319282128 + 0.15916001006591*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0.4499046703288 + 0.50714120245*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0.67742118305 + 0.49577817065*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-0.7072990372148 + 0.33809600252165*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.4711223846705*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.2884047032719*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0.2734400970362*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0.67511187235*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0.6009743335345*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0.705254464275*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0.37198017885925*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.42560587076766*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.3974763085367*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.228103702536315*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0.277422997930855*((x-3.5)/2.44948974278318) + 0*((x-3.5)/2.44948974278318)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-0.928743556095 + 0.326213787892015*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.24155125729625*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.60125587186245 + 0.31380862585691*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.337942027481535*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.34142469611215*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(2.568941679 + 0.17680566297945*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0.2803716516834*((x-2.66666666666667)/2.16024689946929) + 0*((x-2.66666666666667)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0.38969752672*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,18)) +
stat_function(fun=function(x){(-0.643095562655 + 0.19674427619475*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,18)) +
stat_function(fun=function(x){(-0.70653887585 + 0.227510433596885*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.414177368934735 + 0.4748088536245*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(-0.50234271881995 + 0.556604542155*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.9240168 + 0.476112352*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(1.40496754115 + 0.7400813754*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.4767489929415 + 0.401911352345*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0.248371058767849*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.72502102075 + 0.29153577291595*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(1.06825644295 + 0.5294026741*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.6718394385134 + 0.73333577104*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0.46036571372795*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.60485726794075 + 0.430249763322755*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0.6763052094*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.8360653928*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.90895637595*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.87416079995*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(-0.684079106025 + 0.3935584868145*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(-0.704294368315 + 0.30197021927885*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.569639643427605 + 0.93262101915*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.93822871595*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.98244815685 + 0.75090990165*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.8603454414*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.37517404670615 + 0.8031353379*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.91801384975*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.374572134941494 + 0.80746168145*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.531351889765*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0.40592845578535*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.38862270846275*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.36231963709115*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.514569075692*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.9585038089 + 0.34166578984935*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.541755086495*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.6834788965625 + 0.36074731774405*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.38426427269705*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(1.181327652 + 0.591989395045*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.676334048663 + 0.555972115076*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.6917406479357 + 0.49208590756*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.6618047464185 + 0.4956726193905*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.9448282535 + 0.50610364586*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.2833880132 + 0.610198020365*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.56822849906165 + 0.26482193352851*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.35119867118685 + 0.5434063679*((x-4.33333333333333)/3.278719262151) + 0*((x-4.33333333333333)/3.278719262151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.40600681013116 + 0.45936887173*((x-4.33333333333333)/3.278719262151) + 0*((x-4.33333333333333)/3.278719262151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-1.1161897188 + 0.26755970548834*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.630122764947 + 0.49902246252*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.6114769839685 + 0.294499907653*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.1040156104 + 0.4818117421615*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,16)) +
stat_function(fun=function(x){(0.55900464926085 + 1.0450543834*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(-0.5539495581988 + 0.477531073671*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.44009576534335 + 0.542286036005*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.630211600678 + 0.31800687202845*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.5493514534052 + 0.41511963461515*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.6213942110515 + 0.43498566311285*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0.5056801409*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.52369465349795 + 0.380725642828055*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(-0.46037215443201 + 0.40532913347485*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0.590896647655*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0.5672213702685*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0.341444883264445*((x-1)/1) + 0*((x-1)/1)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,2))
#------------------------
# Panel (f): 0.9% of treatments -- curves with a positive quadratic term.
# All four treatments share the same standardization (center 5,
# scale 3.3166247903554) and plotting range [0, 10], so the layers are
# generated from a coefficient table instead of being written out by hand.
# Each curve is (a + b*z + c*z^2) * 0.1725573 + 0.3215148 with
# z = (x - 5)/3.3166247903554.
meanPlot2 <- local({
  base <- ggplot(data = data.frame(x = c(0, 0))) +
    coord_cartesian(ylim = c(0, 1)) +
    scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
    ylim(-10, 10) +
    labs(x = '', y = '') +
    annotate('text', x = 0, y = 1, label = '(f) 0.9%', size = 10, hjust = 'left')
  # One entry per treatment: c(intercept, linear, quadratic) on the
  # standardized scale.
  coefs <- list(
    c(-0.593661917435, 0.2553582834994, 0.24367603430132),
    c(0.37348114333365, 0.66492015455, 0.2066100937079),
    c(0.7540852598, 0.63824753185, 0.19784419664395),
    c(0, 0.3561763521215, 0.29172652755585)
  )
  # lapply gives each closure its own environment, so every layer keeps
  # its own coefficient vector.
  layers <- lapply(coefs, function(cf) {
    stat_function(fun = function(x) {
      z <- (x - 5) / 3.3166247903554
      (cf[1] + cf[2] * z + cf[3] * z^2) * 0.1725573 + 0.3215148
    }, size = 2, xlim = c(0, 10))
  })
  # ggplot2 accepts a list of layers on the right-hand side of `+`.
  base + layers
})
#------------------------
# Panel (h): 3.7% of treatments fall in this category -- every curve here has
# a negative quadratic coefficient (concave trend).  Auto-generated: empty
# canvas, shared axis set-up, panel label, then one stat_function layer per
# treatment of the form
#   (intercept + slope*(x-m)/s + quad*((x-m)/s)^2) * 0.1725573 + 0.3215148
# where m and s are the treatment-specific x mean and SD.
# NOTE(review): 0.1725573 / 0.3215148 are presumably the pooled outcome SD and
# mean used for standardization -- confirm against the generating script.
meanPlot3 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(0,1)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
ylim(-10,10) +
xlab('') +
ylab('') +
annotate('text', x=0, y=1, label='(h) 3.7%', size=10, hjust='left') +
#below are the individual treatment lines (one stat_function per treatment)
stat_function(fun=function(x){(0.6911753343455 + 0*((x-2.33333333333333)/2.51661147842358) + -0.283119187932195*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.9343722095 + 0.55711504585*((x-9.5)/5.91607978309962) + -0.2581652549465*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(2.5071341835 + 0.75241550155*((x-9.5)/5.91607978309962) + -0.2712518293587*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(3.1167620145 + 1.28924450285*((x-9.5)/5.91607978309962) + -0.69208632615*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(2.509327407 + 1.00629284395*((x-4.5)/3.02765035409749) + -0.459278441845*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(3.1852865085 + 0.87421238435*((x-4.5)/3.02765035409749) + -0.59276925525*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(1.09499768735 + 0.7762416523*((x-4.5)/3.02765035409749) + -0.246188968841005*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(2.0641595805 + 0.95614083735*((x-4.5)/3.02765035409749) + -0.365101186069*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0.2324320355198*((x-6)/3.89444048184931) + -0.16722764969448*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.95349945275 + 0.61603318035*((x-4.5)/3.02765035409749) + -0.215524884471519*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(1.1790456512 + 0.56074669355*((x-4.5)/3.02765035409749) + -0.252644285540525*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0.348275541123525 + 0.3036686604655*((x-5.5)/3.60555127546399) + -0.2463428108864*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0.6824642105 + 0.42998601908*((x-5.5)/3.60555127546399) + -0.2536188667354*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0.85776759785 + 0.6192598810655*((x-3.33333333333333)/3.05505046330389) + -0.333610313837625*((x-3.33333333333333)/3.05505046330389)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.74474589395 + 0.9619131735*((x-4.33333333333333)/3.278719262151) + -0.22598530394343*((x-4.33333333333333)/3.278719262151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(1.1057840104 + 0.625586158595*((x-2)/1.58113883008419) + -0.292368737867015*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4))
#------------------------
# Panel (j): no treatments fall in this category (0%), so the panel is just
# the shared axis set-up plus its label annotation -- no curve layers.
meanPlot4 <- ggplot(data = data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(0, 1)) +
  scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
  ylim(-10, 10) +
  labs(x = '', y = '') +
  annotate('text', x = 0, y = 1, label = '(j) 0%', size = 10, hjust = 'left')
#no individual treatment lines: 0% of treatments fall in this panel
#------------------------
# Panel (l): no treatments fall in this category (0%), so the panel is just
# the shared axis set-up plus its label annotation -- no curve layers.
meanPlot5 <- ggplot(data = data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(0, 1)) +
  scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
  ylim(-10, 10) +
  labs(x = '', y = '') +
  annotate('text', x = 0, y = 1, label = '(l) 0%', size = 10, hjust = 'left')
#no individual treatment lines: 0% of treatments fall in this panel
#------------------------
# Panel (n): no treatments fall in this category (0%), so the panel is just
# the shared axis set-up plus its label annotation -- no curve layers.
meanPlot6 <- ggplot(data = data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(0, 1)) +
  scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
  ylim(-10, 10) +
  labs(x = '', y = '') +
  annotate('text', x = 0, y = 1, label = '(n) 0%', size = 10, hjust = 'left')
#no individual treatment lines: 0% of treatments fall in this panel
#------------------------
# Panel (p): mean-response panel labelled "9.4%" (fraction of treatments
# significant). Built on a dummy one-point data frame; all content is added
# as stat_function() curve layers below. coord_cartesian() zooms the view to
# [0, 1] while ylim() keeps the underlying scale at [-10, 10], matching the
# sibling panels.
meanPlot7 <- ggplot(data=data.frame(x=c(0,0))) +
  coord_cartesian(ylim=c(0,1)) +
  scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
  ylim(-10,10) +
  xlab('') +
  ylab('') +
  annotate('text', x=0, y=1, label='(p) 9.4%', size=10, hjust='left') +
  #below are the individual treatment lines
  # Each stat_function draws one fitted quadratic b0 + b1*z + b2*z^2, where
  # z = (x - center)/scale (the center/scale pairs presumably are the mean and
  # SD of each study's duration -- TODO confirm against the fitting code).
  # The result is then multiplied by 0.1725573 and offset by 0.3215148,
  # presumably a back-transform from a standardized effect-size scale to the
  # plotted response scale -- confirm upstream. xlim clips each curve to that
  # study's observed duration range.
  stat_function(fun=function(x){(1.42399368825 + 0*((x-2.33333333333333)/2.51661147842358) + -0.3994702040472*((x-2.33333333333333)/2.51661147842358)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,5)) +
  stat_function(fun=function(x){(-0.41716202934925 + 0*((x-2)/1.58113883008419) + -0.2502824073133*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
  stat_function(fun=function(x){(0 + 0.241361169910025*((x-6.30769230769231)/4.44193305113542) + -0.2104470107111*((x-6.30769230769231)/4.44193305113542)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,15)) +
  stat_function(fun=function(x){(0 + 0*((x-6.30769230769231)/4.44193305113542) + -0.236483860804*((x-6.30769230769231)/4.44193305113542)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,15)) +
  stat_function(fun=function(x){(0.4196127584335 + 0.3052211171406*((x-6.30769230769231)/4.44193305113542) + -0.340175218935*((x-6.30769230769231)/4.44193305113542)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,15)) +
  stat_function(fun=function(x){(-0.3811998793665 + 0*((x-6.30769230769231)/4.44193305113542) + -0.2171542896657*((x-6.30769230769231)/4.44193305113542)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,15)) +
  stat_function(fun=function(x){(-0.51122882649 + 0*((x-9.5)/5.91607978309962) + -0.2229047547752*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(2.733298538 + 0.99260452775*((x-9.5)/5.91607978309962) + -0.7513991392*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.886468257 + 0*((x-9.5)/5.91607978309962) + -0.321778843865*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.931675426 + 0.254464071887*((x-9.5)/5.91607978309962) + -0.325155154075*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.45785583995 + 0*((x-9.5)/5.91607978309962) + -0.262320649931*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.494252662 + 0*((x-9.5)/5.91607978309962) + -0.556604522*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(2.1916718575 + 0.2563068784935*((x-9.5)/5.91607978309962) + -0.5203419805*((x-9.5)/5.91607978309962)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + -0.217540206318055*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
  stat_function(fun=function(x){(0 + 0.22250410273955*((x-6)/3.89444048184931) + -0.19410127786708*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + -0.2025647310746*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + -0.192759097655555*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(0.28907880475315 + 0.201322486840305*((x-6)/3.89444048184931) + -0.2147877167529*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(0.4493614828235 + 0*((x-6)/3.89444048184931) + -0.30508495602*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(0.265763637126124 + 0.20067863224878*((x-6)/3.89444048184931) + -0.18502391885129*((x-6)/3.89444048184931)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,12)) +
  stat_function(fun=function(x){(2.81264697 + 0.169313197729875*((x-9)/5.62731433871138) + -0.1997391874648*((x-9)/5.62731433871138)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.57525233229345 + 0.3223192109833*((x-2.66666666666667)/2.16024689946929) + -0.26149622558815*((x-2.66666666666667)/2.16024689946929)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,6)) +
  stat_function(fun=function(x){(1.19890958825 + 0*((x-7.75)/6.84957419601151) + -0.57518905525*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.7217976979 + 0*((x-7.75)/6.84957419601151) + -0.65755744445*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.004336517745 + 0*((x-7.75)/6.84957419601151) + -0.545922164785*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.51538115765 + 0.3544353269439*((x-7.75)/6.84957419601151) + -0.62048209265*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.51253681515 + 0*((x-7.75)/6.84957419601151) + -0.6523646746*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.53165079635 + 0*((x-7.75)/6.84957419601151) + -0.62714663665*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.0801634156 + 0.29274236285193*((x-7.75)/6.84957419601151) + -0.473480664849*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.94487593055 + 0*((x-7.75)/6.84957419601151) + -0.59398211015*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.47368064175 + 0*((x-7.75)/6.84957419601151) + -0.79395326735*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.46159971185 + 0*((x-7.75)/6.84957419601151) + -0.633683998795*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(1.5552715191 + 0*((x-7.75)/6.84957419601151) + -0.7070844257*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.88426814121 + 0*((x-7.75)/6.84957419601151) + -0.491065428035*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.723621832526 + 0*((x-7.75)/6.84957419601151) + -0.430137598808*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.43081551046951 + 0*((x-7.75)/6.84957419601151) + -0.4270724637559*((x-7.75)/6.84957419601151)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,19)) +
  stat_function(fun=function(x){(0.70626079822 + 0.25907806773245*((x-5.5)/3.60555127546399) + -0.347565306437*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
  stat_function(fun=function(x){(0.49287332307 + 0*((x-5.5)/3.60555127546399) + -0.21393782526305*((x-5.5)/3.60555127546399)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,11)) +
  stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + -0.378221651254*((x-4.5)/3.02765035409749)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,9)) +
  stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + -0.2925299288263*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4)) +
  stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + -0.279793431078316*((x-2)/1.58113883008419)^2)*(0.1725573)+(0.3215148)}, size=2, xlim=c(0,4))
#------------------------
# Panel (r): mean-response panel labelled "0.7%" (fraction of treatments
# significant) with three fitted quadratic treatment curves.
# coord_cartesian() zooms the view to [0, 1]; ylim() keeps the underlying
# scale at [-10, 10] so all panels share a common scale.
meanPlot8 <- ggplot(data = data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(0, 1)) +
  scale_x_continuous(
    limits = c(0, 19),
    breaks = seq(4, 19, 5),
    labels = seq(5, 20, 5)
  ) +
  ylim(-10, 10) +
  xlab("") +
  ylab("") +
  annotate("text", x = 0, y = 1, label = "(r) 0.7%", size = 10, hjust = "left") +
  # Individual treatment lines: each is a quadratic in the centered/scaled
  # predictor z = (x - 5)/3.3166..., rescaled by the shared multiplier
  # 0.1725573 and offset 0.3215148, clipped to the observed range [0, 10].
  stat_function(
    fun = function(x) {
      z <- (x - 5) / 3.3166247903554
      (-0.612594226855 + 0 * z + 0.256146976411 * z^2) * (0.1725573) + (0.3215148)
    },
    size = 2, xlim = c(0, 10)
  ) +
  stat_function(
    fun = function(x) {
      z <- (x - 5) / 3.3166247903554
      (0 + 0.201856177582915 * z + 0.2531590546857 * z^2) * (0.1725573) + (0.3215148)
    },
    size = 2, xlim = c(0, 10)
  ) +
  stat_function(
    fun = function(x) {
      z <- (x - 5) / 3.3166247903554
      (0 + 0.3413042733755 * z + 0.367617449785 * z^2) * (0.1725573) + (0.3215148)
    },
    size = 2, xlim = c(0, 10)
  )
#richness response panels
#------------------------
richnessPlot0 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(-1.0,2.0)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
scale_y_continuous(limits=c(-2,2), breaks=seq(-2,2,1)) +
xlab('') +
ylab('') +
annotate('text', x=0, y=2.0, label='(a) 76.7%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.638629026395 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.6593714811254 + 0*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(1.2062405682 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.6)/2.40831891575846) + 0*((x-2.6)/2.40831891575846)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-6.30769230769231)/4.44193305113542) + 0*((x-6.30769230769231)/4.44193305113542)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(0 + 0*((x-6.30769230769231)/4.44193305113542) + 0*((x-6.30769230769231)/4.44193305113542)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(0 + 0*((x-6.30769230769231)/4.44193305113542) + 0*((x-6.30769230769231)/4.44193305113542)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(0.5270674399205 + 0*((x-6.30769230769231)/4.44193305113542) + 0*((x-6.30769230769231)/4.44193305113542)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.5983117137335 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.8122021034055 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(0 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(0 + 0*((x-6.5)/4.18330013267038) + 0*((x-6.5)/4.18330013267038)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,13)) +
stat_function(fun=function(x){(0.632679211855 + 0*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0.349959489226 + 0*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.65700748175825 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.54081606492855 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(1.0109321058 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.655072142106 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0.731972712426 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.59626868737095 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.8582671076338 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.7592761634865 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(1.1552530148265 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.986088985815 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.68556931230535 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(1.022799489115 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.97965927109185 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.803511432861 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.79344460459175 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.72159696350755 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.4814433628784 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.60521234486 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.3537450782117 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.37327263227335 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.377628686065944 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.5437516236842 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0.504423500693375 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.80648721753 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.7913683010295 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.648945935484336 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-2.66666666666667)/2.16024689946929) + 0*((x-2.66666666666667)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.66666666666667)/2.16024689946929) + 0*((x-2.66666666666667)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.66666666666667)/2.16024689946929) + 0*((x-2.66666666666667)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.9489028629335 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.8672464011055 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.9196388284695 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,18)) +
stat_function(fun=function(x){(0.404605727390785 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0.560041003595 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.76325318845 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.6254828221695 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.7026579292775 + 0*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-3.83333333333333)/3.31159578853861) + 0*((x-3.83333333333333)/3.31159578853861)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.5984250184945 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.78454079931975 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.6450010890139 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.820802279640465 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.710819521829125 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.62095066979663 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.636464377586266 + 0*((x-3.75)/2.81577190634672) + 0*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.650119418499025 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.655265056879635 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.728854690464225 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.33333333333333)/1.52752523165195) + 0*((x-1.33333333333333)/1.52752523165195)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.915302275897 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(1.200074881 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(1.316926175 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.98469447145 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.677649315057 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.5294942625926 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.473296830502247 + 0*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-4.25)/3.37003603202441) + 0*((x-4.25)/3.37003603202441)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.454664785767535 + 0*((x-4.33333333333333)/3.278719262151) + 0*((x-4.33333333333333)/3.278719262151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.72328309763 + 0*((x-4.33333333333333)/3.278719262151) + 0*((x-4.33333333333333)/3.278719262151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.5580855949988 + 0*((x-4.33333333333333)/3.278719262151) + 0*((x-4.33333333333333)/3.278719262151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.767692033 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.56490095048035 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.54574996832919 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0.5999509596228 + 0*((x-2.5)/3.1091263510296) + 0*((x-2.5)/3.1091263510296)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + 0*((x-3.14285714285714)/2.41029537806548) + 0*((x-3.14285714285714)/2.41029537806548)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0.63022365484735 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.6740871452329 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.6398150331664 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.80549033045465 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.68714984592705 + 0*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(1.201980756405 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0.88545121095 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0.5533615423922 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,16)) +
stat_function(fun=function(x){(0.5091896997848 + 0*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,16)) +
stat_function(fun=function(x){(1.14079278196 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-4)/2.73861278752583) + 0*((x-4)/2.73861278752583)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.36643191323985) + 0*((x-3)/2.36643191323985)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.639763269731185 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.70585616707015 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.595055200228685 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.513235067671825 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.50705390927465 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.8595159857035 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.809539969636 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0.7283989734076 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-2)/1.58113883008419) + 0*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.81378356889533 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.58842212902895 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.59828139604185 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0.48790044105748 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.73032363300804 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-1.53679094205 + 0*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2))
#------------------------
# Panel (c): richness responses, "0.9%" label.
# Each treatment line is a quadratic in standardized time,
#   y = (b0 + b1*z + b2*z^2) * 0.3294397 - 0.1075646,  z = (x - center)/scale,
# presumably a back-transformation of standardized effect sizes — confirm
# against the model-fitting script that generated these coefficients.
# Wrapped in local() so the layer-builder helper does not leak into the
# global environment of this script.
richnessPlot1 <- local({
  # Build one treatment line as a stat_function layer over [0, xmax].
  trt_line <- function(b0, b1, b2, center, scale, xmax) {
    stat_function(
      fun = function(x) {
        z <- (x - center) / scale
        (b0 + b1 * z + b2 * z^2) * (0.3294397) + (-0.1075646)
      },
      size = 2, xlim = c(0, xmax)
    )
  }
  ggplot(data = data.frame(x = c(0, 0))) +
    coord_cartesian(ylim = c(-1.0, 2.0)) +
    scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
    scale_y_continuous(limits = c(-2, 2), breaks = seq(-2, 2, 1)) +
    xlab('') +
    ylab('') +
    annotate('text', x = 0, y = 2.0, label = '(c) 0.9%', size = 10, hjust = 'left') +
    # individual treatment lines: (b0, b1, b2, center, scale, xmax)
    trt_line(0,                0.44181191409335,  0, 2.33333333333333, 3.21455025366432, 6) +
    # NOTE(review): xmax = 16 here despite center 4.5 / scale ~3.03 — carried
    # over verbatim from the generated code; verify against the source data.
    trt_line(0.39616670639991, 0.290830427392585, 0, 4.5,              3.02765035409749, 16) +
    trt_line(1.022318756037,   0.4117991038616,   0, 2,                1.58113883008419, 4) +
    trt_line(1.60835336305,    0.42401222999135,  0, 1,                1,                2)
})
#------------------------
# Panel (e): richness responses, "0.2%" label.
# One significant treatment line: a pure quadratic in standardized time
# (linear term and intercept are zero), back-transformed via
#   y = value * 0.3294397 - 0.1075646.
richnessPlot2 <- ggplot(data = data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(-1.0, 2.0)) +
  scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
  scale_y_continuous(limits = c(-2, 2), breaks = seq(-2, 2, 1)) +
  xlab('') +
  ylab('') +
  annotate('text', x = 0, y = 2.0, label = '(e) 0.2%', size = 10, hjust = 'left') +
  # single significant treatment line
  stat_function(
    fun = function(x) {
      z <- (x - 7.75) / 6.84957419601151  # standardized time
      (0.30913779666705 * z^2) * (0.3294397) + (-0.1075646)
    },
    size = 2, xlim = c(0, 19)
  )
#------------------------
# Panel (g): richness responses. No individually significant treatment
# lines for this panel, so only the shared axes and the panel label are
# drawn (same frame as the other richness panels).
richnessPlot3 <- ggplot(data.frame(x = c(0, 0))) +
  coord_cartesian(ylim = c(-1.0, 2.0)) +
  scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
  scale_y_continuous(limits = c(-2, 2), breaks = seq(-2, 2, 1)) +
  xlab('') +
  ylab('') +
  annotate('text', x = 0, y = 2.0, label = '(g)', size = 10, hjust = 'left')
#no individually significant treatment lines for this panel
#-------------------
# Panel (i): richness responses, "0%" label.
# Generated code: each stat_function layer draws one treatment's fitted line,
#   y = (b0 + b1*((x-center)/scale) + b2*((x-center)/scale)^2) * 0.3294397 - 0.1075646,
# where (center, scale) standardize time and xlim caps each line at that
# treatment's observed duration. All quadratic coefficients (b2) are 0 here,
# i.e. every line in this panel is linear in standardized time.
# NOTE(review): 0.3294397 / -0.1075646 appear to be a shared back-transform
# (they recur in every panel) — confirm against the model-fitting script.
# The dummy data.frame(x=c(0,0)) only initializes the plot; all geometry
# comes from the stat_function layers.
richnessPlot4 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(-1.0,2.0)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
scale_y_continuous(limits=c(-2,2), breaks=seq(-2,2,1)) +
xlab('') +
ylab('') +
annotate('text', x=0, y=2.0, label='(i) 0%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(0 + -0.49914040582*((x-5.5)/3.60555127546399) + 0*((x-5.5)/3.60555127546399)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,11)) +
stat_function(fun=function(x){(-0.5016173969134 + -0.52904972436*((x-5.63636363636364)/3.93122696553448) + 0*((x-5.63636363636364)/3.93122696553448)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + -0.3985731688194*((x-4.55555555555556)/3.39525813124389) + 0*((x-4.55555555555556)/3.39525813124389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.6799896493018 + -0.5685201383569*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.6836207002 + -1.03031594695*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-1.68661937945 + -0.9893542251*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.9647520531495 + -0.453976046013965*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.981598805713 + -0.46609825721715*((x-2.33333333333333)/2.51661147842358) + 0*((x-2.33333333333333)/2.51661147842358)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + -0.37692491154944*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.6938334216581 + -0.39008620205657*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + -0.641797242278*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + -0.38198149206934*((x-2.33333333333333)/3.21455025366432) + 0*((x-2.33333333333333)/3.21455025366432)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(-0.31850231964505 + -0.400667321885*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-1.5229561034 + -0.7919432698*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + -0.6219897490857*((x-3.33333333333333)/3.51188458428425) + 0*((x-3.33333333333333)/3.51188458428425)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(-1.012632767455 + -0.51005056398355*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(-0.64538445091145 + -0.459474467253*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0.3765325433606 + -0.2601960535371*((x-6)/3.89444048184931) + 0*((x-6)/3.89444048184931)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,12)) +
stat_function(fun=function(x){(0 + -0.400686828045*((x-9.5)/5.91607978309962) + 0*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + -0.28230266293315*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-0.47875854389625 + -0.26701596375974*((x-9)/5.62731433871138) + 0*((x-9)/5.62731433871138)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,18)) +
stat_function(fun=function(x){(-0.4421715212544 + -0.55558134545*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.6850095014485 + -0.9524255893*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + -0.288246920735535*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0.3723023802952 + -0.32460902555635*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + -0.3636965609855*((x-5)/3.3166247903554) + 0*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.9947703694865 + -0.6480181671225*((x-3.33333333333333)/3.05505046330389) + 0*((x-3.33333333333333)/3.05505046330389)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + -0.42973725003101*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + -0.4113923563682*((x-1)/1) + 0*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2)) +
stat_function(fun=function(x){(0 + -0.36831846878358*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.547215056013918*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.47795707230105*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.45121444385405*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.36555878037525*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.39801070576235*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.54461740339925 + -0.4411218379335*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.5050888534076 + -0.3828643416076*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(-0.7027819350575 + -0.586156930725*((x-2.5)/1.87082869338697) + 0*((x-2.5)/1.87082869338697)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,5)) +
stat_function(fun=function(x){(0 + -0.28588225567995*((x-3)/2.16024689946929) + 0*((x-3)/2.16024689946929)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,6)) +
stat_function(fun=function(x){(0 + -0.42267954417805*((x-1.5)/1.29099444873581) + 0*((x-1.5)/1.29099444873581)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,3)) +
stat_function(fun=function(x){(0 + -0.36812705989132*((x-4.5)/3.02765035409749) + 0*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9))
#------------------------
# Panel (k): richness responses, "0.7%" label.
# Three significant treatment lines, each a full quadratic in standardized
# time, back-transformed via y = value * 0.3294397 - 0.1075646.
# Wrapped in local() so the layer-builder helper does not leak into the
# global environment of this script.
richnessPlot5 <- local({
  # Build one treatment line as a stat_function layer over [0, xmax].
  trt_line <- function(b0, b1, b2, center, scale, xmax) {
    stat_function(
      fun = function(x) {
        z <- (x - center) / scale
        (b0 + b1 * z + b2 * z^2) * (0.3294397) + (-0.1075646)
      },
      size = 2, xlim = c(0, xmax)
    )
  }
  ggplot(data = data.frame(x = c(0, 0))) +
    coord_cartesian(ylim = c(-1.0, 2.0)) +
    scale_x_continuous(limits = c(0, 19), breaks = seq(4, 19, 5), labels = seq(5, 20, 5)) +
    scale_y_continuous(limits = c(-2, 2), breaks = seq(-2, 2, 1)) +
    xlab('') +
    ylab('') +
    annotate('text', x = 0, y = 2.0, label = '(k) 0.7%', size = 10, hjust = 'left') +
    # individual treatment lines: (b0, b1, b2, center, scale, xmax)
    trt_line(0,                 -0.530878891905, -0.287721361557,   3.5, 2.44948974278318, 7) +
    trt_line(-0.45950199277256, -0.8059016976,   -0.2778404319041,  3.5, 2.44948974278318, 7) +
    trt_line(-0.95648348015,    -1.16335031075,  -0.27147101485848, 3.5, 2.44948974278318, 7)
})
#------------------------
# Panel (m): richness response for the 2.5% treatment category.
# Same construction as the other panels: one stat_function per experiment,
# quadratic in standardized time, rescaled by *(0.3294397)+(-0.1075646);
# xlim on each line limits the curve to that experiment's duration.
richnessPlot6 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(-1.0,2.0)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
scale_y_continuous(limits=c(-2,2), breaks=seq(-2,2,1)) +
xlab('') +
ylab('') +
annotate('text', x=0, y=2.0, label='(m) 2.5%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(-2.0939772975 + -0.95962104105*((x-9.5)/5.91607978309962) + 0.4619298023*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-2.8331772775 + -1.07016848605*((x-9.5)/5.91607978309962) + 0.53571449275*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-3.2987774655 + -1.09201081485*((x-4.5)/3.02765035409749) + 0.8577669581*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-5.5241242285 + -1.019568862*((x-4.5)/3.02765035409749) + 1.35809478305*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-2.0324486655 + -0.68145109765*((x-4.5)/3.02765035409749) + 0.5793310788*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-3.488116406 + -1.42622611105*((x-4.5)/3.02765035409749) + 0.6967644382*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-1.6693239423 + -0.4212131046075*((x-4.5)/3.02765035409749) + 0.4172076921975*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-2.8742205255 + -0.72351667055*((x-4.5)/3.02765035409749) + 0.5722398146*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-0.61393220605132 + 0*((x-7.75)/6.84957419601151) + 0.303487926440495*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.52292354969942 + 0*((x-7.75)/6.84957419601151) + 0.2881972127081*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-1.2649985157 + -0.7065344681*((x-5)/3.3166247903554) + 0.27770669447673*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10))
#------------------------
# Panel (o): richness response for the 4.1% treatment category.
# One fitted quadratic per experiment (std-time polynomial rescaled by
# *(0.3294397)+(-0.1075646)); most lines here have a zero linear term,
# i.e. the model retained only intercept + quadratic components.
richnessPlot7 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(-1.0,2.0)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
scale_y_continuous(limits=c(-2,2), breaks=seq(-2,2,1)) +
xlab('') +
ylab('') +
annotate('text', x=0, y=2.0, label='(o) 4.1%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(0.829185616815 + 0*((x-3.5)/2.44948974278318) + -0.308305109611965*((x-3.5)/2.44948974278318)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0.641172248081 + 0*((x-3.5)/2.44948974278318) + -0.331081780903*((x-3.5)/2.44948974278318)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(0 + -0.283735142101505*((x-3.5)/2.44948974278318) + -0.30319786719905*((x-3.5)/2.44948974278318)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,7)) +
stat_function(fun=function(x){(1.04675907795 + 0.32190340621385*((x-3.75)/2.81577190634672) + -0.31777817122565*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.18326023845 + 0*((x-3.75)/2.81577190634672) + -0.35045500417095*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.0725616444 + 0*((x-3.75)/2.81577190634672) + -0.37782777309975*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.13349721865 + 0*((x-3.75)/2.81577190634672) + -0.407504888705*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.49089506684225 + 0*((x-3.75)/2.81577190634672) + -0.302244075123*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.29897727345 + 0*((x-3.75)/2.81577190634672) + -0.429341620217*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(0.805117022935 + 0*((x-3.75)/2.81577190634672) + -0.3207876707255*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.74250032365 + 0*((x-3.75)/2.81577190634672) + -0.52838150332*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.21906051815 + 0*((x-3.75)/2.81577190634672) + -0.3331415715565*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.343092555 + 0*((x-3.75)/2.81577190634672) + -0.440030659518*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.18415247325 + 0*((x-3.75)/2.81577190634672) + -0.352000053709*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.58006527065 + 0*((x-3.75)/2.81577190634672) + -0.50197641578*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.24694652735 + 0*((x-3.75)/2.81577190634672) + -0.33600256542555*((x-3.75)/2.81577190634672)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,8)) +
stat_function(fun=function(x){(1.16463387175 + 0*((x-4.5)/3.02765035409749) + -0.278579694842705*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(1.033945351775 + 0*((x-2)/1.58113883008419) + -0.2858358888419*((x-2)/1.58113883008419)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,4))
#------------------------
# Panel (q): richness response for the 5.5% treatment category.
# Same per-experiment quadratic construction as the preceding panels,
# rescaled by *(0.3294397)+(-0.1075646) to the raw response scale.
richnessPlot8 <- ggplot(data=data.frame(x=c(0,0))) +
coord_cartesian(ylim=c(-1.0,2.0)) +
scale_x_continuous(limits=c(0,19), breaks=seq(4,19,5), labels=seq(5,20,5)) +
scale_y_continuous(limits=c(-2,2), breaks=seq(-2,2,1)) +
xlab('') +
ylab('') +
annotate('text', x=0, y=2.0, label='(q) 5.5%', size=10, hjust='left') +
#below are the individual treatment lines
stat_function(fun=function(x){(-0.3958865882397 + 0*((x-6.30769230769231)/4.44193305113542) + 0.198739653553624*((x-6.30769230769231)/4.44193305113542)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,15)) +
stat_function(fun=function(x){(-1.02862283025 + 0*((x-9.5)/5.91607978309962) + 0.7251356974*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-3.3471712875 + 0.3267829701715*((x-9.5)/5.91607978309962) + 1.349283108*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-4.153622019 + 0.6764299824*((x-9.5)/5.91607978309962) + 1.5614351265*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-9.5)/5.91607978309962) + 0.230395758030415*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-2.729288661 + -0.250968531383845*((x-9.5)/5.91607978309962) + 0.78380787565*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-3.06269179 + -0.260621205556815*((x-9.5)/5.91607978309962) + 0.84092354635*((x-9.5)/5.91607978309962)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.5816567103435 + 0*((x-4.5)/3.02765035409749) + 0.4543186206718*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(0 + 0*((x-4.5)/3.02765035409749) + 0.27785170266791*((x-4.5)/3.02765035409749)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,9)) +
stat_function(fun=function(x){(-1.13480577335 + 0*((x-9)/5.62731433871138) + 0.31210782598045*((x-9)/5.62731433871138)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-1.1487965346 + -0.33450547466097*((x-7.75)/6.84957419601151) + 0.484316575829*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.7814852596725 + 0*((x-7.75)/6.84957419601151) + 0.49445645693*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0.39474695175635*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-1.3854939163 + 0*((x-7.75)/6.84957419601151) + 0.62869052435*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-1.148691291 + 0*((x-7.75)/6.84957419601151) + 0.59001968075*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.5800057757827 + 0*((x-7.75)/6.84957419601151) + 0.3753986771225*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0.327870395070475*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0.36502134736995*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.59346385122955 + 0*((x-7.75)/6.84957419601151) + 0.3558651766508*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(0 + 0*((x-7.75)/6.84957419601151) + 0.288271603737875*((x-7.75)/6.84957419601151)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,19)) +
stat_function(fun=function(x){(-0.84401680855 + -0.28473236147263*((x-5)/3.3166247903554) + 0.30733364305005*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(0 + 0*((x-5)/3.3166247903554) + 0.2292657911235*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-0.9475037901 + -0.274701452995*((x-5)/3.3166247903554) + 0.349366397897*((x-5)/3.3166247903554)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,10)) +
stat_function(fun=function(x){(-1.16758772225 + 0*((x-1)/1) + 0.333685543724983*((x-1)/1)^2)*(0.3294397)+(-0.1075646)}, size=2, xlim=c(0,2))
#print all plots together --------------------------------------------------------
# Assemble the 9x2 panel figure with the grid package: richness panels in
# column 1, mean-change panels in column 2, one treatment category per row.
# NOTE(review): richnessPlot0-4 and meanPlot0-8 are defined earlier in the
# file (outside this section); this block only lays them out.
pushViewport(viewport(layout=grid.layout(9,2)))
print(richnessPlot0, vp=viewport(layout.pos.row=1, layout.pos.col=1))
print(richnessPlot1, vp=viewport(layout.pos.row=2, layout.pos.col=1))
print(richnessPlot2, vp=viewport(layout.pos.row=3, layout.pos.col=1))
print(richnessPlot3, vp=viewport(layout.pos.row=4, layout.pos.col=1))
print(richnessPlot4, vp=viewport(layout.pos.row=5, layout.pos.col=1))
print(richnessPlot5, vp=viewport(layout.pos.row=6, layout.pos.col=1))
print(richnessPlot6, vp=viewport(layout.pos.row=7, layout.pos.col=1))
print(richnessPlot7, vp=viewport(layout.pos.row=8, layout.pos.col=1))
print(richnessPlot8, vp=viewport(layout.pos.row=9, layout.pos.col=1))
print(meanPlot0, vp=viewport(layout.pos.row=1, layout.pos.col=2))
print(meanPlot1, vp=viewport(layout.pos.row=2, layout.pos.col=2))
print(meanPlot2, vp=viewport(layout.pos.row=3, layout.pos.col=2))
print(meanPlot3, vp=viewport(layout.pos.row=4, layout.pos.col=2))
print(meanPlot4, vp=viewport(layout.pos.row=5, layout.pos.col=2))
print(meanPlot5, vp=viewport(layout.pos.row=6, layout.pos.col=2))
print(meanPlot6, vp=viewport(layout.pos.row=7, layout.pos.col=2))
print(meanPlot7, vp=viewport(layout.pos.row=8, layout.pos.col=2))
print(meanPlot8, vp=viewport(layout.pos.row=9, layout.pos.col=2))
#export at 1200 x 3600
#remove because we're not comparing across the output anymore due to standardizing time
# #summary stats from bayesian output --------------------------------------------------------
# # #gather summary stats needed and relabel them
# # chainsCommunitySummary <- chainsCommunity%>%
# # select(
# # #trt_type intercepts: center digit refers to trts
# # E.1.1.1, E.2.1.1, E.1.2.1, E.2.2.1, E.1.3.1, E.2.3.1, E.1.4.1, E.2.4.1, E.1.5.1, E.2.5.1,
# # E.1.6.1, E.2.6.1, E.1.7.1, E.2.7.1, E.1.8.1, E.2.8.1, E.1.9.1, E.2.9.1, E.1.10.1, E.2.10.1,
# # E.1.11.1, E.2.11.1, E.1.12.1, E.2.12.1, E.1.13.1, E.2.13.1, E.1.14.1, E.2.14.1, E.1.15.1, E.2.15.1,
# # E.1.16.1, E.2.16.1,
# # #trt_type linear slopes: center digit refers to trts
# # E.1.1.2, E.2.1.2, E.1.2.2, E.2.2.2, E.1.3.2, E.2.3.2, E.1.4.2, E.2.4.2, E.1.5.2, E.2.5.2,
# # E.1.6.2, E.2.6.2, E.1.7.2, E.2.7.2, E.1.8.2, E.2.8.2, E.1.9.2, E.2.9.2, E.1.10.2, E.2.10.2,
# # E.1.11.2, E.2.11.2, E.1.12.2, E.2.12.2, E.1.13.2, E.2.13.2, E.1.14.2, E.2.14.2, E.1.15.2, E.2.15.2,
# # E.1.16.2, E.2.16.2,
# # #trt_type quadratic slopes: center digit refers to trts and interactions with anpp and gamma diversity
# # E.1.1.3, E.2.1.3, E.1.2.3, E.2.2.3, E.1.3.3, E.2.3.3, E.1.4.3, E.2.4.3, E.1.5.3, E.2.5.3,
# # E.1.6.3, E.2.6.3, E.1.7.3, E.2.7.3, E.1.8.3, E.2.8.3, E.1.9.3, E.2.9.3, E.1.10.3, E.2.10.3,
# # E.1.11.3, E.2.11.3, E.1.12.3, E.2.12.3, E.1.13.3, E.2.13.3, E.1.14.3, E.2.14.3, E.1.15.3, E.2.15.3,
# # E.1.16.3, E.2.16.3,
# # #ANPP intercept, linear, and quad slopes (center digit): 2=anpp
# # D.1.2.1, D.2.2.1,
# # D.1.2.2, D.2.2.2,
# # D.1.2.3, D.2.2.3,
# # #richness intercept, linear, and quad slopes (center digit): 3=gamma diversity
# # D.1.3.1, D.2.3.1,
# # D.1.3.2, D.2.3.2,
# # D.1.3.3, D.2.3.3,
# # #overall intercept, linear, and quad slopes (center digit): 1=overall
# # D.1.1.1, D.2.1.1,
# # D.1.1.2, D.2.1.2,
# # D.1.1.3, D.2.1.3)
# #
# # chainsCommunitySummary <- chainsCommunitySummary%>%
# # gather(key=parameter, value=value, D.1.2.1:D.2.1.3)%>%
# # group_by(parameter)%>%
# # summarise(median=median(value), sd=sd(value))%>%
# # ungroup()%>%
# # mutate(CI=sd*2)%>%
# # separate(parameter, c('level', 'variable', 'predictor', 'parameter'))%>%
# # #rename parts to be more clear
# # mutate(variable=ifelse(variable==1, 'mean', 'richness'),
# # parameter=ifelse(parameter==1, 'intercept', ifelse(parameter==2, 'linear', 'quadratic')),
# # predictor2=ifelse(predictor==2, 'ANPP', ifelse(predictor==3, 'rrich', 'overall')))%>%
# # select(variable, parameter, predictor2, median, sd, CI)
#
# # write.csv(chainsCommunitySummary, 'bayesian_output_summary_final plots_expinteraction_20yr_stdtimebytrt_04072019.csv')
#
# chainsCommunitySummary <- read.csv('bayesian_output_summary_final plots_expinteraction_20yr_stdtimebytrt_04072019.csv')
#
# chainsCommunityOverall <- chainsCommunitySummary%>%
# mutate(type=paste(predictor2, parameter, sep='_'))
#
#
#
# ###overall responses from bayesian output --------------------------------------------------------
# meanOverallPlot <- ggplot(data=subset(chainsCommunityOverall, variable=='mean' & predictor2!='trt_type'), aes(x=type, y=median)) +
# geom_point(size=4) +
# geom_errorbar(aes(ymin=median-CI, ymax=median+CI, width=0.4)) +
# # scale_y_continuous(limits=c(-0.15, 0.25), breaks=seq(-0.1, 0.2, 0.1)) +
# scale_x_discrete(limits=c('rrich_quadratic', 'ANPP_quadratic', 'overall_quadratic', 'rrich_linear', 'ANPP_linear', 'overall_linear'),
# labels=c('Gamma', 'ANPP', 'Overall', 'Gamma', 'ANPP', 'Overall')) +
# theme(axis.title.x=element_blank(), axis.title.y=element_blank(), plot.title=element_text(size=40, vjust=2, margin=margin(b=15))) +
# geom_hline(aes(yintercept=0)) +
# geom_vline(aes(xintercept=3.5), linetype='dashed') +
# coord_flip() +
# ggtitle('Compositional Difference') +
# annotate('text', x=6.3, y=-0.15, label='(b)', size=10, hjust='left')
#
# richnessOverallPlot <- ggplot(data=subset(chainsCommunityOverall, variable=='richness' & predictor2!='trt_type'), aes(x=type, y=median)) +
# geom_point(size=4) +
# geom_errorbar(aes(ymin=median-CI, ymax=median+CI, width=0.4)) +
# # scale_y_continuous(limits=c(-0.15, 0.25), breaks=seq(-0.1, 0.2, 0.1)) +
# scale_x_discrete(limits=c('rrich_quadratic', 'ANPP_quadratic', 'overall_quadratic', 'rrich_linear', 'ANPP_linear', 'overall_linear'),
# labels=c('Gamma', 'ANPP', 'Overall', 'Gamma', 'ANPP', 'Overall')) +
# theme(axis.title.x=element_blank(), axis.title.y=element_blank(), plot.title=element_text(size=40, vjust=2, margin=margin(b=15))) +
# geom_hline(aes(yintercept=0)) +
# geom_vline(aes(xintercept=3.5), linetype='dashed') +
# coord_flip() +
# ggtitle('Richness Difference') +
# annotate('text', x=6.3, y=-0.15, label='(a)', size=10, hjust='left')
#
# pushViewport(viewport(layout=grid.layout(1,2)))
# print(richnessOverallPlot, vp=viewport(layout.pos.row = 1, layout.pos.col = 1))
# print(meanOverallPlot, vp=viewport(layout.pos.row = 1, layout.pos.col = 2))
# #export at 1600x1000
###by magnitude of resource manipulated---------------------------------
#N addition
# Load the N-addition magnitude dataset and summarise the raw means/SDs used
# to back-transform the standardized model coefficients in the plots below.
nData <- read.csv('ForAnalysis_allAnalysisNmag.csv')
nDataMean <- nData%>%
summarise(mean_mean_change=mean(composition_diff), sd_mean_change=sd(composition_diff), mean_S_PC=mean(S_PC), sd_S_PC=sd(S_PC))
#mean change
# Read the posterior draws for the N mean-change model; rows = draws,
# columns = parameters. comment.char='#' skips Stan-style header comments.
Nmean <- read.csv('C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\converge diverge working group\\converge_diverge\\La Pierre_comm difference_final model results_01122018\\magnitude_042019\\posteriors_N_MeanChange.csv', comment.char='#')
# Column-wise posterior means; rownames_to_column() replaces the deprecated
# add_rownames() (removed from the tidyverse API) with identical output.
NmeanMean <- as.data.frame(colMeans(Nmean))%>%
tibble::rownames_to_column('parameter')
names(NmeanMean)[names(NmeanMean) == 'colMeans(Nmean)'] <- 'mean'
# Column-wise posterior SDs; colSd() is a helper defined elsewhere —
# TODO confirm it is in scope (e.g. matrixStats::colSds or a local function).
NmeanSD <- as.data.frame(colSd(Nmean))%>%
tibble::rownames_to_column('parameter')
names(NmeanSD)[names(NmeanSD) == 'colSd(Nmean)'] <- 'sd'
# One row per parameter with its posterior mean and sd, joined on 'parameter'.
NmeanOverall <- NmeanMean%>%
left_join(NmeanSD)
# #get mean and sd to transform
# nDataSummary <- nData%>%
#   summarise(mean_change_mean=mean(composition_diff), mean_change_sd=sd(composition_diff), richness_mean=mean(S_PC), richness_sd=sd(S_PC), n_mean=mean(n), n_sd=sd(n), MAP_mean=mean(MAP), MAP_sd=sd(MAP))
# Z-score the response and predictor columns so they match the scale the
# Bayesian model was fit on.
nDataTransform <- nData%>%
#transform mean change
mutate(mean_change_transform=((composition_diff-mean(composition_diff))/sd(composition_diff)))%>%
#transform proportional richness change
mutate(S_PC_transform=((S_PC-mean(S_PC))/sd(S_PC)))%>%
#transform N treatment magnitude
mutate(n_transform=((n-mean(n))/sd(n)))%>%
#transform MAP
mutate(MAP_transform=((MAP-mean(MAP))/sd(MAP)))
# Panel (d): composition response vs. N added, with fitted interaction lines
# at three representative MAP levels (1000, 600, 200 mm), colored to match
# the MAP gradient. Coefficients come from the Bayesian posterior; the
# *0.1658319+0.3699378 term back-transforms to the raw composition scale.
# NOTE(review): subset(nData) with no condition is a no-op — plain nData
# would be equivalent; left unchanged here.
meanNPlotFinal <- ggplot(data=subset(nData), aes(x=n, y=composition_diff, color=MAP)) +
geom_point(size=5) +
coord_cartesian(ylim=c(0,1)) +
scale_y_continuous(name='Composition Response') +
stat_function(fun=function(x){(0.02512656 + 0.40341207*((1000-661.9362)/298.3696) + 0.54133077*(x-9.992142)/9.108662 + 0.28058497*((1000-661.9362)/298.3696)*(x-9.992142)/9.108662)*0.1658319+0.3699378}, size=5, color='#4793CF') +
stat_function(fun=function(x){(0.02512656 + 0.40341207*((600-661.9362)/298.3696) + 0.54133077*(x-9.992142)/9.108662 + 0.28058497*((600-661.9362)/298.3696)*(x-9.992142)/9.108662)*0.1658319+0.3699378}, size=5, color='#2D5E88') +
stat_function(fun=function(x){(0.02512656 + 0.40341207*((200-661.9362)/298.3696) + 0.54133077*(x-9.992142)/9.108662 + 0.28058497*((200-661.9362)/298.3696)*(x-9.992142)/9.108662)*0.1658319+0.3699378}, size=5, color='#153049') +
xlab(expression(paste('N added (g', m^-2, ')'))) +
annotate('text', x=0.4, y=1.0, label='(d)', size=12, hjust='left') +
theme(legend.position=c(0.8,0.05), legend.justification=c(0,0), legend.title=element_text(size=24))
#richness difference
# Read posterior draws for the N richness model and summarise each parameter
# (posterior mean and sd), then draw panel (a): richness response vs. N added
# with the back-transformed fitted line (*0.2548196-0.1338463 rescales from
# standardized to raw units).
Nrichness <- read.csv('C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\converge diverge working group\\converge_diverge\\nate_results\\manipulation\\posteriors_N_Richness.csv', comment.char='#')
# rownames_to_column() replaces the deprecated add_rownames() (removed from
# the tidyverse API); output is identical.
NrichnessMean <- as.data.frame(colMeans(Nrichness))%>%
tibble::rownames_to_column('parameter')
names(NrichnessMean)[names(NrichnessMean) == 'colMeans(Nrichness)'] <- 'mean'
# colSd() is a helper defined elsewhere — TODO confirm it is in scope.
NrichnessSD <- as.data.frame(colSd(Nrichness))%>%
tibble::rownames_to_column('parameter')
names(NrichnessSD)[names(NrichnessSD) == 'colSd(Nrichness)'] <- 'sd'
NrichnessOverall <- NrichnessMean%>%
left_join(NrichnessSD)
richnessNPlotFinal <- ggplot(data=nData, aes(x=n, y=S_PC, color=MAP)) +
geom_point(size=5) +
coord_cartesian(ylim=c(-0.8,1)) +
scale_y_continuous(name='Richness Response') +
stat_function(fun=function(x){(-0.005589416 + -0.562241618*(x-9.992142)/9.108662)*0.2548196-0.1338463}, size=5) +
xlab('') +
annotate('text', x=1.0, y=1.0, label='(a)', size=12, hjust='left') +
theme(legend.position='none')
#drought change
# Panels (e) and (b): composition and richness responses vs. % precipitation
# reduction for drought experiments. No fitted lines are drawn here —
# presumably no significant magnitude effect was found (TODO confirm against
# the model output).
droData <- read.csv('ForAnalysis_allAnalysisH2Omag_drought.csv')
meanDroPlotFinal <- ggplot(data=droData, aes(x=precip, y=composition_diff)) +
geom_point(size=5) +
coord_cartesian(ylim=c(0,1)) +
scale_y_continuous(name='') +
xlab(expression(paste(H[2], 'O deviation from ambient (%)'))) +
annotate('text', x=-80, y=1, label='(e)', size=12, hjust='left')
richnessDroPlotFinal <- ggplot(data=droData, aes(x=precip, y=S_PC, color=MAP)) +
geom_point(size=5) +
coord_cartesian(ylim=c(-0.8,1)) +
scale_y_continuous(name='') +
xlab('') +
annotate('text', x=-80, y=1, label='(b)', size=12, hjust='left') +
theme(legend.position='none')
#irrigation change
# Panels (f) and (c): composition and richness responses vs. % precipitation
# addition for irrigation experiments (scatter only, no fitted lines).
irrData <- read.csv('ForAnalysis_allAnalysisH2Omag_irr.csv')
meanIrrPlotFinal <- ggplot(data=irrData, aes(x=precip, y=composition_diff)) +
geom_point(size=5) +
coord_cartesian(ylim=c(0,1)) +
scale_y_continuous(name='') +
xlab(expression(paste(H[2], 'O deviation from ambient (%)'))) +
annotate('text', x=0, y=1, label='(f)', size=12, hjust='left')
richnessIrrPlotFinal <- ggplot(data=irrData, aes(x=precip, y=S_PC, color=MAP)) +
geom_point(size=5) +
coord_cartesian(ylim=c(-0.8,1)) +
scale_y_continuous(name='') +
xlab('') +
annotate('text', x=0, y=1, label='(c)', size=12, hjust='left') +
theme(legend.position='none')
# Assemble the 2x3 magnitude figure: richness panels on top, composition
# panels on the bottom; columns = N / drought / irrigation.
pushViewport(viewport(layout=grid.layout(2,3)))
print(richnessNPlotFinal, vp=viewport(layout.pos.row = 1, layout.pos.col = 1))
print(meanNPlotFinal, vp=viewport(layout.pos.row = 2, layout.pos.col = 1))
print(richnessDroPlotFinal, vp=viewport(layout.pos.row = 1, layout.pos.col = 2))
print(meanDroPlotFinal, vp=viewport(layout.pos.row = 2, layout.pos.col = 2))
print(richnessIrrPlotFinal, vp=viewport(layout.pos.row = 1, layout.pos.col = 3))
print(meanIrrPlotFinal, vp=viewport(layout.pos.row = 2, layout.pos.col = 3))
#export at 2700 x 1600
|
49b38788640789e2d39c995943e9b512aa8d62ce
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/FRESA.CAD/R/bootstrapVarElimination.Bin.R
|
ef85104e2322178b65500b5b5d94fc77ca8d2fe6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,106
|
r
|
bootstrapVarElimination.Bin.R
|
# Bootstrap-based backward elimination for binary-outcome models (FRESA.CAD).
# Repeatedly removes the model term whose IDI/NRI z-score (in-sample and
# bootstrapped) falls below the significance threshold, refitting after each
# removal, until no term can be dropped. Tracks the formula that achieved the
# best bootstrap accuracy along the way.
#
# Arguments:
#   object        - fitted model to reduce
#   pvalue        - elimination threshold(s); values < 0.5 are converted to a
#                   z threshold via qnorm, otherwise used as a z cutoff directly
#   Outcome       - outcome column name in `data`
#   data          - data frame used for refitting and bootstrap validation
#   startOffset   - number of leading terms exempt from elimination
#   type          - model family: LOGIT, LM, or COX
#   selectionType - selection statistic: z of IDI or NRI
#   loops         - bootstrap repetitions per validation
#   print, plots  - verbosity / plotting flags
#
# Returns a list with the reduced model, its formula, bootstrap CV results,
# the last removed term, the best-accuracy model, and the pre-selection formula.
bootstrapVarElimination_Bin <- function (object,pvalue=0.05,Outcome="Class",data,startOffset=0, type = c("LOGIT", "LM","COX"),selectionType=c("zIDI","zNRI"),loops=64,print=TRUE,plots=TRUE)
{
seltype <- match.arg(selectionType)
pvalue <- as.vector(pvalue);
# Inner worker: performs ONE elimination pass. Bootstrap-validates the
# current model, finds the single weakest term (lowest of in-sample,
# bootstrap-train, and bootstrap-test z), and returns the formula with that
# term removed plus before/after AUC diagnostics.
boot.var.IDISelection <- function (object,pvalue=0.05,Outcome="Class",startOffset=0, type = c("LOGIT", "LM","COX"),selectionType=c("zIDI","zNRI"),loops,best.formula=NULL)
{
seltype <- match.arg(selectionType)
type <- match.arg(type);
varsList <- unlist(as.list(attr(terms(object),"variables")))
# Interaction terms are rewritten a:b -> a*b so they survive formula paste.
termList <- str_replace_all(attr(terms(object),"term.labels"),":","\\*")
if (pvalue[1]<0.5)
{
# Interpret pvalue as a p-value: convert to the matching |z| threshold.
cthr <- abs(qnorm(pvalue));
}
else
{
# Values >= 0.5 are treated as z thresholds directly.
cthr <- pvalue;
}
removeID <- 0;
outCome <- paste(varsList[2]," ~ 1");
frm1 <- outCome;
testAUC <- 0.5;
removedTerm <- NULL;
who <- 0;
idiCV <- NULL;
modsize <- length(termList);
if (modsize>0)
{
# Rebuild the full formula from the term list.
for ( i in 1:modsize)
{
frm1 <- paste(frm1,"+",termList[i]);
}
#	print(frm1)
ftmp <- formula(frm1);
# Bootstrap validation of the CURRENT model (before any removal).
idiCV <- bootstrapValidation_Bin(1.0,loops,ftmp,Outcome,data,type,plots =plots,best.model.formula=best.formula)
# Balanced accuracy proxy: median of (sens + spec)/2 over bootstrap reps.
testAUC <- (idiCV$sensitivity + idiCV$specificity)/2;
testAUC <- median(testAUC,na.rm = TRUE);
# In-sample IDI/NRI z-scores for every term.
resuBin <- getVar.Bin(object,data,Outcome,type);
startSearch <- 1 + startOffset;
frm1 <- outCome;
if (startSearch > 1)
{
# Protected leading terms are always kept.
for ( i in 1:(startSearch-1))
{
frm1 <- paste(frm1,"+",termList[i]);
}
}
if (startSearch <= modsize)
{
# Threshold may be a vector indexed by remaining-model size.
ploc <- 1+modsize-startSearch;
if (ploc>length(cthr)) ploc <- length(cthr);
minlcl <- cthr[ploc];
idlist <- startOffset+1;
for ( i in startSearch:modsize )
{
{
if (seltype=="zIDI")
{
# Three views of term strength: in-sample, bootstrap-train, bootstrap-test.
c0 <- resuBin$z.IDIs[idlist];
ci <- median(idiCV$z.IDIs[,idlist], na.rm = TRUE);
ci2 <- median(idiCV$test.z.IDIs[,idlist], na.rm = TRUE);
}
else
{
c0 <- resuBin$z.NRIs[idlist];
ci <- median(idiCV$z.NRIs[,idlist], na.rm = TRUE);
ci2 <- median(idiCV$test.z.NRIs[,idlist], na.rm = TRUE);
}
# Fall back to less robust estimates when bootstrap medians are NA/NaN.
if (is.nan(ci) || is.na(ci) ) ci <- c0;
if (is.nan(ci2) || is.na(ci2) ) ci2 <- ci;
# A term's score is its WORST z across the three views.
minz <- min(c(c0,ci,ci2));
#				cat(c0,":",ci,":",ci2,":",minlcl,":",minz,":",termList[i],"\n");
# Track the single weakest term below the current threshold.
if (minz < minlcl)
{
minlcl = minz;
who = i;
}
}
idlist=idlist+1;
}
}
# Rebuild the formula, skipping the selected term (if any).
for ( i in startSearch:modsize)
{
if (who != i)
{
if (who != -1)
{
frm1 <- paste(frm1,"+",termList[i]);
}
}
else
{
removeID=i;
removedTerm=termList[i];
}
}
# Negative removeID signals that the LAST remaining candidate was removed,
# i.e. elimination must stop after this pass.
if ((modsize == startSearch) && (who == startSearch))
{
removeID = -removeID;
}
}
ftmp <- formula(frm1);
# Re-validate the reduced model (only when something was actually removed
# and more than one term existed).
if ((who>0) && (modsize>1)) idiCV <- bootstrapValidation_Bin(1.0,loops,ftmp,Outcome,data,type,plots=plots)
afterTestAUC <- (idiCV$sensitivity + idiCV$specificity)/2;
afterTestAUC <- median(afterTestAUC,na.rm = TRUE);
# Defensive defaults so callers can compare AUCs without NA/NULL checks.
if (is.null(afterTestAUC)) afterTestAUC=0.0;
if (is.null(testAUC)) testAUC=0.5;
if (is.na(afterTestAUC)) afterTestAUC=0.0;
if (is.na(testAUC)) testAUC=0.5;
result <- list(Removed=removeID,BootModelAUC=idiCV$blind.ROCAUC$auc,backfrm=frm1,bootval=idiCV,afterTestAUC=afterTestAUC,beforeTestAUC=testAUC,removedTerm=removedTerm);
return (result)
}
# ---- main elimination loop state ----
bkobj <- NULL;
# bestAccuracy holds the 5%/50%/95% bootstrap-accuracy quantiles of the
# best model found so far.
bestAccuracy <- c(0.5,0.5,0.5);
best.formula=NULL;
startAccuracy = bestAccuracy;
maxAccuracy <- startAccuracy[2];
changes=1;
loopsAux=0;
model <- object;
modelReclas <- NULL;
myOutcome <- Outcome;
varsList <- unlist(as.list(attr(terms(object),"variables")))
termList <- str_replace_all(attr(terms(object),"term.labels"),":","\\*")
outCome = paste(varsList[2]," ~ 1");
frm1 = outCome;
# Reconstruct the full starting formula (pre-selection baseline).
if (length(termList) > 0)
{
for ( i in 1:length(termList))
{
frm1 <- paste(frm1,paste("+",termList[i]));
}
}
beforeFSCmodel.formula <- frm1;
model.formula <- frm1;
# NOTE(review): best.formula was set to NULL just above, so this guard is
# always TRUE; kept as-is to preserve the original control flow.
if (is.null(best.formula))
{
best.formula <- frm1;
}
min.formula <- best.formula;
beforeFSCmodel <- object;
beforeFormula <- frm1;
bk <- NULL;
changes2 <- 0;
#	print(pvalue[1:10]);
# Iterate single-term removals; stop when no removal happens (changes == 0,
# or negative Removed coerced via as.integer) or after 100 passes.
while ((changes>0) && (loopsAux<100))
{
bk <- boot.var.IDISelection(model,pvalue,Outcome=myOutcome,startOffset,type,seltype,loops,best.formula);
beforeFormula <- bk$backfrm;
nmodel = modelFitting(formula(bk$backfrm),data,type,TRUE);
if (!is.null(bk$bootval))
{
testAccuracy <-as.vector(quantile(bk$bootval$accuracy, probs = c(0.05, 0.5, 0.95), na.rm = TRUE,names = FALSE, type = 7));
if (loopsAux == 0) startAccuracy <- bk$beforeTestAUC;
}
if ((bk$Removed>0) && (!inherits(nmodel, "try-error")))
{
if (!is.null(bk$bootval))
{
if (!is.na(testAccuracy) && !is.null(testAccuracy))
{
# Keep the formula whose median accuracy beats the best lower bound;
# track the overall max-accuracy formula separately in min.formula.
if (testAccuracy[2] >= bestAccuracy[1])
{
best.formula <- bk$backfrm;
if (testAccuracy[2] >= maxAccuracy)
{
min.formula <- bk$backfrm;
maxAccuracy <- testAccuracy[2];
}
bestAccuracy <- testAccuracy;
}
}
}
if (changes>0)
{
# Record which term disappeared between the old and new models.
changes2<- attr(terms(model),"term.labels")[which(!(attr(terms(model),"term.labels") %in% attr(terms(nmodel),"term.labels")))]
if (length(changes2)>1)
{
changes2<-changes2[2]
}
}
}
changes = as.integer(bk$Removed);
model <- nmodel;
model.formula <- bk$backfrm;
loopsAux = loopsAux + 1
}
idiCV <- NULL;
# Final validation: if at least one predictor survived, re-validate with
# double the bootstrap loops; otherwise fall back to the intercept-only model.
if (length(all.vars(formula(model.formula))) > 1)
{
modelReclas <- getVar.Bin(model,data=data,Outcome=myOutcome,type);
idiCV <- bootstrapValidation_Bin(1.0000,2*loops,formula(model.formula),myOutcome,data,type,plots=plots);
# if (is.null(bk))
# {
# idiCV <- bootstrapValidation_Bin(1.0000,loops,formula(model.formula),myOutcome,data,type,plots=plots);
# }
# else
# {
# idiCV <- bk$bootval;
# if (is.null(idiCV))
# {
# idiCV <- bootstrapValidation_Bin(1.0000,loops,formula(model.formula),myOutcome,data,type,plots=plots);
# }
# }
}
else
{
model.formula <- outCome;
idiCV <- bootstrapValidation_Bin(1.0000,loops,formula(model.formula),myOutcome,data,type,plots=plots);
}
testAccuracy <-as.vector(quantile(idiCV$accuracy, probs = c(0.05, 0.5, 0.95), na.rm = TRUE,names = FALSE, type = 7));
if (print == TRUE)
{
cat("Before FSC Mod:",beforeFSCmodel.formula,"\n")
cat("At Acc Model  :",min.formula,"\n")
cat("Reduced Model :",model.formula,"\n")
cat("Start AUC:",startAccuracy,"last AUC:",idiCV$blind.ROCAUC$auc,"Accuracy:",testAccuracy[2],"\n")
}
back.model<-modelFitting(formula(model.formula),data,type,TRUE);
# Detach fitting environments so returned models don't capture this
# function's (potentially large) frame.
environment(back.model$formula) <- globalenv()
environment(back.model$terms) <- globalenv()
at.opt.model<-modelFitting(formula(min.formula),data,type,TRUE);
environment(at.opt.model$formula) <- NULL
environment(at.opt.model$terms) <- NULL
result <- list(back.model=back.model,
loops=loopsAux,
reclas.info=modelReclas,
bootCV=idiCV,
back.formula=model.formula,
lastRemoved=changes2,
at.opt.model=at.opt.model,
beforeFSC.formula=beforeFSCmodel.formula,
at.Accuracy.formula=best.formula);
return (result);
}
|
2eac21f0c4322d43e9d35dc8c03cf5758379149d
|
ed4ff8cb04c1f9e05ea8fee202acbd68a1b389c3
|
/R/forcequotes-package.R
|
ec39da252588606ca5d8f31916294df18ffd8450
|
[] |
no_license
|
hrbrmstr/forcequotes
|
bc4d4d1bbbf94491e12abe647e0b412af9e342af
|
9bcc7afc4b6c3c4a6ed5a34ae0ecbbc06c54c092
|
refs/heads/master
| 2020-04-16T08:39:38.933814
| 2019-01-12T21:02:52
| 2019-01-12T21:02:52
| 165,432,513
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
r
|
forcequotes-package.R
|
#' Return Random Star Wars Quotes
#'
#' \if{html}{
#' \figure{force-quotes.png}{options: align="right" alt="Figure: force-quotes.png"}
#' }
#'
#' \if{latex}{
#' \figure{force-quotes.png}{options: width=10cm}
#' }
#'
#' Now you can use the R 'Force' to get random quotes from your favorite
#' space opera. This is a thin wrapper to the 'Star Wars Quote API'
#' (<http://swquotesapi.digitaljedi.dk/index.html> / <http://swquotes.digitaljedi.dk/home>)
#'
#' - URL: <https://gitlab.com/hrbrmstr/forcequotes>
#' - BugReports: <https://gitlab.com/hrbrmstr/forcequotes/issues>
#'
#'
#'
#' @md
#' @name forcequotes
#' @docType package
#' @references <http://swquotesapi.digitaljedi.dk/index.html>;
#' <http://swquotes.digitaljedi.dk/home>
#' @author Bob Rudis (bob@@rud.is)
#' @importFrom jsonlite fromJSON
#' @import httr cli crayon
# Package-level documentation stub: roxygen2 attaches the block above to this
# NULL object to generate the package help page and NAMESPACE directives.
NULL
|
6b3b56100fb62822cc98ec7cfc5899f27e9b5994
|
7b8bfc26427028fe08ce34cf460649f7c9496258
|
/RSHINY_Boston_Property.R
|
bfe165cd40b07870de7ed1248717f800399d094a
|
[] |
no_license
|
shivinigam/Boston-Property-Analysis
|
bc6dcaabe8e82b0ccb233f4f56639da81a673bfb
|
d0c2e246a10eeff0720c744eeab809760632601b
|
refs/heads/master
| 2020-12-28T04:21:41.802948
| 2020-02-04T10:35:06
| 2020-02-04T10:35:06
| 238,180,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,893
|
r
|
RSHINY_Boston_Property.R
|
# Shiny app setup: load UI/data/plotting packages and prepare the Boston
# property dataset for the dashboard defined below.
library(shiny)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(plotly)
# Avoid scientific notation in axis labels for large assessed values.
options(scipen=8)
#datacleaning
getwd()
# NOTE(review): hard-coded setwd() to a personal path is not portable;
# prefer project-relative paths or here::here().
setwd("C:/Users/HP/Documents/")
getwd()
#data_clean <- read.csv("property.csv")
# Interactive file picker — the app cannot run non-interactively with this;
# presumably the user selects property.csv here.
data_clean <- read.csv(file.choose(), header = TRUE)
View(data_clean)
# Keep only the assessment/tax/size/year columns of interest.
subset1<- subset(data_clean, select = c(PID, CM_ID, GIS_ID, ST_NUM, ST_NAME, ST_NAME_SUF, UNIT_NUM, ZIPCODE, PTYPE, LU, OWN_OCC, OWNER, MAIL.CS,MAIL_ZIPCODE, AV_LAND, AV_BLDG, AV_TOTAL, GROSS_TAX, LAND_SF, YR_BUILT, YR_REMOD, GROSS_AREA, LIVING_AREA, NUM_FLOORS))
# Convert empty strings to NA, then drop incomplete rows.
# NOTE(review): na_if() is documented for vectors; applying it to a whole
# data frame relies on older dplyr behavior — verify with the installed version.
subset3 <-na_if(subset1,"")
subset4 <- na.omit(subset3)
subset4<-as.data.frame(subset4)
View(subset4)
# Restrict to properties remodeled in 2008-2018.
subset2 <- subset(subset4, YR_REMOD %in% c(2008, 2009,2010,2011,2012,2013,2014,2015,2016,2017,2018))
View(subset2)
prop_clean <- subset2
#write.csv(subset4,"prop.csv")
summary(prop_clean)
# Numeric-only subset for the correlation matrix.
subset5<- subset(data_clean, select = c( AV_LAND, AV_BLDG, AV_TOTAL, GROSS_TAX, LAND_SF, YR_BUILT, YR_REMOD, GROSS_AREA, LIVING_AREA, NUM_FLOORS))
subset6 <-na_if(subset5,"")
subset7 <- na.omit(subset6)
subset7<-as.data.frame(subset7)
# NOTE(review): likely copy-paste slip — this probably intends View(subset7);
# confirm before changing (View() is interactive-only, no downstream effect).
View(subset4)
subset8 <- subset(subset7, YR_REMOD %in% c(2008, 2009,2010,2011,2012,2013,2014,2015,2016,2017,2018))
# NOTE(review): likely intends View(subset8) — same copy-paste pattern as above.
View(subset2)
prop_clean1 <- subset8
# compute a correlation matrix
correlation <- round(cor(prop_clean1), 3)
# Column names, presumably for correlation-heatmap axis labels in the UI.
nms <- names(prop_clean1)
#data_clean <- read.csv("C:\Users\HP\Documents\property.csv")
# UI: two tabs ('hs' = correlation heatmap + drill-down, 'ls' = trend/scatter
# charts). Sidebar inputs are shown/hidden with conditionalPanel based on the
# active tab (input.ts) and the currently selected y variable.
ui <- fluidPage(
  # Sidebar layout with a input and output definitions
  sidebarLayout(
    # Inputs
    sidebarPanel(
      # Select variable for y-axis
      # (only rendered while the 'ls' tab is active)
      conditionalPanel(condition = "input.ts == 'ls'",
                       selectInput(inputId = "y",
                                   label = "Y-axis:",
                                   choices = c("bldg value" = "AV_BLDG",
                                               "GROSS TAX" = "GROSS_TAX"),
                                   selected = "AV_BLDG")),
      # Select variable for x-axis
      # (only while 'ls' is active and y is the building value)
      conditionalPanel(condition = "input.ts == 'ls' && input.y == 'AV_BLDG'" ,
                       selectInput(inputId = "x",
                                   label = "X-axis:",
                                   choices = c("bldg value" = "AV_BLDG",
                                               "GROSS TAX" = "GROSS_TAX"),
                                   selected = "GROSS_TAX"),
                       # Select variable for color
                       selectInput(inputId = "z",
                                   label = "Color by:",
                                   choices = c("Land Use" = "LU",
                                               "YEAR REMOD" = "YR_REMOD"),
                                   selected = "YR_REMOD")
      ),
      # Output
      # mainPanel(
      #   plotOutput(outputId = "scatterplot", click = "plot_click")
      #)
      # ),
      #),
      #sidebarLayout(
      # Inputs
      # sidebarPanel(
      # Select variable for y-axis
      # Inputs driving the line chart (x1/y1/z1), same tab condition
      conditionalPanel(condition = "input.ts == 'ls'",
                       selectInput(
                         inputId = "y1",
                         label = "Y-axis:",
                         choices = c("GROSS TAX" = "GROSS_TAX"),
                         selected = "GROSS_TAX"
                       )),
      # Select variable for x-axis
      conditionalPanel(condition = "input.ts == 'ls'&& input.y1 == 'GROSS_TAX'",
                       selectInput(
                         inputId = "x1",
                         label = "X-axis:",
                         choices = c("AVG TOTAL" = "AV_TOTAL"),
                         selected = "AV_TOTAL"
                       ),
                       # Select variable for color
                       selectInput(
                         inputId = "z1",
                         label = "Land Use",
                         choices = c("Land Use" = "LU",
                                     "Year Remod" = "YR_REMOD"),
                         selected = "YR_REMOD"
                       )
      )),
    #),
    # Output
    # mainPanel(plotOutput(outputId = "lineChart"))
    #),
    #mainPanel(
    #plotlyOutput("heat"),
    # plotlyOutput("scatterplot")
    #),
    #verbatimTextOutput("selection")
    #)
    # NOTE(review): the output id "scatterplot" is used twice below
    # (plotlyOutput in tab 'hs' and plotOutput in tab 'ls') -- duplicate
    # output ids in Shiny; confirm this is intended.
    mainPanel(
      tabsetPanel(id = 'ts',
                  tabPanel(title = "Correlation between continuos variables", value = 'hs',
                           plotlyOutput("heat"),
                           plotlyOutput("scatterplot"),
                           verbatimTextOutput("selection"),
                           br()),
                  tabPanel(title = "Tree map for zipcode", value = 'ls',
                           plotOutput(outputId = "lineChart"),
                           plotOutput(outputId = "scatterplot", click = "plot_click"),
                           br())
      )
    )
  )
)
# Define server function required to create the scatterplot
# Server: renders the ggplot scatter, the line chart, the plotly correlation
# heatmap, and a drill-down scatterplot driven by heatmap clicks.
server <- function(input, output) {
  # Create the scatterplot object the plotOutput function is expecting
  # NOTE(review): output$scatterplot is assigned twice in this function
  # (renderPlot here, renderPlotly below); the later renderPlotly assignment
  # overrides this one -- confirm which is intended.
  output$scatterplot <- renderPlot({
    # NOTE(review): these two medians are computed but never used in the plot.
    median_AV_BLDG <- prop_clean %>% group_by(AV_BLDG) %>% summarise(median_AV_BLDG = median(AV_BLDG))
    median_GROSS_TAX <- prop_clean %>% group_by(GROSS_TAX) %>% summarise(median_GROSS_TAX= median(GROSS_TAX))
    # aes_string lets the axis/color variables come from the input widgets
    ggplot(data = prop_clean, aes_string(x = input$x, y = input$y, color = input$z)) +
      geom_point(alpha = 0.5)
  })
  # Line chart of the x1/y1 selections (gross tax trend)
  output$lineChart <- renderPlot({
    ggplot(data = prop_clean, aes_string(
      x = input$x1,
      y = input$y1
    )) +
      geom_line(size = 2) +
      ggtitle("Trends of GrossTax in Boston")
  })
  # Correlation heatmap; source = "heatplot" tags its click events so the
  # drill-down scatterplot below can listen for them.
  output$heat <- renderPlotly({
    plot_ly(x = nms, y = nms, z = correlation,
            key = correlation, type = "heatmap", source = "heatplot") %>%
      layout(xaxis = list(title = ""),
             yaxis = list(title = ""))
  })
  # Echo the most recent heatmap click (or a usage hint)
  output$selection <- renderPrint({
    s <- event_data("plotly_click")
    if (length(s) == 0) {
      "Click on a cell in the heatmap to display a scatterplot"
    } else {
      cat("You selected: \n\n")
      as.list(s)
    }
  })
  # Drill-down: scatter the clicked variable pair with a fitted OLS line
  output$scatterplot <- renderPlotly({
    s <- event_data("plotly_click", source = "heatplot")
    if (length(s)) {
      vars <- c(s[["x"]], s[["y"]])
      # Rename the clicked columns to x/y so the lm formula is fixed
      d <- setNames(prop_clean1[vars], c("x", "y"))
      yhat <- fitted(lm(y ~ x, data = d))
      plot_ly(d, x = ~x) %>%
        add_markers(y = ~y) %>%
        add_lines(y = ~yhat) %>%
        layout(xaxis = list(title = s[["x"]]),
               yaxis = list(title = s[["y"]]),
               showlegend = FALSE)
    } else {
      # No click yet: render an empty placeholder
      plotly_empty()
    }
  })
}
# Create a Shiny app object
shinyApp(ui = ui, server = server)
#DataSet1 <- read.csv(file.choose())
#head(DataSet1)
#DataSet1 <-subset(DataSet1, YR_BUILT >1890)
#DataSet1 <-subset(DataSet1, GROSS_TAX >0)
#selected<-c("C","I")
#DataSet1 <- subset(DataSet1, DataSet1$LU%in%selected)
#ui <- fluidPage(# Sidebar layout with a input and output definitions
# sidebarLayout(
# Inputs
# sidebarPanel(
# Select variable for y-axis
# selectInput(
# inputId = "y",
# label = "Y-axis:",
# choices = c("GROSS TAX" = "GROSS_TAX"),
# selected = "GROSS_TAX"
# ),
# Select variable for x-axis
# selectInput(
# inputId = "x",
# label = "X-axis:",
# choices = c("Year Built" = "YR_BUILT"),
# selected = "YR_BUILT"
#),
# Select variable for color
#selectInput(
# inputId = "z",
#label = "Land Use",
#choices = c("Land Use","C","I"),
# selected = "C"
#)
#),
# Output
#mainPanel(plotOutput(outputId = "lineChart"))
#))
# Define server function required to create the lineChart
#server <- function(input, output) {
# Create the scatterplot object the plotOutput function is expecting
# output$lineChart <- renderPlot({
# ggplot(data = DataSet1, aes_string(
# x = input$x,
# y = input$y
#)) +
# geom_line(size = 2) +
#ggtitle("Trends of GrossTax in Boston")
#})
#}
#abline(lm(AV_LAND~GROSS_TAX))
# Create a Shiny app object
#shinyApp(ui = ui, server = server)
|
05ae8917bc77ba225b3e62e501fbea601a1b50f2
|
d538c8d0eeee04b0b0d8c41ada8aed76771d2e47
|
/Week 3/Week 3/RTest-4.R
|
3d09c33520679ba516722655bcb8dc5cce83817c
|
[] |
no_license
|
zeelat7/StatistiscalAnalysisPart1
|
1cdb90a83f3845465f7cf06a765fd392a9671945
|
53ddd71b2945b2b578df3485e484403edfaf35c9
|
refs/heads/master
| 2020-04-24T05:10:35.536941
| 2019-02-20T18:36:50
| 2019-02-20T18:36:50
| 171,727,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
RTest-4.R
|
# R Test Number 4
# Solve the 3x3 linear system:
#   x - y + z = 1
#   x + y - z = 1
#   x + y + z = 3

# Coefficient matrix, one equation per row
coef_mat <- rbind(c(1, -1,  1),
                  c(1,  1, -1),
                  c(1,  1,  1))
coef_mat
# Right-hand-side constants
rhs <- matrix(c(1, 1, 3), nrow = 3, byrow = TRUE)
rhs
# solve() returns the column vector satisfying coef_mat %*% solution == rhs
solution <- solve(coef_mat, rhs)
solution
# Unpack the solution column into individual scalars
x <- solution[1, 1]
y <- solution[2, 1]
z <- solution[3, 1]
# Substitute back into each equation; should print 1, 1 and 3
x - y + z
x + y - z
x + y + z
|
e706168b61be8587d61b22342ed91c1b08930ac8
|
8b7209ca270699166ea918784205d97b820138cb
|
/rpart_demo.R
|
992982d19789767326606c7542f156c1d547ed7c
|
[] |
no_license
|
horver/big-data-hf
|
bc9db81654111bf286eabb216115a321122f7416
|
ea0141e3d03fe6678470c8dab61720c40e5e662b
|
refs/heads/master
| 2021-08-23T07:12:56.865544
| 2017-12-04T02:53:24
| 2017-12-04T02:53:24
| 109,955,383
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 2,280
|
r
|
rpart_demo.R
|
library("rpart")
library("corrgram")
# Load the Global Terrorism Database extract (semicolon-separated CSV).
data<-read.csv("E:/BME-MSc-2.felev/bigdata/Dropbox/data/globalterrorismdb_0617dist.csv", header = T, sep = ";", skipNul = T)
# correlation diagram (translated from Hungarian)
vars2 <- c("iyear","imonth","iday","extended","country","region", "specificity","vicinity","multiple","success","suicide",
           "attacktype1", "targtype1", "natlty1", "guncertain1", "individual", "weaptype1", "nwound")
# Pairwise-complete correlations, rounded for readability
round(cor(data[,vars2], use="pair"),2)
corrgram(data[,vars2], order = T, lower.panel = panel.shade, upper.panel = panel.pie)
# clustering (translated from Hungarian): k-means with k = 5 on the
# non-missing kill counts
result<-kmeans(data[!is.na(data[,'nkill']),'nkill'],5)
nkill_classes<-cbind(data[!is.na(data[,'nkill']),'nkill'],result$cluster)
result$cluster
# Here we attach the casualty classification to the data (translated from
# Hungarian). Class boundaries are upper-exclusive:
#   nkill < 1 -> 1, < 5 -> 2, < 10 -> 3, < 100 -> 4, otherwise 5;
# rows with missing nkill are marked -1.
borders <- as.matrix(c(1,5,10,100))
# Vectorized replacement for the original per-row nested loop (O(n) instead
# of O(n*k)): findInterval(v, b) counts sorted boundaries <= v, so adding 1
# reproduces exactly the class the loop computed (e.g. nkill = 0 -> 1,
# nkill = 5 -> 3, nkill = 100 -> 5).
classes <- rep(-1, nrow(data))
known <- !is.na(data[, 'nkill'])
classes[known] <- findInterval(data[known, 'nkill'], as.vector(borders)) + 1
# Put the data and their class labels into one table (translated from Hungarian)
newdata<-cbind(data,classes)
# Drop rows that received no classification, i.e. missing nkill (translated)
newdata<-newdata[newdata[,'classes']!=-1,]
# Start training the decision tree (translated): random 80/20 train/test split
tr_idx<-sample(nrow(newdata), nrow(newdata)*0.8)
train<-newdata[tr_idx,]
test<-newdata[-tr_idx,]
# Parameters can be tuned here (translated): fit the classification tree on
# the casualty class using the incident attributes as predictors
tree<-rpart(factor(classes)~eventid+iyear+imonth+iday+extended+country+region+specificity+vicinity+crit1+crit2+crit3+doubtterr+
              multiple+success+suicide+attacktype1+targtype1+natlty1+guncertain1+individual+weaptype1+property+ishostkid+ransom+
              INT_LOG+INT_IDEO+INT_MISC+INT_ANY+targsubtype1+weapsubtype1+nwound+propextent+alternative+attacktype2+attacktype3+
              targtype2+targsubtype2+natlty2+targtype3+targsubtype3+natlty3+guncertain2+guncertain3+nperps+claimed+claimmode+claim2+
              claimmode2+claim3+claimmode3+compclaim+weaptype2+weapsubtype2+weaptype3+weapsubtype3+weaptype4+weapsubtype4+nwoundte+
              nhostkid+hostkidoutcome+ndays+nreleased,data=train)
# Draw the tree (translated)
plot(tree)
# Add text labels to the plot (translated)
text(tree,cex=.8)
|
9d02b538424493c6952cc27968f213fd5649163a
|
a74b1a6a3f69fb6461b1d5fd616fd91adbc276c1
|
/scripts/channel-messages-scraper.R
|
2eeb557e5d4273abe44ef2fc490bdbe04ca27bd1
|
[] |
no_license
|
EndenDragon/INFO201-Group-Project
|
df84eab8f2e2759c1a6be972e631a37eaba0f259
|
343433d8d6316170adfff1318e6cec829c98059e
|
refs/heads/master
| 2020-03-15T01:56:33.006394
| 2018-05-31T00:44:47
| 2018-05-31T00:44:47
| 131,905,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,102
|
r
|
channel-messages-scraper.R
|
# This file scrapes the channel messages and saves it to the data folder
library(httr)
library(jsonlite)
library(dplyr)
# Defines discord_token (kept out of version control)
source("./scripts/api-keys.R")
# All the messages fetched
channel_messages <- data.frame()
# Channel ID to scrape
# 366123119464939534 - #rules
# 369356771804053515 - #announcements
# 362689877751627777 - #general
# 362691650981724160 - #questions
channel_id <- "362689877751627777"
# Discord requires a header that contains the token/password
discord_headers <- add_headers(
  "Authorization" = discord_token
)
# Stores the last message id, used in the while loop
# to know which messages to get before the msg id
last_msg_id <- ""
# While there are messages to fetch
# Pagination: keep fetching while the last page was full (100 messages).
# NOTE(review): if the channel's total message count is an exact multiple of
# 100, the next request returns an empty body and tail(...)$id / flatten()
# may fail or loop forever -- confirm an explicit empty-response break is not
# needed.
while (nrow(channel_messages) %% 100 == 0) {
  # Creates the endpoint to discord api
  discord_chanmsg_endpoint <- paste0(
    "https://discordapp.com/api/v6",
    "/channels/",
    channel_id,
    "/messages",
    "?limit=100"
  )
  # If last message id is not none, append
  # a "before" query parameter
  if (last_msg_id != "") {
    discord_chanmsg_endpoint <- paste0(
      discord_chanmsg_endpoint,
      "&before=",
      last_msg_id
    )
  }
  # Send it out the door
  discord_response <- GET(
    discord_chanmsg_endpoint,
    discord_headers
  )
  # Parse the response as json
  discord_data <- fromJSON(content(discord_response, "text"))
  # Save the last message id
  last_msg_id <- tail(discord_data, 1)$id
  # Flatten the json df
  discord_data <- flatten(discord_data)
  # Merge the global channel messages with the newly got data
  # NOTE(review): growing the data frame each iteration is O(n^2); consider
  # accumulating pages in a list and binding once after the loop.
  channel_messages <- bind_rows(channel_messages, discord_data)
}
# Flatten it again for good measures
channel_messages <- flatten(channel_messages)
# Get the rows we need
filtered_df <- channel_messages %>%
  mutate(
    id = id,
    timestamp = timestamp,
    edited_timestamp = edited_timestamp,
    author_id = author.id,
    content = content
  ) %>%
  select(
    id, timestamp, edited_timestamp, author_id, content
  )
# Write that on disk
write.csv(
  filtered_df,
  paste0(
    "data/messages_",
    channel_id,
    ".csv"
  ),
  row.names = FALSE
)
|
e5b4dc7cb4c8f3e20170664293e2c0c36db03dbe
|
6cfede497caf67b5a1e4745b56b029e5ccce128f
|
/Unfiled/SR14Forecast/datasourceid35/script/hhinc_by_category_plots.R
|
2ca05b8f184ff19fe34b3084a03c86ebd6196111
|
[] |
no_license
|
SANDAG/QA
|
3bce623e269c745cd7c60933be8d81bab14a0e27
|
37edb55a7e79f205d44b67eb18e6474689268477
|
refs/heads/master
| 2023-08-19T10:47:05.145384
| 2023-08-17T15:57:01
| 2023-08-17T15:57:01
| 138,326,659
| 6
| 3
| null | 2023-02-24T22:07:34
| 2018-06-22T16:48:58
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 10,543
|
r
|
hhinc_by_category_plots.R
|
#hhinc by category plots
# Forecast data source id; substituted for the "ds_id" token in the SQL below.
datasource_id=35
# Install any packages in `pkg` that are not yet installed, then attach them
# all with require(). Returns a named logical vector of attach successes
# (the sapply result), matching the original behavior.
pkgTest <- function(pkg){
  not_installed <- setdiff(pkg, installed.packages()[, "Package"])
  if (length(not_installed) > 0) {
    install.packages(not_installed, dep = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
packages <- c("data.table", "ggplot2", "scales", "sqldf", "rstudioapi", "RODBC", "dplyr", "reshape2",
              "stringr","gridExtra","grid","lattice")
pkgTest(packages)
# Run relative to this script's location (requires an active RStudio session)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("../Queries/readSQL.R")
#change all factors to character for ease of coding
options(stringsAsFactors=FALSE)
# Pull household-income facts from the demographic warehouse, substituting
# the configured datasource id into the query template.
channel <- odbcDriverConnect('driver={SQL Server}; server=sql2014a8; database=demographic_warehouse; trusted_connection=true')
hh_sql = getSQL("../Queries/Household Income (HHINC).sql")
hh_sql <- gsub("ds_id", datasource_id, hh_sql)
hh<-sqlQuery(channel,hh_sql)
odbcClose(channel)
#strip extra characters from cpa names
hh$geozone[hh$geotype =="region"]<- "Region"
hh$geozone <- gsub("\\*","",hh$geozone)
hh$geozone <- gsub("\\-","_",hh$geozone)
hh$geozone <- gsub("\\:","_",hh$geozone)
#
# # rename San Diego region to 'San Diego Region' and then aggregate
# levels(hh$geozone) <- c(levels(hh$geozone), "San Diego Region")
# hh$geozone[hh$geotype=='region'] <- 'San Diego Region'
# sd = subset(hh,geozone=='San Diego')
# sd2 = subset(hh,geozone=='San Diego Region')
# #write.csv(sd,'cityofsandiego.csv')
# #write.csv(sd2,'regionofsandiego.csv')
# Total households per year/geozone; used as the denominator below.
# NOTE(review): assumes the fact query returns a household-count column
# literally named `hh` -- confirm against the SQL template.
Geo_totals<-aggregate(hh~yr_id+geozone, data=hh, sum)
hh$tot_pop<-Geo_totals[match(paste(hh$yr_id, hh$geozone),paste(Geo_totals$yr_id, Geo_totals$geozone)),3]
# Zero totals would divide to Inf; blank them instead
hh$tot_pop[hh$tot_pop==0] <- NA
hh$percent_income = hh$hh/hh$tot_pop * 100
#write.csv(Geo_totals,'geototals.csv')
# specify order of levels for plotting
hh$name <- factor(hh$name, levels = c("Less than $15,000",
                                      "$15,000 to $29,999",
                                      "$30,000 to $44,999",
                                      "$45,000 to $59,999",
                                      "$60,000 to $74,999",
                                      "$75,000 to $99,999",
                                      "$100,000 to $124,999",
                                      "$125,000 to $149,999",
                                      "$150,000 to $199,999",
                                      "$200,000 or more"))
# Collapse the 10 detailed income groups (ids 11-20) into 5 broader bands
hh$income_id2 <-ifelse(hh$income_group_id>=11 &hh$income_group_id<=12, '1',
                       ifelse(hh$income_group_id>=13 &hh$income_group_id<=14, '2',
                              ifelse(hh$income_group_id>=15 &hh$income_group_id<=16, '3',
                                     ifelse(hh$income_group_id>=17 &hh$income_group_id<=18, '4',
                                            ifelse(hh$income_group_id>=19 &hh$income_group_id<=20, '5', NA)))))
hh$name2[hh$income_id2=="1"]<- "Less than $30,000"
hh$name2[hh$income_id2=="2"]<- "$30,000 to $59,999"
hh$name2[hh$income_id2=="3"]<- "$60,000 to $99,999"
hh$name2[hh$income_id2=="4"]<- "$100,000 to $149,999"
hh$name2[hh$income_id2=="5"]<- "$150,000 or more"
hh$name2<- as.factor(hh$name2)
hh$name2<- factor(hh$name2, levels = c("Less than $30,000",
                                       "$30,000 to $59,999","$60,000 to $99,999", "$100,000 to $149,999", "$150,000 or more"))
# Re-aggregate households into the 5 broad bands and recompute shares
Cat_agg<-aggregate(hh~yr_id+geozone+name2+geotype+income_id2, data=hh, sum)
Cat_agg$tot_pop<-Geo_totals[match(paste(Cat_agg$yr_id, Cat_agg$geozone),paste(Geo_totals$yr_id, Geo_totals$geozone)),3]
Cat_agg$tot_pop[Cat_agg$tot_pop==0] <- NA
Cat_agg$percent_income = Cat_agg$hh/Cat_agg$tot_pop * 100
Cat_agg$percent_income = round(Cat_agg$percent_income, digits = 1)
# Build a "y<year>" factor label for each increment
Cat_agg$year<- "y"
Cat_agg$yr <- as.factor(paste(Cat_agg$year, Cat_agg$yr, sep = ""))
# Split by geography type for the three plotting sections below
hh_jur = subset(Cat_agg,geotype=='jurisdiction')
hh_cpa = subset(Cat_agg,geotype=='cpa')
hh_region = subset(Cat_agg,geotype=='region')
# One color per income band, light -> dark with rising income
colours = c('#ffeda0','#fd8d3c','#bd0026','#800026','#561B07')
#Region plot and table
# Each section below renders a line chart of income-band shares over time,
# stacks a summary table under it with grid.arrange, and saves the combined
# figure as a PNG.
maindir = dirname(rstudioapi::getSourceEditorContext()$path)
results<-"plots\\hhinc\\"
ifelse(!dir.exists(file.path(maindir,results)), dir.create(file.path(maindir,results), showWarnings = TRUE, recursive=TRUE),0)
plotdat = hh_region
plot<-ggplot(hh_region,aes(x=yr_id, y=percent_income, colour=name2)) +
  geom_line(size=1)+ geom_point(size=3, aes(colour=name2)) +
  scale_y_continuous(label=comma,limits=c(0.0,41.0))+
  labs(title=paste("Household Income: Proportion of Households by Category ds_id= ", datasource_id, '\n Region',sep=''),
       y="Proportion of Households", x="Year",
       caption="Sources: demographic warehouse: fact.household_income
       \nNote:Out of range data may not appear on the plot. Refer to the table below for those related data results.") +
  scale_colour_manual(values=colours) +
  #sp+scale_colour_manual(values=cbp1) +
  theme_bw(base_size = 12) + theme(plot.title = element_text(hjust = 0.5)) +
  #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  theme(legend.position = "bottom",
        legend.title=element_blank(),
        plot.caption = element_text(size = 9))
#ggsave(plot, file= paste(results, '2_hhinc',datasource_id,".png", sep=''))#, scale=2)
# Wide table: one row per increment, one column per income band
output_table<-data.frame(plotdat$yr_id,plotdat$tot_pop,plotdat$name2,plotdat$percent_income)
output_table <- dcast(output_table,plotdat.yr_id+plotdat.tot_pop~plotdat.name2,value.var = "plotdat.percent_income")
setnames(output_table, old=c("plotdat.yr_id","plotdat.tot_pop"),new=c("Increment","Tot HH"))
tt <- ttheme_default(base_size=9)
tbl <- tableGrob(output_table, rows=NULL, theme=tt)
# Layout matrix: plot occupies the top 3/5, the table the bottom 2/5
lay <- rbind(c(1,1,1,1,1),
             c(1,1,1,1,1),
             c(1,1,1,1,1),
             c(2,2,2,2,2),
             c(2,2,2,2,2))
output<-grid.arrange(plot,tbl,as.table=TRUE,layout_matrix=lay)
ggsave(output, file= paste(results, 'hhinc region',datasource_id, ".png", sep=''))#, scale=2)
##jurisdiction plots
maindir = dirname(rstudioapi::getSourceEditorContext()$path)
results<-"plots\\hhinc\\jur\\"
ifelse(!dir.exists(file.path(maindir,results)), dir.create(file.path(maindir,results), showWarnings = TRUE, recursive=TRUE),0)
# Map each jurisdiction name to a numeric id used for subsetting below
jur_list<- c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19)
jur_list2<- c("Carlsbad","Chula Vista","Coronado","Del Mar","El Cajon","Encinitas","Escondido","Imperial Beach","La Mesa","Lemon Grove",
              "National City","Oceanside","Poway","San Diego","San Marcos","Santee","Solana Beach","Vista","Unincorporated")
citynames <- data.frame(jur_list, jur_list2)
hh_jur$jurisdiction_id<-citynames[match(hh_jur$geozone, citynames$jur_list2),1]
head(hh_jur)
# One plot + table PNG per jurisdiction
for(i in 1:length(jur_list)){
  plotdat = subset(hh_jur, hh_jur$jurisdiction_id==jur_list[i])
  plot<-ggplot(plotdat,aes(x=yr_id, y=percent_income, colour=name2)) +
    geom_line(size=1) +
    geom_point(size=3, aes(colour=name2)) +
    scale_y_continuous(label=comma,limits=c(0.0,41.0))+
    labs(title=paste(jur_list2[i],"\nHousehold Income: Proportion of Households by Category\n datasource_id ",datasource_id,sep=''),
         y="Proportion of Households", x="Year",
         caption="Sources: demographic warehouse: fact.household_income
         \nNote: Out of range data may not appear on the plot. Refer to the table below for those related data results.") +
    scale_colour_manual(values=colours) +
    #sp+scale_colour_manual(values=cbp1) +
    theme_bw(base_size = 12) + theme(plot.title = element_text(hjust = 0.5)) +
    #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    theme(legend.position = "bottom",
          legend.title=element_blank(),
          plot.caption = element_text(size = 9))
  #ggsave(plot, file= paste(results, '2_hhinc',datasource_id,".png", sep=''))#, scale=2)
  output_table<-data.frame(plotdat$yr_id,plotdat$name2,plotdat$tot_pop,plotdat$percent_income)
  output_table <- dcast(output_table,plotdat.yr_id+plotdat.tot_pop~plotdat.name2,value.var = "plotdat.percent_income")
  setnames(output_table, old=c("plotdat.yr_id","plotdat.tot_pop"),new=c("Increment","Tot HH"))
  tt <- ttheme_default(base_size=9)
  tbl <- tableGrob(output_table, rows=NULL, theme=tt)
  lay <- rbind(c(1,1,1,1,1),
               c(1,1,1,1,1),
               c(1,1,1,1,1),
               c(2,2,2,2,2),
               c(2,2,2,2,2))
  output<-grid.arrange(plot,tbl,as.table=TRUE,layout_matrix=lay)
  ggsave(output, file= paste(results, 'hhinc_', jur_list2[i],datasource_id, ".png", sep=''))#, scale=2)
}
##cpa plots
maindir = dirname(rstudioapi::getSourceEditorContext()$path)
results<-"plots\\hhinc\\cpa\\"
ifelse(!dir.exists(file.path(maindir,results)), dir.create(file.path(maindir,results), showWarnings = TRUE, recursive=TRUE),0)
cpa_list = unique(hh_cpa[["geozone"]])
head(hh_cpa)
# One plot + table PNG per community planning area
for(i in 1:length(cpa_list)){
  plotdat = subset(hh_cpa, hh_cpa$geozone==cpa_list[i])
  plot<-ggplot(plotdat,aes(x=yr_id, y=percent_income, colour=name2)) +
    geom_line(size=1) +
    geom_point(size=3, aes(colour=name2)) +
    scale_y_continuous(limits = c(0.0,41.0),label=comma)+
    labs(title=paste(cpa_list[i],"\nHousehold Income: Proportion of Households by Category\n datasource id ", datasource_id,sep=''),
         y="Proportion of Households", x="Year",
         caption="Sources: demographic warehouse: fact.household_income
         \nNote: Out of range data may not appear on the plot. Refer to the table below for those related data results.") +
    scale_colour_manual(values=colours) +
    #sp+scale_colour_manual(values=cbp1) +
    theme_bw(base_size = 12) + theme(plot.title = element_text(hjust = 0.5)) +
    #theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    theme(legend.position = "bottom",
          legend.title=element_blank(),
          plot.caption = element_text(size = 9))
  #ggsave(plot, file= paste(results, '2_hhinc',datasource_id,".png", sep=''))#, scale=2)
  output_table<-data.frame(plotdat$yr_id,plotdat$name2,plotdat$tot_pop,plotdat$percent_income)
  output_table <- dcast(output_table,plotdat.yr_id+plotdat.tot_pop~plotdat.name2,value.var = "plotdat.percent_income")
  setnames(output_table, old=c("plotdat.yr_id","plotdat.tot_pop"),new=c("Increment","Tot HH"))
  tt <- ttheme_default(base_size=9)
  tbl <- tableGrob(output_table, rows=NULL, theme=tt)
  lay <- rbind(c(1,1,1,1,1),
               c(1,1,1,1,1),
               c(1,1,1,1,1),
               c(2,2,2,2,2),
               c(2,2,2,2,2))
  output<-grid.arrange(plot,tbl,as.table=TRUE,layout_matrix=lay)
  ggsave(output, file= paste(results, 'hhinc_', cpa_list[i],datasource_id, ".png", sep=''))#, scale=2)
}
|
c7ad03774af28df8874bae0702e3e39e13509569
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkRcAddWidgetClassStyle.Rd
|
1c8fca5f8a0e9357d83feb99985b13d2022ff402
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 726
|
rd
|
gtkRcAddWidgetClassStyle.Rd
|
\alias{gtkRcAddWidgetClassStyle}
\name{gtkRcAddWidgetClassStyle}
\title{gtkRcAddWidgetClassStyle}
\description{
Adds a \code{\link{GtkRcStyle}} that will be looked up by a match against
the widget's class pathname. This is equivalent to a:
\code{
widget_class PATTERN style STYLE
}
statement in a RC file.
\strong{WARNING: \code{gtk_rc_add_widget_class_style} is deprecated and should not be used in newly-written code.}
}
\usage{gtkRcAddWidgetClassStyle(object, pattern)}
\arguments{
\item{\code{object}}{[\code{\link{GtkRcStyle}}] the \code{\link{GtkRcStyle}} to use for widgets matching \code{pattern}}
\item{\code{pattern}}{[character] the pattern}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
9339913a96be1c2351a530de9efe4c028c0d640e
|
814f85cb23505aaee9c4680573b44574a493f1e7
|
/motives.R
|
e7b56ec8b68e63a0c614702167a4888dadc57210
|
[] |
no_license
|
shubh24/HomicideOffender
|
61f45eb064f5324914d20e2d3373fca43d08b09a
|
adafb4c2e0cf7de4c80d35dd17e73317e93b1f7f
|
refs/heads/master
| 2021-01-12T07:17:19.008296
| 2016-12-22T11:47:42
| 2016-12-22T11:47:42
| 76,934,684
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,017
|
r
|
motives.R
|
library(ggplot2)
df = read.csv("Serial Killers Data.csv", stringsAsFactors = TRUE)
# Keep only the motive code, type, methods, and year-of-death columns
m_df = df[,c("Code", "Type", "MethodDescription", "DateDeath")]
# Extract the 4-digit year from date strings of varying length
# (8/9/10 characters; the year is the last 4 characters in each case).
m_df$DateDeath = as.character(m_df$DateDeath)
m_df$DateDeath[nchar(m_df$DateDeath) == 8] = substr(m_df$DateDeath[nchar(m_df$DateDeath) == 8], 5, 8)
m_df$DateDeath[nchar(m_df$DateDeath) == 9] = substr(m_df$DateDeath[nchar(m_df$DateDeath) == 9],6,9)
m_df$DateDeath[nchar(m_df$DateDeath) == 10] = substr(m_df$DateDeath[nchar(m_df$DateDeath) == 10], 7,10)
m_df$DateDeath = as.numeric(m_df$DateDeath)
m_df = subset(m_df, m_df$DateDeath <= 2016 & m_df$DateDeath >= 1980) #removing noise
# Motive labels; position in this vector corresponds to the motive code
motives_list = c("Financial Gain","Attention","Enjoyment","Anger","Mental Illness","cult","Avoid arrest","Gang activity","Convenience","Wildwest Outlaw", "Multiple Motivations")
#Separating methods
# MethodDescription is a comma-separated list; expand it into one 0/1
# indicator column per method name.
m_df$MethodDescription = as.character(m_df$MethodDescription)
m_df$methods = strsplit(m_df$MethodDescription, ",")
# NOTE(review): adding columns to a data frame inside a row loop is O(n^2);
# works here but slow for large inputs.
for (i in 1:nrow(m_df)){
  for (j in m_df[i, "methods"][[1]]){
    if (!(j == "")){
      if (!(j %in% colnames(m_df))){
        m_df[,j] = 0
      }
      m_df[i,j] = 1
    }
  }
}
# Year becomes a factor for grouping; drop the now-expanded method columns
m_df$DateDeath = as.factor(m_df$DateDeath)
m_df$MethodDescription = NULL
m_df$methods = NULL
# Expand the motive code into one 0/1 indicator column per motive label.
# NOTE(review): as.integer on Code assumes it holds (or maps 1:1 to) the
# numeric motive code; if Code is a factor this yields level indices --
# confirm they align with motives_list.
for (i in 1:nrow(m_df)){
  code_int = as.integer(m_df$Code[i])
  if (!is.na(code_int)){
    col_name = motives_list[code_int]
    if (!(col_name %in% colnames(m_df))){
      m_df[,col_name] = 0
    }
    m_df[i,col_name] = 1
  }
}
#Correlation of motives with weapons
# Motive indicator columns (appended after the first 23 columns) vs. the
# weapon/method indicator columns (columns 4..22).
cor(m_df[-c(1:23)], m_df[,4:22])
# BUG FIX: the original built `motives` from m_df$code_int, but code_int was
# only a loop-local variable above and never a column of m_df, so
# cbind(NULL, ...) produced a single column and the two-name colnames()
# call errored. Derive the integer motive code from the Code column instead.
# NOTE(review): if Code was read as a factor (stringsAsFactors = TRUE), this
# yields level indices -- confirm they match the intended motive codes.
motives = data.frame(Motive = as.integer(m_df$Code), Year = m_df$DateDeath)
motives$Motive = as.factor(motives$Motive)
motives$freq = 1
# Number of reported homicides per (Year, Motive)
motives_freq = aggregate(freq ~ Year+Motive, data = motives, FUN = sum)
ggplot(motives_freq, aes(Year, freq, group = Motive, col = Motive)) +
  geom_line()+
  ggtitle("Motive Variance over the Years") +
  labs(x = "Years", y = "Number of reported homicides")+
  scale_color_hue(labels = motives_list)+
  theme_bw()
|
b4b6bacb9e711191d9d5e9effc1239fdcb177f9f
|
e125e0841d363410954ddac03d447841202a3c01
|
/R/shapley.R
|
0489bce55eeb2704e24d57d0753c98f40473d423
|
[
"MIT"
] |
permissive
|
laurencelin/SHAPforxgboost
|
d464fc414336cb7316afd3044c4103290528191a
|
10eff9b439f1d6b5fb6abf3027da44fe4e3b106c
|
refs/heads/master
| 2023-04-19T05:10:07.823583
| 2021-05-10T20:40:35
| 2021-05-10T20:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
shapley.R
|
# return matrix of shap score and mean ranked score list
# Computes per-observation SHAP contributions from a fitted xgboost model and
# ranks features by mean absolute SHAP value (descending).
# Args:
#   xgb_model:   a fitted xgboost model (default relies on global xgb_mod).
#   shap_approx: passed to predict() as approxcontrib.
#   X_train:     the model matrix to explain (default relies on a global).
# Returns a list with:
#   shap_score:      data.table of SHAP values (BIAS column removed).
#   mean_shap_score: named numeric vector, features in decreasing importance.
# NOTE(review): require() inside a function attaches packages as a side
# effect; the globals used as defaults make this function environment-dependent.
shap.score.rank <- function(xgb_model = xgb_mod, shap_approx = TRUE,
                            X_train = mydata$train_mm){
  require(xgboost)
  require(data.table)
  # predcontrib = TRUE returns one SHAP contribution per feature plus BIAS
  shap_contrib <- predict(xgb_model, X_train,
                          predcontrib = TRUE, approxcontrib = shap_approx)
  shap_contrib <- as.data.table(shap_contrib)
  # Drop the bias (expected value) column; only feature contributions remain
  shap_contrib[,BIAS:=NULL]
  cat('make SHAP score by decreasing order\n\n')
  # Mean |SHAP| per feature, sorted descending = global importance ranking
  mean_shap_score <- colMeans(abs(shap_contrib))[order(colMeans(abs(shap_contrib)), decreasing = T)]
  return(list(shap_score = shap_contrib,
              mean_shap_score = (mean_shap_score)))
}
# Linearly rescale a numeric vector onto [0, 1]: the minimum maps to 0 and
# the maximum to 1. NA values are ignored when computing the range but are
# preserved in the output.
std1 <- function(x){
  rng <- range(x, na.rm = TRUE)
  (x - rng[1]) / (rng[2] - rng[1])
}
# prep shap data
# Reshapes SHAP scores plus the raw feature values into the long data.table
# expected by plot.shap.summary().
# Args:
#   shap:    result of shap.score.rank() (list with shap_score and
#            mean_shap_score); default relies on a global shap_result.
#   X_train: the feature matrix the SHAP values were computed on.
#   top_n:   number of top-ranked features to keep; defaults to all.
# Returns a data.table keyed by `variable` with columns:
#   value (SHAP), rfvalue (raw feature value), stdfvalue (feature value
#   rescaled to [0,1] via std1), mean_value (mean |SHAP| per feature).
shap.prep <- function(shap = shap_result, X_train = mydata$train_mm, top_n){
  require(ggforce)
  # descending order
  if (missing(top_n)) top_n <- dim(X_train)[2] # by default, use all features
  if (!top_n%in%c(1:dim(X_train)[2])) stop('supply correct top_n')
  require(data.table)
  # Keep only the top_n features, ordered by importance rank
  shap_score_sub <- as.data.table(shap$shap_score)
  shap_score_sub <- shap_score_sub[, names(shap$mean_shap_score)[1:top_n], with = F]
  shap_score_long <- melt.data.table(shap_score_sub, measure.vars = colnames(shap_score_sub))
  # feature values: the values in the original dataset
  fv_sub <- as.data.table(X_train)[, names(shap$mean_shap_score)[1:top_n], with = F]
  # standardize feature values
  fv_sub_long <- melt.data.table(fv_sub, measure.vars = colnames(fv_sub))
  fv_sub_long[, stdfvalue := std1(value), by = "variable"]
  # SHAP value: value
  # raw feature value: rfvalue;
  # standarized: stdfvalue
  names(fv_sub_long) <- c("variable", "rfvalue", "stdfvalue" )
  # Column-bind works because both long tables preserve the same row order
  shap_long2 <- cbind(shap_score_long, fv_sub_long[,c('rfvalue','stdfvalue')])
  shap_long2[, mean_value := mean(abs(value)), by = variable]
  setkey(shap_long2, variable)
  return(shap_long2)
}
# SHAP summary ("beeswarm") plot: one sina strip per feature, points colored
# by the standardized feature value, with the mean |SHAP| printed per row.
# Args:
#   data_long: long-format data.table from shap.prep() (columns: variable,
#              value, stdfvalue, mean_value).
# Returns a ggplot object.
# NOTE(review): the name follows S3 plot.<class> convention but this is a
# plain function, not an S3 method for class "shap".
plot.shap.summary <- function(data_long){
  # Symmetric x-limits so positive/negative SHAP impact reads evenly
  x_bound <- max(abs(data_long$value))
  require('ggforce') # for `geom_sina`
  plot1 <- ggplot(data = data_long)+
    coord_flip() +
    # sina plot:
    geom_sina(aes(x = variable, y = value, color = stdfvalue)) +
    # print the mean absolute value:
    geom_text(data = unique(data_long[, c("variable", "mean_value"), with = F]),
              aes(x = variable, y=-Inf, label = sprintf("%.3f", mean_value)),
              size = 3, alpha = 0.7,
              hjust = -0.2,
              fontface = "bold") + # bold
    # # add a "SHAP" bar notation
    # annotate("text", x = -Inf, y = -Inf, vjust = -0.2, hjust = 0, size = 3,
    #          label = expression(group("|", bar(SHAP), "|"))) +
    scale_color_gradient(low="#FFCC33", high="#6600CC",
                         breaks=c(0,1), labels=c("Low","High")) +
    theme_bw() +
    theme(axis.line.y = element_blank(), axis.ticks.y = element_blank(), # remove axis line
          legend.position="bottom") +
    geom_hline(yintercept = 0) + # the vertical line
    scale_y_continuous(limits = c(-x_bound, x_bound)) +
    # reverse the order of features
    scale_x_discrete(limits = rev(levels(data_long$variable))
    ) +
    labs(y = "SHAP value (impact on model output)", x = "", color = "Feature value")
  return(plot1)
}
# Horizontal bar chart of the top_n most important features by mean |SHAP|.
# Args:
#   shap_result: result of shap.score.rank() (uses mean_shap_score, which is
#                already sorted in decreasing order).
#   top_n:       number of features to display (default 10).
# Returns a ggplot object.
# NOTE(review): uses tibble() without a visible library(tibble)/tidyverse
# call in this file -- confirm it is attached by the calling script.
var_importance <- function(shap_result, top_n=10)
{
  var_importance=tibble(var=names(shap_result$mean_shap_score), importance=shap_result$mean_shap_score)
  # mean_shap_score is pre-sorted, so the first top_n rows are the top features
  var_importance=var_importance[1:top_n,]
  ggplot(var_importance, aes(x=reorder(var,importance), y=importance)) +
    geom_bar(stat = "identity") +
    coord_flip() +
    theme_light() +
    theme(axis.title.y=element_blank())
}
|
ddacf275532495507031cf43522c158b5707f1da
|
0329677920e29e68778c623b05bf4ca69e528c41
|
/Part 1.2 - test/simple linear regression/Transformation/NORMALITY TRANSFORMATION.R
|
52bf72a4e9045ed2a1d56e58d9354738c91a67ab
|
[] |
no_license
|
celestialized/Machine-Learning
|
b2075b83139f66bc31c02c64cfe27dfbf19e4ab6
|
df30af31f04d03d9796974daf82373436fb6460e
|
refs/heads/master
| 2021-09-21T16:55:51.432572
| 2018-08-29T20:33:13
| 2018-08-29T20:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68
|
r
|
NORMALITY TRANSFORMATION.R
|
# NORMALITY TRANSFORMATION
# if normality assumption does not hold
|
98275c89c7683626b7fc09339287862f72e52c23
|
644a786c1838f3d109cbaa1cf60ba68d54e87703
|
/Blog 16- Dataviz guidelines/guidelines.R
|
f7e5a1180364b87edbc0a3c4b9e04cf87659cb44
|
[] |
no_license
|
j450h1/Blog-post
|
95a9872ea1099d733562fea7a273849ea63bfcbc
|
3a3368e8fb0ba7e58e9fd732eb3bca784fcf9ce4
|
refs/heads/master
| 2023-06-28T23:28:14.541786
| 2021-07-23T06:27:31
| 2021-07-23T06:27:31
| 387,708,069
| 0
| 0
| null | 2021-07-20T07:23:04
| 2021-07-20T07:23:03
| null |
UTF-8
|
R
| false
| false
| 5,350
|
r
|
guidelines.R
|
library(ggplot2)
library(tidyverse)
library(patchwork)
library(scales)
library(magrittr)
# read data file
# Life expectancy by country/year (Our World in Data style export)
age <- read.csv("life-expectancy.csv")
# rename columns
age <- age %>% rename(Country = Entity)
# countries of G8 summit
G8 <- c("Canada","France","Germany","Italy",
        "Japan","Russia","United Kingdom","United States")
# data for G8 summit members
G8_2019 <- age %>%
  filter(Country %in% G8, Year == 2019) %>%
  select(Country,Life.expectancy)
# top 10 countries with high life expectancy
# (full 2019 ranking, descending; only the head is used below)
top_10 <- age %>% filter(Year == 2019) %>% arrange(desc(Life.expectancy)) %>%
  select(Country,Life.expectancy)
# bottom 10 countries with lowest life expectancy
bot_10 <- age %>% filter(Year == 2019) %>% arrange(Life.expectancy) %>%
  select(Country,Life.expectancy)
# creating dataframe of 20 rows
age_extreme <- rbind(top_10[1:10,],bot_10[1:10,])
# creating dataframe of 10 rows
age_extreme_1 <- rbind(top_10[1:5,],bot_10[1:5,])
# guideline 1
# Bar chart vs. dot plot for the same data; patchwork's `+` places them
# side by side, and each figure is saved via ggsave(last_plot()).
p1 <- age_extreme_1 %>% ggplot(aes(Country,Life.expectancy)) +
  geom_bar(stat = "identity") + ylab("Life expectancy") +
  scale_y_continuous(limits = c(0,100))
p2 <- age_extreme_1 %>% ggplot(aes(Country,Life.expectancy)) +
  geom_point(col="black",size=3) + theme_minimal() +
  scale_y_continuous(limits = c(0,100)) + ylab("Life expectancy")
p1+p2
ggsave("fig1.jpg", plot = last_plot())
# guideline 2
# Rotated x labels vs. flipped coordinates for readable category names
p3 <- age_extreme_1 %>% ggplot(aes(Country,Life.expectancy)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.3, hjust=1)) +
  ylab("Life expectancy") + scale_y_continuous(limits = c(0,100))
p4 <- age_extreme_1 %>% ggplot(aes(Country,Life.expectancy)) +
  geom_bar(stat = "identity") +
  ylab("Life expectancy") +
  coord_flip() +
  scale_y_continuous(limits = c(0,100)) + theme_minimal()
p3+p4
ggsave("fig2.jpg", plot = last_plot())
# guideline 3
# Unsorted vs. fct_reorder-sorted categories, for bars and for dots
p5 <- age_extreme %>%
  ggplot(aes(Country,Life.expectancy)) +
  geom_bar(stat="identity") + coord_flip() +
  ylab("Life expectancy") +
  scale_y_continuous(limits = c(0,100)) + theme_minimal()
p6 <- age_extreme %>%
  ggplot(aes(Country,Life.expectancy)) +
  geom_point(col="black") +
  coord_flip() + ylab("Life expectancy") +
  scale_y_continuous(limits = c(0,100)) + theme_minimal()
p7 <- age_extreme %>%
  ggplot(aes(fct_reorder(Country,Life.expectancy),Life.expectancy)) +
  geom_bar(stat="identity") +
  coord_flip() +
  ylab("Life expectancy") +
  xlab("Country") +
  scale_y_continuous(limits = c(0,100)) +
  theme_minimal()
p8 <- age_extreme %>%
  ggplot(aes(Life.expectancy,fct_reorder(Country,Life.expectancy))) +
  geom_point(col="black") +
  scale_x_continuous(limits = c(0,100)) + xlab("Life expectancy") +
  ylab("Country") +
  theme_minimal()
(p5+p7)
ggsave("fig3_1.jpg", plot = last_plot())
(p6+p8)
ggsave("fig3_2.jpg", plot = last_plot())
# guideline 4
UK_age <- age %>% filter(Country=="United Kingdom") %>%
select(Year,Life.expectancy)
arrow <- data.frame(x1=c(1907,1925),
y1=c(75,25),
x2=c(1916,1919),
y2=c(75,25))
p9 <- UK_age %>% ggplot(aes(Year,Life.expectancy)) +
geom_line(col="black") +
xlim(xmin=1850,xmax=2019) +
ylim(ymin=0,ymax=100) +
annotate(geom = "rect", xmin = 1914, xmax = 1918, ymin = -Inf, ymax = Inf,
fill = "palegreen", alpha = 0.5) +
annotate(geom = "rect", xmin = 1918, xmax = 1920, ymin = -Inf, ymax = Inf,
fill = "orange", alpha = 0.5) +
ylab("Life expectancy") +
theme_minimal() +
annotate("text", x = 1890, y = 75, label = "World War I",col="black") +
annotate("text", x = 1940, y = 25, label = "Spanish Flu",col="black") +
geom_segment(data=arrow, aes(x = x1, y = y1, xend = x2, yend = y2),
arrow = arrow(length = unit(0.08, "inch")),color="black",size=0.5)
ggsave("fig4.jpg", plot = last_plot())
# reading GDP data
GDP_age <- read.csv("life-expectancy-vs-gdp-per-capita.csv")
# filtering data
GDP_age_2018 <- GDP_age %>%
filter(Year==2018) %>%
select(Entity,Life.expectancy,GDP.per.capita)
# remove NA
GDP_age_2018 %<>% na.omit()
# creating label
label <- GDP_age_2018 %>% filter(Life.expectancy == min(Life.expectancy)|
GDP.per.capita == max(GDP.per.capita))
GDP_age_2018 %>% ggplot(aes(GDP.per.capita,Life.expectancy)) +
geom_point(alpha=0.5) + theme_minimal() +
ylim(ymin=0,ymax=100) +
ylab("Life expectancy") +
xlab("GDP per capita") +
geom_point(data=label, aes(GDP.per.capita,Life.expectancy), col = "red") +
geom_text(data = label, aes(label = Entity),color = "blue",
vjust = "inward", hjust = "inward")
ggsave("fig4-1.jpg", plot = last_plot())
# guideline 5
G8_age <- age %>% filter(Country %in% G8)
G8_age %>% ggplot(aes(Year,Life.expectancy)) +
geom_line(col="Black") + facet_wrap(~Country,nrow = 2) +
ylim(ymin=0,ymax=100) + ylab("Life expectancy") +theme_minimal() +coord_flip()
ggsave("fig5.jpg", plot = last_plot())
# guideline 6
GDP_age_2018 %>% ggplot(aes(GDP.per.capita,Life.expectancy)) +
geom_point() + theme_minimal() +
ylim(ymin=0,ymax=100) +
ylab("Life expectancy") +
xlab("GDP per capita") +
scale_x_log10(labels = dollar) +
geom_smooth(method='lm', formula= y~x,col="red")
ggsave("fig6.jpg", plot = last_plot())
###########################
|
e1fff2848d4e1fd37e8c594a006decf8f7af288a
|
6a21a808a668533db92472b1e1adbe59dd37517e
|
/R/dev/rawMatlab.R
|
e095bca2c416b77ba812af9ff105efaadfb850a9
|
[] |
no_license
|
mdsumner/mdsutils
|
7198548e9059750a026a102409b8c88e3b39e7ea
|
f162246b5944050853ecb991e545eae7e3b833d2
|
refs/heads/master
| 2021-01-19T05:57:32.736249
| 2018-01-22T21:08:27
| 2018-01-22T21:08:27
| 11,871,571
| 2
| 1
| null | 2018-01-22T21:08:28
| 2013-08-04T00:34:16
|
R
|
UTF-8
|
R
| false
| false
| 46,593
|
r
|
rawMatlab.R
|
library(R.matlab)
readMat.default <-
structure(function (con, maxLength = NULL, fixNames = TRUE, verbose = FALSE,
sparseMatrixClass = c("Matrix", "SparseM", "matrix"), ...)
{
this <- list()
nbrOfBytesRead <- 0
detectedEndian <- "little"
ASCII <- c("", "\001", "\002", "\003", "\004", "\005", "\006",
"\a", "\b", "\t", "\n", "\v", "\f", "\r", "\016", "\017",
"\020", "\021", "\022", "\023", "\024", "\025", "\026",
"\027", "\030", "\031", "\032", "\033", "\034", "\035",
"\036", "\037", " ", "!", "\"", "#", "$", "%", "&", "'",
"(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2",
"3", "4", "5", "6", "7", "8", "9", ":", ";", "<", "=",
">", "?", "@", "A", "B", "C", "D", "E", "F", "G", "H",
"I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X", "Y", "Z", "[", "\\", "]", "^",
"_", "`", "a", "b", "c", "d", "e", "f", "g", "h", "i",
"j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
"u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\177",
"โฌ", "ย", "โ", "ฦ", "โ", "โฆ", "โ ", "โก", "ห", "โฐ", "ล ",
"โน", "ล", "ย", "ลฝ", "ย", "ย", "โ", "โ", "โ", "โ", "โข",
"โ", "โ", "ห", "โข", "ลก", "โบ", "ล", "ย", "ลพ", "ลธ", "ย ",
"ยก", "ยข", "ยฃ", "ยค", "ยฅ", "ยฆ", "ยง", "ยจ", "ยฉ", "ยช", "ยซ",
"ยฌ", "ยญ", "ยฎ", "ยฏ", "ยฐ", "ยฑ", "ยฒ", "ยณ", "ยด", "ยต", "ยถ",
"ยท", "ยธ", "ยน", "ยบ", "ยป", "ยผ", "ยฝ", "ยพ", "ยฟ", "ร", "ร",
"ร", "ร", "ร", "ร
", "ร", "ร", "ร", "ร", "ร", "ร", "ร",
"ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร",
"ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร", "ร ", "รก", "รข",
"รฃ", "รค", "รฅ", "รฆ", "รง", "รจ", "รฉ", "รช", "รซ", "รฌ", "รญ",
"รฎ", "รฏ", "รฐ", "รฑ", "รฒ", "รณ", "รด", "รต", "รถ", "รท", "รธ",
"รน", "รบ", "รป", "รผ", "รฝ", "รพ", "รฟ")
if (compareVersion(as.character(getRversion()), "2.7.0") <
0) {
ASCII[1] <- eval(parse(text = "\"\\000\""))
}
intToChar <- function(i) {
ASCII[i%%256 + 1]
}
willRead <- function(nbrOfBytes) {
if (is.null(maxLength))
return()
if (nbrOfBytesRead + nbrOfBytes <= maxLength)
return()
stop("Trying to read more bytes than expected from connection. Have read ",
nbrOfBytesRead, " byte(s) and trying to read another ",
nbrOfBytes, " byte(s), but expected ", maxLength,
" byte(s).")
}
hasRead <- function(nbrOfBytes) {
nbrOfBytesRead <<- nbrOfBytesRead + nbrOfBytes
if (is.null(maxLength))
return(TRUE)
return(nbrOfBytesRead <= maxLength)
}
isDone <- function() {
if (is.null(maxLength))
return(FALSE)
return(nbrOfBytesRead >= maxLength)
}
rawBuffer <- NULL
fillRawBuffer <- function(need) {
n <- length(rawBuffer)
missing <- (need - n)
if (missing < 0) {
verbose && cat(verbose, level = -500, "Not filling, have enough data.")
return(NULL)
}
raw <- readBin(con = con, what = raw(), n = missing)
rawBuffer <<- c(rawBuffer, raw)
NULL
}
eatRawBuffer <- function(eaten) {
n <- length(rawBuffer)
if (eaten < n) {
rawBuffer <<- rawBuffer[(eaten + 1):n]
}
else {
rawBuffer <<- NULL
}
NULL
}
readBinMat <- function(con, what, size = 1, n, signed = TRUE,
endian = detectedEndian) {
if (isDone())
return(c())
if (is.na(signed))
signed <- TRUE
willRead(size * n)
fillRawBuffer(size * n)
bfr <- readBin(con = rawBuffer, what = what, size = size,
n = n, signed = signed, endian = endian)
eatRawBuffer(size * n)
hasRead(length(bfr) * size)
bfr
}
readCharMat <- function(con, nchars) {
if (isDone())
return(c())
willRead(nchars)
fillRawBuffer(nchars)
bfr <- rawBuffer[1:nchars]
bfr <- as.integer(bfr)
bfr <- intToChar(bfr)
bfr <- paste(bfr, collapse = "")
eatRawBuffer(nchars)
hasRead(nchars)
bfr
}
convertUTF8 <- function(ary) {
ary <- paste(intToChar(as.integer(ary)), collapse = "")
Encoding(ary) <- "UTF-8"
ary
}
convertGeneric <- function(ary) {
ary[ary > 127 | (ary != 0 & ary < 32)] <- NA
convertUTF8(ary)
}
convertUTF16 <- convertUTF32 <- convertGeneric
if (capabilities("iconv")) {
utfs <- grep("UTF", iconvlist(), value = TRUE)
has.utf16 <- utils::head(grep("UTF-?16BE", utfs, value = TRUE),
n = 1)
has.utf32 <- utils::head(grep("UTF-?32BE", utfs, value = TRUE),
n = 1)
rm(utfs)
if (length(has.utf16) > 0) {
convertUTF16 <- function(ary) {
n <- length(ary)
ary16 <- paste(intToChar(c(sapply(ary, function(x) {
c(x%/%2^8, x%%2^8)
}))), collapse = "")
iconv(ary16, has.utf16, "UTF-8")
}
convertUTF32 <- function(ary) {
n <- length(ary)
ary32 <- paste(intToChar(c(sapply(ary, function(x) {
c((x%/%2^24)%%2^8, (x%/%2^16)%%2^8, (x%/%2^8)%%2^8,
x%%2^8)
}))), collapse = "")
iconv(ary32, has.utf32, "UTF-8")
}
}
}
charConverter <- function(type) {
switch(type, miUTF8 = convertUTF8, miUTF16 = convertUTF16,
miUTF32 = convertUTF32, convertGeneric)
}
matToString <- function(ary, type) {
do.call(charConverter(type), list(ary))
}
matToCharArray <- function(ary, type) {
fn <- charConverter(type)
sapply(ary, fn)
}
pushBackRawMat <- function(con, raw) {
rawBuffer <<- c(raw, rawBuffer)
NULL
}
asSafeRName <- function(name) {
if (fixNames) {
name <- gsub("_", ".", name)
}
name
}
uncompress <- function(zraw, sizeRatio = 3, delta = 0.9,
asText = TRUE, ...) {
if (delta <= 0 || delta >= 1) {
throw("Argument 'delta' is out of range (0,1): ",
delta)
}
n <- length(zraw)
unzraw <- NULL
verbose && printf(verbose, level = -50, "Compress data size: %.3f Mb\n",
n/1024^2)
lastException <- NULL
size <- NULL
while (is.null(unzraw) && sizeRatio >= 1) {
size <- sizeRatio * n
verbose && printf(verbose, level = -50, "Size ratio: %.3f\n",
sizeRatio)
lastException <- NULL
tryCatch({
gc();gc();gc();gc();
unzraw <- Rcompression::uncompress(zraw, size = size,
asText = asText)
rm(zraw)
break
}, error = function(ex) {
msg <- ex$message
if (regexpr("corrupted compressed", msg) != -1) {
errorMsg <- paste("Failed to uncompress data: ",
msg, sep = "")
throw(errorMsg)
}
gc()
lastException <<- ex
})
sizeRatio <- delta * sizeRatio
}
if (is.null(unzraw)) {
msg <- lastException$message
throw(sprintf("Failed to uncompress compressed %d bytes (with smallest initial buffer size of %.3f Mb: %s)",
n, size/1024^2, msg))
}
unzraw
}
debugIndent <- 0
debug <- function(..., sep = "") {
if (debugIndent > 0)
cat(paste(rep(" ", length.out = debugIndent), collapse = ""))
cat(..., sep = sep)
cat("\n")
}
debugPrint <- function(...) {
print(...)
}
debugStr <- function(...) {
str(...)
}
debugEnter <- function(..., indent = +1) {
debug(..., "...")
debugIndent <<- debugIndent + indent
}
debugExit <- function(..., indent = -1) {
debugIndent <<- debugIndent + indent
debug(..., "...done\n")
}
isMat4 <- function(MOPT) {
any(MOPT == 0)
}
getMOPT <- function(fourBytes) {
if (length(fourBytes) != 4)
stop("Argument 'fourBytes' must a vector of 4 bytes: ",
length(fourBytes))
fourBytes <- as.integer(fourBytes)
neg <- (fourBytes < 0)
if (any(neg))
fourBytes[neg] <- fourBytes[neg] + 256
base <- 256^(0:3)
MOPT <- c(NA, NA, NA, NA)
for (endian in c("little", "big")) {
mopt <- sum(base * fourBytes)
for (kk in 4:1) {
MOPT[kk] <- mopt%%10
mopt <- mopt%/%10
}
isMOPT <- (MOPT[1] %in% 0:4 && MOPT[2] == 0 && MOPT[3] %in%
0:5 && MOPT[4] %in% 0:2)
if (isMOPT)
break
base <- rev(base)
}
if (!isMOPT)
stop("File format error: Not a valid MAT v4. The first four bytes (MOPT) were: ",
paste(MOPT, collapse = ", "))
verbose && cat(verbose, level = -50, "Read MOPT bytes: ",
moptToString(MOPT))
MOPT
}
readMat4 <- function(con, maxLength = NULL, firstFourBytes = NULL) {
readMat4Header <- function(con, firstFourBytes = NULL) {
header <- list()
if (is.null(firstFourBytes)) {
firstFourBytes <- readBinMat(con, what = integer(),
size = 1, n = 4)
}
if (length(firstFourBytes) == 0)
return(NULL)
MOPT <- getMOPT(firstFourBytes)
if (MOPT[1] == 0) {
detectedEndian <<- "little"
}
else if (MOPT[1] == 1) {
detectedEndian <<- "big"
}
else if (MOPT[1] %in% 2:4) {
stop("Looks like a MAT v4 file, but the storage format of numerics (VAX D-float, VAX G-float or Cray) is not supported. Currently only IEEE numeric formats in big or little endian are supported.")
}
else {
stop("Unknown first byte in MOPT header (not in [0,4]): ",
paste(MOPT, collapse = ", "))
}
header$ocode <- MOPT[2]
if (MOPT[3] == 0) {
header$what <- double()
header$size <- 8
header$signed <- NA
}
else if (MOPT[3] == 1) {
header$what <- double()
header$size <- 4
header$signed <- NA
}
else if (MOPT[3] == 2) {
header$what <- integer()
header$size <- 4
header$signed <- TRUE
}
else if (MOPT[3] == 3) {
header$what <- integer()
header$size <- 2
header$signed <- TRUE
}
else if (MOPT[3] == 4) {
header$what <- integer()
header$size <- 2
header$signed <- FALSE
}
else if (MOPT[3] == 5) {
header$what <- integer()
header$size <- 1
header$signed <- FALSE
}
else {
stop("Unknown third byte in MOPT header (not in [0,5]): ",
paste(MOPT, collapse = ", "))
}
header$matrixType <- "numeric"
if (MOPT[4] == 0) {
header$matrixType <- "numeric"
}
else if (MOPT[4] == 1) {
header$matrixType <- "text"
}
else if (MOPT[4] == 2) {
header$matrixType <- "sparse"
}
else {
}
header$mrows <- readBinMat(con, what = integer(),
size = 4, n = 1)
header$ncols <- readBinMat(con, what = integer(),
size = 4, n = 1)
verbose && cat(verbose, level = -50, "Matrix dimension: ",
header$mrows, "x", header$ncols)
header$imagf <- readBinMat(con, what = integer(),
size = 4, n = 1)
verbose && cat(verbose, level = -60, "Matrix contains imaginary values: ",
as.logical(header$imagf))
header$namlen <- readBinMat(con, what = integer(),
size = 4, n = 1)
verbose && cat(verbose, level = -100, "Matrix name length: ",
header$namlen - 1)
header
}
readMat4Data <- function(con, header) {
name <- readCharMat(con, header$namlen)
verbose && cat(verbose, level = -50, "Matrix name: '",
name, "'")
name <- asSafeRName(name)
verbose && cat(verbose, level = -51, "Matrix safe name: '",
name, "'")
n <- header$mrows * header$ncols
if (header$matrixType == "text") {
data <- readBinMat(con, what = header$what, size = header$size,
signed = header$signed, n = n)
data <- intToChar(data)
dim(data) <- c(header$mrows, header$ncols)
data <- apply(data, MARGIN = 1, FUN = paste,
sep = "", collapse = "")
}
else if (header$matrixType %in% c("numeric", "sparse")) {
real <- readBinMat(con, what = header$what, size = header$size,
signed = header$signed, n = n)
if (header$imagf != 0) {
verbose && cat(verbose, level = -2, "Reading imaginary part of complex data set.")
imag <- readBinMat(con, what = header$what,
size = header$size, signed = header$signed,
n = n)
data <- complex(real = real, imag = imag)
}
else {
data <- real
rm(real)
}
dim(data) <- c(header$mrows, header$ncols)
if (header$matrixType == "sparse") {
i <- as.integer(data[, 1])
j <- as.integer(data[, 2])
s <- data[, 3]
rm(data)
n <- max(i)
m <- max(j)
last <- length(i)
if (last > 1 && i[last] == i[last - 1] && j[last] ==
j[last - 1]) {
i <- i[-last]
j <- j[-last]
s <- s[-last]
}
if (sparseMatrixClass == "Matrix" && require("Matrix",
quietly = TRUE)) {
i <- i - as.integer(1)
j <- j - as.integer(1)
dim <- as.integer(c(n, m))
data <- new("dgTMatrix", i = i, j = j, x = s,
Dim = dim)
data <- as(data, "dgCMatrix")
}
else if (sparseMatrixClass == "SparseM" &&
require("SparseM", quietly = TRUE)) {
dim <- as.integer(c(n, m))
data <- new("matrix.coo", ra = s, ia = i,
ja = j, dimension = dim)
}
else {
pos <- (j - 1) * n + i
rm(i, j)
data <- matrix(0, nrow = n, ncol = m)
data[pos] <- s
rm(pos, s)
}
}
}
else {
stop("MAT v4 file format error: Unknown 'type' in header: ",
header$matrixType)
}
verbose && cat(verbose, level = -60, "Matrix elements:\n")
verbose && str(verbose, level = -60, data)
data <- list(data)
names(data) <- name
data
}
result <- list()
repeat {
header <- readMat4Header(con, firstFourBytes = firstFourBytes)
if (is.null(header))
break
data <- readMat4Data(con, header)
result <- append(result, data)
rm(data)
firstFourBytes <- NULL
}
header <- list(version = "4", endian = detectedEndian)
attr(result, "header") <- header
result
}
moptToString <- function(MOPT) {
if (MOPT[1] == 0)
mStr <- "IEEE Little Endian (PC, 386, 486, DEC Risc)"
else if (MOPT[1] == 1)
mStr <- "IEEE Big Endian (Macintosh, SPARC, Apollo,SGI, HP 9000/300, other Motorola)"
else if (MOPT[1] == 2)
mStr <- "VAX D-float"
else if (MOPT[1] == 3)
mStr <- "VAX G-float"
else if (MOPT[1] == 4)
mStr <- "Cray"
else mStr <- sprintf("<Unknown value of MOPT[1]. Not in range [0,4]: %d.>",
as.integer(MOPT[1]))
if (MOPT[2] == 0)
oStr <- "Reserved for future use"
else oStr <- sprintf("<Unknown value of MOPT[2]. Should be 0: %d.>",
as.integer(MOPT[2]))
if (MOPT[3] == 0)
pStr <- "64-bit double"
else if (MOPT[3] == 1)
pStr <- "32-bit single"
else if (MOPT[3] == 2)
pStr <- "32-bit signed integer"
else if (MOPT[3] == 3)
pStr <- "16-bit signed integer"
else if (MOPT[3] == 4)
pStr <- "16-bit unsigned integer"
else if (MOPT[3] == 5)
pStr <- "8-bit unsigned integer"
else pStr <- sprintf("<Unknown value of MOPT[3]. Not in range [0,5]: %d.>",
as.integer(MOPT[3]))
if (MOPT[4] == 0)
tStr <- "Numeric (Full) matrix"
else if (MOPT[4] == 1)
tStr <- "Text matrix"
else if (MOPT[4] == 2)
tStr <- "Sparse matrix"
else tStr <- sprintf("<Unknown value of MOPT[4]. Not in range [0,2]: %d.>",
as.integer(MOPT[4]))
moptStr <- paste("MOPT[1]: ", mStr, ". MOPT[2]: ", oStr,
". MOPT[3]: ", pStr, ". MOPT[4]: ", tStr, ".", sep = "")
moptStr
}
readMat5 <- function(con, maxLength = NULL, firstFourBytes = NULL) {
left <- NA
readMat5Header <- function(this, firstFourBytes = NULL) {
if (is.null(firstFourBytes))
firstFourBytes <- readBinMat(con, what = integer(),
size = 1, n = 4)
MOPT <- firstFourBytes
if (MOPT[1] %in% 0:4 && MOPT[2] == 0 && MOPT[3] %in%
0:5 && MOPT[4] %in% 0:2) {
stop("Detected MAT file format v4. Do not use readMat5() explicitly, but use readMat().")
}
description <- c(MOPT, readBinMat(con, what = integer(),
size = 1, n = 120))
description <- paste(intToChar(description), collapse = "")
version <- readBinMat(con, what = integer(), size = 2,
n = 1, endian = "little")
endian <- readCharMat(con, nchars = 2)
if (endian == "MI")
detectedEndian <<- "big"
else if (endian == "IM")
detectedEndian <<- "little"
else {
warning("Unknown endian: ", endian, ". Will assume Bigendian.")
detectedEndian <<- "big"
}
if (detectedEndian == "big") {
hi <- version%/%256
low <- version%%256
version <- 256 * low + hi
}
if (version == 256) {
version = "5"
}
else {
warning("Unknown MAT version tag: ", version,
". Will assume version 5.")
version = as.character(version)
}
list(description = description, version = version,
endian = detectedEndian)
}
readMat5DataElement <- function(this) {
isSigned <- function(type) {
signed <- c("mxINT8_CLASS", "mxINT16_CLASS",
"mxINT32_CLASS")
signed <- c(signed, "miINT8", "miINT16", "miINT32")
unsigned <- c("mxUINT8_CLASS", "mxUINT16_CLASS",
"mxUINT32_CLASS")
unsigned <- c(unsigned, "miUINT8", "miUINT16",
"miUINT32")
if (!is.element(type, c(signed, unsigned)))
return(NA)
is.element(type, signed)
}
readTag <- function(this) {
verbose && enter(verbose, level = -80, "Reading Tag")
on.exit(verbose && exit(verbose))
type <- readBinMat(con, what = integer(), size = 4,
n = 1)
if (length(type) == 0)
return(NULL)
left <<- left - 4
knownTypes <- c(miMATRIX = 0, miINT8 = 8, miUINT8 = 8,
miINT16 = 16, miUINT16 = 16, miINT32 = 32,
miUINT32 = 32, miSINGLE = 32, `--` = NA, miDOUBLE = 64,
`--` = NA, `--` = NA, miINT64 = 64, miUINT64 = 64,
miMATRIX = NA, miCOMPRESSED = NA, miUTF8 = 8,
miUTF16 = 16, miUTF32 = 32)
knownWhats <- list(miMATRIX = 0, miINT8 = integer(),
miUINT8 = integer(), miINT16 = integer(), miUINT16 = integer(),
miINT32 = integer(), miUINT32 = integer(),
miSINGLE = double(), `--` = NA, miDOUBLE = double(),
`--` = NA, `--` = NA, miINT64 = integer(),
miUINT64 = integer(), miMATRIX = NA, miUTF8 = integer(),
miUTF16 = integer(), miUTF32 = integer())
nbrOfBytes <- NULL
tmp <- type
bytes <- rep(NA, length = 4)
for (kk in 1:4) {
bytes[kk] <- (tmp%%256)
tmp <- tmp%/%256
}
rm(tmp)
compressed <- any(bytes[3:4] != 0)
verbose && cat(verbose, level = -100, "Compressed tag: ",
compressed)
if (compressed) {
nbrOfBytes <- type%/%2^16
type <- type%%2^16
if (detectedEndian == "big") {
tmp <- type
}
if (type + 1 < 1 || type + 1 > length(knownTypes))
stop("Unknown data type. Not in range [1,",
length(knownTypes), "]: ", type)
padding <- 4 - ((nbrOfBytes - 1)%%4 + 1)
}
else {
nbrOfBytes <- readBinMat(con, what = integer(),
size = 4, n = 1)
left <<- left - 4
padding <- 8 - ((nbrOfBytes - 1)%%8 + 1)
}
type <- names(knownTypes)[type + 1]
sizeOf <- as.integer(knownTypes[type])
what <- knownWhats[[type]]
signed <- isSigned(type)
tag <- list(type = type, signed = signed, sizeOf = sizeOf,
what = what, nbrOfBytes = nbrOfBytes, padding = padding,
compressed = compressed)
verbose && print(verbose, level = -100, unlist(tag))
if (identical(tag$type, "miCOMPRESSED")) {
if (!require("Rcompression", quietly = TRUE)) {
throw("Cannot read compressed data. Omegahat.org package 'Rcompression' could not be loaded. Alternatively, save your data in a non-compressed format by specifying -V6 when calling save() in Matlab or Octave.")
}
n <- tag$nbrOfBytes
zraw <- readBinMat(con = con, what = raw(),
n = n)
verbose && cat(verbose, level = -110, "Uncompressing ",
n, " bytes")
unzraw <- uncompress(zraw, asText = FALSE)
rm(zraw)
verbose && printf(verbose, level = -110, "Inflated %.3f times from %d bytes to %d bytes.\n",
length(unzraw)/length(zraw), length(zraw),
length(unzraw))
pushBackRawMat(con, unzraw)
rm(unzraw)
gc();gc();gc()
##browser()
## this is just the tag
tag <- readTag(this)
## browser()
}
tag
}
readArrayFlags <- function(this) {
verbose && enter(verbose, level = -70, "Reading Array Flags")
on.exit(verbose && exit(verbose))
getBits <- function(i) {
ready <- FALSE
bits <- c()
while (!ready) {
bit <- i%%2
bits <- c(bits, bit)
i <- i%/%2
ready <- (i == 0)
}
bits
}
knownTypes <- c(mxCELL_CLASS = NA, mxSTRUCT_CLASS = NA,
mxOBJECT_CLASS = NA, mxCHAR_CLASS = 8, mxSPARSE_CLASS = NA,
mxDOUBLE_CLASS = NA, mxSINGLE_CLASS = NA, mxINT8_CLASS = 8,
mxUINT8_CLASS = 8, mxINT16_CLASS = 16, mxUINT16_CLASS = 16,
mxINT32_CLASS = 32, mxUINT32_CLASS = 32)
arrayFlags <- readBinMat(con, what = integer(),
size = 4, n = 1)
left <<- left - 4
class <- arrayFlags%%256
if (class < 1 || class > length(knownTypes)) {
stop("Unknown array type (class). Not in [1,",
length(knownTypes), "]: ", class)
}
class <- names(knownTypes)[class]
classSize <- knownTypes[class]
arrayFlags <- arrayFlags%/%256
flags <- arrayFlags%%256
flags <- as.logical(getBits(flags + 2^8)[-9])
logical <- flags[2]
global <- flags[3]
complex <- flags[4]
nzmax <- readBinMat(con, what = integer(), size = 4,
n = 1)
left <<- left - 4
flags <- list(logical = logical, global = global,
complex = complex, class = class, classSize = classSize,
nzmax = nzmax)
verbose && print(verbose, level = -100, unlist(flags[-1]))
flags
}
readDimensionsArray <- function(this) {
verbose && enter(verbose, level = -70, "Reading Dimensions Array")
on.exit(verbose && exit(verbose))
tag <- readTag(this)
if (tag$type != "miINT32") {
throw("Tag type not supported: ", tag$type)
}
sizeOf <- tag$sizeOf%/%8
len <- tag$nbrOfBytes%/%sizeOf
verbose && cat(verbose, level = -100, "Reading ",
len, " integers each of size ", sizeOf, " bytes.")
dim <- readBinMat(con, what = integer(), size = sizeOf,
n = len)
left <<- left - sizeOf * len
verbose && cat(verbose, level = -101, "Reading ",
tag$padding, " padding bytes.")
padding <- readBinMat(con, what = integer(),
size = 1, n = tag$padding)
left <<- left - tag$padding
dimArray <- list(tag = tag, dim = dim)
verbose && print(verbose, level = -100, list(dim = dim))
dimArray
}
readName <- function(this) {
verbose && enter(verbose, level = -70, "Reading Array Name")
on.exit(verbose && exit(verbose))
tag <- readTag(this)
sizeOf <- tag$sizeOf%/%8
nchars <- tag$nbrOfBytes%/%sizeOf
verbose && cat(verbose, level = -100, "Reading ",
nchars, " characters.")
name <- readBinMat(con, what = tag$what, size = sizeOf,
n = nchars)
name <- matToString(name, tag$type)
name <- asSafeRName(name)
left <<- left - nchars
verbose && cat(verbose, level = -101, "Reading ",
tag$padding, " padding bytes.")
padding <- readBinMat(con, what = integer(),
size = 1, n = tag$padding)
left <<- left - tag$padding
verbose && cat(verbose, level = -50, "Name: '",
name, "'")
list(tag = tag, name = name)
}
readFieldNameLength <- function(this) {
verbose && enter(verbose, level = -70, "Reading Field Name Length")
on.exit(verbose && exit(verbose))
tag <- readTag(this)
if (tag$type != "miINT32") {
throw("Tag type not supported: ", tag$type)
}
sizeOf <- tag$sizeOf%/%8
len <- tag$nbrOfBytes%/%sizeOf
maxLength <- readBinMat(con, what = integer(),
size = sizeOf, n = len)
left <<- left - len
padding <- readBinMat(con, what = integer(),
size = 1, n = tag$padding)
left <<- left - tag$padding
verbose && cat(verbose, level = -100, "Field name length+1: ",
maxLength)
list(tag = tag, maxLength = maxLength)
}
readFieldNames <- function(this, maxLength) {
verbose && enter(verbose, level = -70, "Reading Field Names")
on.exit(verbose && exit(verbose))
tag <- readTag(this)
names <- c()
sizeOf <- tag$sizeOf%/%8
nbrOfNames <- tag$nbrOfBytes%/%maxLength
for (k in seq(length = nbrOfNames)) {
name <- readBinMat(con, what = tag$what, size = sizeOf,
n = maxLength)
name <- matToString(name, tag$type)
name <- asSafeRName(name)
left <<- left - maxLength
names <- c(names, name)
}
verbose && cat(verbose, level = -101, "Reading ",
tag$padding, " padding bytes.")
padding <- readBinMat(con, what = integer(),
size = 1, n = tag$padding)
left <<- left - tag$padding
verbose && cat(verbose, level = -50, "Field names: ",
paste(paste("'", names, "'", sep = ""), collapse = ", "))
list(tag = tag, names = names)
}
readFields <- function(this, names) {
verbose && enter(verbose, level = -70, "Reading Fields")
on.exit(verbose && exit(verbose))
fields <- list()
for (k in seq(names)) {
verbose && enter(verbose, level = -3, "Reading field: ",
names[k])
field <- readMat5DataElement(this)
fields <- c(fields, field)
verbose && exit(verbose)
}
names(fields) <- names
fields
}
readValues <- function(this) {
verbose && enter(verbose, level = -70, "Reading Values")
on.exit(verbose && exit(verbose))
tag <- readTag(this)
sizeOf <- tag$sizeOf%/%8
len <- tag$nbrOfBytes%/%sizeOf
verbose && cat(verbose, level = -100, "Reading ",
len, " values each of ", sizeOf, " bytes. In total ",
tag$nbrOfBytes, " bytes.")
value <- readBinMat(con, what = tag$what, size = sizeOf,
n = len, signed = tag$signed)
verbose && str(verbose, level = -102, value)
left <<- left - sizeOf * len
verbose && cat(verbose, level = -101, "Reading ",
tag$padding, " padding bytes.")
padding <- readBinMat(con, what = integer(),
size = 1, n = tag$padding)
left <<- left - tag$padding
list(tag = tag, value = value)
}
readMiMATRIX <- function(this, tag) {
verbose && enter(verbose, level = -70, "Reading miMATRIX")
on.exit(verbose && exit(verbose))
verbose && cat(verbose, level = -60, "Argument 'tag':")
verbose && str(verbose, level = -60, tag)
tag <- readTag(this)
if (is.null(tag)) {
verbose && cat(verbose, "Nothing more to read. Returning NULL.")
verbose && exit(verbose)
return(NULL)
}
if (tag$type == "miMATRIX") {
verbose && enter(verbose, level = -70, "Reading a nested miMATRIX")
node <- readMiMATRIX(this, tag)
verbose && exit(verbose)
verbose && exit(verbose)
return(node)
}
if (tag$type != "miUINT32") {
throw("Tag type not supported: ", tag$type)
}
arrayFlags <- readArrayFlags(this)
arrayFlags$tag <- tag
arrayFlags$signed <- isSigned(tag$type)
dimensionsArray <- readDimensionsArray(this)
arrayName <- readName(this)
if (arrayFlags$class == "mxCELL_CLASS") {
nbrOfCells <- prod(dimensionsArray$dim)
verbose && cat(verbose, level = -4, "Reading mxCELL_CLASS with ",
nbrOfCells, " cells.")
matrix <- list()
for (kk in seq(length = nbrOfCells)) {
tag <- readTag(this)
cell <- readMiMATRIX(this, tag)
matrix <- c(matrix, cell)
}
matrix <- list(matrix)
names(matrix) <- arrayName$name
}
else if (arrayFlags$class == "mxSTRUCT_CLASS") {
nbrOfCells <- prod(dimensionsArray$dim)
verbose && cat(verbose, level = -4, "Reading mxSTRUCT_CLASS with ",
nbrOfCells, " cells in structure.")
maxLength <- readFieldNameLength(this)
names <- readFieldNames(this, maxLength = maxLength$maxLength)
verbose && cat(verbose, level = -100, "Field names: ",
paste(names$names, collapse = ", "))
nbrOfFields <- length(names$names)
matrix <- list()
for (kk in seq(length = nbrOfCells)) {
fields <- readFields(this, names = names$names)
matrix <- c(matrix, fields)
}
names(matrix) <- NULL
dim <- c(nbrOfFields, dimensionsArray$dim)
if (prod(dim) > 0) {
matrix <- structure(matrix, dim = dim)
dimnames <- rep(list(NULL), length(dim(matrix)))
dimnames[[1]] <- names$names
dimnames(matrix) <- dimnames
}
matrix <- list(matrix)
names(matrix) <- arrayName$name
verbose && cat(verbose, level = -60, "Read a 'struct':")
verbose && str(verbose, level = -60, matrix)
}
else if (arrayFlags$class == "mxOBJECT_CLASS") {
className <- readName(this)$name
maxLength <- readFieldNameLength(this)
verbose && cat(verbose, level = -4, "Reading mxOBJECT_CLASS of class '",
className, "' with ", maxLength, " fields.")
names <- readFieldNames(this, maxLength = maxLength$maxLength)
fields <- readFields(this, names = names$names)
class(fields) <- className
matrix <- list(fields)
names(matrix) <- arrayName$name
}
else if (arrayFlags$complex) {
verbose && enter(verbose, level = -4, "Reading complex matrix.")
pr <- readValues(this)
if (left > 0)
pi <- readValues(this)
matrix <- complex(real = pr$value, imaginary = pi$value)
dim(matrix) <- dimensionsArray$dim
verbose && str(verbose, level = -10, matrix)
matrix <- list(matrix)
names(matrix) <- arrayName$name
verbose && exit(verbose, suffix = paste("...done: '",
names(matrix), "' [", mode(matrix), ": ",
paste(dim(matrix), collapse = "x"), " elements]",
sep = ""))
}
else if (arrayFlags$class == "mxSPARSE_CLASS") {
nrow <- dimensionsArray$dim[1]
ncol <- dimensionsArray$dim[2]
verbose && cat(verbose, level = -4, "Reading mxSPARSE_CLASS ",
nrow, "x", ncol, " matrix.")
nzmax <- arrayFlags$nzmax
ir <- c()
jc <- c()
pr <- c()
if (nzmax > 0) {
ir <- readValues(this)$value
ir <- ir + 1
if (any(ir < 1 | ir > nrow)) {
stop("MAT v5 file format error: Some elements in row vector 'ir' (sparse arrays) are out of range [1,",
nrow, "].")
}
jc <- readValues(this)$value
if (length(jc) != ncol + 1) {
stop("MAT v5 file format error: Length of column vector 'jc' (sparse arrays) is not ",
ncol, "+1 as expected: ", length(jc))
}
pr <- readValues(this)$value
verbose && str(verbose, level = -102, ir)
verbose && str(verbose, level = -102, jc)
verbose && str(verbose, level = -102, pr)
if (arrayFlags$complex) {
pi <- readValues(this)$value
verbose && str(verbose, level = -102, pi)
}
nzmax <- min(nzmax, jc[ncol + 1])
if (nzmax < length(ir)) {
ir <- ir[1:nzmax]
}
if (nzmax < length(pr)) {
pr <- pr[1:nzmax]
}
if (arrayFlags$complex) {
if (nzmax < length(pi)) {
pi <- pi[1:nzmax]
}
pr <- complex(real = pr, imaginary = pi)
rm(pi)
}
}
if (sparseMatrixClass == "Matrix" && require("Matrix",
quietly = TRUE)) {
if (is.integer(pr) || is.logical(pr)) {
pr <- as.double(pr)
}
matrix <- new("dgCMatrix", x = pr, p = as.integer(jc),
i = as.integer(ir - 1), Dim = as.integer(c(nrow,
ncol)))
matrix <- list(matrix)
names(matrix) <- arrayName$name
}
else if (sparseMatrixClass == "SparseM" &&
require("SparseM", quietly = TRUE)) {
if (is.integer(pr) || is.logical(pr)) {
pr <- as.double(pr)
}
matrix <- new("matrix.csc", ra = pr, ja = as.integer(ir),
ia = as.integer(jc + 1), dimension = as.integer(c(nrow,
ncol)))
matrix <- list(matrix)
names(matrix) <- arrayName$name
}
else {
matrix <- matrix(0, nrow = nrow, ncol = ncol)
attr(matrix, "name") <- arrayName$name
for (col in seq(length = length(jc) - 1)) {
first <- jc[col]
last <- jc[col + 1] - 1
idx <- seq(from = first, to = last)
value <- pr[idx]
row <- ir[idx]
ok <- is.finite(row)
row <- row[ok]
value <- value[ok]
matrix[row, col] <- value
}
rm(ir, jc, first, last, idx, value, row)
matrix <- list(matrix)
names(matrix) <- arrayName$name
}
}
else {
data <- readValues(this)
matrix <- data$value
verbose && cat(verbose, level = -5, "Converting to ",
arrayFlags$class, " matrix.")
if (arrayFlags$class == "mxDOUBLE_CLASS") {
matrix <- as.double(matrix)
dim(matrix) <- dimensionsArray$dim
}
else if (arrayFlags$class == "mxSINGLE_CLASS") {
matrix <- as.single(matrix)
dim(matrix) <- dimensionsArray$dim
}
else if (is.element(arrayFlags$class, c("mxINT8_CLASS",
"mxUINT8_CLASS", "mxINT16_CLASS", "mxUINT16_CLASS",
"mxINT32_CLASS", "mxUINT32_CLASS"))) {
matrix <- as.integer(matrix)
dim(matrix) <- dimensionsArray$dim
}
else if (arrayFlags$class == "mxCHAR_CLASS") {
matrix <- matToCharArray(matrix, tag$type)
dim(matrix) <- dimensionsArray$dim
matrix <- apply(matrix, MARGIN = 1, FUN = paste,
collapse = "")
matrix <- as.matrix(matrix)
}
else {
stop("Unknown or unsupported class id in array flags: ",
arrayFlags$class)
}
matrix <- list(matrix)
names(matrix) <- arrayName$name
}
matrix
}
tag <- readTag(this)
if (is.null(tag))
return(NULL)
if (tag$nbrOfBytes == 0)
return(list(NULL))
left <<- tag$nbrOfBytes
if (tag$type == "miMATRIX") {
verbose && enter(verbose, level = -3, "Reading (outer) miMATRIX")
data <- readMiMATRIX(this, tag)
verbose && str(verbose, level = -4, data)
verbose && exit(verbose)
}
else {
verbose && printf(verbose, level = -3, "Reading (outer) %.0f integers",
tag$nbrOfBytes)
data <- readBinMat(con, what = integer(), size = 1,
n = tag$nbrOfBytes, signed = tag$signed)
}
data
}
detectedEndian <<- "little"
header <- readMat5Header(this, firstFourBytes = firstFourBytes)
verbose && cat(verbose, level = -100, "Read MAT v5 header:")
verbose && print(verbose, level = -100, header)
verbose && cat(verbose, level = -100, "Endian: ", detectedEndian)
result <- list()
repeat {
verbose && enter(verbose, level = -2, "Reading data element")
data <- readMat5DataElement(this)
if (is.null(data)) {
verbose && exit(verbose)
break
}
result <- append(result, data)
verbose && exit(verbose, suffix = paste("...done: '",
names(data), "' [", mode(data[[1]]), ": ", paste(dim(data[[1]]),
collapse = "x"), "]", sep = ""))
}
attr(result, "header") <- header
result
}
sparseMatrixClass <- match.arg(sparseMatrixClass)
if (inherits(verbose, "Verbose")) {
}
else if (is.numeric(verbose)) {
require("R.utils") || throw("Package not available: R.utils")
verbose <- Verbose(threshold = verbose)
}
else {
verbose <- as.logical(verbose)
if (verbose) {
require("R.utils") || throw("Package not available: R.utils")
verbose <- Verbose(threshold = -1)
}
}
if (inherits(con, "connection")) {
if (!isOpen(con)) {
verbose && cat(verbose, level = -1, "Opens binary connection.")
open(con, open = "rb")
on.exit({
close(con)
verbose && cat(verbose, level = -1, "Binary connection closed.")
})
}
}
else {
con <- as.character(con)
verbose && cat(verbose, level = -1, "Opens binary file: ",
con)
con <- file(con, open = "rb")
on.exit({
close(con)
verbose && cat(verbose, level = -1, "Binary file closed.")
})
}
if (summary(con)$text != "binary")
stop("Can only read a MAT file structure from a *binary* connection.")
nbrOfBytesRead <- 0
firstFourBytes <- readBinMat(con, what = integer(), size = 1,
n = 4)
if (is.null(firstFourBytes))
stop("MAT file format error: Nothing to read. Empty input stream.")
if (isMat4(firstFourBytes)) {
verbose && cat(verbose, level = 0, "Trying to read MAT v4 file stream...")
readMat4(con, firstFourBytes = firstFourBytes, maxLength = maxLength)
}
else {
verbose && cat(verbose, level = 0, "Trying to read MAT v5 file stream...")
readMat5(con, firstFourBytes = firstFourBytes, maxLength = maxLength)
}
}, modifiers = "public")
# Top-level driver: load the MATLAB file using the readMat() defined above.
# NOTE(review): hard-coded Windows drive path — machine specific; consider
# passing the directory/file as arguments instead of setwd().
setwd("G:/GEM/raw_matlab/temp")
d <- readMat("temp_CARS_MDT_divby2_depth_withseason25.mat")
|
e3ebaed008791e23fa8c2b71ec6ad370bb983f02
|
6e707cd7044ecd3bebf0a5013b224e48ef2dc819
|
/results_processing/adhoc_plots.R
|
7b46f1f182708f0f824825cedfec2e2d817192d1
|
[] |
no_license
|
abhivij/bloodbased-pancancer-diagnosis
|
6836e308ae382a56fd4bb45811acd1d5934f2b99
|
c538549d0be03b909c595d32f9b367beba3116b1
|
refs/heads/master
| 2023-06-26T23:35:30.244214
| 2023-06-15T12:23:31
| 2023-06-15T12:23:31
| 283,672,783
| 3
| 4
| null | 2022-07-03T14:50:17
| 2020-07-30T04:52:40
|
R
|
UTF-8
|
R
| false
| false
| 6,233
|
r
|
adhoc_plots.R
|
# Ad hoc result plots for the breast-cancer-only experiment: a mean-AUC bar
# plot with 95% CI error bars, Jaccard-index bar and scatter plots, and
# (further below) tSNE plots restricted to iteration-1 selected features.
# NOTE(review): hard-coded home-directory paths make this script
# machine-specific.
setwd("~/UNSW/VafaeeLab/bloodbased-pancancer-diagnosis/results_processing/")
library(tidyverse)
library(viridis)
library(ComplexHeatmap)
source("metadata.R")      # presumably defines model_vector and dataset_pipeline_arguments — confirm
source("../utils/utils.R")
setwd("~/UNSW/VafaeeLab/bloodbased-pancancer-diagnosis/results/results_breastcanceronly/")
data_info <- read.table('data_info.csv', sep = ',', header = TRUE)
fsm_info <- read.table('fsm_info.csv', sep = ',', header = TRUE)
model_results <- read.table('model_results.csv', sep = ',', header = TRUE)
# Restrict all plots to the two breast-cancer datasets of interest.
allowed_datasets <- c('GSE83270_BCVsHC', 'GSE22981_EBCVsHC')
# Keep only L2-regularized logistic regression rows for the allowed datasets.
model_results <- model_results %>%
mutate(FSM = factor(FSM)) %>%
mutate(Model = factor(Model, levels = model_vector)) %>%
filter(Model == "L2 Regularized logistic regression") %>%
filter(DataSetId %in% allowed_datasets) %>%
arrange(DataSetId)
# Mean AUC per dataset/FSM with 95% CI error bars.
model_barplot <- ggplot(model_results, aes(x=DataSetId, fill=FSM, y=Mean_AUC)) +
geom_bar(stat="identity", position="dodge") +
geom_errorbar( aes(x=DataSetId, ymin=X95.CI_AUC_lower, ymax=X95.CI_AUC_upper), position="dodge") +
scale_fill_manual(values=c("blue")) +
theme(axis.text.x = element_text(angle=45, hjust=1, vjust=1, size=rel(1.5)),
axis.text.y = element_text(size=rel(1.5), face="italic", hjust=0.95),
axis.title.x = element_text(size=rel(1.5)),
axis.title.y = element_text(size=rel(1.5)),
strip.text = element_text(size=rel(1.2), face="bold"),
legend.title = element_text(size=rel(1.5)),
legend.text = element_text(size=rel(1.5))) +
labs(x = "Data Sets", y = "Mean AUC with 95 CI")
ggsave("AUC_barplot.png", model_barplot, width=20, height=10, dpi=500)
#JI: feature-selection stability measured by the Jaccard index
all_ji_df <- read.table("JI/all_ji.csv", sep = ',', header = TRUE)
# Keep only same-FSM comparisons for the t-test selector on the two datasets.
all_ji_df <- all_ji_df %>%
filter(FSM1 == FSM2) %>%
select(-c(FSM2)) %>%
filter(FSM1 %in% 't-test') %>%
filter(DataSetId %in% allowed_datasets) %>%
rename(c("FSM" = "FSM1")) %>%
arrange(DataSetId)
ji_barplot <- ggplot(all_ji_df, aes(x=DataSetId, fill=FSM, y=JI)) +
geom_bar(stat="identity", position="dodge") +
scale_fill_manual(values=c("green")) +
theme(axis.text.x = element_text(angle=45, hjust=1, vjust=1, size=rel(1.5)),
axis.text.y = element_text(size=rel(1.5), face="italic", hjust=0.95),
axis.title.x = element_text(size=rel(1.5)),
axis.title.y = element_text(size=rel(1.5)),
strip.text = element_text(size=rel(1.2), face="bold"),
legend.title = element_text(size=rel(1.5)),
legend.text = element_text(size=rel(1.5))) +
labs(x = "Data Sets", y = "Jaccard Index")
ggsave("JI_barplot.png", ji_barplot, width=20, height=10, dpi=500)
# Same JI data drawn as points instead of bars.
ji_scatterplot <- ggplot(all_ji_df, aes(x=DataSetId, fill=FSM, y=JI)) +
geom_point(shape=23, fill="green", color="darkred", size=5) +
scale_fill_manual(values=c("green")) +
theme(axis.text.x = element_text(angle=45, hjust=1, vjust=1, size=rel(1.5)),
axis.text.y = element_text(size=rel(1.5), face="italic", hjust=0.95),
axis.title.x = element_text(size=rel(1.5)),
axis.title.y = element_text(size=rel(1.5)),
strip.text = element_text(size=rel(1.2), face="bold"),
legend.title = element_text(size=rel(1.5)),
legend.text = element_text(size=rel(1.5))) +
labs(x = "Data Sets", y = "Jaccard Index")
ggsave("JI_scatterplot.png", ji_scatterplot, width=20, height=10, dpi=500)
# tsne plot with iter 1 features
results_dir <- "results/results_breastcanceronly/"
source("../../data_extraction/extract.R")
# Re-extract one dataset and draw tSNE embeddings of its samples using only
# the features selected in iteration 1 (read back from the saved
# "<dataset>_<criteria>_features.csv"), then a second tSNE via
# psdR::compare_methods on CPM-normalised counts.
# The argument list mirrors the entries of dataset_pipeline_arguments so it
# can be invoked with do.call (see the loop below).
plot_tsne_iter1 <- function(phenotype_file_name, 
read_count_dir_path, read_count_file_name,
skip_row_count = 0, row_count = -1,
na_strings = "NA",
classification_criteria, filter_expression, classes,
extracted_count_file_name = "read_counts.txt",
output_label_file_name = "output_labels.txt",
dataset_id, cores = 16,
results_dir_path = "results_breastcanceronly"){
print(paste("Pipeline Execution on", dataset_id, classification_criteria))
# Prefix the per-criteria file names so different criteria don't collide.
extracted_count_file_name <- paste(classification_criteria, extracted_count_file_name, sep = "_")
output_label_file_name <- paste(classification_criteria, output_label_file_name, sep = "_")
# NOTE(review): setwd() inside a function mutates global state and is not
# restored on exit; consider old <- setwd(...); on.exit(setwd(old)).
setwd("~/UNSW/VafaeeLab/bloodbased-pancancer-diagnosis/")
# extract_data() is sourced from ../../data_extraction/extract.R; from the
# usage below it returns a list of (count matrix, output labels) — confirm.
data_list <- extract_data(phenotype_file_name, read_count_file_name, read_count_dir_path,
skip_row_count, row_count, na_strings, classification_criteria, filter_expression,
extracted_count_file_name, output_label_file_name)
raw_data_dim <- dim(data_list[[1]])
x <- data_list[[1]]
# Transpose so rows are samples and columns are features.
x <- as.data.frame(t(as.matrix(x)))
output_labels <- data_list[[2]]
# Iteration-1 feature mask: the second line of the saved features file
# (skip = 1, nrows = 1), one 0/1 flag per feature column.
features_file <- paste(dataset_id, classification_criteria, "features.csv", sep = "_")
features_info <- read.table(get_file_path(features_file, results_dir), sep = ',', skip = 1, nrows = 1) %>%
select(-c(1,2))
x_selected <- x[, features_info == 1]
# Fixed seed: Rtsne is stochastic.
set.seed(1)
tsne_result <- Rtsne::Rtsne(x_selected, perplexity = 3)
tsne_df <- data.frame(x = tsne_result$Y[,1], y = tsne_result$Y[,2], Colour = output_labels$Label)
plot_title <- paste(dataset_id, classification_criteria, "tSNE embeddings")
tsne_plot <- ggplot2::ggplot(tsne_df) +
ggplot2::geom_point(ggplot2::aes(x = x, y = y, colour = Colour)) +
ggplot2::labs(colour = "Classes", title = plot_title) +
ggplot2::xlab("Dimension 1") +
ggplot2::ylab("Dimension 2")
plot_file_name <- paste(dataset_id, "tsne_plot.png", sep = "_")
ggplot2::ggsave(plot_file_name, tsne_plot)
# Back to features-in-rows orientation for psdR::compare_methods.
x <- as.data.frame(t(as.matrix(x)))
classes <- output_labels$Label
psdR::compare_methods(x, classes,
c('CPM'),
perplexity = 3,
plot_file_name = paste("psd", plot_file_name, sep = "_"),
plot_colour_label = "Classes",
plot_title = "tSNE embeddings")
# NOTE(review): leftover debug print.
print('test')
}
# Run the plot only for pipeline-argument entries 20..22.
for (dparg in dataset_pipeline_arguments[c(20:22)]) {
do.call(plot_tsne_iter1, dparg)
print(class(dparg))
print(str(dparg))
}
|
402d895274ab9c4a9a4f2189dffde6015715c237
|
dfc09c7ef198fee792872212b0d557c202988c43
|
/hSap/gffView.R
|
aff13406a8bac7ec9e7d666699ef96f03e4884fd
|
[] |
no_license
|
jamidifilippo/mscResearch
|
79e6b141e387c8bfb170a160acb905d9ca678af2
|
1127823e51452e2064465357ce7888aac0184cca
|
refs/heads/master
| 2020-06-05T04:53:09.307999
| 2019-09-04T09:33:51
| 2019-09-04T09:33:51
| 192,319,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
gffView.R
|
#!/usr/bin/env Rscript
# Print a quick summary of the GFF file named by the single CLI argument.

args <- commandArgs(TRUE)
if (length(args) != 1) {
  stop("incorrect number of args")
}

library(rtracklayer)
library(Biostrings)

# Summarise a GFF file: first rows, row count, column names, the feature
# type levels, and the last few distinct sequence ids.
x <- function(file = args[1]) {
  gff <- readGFF(file)
  summaries <- list(
    head(gff),
    nrow(gff),
    colnames(gff),
    levels(gff$type),
    tail(unique(gff$seqid))
  )
  for (item in summaries) {
    print(item)
  }
}

x()
|
77f1df57fe294eeef99a8ea11f39739f84db1c81
|
05a54772dc8837743fb69c75147f26cf0ca4031d
|
/man/JSconsole-package.Rd
|
394b068a3b291eee0ebf18b1d3cb45d7346b3e06
|
[] |
no_license
|
stla/JSconsole
|
871532102a48b9f0face672407a497ffb5181f87
|
966d708d2b59ebdb97d64227e5090baf54af6d89
|
refs/heads/master
| 2022-12-27T11:55:19.265904
| 2020-10-08T06:46:02
| 2020-10-08T06:46:02
| 298,496,056
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
JSconsole-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JSconsole-package.R
\docType{package}
\name{JSconsole-package}
\alias{JSconsole-package}
\title{JSconsole addin}
\description{
This package provides an RStudio addin that sends JavaScript code to the
V8 console. To run the addin, open a JavaScript file, optionally select the
lines you want to send, and choose \code{JSconsole} from the Addins menu
within RStudio.
}
|
10055f9afdaccbbc2093e062c07bae2ee0328c18
|
70ff2dd600c75b5f14a76b759cf2bfebab8defd2
|
/Modelo.R
|
402a779f229ce41921e434e25df18f8f6fd5354d
|
[] |
no_license
|
andremenezees/ReinforcementLearningTicTacToe
|
8c8a6ba78106814e96af9935e2abc9b9feb4ce27
|
8a4efe28b33ce43b2df32a27652257dcb1992627
|
refs/heads/master
| 2022-12-08T06:40:58.381761
| 2020-08-29T00:58:45
| 2020-08-29T00:58:45
| 291,164,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
Modelo.R
|
# Tabular Q-learning on tic-tac-toe game transitions with the
# ReinforcementLearning package.
# NOTE(review): installing on every run is wasteful — consider guarding with
# if (!requireNamespace("ReinforcementLearning", quietly = TRUE)).
install.packages("ReinforcementLearning")
library("ReinforcementLearning")
# alpha = learning rate, gamma = discount factor, epsilon = exploration rate.
control <- list(alpha = 0.2, gamma = 0.4, epsilon = 0.1)
# 'tictactoe' is presumably the example dataset shipped with the package —
# confirm; the named columns map state, action, reward and next state.
# iter = 2 repeats the learning pass twice over the data.
modelo <- ReinforcementLearning(tictactoe, s = "State", a = "Action",
r="Reward", s_new = "NextState",
iter = 2, control = control)
|
77b2158e0a0a69fe8a8bd0b7ba66f986bd534151
|
d3410af0856f5ed552896a2bcd51548e5dd312eb
|
/man/charitable.Rd
|
fb55b11d27a169733173116c74bc361095a8a14a
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lnsongxf/experimentdatar
|
4f0810c49d29656a2757771eb843480a8bda7867
|
f71a9d072aabadf4da95e71baa757842a2d295c9
|
refs/heads/master
| 2021-01-01T09:48:14.429333
| 2019-02-11T12:15:59
| 2019-02-11T12:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 518
|
rd
|
charitable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExperimentData.R
\docType{data}
\name{charitable}
\alias{charitable}
\title{charitable data}
\format{A tibble with variables:
\describe{
\item{TBA}{TBA}
}}
\source{
\url{https://github.com/gsbDBI/ExperimentData/tree/master/Charitable}
}
\usage{
charitable
}
\description{
Data used for the paper "Does Price Matter in Charitable Giving? Evidence
from a Large-Scale Natural Field Experiment" by Karlan and List (2007).
}
\keyword{datasets}
|
4500f30aa086c6b8c81926af71dea06221059241
|
393d5197702ff1c73873efe408c4b608bd6cdd7d
|
/UTEFA industry breakout group survey.R
|
e118e28709507ceeedbcca366b7ccc1b7c48a8e2
|
[] |
no_license
|
rhungc/UTEFA-
|
5d0a030d3b9705e843f8d5e3625689bfbf87d524
|
c2dc92315fc6d6f03d3b1c8c56b788e360878e34
|
refs/heads/main
| 2023-01-01T09:39:48.713895
| 2020-10-21T03:17:45
| 2020-10-21T03:17:45
| 305,886,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,225
|
r
|
UTEFA industry breakout group survey.R
|
# Score UTEFA members' ranked industry preferences from the sign-up survey:
# each member's 1st/2nd/3rd industry choice is worth 3/2/1 points, and
# industries are ranked by their summed score.
library(tidyverse)
library(ggplot2)
# NOTE(review): the file name really contains a space before ".csv" — confirm
# it matches the survey export on disk.
signup_rank <- read.csv("Industry Rank .csv")
## A table of people's first choices (worth 3 points each)
head(signup_rank)
choice1 <- signup_rank %>% select(Response.., Industry.choice.1) %>%
group_by(Industry.choice.1) %>%
arrange(Industry.choice.1) %>%
as_tibble() %>%
rename(Industry = Industry.choice.1)
choice1 <- choice1 %>% mutate(score = 3)
## A table of people's second choices (worth 2 points each)
choice2 <- signup_rank %>% select(Response.., Industry.choice.2) %>%
group_by(Industry.choice.2) %>%
arrange(Industry.choice.2) %>%
as_tibble() %>%
rename(Industry = Industry.choice.2)
choice2 <- choice2 %>% mutate(score = 2)
## A table of people's third choices (worth 1 point each)
## NOTE(review): the source column is "Industry.Choice.3" (capital C), unlike
## choices 1 and 2 — this presumably mirrors the CSV header; confirm.
choice3 <- signup_rank %>% select(Response.., Industry.Choice.3) %>%
group_by(Industry.Choice.3) %>%
arrange(Industry.Choice.3) %>%
as_tibble() %>%
rename(Industry = Industry.Choice.3) %>%
mutate(score = 1)
## Stack the three scored tables into one long (response, industry, score) table
preference <- rbind(choice1,choice2, choice3)
## Sum scores per industry and sort industries by total preference score
result <- aggregate(preference$score,
                    by = list(Industry = preference$Industry),
                    FUN = sum) %>%
  arrange(desc(x)) %>%
  rename(preference_score = x)
View(result)
|
f751adbfe7785c6118b44e3520c5f4456c29c9ba
|
0128be0f0a6ac91173df430de36aa7903dea7071
|
/R/GBS_QC.R
|
9cc63334568a2b58b8e573e8aec36e0f169bf33e
|
[
"MIT"
] |
permissive
|
solgenomics/sgn
|
f564f0da7fc35a9c127e644d948adc32a9d7bdf9
|
db87e84fc65803cbb4d3fa0c2e46279521650f92
|
refs/heads/master
| 2023-08-31T00:17:49.476691
| 2023-08-30T13:49:46
| 2023-08-30T13:49:46
| 644,423
| 53
| 31
|
MIT
| 2023-09-14T17:11:21
| 2010-05-03T13:50:53
|
PLpgSQL
|
UTF-8
|
R
| false
| false
| 3,629
|
r
|
GBS_QC.R
|
#!/usr/bin/env Rscript
# GBS genotype QC: report the per-sample missing-call rate.
#
# Usage (as in the original header comment):
#   R --slave --args output_test00.txt qc_output.txt < GBS_QC.R
#
# Input : tab-separated genotype matrix with no header; column 1 holds marker
#         ids and columns 2..n hold samples. Missing genotypes are coded "-9".
# Output: one "Sample <j> missing rate is <rate>" line appended to the report
#         file per sample, plus a histogram of the rates written to
#         ./documents/img/MissingRate.png.
# NOTE(review): ~50 lines of commented-out legacy accession/plot-merging code
# were removed from the tail of this script; recover from VCS if needed.

myarg <- commandArgs()
cat(myarg, "\n")
m <- length(myarg)
cat(m, "\n")

# Positional arguments (offsets match the invocation shown above).
f_in <- myarg[4]
f_out <- myarg[5]

data.gbs <- read.csv(f_in, sep = "\t", header = FALSE)
n <- dim(data.gbs)[2]

# Pre-allocate the per-sample missing-rate vector instead of growing it one
# element at a time inside the loop.
data.cnr <- numeric(n - 1)
for (i in 2:n) {
  j <- i - 1
  # Count "-9" entries; na.rm = TRUE mirrors the original
  # length(which(... == "-9")) idiom, which silently ignored NA comparisons.
  cnr <- sum(data.gbs[, i] == "-9", na.rm = TRUE) / length(data.gbs[, i])
  data.cnr[j] <- cnr
  cat("Sample", j, "missing rate is", cnr, "\n", file = f_out, sep = " ",
      append = TRUE)
}

# Histogram of the per-sample missing rates.
png(file = "./documents/img/MissingRate.png")
hist(data.cnr)
dev.off()

quit("yes")
|
1471ef9eef177c6d2d45ee5b272fb21e45860988
|
d0893f6e1b7ee85dd0ad8973fa7b0176707f68a9
|
/R/numberofclusters.R
|
07e0e93ee840eaa6facb429271c3b3c2619675eb
|
[] |
no_license
|
Displayr/flipCluster
|
0a75b0a66663f46dc6353402914a0a2777175cdf
|
d6457f5771459cae8af2c2d79fb6c17c0913d4e2
|
refs/heads/master
| 2023-06-15T07:35:29.514490
| 2023-06-13T09:35:24
| 2023-06-13T09:35:24
| 69,005,123
| 2
| 2
| null | 2023-04-20T11:14:49
| 2016-09-23T08:26:03
|
R
|
UTF-8
|
R
| false
| false
| 2,232
|
r
|
numberofclusters.R
|
# NOTE(review): this entire file is commented out — selectkMeans() below is
# dead code retained for reference. It sketched choosing the number of
# k-means clusters over [min.n.clusters, max.n.clusters] by collecting
# omega-squared, Calinski-Harabasz and several split-half / bootstrap
# replicability statistics per cluster count. Re-enabling it would require
# restoring its helpers (kmeans2, replicability, split.half.replication),
# which are not defined in this file.
#
# selectkMeans <- function(x, min.n.clusters = 2,max.n.clusters = 8, init.centers = NULL, n.starts = 100, iter.max = 100, ignore.error = FALSE,n.tests = 1000, assumption = "sample",save = TRUE)
# {x <- as.matrix(x)
#     result <- statistics <- matrix(NA,nrow = max.n.clusters - min.n.clusters + 1, ncol = 15,
#         dimnames = list(clusters = min.n.clusters:max.n.clusters,c("omega.square","Calinski.Harabasz","Split.1","Split.20","Split.100","Split.1000","Rep.1","Rep.20","Rep.100","Rep.1000","Rep.5.Med","Rep.20.Med","Rep.100.Med","Rep.1000.Med","Split.Traditional")))
#     for (i in min.n.clusters:max.n.clusters)
#     {
#         kMeans.solution <- kmeans2(x, i, iter.max, n.starts)#kMeans(x, n.clusters = i, n.starts = n.starts , iter.max = iter.max)
#         kMeans.summary <- summary(kMeans.solution,x) #summary.kMeans(kMeans.solution)
#         r <- i - min.n.clusters + 1
#         result[r,1] <- kMeans.summary$omega.squared
#         result[r,2] <- kMeans.summary$Calinski.Harabasz
#         replic <- replicability(kMeans.solution, x, iter.max = 100, n.tests = n.tests, assumption = "sample", method="split-half")
#         result[r,3] <- sample(replic$all,1)
#         result[r,4] <- quantile(sample(replic$all,20),0.05)
#         result[r,5] <- quantile(sample(replic$all,100),0.05)
#         result[r,6] <- replic$statistic
#         split.replic <- replic
#         replic <- replicability(kMeans.solution, x, iter.max = 100, n.tests = n.tests, assumption = "sample",method="bootstrap")
#         #replicability <- function(object, x, iter.max = 100, n.tests = 1000, nstart = 100, nstart.classify = 1, assumption = "sample", method = "bootstrap")
#         result[r,7] <- sample(replic$all,1)
#         result[r,8] <- quantile(sample(replic$all,20),0.05)
#         result[r,9] <- quantile(sample(replic$all,100),0.05)
#         result[r,10] <- replic$statistic
#         result[r,11] <- quantile(sample(replic$all,5),0.5)
#         result[r,12] <- quantile(sample(replic$all,20),0.5)
#         result[r,13] <- quantile(sample(replic$all,100),0.5)
#         result[r,14] <- quantile(replic$all,0.5)
#         result[r,15] <- split.half.replication(kMeans.solution,x, iter.max,n.starts)}
#     list(split.replic=split.replic,replic =replic, table = result)}
#
# #zX <- matrix(runif(1000),ncol=4)
# #selectkMeans(zX)
#
|
c4796f9aa729aaa8a02b75ac93764acef1b74907
|
7285409708ec8be4057a2cd535aef5a96eb84c8c
|
/254a82f9402ff85ce4e4afe97abb2c72930ed01f/types.R
|
8e351416eee3e4b2d58adcdca2cf0f8edadc77ad
|
[] |
no_license
|
mengxingwu/OCEAN5098
|
c7e9196cb09720f559df6c0fedbc44b8bdb23c6f
|
86ca087114a59d1497f528c1818dabbcf5700133
|
refs/heads/main
| 2023-08-11T04:46:33.209493
| 2021-10-16T06:36:53
| 2021-10-16T06:36:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,561
|
r
|
types.R
|
# Worked examples of R data types, coercion, factors, subsetting, data frames
# and lists (course notes). Rewritten so the whole file parses and sources
# cleanly; two defects fixed (see BUG FIX / parse-error notes below).

## Atomic modes and types ----
x <- c(1.0, -3.4, 2, 140.1)  # numeric vector, stored as double
typeof(x)
mode(x)
x <- 4    # a bare numeric literal is double by default
typeof(x)
x <- 4L   # the L suffix makes an integer
typeof(x)
x <- c("bubul", "magpie", "spoonbill", "barbet")
typeof(x)

## Arithmetic needs numeric operands ----
x <- 3
y <- 5.3
x + y
x <- "3"
y <- "5.3"
# not run: x + y
###########################################################
# Error in x + y: non-numeric argument to binary operator #
###########################################################

## Logicals ----
x <- c(TRUE, FALSE, FALSE, TRUE)
x1 <- c(1, 0, 0, 1)
x2 <- as.logical(c(1, 0, 0, 1))
# OR: x3 <- as.logical(c(1,0,0,1))

## Factors ----
a <- c("M", "F", "F", "U", "F", "M", "M", "M", "F", "U")
typeof(a)   # mode character
class(a)    # class character
a.fact <- as.factor(a)
class(a.fact)   # class factor
mode(a.fact)
typeof(a.fact)  # factors are stored as integer level codes
a.fact
attributes(a.fact)
levels(a.fact)
factor(a, levels = c("U", "F", "M"))  # impose an explicit level order

# Subsetting keeps all original factor levels, even unused ones.
iris.sel <- subset(iris, Species == "setosa" | Species == "virginica")
levels(iris.sel$Species)  # 3 species are still there
# boxplot(Petal.Width ~ Species, iris.sel, horizontal = TRUE)
rownames(iris.sel) <- seq(length = nrow(iris.sel))

## Missing values: NA propagates, NULL is absence ----
x <- c(23, NA, 1.2, 5)
y <- c(23, NULL, 1.2, 5)  # NULL elements are dropped, so y has length 3
mean(x)  # NA: the missing value propagates through the mean
mean(y)  # fine: NULL never entered the vector

## Vector indexing ----
x <- c(674, 4186, 5308, 5083, 6140, 6381)
x
x[3]
x[c(1, 3, 4)]
x[2:4]
x[2] <- 0
x

# Mixing types coerces the whole vector to the most general type.
x <- c("all", "b", "olive")
x
x <- c(1.2, 5, "Rt", "2000")
typeof(x)

## Matrices and arrays ----
m <- matrix(runif(9, 0, 10), nrow = 3, ncol = 3)
m
m <- array(runif(27, 0, 10), c(3, 3, 3))
m

## Data frames ----
name <- c("a1", "a2", "b3")
value1 <- c(23, 4, 12)
value2 <- c(1, 45, 5)
dat <- data.frame(name, value1, value2)
dat
str(dat)         # provide structure
attributes(dat)  # provide attributes
names(dat)       # extract column names
rownames(dat)    # extract row names

## Lists ----
A <- data.frame(
  x = c(7.3, 29.4, 29.4, 2.9, 12.3, 7.5, 36.0, 4.8, 18.8, 4.2),
  y = c(5.2, 26.6, 31.2, 2.2, 13.8, 7.8, 35.2, 8.6, 20.3, 1.1)
)
B <- c(TRUE, FALSE)
C <- c("apples", "oranges", "round")
my.lst <- list(A = A, B = B, C = C)
str(my.lst)
names(my.lst)
my.lst$A
my.lst[[1]]
class(my.lst[[1]])
# BUG FIX: the original wrote list(A, B, D) with an undefined object D, which
# aborted the script with "object 'D' not found"; the untagged-list demo uses
# the three objects defined above.
lst.notags <- list(A, B, C)
lst.notags
names(lst.notags)  # NULL: the elements were not tagged

# Model objects are just lists too.
M <- lm(y ~ x, A)
str(M)
names(M)
str(M$qr)
M$qr$rank

## Coercion ----
y <- c("23.8", "6", "100.01", "6")
y.c <- as.numeric(y)
y.c
as.integer(y)
numchar <- as.character(y.c)
numchar
numfac <- as.factor(y)
numfac
charfac <- as.factor(y.c)
charfac

# Coercion-function reference. The original listed these as bare calls on
# shared lines (plus a stray markdown fence), which is a parse error that
# made the whole file unsourceable; kept as comments instead:
# as.numeric(), as.double()  # coerce to double
# as.integer()               # coerce to integer
# as.character()             # coerce to character
# as.logical()               # coerce to Boolean (logical: TRUE | FALSE)
# as.factor()                # coerce to factor
# as.Date()                  # coerce to date
# as.data.frame()            # coerce to data frame
# as.list()                  # coerce to list
|
5e9320695fa7dff3e9199260018b1a3d114f2654
|
ec65e719d4363226bf16072986de93932ac88357
|
/code/R.R
|
3de10e692a82dd4995f1082a15b21426abd79720
|
[
"MIT"
] |
permissive
|
bigsk05/FibonacciSequence
|
9fad3ea9ab3be36f7b2a85d85f2da1d38493509c
|
cb01cae8f577cc997749f0986449fc6da618e13f
|
refs/heads/master
| 2023-06-19T22:21:26.932892
| 2021-07-18T06:46:53
| 2021-07-18T06:46:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
R.R
|
#!/usr/bin/env Rscript
# calc(n): the n-th Fibonacci number, with F(0) = 0 and F(1) = 1.
#
# Rewritten iteratively: the original naive double recursion is exponential
# in n, which made the n = 0..29 benchmark below needlessly slow. Results
# are unchanged, including the n <= 1 base case (the function returns n
# itself there, so negative inputs pass through exactly as before).
calc <- function(n) {
  if (n <= 1) {
    return(n)
  }
  prev <- 0
  cur <- 1
  # Advance the (F(k-1), F(k)) pair n - 1 times to reach F(n).
  for (step in seq_len(n - 1)) {
    nxt <- prev + cur
    prev <- cur
    cur <- nxt
  }
  cur
}
# Benchmark driver: evaluate calc(n) for n = 0..29 and print the elapsed
# wall-clock seconds wrapped in square brackets, e.g. "[1.23]".
main <- function() {
  started <- proc.time()
  for (n in 0:29) {
    calc(n)
    # print(calc(n))
  }
  finished <- proc.time()
  # Element 3 of a proc.time() difference is the elapsed (wall) time.
  elapsed <- (finished - started)[3][[1]]
  print(paste0("[", elapsed, "]"))
}

main()
|
2582e58865ecfd5b82cb62bd2b04fc7f930ace30
|
1ad58f3b10a5b5f2566605bbdd77ed1670c40963
|
/cachematrix.R
|
515e50a96c4cefd7bffcfe81fb6316a40f056264
|
[] |
no_license
|
magusverma/ProgrammingAssignment2
|
6ac7e212af83abf78c0515bac54762f6062b3c6a
|
541e1f6206626cc8facc008c6e3d33c619c371c6
|
refs/heads/master
| 2020-12-25T16:02:32.629596
| 2014-06-22T20:32:51
| 2014-06-22T20:32:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
cachematrix.R
|
## Cache-aware matrix wrapper (Coursera R programming, week 2 assignment).
##
## makeCacheMatrix(x) stores a matrix together with a memoised inverse and
## returns a list of accessors: set/get for the matrix itself and
## setsolve/getsolve for the cached inverse. Replacing the matrix via set()
## invalidates any previously cached inverse.
## Example: cm <- makeCacheMatrix(); cm$set(matrix(1:4, 2, 2))
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() x
  setsolve <- function(inverse) cached_inverse <<- inverse
  getsolve <- function() cached_inverse

  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix,
## computing it at most once: a previously cached inverse is reused (with a
## "getting cached data" message); otherwise solve() runs and the result is
## stored back in the cache via setsolve(). Extra arguments are forwarded to
## solve(). Verify by calling cacheSolve(x) twice, then
## cacheSolve(x) %*% x$get() which should give the identity matrix.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
59370800183b796deb116845a2b525b7a8a50073
|
e2ac77e6bb5fc9e2169f1b4f827d3108550078d1
|
/Educational/R/Coursera/Getting and Cleaning Data/Week 1/quiz1.R
|
58c314c6fd1a6beeba57a24ba38af8cb7129f9fa
|
[] |
no_license
|
Rajmirukula/programming
|
c2ab4dbe5ee6ca78be6661800eb6e334c71501ee
|
2801e8c641b101494ee8586ca28f50116dffb261
|
refs/heads/master
| 2020-07-03T16:42:06.104576
| 2016-10-02T23:12:14
| 2016-10-02T23:12:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,338
|
r
|
quiz1.R
|
# Ethan Petuchowski
# 6/2/14
# Coursera "Getting and Cleaning Data", week 1 quiz: download and query four
# data sources (CSV, Excel, XML, and a large CSV read with data.table::fread).
#########
#  CSV  #
#########
# Download the 2006 microdata survey about housing for the state of Idaho
housingUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("/Users/ethan/code/non_apple/programming/Educational/R/Coursera/Getting and Cleaning Data/Week 1/")
list.files()
if (!file.exists("data")) { dir.create("data") }
list.files()
download.file(housingUrl, destfile="./data/housing.csv", method = "curl") # curl for https
dateDownloaded <- date() # 6/2/14 11:09:40
dataHousing <- read.csv("./data/housing.csv")
hist(dataHousing$VAL)
daClean <- dataHousing$VAL
daClean <- daClean[complete.cases(daClean)]
# Quiz answer: count of rows whose VAL code equals 24 (a survey code book
# category — meaning defined outside this script; confirm there).
d <- dataHousing[complete.cases(dataHousing$VAL),]$VAL
length(d[d == 24]) # => 53
###########
#  Excel  #
###########
# Download the Excel spreadsheet on Natural Gas Aquisition Program
gasUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(gasUrl, destfile="./data/gas.xlsx", method = "curl") # curl for https
gasFile <- "./data/gas.xlsx"
library(xlsx)
# Read only the quiz-specified sub-range of sheet 1.
rows <- 18:23
cols <- 7:15
dat <- read.xlsx(gasFile, sheetIndex=1, rowIndex=rows, colIndex=cols)
sum(dat$Zip*dat$Ext,na.rm=T)
#########
#  XML  #
#########
# Read the XML data on Baltimore restaurants
restaUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
restaFile <- "./data/restaurants.xml"
download.file(restaUrl, destfile=restaFile, method="curl")
library(XML)
xmlRest <- xmlTreeParse(restaFile, useInternalNodes=TRUE)
root <- xmlRoot(xmlRest)
# Count restaurants in zip code 21231 via an XPath query over all <zipcode>s.
zipcodes <- xpathSApply(root, "//zipcode", xmlValue)
twoOnes <- zipcodes[zipcodes == "21231"]
length(twoOnes) # => 127
###########
#  FREAD  #
###########
# 2006 microdata survey about housing for the state of Idaho
microUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
microFile <- "./data/micro.csv"
download.file(microUrl, destfile=microFile, method="curl")
library(data.table); library(microbenchmark)
DT <- fread(microFile)
DT$SEX <- as.numeric(DT$SEX)
# Time four ways of computing mean(pwgtp15) by SEX.
# NOTE(review): the first form, mean(DT$pwgtp15, by = DT$SEX), silently
# ignores `by` and returns a single overall mean — not a grouped mean.
system.time(replicate(100, mean(DT$pwgtp15,by=DT$SEX)))
system.time(replicate(100, sapply(split(DT$pwgtp15,DT$SEX),mean)))
system.time(replicate(100, tapply(DT$pwgtp15,DT$SEX,mean)))
system.time(replicate(100, DT[,mean(pwgtp15),by=SEX])) # this is fastest because it's a real data.table method
bf80839ca0f59a109f8bfd350d3577eedaa85a60
|
fc757ea51c4861201a7ec95c1f7ae7c6dcbac35b
|
/R/mobforest.output.R
|
d32fb309ebb14e261f97225cba6162785e379d78
|
[] |
no_license
|
RTIInternational/mobForest
|
0550d7be8b66d71c2e1a9d57fc5ebf51f11ebb67
|
26530e3ff46c91ef9e25744d9ea65c36a59f1356
|
refs/heads/master
| 2022-10-07T22:51:06.785414
| 2019-07-31T20:21:26
| 2019-07-31T20:21:26
| 116,028,774
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,873
|
r
|
mobforest.output.R
|
#' Model-based random forest object
#'
#' Container that bundles all random-forest results: predictions on
#' out-of-bag, learning and (optional) new test data, the variable
#' importance object, the model used, the error-distribution family and
#' the observed responses.
#'
#' @param oob_predictions Predictions on out-of-bag data.
#' @param general_predictions Predictions on learning data.
#' @param new_data_predictions Predictions on new test data.
#' @param varimp_object The variable importance object.
#' @param model_used The model used.
#' @param family A description of the error distribution and link function to be
#' used in the model.
#' @param train_response Response outcome of training data.
#' @param new_response Response outcome of test data.
#' @seealso \code{\linkS4class{prediction.output}},
#' \code{\linkS4class{varimp.output}}
#'
#' @export
mobforest.output <-
  function(oob_predictions, general_predictions, new_data_predictions,
           varimp_object, model_used, family, train_response,
           new_response = data.frame(matrix(0, 0, 0))) {
    # Every argument maps one-to-one onto a slot of the S4 class.
    new(
      "mobforest.output",
      oob_predictions = oob_predictions,
      general_predictions = general_predictions,
      new_data_predictions = new_data_predictions,
      varimp_object = varimp_object,
      model_used = model_used,
      family = family,
      train_response = train_response,
      new_response = new_response
    )
  }
#' @export
#' @rdname mobforest.output-class
#' @importFrom methods show
#' @aliases show,mobforest.output-method
#' @param object object of class \code{\linkS4class{mobforest.output}}
# Console summary for a mobforest.output: banner, tree count, model formula.
setMethod("show", "mobforest.output", function(object) {
rf <- object
cat("\tRandom Forest of Model Based Recursive Partitioning Trees\n\n")
# The variable-importance matrix holds one column per tree in the forest.
cat("Number of trees:", ncol( (rf@varimp_object)@varimp_matrix), "\n\n")
cat("Model Used:", rf@model_used, "\n\n")
})
|
daf57740529a41ddad090035d211b4a17abc8078
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2017/nonfatal_code/ckd/age_sex_split/age_sex_split.R
|
fd5f6975bfedf671667f328d8c9fc9e1a099a076
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,451
|
r
|
age_sex_split.R
|
#######################################################################################
### Date: 11/7/2016
### Project: GBD Nonfatal Estimation
#######################################################################################
###################
### Setting up ####
###################
# Age-sex splitting script: takes extraction rows reported only by age or
# only by sex, and produces age-sex specific rows by applying the observed
# sex ratio (in cases and in sample size) to the age-specific rows.
# NOTE(review): filepaths, cause, bundle, and request IDs were scrubbed to
# the placeholder symbol FILEPATH before publication; this script will not
# run until those are restored.
rm(list=ls())
if (Sys.info()["sysname"] == "Linux") {
j.root <- "FILEPATH"
} else {
j.root <- "FILEPATH"
}
#load packages, install if missing
require(data.table)
require(xlsx)
#set information
# Chained assignment: a_cause, bundle_id, request_num, and download_dir all
# receive the same (scrubbed) FILEPATH value.
a_cause <-
bundle_id <-
request_num <-
download_dir <- FILEPATH
upload_dir <- FILEPATH
lit_dir <- FILEPATH
#######################################################################################################################################
###########################
### Age-Sex Splitting ####
###########################
#Load Data
df <- fread(paste0(upload_dir, FILEPATH, ".csv"), stringsAsFactors=FALSE)
df[, seq := as.character(seq)] #Allow missing seq for new obs
df[, note_modeler := as.character(note_modeler)]
df_split <- copy(df)
#subset to data needing age-sex splitting
# Back-calculate sample_size from mean/SE where missing (binomial for
# prevalence, Poisson-style for incidence), flagging the rows in
# note_modeler first so the provenance of the imputation is recorded.
df_split[is.na(sample_size), note_modeler := paste0(note_modeler, " | sample size back calculated to age sex split")]
df_split[measure == "prevalence" & is.na(sample_size), sample_size := mean*(1-mean)/standard_error^2]
df_split[measure == "incidence" & is.na(sample_size), sample_size := mean/standard_error^2]
df_split[is.na(cases), cases := sample_size * mean]
df_split <- df_split[!is.na(cases),]
# Keep only "age" / "sex" specific rows from sources that have NOT already
# been split (i.e. no "age,sex" rows exist for that nid/group/measure/year).
df_split[, split := length(specificity[specificity == "age,sex"]), by = list(nid, group, measure, year_start, year_end)]
df_split <- df_split[specificity %in% c("age", "sex") & split == 0,]
#calculate proportion of cases male and female
df_split[, cases_total:= sum(cases), by = list(nid, group, specificity, measure)]
df_split[, prop_cases := round(cases / cases_total, digits = 3)]
#calculate proportion of sample male and female
df_split[, ss_total:= sum(sample_size), by = list(nid, group, specificity, measure)]
df_split[, prop_ss := round(sample_size / ss_total, digits = 3)]
#calculate standard error of % cases & sample_size M and F
df_split[, se_cases:= sqrt(prop_cases*(1-prop_cases) / cases_total)]
df_split[, se_ss:= sqrt(prop_ss*(1-prop_ss) / ss_total)]
#estimate ratio & standard error of ratio % cases : % sample
# SE of the ratio uses the standard delta-method approximation for a
# quotient of two independent proportions.
df_split[, ratio := round(prop_cases / prop_ss, digits = 3)]
df_split[, se_ratio:= round(sqrt( (prop_cases^2 / prop_ss^2) * (se_cases^2/prop_cases^2 + se_ss^2/prop_ss^2) ), digits = 3)]
#Save these for later
df_ratio <- df_split[specificity == "sex", list(nid, group, sex, measure, ratio, se_ratio, prop_cases, prop_ss, year_start, year_end)]
#Create age,sex observations
# Duplicate every age-specific row once per sex; seq is blanked because
# these are brand-new rows, not revisions of existing database rows.
age.sex <- copy(df_split[specificity == "age"])
age.sex[,specificity := "age,sex"]
age.sex[,seq := ""]
age.sex[,c("ratio", "se_ratio", "split", "cases_total", "prop_cases", "ss_total", "prop_ss", "se_cases", "se_ss") := NULL]
male <- copy(age.sex[, sex := "Male"])
female <- copy(age.sex[, sex := "Female"])
age.sex <- rbind(male, female)
#Merge sex ratios to age,sex observations
age.sex <- merge(age.sex, df_ratio, by = c("nid", "group", "sex", "measure", "year_start", "year_end"))
#calculate age-sex specific mean, standard_error, cases, sample_size
# SE must be updated BEFORE mean is overwritten, since the formula uses the
# pre-split mean.
age.sex[, standard_error := round(sqrt(standard_error^2 * se_ratio^2 + standard_error^2 * ratio^2 + se_ratio^2 * mean^2), digits = 4)]
age.sex[, mean := mean * ratio]
age.sex[, cases := round(cases * prop_cases, digits = 0)]
age.sex[, sample_size := round(sample_size * prop_ss, digits = 0)]
age.sex[,note_modeler := paste(note_modeler, "| age,sex split using sex ratio", round(ratio, digits = 2))]
age.sex[,c("ratio", "se_ratio", "prop_cases", "prop_ss") := NULL]
##create unique set of nid and group to pull from age.sex
age.sex.m <- age.sex[,c("nid","group", "measure"), with=F]
age.sex.m <- unique(age.sex.m, by=c("nid","group", "measure"))
##merge to get the parent rows from age sex split
# Parent (pre-split) rows are retained but marked group_review = 0 so the
# split children are used in modeling instead.
parent <- merge(age.sex.m, df, by= c("nid", "group", "measure"))
parent[specificity=="age" | specificity=="sex",group_review:=0]
parent[, note_modeler := paste0(note_modeler, "| parent data, has been age-sex split")]
##final dataset
total <- rbind(parent, age.sex)
##Save data
write.xlsx(total, paste0(upload_dir,FILEPATH, ".xlsx"), row.names=F, showNA=F, sheetName="extraction")
|
2cf6863f895b044ec0f80b098bbf7340340f6d68
|
3df381fdd831150bec7d97a705d069e1ba892e13
|
/R/validate_arguments.R
|
76bf00b99f57a716ae28fa505b1aef3d341a135b
|
[
"MIT"
] |
permissive
|
king8w/ecocomDP
|
8e4f451ac69356c6bfe4e3c405c0a02a7ccab6a0
|
7adf6af8070d35a9123138fc35e2b9a6c4893125
|
refs/heads/master
| 2023-03-20T05:57:39.115850
| 2021-03-13T00:26:59
| 2021-03-13T00:26:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,965
|
r
|
validate_arguments.R
|
#' Validate arguments of ecocomDP functions
#'
#' @description
#' Validate input arguments to ecocomDP functions.
#'
#' @param fun.name
#' (character) Name of function from which \code{validate_arguments()} is
#' called.
#' @param fun.args
#' (named list) Arguments passed to calling function and formatted as
#' \code{as.list(environment())}.
#'
#' @details
#' Validation checks are function specific.
#'
validate_arguments <- function(fun.name, fun.args) {
# Parameterize --------------------------------------------------------------
# Arguments captured via as.list(environment()) represent *missing* formals
# as the empty symbol; identical(X, quote(expr=)) detects those, and they
# are normalized to NULL so the NULL-checks below treat "missing" and
# "explicitly NULL" the same way.
use_i <- sapply(fun.args, function(X) identical(X, quote(expr=)))
fun.args[use_i] <- list(NULL)
# Table of supported ecocomDP tables/columns, shipped with the package.
criteria <- data.table::fread(
system.file('validation_criteria.txt', package = 'ecocomDP'))
# search_data() -------------------------------------------------------------
if (fun.name == "search_data") {
# text
if (!is.null(fun.args$text) & !is.character(fun.args$text)) {
stop("Input 'text' must be of class 'character'.", call. = F)
}
# taxa
if (!is.null(fun.args$taxa) & !is.character(fun.args$taxa)) {
stop("Input 'taxa' must be of class 'character'.", call. = F)
}
# num.taxa - numeric vector of length 2 (min, max)
if (!is.null(fun.args$num.taxa)) {
if (!is.numeric(fun.args$num.taxa)) {
stop("Input 'num.taxa' must be of class 'numeric'.", call. = F)
}
if (length(fun.args$num.taxa) != 2) {
stop(
"Input 'num.taxa' must have a minimum and maximum value.",
call. = F)
}
}
# years - numeric vector of length 2 (min, max)
if (!is.null(fun.args$years)) {
if (!is.numeric(fun.args$years)) {
stop("Input 'years' must be of class 'numeric'.", call. = F)
}
if (length(fun.args$years) != 2) {
stop(
"Input 'years' must have a minimum and maximum value.",
call. = F)
}
}
# sd.between.surveys - numeric vector of length 2 (min, max)
if (!is.null(fun.args$sd.between.surveys)) {
if (!is.numeric(fun.args$sd.between.surveys)) {
stop(
"Input 'sd.between.surveys' must be of class 'numeric'.",
call. = F)
}
if (length(fun.args$sd.between.surveys) != 2) {
stop(
"Input 'sd.between.surveys' must have a minimum and maximum value.",
call. = F)
}
}
# geographic.area - numeric vector of length 4 (N, E, S, W bounds)
if (!is.null(fun.args$geographic.area)) {
if (!is.numeric(fun.args$geographic.area)) {
stop(
"Input 'geographic.area' must be of class 'numeric'.",
call. = F)
}
if (length(fun.args$geographic.area) != 4) {
stop(
paste0(
"Input 'geographic.area' must have North, East, South, and West ",
"coordinates." ),
call. = F)
}
}
# boolean.operator - case-insensitive "AND"/"OR"
if (!is.null(fun.args$boolean.operator)) {
if (!(tolower(fun.args$boolean.operator) %in% c("and", "or"))) {
stop(
"Valid inputs to 'boolean.operator' are: 'AND', 'OR'",
call. = F)
}
}
}
# validate_ecocomDP() -------------------------------------------------------
if (fun.name == "validate_ecocomDP") {
# data.path - Is a valid path
if (!is.null(fun.args$data.path)) {
if (!dir.exists(fun.args$data.path)) {
stop("Input 'data.path' doesn't exits.", call. = F)
}
}
# data.list - Is a named list containing only ecocomDP tables
if (!is.null(fun.args$data.list)) {
if (!is.list(fun.args$data.list)) {
stop("Input 'data.list' is not a list.", call. = F)
}
use_i <- names(fun.args$data.list) %in% unique(criteria$table)
if (any(!use_i)) {
stop(
"Input 'data.list' has unsupported tables: ",
paste(names(fun.args$data.list)[!use_i], collapse = ", "), call. = F)
}
}
}
# read_data() ---------------------------------------------------------------
if (fun.name == "read_data") {
# id - Is required
if (is.null(fun.args$id)) {
stop("Input 'id' is required.", call. = FALSE)
}
# Because inputs to read_data() can vary (i.e. can be a vector of id, list
# of id with associated arguments), they need to be converted to a
# consistent format for processing: a named list keyed by id, with any
# NEON-specific defaults (site/startdate/enddate) copied under each
# NEON product id and removed from the top level.
if (!is.list(fun.args$id)) {
empty_list <- vector(mode = "list", length(fun.args$id))
names(empty_list) <- unlist(fun.args$id)
fun.args$id <- empty_list
# List default NEON arguments directly under id if missing
# (NEON product ids match the pattern "DPx.digits.digits").
for (i in 1:length(fun.args$id)) {
if (stringr::str_detect(
names(fun.args$id)[i], "^DP.\\.[:digit:]+\\.[:digit:]+")) {
if (is.null(fun.args$id[[i]]$site)) {
fun.args$id[[i]]$site <- fun.args$site
}
if (is.null(fun.args$id[[i]]$startdate)) {
fun.args$id[[i]]$startdate <- fun.args$startdate
}
if (is.null(fun.args$id[[i]]$enddate)) {
fun.args$id[[i]]$enddate <- fun.args$enddate
}
}
}
# Remove NEON defaults from top level
fun.args$site <- NULL
fun.args$startdate <- NULL
fun.args$enddate <- NULL
}
# Validate general argument values (i.e. not associated with a specific
# id).
# path - Is valid
if (!is.null(fun.args$path)) {
if (!dir.exists(fun.args$path)) {
stop("Input 'path' (", fun.args$path, ") doesn't exist.", call. = FALSE)
}
}
# file.type - Is a supported type
if (!is.null(fun.args$file.type)) {
if (!(fun.args$file.type %in% c(".rda", ".csv"))) {
stop("Unsupported 'file.type'. One of '.rda', '.csv' is expected.",
call. = FALSE)
}
}
# check.size - Is logical
if (!is.null(fun.args$check.size) & !is.logical(fun.args$check.size)) {
stop("Unsupported 'check.size' input. Expected is TRUE or FALSE.",
call. = FALSE)
}
# nCores - Is iteger
# NOTE(review): the guard checks check.size for NULL but then validates
# nCores - presumably this should test fun.args$nCores; confirm upstream.
if (!is.null(fun.args$check.size)) {
if (!(fun.args$nCores %% 1 == 0)) {
stop("Unsupported 'nCores' input. Expected is an integer value.",
call. = FALSE)
}
}
# forceParallel - Is logical
# NOTE(review): same pattern - NULL-check is on check.size, not
# forceParallel; confirm intent.
if (!is.null(fun.args$check.size) & !is.logical(fun.args$forceParallel)) {
stop("Unsupported 'forceParallel' input. Expected is TRUE or FALSE.",
call. = FALSE)
}
# globally.unique.keys - Is logical
if (!is.null(fun.args$globally.unique.keys) &
!is.logical(fun.args$globally.unique.keys)) {
stop("Unsupported 'globally.unique.keys' input. Expected is TRUE or ",
"FALSE.", call. = FALSE)
}
# Validate each id and corresponding set of argument values.
invisible(
lapply(
seq_along(fun.args$id),
function(x) {
id <- names(fun.args$id)[x]
# id - Exists in the search_data() default output. If a newer revision
# exists, a warning is returned.
validate_id(id)
# For a NEON id, validate associated argument values
if (stringr::str_detect(id, "^DP.\\.[:digit:]+\\.[:digit:]+")) {
# site - Listed sites exist for a specified id ("all" skips check)
if (!is.null(fun.args$id[[x]]$site)) {
if (all(fun.args$id[[x]]$site != "all")) {
validate_site(fun.args$id[[x]]$site, id)
}
}
# startdate - Character of YYYY-MM format, and MM is 1-12
if (!is.null(fun.args$id[[x]]$startdate)) {
if (!is.na(fun.args$id[[x]]$startdate)) {
if (!stringr::str_detect(
fun.args$id[[x]]$startdate, "^[:digit:]{4}-[:digit:]{2}$")) {
stop("Unsupported 'startdate'. Expected format is YYYY-MM.",
call. = FALSE)
}
month <- as.integer(
stringr::str_extract(
fun.args$id[[x]]$startdate, "(?<=-)[:digit:]{2}$"))
if (!((month > 0) & (month <= 12))) {
stop("Unsupported 'startdate'. Expected format is YYYY-MM.",
call. = FALSE)
}
}
}
# enddate - Character of YYYY-MM format, and MM is 1-12
if (!is.null(fun.args$id[[x]]$enddate)) {
if (!is.na(fun.args$id[[x]]$enddate)) {
if (!stringr::str_detect(
fun.args$id[[x]]$enddate, "^[:digit:]{4}-[:digit:]{2}$")) {
stop("Unsupported 'enddate'. Expected format is YYYY-MM.",
call. = FALSE)
}
month <- as.integer(
stringr::str_extract(
fun.args$id[[x]]$enddate, "(?<=-)[:digit:]{2}$"))
if (!((month > 0) & (month <= 12))) {
stop("Unsupported 'enddate'. Expected format is YYYY-MM.",
call. = FALSE)
}
}
}
}
}))
# Return modified arguments (only read_data() returns; other branches
# validate by side effect and return NULL).
return(fun.args)
}
# read_from_files() ---------------------------------------------------------
if (fun.name == "read_from_files") {
# data.path - Is a valid path
if (!is.null(fun.args$data.path)) {
if (!dir.exists(fun.args$data.path)) {
stop("Input 'data.path' doesn't exits.", call. = F)
}
}
}
}
#' Validate data package/product identifier
#'
#' @param id
#' (character) A data package/product identifier for an ecocomDP dataset.
#'
#' @details
#' If invalid (i.e. not listed in the return of \code{search_data()}), then
#' an error is returned.
#'
#' If the exact \code{id} is not indexed, but it is an EDI data package,
#' then a set of logic determines if a newer version is indexed and
#' available.
#'
validate_id <- function(id) {
  # Ids that appear verbatim in the search index are valid as-is.
  indexed_ids <- suppressMessages(search_data())$id
  if (id %in% indexed_ids) {
    return(invisible(NULL))
  }
  # An id that does not look like an EDI package identifier
  # (scope.identifier.revision, optionally "knb-lter-" prefixed)
  # cannot be a stale revision of an indexed dataset, so it is invalid.
  looks_like_edi <- stringr::str_detect(
    id,
    "(^knb-lter-[:alpha:]+\\.[:digit:]+\\.[:digit:]+)|(^[:alpha:]+\\.[:digit:]+\\.[:digit:]+)")
  if (!looks_like_edi) {
    stop("Invalid identifier '", id, "' cannot be read.", call. = FALSE)
  }
  # Compare the id against the index with the trailing revision stripped.
  # If the base identifier is indexed under a newer revision, warn.
  indexed_bases <- stringr::str_extract(indexed_ids, ".+(?=\\.[:digit:]$)")
  id_base <- stringr::str_extract(id, ".+(?=\\.[:digit:]$)")
  if (id_base %in% indexed_bases) {
    requested_version <- stringr::str_extract(id, "[:digit:]$")
    indexed_version <- stringr::str_extract(
      indexed_ids[which(id_base == indexed_bases)],
      "[:digit:]$")
    if (as.numeric(indexed_version) > as.numeric(requested_version)) {
      warning("A newer version of '", id, "' is available.", call. = FALSE)
    }
  }
  invisible(NULL)
}
#' Validate site name (for NEON data products only)
#'
#' @param site
#' (character; NEON data only) A character vector of site codes to filter
#' data on. Sites are listed in the "sites" column of the
#' \code{search_data()} output.
#' @param id
#' (character) A data package/product identifier.
#'
#' @details
#' If invalid (i.e. not listed in the return of \code{search_data()}), then
#' an error is returned.
#'
validate_site <- function(site, id) {
  # Sites supported by this data product are stored as a comma-separated
  # string in the "sites" column of the search index.
  index <- suppressMessages(search_data())
  supported_sites <- unlist(
    stringr::str_split(index$sites[index$id == id], ","))
  unknown <- site[!(site %in% supported_sites)]
  if (length(unknown) > 0) {
    stop("Sites not available in ", id, ": ",
         paste(unknown, collapse = ", "),
         call. = FALSE)
  }
  invisible(NULL)
}
|
1ddc729a03da90e4ecf75f4583ec4f9f27291e2d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/powerMediation/examples/ssMediation.VSMc.logistic.Rd.R
|
b237ae011f19c4ab8b47cd8e6405aa2ff6e75e4d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
ssMediation.VSMc.logistic.Rd.R
|
# Auto-generated example script for powerMediation::ssMediation.VSMc.logistic:
# computes the sample size needed to detect a mediation effect in logistic
# regression (Vittinghoff, Sen & McCulloch 2009 method) at 80% power.
library(powerMediation)
### Name: ssMediation.VSMc.logistic
### Title: Sample size for testing mediation effect in logistic regression
###   based on Vittinghoff, Sen and McCulloch's (2009) method
### Aliases: ssMediation.VSMc.logistic
### Keywords: test
### ** Examples
# example in section 4 (page 545) of Vittinghoff et al. (2009).
# n=255
# b2 = log(OR) for the mediator; sigma.m = SD of mediator; p = prevalence
# of the binary predictor; corr.xm = predictor-mediator correlation.
ssMediation.VSMc.logistic(power = 0.80, b2 = log(1.5), sigma.m = 1, p = 0.5,
corr.xm = 0.5, alpha = 0.05, verbose = TRUE)
|
e5b55c500ef630ba1c99d292ff98ac2cd347227e
|
b94bde90fdb3e38483293d906c0b5f0669af647e
|
/simsem/man/tagHeaders.Rd
|
69a2c2cd9d0009241ec2ff31fffd3f180208cc76
|
[] |
no_license
|
pairach/simsem
|
c2da13f31af4b8ed986647320090bbd9edc0c400
|
8194f63851ed0c0dbd447726988b0a58619ec43a
|
refs/heads/master
| 2020-12-25T01:50:53.664082
| 2012-05-29T21:38:06
| 2012-05-29T21:38:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,207
|
rd
|
tagHeaders.Rd
|
\name{tagHeaders}
\alias{tagHeaders}
\alias{tagHeaders-methods}
\alias{tagHeaders,ANY-method}
\alias{tagHeaders,VirtualRSet-method}
\title{
Tag names to each element
}
\description{
Each element of a vector will be tagged with the name of the vector and the position of the element. Each element of a matrix will be tagged with the name of the matrix and the row and column positions of the element.
}
\usage{
tagHeaders(object, ...)
}
\arguments{
\item{object}{
The object to be tagged
}
\item{\dots}{
The additional arguments
}
}
\section{Methods}{
\describe{
\item{signature(object="VirtualRSet")}{
Each element of a vector will be tagged with the name of the vector and the position of the element. Each element of a matrix will be tagged with the name of the matrix and the row and column positions of the element. \emph{Y} means indicators on \emph{Y}-side. \emph{X} means indicators on \emph{X}-side. \emph{E} means endogenous factors. \emph{K} means exogenous factors.
}
}}
\value{
The object with the row, column, or element names.
}
\author{
Sunthud Pornprasertmanit (University of Kansas; \email{psunthud@ku.edu})
}
\examples{
# No example
}
|
b9f50ec0b073175455c5ebb6f8aaaf91363932b5
|
1a98fadbdbc7805c39c51534814664dc34717342
|
/man/qlogout.Rd
|
a1225aa8a4192d520e94cdc6d06874f80a0edd19
|
[] |
no_license
|
jtuomist/quiltr
|
6e613380f63962e4a6be7548adb71ba8b0906234
|
11a9342280e5f482a8d53dea9f8278385fb39831
|
refs/heads/master
| 2020-05-20T05:42:57.690081
| 2019-05-07T14:13:37
| 2019-05-07T14:13:37
| 185,413,347
| 0
| 0
| null | 2019-05-07T13:59:33
| 2019-05-07T13:59:33
| null |
UTF-8
|
R
| false
| true
| 267
|
rd
|
qlogout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auth.R
\name{qlogout}
\alias{qlogout}
\title{Log out of Quilt}
\usage{
qlogout()
}
\value{
Deletes your saved auth token
}
\description{
Log out of Quilt
}
\examples{
\dontrun{qlogout()}
}
|
a237f3e45df28444ccac774973807a326419d722
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sirt/examples/IRT.mle.Rd.R
|
4e9f15da79eaffec8db404b521855d89c922dfa9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,480
|
r
|
IRT.mle.Rd.R
|
library(sirt)
### Name: IRT.mle
### Title: Person Parameter Estimation
### Aliases: IRT.mle
### Keywords: Person parameters
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Generalized partial credit model
##D #############################################################################
##D
##D data(data.ratings1)
##D dat <- data.ratings1
##D
##D # estimate model
##D mod1 <- sirt::rm.facets( dat[, paste0( "k",1:5) ], rater=dat$rater,
##D pid=dat$idstud, maxiter=15)
##D # extract dataset and item parameters
##D data <- mod1$procdata$dat2.NA
##D a <- mod1$ipars.dat2$a
##D b <- mod1$ipars.dat2$b
##D theta0 <- mod1$person$EAP
##D # define item response function for item ii
##D calc.pcm <- function( theta, a, b, ii ){
##D K <- ncol(b)
##D N <- length(theta)
##D matrK <- matrix( 0:K, nrow=N, ncol=K+1, byrow=TRUE)
##D eta <- a[ii] * theta * matrK - matrix( c(0,b[ii,]), nrow=N, ncol=K+1, byrow=TRUE)
##D eta <- exp(eta)
##D probs <- eta / rowSums(eta, na.rm=TRUE)
##D return(probs)
##D }
##D arg.list <- list("a"=a, "b"=b )
##D
##D # MLE
##D abil1 <- sirt::IRT.mle( data, irffct=calc.pcm, theta=theta0, arg.list=arg.list )
##D str(abil1)
##D # WLE
##D abil2 <- sirt::IRT.mle( data, irffct=calc.pcm, theta=theta0, arg.list=arg.list, type="WLE")
##D str(abil2)
##D # MAP with prior distribution N(.2, 1.3)
##D abil3 <- sirt::IRT.mle( data, irffct=calc.pcm, theta=theta0, arg.list=arg.list,
##D type="MAP", mu=.2, sigma=1.3 )
##D str(abil3)
##D
##D #############################################################################
##D # EXAMPLE 2: Rasch model
##D #############################################################################
##D
##D data(data.read)
##D dat <- data.read
##D I <- ncol(dat)
##D
##D # estimate Rasch model
##D mod1 <- sirt::rasch.mml2( dat )
##D summary(mod1)
##D
##D # define item response function
##D irffct <- function( theta, b, ii){
##D eta <- exp( theta - b[ii] )
##D probs <- eta / ( 1 + eta )
##D probs <- cbind( 1 - probs, probs )
##D return(probs)
##D }
##D # initial person parameters and item parameters
##D theta0 <- mod1$person$EAP
##D arg.list <- list( "b"=mod1$item$b )
##D
##D # estimate WLE
##D abil <- sirt::IRT.mle( data=dat, irffct=irffct, arg.list=arg.list,
##D theta=theta0, type="WLE")
##D # compare with wle.rasch function
##D theta <- sirt::wle.rasch( dat, b=mod1$item$b )
##D cbind( abil[,1], theta$theta, abil[,2], theta$se.theta )
##D
##D #############################################################################
##D # EXAMPLE 3: Ramsay quotient model
##D #############################################################################
##D
##D data(data.read)
##D dat <- data.read
##D I <- ncol(dat)
##D
##D # estimate Ramsay model
##D mod1 <- sirt::rasch.mml2( dat, irtmodel="ramsay.qm" )
##D summary(mod1)
##D # define item response function
##D irffct <- function( theta, b, K, ii){
##D eta <- exp( theta / b[ii] )
##D probs <- eta / ( K[ii] + eta )
##D probs <- cbind( 1 - probs, probs )
##D return(probs)
##D }
##D # initial person parameters and item parameters
##D theta0 <- exp( mod1$person$EAP )
##D arg.list <- list( "b"=mod1$item2$b, "K"=mod1$item2$K )
##D # estimate MLE
##D res <- sirt::IRT.mle( data=dat, irffct=irffct, arg.list=arg.list, theta=theta0,
##D maxval=20, maxiter=50)
## End(Not run)
|
2f859b73a3d959f67870612ca065dbfc6ef5a732
|
29f8f3ee59c366ea408633d183614bc39b49b26d
|
/Duke_DGNN/[DGNN] incidence_rti_africa.R
|
7a32eec249479c2cacaae9a8076ffe38dfd46c24
|
[] |
no_license
|
souzajvp/analytical_codes
|
92db345dc75f128c2f25fb7b28f0891139ffea98
|
dcc49662253ba1dbd4f54b8c4caea40232632783
|
refs/heads/master
| 2023-05-23T06:06:12.058469
| 2021-06-07T18:11:00
| 2021-06-07T18:11:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,329
|
r
|
[DGNN] incidence_rti_africa.R
|
###################################################
#TEMPLATE_FOR _META_ANALYSIS_OF_DIAGNOSTIC_ACCURACY#
#this script follows a combination of guidelines proposed by Doebler and Holling, according to (http://cran.r-project.org/web/packages/mada/vignettes/mada.pdf)#
#
#
###################################################
#SETTING ENVIRONMENT
###################################################
#Load packages (after installed) with the library function
# Meta-analysis of road-traffic-injury (RTI) proportions in sub-Saharan
# Africa. Two outcomes are pooled throughout with metaprop (log-transformed
# proportions, random effects): RTI deaths / total RTI, and RTI / total
# trauma. The same analysis is then repeated stratified by country, injury
# type, age group, population type, and publication-year band.
# NOTE(review): the package vector below contains duplicates and uses the
# discouraged shorthand T for TRUE (character.only=T).
lapply(c("metafor","ggplot2","gridExtra" ,"psych", "RCurl", "irr", "nortest", "moments","GPArotation","nFactors","gdata","meta","metafor","ggplot2","gridExtra" ,"psych", "RCurl", "irr", "nortest", "moments","GPArotation","nFactors","gdata"), library, character.only=T)
###################################################
#IMPORTING DATA AND RECODING
###################################################
#Instructions here http://goo.gl/Ofa7gQ
#data <- repmis::source_DropboxData("rti_sr_data.csv","yr0yf1szzyqji35",sep = ",",header = TRUE)
# Hard-coded local path; the commented Dropbox line above is the remote
# alternative.
data<-read.csv("/home/joao/Dropbox/datasets/DGHI/Africa_DGHI/RTI SSA SR/rti_sr_data.csv",sep=',')
###################################################
#OVERALL ANALYSIS
###################################################
#TOTAL RTI Vs. TOTAL RTI DEATHS
#Aggregating data
meta_rti<-with(data,data.frame(total_rti,total_rti_death,Author))
meta_rti<-na.omit(meta_rti)
#Calculating metanalysis
# sm="PLN": log transformation of proportions; fixed-effect model suppressed.
m3<-metaprop(total_rti_death,total_rti,sm="PLN",data=meta_rti,studlab=Author,comb.fixed=FALSE)
tiff("/home/joao/Desktop/rti_deaths_overall.tiff", width = 700, height = 800,compression = 'lzw')
meta::forest(m3)
dev.off()
# Leave-one-out sensitivity analysis and funnel plot for publication bias.
metainf(m3)
metainf(m3, pooled="random")
funnel(m3)
#TOTAL TRAUMA Vs. TOTAL RTI
#Aggregating data
# NOTE(review): "total_traume" is the column name as spelled in the source
# CSV; do not "correct" it without also renaming the data column.
meta_trauma<-with(data,data.frame(total_traume,total_rti,Author))
meta_trauma<-na.omit(meta_trauma)
#Calculating metanalysis
m3<-metaprop(total_rti,total_traume,sm="PLN",data=meta_trauma,studlab=Author,comb.fixed=FALSE)
tiff("/home/joao/Desktop/rti_trauma_overall.tiff", width = 700, height = 1200,compression = 'lzw')
meta::forest(m3)
dev.off()
metainf(m3)
metainf(m3, pooled="random")
funnel(m3)
###################################################
#BY COUNTRY
###################################################
#TOTAL RTI Vs. TOTAL RTI DEATHS
# The matrix round-trip coerces every column to character (side effect of
# as.matrix), so numeric columns are explicitly re-coerced below. This
# pattern repeats in every stratified section.
meta_bycoutry<-with(data,data.frame(total_rti,total_rti_death,Author,country))
meta_bycoutry<-na.omit(meta_bycoutry)
meta_bycoutry<-as.matrix(meta_bycoutry)
meta_bycoutry<-as.data.frame(meta_bycoutry)
meta_bycoutry$total_rti<-as.numeric(as.character(meta_bycoutry$total_rti))
meta_bycoutry$total_rti_death<-as.numeric(as.character(meta_bycoutry$total_rti_death))
m3_prev<-(meta_bycoutry$total_rti_death/meta_bycoutry$total_rti)
by(m3_prev,meta_bycoutry$country,median)
prop_country1<-as.character(meta_bycoutry$country)
m3<-metaprop(total_rti_death,total_rti,sm="PLN",byvar=country,data=meta_bycoutry,studlab=Author,comb.fixed=FALSE)
tiff("/home/joao/Desktop/rti_deaths_by_country.tiff", width = 700, height = 1500,compression = 'lzw')
meta::forest(m3)
dev.off()
#TOTAL TRAUMA Vs. TOTAL RTI
meta_bycoutry<-with(data,data.frame(total_traume,total_rti,Author,country))
meta_bycoutry<-na.omit(meta_bycoutry)
meta_bycoutry<-as.matrix(meta_bycoutry)
meta_bycoutry<-as.data.frame(meta_bycoutry)
meta_bycoutry$total_rti<-as.numeric(as.character(meta_bycoutry$total_rti))
meta_bycoutry$total_traume<-as.numeric(as.character(meta_bycoutry$total_traume))
m4_prev<-(meta_bycoutry$total_rti/meta_bycoutry$total_traume)
# NOTE(review): uses m3_prev, not m4_prev, here - possibly a copy-paste
# slip; confirm which prevalence vector was intended.
by(m3_prev,meta_bycoutry$country,median)
prop_country2<-as.character(meta_bycoutry$country)
m3<-metaprop(total_rti,total_traume,sm="PLN",byvar=country,data=meta_bycoutry,studlab=Author,comb.fixed=FALSE)
tiff("/home/joao/Desktop/rti_trauma_by_country.tiff", width = 700, height = 2000,compression = 'lzw')
meta::forest(m3)
dev.off()
###################################################
#BY TYPE OF INJURY
###################################################
#TOTAL RTI Vs. TOTAL RTI DEATHS
meta_byinjury<-with(data,data.frame(total_rti,total_rti_death,Author,type_injury))
meta_byinjury<-na.omit(meta_byinjury)
meta_byinjury<-as.matrix(meta_byinjury)
meta_byinjury<-as.data.frame(meta_byinjury)
meta_byinjury$total_rti<-as.numeric(as.character(meta_byinjury$total_rti))
meta_byinjury$total_rti_death<-as.numeric(as.character(meta_byinjury$total_rti_death))
tiff("/home/joao/Desktop/rti_deaths_by_injury.tiff", width = 700, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti_death,total_rti,sm="PLN",byvar=type_injury,data=meta_byinjury,studlab=Author,comb.fixed=FALSE)
meta::forest(m3)
dev.off()
#TOTAL TRAUMA Vs. TOTAL RTI
meta_byinjury<-with(data,data.frame(total_traume,total_rti,Author,type_injury))
meta_byinjury<-na.omit(meta_byinjury)
meta_byinjury<-as.matrix(meta_byinjury)
meta_byinjury<-as.data.frame(meta_byinjury)
meta_byinjury$total_rti<-as.numeric(as.character(meta_byinjury$total_rti))
meta_byinjury$total_traume<-as.numeric(as.character(meta_byinjury$total_traume))
tiff("/home/joao/Desktop/rti_trauma_by_injury.tiff", width = 700, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti,total_traume,sm="PLN",byvar=type_injury,data=meta_byinjury,studlab=Author,comb.fixed=FALSE)
meta::forest(m3)
dev.off()
###################################################
#BY AGE GROUPS
####################################################
#TOTAL RTI Vs. TOTAL RTI DEATHS
meta_byage_groups<-with(data,data.frame(total_rti,total_rti_death,Author,age_groups))
meta_byage_groups<-na.omit(meta_byage_groups)
meta_byage_groups<-as.matrix(meta_byage_groups)
meta_byage_groups<-as.data.frame(meta_byage_groups)
meta_byage_groups$total_rti<-as.numeric(as.character(meta_byage_groups$total_rti))
meta_byage_groups$total_rti_death<-as.numeric(as.character(meta_byage_groups$total_rti_death))
tiff("/home/joao/Desktop/rti_deaths_by_age_groups.tiff", width = 800, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti_death,total_rti,sm="PLN",byvar=age_groups,data=meta_byage_groups,studlab=Author,comb.fixed=FALSE,comb.random=FALSE,print.byvar=FALSE)
meta::forest(m3)
dev.off()
#TOTAL TRAUMA Vs. TOTAL RTI
meta_byage_groups<-with(data,data.frame(total_traume,total_rti,Author,age_groups))
meta_byage_groups<-na.omit(meta_byage_groups)
meta_byage_groups<-as.matrix(meta_byage_groups)
meta_byage_groups<-as.data.frame(meta_byage_groups)
meta_byage_groups$total_rti<-as.numeric(as.character(meta_byage_groups$total_rti))
meta_byage_groups$total_traume<-as.numeric(as.character(meta_byage_groups$total_traume))
tiff("/home/joao/Desktop/rti_trauma_by_age_groups.tiff", width = 800, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti,total_traume,sm="PLN",byvar=age_groups,data=meta_byage_groups,studlab=Author,comb.fixed=FALSE,comb.random=FALSE,print.byvar=FALSE)
meta::forest(m3)
dev.off()
###################################################
#BY TYPE OF POPULATION (
####################################################
#TOTAL RTI Vs. TOTAL RTI DEATHS
# Empty-string population labels are recoded to NA so na.omit drops them.
meta_bypopulation<-with(data,data.frame(total_rti,total_rti_death,Author,type_population,proportion_death))
meta_bypopulation$type_population<-car::recode(meta_bypopulation$type_population,"'' =NA")
meta_bypopulation<-na.omit(meta_bypopulation)
meta_bypopulation<-as.matrix(meta_bypopulation)
meta_bypopulation<-as.data.frame(meta_bypopulation)
meta_bypopulation$total_rti<-as.numeric(as.character(meta_bypopulation$total_rti))
meta_bypopulation$total_rti_death<-as.numeric(as.character(meta_bypopulation$total_rti_death))
meta_bypopulation$proportion_death<-as.numeric(as.character(meta_bypopulation$proportion_death))
tiff("/home/joao/Desktop/rti_deaths_by_population.tiff", width = 700, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti_death,total_rti,sm="PLN",byvar=type_population,data=meta_bypopulation,studlab=Author,comb.fixed=FALSE,comb.random=FALSE,print.byvar=FALSE)
meta::forest(m3)
dev.off()
by(meta_bypopulation$proportion_death,meta_bypopulation$type_population,summary)
#TOTAL TRAUMA Vs. TOTAL RTI
meta_bypopulation<-with(data,data.frame(total_traume,total_rti,Author,type_population,proportion_rti))
meta_bypopulation$type_population<-car::recode(meta_bypopulation$type_population,"'' =NA")
meta_bypopulation<-na.omit(meta_bypopulation)
meta_bypopulation<-as.matrix(meta_bypopulation)
meta_bypopulation<-as.data.frame(meta_bypopulation)
meta_bypopulation$total_rti<-as.numeric(as.character(meta_bypopulation$total_rti))
meta_bypopulation$total_traume<-as.numeric(as.character(meta_bypopulation$total_traume))
meta_bypopulation$proportion_rti<-as.numeric(as.character(meta_bypopulation$proportion_rti))
tiff("/home/joao/Desktop/rti_trauma_by_population.tiff", width = 700, height = 1500,compression = 'lzw')
m3<-metaprop(total_rti,total_traume,sm="PLN",byvar=type_population,data=meta_bypopulation,studlab=Author,comb.fixed=FALSE,comb.random=FALSE,print.byvar=FALSE)
meta::forest(m3)
dev.off()
by(meta_bypopulation$proportion_rti,meta_bypopulation$type_population,summary)
###################################################
#Separated by time
###################################################
#meta1<-with(data,data.frame(total_rti_death,total_rti,Author,country))
#meta1$country<-as.character(meta1$country)
#meta1<-na.omit(meta1)
#meta1<-meta1[-4,]
#m3<-metaprop(total_rti_death,total_rti,sm="PLN",data=meta1,studlab=Author)
# Publication year collapsed into 5-year bands; anything before 1990 is
# the reference band "< 1990".
data$year_cat<-car::recode(data$year,"1990:1995='1990-1995';1996:2000='1996-2000';2001:2005='2001-2005';2006:2010='2006-2010';2011:2015='2011-2015';else='< 1990'")
#TOTAL RTI Vs. TOTAL RTI DEATHS
meta_byyear<-with(data,data.frame(total_rti,total_rti_death,Author,year_cat))
meta_byyear<-na.omit(meta_byyear)
meta_byyear<-as.matrix(meta_byyear)
meta_byyear<-as.data.frame(meta_byyear)
meta_byyear$total_rti<-as.numeric(as.character(meta_byyear$total_rti))
meta_byyear$total_rti_death<-as.numeric(as.character(meta_byyear$total_rti_death))
m3_prev<-(meta_byyear$total_rti_death/meta_byyear$total_rti)
m3_year<-meta_byyear$year_cat
m3<-metaprop(total_rti_death,total_rti,sm="PLN",byvar=year_cat,data=meta_byyear,studlab=Author,comb.fixed=FALSE)
tiff("/home/joao/Desktop/rti_deaths_byyear.tiff", width = 700, height = 800,compression = 'lzw')
meta::forest(m3)
dev.off()
metainf(m3)
metainf(m3, pooled="random")
funnel(m3)
meta_byyear<-with(data,data.frame(total_traume,total_rti,Author,year_cat))
meta_byyear<-na.omit(meta_byyear)
meta_byyear<-as.matrix(meta_byyear)
meta_byyear<-as.data.frame(meta_byyear)
meta_byyear$total_rti<-as.numeric(as.character(meta_byyear$total_rti))
meta_byyear$total_traume<-as.numeric(as.character(meta_byyear$total_traume))
m4_prev<-(meta_byyear$total_rti/meta_byyear$total_traume)
m4_year<-meta_byyear$year_cat
m4<-metaprop(total_rti,total_traume,sm="PLN",byvar=year_cat,data=meta_byyear,studlab=Author)
#meta_nigeria<-subset(meta_bygroup,meta_bygroup$country=="Nigeria")
tiff("/home/joao/Desktop/rti_trauma_byyear.tiff", width = 700, height = 1200,compression = 'lzw')
forest(m4)
dev.off()
# NOTE(review): these sensitivity/funnel calls reference m3 right after m4
# was fitted - likely a copy-paste slip (m4 intended); confirm before use.
metainf(m3)
metainf(m3, pooled="random")
funnel(m3)
###################################################
#Sensitivity Analysis
###################################################
# Combine the by-year RTI and death prevalences into one plotting frame:
# a line per model over year bands, with bars (scaled by /100) showing the
# number of papers per band and jittered raw study proportions overlaid.
m_model<-c(rep("RTI",69),rep("Death",43))
m_year<-c(as.character(m4_year),as.character(m3_year))
m_year<-as.factor(m_year)
m_prev<-c(m4_prev,m3_prev)
m_data<-data.frame(m_model,m_year,m_prev)
value<-c(by(m4_prev,m4_year,median),by(m3_prev,m3_year,median))
npapers<-c(m4$k.w,m3$k.w)
dates<-c(m4$bylevs,m3$bylevs)
model<-c(rep("RTI",6),rep("Death",5))
#proportion<-m
graph_data<-data.frame(value,npapers,dates,model)
# NOTE(review): the annotate() labels below hard-code per-band paper counts
# and positions; they must be updated by hand if the data change.
ggplot(data=graph_data, aes(x=dates, y=value, group=model,color=model)) + geom_line(size=1.5) + geom_point(size=5,fill="white") + ylab("Prevalence (%)") + xlab("Dates") + scale_colour_manual(values=c("black", "#E69F00"), name="")+ theme_bw() + geom_bar(aes(x=dates,y=npapers/100),stat="identity", alpha=0.5, fill="white") +
annotate("text", x = 1, y = 0.02, label = "2",size=5)+
annotate("text", x = 2, y = 0.17, label = "16",size=5)+
annotate("text", x = 3, y = 0.11, label = "10",size=5)+
annotate("text", x = 4, y = 0.22, label = "21",size=5)+
annotate("text", x = 5, y = 0.29, label = "28",size=5)+
annotate("text", x = 6, y = 0.37, label = "36",size=5) +
geom_jitter(data=m_data, aes(x=m_year,y=m_prev,group=m_model,color=m_model),size=3,fill="black")
# NOTE(review): stray bare symbol left at end of script; evaluating it
# prints data$year_cat is NOT what happens - it errors unless year_cat
# exists in the global environment. Likely leftover debris.
year_cat
|
4285f326d1f48704fa0fd43dddb209a191708b60
|
364d3c7f7b87095baadbbacefb69a0a7c107c87c
|
/man/set_hparams_xgbTree.Rd
|
c03376586dcf03dfd11f750523ab25f4934ea701
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
SchlossLab/mikropml
|
32c9d0cd7351d667b2fc7522eabdcfb73e28d699
|
3dcc9bc0c49e0e65714fd9a1e0045a749ada76e8
|
refs/heads/main
| 2023-06-11T15:23:19.409104
| 2023-04-15T17:02:49
| 2023-04-15T17:02:49
| 226,981,416
| 41
| 12
|
NOASSERTION
| 2023-08-21T15:44:37
| 2019-12-09T22:37:38
|
R
|
UTF-8
|
R
| false
| true
| 535
|
rd
|
set_hparams_xgbTree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hyperparameters.R
\name{set_hparams_xgbTree}
\alias{set_hparams_xgbTree}
\title{Set hyperparameters for XGBoost trees (xgbTree)}
\usage{
set_hparams_xgbTree(n_samples)
}
\arguments{
\item{n_samples}{number of samples in the dataset}
}
\value{
named list of hyperparameters
}
\description{
Set hyperparameters for XGBoost trees (xgbTree)
}
\examples{
\dontrun{
set_hparams_xgbTree()
}
}
\author{
Kelly Sovacool, \email{sovacool@umich.edu}
}
\keyword{internal}
|
f496080f6ef40276137c18a5d1a28e86711abf7a
|
034104842a843a5f412b880968d2801edcc266dd
|
/text_processing.R
|
22e1c0c5815854c94e440a4c35c94fd8360b8fa8
|
[] |
no_license
|
RedTent/KLADBLOK
|
8d84b9108023772b483a6caf493b1607392d9839
|
23fc2dcc23f7e2e19e73b5b3353a86ce7a169687
|
refs/heads/master
| 2021-09-11T04:01:24.782558
| 2021-08-27T15:03:56
| 2021-08-27T15:03:56
| 138,591,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
text_processing.R
|
# Word-frequency exploration of "wbp.txt": tokens are lower-cased, stripped
# of digits/whitespace/punctuation, filtered against a Dutch stop-word list,
# and visualised as a word cloud.
library(readr)
library(stringr)
library(dplyr)
library(wordcloud2)
# Dutch stop words excluded from the cloud. NOTE(review): the "๏ฟฝ" entry is
# a mojibake/replacement character from a bad encoding round trip -- confirm
# which original character it was meant to filter; kept byte-identical here.
stopwoorden <- c("de", "in", "het", "en", "van", "een", "te", "op", "voor", "aan", ">", "dat", "dit", "niet",
"als", "die", "naar", "er", "wat", "hun", "of", "zo", "zodat", "tot", "door", "๏ฟฝ", "bij" , "is",
"met", "om", "ad", "uit", "ook", "deze", "a", "over", "bijlagen")
# Read one token per row (column literally named "1"), normalise, and drop
# empty strings and stop words. print() at the end also shows the result.
ruwe_tekst <- read_delim("wbp.txt", delim =",") %>% rename(word = `1`) %>% mutate(word = str_to_lower(word)) %>%
mutate(word = str_remove_all(word, "[:digit:]"),
word = str_remove_all(word, "[:blank:]"),
word = str_remove_all(word, "[:punct:]")
) %>%
filter(word != "") %>%
filter(!word %in% stopwoorden) %>%
print()
# Frequency table rendered as a word cloud shaped like the letters "HHSK".
ruwe_tekst %>% group_by(word) %>% summarize(freq = n()) %>% arrange(desc(freq)) %>% letterCloud(word = "HHSK")
# Peek at ranks 30-40 of the frequency table (magrittr "." placeholder).
ruwe_tekst %>% group_by(word) %>% summarize(freq = n()) %>% arrange(desc(freq)) %>% .[30:40,]
|
534ff6694e655e7680f906797710481c953b21f3
|
1d669fa585876ecc72322eda3cc75fd5ecebb1a6
|
/R/se.R
|
2b77675f304596ac49c9405e63352bfe07a4c299
|
[] |
no_license
|
afilazzola/LearnCommAnalysis
|
06e01e088f166f6823855e36d07f888201a35c95
|
3eeea18a74ebcba07ba79c2891f1930e155f5da1
|
refs/heads/master
| 2020-04-04T14:24:04.083140
| 2018-11-06T03:34:36
| 2018-11-06T03:34:36
| 155,997,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
se.R
|
## Standard error of the mean: sd(x) / sqrt(n).
##
## x:     a numeric vector.
## na.rm: if TRUE, NA values are dropped before computing, and n counts
##        only the non-missing observations (so sd and sqrt(n) agree).
##        The default FALSE preserves the original behaviour, where any
##        NA propagates to the result.
## Returns a single numeric value (NA if x contains NA and na.rm = FALSE).
se <- function(x, na.rm = FALSE) {
  n <- if (na.rm) sum(!is.na(x)) else length(x)
  sd(x, na.rm = na.rm) / sqrt(n)
}
|
ce2e8b33880e121ee23165c680561e884121f9f4
|
337e914fb4383becb3694d1bb0b453b6a1b01dd2
|
/Shiny_server/server.R
|
84e619cc2ae6a8bd7ff64a1f7c0379bd38f98bee
|
[] |
no_license
|
cguillamet/Shiny-App-Cancer
|
0c14db5f55a449b08b498c2fda01809396680133
|
121351f23c572713ddb950396232a7095b9686a7
|
refs/heads/master
| 2023-04-17T20:39:39.080814
| 2021-04-26T15:45:38
| 2021-04-26T15:45:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,043
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(dplyr)
library(shinydashboard)
# Cancer registry records 2015-2020; FECDIAG holds the diagnosis date as a
# string whose first four characters are the year.
datos <- read.csv("base2015_2020.csv")
# NOTE(review): the column name "aรฑo" is mojibake of the Spanish "año"
# (year); it is kept byte-identical because all downstream code uses it.
datos$aรฑo <- substr(datos$FECDIAG, start = 1, stop = 4)
# Keep only the complete years 2015-2019.
datos <- filter(datos, aรฑo %in% c("2015", "2016", "2017", "2018", "2019"))
# Share of cases per topography, restricted to types above 2%.
# NOTE(review): datos2 is not referenced by the server below -- possibly
# leftover from an earlier version.
datos2 <- datos %>%
  group_by(TOP..cat.) %>%
  tally() %>%
  mutate(porc = round(n/sum(n),2)) %>%
  filter(porc > 0.02)
# Yearly case counts per cancer topography ...
tipo <- datos %>%
  group_by(TOP..cat., aรฑo) %>%
  tally() %>% rename(tipo = "TOP..cat.")
# ... and additionally split by sex.
tipo2 <- datos %>%
  group_by(TOP..cat., aรฑo, SEXO..desc.) %>%
  tally() %>% rename(tipo = "TOP..cat.", sexo = "SEXO..desc.")
# Shiny server: three reactive subsets drive three plots -- a yearly trend
# line per topography (input$tipo), a year-by-sex bar chart and an age
# boxplot (both driven by input$carac). Plot labels are Spanish UI strings
# (some mojibake-encoded) and are kept byte-identical.
server <- function(input, output) {
  # Yearly counts for the topographies selected in input$tipo (line chart).
  dat <- reactive({
    req(input$tipo)
    filter(tipo, tipo %in% input$tipo)
  })
  # Year x sex counts for the topographies selected in input$carac (bars).
  dat2 <- reactive({
    req(input$carac)
    filter(tipo2, tipo %in% input$carac)
  })
  # Raw registry rows for input$carac, used by the age boxplot.
  dto <- reactive({
    req(input$carac)
    filter(datos, TOP..cat. %in% input$carac)
  })
  # Fill in the spot we created for a plot
  output$cancerPlot <- renderPlot({
    # Render a plot
    ggplot(dat(), aes(x = aรฑo, y = n, group = tipo)) +
      geom_line(size = 1, aes(colour = tipo)) +
      labs(x = "Aรฑo", y = "Frecuencia", group = "Topografรญa",
           color = "Topografรญa", title = "Cantidad de nuevos casos segรบn tipo de cรกncer")
  })
  # Grouped bar chart: new cases per year, split by sex.
  output$cplot <- renderPlot({
    ggplot(dat2(), aes(x = aรฑo, y = n, fill = sexo)) +
      geom_bar(stat = "identity", position=position_dodge()) +
      labs(x = "Aรฑo", y = "Frecuencia", fill = "Sexo", title = "Cantidad de nuevos casos segรบn tipo de cรกncer y sexo")
  })
  # Age distribution of new cases by sex for the selected topography.
  output$edadplot <- renderPlot({
    ggplot(dto(), aes(x = SEXO..desc., y = EDAD, group = SEXO..desc.))+
      geom_boxplot(aes(colour = SEXO..desc.)) +
      labs(x = "Sexo", y = "Edad", color = "Sexo", title = "Boxplot de las edades de las personas que presentaron los nuevos casos de cรกncer segรบn tipo y sexo")
  })
}
|
73875fe78e7b57de052d7a1af6e3bb34646e1522
|
ae50d81889d88e0510bd4d076c6a559848cf112a
|
/Repositories/Dropbox/Dropbox_Cache_Example/server.R
|
bf74a6aaff47da2ba386bc5d20db139beb19c604
|
[
"MIT"
] |
permissive
|
ToonTalk/Live-Data_Scripts-and-Templates
|
a0f784a0320cc9a8dac5e030b51f44aa72cec32c
|
4855d56f7b6b2be212ff2f7df3c867788e22e225
|
refs/heads/gh-pages
| 2021-01-15T12:30:57.498318
| 2016-08-05T14:54:57
| 2016-08-05T14:54:57
| 65,015,940
| 0
| 0
| null | 2016-08-05T12:12:43
| 2016-08-05T12:12:42
| null |
UTF-8
|
R
| false
| false
| 6,045
|
r
|
server.R
|
library(xlsx)
library(shiny)
library(rdrop2)
library(lubridate)
library(digest)
library(plyr)
# Dropbox OAuth token loaded from a file shipped alongside the app, and the
# name of the bundled fallback CSV ("pms" = prime ministers). NOTE(review):
# confirm pms_data.csv matches the remote UK_Prime_Ministers.csv schema.
token <- readRDS("droptoken.rds")
original_file_name <- "pms_data.csv"
# Produce a collision-resistant local filename of the form
# "<hash>_user_downloaded.csv". The hash input combines the current time
# (whole seconds) with a random draw so repeated downloads within the same
# second still get distinct names.
unique_name_fn <-
  function() {
    salt <- paste0(as.integer(Sys.time()), runif(1))
    sprintf("%s_%s.csv", digest::digest(salt), "user_downloaded")
  }
# List the master CSV (original_file_name, a file-level constant) plus every
# "user_downloaded" snapshot in the working directory, ordered oldest-first
# by modification time, so the last element is always the freshest local copy.
sort_locals_by_date <- function() {
  dir_listing <- list.files()
  candidates <- c(
    original_file_name,
    dir_listing[grepl(pattern = "user[_]downloaded", dir_listing)]
  )
  mtimes <- unlist(lapply(candidates, file.mtime))
  candidates[order(mtimes)]
}
# Housekeeping: once more than five "user_downloaded" snapshots have
# accumulated locally, delete the three oldest. The master CSV is never
# removed because the list is filtered to snapshots before deletion.
clear_downloaded_files <- function() {
  snapshot_mask <- grepl(pattern = "user[_]downloaded", list.files())
  if (sum(snapshot_mask) > 5) {
    by_age <- sort_locals_by_date()
    by_age <- by_age[grepl(pattern = "user[_]downloaded", by_age)]
    # Oldest three are at the front of the mtime-sorted vector.
    lapply(by_age[1:3], function(old_file)
      file.remove(old_file))
  }
}
# Shiny server with a Dropbox-backed cache. On every press of input$update
# (and once at startup, via ignoreNULL = FALSE) the remote CSV is
# re-downloaded only when no local copy is at least as new as the remote
# file; otherwise the freshest local snapshot is re-read from disk.
shinyServer(function(input, output) {
  uk_prime_ministers <- eventReactive(input$update,
  {
    if (drop_exists('/Private_Cache-Tests/UK_Prime_Ministers.csv', dtoken = token)) {
      if (any(grepl(pattern = "user[_]downloaded", list.files()))) {
        ## there are updated files
        ## Get modification times for local and external file
        all_local_files <-
          c(original_file_name, list.files()[grepl(pattern = "user[_]downloaded", list.files())])
        all_local_files_mtime <-
          unlist(lapply(all_local_files, function(x)
            file.mtime(x)))
        # NOTE(review): `[1, modified]` uses the bare name `modified` in
        # [i, j] indexing, which only works if drop_history() returns a
        # data.table -- confirm against the installed rdrop2 version.
        remote_file_mtime <-
          dmy_hms(drop_history('/Private_Cache-Tests/UK_Prime_Ministers.csv', dtoken = token)[1, modified])
        if (!any(all_local_files_mtime > as.integer(remote_file_mtime))) {
          # Remote copy is newer than every local file: download a fresh,
          # uniquely named snapshot, then read it back.
          drop_get(
            '/Private_Cache-Tests/UK_Prime_Ministers.csv',
            local_file = unique_name_fn(),
            overwrite = T,
            dtoken = token
          )
          sorted_files <- sort_locals_by_date()
          ## Import most recently updated file
          data_to_use <-
            read.csv(sorted_files[length(sorted_files)])
          clear_downloaded_files()
          data_to_use
        } else {
          # Some local snapshot is already up to date: just re-read it.
          sorted_files <- sort_locals_by_date()
          ## Import most recently updated file
          data_to_use <-
            read.csv(sorted_files[length(sorted_files)])
          data_to_use
        }
      } else {
        ## first deploy, get file and import
        drop_get(
          '/Private_Cache-Tests/UK_Prime_Ministers.csv',
          local_file = unique_name_fn(),
          overwrite = T,
          dtoken = token
        )
        sorted_files <- sort_locals_by_date()
        ## Import most recently updated file
        data_to_use <-
          read.csv(sorted_files[length(sorted_files)])
        clear_downloaded_files()
        data_to_use
      }
    } else {
      ## if external file does not exist
      # Fall back to whatever local copy is freshest (at minimum the
      # bundled original_file_name CSV).
      sorted_files <- sort_locals_by_date()
      ## Import most recently updated file
      data_to_use <- read.csv(sorted_files[length(sorted_files)])
      data_to_use
    }
  },
  ignoreNULL = FALSE)
  #
  # data_to_use <-
  #   join_all(lapply(all_local_files, function(x) {
  #     read.csv(x)
  #   }),
  #   match = "all",
  #   type = "right")
  # data_to_use <-
  #   data_to_use[!duplicated(data_to_use),]
  # data_to_use)
  # Render the cached data set as the app's summary table.
  output$summary <- renderDataTable({
    uk_prime_ministers()
  })
})
|
496da59acdc3a28028e308fc1f73d928707f07e2
|
de4d8e9a2d968c2f90e17e4aaa384705175525f7
|
/server.R
|
578e12db358d25a6c61c8209f2be35e5de9b4142
|
[] |
no_license
|
Marika3/ShinyVolcano
|
ee8fc282640ae5ad6330bdcbda79b0029f01896f
|
ef67bcbdd0ac24285ccabac5d75bdb78199cc9a2
|
refs/heads/master
| 2021-01-18T19:05:00.537308
| 2014-06-19T09:05:41
| 2014-06-19T09:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
server.R
|
library(shiny)
# Shiny server: renders the built-in `volcano` elevation matrix as a 3D
# perspective plot, driven by four UI sliders (height scale, theta, phi,
# and shade percentage).
shinyServer(
  function(input, output) {
    output$ThreeDee <- renderPlot({
      # Read the current slider values once per render pass.
      height_scale <- input$sliderHeightScale
      view_theta <- input$sliderTheta
      view_phi <- input$sliderPhi
      shade_pct <- input$sliderShade
      # Scale elevations; the +1 keeps the original height at slider 0.
      z <- volcano * (height_scale + 1)
      x <- 10 * seq_len(nrow(z))
      y <- 10 * seq_len(ncol(z))
      par(bg = "white")
      persp(x, y, z,
            theta = view_theta, phi = view_phi,
            col = "green3", scale = FALSE, ltheta = -120,
            shade = shade_pct / 100, border = NA, box = FALSE)
    })
  }
)
|
0a38c71e267a4395a003354d5fc18927823abbd6
|
2daeca90cc3b7c681059feecc8196dfe04a9e793
|
/R_scripts/plotold.R
|
8c6b5325a1613eacf21bb281d6bb8c05e17b9a08
|
[] |
no_license
|
kosticlab/athlete
|
ccadf6e164bc5e8102b153c92f03c1cf3c2c1341
|
8b14036231bc517891a4261235766b42805dcfaf
|
refs/heads/master
| 2021-03-27T14:44:46.189366
| 2018-05-23T18:39:48
| 2018-05-23T18:39:48
| 93,183,051
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,751
|
r
|
plotold.R
|
library(pheatmap)
library(RColorBrewer)
library(viridis)
# Compute `n` evenly spaced quantile cut points of `xs`, dropping repeated
# values so the result is strictly increasing (suitable as colour-scale
# breaks, e.g. for pheatmap).
quantile_breaks <- function(xs, n = 10) {
  probs <- seq(0, 1, length.out = n)
  qs <- quantile(xs, probs = probs)
  keep <- !duplicated(qs)
  qs[keep]
}
# Read the relative-abundance matrix named by the first command-line
# argument and open a PNG device for the heatmap; the same argument is
# reused as the figure title below. Paths are hard-coded to the original
# author's machine.
matdf <- read.csv(file=paste(paste("/Users/jacobluber/Desktop/athlete/",commandArgs(trailingOnly = TRUE),sep=""),"_ra.csv",sep=""),header=TRUE,row.names=1)
png(paste(paste("/Users/jacobluber/Desktop/athlete/",commandArgs(trailingOnly = TRUE),sep=""),".png",sep=""))
# Drop samples excluded from the analysis (second-run "_ru2" replicates).
matdf$S52_ru2 <- NULL
matdf$S53_ru2 <- NULL
matdf$S54_ru2 <- NULL
matdf$S55_ru2 <- NULL
matdf$S56_ru2 <- NULL
matdf$S57_ru2 <- NULL
matdf$S58_ru2 <- NULL
matdf$S59_ru2 <- NULL
matdf$S60_ru2 <- NULL
matdf$S75_ru2 <- NULL
matdf$S78_ru2 <- NULL
matdf$S7_ru2 <- NULL
# Convert to matrix and derive 10 deduplicated quantile colour breaks.
mat <- as.matrix(matdf)
mat_breaks <- quantile_breaks(mat, n = 11)
exercise_state <- c("Before","After","After","After","After","Before","Before","Before","Before","Before","After","After","After","After","Before","Before","After","Before","After","After","Before","After","Before","After","After","Before","Before","Before","Before","Before","Before","After","After","After","Before","Before","Before","Before","After","After","After","After","Before","Before","After","Before","Before","Before","Before","After","After","Before","After","After","Before","Before","After","After","After","Before","Before","After","After","After","After","After","After","After","After","Before","Before","Before","After","After","Before","Before")
individual <- c("SG40","SG40","SG40","SG40","SG40","SG48","SG48","SG48","SG48","SG48","SG48","SG48","SG48","SG48","SG49","SG49","SG49","SG49","SG49","SG49","SG49","SG49","SG49","SG49","SG49","SG50","SG50","SG50","SG50","SG50","SG50","SG50","SG50","SG50","SG51","SG51","SG51","SG51","SG51","SG51","SG51","SG51","SG41","SG41","SG41","SG41","SG41","SG42","SG42","SG42","SG42","SG42","SG42","SG42","SG43","SG43","SG43","SG43","SG43","SG45","SG45","SG45","SG45","SG45","SG45","SG45","SG45","SG45","SG45","SG46","SG46","SG46","SG46","SG46","SG46","SG46")
type <- c("UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon" ,"UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","UltraMarathon","Marathon","Marathon","Marathon","Marathon","Marathon","Marathon","Marathon","Marathon","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower","Rower")
gradient <- c("Elite","Elite","Elite","Elite","Elite","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite" ,"Elite","Elite","Elite","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday" ,"Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Everyday","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite","Elite")
#annotation_row = data.frame( ExerciseState = exercise_state, Individual = individual, Sport = type, AthleteGradient = gradient)
# Fix the legend order so "Before" precedes "After".
exercise_state <- factor(exercise_state, levels = c("Before", "After"))
# Column annotations for the heatmap. NOTE(review): the hand-typed
# annotation vectors above must stay aligned, element for element, with
# the columns of `mat`.
annotation_row = data.frame( ExerciseState = exercise_state, Individual = individual)
#annotation_row = data.frame( ExerciseState = exercise_state)
rownames(annotation_row) = colnames(mat)
# NOTE(review): the names() call below is commented out, so this palette
# list is unnamed -- confirm pheatmap maps colours as intended.
mat_colors <- list(group = brewer.pal(9, "Set1"))
#names(mat_colors$group) <- unique(col_groups)
# Group columns by individual, then by exercise state, before plotting.
# pheatmap matches annotations to columns by name, so the earlier
# rownames(annotation_row) assignment still lines up after reordering.
mat <- mat[ , order(individual,exercise_state)]
pheatmap(
  mat = mat,
  color = inferno(length(mat_breaks) - 1),
  breaks = mat_breaks,
  border_color = NA,
  show_colnames = FALSE,
  show_rownames = FALSE,
  annotation_col = annotation_row,
  annotation_colors = mat_colors,
  drop_levels = TRUE,
  fontsize = 10,
  cluster_cols = FALSE,
  main = commandArgs(trailingOnly = TRUE)
)
dev.off()
#pheatmap(mtxm,annotation_col = mat_col,breaks = mat_breaks, color = inferno(length(mat_breaks)-1), annotation_colors = mat_colors, drop_levels = TRUE)
|
0d9b2d9c1678ae368b8d57f400c705dc696e26e5
|
b3c39d9bc7cdd82f225cc1707c69c55513519a1d
|
/man/KRV.Rd
|
3a525294aa783ab0e168a80b6f774c54b6679b5a
|
[] |
no_license
|
teyden/MiRKC
|
d81e02a0e2b349635faea46102b5ee69fbafe740
|
7de32668537ff68d7cbebadafb8d70a338525de9
|
refs/heads/master
| 2020-12-27T09:21:48.191058
| 2020-06-21T08:22:56
| 2020-06-21T08:22:56
| 237,850,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,325
|
rd
|
KRV.Rd
|
\name{KRV}
\alias{KRV}
\title{
Kernel RV Coefficient Test
}
\description{
kernel RV coefficient test to evaluate the overall association between microbiome composition and high-dimensional or structured phenotype.
}
\usage{
KRV(kernel.otu, y = NULL, X = NULL, kernel.y)
}
\arguments{
\item{kernel.otu}{
A numerical n by n kernel matrix. It can be constructed from microbiome data, such as by transforming from a distance metric.
}
\item{y}{
A numerical n by p matrix of p continuous phenotype variables (Default = NULL). If it is NULL, a phenotype kernel matrix must be entered for "kernel.y". No need to provide if kernel.y is a matrix.
}
\item{X}{
A numerical n by q matrix, containing q additional covariates that you want to adjust for (Default = NULL). If it is NULL, an intercept-only model is fitted. Covariates cannot be adjusted for if kernel.y is a matrix.
}
\item{kernel.y}{
Either a numerical n by n kernel matrix of phenotype or a method to compute the kernel of phenotype.
Gaussian kernel (kernel.y="Gaussian") can capture general relationship between microbiome and phenotypes; and linear kernel (kernel.y="linear") can be preferred if the underlying relationship is close to linear.
}
}
\details{
kernel.otu should be a numerical n by n kernel matrix, where n is sample size.
When kernel.y is a method ("Gaussian" or "linear") to compute the kernel of phenotype, y should be a numerical phenotype matrix, and X (if not NULL) should be a numerical matrix of covariates. Both y and X should have n rows.
When kernel.y is a kernel matrix of phenotype, there is no need to provide X and y, and they will be ignored if provided. In this case, kernel.y and kernel.otu should both be numerical matrices with the same number of rows and columns.
Missing data is not permitted. Please remove all individuals with missing kernel.otu, y (if not NULL), X (if not NULL), and kernel.y (if a matrix is entered) prior to analysis.
}
\value{
P-value calculated from approximated Pearson type III density
}
\references{
Zhan, X., Plantinga, A., Zhao, N., and Wu, M.C. A Fast Small-Sample Kernel Independence Test for Microbiome Community-Level Association Analysis. Biometrics. 2017 Mar 10. doi: 10.1111/biom.12684.
}
\author{
Haotian Zheng, Xiang Zhan, Ni Zhao
}
\examples{
library(MASS)
library(GUniFrac)
data(throat.tree)
data(throat.otu.tab)
data(throat.meta)
attach(throat.meta)
set.seed(123)
n = nrow(throat.otu.tab)
Male = (Sex == "Male")**2
Smoker =(SmokingStatus == "Smoker") **2
anti = (AntibioticUsePast3Months_TimeFromAntibioticUsage != "None")^2
cova = cbind(Male, anti)
otu.tab.rff <- Rarefy(throat.otu.tab)$otu.tab.rff
unifracs <- GUniFrac(otu.tab.rff, throat.tree, alpha=c(0, 0.5, 1))$unifracs
D.weighted = unifracs[,,"d_1"]
D.unweighted = unifracs[,,"d_UW"]
D.BC= as.matrix(vegdist(otu.tab.rff , method="bray"))
K.weighted = D2K(D.weighted)
K.unweighted = D2K(D.unweighted)
K.BC = D2K(D.BC)
rho = 0.2
Va = matrix(rep(rho, (2*n)^2), 2*n, 2*n)+diag(1-rho, 2*n)
G = mvrnorm(n, rep(0, 2*n), Va)
#############################################################
KRV(kernel.otu = K.weighted, y = G, X = cova, kernel.y = "Gaussian")
KRV(kernel.otu = K.weighted, kernel.y = G \%*\% t(G))
}
|
b7f06577cbc6bb5b67766bd44b5be04311c9d156
|
4592565db17d3d5a4bfa8fc820d7516beb4fa115
|
/demo/seminr-primer-chap5.R
|
009629e3d614af385380dc4f4ab327c85755ffbf
|
[] |
no_license
|
sem-in-r/seminr
|
1b370286c58f4e658a02fb5df21fabe585fcfb4a
|
ae2524aae5f4f0bda3eb87faf80378af5baccea1
|
refs/heads/master
| 2023-04-04T00:29:48.969724
| 2022-06-30T17:02:07
| 2022-06-30T17:02:07
| 70,557,585
| 49
| 16
| null | 2022-10-13T15:22:28
| 2016-10-11T04:58:23
|
R
|
UTF-8
|
R
| false
| false
| 6,375
|
r
|
seminr-primer-chap5.R
|
### Accompanying Code for:
## Partial Least Squares Structural Equation Modeling (PLS-SEM) Using R - A Workbook (2021)
## Hair, J.F. (Jr), Hult, T.M., Ringle, C.M., Sarstedt, M., Danks, N.P., and Ray, S.
## Chapter 5: Evaluation of formative measurement models
# Load the SEMinR library
library(seminr)
# Load the corporate repuation data
corp_rep_data <- corp_rep_data
# Create measurement model ----
# Four formative (mode B) constructs plus reflective composites and a
# single-item CUSA construct.
corp_rep_mm_ext <- constructs(
  composite("QUAL", multi_items("qual_", 1:8), weights = mode_B),
  composite("PERF", multi_items("perf_", 1:5), weights = mode_B),
  composite("CSOR", multi_items("csor_", 1:5), weights = mode_B),
  composite("ATTR", multi_items("attr_", 1:3), weights = mode_B),
  composite("COMP", multi_items("comp_", 1:3)),
  composite("LIKE", multi_items("like_", 1:3)),
  composite("CUSA", single_item("cusa")),
  composite("CUSL", multi_items("cusl_", 1:3))
)
# Create structural model ----
corp_rep_sm_ext <- relationships(
  paths(from = c("QUAL", "PERF", "CSOR", "ATTR"), to = c("COMP", "LIKE")),
  paths(from = c("COMP", "LIKE"), to = c("CUSA", "CUSL")),
  paths(from = c("CUSA"), to = c("CUSL"))
)
# Estimate the model ----
# Missing values are coded "-99" in the data set and mean-replaced.
corp_rep_pls_model_ext <- estimate_pls(
  data = corp_rep_data,
  measurement_model = corp_rep_mm_ext,
  structural_model = corp_rep_sm_ext,
  missing = mean_replacement,
  missing_value = "-99")
# Summarize the model results
summary_corp_rep_ext <- summary(corp_rep_pls_model_ext)
# Iterations to converge
summary_corp_rep_ext$iterations
# Bootstrap the model
# NOTE(review): this bootstrap has no seed and is superseded by the seeded
# bootstrap near the end of the script, which overwrites boot_corp_rep_ext.
boot_corp_rep_ext <- bootstrap_model(seminr_model = corp_rep_pls_model_ext,
                                     nboot = 1000)
# Store the summary of the bootstrapped model
sum_boot_corp_rep_ext <- summary(boot_corp_rep_ext, alpha = 0.10)
# Inspect the indicator loadings
summary_corp_rep_ext$loadings
# Inspect the indicator reliability
summary_corp_rep_ext$loadings^2
# Inspect the internal consistency and reliability
summary_corp_rep_ext$reliability
# Table of the FL criteria
summary_corp_rep_ext$validity$fl_criteria
# HTMT Ratio
summary_corp_rep_ext$validity$htmt
# Extract the bootstrapped HTMT
sum_boot_corp_rep_ext$bootstrapped_HTMT
# Redundancy analysis ----
# Convergent validity of each formative construct: regress the formative
# composite on its single-item global measure; a path >= 0.70 supports
# convergent validity.
# ATTR ----
# Create measurement model
ATTR_redundancy_mm <- constructs(
  composite("ATTR_F", multi_items("attr_", 1:3), weights = mode_B),
  composite("ATTR_G", single_item("attr_global"))
)
# Create structural model
ATTR_redundancy_sm <- relationships(
  paths(from = c("ATTR_F"), to = c("ATTR_G"))
)
# Estimate the model
ATTR_redundancy_pls_model <- estimate_pls(data = corp_rep_data,
                                          measurement_model = ATTR_redundancy_mm,
                                          structural_model = ATTR_redundancy_sm,
                                          missing = mean_replacement,
                                          missing_value = "-99")
# Summarize the model
sum_ATTR_red_model <- summary(ATTR_redundancy_pls_model)
# CSOR ----
# Create measurement model
CSOR_redundancy_mm <- constructs(
  composite("CSOR_F", multi_items("csor_", 1:5), weights = mode_B),
  composite("CSOR_G", single_item("csor_global"))
)
# Create structural model
CSOR_redundancy_sm <- relationships(
  paths(from = c("CSOR_F"), to = c("CSOR_G"))
)
# Estimate the model
CSOR_redundancy_pls_model <- estimate_pls(data = corp_rep_data,
                                          measurement_model = CSOR_redundancy_mm,
                                          structural_model = CSOR_redundancy_sm,
                                          missing = mean_replacement,
                                          missing_value = "-99")
# Summarize the model
sum_CSOR_red_model <- summary(CSOR_redundancy_pls_model)
# PERF ----
# Create measurement model
PERF_redundancy_mm <- constructs(
  composite("PERF_F", multi_items("perf_", 1:5), weights = mode_B),
  composite("PERF_G", single_item("perf_global"))
)
# Create structural model
PERF_redundancy_sm <- relationships(
  paths(from = c("PERF_F"), to = c("PERF_G"))
)
# Estimate the model
PERF_redundancy_pls_model <- estimate_pls(data = corp_rep_data,
                                          measurement_model = PERF_redundancy_mm,
                                          structural_model = PERF_redundancy_sm,
                                          missing = mean_replacement,
                                          missing_value = "-99")
# Summarize the model
sum_PERF_red_model <- summary(PERF_redundancy_pls_model)
# QUAL ----
# Create measurement model
QUAL_redundancy_mm <- constructs(
  composite("QUAL_F", multi_items("qual_", 1:8), weights = mode_B),
  composite("QUAL_G", single_item("qual_global"))
)
# Create structural model
QUAL_redundancy_sm <- relationships(
  paths(from = c("QUAL_F"), to = c("QUAL_G"))
)
# Estimate the model
QUAL_redundancy_pls_model <- estimate_pls(data = corp_rep_data,
                                          measurement_model = QUAL_redundancy_mm,
                                          structural_model = QUAL_redundancy_sm,
                                          missing = mean_replacement,
                                          missing_value = "-99")
# Summarize the model
sum_QUAL_red_model <- summary(QUAL_redundancy_pls_model)
# Check the path coefficients for convergent validity
sum_ATTR_red_model$paths
sum_CSOR_red_model$paths
sum_PERF_red_model$paths
sum_QUAL_red_model$paths
# Collinearity analysis ----
summary_corp_rep_ext$validity$vif_items
# Bootstrap the model ----
# seminr_model is the SEMinR model to be bootstrapped
# nboot is the number of bootstrap iterations to run
# cores is the number of cpu cores to use in multicore bootstrapping
# parallel::detectCores() allows for using the maximum cores on your device
# seed is the seed to be used for making bootstrap replicable
boot_corp_rep_ext <- bootstrap_model(
  seminr_model = corp_rep_pls_model_ext,
  nboot = 1000,
  cores = parallel::detectCores(),
  seed = 123)
# Summarize the results of the bootstrap
# alpha sets the specified level for significance, i.e. 0.05
# (this overwrites the earlier alpha = 0.10 summary).
sum_boot_corp_rep_ext <- summary(boot_corp_rep_ext, alpha = 0.05)
# Inspect the bootstrapping results for outer weights
sum_boot_corp_rep_ext$bootstrapped_weights
# Inspect the bootstrapping results for the outer loadings
sum_boot_corp_rep_ext$bootstrapped_loadings
|
a10659f1bbfb37cf9f2f6098423404b97da5234e
|
9db51d5978be195fc2a4abdd6eafb7a7a3bc7cfa
|
/03.gene-processing/conclude-p-value-skat.R
|
f4c9885ac07950e9f3e86e0bb813ee35b186344f
|
[] |
no_license
|
numvarn/SNPsR
|
a9f6bf299a4a2d1efef02a81d1d92dea1c9db226
|
27e8fb2800f0b0c6ed1bd5b020add1bf3c4a03e6
|
refs/heads/master
| 2020-04-12T09:43:34.282004
| 2017-06-02T08:21:55
| 2017-06-02T08:21:55
| 62,023,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,255
|
r
|
conclude-p-value-skat.R
|
# Collate SKAT p-values: append the p-value column from every per-run CSV
# under `skat_result_path` onto the gene-grouping table, then write one
# combined conclusion CSV.
# Config values
# NOTE(review): setwd() to a user home path makes this script machine-
# specific; consider relative paths or here::here() instead.
setwd("~/ResearchCode/SNPsR")
gene_grouped_file <- "result/grouping-gene/10.GroupingComplete.csv"
conclude_data <- read.csv(gene_grouped_file)
outfile_conclusion <- "result/skat/conclusion-p-value/conclusion-skat-3000replicated.csv"
# Folder that stores the SKAT results
skat_result_path <- "result/skat/0.0/linear-weighted"
filez <- list.files(skat_result_path)
filename_list <- c()
# NOTE(review): file_count is never incremented or read -- dead variable.
file_count <- 0
for (filename in filez) {
  p_value_file_path <- paste(skat_result_path, "/", filename, sep = "")
  p_value_data <- read.csv(p_value_file_path, header = TRUE)
  # Column 6 of each result file is taken as the p-value column --
  # TODO confirm against the SKAT output format. (cbind inside the loop
  # grows conclude_data one column per file; fine for small file counts.)
  conclude_data <- cbind(conclude_data, p_value_data[, 6])
  cat(sprintf("Processing filename : %s\n", filename))
  filename_list <- c(filename_list, filename)
}
# First eight columns come from the grouping file; each appended p-value
# column is named "P-<source filename>".
colnames(conclude_data) <- c("Group No.",
                             "Group Name",
                             "Start No.",
                             "Stop No.",
                             "Start BP",
                             "Stop BP",
                             "New Members",
                             "Median",
                             paste("P-", filename_list, sep = ""))
# Write all data to CSV
write.csv(conclude_data, file = outfile_conclusion, row.names = FALSE)
|
00e6aa9ede6dc9642b3a4f72fbae9083ad8216d5
|
39068b86a43d69300bd9e91c7650dfb0d28403eb
|
/evalsims/getEmpiricalP3_BH.R
|
391625534c4c47948315736cad9f46f2c8a783f6
|
[] |
no_license
|
DrK-Lo/MINOTAUReval
|
c19009747b1e4ef3b30bd68da8fdcfc6476ca413
|
2626fca5300e2b1e53c12e2aa4e0d74cbeb2cb24
|
refs/heads/master
| 2021-01-23T14:03:59.330344
| 2016-09-29T13:33:03
| 2016-09-29T13:33:03
| 56,006,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
getEmpiricalP3_BH.R
|
# Empirical p-value helper: for each observed statistic, the fraction of
# the empirical null distribution that is <= the observed value.
#
# AllObservedValues: numeric vector of observed statistics (may contain NA).
# NonCodingValues:   numeric vector forming the empirical null distribution.
# Returns a vector the same length as AllObservedValues; NA observations
# yield NA, and an observation below the entire null yields 0.
returnEmpP <- function(AllObservedValues, NonCodingValues){
  getEmpP <- function(Obs, sort.Null){
    # Obs is a single observed value; sort.Null is the null sample sorted
    # in ascending order (NAs already dropped by sort() below).
    if (is.na(Obs)) {
      return(NA)
    }
    # Because sort.Null is sorted, counting values <= Obs is equivalent to
    # the old max(which(sort.Null <= Obs)) index, but it returns 0 (not
    # -Inf with a warning) when Obs is below the whole null. This removes
    # the need for options(warn = -1), which previously leaked a global
    # option change out of this function.
    sum(sort.Null <= Obs) / length(sort.Null)
  }
  sapply(AllObservedValues, getEmpP, sort(NonCodingValues))
}
# Empirical power at a Benjamini-Hochberg FDR threshold of 0.05.
#
# stat:       vector of test statistics for all loci.
# neut.logic: logical vector, TRUE for neutral (null) loci; the neutral
#             subset serves as the empirical null for returnEmpP().
# Returns the fraction of non-neutral loci declared significant.
getEmpPower <- function(stat, neut.logic){
  #x <- qvalue(1-returnEmpP(stat, stat[neut.logic]))$q<0.01
  x <- p.adjust(1-returnEmpP(stat, stat[neut.logic]), method="BH")<0.05
  # NOTE(review): t.power[2,1] assumes the table has both FALSE and TRUE
  # rows (i.e. at least one significant and one non-significant locus) and
  # that column 1 is neut.logic == FALSE; with zero significant calls this
  # indexing errors out -- confirm inputs guarantee both levels occur.
  t.power <- table(x, neut.logic)#table(x, dat.out$s_high)
  t.power[2,1]/sum(t.power[,1])
}
|
8144508294b0228628848c03da6eeb1e49828c53
|
22c4136caee081ec1a5bb68dd204b9f765858a2e
|
/R/clipHullsToLand.R
|
06180157b971ce8c2658b5bf55d952fcacf99fc2
|
[
"MIT"
] |
permissive
|
JCur96/sfe
|
03aab8c5911c5e024276f934f2e6195794571df0
|
5d2e5a869f325194d764e5ef3f8a478b8b5f4cf9
|
refs/heads/master
| 2021-07-06T14:06:03.102348
| 2020-09-02T14:19:18
| 2020-09-02T14:19:18
| 193,727,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
clipHullsToLand.R
|
#' Clips convex hulls to landmasses
#'
#' \code{clipHullsToLand} returns the clipped convex hulls by species for a
#' single object of class sf (a data frame).
#'
#' These hulls are clipped to landmasses only.
#' To generate hulls and clip them to landmasses with a single function see
#' \link[sfe]{makeLandClippedHulls}. \code{x} must have
#' both a \code{geometry} and a \code{binomial} column present to work.
#'
#' For more flexible convex hull generation see \link[sf]{st_convex_hull}
#'
#' Landmasses used to clip to are generated from the \code{rnaturalearth}
#' package using the \code{ne_countries} function. please see its documentation
#' here \link[rnaturalearth]{ne_countries} for a full breakdown.
#' @param x object of class sf, sfc or sfg
#' @return Returns object \code{x} with clipped convex hulls computed and stored
#' in the \code{convex_hull} column.
#' @examples
#' data.frame <- clipHullsToLand(data.frame)
#' @export
clipHullsToLand <- function(x) {
  # making a world map of the land
  # NOTE(review): the land polygon is rebuilt on every call; hoisting it
  # to a cached helper would avoid a repeated rnaturalearth fetch.
  landMap <- rnaturalearth::ne_countries(returnclass = 'sf') %>%
    st_union()
  #landMap <- st_transform(landMap, 2163)
  # print(st_is_valid(landMap))
  #x <- st_transform(x, 2163)
  output <- c()
  # One pass per species: clip that species' geometry to land.
  for (var in unique(x$binomial)) {
    # print(var)
    # print(str(x))
    #print(st_is_valid(output))
    subsetOfDf <- x[x$binomial == var,]
    clippedHull <- st_intersection(lwgeom::st_make_valid(subsetOfDf$geometry), lwgeom::st_make_valid(landMap))
    # clippedHull <- suppressMessages(st_intersection(subsetOfDf$geometry, landMap))
    # ocean <- st_difference(subsetOfDf$convex_hull, landMap)
    if (purrr::is_empty(clippedHull)) {
      # error handling here
      # can make this more complex but currently
      # this is just for working terrestrially
      # NOTE(review): when the hull is entirely oceanic the ORIGINAL
      # (unclipped) geometry is kept in the output -- confirm that is the
      # intended fallback rather than dropping the species.
      print('Hull is entirely in the ocean')
    } else {
      # print(clippedHull)
      subsetOfDf$geometry <- clippedHull
    }
    subsetOfDf <- st_transform(subsetOfDf, 4326)
    # NOTE(review): rbind-in-loop grows output quadratically, and the
    # st_transform of the accumulated output on every iteration is
    # redundant (each piece is already EPSG:4326 when appended).
    output <- rbind(output, subsetOfDf)
    output <- st_transform(output, 4326)
  }
  return(output)
}
|
315a572e66f2e70f1cabe60cf9576084b2f4ed6d
|
a0f6077dbe42b6329e78e9b252f5b51d3ec0c479
|
/holtwinters.R
|
25d8331c7950c1fcb10c0d77a5f319a5f48b0a24
|
[] |
no_license
|
prithvi1029/hierarchichal-time-series
|
a7492aceddefbe583f866c7a6209eabc614c552e
|
e741158830279603ee37e04166eaf5362155a273
|
refs/heads/master
| 2020-09-04T08:02:05.672777
| 2019-11-05T10:07:52
| 2019-11-05T10:07:52
| 219,690,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,469
|
r
|
holtwinters.R
|
library(xts)
library(forecast)
library(MLmetrics)
# Weekly date indexes: 154 training weeks from 2014-01-19 and 22 test
# weeks from 2017-01-01 (the xts conversions are currently disabled).
# Create the dates object as an index for your xts object
train_dates <- seq(as.Date("2014-01-19"), length = 154, by = 7)
#train_sales <- xts(train_sales, order.by = train_dates)
test_dates <- seq(as.Date("2017-01-01"), length = 22, by = 7)
#test_sales <- xts(valid,order.by = test_dates)
# Training series for each node of the sales hierarchy: MET/M/SEC segments
# with hi/lo/sp price bands plus the aggregates M, MET, SEC and the total T.
# NOTE(review): frequency = 7 declares a 7-observation seasonal period on
# weekly data -- confirm the intended seasonality (7 would mean a cycle
# every 7 weeks here, not a daily-within-week pattern).
MET_hi <- ts(train_sales[,"MET.hi"], start = 1, end = 154, frequency = 7)
MET_lo <- ts(train_sales[,"MET.lo"], start = 1, end = 154, frequency = 7)
# NOTE(review): the stray double comma below passes an empty positional
# argument (harmlessly absorbed by a defaulted formal) -- a typo worth
# cleaning up.
MET_sp <- ts(train_sales[,"MET.sp"],, start = 1, end = 154, frequency = 7)
M_hi <- ts(train_sales[,"M.hi"], start = 1, end = 154, frequency = 7)
M_lo <- ts(train_sales[,"M.lo"], start = 1, end = 154, frequency = 7)
SEC_hi <- ts(train_sales[,"SEC.hi"], start = 1, end = 154, frequency = 7)
SEC_lo <- ts(train_sales[,"SEC.lo"], start = 1, end = 154, frequency = 7)
M<- ts(train_sales[,"M"], start = 1, end = 154, frequency = 7)
MET = ts(train_sales[,'MET'], start = 1, end = 154, frequency = 7)
SEC = ts(train_sales[,'SEC'], start = 1, end = 154, frequency = 7)
T = ts(train_sales[,'T'], start = 1, end = 154, frequency = 7)
# Held-out test columns for the same hierarchy nodes.
# NOTE(review): assigning to T above shadows base R's TRUE alias T for the
# rest of the session -- another reason to avoid T/F as identifiers.
MET_hi_test <- test_sales[,"MET.hi"]
MET_lo_test <- test_sales[,"MET.lo"]
MET_sp_test <- test_sales[,"MET.sp"]
M_hi_test <- test_sales[,"M.hi"]
M_lo_test <- test_sales[,"M.lo"]
SEC_hi_test <- test_sales[,"SEC.hi"]
SEC_lo_test <- test_sales[,"SEC.lo"]
M_test<- test_sales[,"M"]
MET_test = test_sales[,'MET']
SEC_test = test_sales[,'SEC']
T_test = test_sales[,'T']
#**************************************************************************
MET_hi_model_arima <- HoltWinters(MET_hi)
for_MET_hi <- forecast(MET_hi_model_arima, h = 22)
MET_lo_model_arima <- HoltWinters(MET_lo)
for_MET_lo <- forecast(MET_lo_model_arima, h = 22)# Build a time series model
MET_sp_model_arima <- HoltWinters(MET_sp)
for_MET_sp <- forecast(MET_sp_model_arima, h = 22)# Build a time series model
#**************************************************************************
M_hi_model_arima <- HoltWinters(M_hi)
for_M_hi <- forecast(M_hi_model_arima, h = 22)# Build a time series model
M_lo_model_arima <- HoltWinters(M_lo)
for_M_lo <- forecast(M_lo_model_arima, h = 22)# Build a time series model
#**************************************************************************
SEC_hi_model_arima <- HoltWinters(SEC_hi)
for_SEC_hi <- forecast(SEC_hi_model_arima, h = 22)# Build a time series model
SEC_lo_model_arima <- HoltWinters(SEC_lo)
for_SEC_lo <- forecast(SEC_lo_model_arima, h = 22)# Build a time series model
#******************************************************************************
MET_model_arima <- HoltWinters(MET)
for_MET <- forecast(MET_model_arima, h = 22)# Build a time series model
M_model_arima <- HoltWinters(M)
for_M <- forecast(M_model_arima, h = 22)# Build a time series model
SEC_model_arima <- HoltWinters(SEC)
for_SEC <- forecast(SEC_model_arima, h = 22)# Build a time series model
#*******************************************************************************
T_model_arima <- HoltWinters(T)
for_T <- forecast(T_model_arima, h = 22)# Build a time series model
#********************************************************************************
for_MET_hi_xts <- xts((for_MET_hi$mean), order.by = test_dates)
MAPE1 <- MAPE(for_MET_hi_xts[,1], MET_hi_test)
print(MAPE1)
?MAPE
for_MET_sp_xts <- xts((for_MET_sp$mean), order.by = test_dates)
MAPE2 <- MAPE((for_MET_sp_xts[,1]), MET_sp_test)
print(MAPE2)
for_MET_lo_xts <- xts(for_MET_lo$mean, order.by = test_dates)
MAPE3 <- MAPE(for_MET_lo_xts[,1], MET_lo_test)
print(MAPE3)
for_M_hi_xts <- xts(for_M_hi$mean, order.by = test_dates)
MAPE4 <- MAPE(for_M_hi_xts[,1], M_hi_test)
print(MAPE4)
for_M_lo_xts <- xts(for_M_lo$mean, order.by = test_dates)
MAPE5 <- MAPE(for_M_lo_xts[,1], M_lo_test)
print(MAPE5)
for_SEC_lo_xts <- xts(for_SEC_lo$mean, order.by = test_dates)
MAPE6 <- MAPE(for_SEC_lo_xts[,1], SEC_lo_test)
print(MAPE6)
for_SEC_hi_xts <- xts(for_SEC_hi$mean, order.by = test_dates)
MAPE7 <- MAPE(for_SEC_hi_xts[,1], SEC_hi_test)
print(MAPE7)
for_MET_xts <- xts(for_MET$mean, order.by = test_dates)
MAPE8 <- MAPE(for_MET_xts[,1], MET_test)
print(MAPE8)
for_M_xts <- xts(for_M$mean, order.by = test_dates)
MAPE9 <- MAPE(for_M_xts[,1], M_test)
print(MAPE9)
for_SEC_xts <- xts(for_SEC$mean, order.by = test_dates)
MAPE10 <- MAPE(for_SEC_xts[,1], SEC_test)
print(MAPE10)
for_T_xts <- xts(for_T$mean, order.by = test_dates)
MAPE11 <- MAPE(for_T_xts[,1], T_test)
print(MAPE11)
##BOTTOM UP##******************************************
MET_bu = for_MET_hi_xts + for_MET_lo_xts +for_MET_sp_xts
MAPE_MET_bu <- MAPE(MET_bu, MET_test)
print(MAPE8)
print(MAPE_MET_bu)
M_bu = for_M_hi_xts + for_M_lo_xts
MAPE_M_bu <- MAPE(M_bu, M_test)
print(MAPE9)
print(MAPE_M_bu)
SEC_bu = for_SEC_hi_xts + for_SEC_lo_xts
MAPE_SEC_bu <- MAPE(SEC_bu, SEC_test)
print(MAPE10)
print(MAPE_SEC_bu)
T_bu = for_M_xts + for_MET_xts +for_SEC_xts
MAPE_T_bu <- MAPE(T_bu, T_test)
print(MAPE11)
print(MAPE_T_bu)
##TOP-BOTTOM##******************************************
# Calculate the average historical proportions
prop_MET_hi <- mean(MET_hi/MET)
prop_MET_lo <- mean(MET_lo/MET)
prop_MET_sp <- mean(MET_sp/MET)
prop_M_hi <- mean(M_hi/M)
prop_M_lo <- mean(M_lo/M)
prop_SEC_hi <- mean(SEC_hi/SEC)
prop_SEC_lo <- mean(SEC_lo/SEC)
prop_MET <- mean(MET/T)
prop_M <- mean(M/T)
prop_SEC <- mean(SEC/T)
# Distribute out your forecast to each product
for_prop_MET <- prop_MET*for_T_xts
for_prop_M <- prop_M*for_T_xts
for_prop_SEC <- prop_SEC*for_T_xts
# Distribute out your forecast to each product
for_prop_MET_hi <- prop_MET_hi*for_prop_MET
for_prop_MET_lo <- prop_MET_lo*for_prop_MET
for_prop_MET_sp <- prop_MET_sp*for_prop_MET
# Distribute out your forecast to each product
for_prop_M_hi <- prop_M_hi*for_prop_M
for_prop_M_lo <- prop_M_lo*for_prop_M
# Distribute out your forecast to each product
for_prop_SEC_hi <- prop_SEC_hi*for_prop_SEC
for_prop_SEC_lo <- prop_SEC_lo*for_prop_SEC
# Distribute out your forecast to each product
for_prop_MET_hi <- prop_MET_hi*for_prop_MET
for_prop_MET_lo <- prop_MET_lo*for_prop_MET
for_prop_MET_sp <- prop_MET_sp*for_prop_MET
# Distribute out your forecast to each product
for_prop_M_hi <- prop_M_hi*for_prop_M
for_prop_M_lo <- prop_M_lo*for_prop_M
# Distribute out your forecast to each product
for_prop_SEC_hi <- prop_SEC_hi*for_prop_SEC
for_prop_SEC_lo <- prop_SEC_lo*for_prop_SEC
MAPE_MET_tb <- MAPE(for_prop_MET, MET_test)
print(MAPE_MET_tb)
print(MAPE8)
MAPE_M_tb <- MAPE(for_prop_M, M_test)
print(MAPE_M_tb)
print(MAPE9)
MAPE_SEC_tb <- MAPE(for_prop_SEC, SEC_test)
print(MAPE_SEC_tb)
print(MAPE10)
MAPE_MET_hi_tb <- MAPE(for_prop_MET_hi, MET_hi_test)
print(MAPE_MET_hi_tb)
print(MAPE1)
MAPE_MET_lo_tb <- MAPE(for_prop_MET_lo, MET_lo_test)
print(MAPE_MET_lo_tb)
print(MAPE3)
MAPE_MET_sp_tb <- MAPE(for_prop_MET_sp, MET_sp_test)
print(MAPE_MET_sp_tb)
print(MAPE2)
MAPE_M_hi_tb <- MAPE(for_prop_M_hi, M_hi_test)
print(MAPE_M_hi_tb)
print(MAPE4)
MAPE_M_lo_tb <- MAPE(for_prop_M_lo, M_lo_test)
print(MAPE_M_lo_tb)
print(MAPE5)
MAPE_SEC_hi_tb <- MAPE(for_prop_SEC_hi, SEC_hi_test)
print(MAPE_SEC_hi_tb)
print(MAPE6)
MAPE_SEC_lo_tb <- MAPE(for_prop_SEC_lo, SEC_lo_test)
print(MAPE_SEC_lo_tb)
print(MAPE7)
|
69f95b07b8493eb3dde7666cf61aa1add68740c6
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/eseis/man/signal_filter.Rd
|
acbf563ab1bd1faedbf9b375840b67d9132a0ab0
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,708
|
rd
|
signal_filter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signal_filter.R
\name{signal_filter}
\alias{signal_filter}
\title{Filter a seismic signal in the time or frequency domain}
\usage{
signal_filter(
data,
f,
fft = FALSE,
dt,
type,
shape = "butter",
order = 2,
p = 0
)
}
\arguments{
\item{data}{\code{eseis} object, \code{numeric} vector or list of
objects, data set to be processed.}
\item{f}{\code{Numeric} value or vector of length two, lower and/or
upper cutoff frequencies (Hz).}
\item{fft}{\code{Logical} value, option to filter in the time domain
(\code{fft = FALSE}) or the frequency domain (\code{fft = TRUE}). Default
is (\code{fft = FALSE}).}
\item{dt}{\code{Numeric} value, sampling period. If omitted, \code{dt}
is set to 1/200.}
\item{type}{\code{Character} value, type of filter, one out of
\code{"LP"} (low pass), \code{"HP"} (high pass), \code{"BP"} (band
pass) and \code{"BR"} (band rejection). If omitted, the type is interpreted
from \code{f}. If \code{f} is of length two, \code{type} is set to
\code{"BP"}. If \code{f} is of length one, \code{type} is set to
\code{"HP"}.}
\item{shape}{\code{Character} value, one out of \code{"butter"}
(Butterworth), default is \code{"butter"}.}
\item{order}{\code{Numeric} value, order of the filter, default
is \code{2}. Only needed if \code{data} is no \code{eseis} object.}
\item{p}{\code{Numeric} value, fraction of the signal to be tapered.}
}
\value{
\code{Numeric} vector or list of vectors, filtered signal vector.
}
\description{
The function filters the input signal vector in the time or
frequency domain.
}
\examples{
## load example data set
data(rockfall)
## filter data set by bandpass filter between 1 and 90 Hz
rockfall_bp <- signal_filter(data = rockfall_eseis,
f = c(1, 90))
## taper signal to account for edge effects
rockfall_bp <- signal_taper(data = rockfall_bp,
n = 2000)
## plot filtered signal
plot_signal(data = rockfall_bp)
## compare time domain versus frequency domain filtering
rockfall_td <- signal_filter(data = rockfall_eseis,
f = c(10, 40),
fft = FALSE)
rockfall_td_sp <- signal_spectrum(data = rockfall_td)
rockfall_fd <- signal_filter(data = rockfall_eseis,
f = c(10, 40),
fft = TRUE)
rockfall_fd_sp <- signal_spectrum(data = rockfall_fd)
plot_spectrum(data = rockfall_td_sp)
plot_spectrum(data = rockfall_fd_sp)
}
\author{
Michael Dietze
}
\keyword{eseis}
|
79dca9edaf22573cf9444d8781062518fb9a9717
|
6700a5a2525b1d5eeaa47913f2d859c386ed2f81
|
/man/dmcFitSubject.Rd
|
1f27ac18af3b242e22e3227b3dfaf1cce6846e3b
|
[] |
no_license
|
amanirad/DMCfun
|
fbba2d0183b0d1c94e0e60bc6e11f0562a1baf29
|
e717cda849df4234df56837b92a64be1ebe19aec
|
refs/heads/master
| 2023-07-30T14:35:52.483186
| 2021-09-20T10:36:37
| 2021-09-20T10:36:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,741
|
rd
|
dmcFitSubject.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmcFit.R
\name{dmcFitSubject}
\alias{dmcFitSubject}
\title{dmcFitSubject: Fit DMC to individual participant data using optim (Nelder-Mead)}
\usage{
dmcFitSubject(
resOb,
nTrl = 1e+05,
startVals = list(),
minVals = list(),
maxVals = list(),
fixedFit = list(),
fitInitialGrid = TRUE,
fitInitialGridN = 10,
fixedGrid = list(),
nCAF = 5,
nDelta = 19,
pDelta = vector(),
tDelta = 1,
costFunction = "RMSE",
spDist = 1,
drDist = 0,
drShape = 3,
drLim = c(0.1, 0.7),
rtMax = 5000,
subjects = c(),
printInputArgs = TRUE,
printResults = FALSE,
optimControl = list()
)
}
\arguments{
\item{resOb}{Observed data (see flankerData and simonTask for data format) and the function dmcObservedData to create
the required input from either an R data frame or external *.txt/*.csv files}
\item{nTrl}{Number of trials to use within dmcSim.}
\item{startVals}{Starting values for to-be estimated parameters. This is a list with values specified individually
for amp, tau, drc, bnds, resMean, resSD, aaShape, spShape, spBias, sigm (e.g., startVals = list(amp = 20, tau = 200,
drc = 0.5, bnds = 75, resMean = 300, resSD = 30, aaShape = 2, spShape = 3, spBias = 0, sigm = 4)).}
\item{minVals}{Minimum values for the to-be estimated parameters. This is a list with values specified individually
for amp, tau, drc, bnds, resMean, resSD, aaShape, spShape, spBias, sigm (e.g., minVals = list(amp = 10, tau = 5, drc = 0.1,
bnds = 20, resMean = 200, resSD = 5, aaShape = 1, spShape = 2, spBias = -20, sigm = 1)).}
\item{maxVals}{Maximum values for the to-be estimated parameters. This is a list with values specified individually
for amp, tau, drc, bnds, resMean, resSD, aaShape, spShape, spBias, sigm (e.g., maxVals = list(amp = 40, tau = 300, drc = 1.0,
bnds = 150, resMean = 800, resSD = 100, aaShape = 3, spShape = 4, spBias = 20, sigm = 10))}
\item{fixedFit}{Fix parameter to starting value. This is a list with bool values specified individually for
amp, tau, drc, bnds, resMean, resSD, aaShape, spShape, spBias, sigm (e.g., fixedFit = list(amp = F, tau = F, drc = F,
bnds = F, resMean = F, resSD = F, aaShape = F, spShape = F, spBias = T, sigm = T))}
\item{fitInitialGrid}{TRUE/FALSE}
\item{fitInitialGridN}{10 linear steps between parameters min/max values (reduce if searching more than ~2/3 initial parameters)}
\item{fixedGrid}{Fix parameter for initial grid search. This is a list with bool values specified individually for
amp, tau, drc, bnds, resMean, resSD, aaShape, spShape, spBias, sigm (e.g., fixedGrid = list(amp = T, tau = F, drc = T,
bnds = T, resMean = T, resSD = T, aaShape = T, spShape = T, spBias = T, sigm = T)). As a default, the initial gridsearch
only searches the tau space.}
\item{nCAF}{Number of CAF bins.}
\item{nDelta}{Number of delta bins.}
\item{pDelta}{An alternative option to nDelta by directly specifying required percentile values (vector of values 0-100)}
\item{tDelta}{The type of delta calculation (1=direct percentiles points, 2=percentile bounds (tile) averaging)}
\item{costFunction}{The cost function to minimise: root mean square error ("RMSE": default),
squared percentage error ("SPE"), or likelihood-ratio chi-square statistic ("GS")}
\item{spDist}{The starting point (sp) distribution (0 = constant, 1 = beta, 2 = uniform)}
\item{drDist}{The drift rate (dr) distribution type (0 = constant, 1 = beta, 2 = uniform)}
\item{drShape}{The drift rate (dr) shape parameter}
\item{drLim}{The drift rate (dr) range}
\item{rtMax}{The limit on simulated RT (decision + non-decisional components)}
\item{subjects}{NULL (aggregated data across all subjects) or integer for subject number}
\item{printInputArgs}{TRUE (default) /FALSE}
\item{printResults}{TRUE/FALSE (default)}
\item{optimControl}{Additional control parameters passed to optim (see optim details section)}
}
\value{
dmcfit_subject List of dmcfit per subject fitted (see dmcFit)
}
\description{
Fit theoretical data generated from dmcSim to observed data by
minimizing the root-mean-square error ("RMSE") between a weighted combination
of the CAF and CDF functions using optim (Nelder-Mead). Alternative cost functions
include squared percentage error ("SPE"), and g-squared statistic ("GS").
}
\examples{
\donttest{
# Example 1: Flanker data from Ulrich et al. (2015)
fit <- dmcFitSubject(flankerData, nTrl = 1000, subjects = c(1, 2))
plot(fit, flankerData, subject = 1)
plot(fit, flankerData, subject = 2)
summary(fit)
# Example 2: Simon data from Ulrich et al. (2015)
fit <- dmcFitSubject(simonData, nTrl = 1000, subjects = c(1, 2))
plot(fit, simonData, subject = 1)
plot(fit, simonData, subject = 2)
summary(fit)
}
}
|
d7c5257502915b92d0f1c8c39c303cdc0ce7164f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/vegan/examples/bioenv.Rd.R
|
0ac9a7c0f5fe13d795830f18b084b34e20f8780e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
r
|
bioenv.Rd.R
|
library(vegan)

# Example from the vegan::bioenv() help page:
# "Best Subset of Environmental Variables with Maximum (Rank)
#  Correlation with Community Dissimilarities".
#
# The exhaustive subset search is very slow when many variables are
# offered, so only six environmental variables are considered here.
data(varespec)
data(varechem)

best_subset <- bioenv(
  wisconsin(varespec) ~ log(N) + P + K + Ca + pH + Al,
  varechem
)
best_subset
summary(best_subset)
|
a0f11ed0d5bcb14bf3b11504cf626ffb7d122725
|
3768f2217015f96978395d6ab0353509f7b6fd10
|
/analysis/02_Run_MAP_Fit.R
|
ba2078258e568bf95d7978a7ae0772487cdfcb1f
|
[] |
no_license
|
Dpananos/PKBayes
|
4cbb0ca20dc6073755a5ab6fb0d3633614ea200c
|
628b145ac3ae8355cb26ff86068aba2bb80d3fa2
|
refs/heads/master
| 2020-09-21T19:43:28.614187
| 2020-08-16T22:35:34
| 2020-08-16T22:35:34
| 224,901,887
| 4
| 0
| null | 2020-03-26T17:48:50
| 2019-11-29T18:08:30
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,189
|
r
|
02_Run_MAP_Fit.R
|
# Fit the pharmacokinetic Stan model by MAP (penalised optimisation) on the
# first 100 simulated subjects, then save posterior-mode predictions and
# approximate parameter draws for later comparison against other fits.
library(bayesplot)
library(here)
library(rstan)
suppressPackageStartupMessages(library(tidyverse))
library(tidybayes)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
# Infix "not in" helper, used to split observations into train/test times.
`%notin%` <- Negate(`%in%`)
# --- Load in data and model ---
# Load Simulated data. Only want 100 subjects
d = here('data','simulated_data.csv') %>%
read_csv() %>%
filter(subjectids<=100)
# Observation times kept for fitting: every half hour from 0.5 to 12.
obs_times = seq(0.5, 12, 0.5)
# Training
condition = d %>%
filter(times %in% obs_times)
# Testing
no_condition = d %>%
filter(times %notin% obs_times)
# Data list in the layout the Stan model expects: observed concentrations
# plus the held-out (test) subject ids/times for posterior prediction.
model_data = list(
yobs = condition$Cobs,
subjectids = condition$subjectids,
n_subjects = length(unique(condition$subjectids)),
times = condition$times,
N = nrow(condition),
#----------pred data---------
Ntest = nrow(no_condition),
test_ids = no_condition$subjectids,
test_times = no_condition$times
)
saveRDS(model_data, here('data','simulated_data_dump.Rdump'))
#Load model for HMC and MAP
model_file = here('models','strong_model.stan')
model = stan_model(model_file)
# ---- Fit Model with MAP ----
# L-BFGS optimisation; hessian = TRUE makes rstan return `draws`
# approximate posterior samples from a normal approximation at the mode.
maps = optimizing(
model,
data = model_data,
verbose = TRUE,
algorithm = 'LBFGS',
as_vector = TRUE,
hessian = TRUE,
tol_obj=1e-10,
iter=10000,
draws = 10000,
seed = 19920908
)
H = maps$hessian
# NOTE(review): S (pseudo-inverse of the negative Hessian, an approximate
# posterior covariance) is computed here but never used below -- confirm
# whether it is still needed.
S = MASS::ginv(-H)
dimnames(S) = dimnames(H)
# Approximate posterior draws: rows = draws, columns = parameters.
theta_tilde = maps$theta_tilde
# Save predictions
# Mean and central 95% interval of the ypred draws for each test point.
ypred_cols = theta_tilde[, grepl('ypred', colnames(theta_tilde))]
map_pred = apply(ypred_cols, 2, mean)
map_low = apply(ypred_cols, 2, function(x) quantile(x, 0.025))
map_high = apply(ypred_cols, 2, function(x) quantile(x, 0.975))
predictions = tibble(map_pred, map_low, map_high)
data_location = here('data', 'map_predictions.csv')
# Attach the held-out rows so predictions can be scored, tag the method.
predictions %>%
rename(pred = map_pred, low = map_low, high = map_high) %>%
bind_cols(no_condition) %>%
mutate(type='map') %>%
write_csv(data_location)
# Save parameters
# Per-parameter draw matrices; grep on the column names picks out the
# subject-level ke/ka/Cl columns of theta_tilde.
param_draws = list(
ke = theta_tilde[,grep('ke', colnames(theta_tilde))],
ka = theta_tilde[,grep('ka', colnames(theta_tilde))],
cl = theta_tilde[,grep('Cl', colnames(theta_tilde))]
)
data_location = here('data','map_parameter_draws.RDS')
saveRDS(param_draws, data_location)
|
1e6fa4f913c7c7994257a3d23fee918a6928d2fa
|
60b0a066ab9fc0ac131650ab55e2bcf4343a578b
|
/man/callSubtypes.Rd
|
64e830a1aff623e360935598455ecf43e70aabf2
|
[
"Apache-2.0"
] |
permissive
|
sky-xian/ImmuneSubtypeClassifier
|
a3141ea19cb4c0daf4da4e00554a1d2d61dc4e12
|
30e6215c390bf12761d17cbc9647ac4527e319ba
|
refs/heads/master
| 2020-07-15T20:09:20.669888
| 2019-08-15T00:34:45
| 2019-08-15T00:34:45
| 205,640,363
| 0
| 1
|
NOASSERTION
| 2019-09-01T06:51:03
| 2019-09-01T06:51:03
| null |
UTF-8
|
R
| false
| true
| 535
|
rd
|
callSubtypes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callSubtypes.R
\name{callSubtypes}
\alias{callSubtypes}
\title{callSubtypes
Make subtype calls for each sample}
\usage{
callSubtypes(mods, X)
}
\arguments{
\item{mods}{xgboost model list}
\item{X}{gene expression matrix, genes in rows, samples in columns}
}
\value{
table, column 1 is best call, remaining columns are subtype prediction scores.
}
\description{
callSubtypes
Make subtype calls for each sample
}
\examples{
calls <- callSubtypes(mods, X)
}
|
50ca902346ce8bbf7eed972c9a8bc136bb2f5406
|
31df5e6e37bee75ba4b22a972cfa57a632ba2d23
|
/_test/test_trec.R
|
a77a24021058e5bcad8de632083ceaf6786bea65
|
[] |
no_license
|
Sun-lab/asSeq
|
47fa0b451530114ce2e0cdcd40e64afbf21335d7
|
3dcfb91d603213057c570a98d532c6cfc5618929
|
refs/heads/master
| 2022-09-28T13:23:00.427278
| 2022-09-11T16:33:06
| 2022-09-11T16:33:06
| 134,029,698
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,067
|
r
|
test_trec.R
|
# Sanity-check of asSeq::trec() eQTL results for one gene/SNP pair,
# comparing the TReC (negative-binomial) model against a rank-based
# normal-scores linear model and against MASS::glm.nb().
# -------------------------------------------------------------------------
# read in data
# -------------------------------------------------------------------------
# NOTE(review): setwd() to an absolute home-directory path makes this
# script non-portable; prefer project-relative paths.
setwd("~/research/eQTL_seq/result/YRI_Joint_Permute/")
eD = read.table("TReCASE_Permute_YRI_expression_chr2_data_TReC.txt",
sep="\t")
X = read.table("Covariates.txt", sep = "\t", header = TRUE)
dim(X)
X[1:2,1:5]
X = data.matrix(X)
geno = read.table("TReCASE_Permute_YRI_geno_chr2_data.txt", sep="\t", header=TRUE)
dim(geno)
genoInfo = read.table("TReCASE_Permute_YRI_geno_chr2_info.txt", sep="\t", header=TRUE)
dim(genoInfo)
genoInfo[1:5,]
## eQTL pairs identified by TReCASE
eID = 46     # expression (gene) column index in eD
mID = 39568  # marker (SNP) row index in geno
# -------------------------------------------------------------------------
# check Trec model
# -------------------------------------------------------------------------
y = as.numeric(eD[,eID])
z1 = as.numeric(geno[mID,])
# Collapse genotype codes 3/4 to 1/2 -- presumably phased-heterozygote /
# homozygote codes being reduced to plain allele counts; TODO confirm
# against the asSeq genotype coding.
z2 = z1
z2[z1==3] = 1
z2[z1==4] = 2
library(asSeq)
trec(y, X, z2, output.tag="test_temp", p.cut=1.0, cis.only = FALSE)
rest = read.table("test_temp_eqtl.txt", header=TRUE, sep="\t")
rest
# Clean up the temporary output files written by trec().
system("rm test_temp_eqtl.txt")
system("rm test_temp_freq.txt")
# -------------------------------------------------------------------------
# check linear model
# -------------------------------------------------------------------------
# Rank-based inverse-normal (normal scores) transform.  Tied ranks get the
# average of the adjacent quantiles; NA positions are preserved.
normscore = function(vec) {
len = length(na.omit(vec))+1
rank = rank(na.omit(vec))
ties = (rank - floor(rank)) > 0
new.vec = vec[!is.na(vec)]
new.vec[!ties]=qnorm(rank[!ties]/len)
new.vec[ties] =0.5*(qnorm((rank[ties]+0.5)/len)+qnorm((rank[ties]-0.5)/len))
vec[!is.na(vec)] = new.vec
vec
}
# Same genotype recoding as above, for the marker found by the linear model.
z3 = as.numeric(geno[which(genoInfo$rsID == "rs4359651"),])
z4 = z3
z4[z3==3] = 1
z4[z3==4] = 2
y2 = normscore(y)
l3 = lm(y2 ~ X + z4)
summary(l3)
# -------------------------------------------------------------------------
# check TReC model using the marker identified by linear model
# -------------------------------------------------------------------------
library(asSeq)
trec(y, X, z4, output.tag="test_temp", p.cut=1.0, cis.only = FALSE)
rest = read.table("test_temp_eqtl.txt", header=TRUE, sep="\t")
rest
system("rm test_temp_eqtl.txt")
system("rm test_temp_freq.txt")
# -------------------------------------------------------------------------
# check TReC model using the marker identified by linear model
# without adjusting Z, i.e., use glmNB instead of glmNBlog
# -------------------------------------------------------------------------
trec(y, X, z4, output.tag="test_temp", p.cut=1.0, cis.only = FALSE, adjZ=FALSE)
rest = read.table("test_temp_eqtl.txt", header=TRUE, sep="\t")
rest
system("rm test_temp_eqtl.txt")
system("rm test_temp_freq.txt")
# -------------------------------------------------------------------------
# check results of glm.nb
# -------------------------------------------------------------------------
# Likelihood-ratio comparison of NB regression with vs without genotype.
library(MASS)
g1 = glm.nb(y ~ X + z4)
g0 = glm.nb(y ~ X)
anova(g0, g1)
|
d75ccd4048c90faaf82ab98855d33d5c9a4dcf3e
|
ef2a9d30cbd541fd282e57f1b28694dce4492320
|
/5133126๊น์ํ.R
|
546bfeb148a83bbfb74f8f62d432f12de2d15fd9
|
[] |
no_license
|
DaeguDude/bigdata
|
34f64500ce8dc7cf150b2205e0d5a70a0b0de519
|
fb2a4aabbaacc4de688d5a1d134ebdd9510ab68e
|
refs/heads/master
| 2020-09-05T00:48:48.307526
| 2019-12-16T04:19:35
| 2019-12-16T04:19:35
| 219,937,809
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,858
|
r
|
5133126๊น์ํ.R
|
## 1. Function generating time-series data.
##
## Builds X_t = sum_{j=0}^{t-1} rho^j * e_{t-j}, i.e. an AR(1)-style
## process driven by iid errors e drawn from the chosen distribution.
##
## Args:
##   rho  : autoregressive coefficient
##   nsmp : number of samples to generate
##   dtb  : error distribution, one of "rnorm", "runif", "rcauchy",
##          "rchisq" ("rchisq" draws are shifted by -1 so the errors are
##          mean-zero when rchisq_df = 1)
##   rnorm_mean, rnorm_std       : normal options (mean, sd)
##   runif_min, runif_max        : uniform options (min, max)
##   rcauchy_loc, rcauchy_scale  : Cauchy options (location, scale)
##   rchisq_df                   : chi-squared option (degrees of freedom)
##
## Returns: numeric vector of length nsmp with the simulated X_t values.
timeseries_data <- function(rho, nsmp, dtb,
                            rnorm_mean = 0, rnorm_std = 1,
                            runif_min = -1, runif_max = 1,
                            rcauchy_loc = 0, rcauchy_scale = 1,
                            rchisq_df = 1) {
  # Draw the iid error sequence.
  if (dtb == 'rnorm') {
    n <- rnorm(nsmp, rnorm_mean, rnorm_std)
  } else if (dtb == 'runif') {
    n <- runif(nsmp, runif_min, runif_max)
  } else if (dtb == 'rcauchy') {
    n <- rcauchy(nsmp, rcauchy_loc, rcauchy_scale)
  } else if (dtb == 'rchisq') {
    n <- rchisq(nsmp, df = rchisq_df) - 1
  } else {
    # BUG FIX: the original called geterrmessage('...'), which takes no
    # argument and does not signal an error, so execution continued with
    # `n` undefined.  stop() aborts with a clear (and corrected) message.
    stop("This function can only sample from the normal, uniform, ",
         "Cauchy and chi-squared distributions.")
  }

  # Powers of rho: 1, rho, rho^2, ..., rho^(t-1).  Vectorised instead of
  # growing a vector in a loop; seq_len() is safe when nsmp = 0.
  powers <- rho^(seq_len(length(n)) - 1)

  # Pre-allocated result vector.
  xx <- numeric(length(n))
  for (i in seq_along(n)) {
    # X_t = 1*e_t + rho*e_{t-1} + rho^2*e_{t-2} + ...
    xx[i] <- sum(powers[1:i] * n[i:1])
  }
  return(xx)
}
# Plots of X_t against t for each error distribution, comparing four
# values of rho (1, 0, 1.1, -1.1) in a 2 x 2 panel layout.  The same seed
# is set before every call so all panels share the same error sequence.

# Normal errors
par(mfrow=c(2,2))
set.seed(1234)
plot(timeseries_data(1,100,'rnorm'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=1')
set.seed(1234)
plot(timeseries_data(0,100,'rnorm'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=0')
set.seed(1234)
plot(timeseries_data(1.1,100,'rnorm'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=1.1')
set.seed(1234)
plot(timeseries_data(-1.1,100,'rnorm'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=-1.1')

# Cauchy errors
par(mfrow=c(2,2))
set.seed(1234)
plot(timeseries_data(1,100,'rcauchy'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=1')
set.seed(1234)
plot(timeseries_data(0,100,'rcauchy'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=0')
set.seed(1234)
plot(timeseries_data(1.1,100,'rcauchy'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=1.1')
set.seed(1234)
plot(timeseries_data(-1.1,100,'rcauchy'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=-1.1')

# Centred chi-squared errors: X^2(1) - 1
# BUG FIX: in the original, the par() call below was fused onto the end of
# the comment line above it, so it never executed and the chi-squared
# panels did not get their own 2 x 2 layout.
par(mfrow=c(2,2))
set.seed(1234)
plot(timeseries_data(1,100,'rchisq'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=1')
set.seed(1234)
plot(timeseries_data(0,100,'rchisq'),xlim=c(0,50), ylim=c(-50,50), type='l',xlab='t',ylab='Xt',main='rho=0')
set.seed(1234)
plot(timeseries_data(1.1,100,'rchisq'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=1.1')
set.seed(1234)
plot(timeseries_data(-1.1,100,'rchisq'),xlim=c(0,50), ylim=c(-50,50),type='l',xlab='t',ylab='Xt',main='rho=-1.1')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.