blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9311e5fa72a49b72d6c3a3a60604de4d89d8065 | 645a92f8a56687b56fc63b2942f16f6e0401ed82 | /lsca/project/03_create_ts.R | 4cd9b86561b143d5dec7f0733c11c660a870c63e | [] | no_license | Busyclover/icl | 4c8ef6240003f03881527d3f4b6c74fd4ce50560 | d13e8545415117ab1759dd0d75a15c9f199d6c63 | refs/heads/master | 2021-06-20T00:30:42.655511 | 2017-07-21T09:30:23 | 2017-07-21T09:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,218 | r | 03_create_ts.R | library(forecast)
make_ts <- function(df) {
  # Build a univariate time series from the `demand` column of a data frame.
  # The series starts at t = 1 with one observation per period
  # (frequency = 1, i.e. no seasonality encoded).
  ts(df$demand, start = 1, frequency = 1)
}
fit_hw <- function(ts) {
  # Fit Holt's linear (non-seasonal) exponential smoothing model.
  #
  # Bug fixed: the original passed `alpha = TRUE, beta = TRUE`.
  # stats::HoltWinters treats any non-NULL alpha/beta as a *fixed*
  # smoothing value, so TRUE was coerced to the numeric 1, pinning both
  # parameters at 1 instead of estimating them.  Leaving alpha and beta
  # at their NULL defaults lets HoltWinters() estimate them by minimising
  # the squared one-step prediction error; `gamma = FALSE` still disables
  # the seasonal component as before.
  HoltWinters(ts, gamma = FALSE)
}
fit_ets <- function(ts) {
  # Fit an ETS (error/trend/seasonal) state-space model, letting
  # forecast::ets() choose all three components automatically ("ZZZ").
  fitted_model <- ets(ts, model = "ZZZ")
  fitted_model
}
plot_ets <- function(ets_ob) {
# Draw the default plot for a fitted ets model object; plot() dispatches
# to the ets method registered by the forecast package.  Called for its
# side effect (a plot); the return value is not used.
plot(ets_ob)
}
make_fitted_plot <- function(ets_data) {
  # Histogram of the fitted values from a model object that carries a
  # `fitted` component (e.g. an ets fit).
  #
  # Bug fixed: the original called ggplot(aes(x = fitted)) with no data
  # argument, relying on environment capture of a local variable named
  # `fitted` (which also masks stats::fitted).  Build an explicit data
  # frame and pass it to ggplot() instead.
  fitted_df <- data.frame(fitted_value = as.numeric(ets_data$fitted))
  plot <- ggplot(fitted_df, aes(x = fitted_value)) + geom_histogram()
  return(plot)
}
fit_auto_arima <- function(ts) {
# Select and fit an ARIMA model for the series with forecast::auto.arima(),
# which searches over candidate orders automatically.
auto.arima(ts)
}
# Fit one model per store x ingredient using a nested data frame.
# NOTE(review): `daily_demand` is not defined in this file -- it must be
# created elsewhere before this script is run.
test <- daily_demand %>%
select(store, date, lettuce, tomato) %>%
# gather()/nest(-...) are superseded tidyr idioms (pivot_longer() /
# nest(data = ...) are the modern equivalents).
gather(ingredient, demand, c(-store, -date)) %>%
nest(-c(store, ingredient), .key = "ts_data") %>%
mutate(ts_ob = ts_data %>% purrr::map(make_ts),
# NOTE(review): the column is named ts_hw but it maps fit_ets(), not the
# Holt-Winters fitter; also `ts_auto_arime` looks like a typo for
# `ts_auto_arima`.
ts_hw = ts_ob %>% purrr::map(fit_ets),
ts_auto_arime = ts_ob %>% purrr::map(fit_auto_arima))
ob <- test$ts_hw[[1]]
# NOTE(review): this immediately overwrites `ob` with a one-element list
# (`[` keeps the list container) -- probably only one of these two lines
# is wanted.
ob <- test$ts_hw[1]
# Alternative approach: one series per store_ingredient combination,
# splitting the long data frame into a named list.
test <- daily_demand %>%
select(store, date, lettuce, tomato) %>%
gather(ingredient, demand, c(-store, -date)) %>%
unite(store_ingredient, store, ingredient) %>%
mutate(store_ingredient = as.factor(store_ingredient)) %>%
split(., .$store_ingredient)
serieses <- lapply(test, make_ts)
# Plot every series (side effects only; the returned list is discarded).
lapply(serieses, plot)
|
d417fa99c4ad1201d0a3c4f1dadc61cdf742b48f | d86268c2fdd4195208c3fd5aecab31c324af7bca | /omd/man/intersect_latlon.Rd | e3652e8b905d9ba642e027dfbaa2ac60e53966e6 | [] | no_license | bio-datascience/omd | 0e2edc61e86c135383b5d4bf29c14c95af026f5f | 5f2f532dfe077388f7911cc7999622c4b6a3f8b8 | refs/heads/master | 2023-08-28T21:44:27.488641 | 2021-11-02T15:25:02 | 2021-11-02T15:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 439 | rd | intersect_latlon.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/landcrossing.R
\name{intersect_latlon}
\alias{intersect_latlon}
\title{Make the two data sources share the same lat/lon points; discard the
non-overlapping ones. Basically erases a-b from a.}
\usage{
intersect_latlon(a, b)
}
\description{
Make the two data sources share the same lat/lon points by discarding the
non-overlapping ones; in effect, this removes from a the points that are
absent from b (i.e., erases a - b from a).
}
|
391a1527778b219eb66a14ab898799e90f01da6d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DynTxRegime/examples/TxInfoFactorWithSubsets-class.Rd.R | 9ab83462e3fcbc162dc51f14c7ad3841dba87c30 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,471 | r | TxInfoFactorWithSubsets-class.Rd.R | library(DynTxRegime)
### Name: TxInfoFactorWithSubsets-class
### Title: Class '"TxInfoFactorWithSubsets"'
### Aliases: TxInfoFactorWithSubsets-class
### Keywords: internal
### ** Examples
showClass("TxInfoFactorWithSubsets")
fSet <- function(x1){
  # Feasible treatment sets by covariate: patients with x1 > 0 fall in
  # "subset1" (can receive "A" or "B"); patients with x1 <= 0 fall in
  # "subset2" (can receive only "A").
  subsets <- list("subset1" = c("A","B"),
                  "subset2" = c("A"))
  ptsSubset <- character(length(x1))
  ptsSubset[x1 > 0] <- "subset1"
  ptsSubset[x1 <= 0] <- "subset2"
  # Bug fixed: the original ended on the replacement assignment above, so
  # the function invisibly returned the scalar "subset2" rather than the
  # per-patient subset labels.  Return the label vector explicitly.
  # (`subsets` is kept as in-code documentation of the mapping; the class
  # object in this example supplies its subsets slot separately.)
  ptsSubset
}
# Construct a TxInfoFactorWithSubsets object directly via new().
# NOTE(review): extracted example code -- the slots are filled in by hand
# rather than through the package's usual constructors.
obj <- new("TxInfoFactorWithSubsets",
superSet = c("A","B"),
txName = "A1",
ptsSubset = c("subset1", "subset2", "subset1", "subset2"),
subsetRule = fSet,
subsets = list("subset1" = c("A","B"),
"subset2" = c("A")),
singleton = c(FALSE,TRUE,FALSE,TRUE))
# Two example treatment vectors (factors over the super set {A, B}).
txVec1 <- as.factor(c("A","B","A","B"))
txVec2 <- as.factor(c("A","A","A","B"))
# Exercise the package-internal (triple-colon) accessors on obj.
DynTxRegime:::.compare(object = obj, vec1 = txVec1, vec2 = txVec2)
DynTxRegime:::.convertTx(object = obj, txVec = c("A","B","A"))
DynTxRegime:::.getLevels(object = obj, txVec = txVec1)
DynTxRegime:::.getSuperSet(object = obj)
DynTxRegime:::.getTxName(object = obj)
DynTxRegime:::.getPtsSubset(object = obj)
DynTxRegime:::.getSingleton(object = obj)
DynTxRegime:::.getSubsetRule(object = obj)
DynTxRegime:::.getSubsets(object = obj)
# .validTx should pass here ...
DynTxRegime:::.validTx(object = obj, c("B","A","A","A"))
# ... and error here; try() keeps the example running past the error.
try(DynTxRegime:::.validTx(object = obj, c("B","B","A","A")))
|
4f849340627d5c546eb41753d16598cbec58b064 | e1b485461c40d593bb22d5d2c0e1b88308f494ec | /man/magrittr-package.Rd | 22603d3744a5c5b5d239245216cfabe22ab72230 | [
"MIT"
] | permissive | tidyverse/magrittr | 59eee2624f7d59a23d7473504a87ebf250d694b6 | 21093d06e43649a72b865811188e77fc8ad87780 | refs/heads/main | 2023-08-25T01:20:58.243298 | 2023-03-08T13:30:31 | 2023-03-08T13:30:31 | 15,564,525 | 625 | 138 | NOASSERTION | 2022-08-12T07:58:20 | 2014-01-01T13:30:01 | R | UTF-8 | R | false | true | 2,265 | rd | magrittr-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/magrittr.R
\docType{package}
\name{magrittr-package}
\alias{magrittr}
\alias{magrittr-package}
\title{magrittr - Ceci n'est pas un pipe}
\description{
The magrittr package offers a set of operators which promote semantics
that will improve your code by
\itemize{
\item structuring sequences of data operations left-to-right
(as opposed to from the inside and out),
\item avoiding nested function calls,
\item minimizing the need for local variables and function definitions, and
\item making it easy to add steps anywhere in the sequence of operations.
}
The operators pipe their left-hand side values forward into expressions that
appear on the right-hand side, i.e. one can replace \code{f(x)} with
\code{x \%>\% f}, where \code{\%>\%} is the (main) pipe-operator.
}
\details{
Consider the example below. Four operations are performed to
arrive at the desired data set, and they are written in a natural order:
the same as the order of execution. Also, no temporary variables are needed.
If yet another operation is required, it is straight-forward to add to the
sequence of operations wherever it may be needed.
For a more detailed introduction see the vignette
(\code{vignette("magrittr")}) or the documentation pages for the
available operators:\cr
\tabular{ll}{
\code{\link{\%>\%}} \tab pipe.\cr
\code{\link{\%T>\%}} \tab tee pipe.\cr
\code{\link{\%<>\%}} \tab assignment pipe.\cr
\code{\link{\%$\%}} \tab exposition pipe.\cr
}
}
\examples{
\dontrun{
the_data <-
read.csv('/path/to/data/file.csv') \%>\%
subset(variable_a > x) \%>\%
transform(variable_c = variable_a/variable_b) \%>\%
head(100)
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://magrittr.tidyverse.org}
\item \url{https://github.com/tidyverse/magrittr}
\item Report bugs at \url{https://github.com/tidyverse/magrittr/issues}
}
}
\author{
\strong{Maintainer}: Lionel Henry \email{lionel@rstudio.com}
Authors:
\itemize{
\item Stefan Milton Bache \email{stefan@stefanbache.dk} (Original author and creator of magrittr) [copyright holder]
\item Hadley Wickham \email{hadley@rstudio.com}
}
Other contributors:
\itemize{
\item RStudio [copyright holder, funder]
}
}
\keyword{internal}
|
d778ce4d251a8141ae5d12610c3869c81ededb87 | cefda39ba106202c799d86c598ec5d9cf448c631 | /3/3_code.r | ec5d999df17f250ec04543508c53b89a6f5eb35d | [] | no_license | umarayo/techjam2017 | 676e6a18e15203c5d41935fad14eaf1ad3c63180 | 26df82b1a1291437cd422dbb5e3b8fda9d7bcdec | refs/heads/master | 2020-03-28T00:06:48.553765 | 2018-09-04T16:37:32 | 2018-09-04T16:37:32 | 147,376,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,468 | r | 3_code.r | ########## Version 1.2 - Score 88.525/100 #######
## Install packages - First time only
# install.packages("dplyr")
# install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
# install.packages("data.table")
## Load packages - First time only
# library('dplyr') # data manipulation
# library('xgboost') # classification algorithm
# library('data.table') #provides an in-memory columnar structure
## Setup working directory
# NOTE(review): setwd() with an absolute local path makes this script
# machine-specific; relative paths or here::here() would be portable.
setwd("C:/uma/0_TechJam/Data/3") #Home
getwd()
## Remove scientific notation
options(scipen = 999)
## Load Data
# NOTE(review): dplyr/xgboost/data.table are used below but their
# library() calls above are commented out -- they must already be loaded.
acc=read.csv('./input/tj_03_account_info.csv',header=TRUE)
txn=read.csv('./input/tj_03_deposit_txn.csv',header=TRUE)
train.org = read.csv('./input/tj_03_training.csv',header=FALSE)
test.org = read.csv('./input/tj_03_test.csv',header=FALSE)
##################################################################
#-----------------------------------------------------------------
# Data Preparation
#-----------------------------------------------------------------
##################################################################
# Add prefix to column name 'info_'
names(acc)[2:length(names(acc))]=paste0('info_',names(acc)[2:length(names(acc))])
# Add column name for train and test
colnames(train.org) = c("account_no","label")
colnames(test.org) = c("account_no")
# Check duplicated data -- NO DUPLICATED DATA
# acc.u <- unique( acc[ , 1:ncol(acc) ] )
# txn.u <- unique( txn[ , 1:ncol(txn) ] )
# dup_flag <- duplicated(txn[ ,1:ncol(txn)])
# dup_f <- data.frame(dup_flag)
# total_dup <- (dup_f[which(dup_f==TRUE),1])
# txn.dup <- txn[dup_flag,]
# txn.dup <- txn.dup[,c("account_no","txn_type","txn_amount","txn_hour","from_to_account_no","txn_dt")]
# txn.dup <- setorderv(txn.dup,c("account_no","txn_type","txn_amount","txn_hour"), c(1,1,1,1))
# Convert format to date
acc$info_txn_dt <- as.Date(acc$info_txn_dt, format='%Y-%m-%d')
acc$info_open_date <- as.Date(acc$info_open_date, format='%Y-%m-%d')
acc$info_last_active_date <- as.Date(acc$info_last_active_date, format='%Y-%m-%d')
txn$txn_dt <- as.Date(txn$txn_dt, format='%Y-%m-%d')
# Extract only max(txn_date) record per account
# (data.table: per account, keep the row with the latest info_txn_dt)
acc.m <- setDT(acc)[,.SD[which.max(info_txn_dt)],keyby=account_no]
# Introduce new column for open and last_active days
# NOTE(review): these features measure days relative to Sys.Date(), so the
# feature values change every day the script is re-run.
acc.m$info_txn_days <- difftime(Sys.Date(),acc.m$info_txn_dt,units='days')%>% as.numeric()
acc.m$info_open_days <- difftime(Sys.Date(),acc.m$info_open_date,units='days')%>% as.numeric()
acc.m$info_last_active_days <- difftime(Sys.Date(),acc.m$info_last_active_date,units='days')%>% as.numeric()
txn$txn_days <- difftime(Sys.Date(),txn$txn_dt,units='days')%>% as.numeric()
# Combine acc and txn data and add month and day field
data <- merge(x=txn, y=acc.m, by = "account_no", all.x=TRUE)
# Extract more 2 Columns from field txn_type
data$is_DR <- data$txn_type=="DR"
data$is_CR <- data$txn_type=="CR"
# Convert column type
data$txn_type <- as.character(data$txn_type)
data$info_compound_frq_unit <- as.character(data$info_compound_frq_unit)
# Selected column for analysis
##### Column Name List
##### Main - 'account_no'
##### Date - 'txn_dt','info_txn_dt','info_open_date','info_last_active_date'
##### Txn - 'from_to_account_no','txn_amount',,'txn_hour','txn_type'
##### Info - 'info_customer_type','info_dormant_days','info_compound_frq','info_compound_frq_unit','info_eff_interest_rate'
##### New - 'txn_days','is_DR','is_CR','info_txn_days','info_open_days','info_last_active_days'
cols_D=c('account_no','from_to_account_no','txn_amount','txn_hour','txn_type','info_customer_type','info_dormant_days','info_compound_frq','info_compound_frq_unit','info_eff_interest_rate','txn_days','is_DR','is_CR','info_txn_days','info_open_days','info_last_active_days')
#cols_D=c('account_no','from_to','txn_month','txn_day','txn_hour','txn_amount','txn_type','card_no','mer_cat_code','mer_id')
D <- data[,cols_D]
# Replace level by number
# (label-encode each character column as integer factor codes;
# NOTE(review): inherits(D[[f]], "character") is the safer idiom than
# class(x)=="character")
for (f in cols_D) {
if (class(D[[f]])=="character") {
levels <- unique(c(D[[f]]))
D[[f]] <- as.numeric(factor(D[[f]], levels=levels))
}
}
# Update Logical value True>>1 False>>0
p_logi <- names(D)[which(sapply(D, is.logical))]
for (col in p_logi) set(D, j = col, value = as.numeric(D[[col]]))
# Convert NA to 0
D[is.na(D)] <- 0
# Data Checking
str(D)
head(D)
tail(D)
# Merge train and test with data table
train <- merge(x=train.org, y=D, by = "account_no", all.x=TRUE)
test <- merge(x=test.org, y=D, by = "account_no", all.x=TRUE)
# Move column 'is_merchant' to be the first column
cols_T =c('label',cols_D)
train <- train[,cols_T]
# Convert data to xgboost format
dtrain <- xgb.DMatrix(data = data.matrix(train[, 2:ncol(train)]), label = train$label)
dtest <- xgb.DMatrix(data.matrix(test))
##################################################################
#-----------------------------------------------------------------
# Tune and Run the model
#-----------------------------------------------------------------
##################################################################
# NOTE(review): with booster = "gblinear", the tree-specific settings
# below (max_depth, min_child_weight, subsample, colsample_*) are ignored
# by xgboost -- confirm whether "gbtree" was intended.
param <- list(
# General Parameters
booster = "gblinear", # default = "gbtree"
silent = 0, # default = 0
# Booster Parameters
eta = 0.3, # default = 0.3, range: [0,1]
gamma = 0.3, # default = 0, range: [0,???]
max_depth = 11, # default = 6, range: [1,???]
min_child_weight = 1, # default = 1, range: [0,???]
subsample = 0.5, # default = 1, range: (0,1]
colsample_bytree = 0.9, # default = 1, range: (0,1]
colsample_bylevel = 0.9, # default = 1, range: (0,1]
lambda = 0.95, # default = 1
alpha = 0, # default = 0
# Task Parameters
objective = "binary:logistic", # default = "reg:linear"
eval_metric = "error"
)
set.seed(1712)
# Train for 30 rounds, reporting training error every 10 rounds.
xgm <- xgb.train(data = dtrain,
param, nrounds = 30,
watchlist = list(train = dtrain),
print_every_n = 10,
verbose = 1)
##################################################################
#-----------------------------------------------------------------
# Prediction and Output transformation
#-----------------------------------------------------------------
##################################################################
# Predict
out <- predict(xgm, dtest)
sub <- data.frame(account_no= test$account_no, label = out)
# Observe distribution of predicted data
hist(sub$label,xlim = range(0:max(sub$label)))
# Convert pop to flag 1 or 0
label<-sub$label
# Threshold = mean + 5 * population SD (sqrt(E[x^2] - E[x]^2)).
CutOff = mean(label)+5*(sqrt(mean(label^2)-mean(label)^2))
# Binarise column 2 of sub row-by-row with data.table::set().
# NOTE(review): a vectorised sub$label <- as.numeric(sub$label > CutOff)
# would achieve the same without the loops.
p_h <- rownames(sub)[which(sub$label>CutOff)]
for (row in p_h) set(sub, j = as.integer(2), i=as.integer(row), value = 1)
p_l <- rownames(sub)[which(sub$label<=CutOff)]
for (row in p_l) set(sub, j = as.integer(2), i=as.integer(row), value = 0)
# Assign "is_merchant" flag using mode
Mode <- function(x) {
  # Most frequent value in x; on ties the first-encountered value wins
  # (which.max returns the first maximum).
  distinct_vals <- unique(x)
  counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(counts)]
}
# One flag per account: the modal (most frequent) 0/1 label across its rows.
SB <- aggregate(label ~ account_no, sub, Mode)
# Reorder outcome data to be matched with the test data
TS <-test.org
TS$label <- with(SB, label[match(TS$account_no,SB$account_no)])
OC <- data.frame(label=TS$label)
# Export to file
write.table(OC, file = "./output/3.txt", col.names=FALSE ,row.names = FALSE, quote=FALSE)
|
51420e35b977b544a1740a143e193b11593c25ec | 7a6be82ecc13a6deafa51b207915b08336486d91 | /ARD piece wise regression.R | 396cc2e2cb81a2c6fdb756f6c9bd96ea8bccb0fa | [] | no_license | anerigarg/MSc_data | e60ab5293b382a3280b7688bddddbb399bd578d3 | 37b52f80204d8a02d605b3380003e2fc40cee6ab | refs/heads/master | 2023-01-24T11:16:27.387328 | 2020-12-08T19:37:44 | 2020-12-08T19:37:44 | 285,697,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 156,874 | r | ARD piece wise regression.R | # Piecewise Regression
# data files --------------------------------------------------------------
# NOTE(review): read_csv() is used here but library(readr) is only loaded
# in the next section -- the library() calls should come first.
ARD_3_with_0 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0.csv")
# Relabel raw treatment codes (100A, 30L70A, ...) as % live coral and drop
# duplicate plot/grid/visit rows.
ARD_3_with_0 <- ARD_3_with_0 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
#This one has days since outplanting as a col:
ARD_3_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0_new_2.csv")
ARD_3_with_0_new_2 <- ARD_3_with_0_new_2 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
# Same recoding for the 4-6 cm size-class data.
ARD_4to6_with_0 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/ARD_4to6/ARD_4to6_with_0.csv")
ARD_4to6_with_0 <- ARD_4to6_with_0 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
# packages and pallettes --------------------------------------------------
library(readr)
library(tidyverse)
library(ggplot2)
library(viridis)
library(PNWColors)
library(ggpmisc)
library(purrr)
library(broom)
library(segmented)
library(gapminder)
library(patchwork)
# install.packages("tidyverse")
# install.packages("viridis")
# install.packages("PNWColors")
# install.packages("ggpmisc")
# install.packages("purrr")
# install.packages("segmented")
# NOTE(review): these two install.packages() calls are NOT commented out,
# so the script reinstalls gapminder and patchwork on every run -- they
# should be commented like the ones above.
install.packages("gapminder")
install.packages("patchwork")
# Colour palettes used by the plots below.
pal <- pnw_palette(6, name = "Starfish", type = "discrete")
pal1 <- pnw_palette(3, name = "Winter", type = "continuous")
pal2 <-pnw_palette(6, name = "Cascades", type = "continuous")
pal3 <-pnw_palette(3, name = "Lake", type = "continuous")
# initial plots (ARD3 and ARD4to6 same graph, all time points) ------------
# Mean/SD/SE recruit density per complexity x treatment x visit.
# NOTE(review): the SE divisor sqrt(1587) is a hard-coded overall n rather
# than the per-group sample size -- confirm this is intentional (n() inside
# summarize() would give group-wise n).
ARD_3_with_0_sum <- ARD_3_with_0 %>%
group_by(complexity, treatment, visit) %>%
summarize(density.mean = mean(density), density.sd = sd(density)) %>%
mutate(density.se = density.sd/sqrt(1587))
ARD_4to6_with_0_sum <- ARD_4to6_with_0 %>%
group_by(complexity, treatment, visit) %>%
summarize(density.mean = mean(density), density.sd = sd(density)) %>%
mutate(density.se = density.sd/sqrt(1587))
ggplot() +
facet_grid(complexity~treatment) +
geom_jitter(data = ARD_3_with_0,
aes(x = visit,
y = density,
colour = treatment),
alpha = 0.2,
shape = 2,
size = 1) +
geom_line(data = ARD_3_with_0_sum,
aes(x = visit,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_errorbar(data = ARD_3_with_0_sum,
aes(x = visit,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_3_with_0_sum,
aes(x = visit,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
# geom_smooth(data = ARD_3_with_0,
# aes(x = visit,
# y = density.mean,
# colour = treatment,
# group = treatment),
# size = 2,
# # method = lm,
# # formula = y ~ x + I(x^2),
# se = FALSE ) +
scale_x_continuous(breaks = c(3,6, 9,12, 15,18)) +
scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,21) +
labs(x = expression(Visit),
y = expression(Density~(fish~m^{2}))) +
ggtitle("mean fish recruit density over time (3cm)") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
)
ggplot() +
facet_grid(complexity~treatment) +
geom_jitter(data = ARD_4to6_with_0,
aes(x = visit,
y = density,
colour = treatment),
alpha = 0.2,
size = 1) +
geom_line(data = ARD_4to6_with_0_sum,
aes(x = visit,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.3) +
geom_errorbar(data = ARD_4to6_with_0_sum,
aes(x = visit,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_4to6_with_0_sum,
aes(x = visit,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.5) +
# geom_smooth(data = ARD_4to6_with_0,
# aes(x = visit,
# y = density.mean,
# colour = treatment,
# group = treatment),
# size = 2,
# # method = lm,
# # formula = y ~ x + I(x^2),
# se = FALSE ) +
scale_x_continuous(breaks = c(3,6, 9,12, 15,18)) +
scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,11) +
labs(x = expression(Visit),
y = expression(Density~(fish~m^{2}))) +
ggtitle("mean fish recruit density over time (4-6 cm)") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
)
#It is WAY too messy when plotted together
# Segmented package (youtube tutprial) (test) -------------------------------------------------------
# https://www.youtube.com/watch?v=onfXC1qe7LI&ab_channel=ShokoufehMirzaei
# estimates linear and GLM with one or more segmented relatinships in linear predictor
# provides estimates of the slopes and breakpoints (and Standard errors)
# iterative procedure, needs starting values only for the breakpoint paramteters
#basic glm: (linear)
# Subset: 0% live-coral treatment, low complexity.
ARD_3_0_L <- ARD_3_with_0 %>%
filter(treatment == "0%") %>%
filter(complexity == "Low")
glm_ARD3_0_L <- glm(density ~ visit, data = ARD_3_0_L, family = "poisson")
plot(glm_ARD3_0_L)
summary(glm_ARD3_0_L)
#quadratic glm
ARD_3_0_L_quad <- ARD_3_0_L %>%
mutate(visit2 = visit^2)
glm_quad_ARD3_0_L <- glm(density ~ visit + visit2, data = ARD_3_0_L_quad, family = "poisson")
summary(glm_quad_ARD3_0_L)
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k) xk)
ARD3_0_L_pwr <- ARD_3_0_L %>%
group_by(visit) %>%
summarize(density.mean = mean(density))
# NOTE(review): attach() is discouraged -- the bare `visit` used below
# comes from the attached data frame and can silently mask other objects.
attach(ARD3_0_L_pwr)
ARD3_0_L_pwr
# Manual broken-stick (hinge) model with the knot at visit 9: X equals
# (visit - 9) after the knot and 0 before it.
dummyknot = rep(0,length(visit))
dummyknot[visit > 9] = 1
ARD3_0_L_pwr$Xdif = visit - 9
ARD3_0_L_pwr$DN = dummyknot
ARD3_0_L_pwr$X = ARD3_0_L_pwr$Xdif * ARD3_0_L_pwr$DN
ARD3_0_L_pwr
ARD3_0_L_regout <- lm(density.mean ~ visit + X, data = ARD3_0_L_pwr)
summary(ARD3_0_L_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
# Y = 4.1372 + 0.2214visit + -0.2388 (visit-9) xk
#can replace visit by which visit
#may be more useful to do days since outplanting as a predictor variable...
#make a lookup table with date and day since out-planting for each plot?
# segmented - muggeo paper ------------------------------------------------
# NOTE(review): ARD_3_0_L_sum is only created further down this script
# (in the "manual 1 break-point" section), so this line errors if the
# script is run top-to-bottom.  Likewise, days_since_outplanting is only
# present once ARD_3_0_L has been rebuilt from ARD_3_with_0_new_2 below.
ARD_3_0_L_sum
fit.glm <- glm(density ~ days_since_outplanting, data = ARD_3_0_L)
# Bug fixed: segmented() expects seg.Z to be a one-sided formula and psi
# to be named after the segmented variable (or given as a plain numeric
# vector of starting breakpoints).  The original passed the bare variable
# and psi = list(x = 4,9,23, 30, 43), which raises the
# "wrong number of terms in seg.Z or psi" error the author describes.
fit.seg <- segmented(fit.glm, seg.Z = ~days_since_outplanting,
                     psi = list(days_since_outplanting = c(4, 9, 23, 30, 43)))
# multiple linear regression analysis -------------------------------------
# Fit a straight line to just the left-hand segment (<= day 23).
# NOTE(review): days_since_outplanting only exists in the *_new_2 data, so
# this requires ARD_3_0_L to have been rebuilt from ARD_3_with_0_new_2
# (done further down) rather than from ARD_3_with_0 as above.
ARD_3_0_L_left <- ARD_3_0_L %>%
filter(days_since_outplanting <= 23)
ARD_3_0_L_left_lm <- lm(density ~ days_since_outplanting, data = ARD_3_0_L_left)
summary(ARD_3_0_L_left_lm)
# non-significant p value
#
# plots with days since outplant (staggered june 4 & 5) ------------------------------------------
ARD_3_with_0_new <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0_new.csv")
# Order the treatments and drop duplicate plot/grid/visit rows.
ARD_3_with_0_new<- ARD_3_with_0_new %>%
mutate(treatment = factor(treatment, levels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
# Mean/SD/SE density by day since outplanting.
# NOTE(review): sqrt(1587) is a hard-coded total n, not the per-group
# sample size -- confirm intentional.
ARD_3_with_0_new_sum <- ARD_3_with_0_new %>%
group_by(complexity, treatment, days_since_outplanting) %>%
summarize(density.mean = mean(density), density.sd = sd(density)) %>%
mutate(density.se = density.sd/sqrt(1587))
ggplot() +
facet_grid(complexity~treatment) +
geom_jitter(data = ARD_3_with_0_new,
aes(x = days_since_outplanting,
y = density,
colour = treatment),
alpha = 0.2,
shape = 2,
size = 1) +
geom_line(data = ARD_3_with_0_new_sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_errorbar(data = ARD_3_with_0_new_sum,
aes(x = days_since_outplanting,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_3_with_0_new_sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
geom_smooth(data = ARD_3_with_0_new_sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 2,
# method = lm,
# formula = y ~ x + I(x^2),
se = FALSE ) +
# scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,11) +
# xlim(0,20) +
labs(x = expression(Days~Since~Outplanting),
y = expression(Density~(fish~m^{2}))) +
ggtitle("mean fish recruit density over time - staggered start (3cm)") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
)
# plots with days since outplant (all starting june 5) USE THIS ONE--------------------
ARD_3_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0_new_2.csv")
# Relabel raw treatment codes (100A, 30L70A, ...) as % live coral and drop
# duplicate plot/grid/visit rows.
ARD_3_with_0_new_2<- ARD_3_with_0_new_2 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
# Mean/SD/SE density per complexity x treatment x day, then save to CSV.
ARD_3_with_0_new__2sum <- ARD_3_with_0_new_2 %>%
group_by(complexity, treatment, days_since_outplanting) %>%
summarize(density.mean = mean(density), density.sd = sd(density)) %>%
mutate(density.se = density.sd/sqrt(1587))
write.csv(ARD_3_with_0_new__2sum, "ARD_3_with_0_new__2sum.csv")
ggplot() +
facet_grid(complexity~treatment) +
geom_jitter(data = ARD_3_with_0_new_2,
aes(x = days_since_outplanting,
y = density,
colour = treatment),
alpha = 0.2,
shape = 2,
size = 1) +
geom_line(data = ARD_3_with_0_new__2sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_errorbar(data = ARD_3_with_0_new__2sum,
aes(x = days_since_outplanting,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_3_with_0_new__2sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
geom_smooth(data = ARD_3_with_0_new__2sum,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 2,
# method = "glm",
# family = "binomial",
# formula = y ~ x + I(x^2),
se = TRUE ) +
# scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,11) +
# xlim(0,20) +
labs(x = expression(Days~Since~Outplanting),
y = expression(Density~(fish~m^{2}))) +
ggtitle("mean fish recruit density over time - june 5 start (3cm)") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
)
#try plotting median to see how it looks --> essentially the same pattern as mean but more dimunited
# Same summary as above but with the median instead of the mean.
ARD_3_with_0_new__2sum_median <- ARD_3_with_0_new_2 %>%
group_by(complexity, treatment, days_since_outplanting) %>%
summarize(density.median = median(density), density.sd = sd(density)) %>%
mutate(density.se = density.sd/sqrt(1587))
# write.csv(ARD_3_with_0_new__2sum, "ARD_3_with_0_new__2sum.csv")
ggplot() +
facet_grid(complexity~treatment) +
geom_jitter(data = ARD_3_with_0_new_2,
aes(x = days_since_outplanting,
y = density,
colour = treatment),
alpha = 0.2,
shape = 2,
size = 1) +
geom_line(data = ARD_3_with_0_new__2sum_median,
aes(x = days_since_outplanting,
y = density.median,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_errorbar(data = ARD_3_with_0_new__2sum_median,
aes(x = days_since_outplanting,
ymin = density.median - density.se,
ymax = density.median + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_3_with_0_new__2sum_median,
aes(x = days_since_outplanting,
y = density.median,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
geom_smooth(data = ARD_3_with_0_new__2sum_median,
aes(x = days_since_outplanting,
y = density.median,
colour = treatment,
group = treatment),
size = 2,
# method = "glm",
# family = "binomial",
# formula = y ~ x + I(x^2),
se = TRUE ) +
# scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,11) +
# xlim(0,20) +
labs(x = expression(Days~Since~Outplanting),
y = expression(Density~(fish~m^{2}))) +
ggtitle("median fish recruit density over time - june 5 start (3cm)") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
)
# manual 1 break-point in R (youtube) --> with days since outplant (test) ----------------
# https://www.youtube.com/watch?v=onfXC1qe7LI&ab_channel=ShokoufehMirzaei
# estimates linear and GLM with one or more segmented relatinships in linear predictor
# provides estimates of the slopes and breakpoints (and Standard errors)
# iterative procedure, needs starting values only for the breakpoint paramteters
ARD_3_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0_new_2.csv")
ARD_3_with_0_new_2<- ARD_3_with_0_new_2 %>%
mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
distinct(plot_grid_visit, .keep_all = TRUE)
#basic glm: (linear)
# Rebuild the 0% / Low subset from the data that carries
# days_since_outplanting.
ARD_3_0_L <- ARD_3_with_0_new_2 %>%
filter(treatment == "0%") %>%
filter(complexity == "Low")
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k) xk)
ARD3_0_L_pwr <- ARD_3_0_L %>%
group_by(days_since_outplanting) %>%
summarize(density.mean = mean(density))
# NOTE(review): attach() masks objects; the bare days_since_outplanting
# used below comes from the attached frame.
attach(ARD3_0_L_pwr)
ARD3_0_L_pwr
# Hinge term with the knot placed at day 25.
dummyknot = rep(0,length(days_since_outplanting))
dummyknot[days_since_outplanting > 25] = 1
ARD3_0_L_pwr$Xdif = days_since_outplanting - 25
ARD3_0_L_pwr$DN = dummyknot
ARD3_0_L_pwr$X = ARD3_0_L_pwr$Xdif * ARD3_0_L_pwr$DN
ARD3_0_L_pwr
ARD3_0_L_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_0_L_pwr)
summary(ARD3_0_L_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 (days_since_outplanting - 25) * xk
# so if days since outplanting is over 25, xk is 1
# if days since outplanting is less than 25 xk is 0
# except R square value is tiny --> maybe I should change the breakpoint?
#try and plot?
# BUG FIX: the original piped from `ARD_3_with_0_new__2sum` (double underscore),
# an object that is never created anywhere in the script, so this line errored.
# Build the per-day summary required by the plot below (density.mean/density.se)
# directly from the raw data instead.
ARD_3_0_L_sum <- ARD_3_with_0_new_2 %>%
  filter(treatment == "0%", complexity == "Low") %>%
  group_by(treatment, complexity, days_since_outplanting) %>%
  summarize(
    density.mean = mean(density),
    density.sd   = sd(density),
    n            = n()
  ) %>%
  mutate(density.se = density.sd / sqrt(n))  # per-day SE, not a hard-coded count
# 0% / Low panel: raw jittered densities, per-day mean +/- SE, and a smoothed
# trend of the per-day means.
ggplot() +
  facet_grid(complexity~treatment) +
  # raw observations
  geom_jitter(
    data = ARD_3_0_L,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean trace
  geom_line(
    data = ARD_3_0_L_sum,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_0_L_sum,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_0_L_sum,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # default loess smoother over the per-day means
  geom_smooth(
    data = ARD_3_0_L_sum,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2,
    # method = lm,
    # formula = y ~ x + I(x^2),
    se = FALSE
  ) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("firebrick")) +
  ylim(0, 11) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("mean fish recruit density over time - 0% Low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    # BUG FIX: removed the trailing comma that followed this argument -- an
    # empty argument inside theme(...) is an "argument is empty" error in R.
    legend.position = "none"
  )
# segmented package (test) ------------------------------------------------------
# https://stackoverflow.com/questions/8758646/piecewise-regression-with-r-plotting-the-segments
# example:
# Worked example from the segmented package (via the Stack Overflow link above):
# simulate a series with two slope changes, fit segmented(), and overlay the
# fitted broken line. Kept verbatim as a reference for the real-data attempts.
library(segmented)
set.seed(12)  # make the simulated data reproducible
xx <- 1:100
zz <- runif(100)
# piecewise-linear signal with hinge points at x = 35 and x = 70, plus noise
yy <- 2 + 1.5*pmax(xx - 35, 0) - 1.5*pmax(xx - 70, 0) + 15*pmax(zz - .5, 0) +
  rnorm(100,0,2)
dati <- data.frame(x = xx, y = yy, z = zz)
out.lm <- lm(y ~ x, data = dati)
# segmented() refines the two starting breakpoints supplied via psi
o <- segmented(out.lm, seg.Z = ~x, psi = list(x = c(30,60)),
               control = seg.control(display = FALSE)
)
# broken.line() returns the piecewise model's fitted values for plotting
dat2 = data.frame(x = xx, y = broken.line(o)$fit)
library(ggplot2)
ggplot(dati, aes(x = x, y = y)) +
  geom_point() +
  geom_line(data = dat2, color = 'blue')
# Reload and relabel the ARD_3 data, then try segmented() on the 0% / Low
# subset with two candidate starting breakpoints (days 10 and 30).
ARD_3_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/test ARD_3/ARD_3_with_0_new_2.csv") %>%
  mutate(
    treatment = factor(
      treatment,
      levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
      labels = c("control", "0%", "30%", "50%", "70%", "100%")
    )
  ) %>%
  distinct(plot_grid_visit, .keep_all = TRUE)

ARD_3_0_L <- ARD_3_with_0_new_2 %>%
  filter(treatment == "0%", complexity == "Low") %>%
  select(treatment, complexity, days_since_outplanting, density)

ARD_3_0_L_lm <- lm(density ~ days_since_outplanting, data = ARD_3_0_L)
ARD_3_0_L_seg <- segmented(
  ARD_3_0_L_lm,
  seg.Z = ~ days_since_outplanting,
  psi = list(days_since_outplanting = c(10, 30)),
  control = seg.control(display = FALSE)
)
# segmented() was returning "no breakpoint estimated"; a suggested workaround
# (Sept 16) is to re-run the fit until it succeeds, since its breakpoint
# search can fail stochastically.
lm.model <- lm(density ~ days_since_outplanting, data = ARD_3_0_L)
# BUG FIX: the original formula was `seg.Z =~ydata`, but no object `ydata`
# exists anywhere in the script -- segmented() therefore errored on every
# iteration and the while-loop never terminated. Use the real predictor,
# TRUE/FALSE instead of T/F, and cap the number of attempts so a persistent
# failure cannot hang the session.
fit_ok <- FALSE
attempts <- 0
while (!fit_ok && attempts < 50) {
  attempts <- attempts + 1
  tryCatch({
    s <- segmented(lm.model, seg.Z = ~days_since_outplanting, psi = NA)
    fit_ok <- TRUE
  }, error = function(e) {
    # swallow and retry: segmented() aborts when no breakpoint is found
  })
}
if (!fit_ok) {
  warning("segmented() failed to estimate a breakpoint after 50 attempts",
          call. = FALSE)
}
# Same segmented() attempt on the 0% / High subset.
ARD_3_0_H <- ARD_3_with_0_new_2 %>%
  filter(treatment == "0%", complexity == "High") %>%
  select(treatment, complexity, days_since_outplanting, density)

ARD_3_0_H_lm <- lm(density ~ days_since_outplanting, data = ARD_3_0_H)
# First with user-supplied starting breakpoints (days 15 and 35)...
ARD_3_0_H_seg <- segmented(ARD_3_0_H_lm, seg.Z = ~ days_since_outplanting,
                           psi = list(days_since_outplanting = c(15, 35)),
                           control = seg.control(display = FALSE))
# ...then letting segmented() choose its own starting values.
ARD_3_0_H_seg <- segmented(ARD_3_0_H_lm, seg.Z = ~ days_since_outplanting,
                           control = seg.control(display = FALSE))
# BUG FIX: the original passed the lm/segmented model objects to ggplot() and
# geom_line(), so the "fit" line was just the raw data replotted (hence the
# "this looks wrong" note). Plot the raw points and overlay the segmented
# model's fitted broken line, as in the package example above.
ARD_3_0_H_fit <- data.frame(
  days_since_outplanting = ARD_3_0_H$days_since_outplanting,
  density = broken.line(ARD_3_0_H_seg)$fit
)
ggplot(ARD_3_0_H, aes(x = days_since_outplanting, y = density)) +
  geom_point() +
  geom_line(data = ARD_3_0_H_fit, color = 'blue')
# Retry on per-day means: segmented() may behave better on the averaged data
# than on the noisy raw observations.
ARD_3_0_L <- ARD_3_with_0_new_2 %>%
  filter(treatment == "0%", complexity == "Low") %>%
  select(treatment, complexity, days_since_outplanting, density)

ARD_3_0_L_sum <- ARD_3_0_L %>%
  group_by(complexity, treatment, days_since_outplanting) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))  # per-day SE (was hard-coded sqrt(133))

ARD_3_0_L_lm <- lm(density.mean ~ days_since_outplanting, data = ARD_3_0_L_sum)
# Let segmented() choose its own starting breakpoint (psi omitted); the
# duplicate call with a commented-out psi argument is removed.
ARD_3_0_L_seg <- segmented(ARD_3_0_L_lm, seg.Z = ~ days_since_outplanting,
                           control = seg.control(display = FALSE))
# BUG FIX: plot the summary data frame (not the lm object) and overlay the
# segmented model's fitted broken line instead of the model object itself.
ARD_3_0_L_fit <- data.frame(
  days_since_outplanting = ARD_3_0_L_sum$days_since_outplanting,
  density.mean = broken.line(ARD_3_0_L_seg)$fit
)
ggplot(ARD_3_0_L_sum, aes(x = days_since_outplanting, y = density.mean)) +
  geom_point() +
  geom_line(data = ARD_3_0_L_fit, color = 'blue')
# strucchange package -----------------------------------------------------
# Install only when missing: an unconditional install.packages() in a script
# re-downloads the package on every run.
if (!requireNamespace("strucchange", quietly = TRUE)) install.packages("strucchange")
library(strucchange)
ARD_3_0_L <- ARD_3_with_0_new_2 %>%
  filter(treatment == "0%", complexity == "Low") %>%
  select(treatment, complexity, days_since_outplanting, density)
# BUG FIX: the original wrote `data = ARD_3_0_L(days_since_outplanting, density)`,
# which tries to CALL the data frame as a function and errors. breakpoints()
# takes the data frame via `data =`; h = 3 is the minimum segment size.
breakpoints(density ~ days_since_outplanting, data = ARD_3_0_L, h = 3)
# Manually plot first peak for each treat*comp ----------------------------
# ARD_3 -------------------------------------------------------------------
# Control High ------------------------------------------------------------
# Control / High subset with per-day summaries, overall and on either side of
# the visually chosen day-23 breakpoint (day 23 is kept in both halves so the
# two fitted lines meet at the knot).
ARD_3_C_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "control")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended before publication.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_C_H_sum <- summarize_density_by_day(ARD_3_C_H, 120)
ARD_3_C_H_sum_left <- ARD_3_C_H %>%
  filter(days_since_outplanting <= 23) %>%
  summarize_density_by_day(73)
ARD_3_C_H_sum_right <- ARD_3_C_H %>%
  filter(days_since_outplanting >= 23) %>%
  summarize_density_by_day(54)
# Control / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-23 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_C_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_C_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_C_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_C_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_C_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_C_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_C_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_C_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_C_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 23, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1",
                                 "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("Control High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank()
  )
# Iterative piecewise regression for Control/High with a knot at day 23:
#   Y = b0 + b1 * days + b2 * (days - 23) * I(days > 23)
ARD3_C_H_pwr <- ARD_3_C_H %>%
  group_by(days_since_outplanting) %>%
  summarize(density.mean = mean(density))
ARD3_C_H_pwr

# Build the knot terms with mutate() instead of attach(): attach() pollutes
# the search path for the rest of the session and masks later variables of
# the same name.
ARD3_C_H_pwr <- ARD3_C_H_pwr %>%
  mutate(
    Xdif = days_since_outplanting - 23,             # distance from the knot
    DN   = as.numeric(days_since_outplanting > 23), # indicator: past the knot?
    X    = Xdif * DN                                # hinge term (0 before knot)
  )
ARD3_C_H_pwr

# Slope before the knot = b1 (days term); slope after = b1 + b2 (X term).
ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
summary(ARD3_C_H_regout)
# 0% High -----------------------------------------------------------------
# 0% / High subset with per-day summaries split at the day-30 breakpoint
# (day 30 kept in both halves so the fitted lines meet at the knot).
ARD_3_0_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "0%")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_0_H_sum <- summarize_density_by_day(ARD_3_0_H, 126)
ARD_3_0_H_sum_left <- ARD_3_0_H %>%
  filter(days_since_outplanting <= 30) %>%
  summarize_density_by_day(94)
ARD_3_0_H_sum_right <- ARD_3_0_H %>%
  filter(days_since_outplanting >= 30) %>%
  summarize_density_by_day(40)
# 0% / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-30 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_0_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_0_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_0_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_0_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_0_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_0_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_0_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_0_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_0_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 30, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("firebrick", "darkorange1", "goldenrod1",
                                 "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("0% High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k)) xk)
# ARD3_C_H_pwr <- ARD_3_C_H %>%
# group_by(days_since_outplanting) %>%
# summarize(density.mean = mean(density))
#
# attach(ARD3_C_H_pwr)
# ARD3_C_H_pwr
# dummyknot = rep(0,length(days_since_outplanting))
#
# dummyknot[days_since_outplanting > 23] = 1
#
# ARD3_C_H_pwr$Xdif = days_since_outplanting - 23
# ARD3_C_H_pwr$DN = dummyknot
# ARD3_C_H_pwr$X = ARD3_C_H_pwr$Xdif * ARD3_C_H_pwr$DN
# ARD3_C_H_pwr
#
# ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
# summary(ARD3_C_H_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
#check which values apply where
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 (days_since_outplanting - 25) * xk
# so if days since outplanting is over 25, xk is 1
# if days since outplanting is less than 25 xk is 0
# except R square value is tiny --> maybe I should change the breakpoint?
# 30% High ----------------------------------------------------------------
# 30% / High subset with per-day summaries split at the day-5 breakpoint
# (day 5 kept in both halves so the fitted lines meet at the knot).
ARD_3_30_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "30%")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_30_H_sum <- summarize_density_by_day(ARD_3_30_H, 121)
ARD_3_30_H_sum_left <- ARD_3_30_H %>%
  filter(days_since_outplanting <= 5) %>%
  summarize_density_by_day(30)
ARD_3_30_H_sum_right <- ARD_3_30_H %>%
  filter(days_since_outplanting >= 5) %>%
  summarize_density_by_day(98)
# 30% / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-5 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_30_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_30_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_30_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_30_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_30_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_30_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_30_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_30_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_30_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 5, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("darkorange1", "goldenrod1",
                                 "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("30% High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k)) xk)
# ARD3_C_H_pwr <- ARD_3_C_H %>%
# group_by(days_since_outplanting) %>%
# summarize(density.mean = mean(density))
#
# attach(ARD3_C_H_pwr)
# ARD3_C_H_pwr
# dummyknot = rep(0,length(days_since_outplanting))
#
# dummyknot[days_since_outplanting > 23] = 1
#
# ARD3_C_H_pwr$Xdif = days_since_outplanting - 23
# ARD3_C_H_pwr$DN = dummyknot
# ARD3_C_H_pwr$X = ARD3_C_H_pwr$Xdif * ARD3_C_H_pwr$DN
# ARD3_C_H_pwr
#
# ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
# summary(ARD3_C_H_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
#check which values apply where
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 (days_since_outplanting - 25) * xk
# so if days since outplanting is over 25, xk is 1
# if days since outplanting is less than 25 xk is 0
# except R square value is tiny --> maybe I should change the breakpoint?
# 50% High ----------------------------------------------------------------
# 50% / High subset with per-day summaries split at the day-3 breakpoint
# (day 3 kept in both halves so the fitted lines meet at the knot).
ARD_3_50_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "50%")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_50_H_sum <- summarize_density_by_day(ARD_3_50_H, 128)
ARD_3_50_H_sum_left <- ARD_3_50_H %>%
  filter(days_since_outplanting <= 3) %>%
  summarize_density_by_day(24)
ARD_3_50_H_sum_right <- ARD_3_50_H %>%
  filter(days_since_outplanting >= 3) %>%
  summarize_density_by_day(112)
# 50% / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-3 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_50_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_50_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_50_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_50_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_50_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_50_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_50_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_50_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_50_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 3, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("50% High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k)) xk)
# ARD3_C_H_pwr <- ARD_3_C_H %>%
# group_by(days_since_outplanting) %>%
# summarize(density.mean = mean(density))
#
# attach(ARD3_C_H_pwr)
# ARD3_C_H_pwr
# dummyknot = rep(0,length(days_since_outplanting))
#
# dummyknot[days_since_outplanting > 23] = 1
#
# ARD3_C_H_pwr$Xdif = days_since_outplanting - 23
# ARD3_C_H_pwr$DN = dummyknot
# ARD3_C_H_pwr$X = ARD3_C_H_pwr$Xdif * ARD3_C_H_pwr$DN
# ARD3_C_H_pwr
#
# ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
# summary(ARD3_C_H_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
#check which values apply where
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 (days_since_outplanting - 25) * xk
# so if days since outplanting is over 25, xk is 1
# if days since outplanting is less than 25 xk is 0
# except R square value is tiny --> maybe I should change the breakpoint?
# 70% High ----------------------------------------------------------------
# 70% / High subset with per-day summaries split at the day-4 breakpoint
# (day 4 kept in both halves so the fitted lines meet at the knot).
ARD_3_70_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "70%")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_70_H_sum <- summarize_density_by_day(ARD_3_70_H, 121)
ARD_3_70_H_sum_left <- ARD_3_70_H %>%
  filter(days_since_outplanting <= 4) %>%
  summarize_density_by_day(26)
ARD_3_70_H_sum_right <- ARD_3_70_H %>%
  filter(days_since_outplanting >= 4) %>%
  summarize_density_by_day(99)
# 70% / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-4 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_70_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_70_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_70_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_70_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_70_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_70_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_70_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_70_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_70_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 4, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("70% High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k)) xk)
# ARD3_C_H_pwr <- ARD_3_C_H %>%
# group_by(days_since_outplanting) %>%
# summarize(density.mean = mean(density))
#
# attach(ARD3_C_H_pwr)
# ARD3_C_H_pwr
# dummyknot = rep(0,length(days_since_outplanting))
#
# dummyknot[days_since_outplanting > 23] = 1
#
# ARD3_C_H_pwr$Xdif = days_since_outplanting - 23
# ARD3_C_H_pwr$DN = dummyknot
# ARD3_C_H_pwr$X = ARD3_C_H_pwr$Xdif * ARD3_C_H_pwr$DN
# ARD3_C_H_pwr
#
# ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
# summary(ARD3_C_H_regout)
# Y = b0 + b1visit + b2 (visit-9) xk
#check which values apply where
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 (days_since_outplanting - 25) * xk
# so if days since outplanting is over 25, xk is 1
# if days since outplanting is less than 25 xk is 0
# except R square value is tiny --> maybe I should change the breakpoint?
# 100% High ---------------------------------------------------------------
# 100% / High subset with per-day summaries split at the day-23 breakpoint
# (day 23 kept in both halves so the fitted lines meet at the knot).
ARD_3_100_H <- ARD_3_with_0_new_2 %>%
  filter(complexity == "High", treatment == "100%")

# helper: per-day mean/sd/SE. NOTE(review): SE keeps the original script's
# fixed denominator (total rows in the subset), not the per-day group size --
# confirm this is intended.
summarize_density_by_day <- function(df, n_total) {
  df %>%
    select(treatment, complexity, days_since_outplanting, density) %>%
    group_by(days_since_outplanting, treatment, complexity) %>%
    summarize(density.mean = mean(density), density.sd = sd(density)) %>%
    mutate(density.se = density.sd / sqrt(n_total))
}

ARD_3_100_H_sum <- summarize_density_by_day(ARD_3_100_H, 124)
ARD_3_100_H_sum_left <- ARD_3_100_H %>%
  filter(days_since_outplanting <= 23) %>%
  summarize_density_by_day(76)
ARD_3_100_H_sum_right <- ARD_3_100_H %>%
  filter(days_since_outplanting >= 23) %>%
  summarize_density_by_day(48)
# 100% / High panel: raw jittered observations, per-day mean +/- SE, and
# separate linear fits on each side of the day-23 breakpoint (dashed line).
ggplot() +
  # raw observations
  geom_jitter(
    data = ARD_3_100_H,
    aes(x = days_since_outplanting, y = density, colour = treatment),
    alpha = 0.2, shape = 2, size = 1
  ) +
  # per-day mean traces on each side of the breakpoint
  geom_line(
    data = ARD_3_100_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  geom_line(
    data = ARD_3_100_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 1.5, alpha = 0.5
  ) +
  # mean +/- SE whiskers
  geom_errorbar(
    data = ARD_3_100_H_sum_left,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  geom_errorbar(
    data = ARD_3_100_H_sum_right,
    aes(x = days_since_outplanting,
        ymin = density.mean - density.se, ymax = density.mean + density.se,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), width = 0.3, size = 0.5
  ) +
  # per-day means
  geom_point(
    data = ARD_3_100_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  geom_point(
    data = ARD_3_100_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    position = position_dodge(0.2), size = 3, alpha = 0.7, shape = 17
  ) +
  # linear fit (with confidence band) on each side of the breakpoint
  geom_smooth(
    data = ARD_3_100_H_sum_left,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  geom_smooth(
    data = ARD_3_100_H_sum_right,
    aes(x = days_since_outplanting, y = density.mean,
        colour = treatment, group = treatment),
    size = 2, method = "lm", se = TRUE
  ) +
  # breakpoint marker
  geom_vline(xintercept = 23, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("100% High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank(),
    axis.title.y = element_blank()
  )
#iterative piecewise regression:
# Y = b0 + b1visit + b2 (x-x(k)) xk)
# ARD3_C_H_pwr <- ARD_3_C_H %>%
# group_by(days_since_outplanting) %>%
# summarize(density.mean = mean(density))
#
# attach(ARD3_C_H_pwr)
# ARD3_C_H_pwr
# dummyknot = rep(0,length(days_since_outplanting))
#
# dummyknot[days_since_outplanting > 23] = 1
#
# ARD3_C_H_pwr$Xdif = days_since_outplanting - 23
# ARD3_C_H_pwr$DN = dummyknot
# ARD3_C_H_pwr$X = ARD3_C_H_pwr$Xdif * ARD3_C_H_pwr$DN
# ARD3_C_H_pwr
#
# ARD3_C_H_regout <- lm(density.mean ~ days_since_outplanting + X, data = ARD3_C_H_pwr)
# summary(ARD3_C_H_regout)
# Y = b0 + b1*visit + b2*(visit - knot)*xk
#check which values apply where
# density = 2.8595 + 0.08105 * days_since_outplanting - 0.1285 * (days_since_outplanting - 23) * xk
# so if days since outplanting is over 23, xk is 1
# if days since outplanting is less than 23, xk is 0
# except R-squared value is tiny --> maybe I should change the breakpoint?
# Control Low -------------------------------------------------------------
# Control / Low complexity subset of the 3 cm cohort.
ARD_3_C_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "control")

# Per-visit mean/sd/se of recruit density.
# Fix: the standard error previously divided by a hard-coded whole-subset
# count (sqrt(127)); each visit's own sample size via n() is the correct
# denominator for the SE of that visit's mean.
ARD_3_C_L_sum <- ARD_3_C_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the visually chosen breakpoint (day 13)
# for the two-slope plot. Day 13 is deliberately kept in both segments so
# the fitted lines meet at the knot. (Previously hard-coded sqrt(68) and
# sqrt(67) in the SE; now per-group n().)
ARD_3_C_L_sum_left <- ARD_3_C_L %>%
  filter(days_since_outplanting <= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_C_L_sum_right <- ARD_3_C_L %>%
  filter(days_since_outplanting >= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# Control Low panel: raw densities (faded jitter), piecewise mean lines
# joined at the breakpoint (day 13, dashed vline), per-visit means with
# +/- SE bars, and separate lm fits for the left/right segments.
# Layer order matters: raw points first so summaries/fits draw on top.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities to show spread behind the summaries.
  geom_jitter(data = ARD_3_C_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment of the piecewise fit.
  geom_line(data = ARD_3_C_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_C_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_C_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_C_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_C_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_C_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_C_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_C_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint between the two slopes.
  geom_vline(xintercept = 13,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used here (single treatment level present);
  # the full palette is kept for consistency with the faceted plots.
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("Control Low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none"
    # axis.title.x = element_blank()
  )
# 0% Low ------------------------------------------------------------------
# 0% live coral / Low complexity subset of the 3 cm cohort.
ARD_3_0_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "0%")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(133)); now uses each visit's own sample size via n().
ARD_3_0_L_sum <- ARD_3_0_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 23); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(86)/sqrt(55).)
ARD_3_0_L_sum_left <- ARD_3_0_L %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_0_L_sum_right <- ARD_3_0_L %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# 0% Low panel: same layered layout as the other single-treatment panels —
# raw jitter, piecewise mean lines joined at the breakpoint (day 23),
# per-visit means +/- SE, and separate lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_3_0_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_3_0_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_0_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_0_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_0_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_0_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_0_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_0_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_0_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 23,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used (single treatment level present).
  scale_colour_manual(values = c("firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("0% Low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# 30% low -----------------------------------------------------------------
# 30% live coral / Low complexity subset of the 3 cm cohort.
ARD_3_30_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "30%")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(136)); now uses each visit's own sample size via n().
ARD_3_30_L_sum <- ARD_3_30_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 23); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(88)/sqrt(56).)
ARD_3_30_L_sum_left <- ARD_3_30_L %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_30_L_sum_right <- ARD_3_30_L %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# 30% low panel: raw jitter, piecewise mean lines joined at the breakpoint
# (day 23), per-visit means +/- SE, and separate lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_3_30_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_3_30_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_30_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_30_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_30_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_30_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_30_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_30_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_30_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 23,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used (single treatment level present).
  scale_colour_manual(values = c("darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("30% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# 50% low -----------------------------------------------------------------
# 50% live coral / Low complexity subset of the 3 cm cohort.
ARD_3_50_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "50%")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(134)); now uses each visit's own sample size via n().
ARD_3_50_L_sum <- ARD_3_50_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 30); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(104)/sqrt(38).)
ARD_3_50_L_sum_left <- ARD_3_50_L %>%
  filter(days_since_outplanting <= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_50_L_sum_right <- ARD_3_50_L %>%
  filter(days_since_outplanting >= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# 50% low panel: raw jitter, piecewise mean lines joined at the breakpoint
# (day 30), per-visit means +/- SE, and separate lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_3_50_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_3_50_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_50_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_50_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_50_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_50_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_50_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_50_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_50_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 30,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used (single treatment level present).
  scale_colour_manual(values = c("goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("50% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# 70% low -----------------------------------------------------------------
# 70% live coral / Low complexity subset of the 3 cm cohort.
ARD_3_70_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "70%")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(134)); now uses each visit's own sample size via n().
ARD_3_70_L_sum <- ARD_3_70_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 7); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(46)/sqrt(94).)
ARD_3_70_L_sum_left <- ARD_3_70_L %>%
  filter(days_since_outplanting <= 7) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_70_L_sum_right <- ARD_3_70_L %>%
  filter(days_since_outplanting >= 7) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# 70% low panel: raw jitter, piecewise mean lines joined at the breakpoint
# (day 7), per-visit means +/- SE, and separate lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_3_70_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_3_70_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_70_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_70_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_70_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_70_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_70_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_70_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_70_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 7,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used (single treatment level present).
  scale_colour_manual(values = c("mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("70% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# 100% low ----------------------------------------------------------------
# 100% live coral / Low complexity subset of the 3 cm cohort.
ARD_3_100_L <- ARD_3_with_0_new_2 %>%
  filter(complexity == "Low") %>%
  filter(treatment == "100%")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(135)); now uses each visit's own sample size via n().
ARD_3_100_L_sum <- ARD_3_100_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 13); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(71)/sqrt(72).)
ARD_3_100_L_sum_left <- ARD_3_100_L %>%
  filter(days_since_outplanting <= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_3_100_L_sum_right <- ARD_3_100_L %>%
  filter(days_since_outplanting >= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# 100% low panel: raw jitter, piecewise mean lines joined at the breakpoint
# (day 13), per-visit means +/- SE, and separate lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_3_100_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_3_100_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_3_100_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_3_100_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_3_100_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_3_100_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_3_100_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_3_100_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_3_100_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 13,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("100% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# ARD 4-6 -----------------------------------------------------------------
# ARD 4-6 cm cohort: load, relabel treatments as % live coral, and drop
# duplicate plot/grid/visit rows.
ARD_4to6_with_0_new_2 <- read_csv("Artificial coral experiment/data/Art Recruit data/filter 0 values/ARD_4to6/ARD_4to6_with_0_new_2.csv")

ARD_4to6_with_0_new_2 <- ARD_4to6_with_0_new_2 %>%
  mutate(treatment = factor(treatment, levels = c("control", "100A", "30L70A", "50L50A", "70L30A", "100L"),
                            labels = c("control", "0%", "30%", "50%", "70%", "100%"))) %>%
  distinct(plot_grid_visit, .keep_all = TRUE)

# Per complexity/treatment/visit summary of recruit density.
# Fix: SE previously divided by a hard-coded overall row count
# (sqrt(1539)); now uses each group's own sample size via n().
ARD_4to6_with_0_new__2sum <- ARD_4to6_with_0_new_2 %>%
  group_by(complexity, treatment, days_since_outplanting) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# Faceted overview (complexity x treatment) of mean recruit density over
# time for the 4-6 cm cohort: raw jittered densities, per-visit mean
# lines, +/- SE error bars, mean points, and a default smooth per panel.
ggplot() +
  facet_grid(complexity~treatment) +
  geom_jitter(data = ARD_4to6_with_0_new_2,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.4,
              shape = 2,
              size = 1) +
  geom_line(data = ARD_4to6_with_0_new__2sum,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_with_0_new__2sum,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_point(data = ARD_4to6_with_0_new__2sum,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_with_0_new__2sum,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              # method = "glm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,11) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("mean fish recruit density over time - june 5 start (4-6cm)") +
  theme_classic() +
  # Fix: removed the trailing comma after legend.position — theme() then
  # received an empty argument, which errors on ggplot2 versions that
  # collect the dots with list(...).
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none"
  )
# ARD_3 and ARD4to6 together ----------------------------------------------
# Combined faceted plot overlaying the 3 cm cohort (solid summaries) and
# the 4-6 cm cohort (more transparent summaries + loess smooth) on the
# same complexity x treatment grid. Raw-jitter layers are kept commented
# out for an uncluttered overlay.
ggplot() +
  facet_grid(complexity~treatment) +
  # geom_jitter(data = ARD_3_with_0_new_2,
  #             aes(x = days_since_outplanting,
  #                 y = density,
  #                 colour = treatment),
  #             alpha = 0.2,
  #             shape = 2,
  #             size = 1) +
  geom_line(data = ARD_3_with_0_new__2sum,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_errorbar(data = ARD_3_with_0_new__2sum,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_point(data = ARD_3_with_0_new__2sum,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_3_with_0_new__2sum,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              # method = "glm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = FALSE) +
  # geom_jitter(data = ARD_4to6_with_0_new_2,
  #             aes(x = days_since_outplanting,
  #                 y = density,
  #                 colour = treatment),
  #             alpha = 0.4,
  #             # shape = 2,
  #             size = 1) +
  geom_line(data = ARD_4to6_with_0_new__2sum,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.3) +
  geom_errorbar(data = ARD_4to6_with_0_new__2sum,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_point(data = ARD_4to6_with_0_new__2sum,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.5) +
  # shape = 17) +
  # Drawn as a line with stat = "smooth" so alpha applies to the line.
  geom_line(data = ARD_4to6_with_0_new__2sum,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            stat = "smooth",
            size = 2,
            alpha = 0.5,
            # linetype = 5,
            method = "loess",
            # # family = "binomial",
            # # formula = y ~ x + I(x^2),
            se = FALSE ) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  # ylim(0,11) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("mean fish recruit density over time - june 5 start (3cm and 4-6cm)") +
  theme_classic() +
  # Fix: removed the trailing comma after legend.position — theme() then
  # received an empty argument, which errors on ggplot2 versions that
  # collect the dots with list(...).
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none"
  )
# ARD 4to6 ----------------------------------------------------------------
# Control High ------------------------------------------------------------
# Control / High complexity subset of the 4-6 cm cohort.
ARD_4to6_C_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High") %>%
  filter(treatment == "control")

# Per-visit mean/sd/se of recruit density.
# Fix: SE previously divided by a hard-coded whole-subset count
# (sqrt(120)); now uses each visit's own sample size via n().
ARD_4to6_C_H_sum <- ARD_4to6_C_H %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

# Segment summaries either side of the breakpoint (day 23); the knot day
# is kept in both segments so the fitted lines meet. (Previously
# hard-coded sqrt(73)/sqrt(54).)
ARD_4to6_C_H_sum_left <- ARD_4to6_C_H %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_4to6_C_H_sum_right <- ARD_4to6_C_H %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
# Control High panel (4-6 cm cohort): raw jitter, piecewise mean lines
# joined at the breakpoint (day 23), per-visit means +/- SE, and separate
# lm fits per segment.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw per-plot densities behind the summaries.
  geom_jitter(data = ARD_4to6_C_H,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Mean-density lines for each segment.
  geom_line(data = ARD_4to6_C_H_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_4to6_C_H_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # +/- 1 SE around each per-visit mean.
  geom_errorbar(data = ARD_4to6_C_H_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_4to6_C_H_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-visit mean points (triangles).
  geom_point(data = ARD_4to6_C_H_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_C_H_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Independent linear fits on each side of the breakpoint.
  geom_smooth(data = ARD_4to6_C_H_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_4to6_C_H_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Marks the assumed breakpoint.
  geom_vline(xintercept = 23,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  # Only the first colour is used (single treatment level present).
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("Control High") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.x = element_blank()
  )
# 0% High -----------------------------------------------------------------
# 0% live coral / High complexity subset of the 4-6 cm cohort.
ARD_4to6_0_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High") %>%
  filter(treatment == "0%")

# Full per-visit summary kept from an earlier iteration; the plot below
# only uses the left/right segment summaries.
# ARD_4to6_0_H_sum <- ARD_4to6_0_H %>%
#   select(treatment, complexity, days_since_outplanting, density) %>%
#   group_by(days_since_outplanting, treatment, complexity) %>%
#   summarize(density.mean = mean(density), density.sd = sd(density)) %>%
#   mutate(density.se = density.sd/sqrt(120))

# Segment summaries either side of the breakpoint (day 30); the knot day
# is kept in both segments so the fitted lines meet.
# Fix: SE previously divided by hard-coded counts (sqrt(94)/sqrt(40));
# now uses each visit's own sample size via n().
ARD_4to6_0_H_sum_left <- ARD_4to6_0_H %>%
  filter(days_since_outplanting <= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))

ARD_4to6_0_H_sum_right <- ARD_4to6_0_H %>%
  filter(days_since_outplanting >= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density), n = n()) %>%
  mutate(density.se = density.sd / sqrt(n))
ggplot() +
# facet_grid(complexity~treatment) +
geom_jitter(data = ARD_4to6_0_H,
aes(x = days_since_outplanting,
y = density,
colour = treatment),
alpha = 0.2,
shape = 2,
size = 1) +
geom_line(data = ARD_4to6_0_H_sum_left,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_line(data = ARD_4to6_0_H_sum_right,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 1.5,
alpha = 0.5) +
geom_errorbar(data = ARD_4to6_0_H_sum_left,
aes(x = days_since_outplanting,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_errorbar(data = ARD_4to6_0_H_sum_right,
aes(x = days_since_outplanting,
ymin = density.mean - density.se,
ymax = density.mean + density.se,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
width = 0.3,
size = 0.5) +
geom_point(data = ARD_4to6_0_H_sum_left,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
geom_point(data = ARD_4to6_0_H_sum_right,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
position = position_dodge(0.2),
size = 3,
alpha = 0.7,
shape = 17) +
geom_smooth(data = ARD_4to6_0_H_sum_left,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 2,
method = "lm",
# family = "binomial",
# formula = y ~ x + I(x^2),
se = TRUE ) +
geom_smooth(data = ARD_4to6_0_H_sum_right,
aes(x = days_since_outplanting,
y = density.mean,
colour = treatment,
group = treatment),
size = 2,
method = "lm",
# family = "binomial",
# formula = y ~ x + I(x^2),
se = TRUE ) +
geom_vline(xintercept = 30,
linetype = "dashed",
colour = "black",
size = 0.8,
alpha = 0.3) +
# scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
scale_colour_manual(values = c("firebrick", "darkorange1", "goldenrod1", "mediumseagreen", "royalblue")) +
ylim(0,10) +
# xlim(0,20) +
labs(x = expression(Days~Since~Outplanting),
y = expression(Density~(fish~m^{2}))) +
ggtitle("0% High") +
theme_classic() +
theme(
axis.title = element_text(size = 14),
axis.text = element_text(size = 12),
strip.text = element_text(size = 14),
legend.position = "none",
axis.title.x = element_blank(),
axis.title.y = element_blank()
)
# 30% High ----------------------------------------------------------------
# Density of 4-6 cm fish on High-complexity reefs, 30% treatment.
# Two linear segments split at day 5.
ARD_4to6_30_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High", treatment == "30%")

# Per-day summaries for each segment. SE now uses the per-day count n()
# instead of a hard-coded total (previously sqrt(30)/sqrt(98)).
ARD_4to6_30_H_sum_left <- ARD_4to6_30_H %>%
  filter(days_since_outplanting <= 5) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_30_H_sum_right <- ARD_4to6_30_H %>%
  filter(days_since_outplanting >= 5) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_30_H,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_30_H_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_30_H_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_30_H_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_30_H_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_30_H_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_30_H_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_30_H_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_30_H_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 5, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("darkorange1", "goldenrod1",
                                 "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("30% High") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.x = element_blank(),
        axis.title.y = element_blank())
# 50% High ----------------------------------------------------------------
# Density of 4-6 cm fish on High-complexity reefs, 50% treatment.
# Two linear segments split at day 3.
ARD_4to6_50_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High", treatment == "50%")

# Per-day summaries for each segment. SE now uses the per-day count n()
# instead of a hard-coded total (previously sqrt(24)/sqrt(112)).
ARD_4to6_50_H_sum_left <- ARD_4to6_50_H %>%
  filter(days_since_outplanting <= 3) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_50_H_sum_right <- ARD_4to6_50_H %>%
  filter(days_since_outplanting >= 3) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_50_H,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_50_H_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_50_H_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_50_H_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_50_H_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_50_H_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_50_H_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_50_H_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_50_H_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 3, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("goldenrod1", "mediumseagreen",
                                 "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("50% High") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.x = element_blank(),
        axis.title.y = element_blank())
# 70% High ----------------------------------------------------------------
# Density of 4-6 cm fish on High-complexity reefs, 70% treatment.
# Two linear segments split at day 4.
ARD_4to6_70_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High", treatment == "70%")

# Per-day summaries for each segment. SE now uses the per-day count n()
# instead of a hard-coded total (previously sqrt(26)/sqrt(99)).
ARD_4to6_70_H_sum_left <- ARD_4to6_70_H %>%
  filter(days_since_outplanting <= 4) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_70_H_sum_right <- ARD_4to6_70_H %>%
  filter(days_since_outplanting >= 4) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_70_H,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_70_H_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_70_H_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_70_H_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_70_H_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_70_H_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_70_H_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_70_H_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_70_H_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 4, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("70% High") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.x = element_blank(),
        axis.title.y = element_blank())
# 100% High ---------------------------------------------------------------
# Density of 4-6 cm fish on High-complexity reefs, 100% treatment.
# Two linear segments split at day 23.
ARD_4to6_100_H <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "High", treatment == "100%")

# Full-period per-day summary (not plotted below; retained for
# downstream use). SE now uses the per-day count n() instead of a
# hard-coded total (previously sqrt(124)).
ARD_4to6_100_H_sum <- ARD_4to6_100_H %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(76)/sqrt(48)).
ARD_4to6_100_H_sum_left <- ARD_4to6_100_H %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_100_H_sum_right <- ARD_4to6_100_H %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_100_H,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_100_H_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_100_H_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_100_H_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_100_H_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_100_H_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_100_H_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_100_H_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_100_H_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 23, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("100% High") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.x = element_blank(),
        axis.title.y = element_blank())
# Control Low -------------------------------------------------------------
# Density of 4-6 cm fish on Low-complexity reefs, control treatment.
# Two linear segments split at day 13.
ARD_4to6_C_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low", treatment == "control")

# Full-period per-day summary (not plotted below; retained for
# downstream use). SE now uses the per-day count n() instead of a
# hard-coded total (previously sqrt(127)).
ARD_4to6_C_L_sum <- ARD_4to6_C_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(68)/sqrt(67)).
ARD_4to6_C_L_sum_left <- ARD_4to6_C_L %>%
  filter(days_since_outplanting <= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_C_L_sum_right <- ARD_4to6_C_L %>%
  filter(days_since_outplanting >= 13) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_C_L,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_C_L_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_C_L_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_C_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_C_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_C_L_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_C_L_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_C_L_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_C_L_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 13, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("gray44", "firebrick", "darkorange1",
                                 "goldenrod1", "mediumseagreen",
                                 "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("Control Low") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none")
# 0% Low ------------------------------------------------------------------
# Density of 4-6 cm fish on Low-complexity reefs, 0% treatment.
# Two linear segments split at day 23.
ARD_4to6_0_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low", treatment == "0%")

# Full-period per-day summary (not plotted below; retained for
# downstream use). SE now uses the per-day count n() instead of a
# hard-coded total (previously sqrt(133)).
ARD_4to6_0_L_sum <- ARD_4to6_0_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(86)/sqrt(55)).
ARD_4to6_0_L_sum_left <- ARD_4to6_0_L %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_0_L_sum_right <- ARD_4to6_0_L %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_0_L,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_0_L_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_0_L_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_0_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_0_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_0_L_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_0_L_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_0_L_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_0_L_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 23, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("firebrick", "darkorange1", "goldenrod1",
                                 "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("0% Low") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.y = element_blank())
# 30% low -----------------------------------------------------------------
# Density of 4-6 cm fish on Low-complexity reefs, 30% treatment.
# Two linear segments split at day 23.
ARD_4to6_30_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low", treatment == "30%")

# Full-period per-day summary (not plotted below; retained for
# downstream use). SE now uses the per-day count n() instead of a
# hard-coded total (previously sqrt(136)).
ARD_4to6_30_L_sum <- ARD_4to6_30_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(88)/sqrt(56)).
ARD_4to6_30_L_sum_left <- ARD_4to6_30_L %>%
  filter(days_since_outplanting <= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_30_L_sum_right <- ARD_4to6_30_L %>%
  filter(days_since_outplanting >= 23) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_30_L,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_30_L_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_30_L_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_30_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_30_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_30_L_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_30_L_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_30_L_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_30_L_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 23, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("darkorange1", "goldenrod1",
                                 "mediumseagreen", "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("30% low") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.y = element_blank())
# 50% low -----------------------------------------------------------------
# Density of 4-6 cm fish on Low-complexity reefs, 50% treatment.
# Two linear segments split at day 30.
ARD_4to6_50_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low", treatment == "50%")

# Full-period per-day summary (not plotted below; retained for
# downstream use). SE now uses the per-day count n() instead of a
# hard-coded total (previously sqrt(134)).
ARD_4to6_50_L_sum <- ARD_4to6_50_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(104)/sqrt(38)).
ARD_4to6_50_L_sum_left <- ARD_4to6_50_L %>%
  filter(days_since_outplanting <= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_50_L_sum_right <- ARD_4to6_50_L %>%
  filter(days_since_outplanting >= 30) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Raw jittered observations + per-day means with SE bars + a separate
# linear fit for each segment; the dashed line marks the breakpoint.
ggplot() +
  geom_jitter(data = ARD_4to6_50_L,
              aes(x = days_since_outplanting, y = density,
                  colour = treatment),
              alpha = 0.2, shape = 2, size = 1) +
  geom_line(data = ARD_4to6_50_L_sum_left,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_line(data = ARD_4to6_50_L_sum_right,
            aes(x = days_since_outplanting, y = density.mean,
                colour = treatment, group = treatment),
            size = 1.5, alpha = 0.5) +
  geom_errorbar(data = ARD_4to6_50_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_errorbar(data = ARD_4to6_50_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment, group = treatment),
                position = position_dodge(0.2), width = 0.3, size = 0.5) +
  geom_point(data = ARD_4to6_50_L_sum_left,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_50_L_sum_right,
             aes(x = days_since_outplanting, y = density.mean,
                 colour = treatment, group = treatment),
             position = position_dodge(0.2), size = 3, alpha = 0.7,
             shape = 17) +
  geom_smooth(data = ARD_4to6_50_L_sum_left,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_smooth(data = ARD_4to6_50_L_sum_right,
              aes(x = days_since_outplanting, y = density.mean,
                  colour = treatment, group = treatment),
              size = 2, method = "lm", se = TRUE) +
  geom_vline(xintercept = 30, linetype = "dashed", colour = "black",
             size = 0.8, alpha = 0.3) +
  scale_colour_manual(values = c("goldenrod1", "mediumseagreen",
                                 "royalblue")) +
  ylim(0, 10) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("50% low") +
  theme_classic() +
  theme(axis.title = element_text(size = 14),
        axis.text = element_text(size = 12),
        strip.text = element_text(size = 14),
        legend.position = "none",
        axis.title.y = element_blank())
# 70% low -----------------------------------------------------------------
# Density of 4-6 cm fish on Low-complexity reefs, 70% treatment.
# Two linear segments split at day 7.
ARD_4to6_70_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low", treatment == "70%")

# Full-period per-day summary (retained for downstream use). SE now
# uses the per-day count n() instead of a hard-coded total (previously
# sqrt(134)).
ARD_4to6_70_L_sum <- ARD_4to6_70_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))

# Per-day summaries for each segment (previously sqrt(46)/sqrt(94)).
ARD_4to6_70_L_sum_left <- ARD_4to6_70_L %>%
  filter(days_since_outplanting <= 7) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
ARD_4to6_70_L_sum_right <- ARD_4to6_70_L %>%
  filter(days_since_outplanting >= 7) %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density),
            density.sd = sd(density),
            density.n = n()) %>%
  mutate(density.se = density.sd / sqrt(density.n))
# 70% low treatment: raw observations (jitter), per-day means with SE bars,
# connecting lines and a separate linear fit for each side of the day-7 split.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw observations, lightly jittered and faded so the summaries stand out.
  geom_jitter(data = ARD_4to6_70_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Lines connecting the per-day means, one segment per side of the split.
  geom_line(data = ARD_4to6_70_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_4to6_70_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # Standard-error bars around each per-day mean.
  geom_errorbar(data = ARD_4to6_70_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_4to6_70_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-day mean points (filled triangles, shape 17).
  geom_point(data = ARD_4to6_70_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_70_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Linear fit (with confidence ribbon) on each side of the breakpoint.
  geom_smooth(data = ARD_4to6_70_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_4to6_70_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Dashed marker at the day-7 breakpoint used for the left/right split above.
  geom_vline(xintercept = 7,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("mediumseagreen", "royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("70% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
# 100% low ----------------------------------------------------------------
# Low-complexity observations from the 100% treatment.
ARD_4to6_100_L <- ARD_4to6_with_0_new_2 %>%
  filter(complexity == "Low" & treatment == "100%")
# Daily mean/sd/se of fish density over the whole monitoring period.
ARD_4to6_100_L_sum <- ARD_4to6_100_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density)) %>%
  mutate(density.se = density.sd / sqrt(135))
#plot two linear slopes:
# The series is split at day 13 so the plot can fit one linear trend per side.
ARD_4to6_100_L_sum_left <- ARD_4to6_100_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  filter(days_since_outplanting <= 13) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density)) %>%
  mutate(density.se = density.sd / sqrt(71))
ARD_4to6_100_L_sum_right <- ARD_4to6_100_L %>%
  select(treatment, complexity, days_since_outplanting, density) %>%
  filter(days_since_outplanting >= 13) %>%
  group_by(days_since_outplanting, treatment, complexity) %>%
  summarize(density.mean = mean(density), density.sd = sd(density)) %>%
  mutate(density.se = density.sd / sqrt(72))
# 100% low treatment: raw observations (jitter), per-day means with SE bars,
# connecting lines and a separate linear fit for each side of the day-13 split.
ggplot() +
  # facet_grid(complexity~treatment) +
  # Raw observations, lightly jittered and faded so the summaries stand out.
  geom_jitter(data = ARD_4to6_100_L,
              aes(x = days_since_outplanting,
                  y = density,
                  colour = treatment),
              alpha = 0.2,
              shape = 2,
              size = 1) +
  # Lines connecting the per-day means, one segment per side of the split.
  geom_line(data = ARD_4to6_100_L_sum_left,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  geom_line(data = ARD_4to6_100_L_sum_right,
            aes(x = days_since_outplanting,
                y = density.mean,
                colour = treatment,
                group = treatment),
            size = 1.5,
            alpha = 0.5) +
  # Standard-error bars around each per-day mean.
  geom_errorbar(data = ARD_4to6_100_L_sum_left,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  geom_errorbar(data = ARD_4to6_100_L_sum_right,
                aes(x = days_since_outplanting,
                    ymin = density.mean - density.se,
                    ymax = density.mean + density.se,
                    colour = treatment,
                    group = treatment),
                position = position_dodge(0.2),
                width = 0.3,
                size = 0.5) +
  # Per-day mean points (filled triangles, shape 17).
  geom_point(data = ARD_4to6_100_L_sum_left,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  geom_point(data = ARD_4to6_100_L_sum_right,
             aes(x = days_since_outplanting,
                 y = density.mean,
                 colour = treatment,
                 group = treatment),
             position = position_dodge(0.2),
             size = 3,
             alpha = 0.7,
             shape = 17) +
  # Linear fit (with confidence ribbon) on each side of the breakpoint.
  geom_smooth(data = ARD_4to6_100_L_sum_left,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  geom_smooth(data = ARD_4to6_100_L_sum_right,
              aes(x = days_since_outplanting,
                  y = density.mean,
                  colour = treatment,
                  group = treatment),
              size = 2,
              method = "lm",
              # family = "binomial",
              # formula = y ~ x + I(x^2),
              se = TRUE ) +
  # Dashed marker at the day-13 breakpoint used for the left/right split above.
  geom_vline(xintercept = 13,
             linetype = "dashed",
             colour = "black",
             size = 0.8,
             alpha = 0.3) +
  # scale_x_continuous(breaks = c(10, 20, 30, 40, 50, 60)) +
  scale_colour_manual(values = c("royalblue")) +
  ylim(0,10) +
  # xlim(0,20) +
  labs(x = expression(Days~Since~Outplanting),
       y = expression(Density~(fish~m^{2}))) +
  ggtitle("100% low") +
  theme_classic() +
  theme(
    axis.title = element_text(size = 14),
    axis.text = element_text(size = 12),
    strip.text = element_text(size = 14),
    legend.position = "none",
    axis.title.y = element_blank()
  )
|
2df1e2218a14ad593be0f2623b591ca92acd7c71 | decfe1249ce71079bf978e977a578a7cd4221837 | /man/annotate_variants.Rd | c53f49e352f23d5822453e4e164adc0f129a3e2b | [] | no_license | tubuliferous/bedanno | 817ffb2f5350122f4cd4eab7745f40469ec6e7aa | 0c37c4516313c7b69b38cf1ff0070639a9a7239f | refs/heads/master | 2021-01-19T03:00:21.568970 | 2016-12-12T21:29:29 | 2016-12-12T21:29:29 | 43,706,348 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 574 | rd | annotate_variants.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_functions.R
\name{annotate_variants}
\alias{annotate_variants}
\title{Annotate a data.table with binary annotations.}
\usage{
annotate_variants(bed_dir_path, variant_path, cores = 1)
}
\arguments{
\item{bed_dir_path}{A character.}
\item{variant_path}{A data.table.}
\item{cores}{A numeric.}
}
\value{
Annotated data.frame.
}
\description{
Annotate a data.table with binary annotations.
}
\seealso{
Other annotation functions with NO intermediate file output: \code{\link{get_anno_col}}
}
|
9bdb0aecacb7c87b49c99fe26ce521294b80b4c8 | 9d78022b44892e6b14045a40eeb4421ac8b69903 | /code/baseline/create-baseline.R | 6ba75e2a458050aeca2c128844993b383c69f85a | [
"MIT"
] | permissive | seabbs/covid19-forecast-hub-europe | a143811b368cd787a9c8d94d413fc8f2d04e6108 | 3c0a4113a003a4c91c4cfd820b61693bd52fe867 | refs/heads/main | 2023-09-02T17:46:25.553455 | 2021-10-19T12:41:05 | 2021-10-19T12:41:05 | 345,817,566 | 0 | 0 | NOASSERTION | 2021-03-08T22:52:27 | 2021-03-08T22:52:26 | null | UTF-8 | R | false | false | 1,140 | r | create-baseline.R | library(dplyr)
library(purrr)
library(lubridate)
library(yaml)
library(covidModels)
library(here)
library(readr)
library(EuroForecastHub)
# Output model identifier and its folder inside data-processed/.
model_name <- "EuroCOVIDhub-baseline"
model_folder <- here("data-processed", model_name)
if (!dir.exists(model_folder)) {
  dir.create(model_folder)
}
# Hub configuration: forecast quantiles, maximum horizon and target variables.
hub_quantiles <- get_hub_config("forecast_type")[["quantiles"]]
hub_horizon <- max(get_hub_config("horizon")[["values"]])
hub_targets <- get_hub_config("target_variables")
forecast_date <- today()
# Weekly JHU truth data up to yesterday, for the ECDC hub locations.
raw_truth <- covidHubUtils::load_truth(
  truth_source = "JHU",
  temporal_resolution = "weekly",
  truth_end_date = forecast_date - 1,
  hub = "ECDC"
)
# Build one baseline forecast per location/target-variable group and attach
# the grouping keys (.y) to every row of the resulting quantile table.
# NOTE(review): full_join(..., by = character()) is a cross join and is
# deprecated in recent dplyr in favour of cross_join() -- confirm the
# pinned dplyr version before changing it.
baseline_forecast <- raw_truth %>%
  filter(!is.na(value)) %>%
  group_by(location, target_variable) %>%
  group_map(
    ~ full_join(
      .y,
      build_baseline(.x$value, quantiles = hub_quantiles, horizon = hub_horizon),
      by = character()
    )
  ) %>%
  bind_rows() %>%
  # Keep only rows whose 3-character type code matches a configured target.
  filter(type %in% substr(hub_targets, 1, 3)) %>%
  mutate(type = "quantile")
# Format as a hub submission and write <date>-<model>.csv into the folder.
format_ensemble(baseline_forecast, forecast_date) %>%
  write_csv(paste0(model_folder, "/", forecast_date, "-", model_name, ".csv"))
|
9689738556a034c02d89b141a3faf401c7f4f527 | 1ec9989c095b9399e2b5acb9d9524cb8eb7ac6be | /heritability/ACE_wcovariates_Saturated_nested.R | 8a6f4d34fe46d6339a40fffead2db6b76cbacdc6 | [] | no_license | haitaoge/Heritability | a4c71360f6207c466b8ff84a8233d3df867d3ae9 | 80b6f925b1827502d7d4cdeef33b1c7bb3acf900 | refs/heads/master | 2021-09-26T18:03:28.828166 | 2018-11-01T07:07:29 | 2018-11-01T07:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,487 | r | ACE_wcovariates_Saturated_nested.R |
#----------------------------------------------------------------------------------------------------------------------
# Twin simulate
# DVM Bishop, 11th March 2010, Based on script in OpenMXUserGuide, p 15
#
# Fits saturated, ACE and nested (AE/CE) twin models with age/sex covariates
# to structural-connectome edge weights, one model per edge.
#----------------------------------------------------------------------------------------------------------------------
require(OpenMx) # not needed for the simulation, but will be needed when we come to model specification
require(MASS) # needed for multivariate random number generation
library(R.matlab) # readMat() for the MATLAB twin-data files
set.seed(200) # specified seed ensures same random number set generated on each run
# Analysis configuration: parcellation / tractography / edge-weighting scheme.
parcellation <- "HCPMMP1" #custom200" #"HCPMMP1"
tract <- "iFOD2"
weights <- "FA"
options(warn = 1) # print warnings as they occur rather than at the end
setwd("~/GoogleDrive/Genetics_connectome/Heritability/data/output")
data_covar <- readMat("~/GoogleDrive/Genetics_connectome/Heritability/data/general/twinCovariatesDWI.mat")
data <- readMat(sprintf("~/GoogleDrive/Genetics_connectome/Heritability/data/output/twinEdges_%s_%s_%sTEST5973.mat",parcellation, tract, weights))
# mark 0 values as NA because these will be the outliers in the data - we want to exclude them.
data$Output.DZ[data$Output.DZ == 0] <- NA
data$Output.MZ[data$Output.MZ == 0] <- NA
#data$Output.DZ <- data$Output.DZ/100
#data$Output.MZ <- data$Output.MZ/100
# all edges: the third array dimension indexes connectome edges
numEdges <- dim(data$Output.DZ)[3]
# Standardised variance components (a^2 / c^2 / e^2), one entry per edge.
heritabilityA <- numeric(numEdges)
heritabilityC <- numeric(numEdges)
heritabilityE <- numeric(numEdges)
# Model-comparison p-value tables; only filled when the mxCompare() calls
# inside the loop below are re-enabled.
PvalsSat <- data.frame(matrix(ncol = 3, nrow = numEdges))
PvalsACE <- data.frame(matrix(ncol = 2, nrow = numEdges))
colnames(PvalsSat) <- c("ACE", "CE", "AE")
colnames(PvalsACE) <- c("CE", "AE")
mxOption(NULL,"Default optimizer","SLSQP")
#mxOption(NULL,"Default optimizer","NPSOL")
# change age from years to centuries (keeps the age regression weight on a comparable scale)
data_covar$DZ.age <- data_covar$DZ.age/100
data_covar$MZ.age <- data_covar$MZ.age/100
# NOTE(review): p_AE / p_CE / p_AC are allocated but never filled by the
# live code below -- kept for compatibility with downstream scripts.
p_AE <- numeric(numEdges)
p_CE <- numeric(numEdges)
p_AC <- numeric(numEdges)
# fit models for each edge ------------------------------------------------
# For each connectome edge: build MZ/DZ twin data frames with age/sex
# definition variables, specify a saturated model and an ACE model (plus the
# nested AE and CE submodels), fit the ACE model with mxTryHard() and store
# the standardised A/C/E variance proportions. The saturated/submodel fits
# and mxCompare() model comparisons are currently disabled; re-enable the
# commented calls at the end of the loop body to fill PvalsSat/PvalsACE.
for (edge in seq_len(numEdges)) { # seq_len() is safe when numEdges == 0
  # Twin-pair measurements plus per-twin age and sex covariates.
  myDataMZ <- data.frame(data$Output.MZ[,,edge], data_covar$MZ.age[,1], data_covar$MZ.age[,2], data_covar$MZ.sex[,1], data_covar$MZ.sex[,2])
  myDataDZ <- data.frame(data$Output.DZ[,,edge], data_covar$DZ.age[,1], data_covar$DZ.age[,2], data_covar$DZ.sex[,1], data_covar$DZ.sex[,2])
  colnames(myDataMZ) <- c('twin1', 'twin2','ageT1MZ', 'ageT2MZ', 'sexT1MZ', 'sexT2MZ')
  colnames(myDataDZ) <- c('twin1', 'twin2','ageT1DZ', 'ageT2DZ', 'sexT1DZ', 'sexT2DZ')
  selVars <- c('twin1','twin2')
  # "complete" specifies use only cases with data in all columns.
  CovMZ <- cov(data$Output.MZ[,,edge], use = "complete") # unused below; kept for inspection
  CovDZ <- cov(data$Output.DZ[,,edge], use = "complete") # supplies the ACE start values
  # Grand means across twins, used as intercept starting values.
  MeanMZ <- mean(colMeans(data$Output.MZ[,,edge], na.rm = TRUE))
  MeanDZ <- mean(colMeans(data$Output.DZ[,,edge], na.rm = TRUE))
  # (Exploratory MZ/DZ scatterplots removed together with the unused
  #  myDataMZ_measure / myDataDZ_measure copies they relied on.)
  #--------------------------------------------------------------------------------------------
  # Saturated model: free intercepts (with age/sex regressions via definition
  # variables) and free lower-triangular (Cholesky) covariances per group.
  #--------------------------------------------------------------------------------------------
  mylabels <- c("twin1","twin2")
  MZsat <- mxModel("MZsat",
                   mxMatrix( type = "Full", nrow=1, ncol=2, free=T, c(MeanMZ,MeanMZ), labels =c("b0_mz1","b0_mz2"), name="Intercepts" ),
                   mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values= 0, labels=c("betaAge","betaSex"), name="beta"),
                   mxMatrix( type="Full", nrow=2, ncol=2, free=F, labels=c("data.ageT1MZ","data.sexT1MZ","data.ageT2MZ","data.sexT2MZ"), name="MZDefVars"),
                   mxAlgebra( expression=Intercepts + beta %*% MZDefVars, name="expMeanMZ"),
                   mxMatrix( type = "Lower", nrow=2, ncol=2, free=T, 0.5, name="CholMZ" ),
                   mxAlgebra( CholMZ %*% t(CholMZ), name="expCovMZ"),
                   mxData( myDataMZ, type="raw"), mxFitFunctionML(),
                   mxExpectationNormal( "expCovMZ", "expMeanMZ", mylabels))
  DZsat <- mxModel("DZsat",
                   mxMatrix( type = "Full", nrow=1, ncol=2, free=T, c(MeanDZ,MeanDZ), labels=c("b0_dz1","b0_dz2"), name="Intercepts" ),
                   mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values= 0, labels=c("betaAge","betaSex"), name="beta"),
                   mxMatrix( type="Full", nrow=2, ncol=2, free=F, labels=c("data.ageT1DZ","data.sexT1DZ","data.ageT2DZ","data.sexT2DZ"), name="DZDefVars"),
                   mxAlgebra( expression=Intercepts + beta %*% DZDefVars, name="expMeanDZ"),
                   mxMatrix( type = "Lower", nrow=2, ncol=2, free=T, 0.5, name="CholDZ" ),
                   mxAlgebra( CholDZ %*% t(CholDZ),name="expCovDZ"),
                   mxData( myDataDZ, type="raw"), mxFitFunctionML(),
                   mxExpectationNormal( "expCovDZ", "expMeanDZ", mylabels))
  # Combine both groups; the multigroup fit adds the MZ and DZ likelihoods.
  SatModel <- mxModel("twinSat", MZsat, DZsat, mxFitFunctionMultigroup(c('MZsat', 'DZsat')))
  #SatModelFit <- mxTryHard(SatModel)
  #summary(SatModelFit)
  # Submodel 1: equate intercepts across twin order (fit currently disabled).
  sub1 <- omxSetParameters(model=SatModel, labels=c("b0_mz1","b0_mz2"), newlabels="b0_mz", name="Submodel1")
  sub1 <- omxSetParameters(model=sub1, labels=c("b0_dz1","b0_dz2"), newlabels="b0_dz", name="Submodel1")
  #sub1Fit <- mxRun(sub1)
  #mxCompare(SatModelFit, sub1Fit) # compare models
  # Further submodels equating intercepts/variances across zygosity are not
  # possible with the current covariance parameterisation (see original notes).
  # -----------------------------------------------------------------------
  # ACE model with raw data and matrix-style specification.
  # X/Y/Z hold the a/c/e path coefficients; A/C/E are the variance components.
  # -----------------------------------------------------------------------
  twinACE <- mxModel("twinACE",
                     # Matrices X, Y, and Z to store a, c, and e path coefficients
                     mxMatrix( type="Full", nrow=1, ncol=1, free=TRUE, values=sqrt((CovDZ[2,2]/3)), label="a", name="X" ),
                     mxMatrix( type="Full", nrow=1, ncol=1, free=TRUE, values=sqrt((CovDZ[2,2]/3)), label="c", name="Y" ),
                     mxMatrix( type="Full", nrow=1, ncol=1, free=TRUE, values=sqrt((CovDZ[2,2]/3)), label="e", name="Z" ),
                     # Matrices A, C, and E compute variance components
                     mxAlgebra( expression=X %*% t(X), name="A" ),
                     mxAlgebra( expression=Y %*% t(Y), name="C" ),
                     mxAlgebra( expression=Z %*% t(Z), name="E" ),
                     # Expected MZ covariance: twins share all of A and C.
                     mxAlgebra(
                       expression= rbind (cbind(A+C+E , A+C),
                                          cbind(A+C , A+C+E)),
                       name="expCovMZ"),
                     # Expected DZ covariance: twins share half of A (0.5 as a
                     # 1x1 matrix via %x%) and all of C.
                     mxAlgebra(
                       expression= rbind (cbind(A+C+E , 0.5%x%A+C),
                                          cbind(0.5%x%A+C , A+C+E)),
                       name="expCovDZ"),
                     mxModel("MZ", mxData( observed=myDataMZ, type="raw" ),
                             # Means are a function of the age/sex definition variables.
                             mxMatrix( type = "Full", nrow=1, ncol=2, free=T, c(MeanMZ,MeanMZ), labels =c("b0_mz1","b0_mz2"), name="Intercepts" ),
                             mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values= 0, labels=c("betaAge","betaSex"), name="beta"),
                             mxMatrix( type="Full", nrow=2, ncol=2, free=F, labels=c("data.ageT1MZ","data.sexT1MZ","data.ageT2MZ","data.sexT2MZ"), name="MZDefVars"),
                             mxAlgebra( expression=Intercepts + beta %*% MZDefVars, name="expMeanMZ"),
                             mxExpectationNormal( covariance="twinACE.expCovMZ", means="expMeanMZ", dimnames=selVars ),
                             mxFitFunctionML()),
                     mxModel("DZ", mxData( observed=myDataDZ, type="raw" ),
                             mxMatrix( type = "Full", nrow=1, ncol=2, free=T, c(MeanDZ,MeanDZ), labels=c("b0_dz1","b0_dz2"), name="Intercepts" ),
                             mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values= 0, labels=c("betaAge","betaSex"), name="beta"),
                             mxMatrix( type="Full", nrow=2, ncol=2, free=F, labels=c("data.ageT1DZ","data.sexT1DZ","data.ageT2DZ","data.sexT2DZ"), name="DZDefVars"),
                             mxAlgebra( expression=Intercepts + beta %*% DZDefVars, name="expMeanDZ"),
                             mxExpectationNormal( covariance="twinACE.expCovDZ", means="expMeanDZ", dimnames=selVars ),
                             mxFitFunctionML()),
                     mxFitFunctionMultigroup( c("MZ.fitfunction", "DZ.fitfunction"))
  )
  # Fit the ACE model; mxTryHard() retries from perturbed start values.
  twinACEFit <- mxTryHard(twinACE)
  # -----------------------------------------------------------------------
  # Extract expected covariances and variance components from the ACE fit.
  # -----------------------------------------------------------------------
  estCovMZ <- mxEval(twinACE.expCovMZ, twinACEFit) # expected covariance matrix for MZ's
  estCovDZ <- mxEval(twinACE.expCovDZ, twinACEFit) # expected covariance matrix for DZ's
  estVA <- mxEval(a*a, twinACEFit) # additive genetic variance, a^2
  estVC <- mxEval(c*c, twinACEFit) # shared environmental variance, c^2
  estVE <- mxEval(e*e, twinACEFit) # unique environmental variance, e^2
  estVP <- (estVA+estVC+estVE) # total variance
  estPropVA <- estVA/estVP # standardized additive genetic variance
  estPropVC <- estVC/estVP # standardized shared environmental variance
  estPropVE <- estVE/estVP # standardized unique environmental variance
  estACE <- rbind(cbind(estVA,estVC,estVE), # table of estimates
                  cbind(estPropVA,estPropVC,estPropVE))
  # NOTE(review): mxEval(objective, ...) relies on a legacy OpenMx accessor;
  # confirm it still resolves under the installed OpenMx version.
  LL_ACE <- mxEval(objective, twinACEFit) # likelihood of ADE model
  heritabilityA[edge] <- estPropVA
  heritabilityC[edge] <- estPropVC
  heritabilityE[edge] <- estPropVE
  #summary(twinACEFit)
  # Nested AE model: drop C by fixing c to 0 (fit currently disabled).
  twinAE <- twinACE
  twinAE <- mxRename(twinAE, "twinAE")
  twinAE <- omxSetParameters(twinAE, labels="c", free=FALSE, values=0 )
  #AEFit <- mxTryHard(twinAE)
  #AESumm <- summary(AEFit)
  # Nested CE model: drop A by fixing a to 0 (fit currently disabled).
  twinCE <- twinACE
  twinCE <- mxRename(twinCE, "twinCE")
  twinCE <- omxSetParameters(twinCE, labels="a", free=FALSE, values=0 )
  #CEFit <- mxTryHard(twinCE)
  #CESumm <- summary(CEFit)
  # The AC model (E = 0) fails to run ("All fit attempts resulted in errors"),
  # so it is omitted entirely.
  # model comparison
  options('digits' = 5)
  # Re-enable the following to compare the saturated model against ACE, and
  # ACE against its AE/CE submodels, and to fill PvalsSat/PvalsACE:
  #compValuesSat = mxCompare(SatModelFit, twinACEFit)
  #compValuesACE = mxCompare(twinACEFit, c(CEFit, AEFit))
  #compValuesACEsat = mxCompare(SatModelFit, c(twinACEFit,CEFit, AEFit))
  #PvalsSat[edge,] = compValuesACEsat$p[2:4]
  #PvalsACE[edge,] = compValuesACE$p[2:3]
  #AICvals[edge,] = c(compValuesSat$AIC[2],compValuesACE$AIC[2:3])
}
# Collect the standardised variance components and write them out.
heritabilityACE <- data.frame(heritabilityA, heritabilityC, heritabilityE)
write.csv(heritabilityACE,"heritabilityACE_TESTHCP5973.txt",row.names=FALSE)
|
a3b5bd5075b84237d9b435da8cf99d6800927d2d | 0229705f89eb090f7bc5b7899dc07e7609e2cc80 | /R/parse.expression.R | 99cb75e192480ed31fa4a982345e1b89fb7a4537 | [] | no_license | cran/causaleffect | d77a060d14f6d985b355c6b743c947bd8093a751 | 687d5177aa178cb89782fd496523c0e1f3ceb9f5 | refs/heads/master | 2022-07-24T03:48:40.739576 | 2022-07-14T08:10:05 | 2022-07-14T08:10:05 | 24,474,611 | 5 | 6 | null | null | null | null | UTF-8 | R | false | false | 3,219 | r | parse.expression.R | parse.expression <- function(P, topo, G.unobs, G, G.obs) {
  # P is a fraction (num/den): try to cancel shared factors, then simplify
  # the denominator recursively.
  if (P$fraction) {
    P <- cancel.out(P)
    if (P$fraction) {
      P$den <- parse.expression(P$den, topo, G.unobs, G, G.obs)
      # Denominator vanished: the fraction reduces to its numerator, which
      # inherits the pending summation variables (%ts% appears to keep the
      # set in topological order -- defined elsewhere in the package).
      if (length(P$den) == 0) {
        sum_p <- P$sumset
        P <- P$num
        P$sumset <- union(sum_p, P$sumset) %ts% topo
        # A single-child product is flattened into that child.
        if (P$product) {
          if (length(P$children) == 1) {
            sum_p <- P$sumset
            P <- P$children[[1]]
            P$sumset <- union(sum_p, P$sumset) %ts% topo
          }
        }
        return(P)
      }
      # Summation variables the denominator does not depend on can be pushed
      # down into the numerator's own sumset.
      if (length(P$sumset) > 0 && length(P$den) > 0) {
        nodep <- setdiff(P$sumset, dependencies(P$den))
        if (length(nodep) > 0) {
          P$num$sumset <- union(P$num$sumset, nodep) %ts% topo
          P$sumset <- setdiff(P$sumset, nodep) %ts% topo
        }
      }
      # Simplify the numerator, then attempt cancellation once more.
      P$num <- parse.expression(P$num, topo, G.unobs, G, G.obs)
      P <- cancel.out(P)
    }
    return(P)
  }
  # Tracks whether term-level simplification is still safe to attempt below.
  simplify_terms <- TRUE
  if (P$product) {
    # Recursively parse every non-atomic child (a nested product, fraction,
    # sum, or a term carrying its own sumset); atomic terms stay as they are.
    non_atomic <- sapply(P$children, FUN = function(x) (x$product || length(x$sumset) > 0 || x$fraction || x$sum))
    if (sum(non_atomic) > 0) {
      parse_children <- P$children[non_atomic]
      P$children <- P$children[!non_atomic]
      for (i in 1:length(parse_children)) {
        P.parse <- parse.expression(parse_children[[i]], topo, G.unobs, G, G.obs)
        # A collapsed result contributes its children directly to this product.
        if (!is.null(P.parse$collapse)) {
          P$children <- c(P$children, P.parse$children)
        } else {
          P$children[[length(P$children) + 1]] <- P.parse
        }
      }
    }
    if (length(P$children) > 0) {
      # If non-atomic children remain after parsing, skip simplification.
      non_atomic <- sapply(P$children, FUN = function(x) (x$product || length(x$sumset) > 0 || x$fraction || x$sum))
      if (sum(non_atomic) > 0) simplify_terms <- FALSE
    } else return(NULL)
  }
  # No pending summation: nothing more to do.
  if (length(P$sumset) == 0) return(P)
  # Atomic term: summing over exactly its own variable marginalises it away.
  if (!P$product) {
    if (identical(P$sumset, P$var)) return(NULL)
    else return(P)
  }
  # Product of atomic terms: order children and sumset in reverse topological
  # order, then hand the expression to simplify().
  if (simplify_terms) {
    ord.children <- order(unlist(lapply(P$children, FUN = function(x) which(topo == x$var))), decreasing = TRUE)
    ord.sum <- order(sapply(P$sumset, FUN = function(x) which(topo == x)), decreasing = TRUE)
    P$children <- P$children[ord.children]
    P$sumset <- P$sumset[ord.sum]
    P <- simplify(P, topo, G.unobs, G, G.obs)
    if (length(P$children) == 0) return(NULL)
  }
  # Factor out children that do not depend on any summation variable: they
  # can be moved outside the sum into a separate product (P.parse).
  P.parse <- probability(product = TRUE, children = list())
  remove <- c()
  j <- 0
  if (length(P$sumset) > 0) {
    for (i in 1:length(P$children)) {
      dep <- dependencies(P$children[[i]])
      if (length(intersect(dep, P$sumset)) == 0) {
        remove <- c(remove, i)
        j <- j + 1
      }
    }
  } else return(P)
  if (j > 0) {
    # Move the sum-independent factors out; whatever remains under the sum
    # is appended afterwards (a single remaining child absorbs the sumset
    # and is parsed again).
    P.parse$children <- P$children[remove]
    P.parse$collapse <- TRUE
    P$children <- P$children[-remove]
    if (length(P$sumset) > 0) {
      if (length(P$children) == 1) {
        sum_p <- P$sumset
        P <- P$children[[1]]
        P$sumset <- union(sum_p, P$sumset) %ts% topo
        P <- parse.expression(P, topo, G.unobs, G, G.obs)
      }
    }
    if (length(P$children) > 0) P.parse$children[[j + 1]] <- P
    return(P.parse)
  }
  return(P)
}
|
7ca90c19a7c47fcf13aaa8acd9f60995dd467f45 | 59eae2d3aea156f68a046ffc7fae33440260eca4 | /dwf_analysis.R | c7a64fac6b1a3a31cbd39dc9ff38cdb5991612aa | [] | no_license | dleutnant/dwf_detect | 36b5ad732996c9e064a949be5187e5a04351eff8 | 965c71830b4f0ccb5387a7f314aadbcd724ad9ff | refs/heads/master | 2021-01-22T21:54:15.910919 | 2017-07-02T12:09:50 | 2017-07-02T12:09:50 | 92,744,546 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,782 | r | dwf_analysis.R | ## calculate similarity of daily discharge patterns
#' Get Dry Weather Flow Patterns
#'
#' Splits the discharge series into daily sequences, clusters them with a
#' shape-based distance (SBD) and complete-linkage hierarchical clustering,
#' and treats the sequences in the most densely populated cluster as the
#' dry weather flow (dwf) pattern.
#'
#' @param x The discharge time series (xts)
#' @param h Height at which the cluster tree is cut (defaults to 0.1)
#' @param separate_weekend logical. Should weekdays and weekends be analyzed separately?
#' @param make_equidistant logical. Should the time series be made equidistant?
#' @param plot logical. Should the sequences be plotted?
#' @param fig.opts Use additional figure options (currently legend.position only)
#' @param verbose logical. Need more information?
#' @return A list with dwf pattern (median) and sequences.
#' @export get_dwf_pattern
#' @importFrom magrittr "%>%"
get_dwf_pattern <- function(x,
                            h = 0.1,
                            separate_weekend = FALSE,
                            make_equidistant = FALSE,
                            plot = TRUE,
                            fig.opts = NULL,
                            verbose = FALSE) {
  # take the original discharge time series and create a list for weekdays and
  # weekends
  list_of_daily_xts <- .prepare_daily_series(x = x,
                                             separate_weekend = separate_weekend,
                                             make_equidistant = make_equidistant)
  # perform algorithm for weekdays and weekends separately
  dwf_seq <- lapply(list_of_daily_xts, function(weektype) {
    # take list of xts objects (numeric cores only)
    t <- lapply(weektype, zoo::coredata)
    # remove days which have no variance
    t <- t[which(unlist(lapply(t, stats::sd)) != 0)]
    ## create a matrix (one column per day) and z-scale it
    t_scaled <- do.call(cbind, t) %>%
      scale
    # perform SBD distance calculation between day-columns and cluster the
    # result with complete-linkage hierarchical clustering
    hcl <- proxy::dist(t_scaled,
                       method = "SBD",
                       by_rows = FALSE) %>%
      base::as.matrix(.) %>%
      stats::as.dist(.) %>%
      stats::hclust(d = ., method = "complete")
    # cut the hclust tree at height h
    ct <- stats::cutree(hcl, h = h)
    # get elements within the most densed cluster aka most similar sequences
    # (Mode() is an external helper; presumably the most frequent cluster id)
    dwf_seq <- which(ct == Mode(ct))
    return(dwf_seq)
  })
  # How many sequences are in the most densed cluster?
  if (verbose) message(paste("No of sequences in the most densed cluster:",
                             paste(sapply(dwf_seq, length), collapse = ",")))
  # subset list based on dwf_seq
  list_of_xts_pattern <- mapply("[", list_of_daily_xts, dwf_seq, SIMPLIFY = FALSE)
  # compute the pointwise median dry weather flow per week type
  list_of_median_xts_pattern <- lapply(list_of_xts_pattern, function(x) {
    ret <- cbind(matrixStats::colMedians(t(do.call(cbind, lapply(x, zoo::coredata)))))
    return(ret)
  })
  # return a list with dry weather flow days and median
  res <- list(dwf_sequences = list_of_xts_pattern,
              dwf_median = list_of_median_xts_pattern)
  if (plot) .plot_pattern(x = res,
                          fig.opts = fig.opts)
  return(res)
}
#' Plot dry weather flow sequences
#'
#' Internal helper for \code{get_dwf_pattern()}: draws one panel per week
#' type showing every selected daily sequence (coloured by sequence id) plus
#' the pointwise median (thick grey line), aligns the panel widths and draws
#' them on a fresh grid page.
#'
#' @param x Result list as assembled by \code{get_dwf_pattern()}; only
#'   \code{x$dwf_sequences} is used.
#' @param fig.opts Optional list of figure options; only
#'   \code{legend.position} is currently honoured.
#' @keywords internal
#' @importFrom magrittr "%>%"
.plot_pattern <- function(x, fig.opts = NULL) {
  # Default figure options when none are supplied.
  if (is.null(fig.opts)) {
    fig.opts <- list(title = "NN",
                     legend.position = "right")
  }
  panel_list <- lapply(x$dwf_sequences, function(x) {
    # Extract the time-of-day index from the first sequence and re-anchor it
    # on a dummy date (assumes all sequences share the same index).
    index <- strftime(zoo::index(x[[1]]),
                      format = "%H:%M:%S") %>%
      # add a dummy date
      paste("2016-12-12", .) %>%
      # make posixct again
      as.POSIXct(., tz = xts::tzone(x[[1]]))
    x <- lapply(x, zoo::coredata)
    # Wide data frame: one column per daily sequence, plus pointwise medians.
    m <- data.frame(index, do.call(cbind, x))
    colmed <- matrixStats::colMedians(x = t(m[,-1]))
    colnames(m) <- c("index", seq_len(length(x)))
    dfcm <- data.frame(index = index, colmed = colmed)
    # Long format for ggplot; sequence ids become sortable factors.
    gg_obj <- m %>%
      tidyr::gather(id, discharge, -index) %>%
      dplyr::mutate(id = factor(id,
                                levels = unique(as.numeric(id)))) %>%
      ggplot2::ggplot(., ggplot2::aes(x = index,
                                      y = discharge,
                                      color = id)) +
      ggplot2::geom_line() +
      # Pointwise median drawn on top of the individual sequences.
      ggplot2::geom_line(data = dfcm,
                         mapping = ggplot2::aes(x = index, y = colmed),
                         size = 2,
                         linetype = 1,
                         color = "gray45") +
      ggplot2::scale_x_datetime(labels = scales::date_format("%H:%M")) +
      ggplot2::labs(x = "",
                    y = "discharge (l/s)",
                    subtitle = paste("no of sequences:", length(x))) +
      #.udtools_plot_theme() +
      ggplot2::theme_bw() +
      ggplot2::theme(legend.position = fig.opts$legend.position)
    return(gg_obj)
  })
  # Prefix each panel subtitle with its week-type name.
  for (i in seq_along(panel_list)) {
    tmp <- panel_list[[i]]$labels$subtitle
    panel_list[[i]]$labels$subtitle <- paste0(names(panel_list)[i], " (", tmp, ")")
  }
  # Convert the panels to grobs and force a common width so they line up.
  grob_list <- lapply(panel_list, ggplot2::ggplotGrob)
  widths <- do.call(grid::unit.pmax, lapply(grob_list, "[[", "widths"))
  grob_list <- lapply(grob_list, function(g) {g$widths <- widths; g})
  grid::grid.newpage()
  # (A dead, unfinished branch that assigned an unused local when
  #  fig.opts$top was set has been removed; the title below is fixed.)
  p <- gridExtra::arrangeGrob(grobs = grob_list, top = "Analysis of Dry Weather Flow Pattern",
                              ncol = 1,
                              heights = rep(4, length(grob_list)))
  grid::grid.draw(p)
}
#' Prepare per-day discharge series
#'
#' Cleans an xts discharge series (negative values set to NA, NA rows
#' removed), optionally splits it into weekday/weekend groups, and cuts each
#' group into one xts object per calendar day, keeping only days with a full
#' complement of observations.
#'
#' @param x The xts object to prepare
#' @param separate_weekend If weekends should be separated
#' @param make_equidistant Equidistance required?
#' @keywords internal
.prepare_daily_series <- function(x, separate_weekend = FALSE, make_equidistant = TRUE) {
  # we need the following packages:
  # dtwclust, magrittr
  # set negative values to NA (they are treated as invalid measurements)
  x[x < 0] <- NA
  # remove NA
  x <- stats::na.omit(x)
  # error handling: nothing left after cleaning
  if (nrow(x) == 0) {
    warning(paste(names(x), "only NA"))
    return(NULL)
  }
  if (separate_weekend) {
    # create a list of xts objects distinguished by weekday and weekend
    list_of_xts <- separate_xts(xts = x,
                                interval = "wday",
                                index = list(1:5, c(0,6)),
                                names = c("weekdays", "weekend"))
  } else {
    # create a list of xts objects with no further segmentation
    list_of_xts <- separate_xts(xts = x,
                                interval = "wday",
                                index = list(0:6),
                                names = c("weekdays and weekend"))
  }
  # create now daily xts objects (one per calendar day)
  list_of_daily_series <- lapply(list_of_xts,
                                 FUN = function(x) separate_xts(xts = x,
                                                                interval = "day",
                                                                index = unique(xts::.indexday(x))))
  # need to make equidistant? (15-min grid, gaps up to 4 steps filled)
  if (make_equidistant) {
    list_of_daily_series <- list_of_daily_series %>%
      purrr::map(., ~ purrr::map(., ~ udtools::make_equidistant(., mode = 3, by = "15 mins", maxgap = 4, all = c(FALSE, TRUE))))
  }
  # remove all days with too little data (e.g. less than 24, 48, 96 values (depends on the frequency...))
  # first estimate the typical periodicity -> expected number of values per day
  m_period <- 60 / as.numeric(mode_periodicity(x, units = "mins")) * 24
  # now use R's Higher-Order Function to filter the list
  list_of_daily_series_cleaned <- lapply(list_of_daily_series,
                                         function(x) Filter(function(y) length(y) == m_period, x))
  # if no sequences survive, harmonize the series and filter again.
  # NOTE(review): this fallback repeats the equidistant/filter steps on the
  # same list, so it only changes the outcome when make_equidistant was
  # FALSE on the first pass -- confirm that this is the intended behaviour.
  if (max(unlist(lapply(list_of_daily_series_cleaned, length))) == 0) {
    warning("sequences needed to be harmonized.")
    list_of_daily_series <- list_of_daily_series %>%
      purrr::map(., ~ purrr::map(., ~ udtools::make_equidistant(., mode = 3, by = "15 mins", maxgap = 4, all = c(FALSE, TRUE))))
    # remove all days with too little data (as above)
    m_period <- 60 / as.numeric(mode_periodicity(x, units = "mins")) * 24
    # now use R's Higher-Order Function to filter the list
    list_of_daily_series_cleaned <- lapply(list_of_daily_series,
                                           function(x) Filter(function(y) length(y) == m_period, x))
  }
  return(list_of_daily_series_cleaned)
}
|
52c8638e91a3ad0eb1990e6c9723fae6409305b4 | 7b74f00cd80694634e6925067aaeb6572b09aef8 | /2019/Assignment/FE8828-Arihant_Jain/Assignment 1/app.R | 0b068304ff7b59f7648a637c9f571dfebe6792f6 | [] | no_license | leafyoung/fe8828 | 64c3c52f1587a8e55ef404e8cedacbb28dd10f3f | ccd569c1caed8baae8680731d4ff89699405b0f9 | refs/heads/master | 2023-01-13T00:08:13.213027 | 2020-11-08T14:08:10 | 2020-11-08T14:08:10 | 107,782,106 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,157 | r | app.R | library(shiny)
# install.packages("shinythemes")
library(shinythemes)
companyName<-"Roam With ReOm"
selectedTheme<-shinytheme("cerulean")
headerPanel<-titlePanel(tags$a(h1(companyName), style = "font-size:50px;text-align:center;",href="javascript:history.go(0)"))
#page1 -- "Home" tab: hero image plus an "About us" well panel.
page1pic<-HTML('<center><img src="pic_1.jpg" width="500"></center>')
page1section<-"About Roam with ReOm"
para_1 <- "We are Reema and Om and we are travelholics! We are working professionals and we understand the significance of trips in our lives. We welcome you to explore the world with us."
para_2 <- "We have travelled to 3 continents and 7 countries. Being explorer, we believe in enjoying the locations than making a remarkable country count. One target is to explore all the continents before our last breath."
para_3 <- "Roam with ReOm is here to share the travel experience and ideas for your next vacations or weekend getaways."
para_4 <- "Let's explore the world with us!"
# Bug fix: the closing bold tag was written as HTML('</b') — missing the
# '>' — which emitted malformed HTML. para_4 is now correctly wrapped in
# <b>...</b>. (The '<br></br>' pattern is kept as-is for consistency with
# the rest of the file, although <br> is a void element in HTML.)
page1content<-wellPanel(para_1,HTML('<br></br>'),para_2,HTML('<br></br>'),para_3,HTML('<br></br><b>'),para_4,HTML('</b>'))
page1<-mainPanel(page1pic,h1(page1section),h3(page1content), width=12)
navPage1<-tabPanel("Home", page1)
#page2 -- "Who are we?" tab: intro text plus side-by-side bios of the two authors.
page2pic<-HTML('<center><img src="pic_4.jpg" width="800"></center>')
para_1 <- "Roam with ReOm is all about living life to the fullest and not just travel to check off the bucket list."
para_2 <- "We are a power couple travelling to distinct destinations and living our dreams together. We have been exploring places for over 2 years now! It has been a wonderful journey together after our marriage."
para_2_2 <- "We have already explored 7 countries in these 2 years and many more are waiting ahead. Since we both are working in Singapore, it becomes a little difficult to take out time from the work life to visit our dream destinations."
para_2_3 <- "So, here we are trying to help people by providing plans on how to travel while working."
para_2_4 <- "It really needs a passion to travel if you are a working professional. Here is a bit more about us:"
para_2_Reema <- "I am an Engineer and MBA (marketing) I am living my life to fullest as I am doing what I love to do. I am a digital marketer who loves the online world. It's my passion to travel across the world and make people aware about unexplored beauty of nature. Expect all kind of childish behaviour and a love for photography from me."
para_2_Om <- "I am a typical finance geek who loves his work a lot. Since the day I got engaged to Reema, explored a travel freak in me. I just love booking tickets and never think twice even when it's a plan to go abroad. I know it's weird but that's how I am. I love to investigate nature's grace and can spend a day with this."
# Two 6-unit columns: photo + bio for each author.
page_2_conclusion <- fluidRow(column(6,HTML('<center>'),h1("REema (Kratika)"),HTML('</center>'),HTML('<center><img src="pic_5.jpg" width="300"></center>'),h3(para_2_Reema)),column(6,HTML('<center>'),h1("OM (Arihant)"),HTML('</center>'),HTML('<center><img src="pic_6.jpg" width="300"></center>'),h3(para_2_Om)))
page2content<-wellPanel(para_1,HTML('<br></br>'),para_2,para_2_2,HTML('<b>'),para_2_3,HTML('</b>'),para_2_4)
page2<-mainPanel(page2pic,h3(page2content),page_2_conclusion, width=12)
navPage2<-tabPanel("Who are we?", page2)
#page3 -- "Gallery" tab: a 3x3 grid of destination photos (last row has one
# centered image with empty flanking columns).
page3<-fluidPage(
  fluidRow(
    column(4,
           HTML('<center><img src="pic_7.jpg" width="300"></center>'),
           HTML('<center>'),h2("Hawaii"),HTML('</center>')
    ),
    column(4,
           HTML('<center><img src="pic_8.jpg" width="300"></center>'),
           HTML('<center>'),h2("Finland"),HTML('</center>')
    ),
    column(4,
           HTML('<center><img src="pic_10.jpg" width="300"></center>'),
           HTML('<center>'),h2("Hong Kong"),HTML('</center>')
    )
  ),
  HTML('<br></br>'),
  fluidRow(
    column(4,
           HTML('<center><img src="pic_9.jpg" width="300"></center>'),
           HTML('<center>'),h2("Indonesia"),HTML('</center>')
    ),
    column(4,
           HTML('<center><img src="pic_12.jpg" width="300"></center>'),
           HTML('<center>'),h2("Singapore"),HTML('</center>')
    ),
    column(4,
           HTML('<center><img src="pic_11.jpg" width="300"></center>'),
           HTML('<center>'),h2("Thailand"),HTML('</center>')
    )
  ),
  HTML('<br></br>'),
  fluidRow(
    column(4,""
    ),
    column(4,
           HTML('<center><img src="pic_13.jpg" width="300"></center>'),
           HTML('<center>'),h2("India"),HTML('</center>')
    ),
    column(4,""
    )
  )
)
page3_final<-mainPanel(page3,width=12)
navPage3<-tabPanel("Gallery", page3_final)
#page4 -- "Contact Us" tab: intro paragraph plus two contact cards
# (email and Instagram), each an image wrapped in a link.
page_4_para_1 <- "Whether you'd like to ask us a question about your upcoming travels or get in touch with us regarding a work collaboration, we're always happy to hear from you. If you catch us at a time when we are prancing around in some remote corner of the world, it might take us a bit longer to get back to you, but you'll surely hear from us. Just pick your favourite medium of getting in touch with us :)"
page4_contact<-fluidPage(
  fluidRow(
    column(6,
           # NOTE(review): "aihant001" looks like a typo of "arihant001" in
           # the mailto address — confirm with the owner before changing.
           tags$a(HTML('<center><img src="message.png" width="100"></center>'),href="mailto:aihant001@e.ntu.edu.sg"),
           HTML('<center>'),h2("Email"),HTML('</center>'),
           p("The best way to reach us via email. We promise to get back to you ASAP!")
    ),
    column(6,
           tags$a(HTML('<center><img src="instagram.jpg" width="100"></center>'),href="https://www.instagram.com/j_kratika/"),
           HTML('<center>'),h2("Instagram"),HTML('</center>'),
           p("Why not check out Instagram gallery and drop us a message there?")
    )
  )
)
page4<-mainPanel(h1("Thanks for dropping by"),h3(page_4_para_1),page4_contact, width=12)
navPage4<-tabPanel(title = "Contact Us", page4 )
# Assemble the four tabs into a navbar, build the UI, and launch the app.
# NOTE(review): fluidPage(fluidPage(...)) nests two page containers; the outer
# one appears redundant — harmless, but worth confirming before simplifying.
navBar<-navbarPage("",navPage1,navPage2,navPage3,navPage4)
ui <- fluidPage(fluidPage(theme=selectedTheme,headerPanel,navBar))
# Server is intentionally empty: the app is a static brochure site with no
# reactive inputs/outputs.
server <- function(input, output,session){}
shinyApp(ui = ui, server = server)
|
98ee14e6f5da003676ea3f63d433a60deefd91dc | 706507a316e89c92eafbe64bdccc30849c0d29bd | /CLIRUN/Senarios/CRMPUPCODE.r | 5d202ec3dfff96a940dfa5277a01009f791207b4 | [] | no_license | sfletcher23/Fletcher_2019_Learning_Climate | 84141ac42354aa11ff8a4187a03c95405562d1a4 | 9f1aca15e7001062249a8b39f92ff59323445a27 | refs/heads/master | 2021-07-01T01:54:01.431167 | 2020-10-01T19:49:35 | 2020-10-01T19:49:35 | 166,290,544 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 540 | r | CRMPUPCODE.r | library(base)
# Read a 1200 x 1254 simulation output matrix and plot yearly sums for one
# sub-basin across 57 simulation runs.
# NOTE(review): machine-specific working directory — breaks outside this box.
setwd("C:/Works/Vietnam/Model4/OutputData/data/sim")
# 1200 rows (presumably 100 years x 12 months — confirm) x 1254 columns.
Allf <- matrix(scan("Allout.txt"),1200,1254)
# Seed a zero column so cbind() can grow dd; the seed is dropped below.
dd= (1:1200)*0
for( i in 0:56)
{
# Column stride is 22 (one block of 22 sub-basins per run); the "+7" selects
# sub-basin 7. Replace 7 with DD in 1:22 to select another sub-basin.
dd=cbind(dd,Allf[,(i*22)+7]) # [,(i*22)+DD]) DD is the subbasin number ranging from 1:22
}
# Drop the zero seed column, leaving 57 columns (runs 0..56).
dd=dd[,-1]
# Annual totals: sum each consecutive block of 12 monthly rows.
summ=matrix(0,100,57)
for(i in 1:100)
summ[i,]=apply(dd[((i-1)*12+1):(i*12),],2,sum)
# Overlay all 57 annual series; the first run highlighted in red.
plot(summ[,1],type="l",col="red",xlim=c(0,100),ylim=c(min(summ),max(summ)))
for(i in 1:56)
lines(summ[,i],type="l")
lines(summ[,1],type="l",col="RED")
# Second device (Windows-only win.graph): per-year distribution across runs.
win.graph()
boxplot(t(summ))
f7e394f3c7dc6c798f1d748cf00b5119f2c0bba2 | 07b5d358a2ba2ec11a9744903e0ffd8b6adbfef4 | /09_dCamp - Data Manipulation in R with dplyr.r | 4a6b360ecb6b5bc4747b9f17ead77617a858cfd7 | [] | no_license | Foudzz/DataCamp-DS-for-R | b1908d85e696f169697a069a988be686fc9d3b0e | b7d9572d79937ed8b081f5e1ce7ed065f25ab583 | refs/heads/master | 2020-07-02T20:15:01.162386 | 2019-01-24T21:48:41 | 2019-01-24T21:48:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,706 | r | 09_dCamp - Data Manipulation in R with dplyr.r | # --------------------------------------------
# --------------------------------------------
# ---------- Data Manipulation in R ---------
# --------------------------------------------
# DataCamp course notes: dplyr on the hflights dataset (Houston 2011 flights).
library(hflights);library(dplyr) # done; install.packages("hflights")
# Both the dplyr and hflights packages are loaded
# Convert the hflights data.frame into a hflights tbl
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# modern equivalent. Behavior here is unchanged.
hflights <- tbl_df(hflights)
# Display the hflights tbl
print(hflights)
# Create the object carriers
carriers <- hflights$UniqueCarrier
carriers
# Changing labels of hflights, part 1 of 2
# Both the dplyr and hflights packages are loaded into workspace
# Lookup table mapping two-letter carrier codes to readable names.
lut <- c("AA" = "American", "AS" = "Alaska", "B6" = "JetBlue", "CO" = "Continental",
         "DL" = "Delta", "OO" = "SkyWest", "UA" = "United", "US" = "US_Airways",
         "WN" = "Southwest", "EV" = "Atlantic_Southeast", "F9" = "Frontier",
         "FL" = "AirTran", "MQ" = "American_Eagle", "XE" = "ExpressJet", "YV" = "Mesa")
# Add the Carrier column to hflights (named-vector lookup by carrier code)
hflights$Carrier <- lut[hflights$UniqueCarrier]
# Glimpse at hflights
glimpse(hflights$Carrier)
# The five verbs and select in more detail
# onVariables(select, mutate (new col, values)), onObservations(filter, arrange), onGroups(summarize)
# do delays tend to shrink or grow during a flight?
# hflights is pre-loaded as a tbl, together with the necessary libraries.
# Print out a tbl with the four columns of hflights related to delay
print(select(hflights, ActualElapsedTime, AirTime, ArrDelay, DepDelay))
# Print out the columns Origin up to Cancelled of hflights
print(select(hflights, 14:19))
# Answer to last question: be concise!
select(hflights, 1:4, 12:21)
# dplyr comes with a set of helper functions that can help you select
# groups of variables inside a select() call:
# starts_with("X"): every name that starts with "X",
# ends_with("X"): every name that ends with "X",
# contains("X"): every name that contains "X",
# matches("X"): every name that matches "X", where "X" can be a regular expression,
# num_range("x", 1:5): the variables named x01, x02, x03, x04 and x05,
# one_of(x): every name that appears in x, which should be a character vector.
# Pay attention here: When you refer to columns directly inside select(), you don't use quotes.
# If you use the helper functions, you do use quotes.
# As usual, hflights is pre-loaded as a tbl, together with the necessary libraries.
# Print out a tbl containing just ArrDelay and DepDelay
select(hflights, contains("Delay"))
# Print out a tbl as described in the second instruction, using both helper functions and variable names
select(hflights, 7:9, 19:20)
select(hflights, 7:9, Cancelled, CancellationCode)
# Print out a tbl as described in the third instruction, using only helper functions.
select(hflights, contains("Time"), contains("Delay"))
names(hflights)
# both hflights and dplyr are available
# Finish select call so that ex1d matches ex1r
ex1r <- hflights[c("TaxiIn", "TaxiOut", "Distance")]
ex1d <- select(hflights, contains("Taxi"), Distance)
# Finish select call so that ex2d matches ex2r
ex2r <- hflights[c("Year", "Month", "DayOfWeek", "DepTime", "ArrTime")]
ex2d <- select(hflights, 1:2, 4:5, ArrTime)
# Finish select call so that ex3d matches ex3r
ex3r <- hflights[c("TailNum", "TaxiIn", "TaxiOut")]
ex3d <- select(hflights,TailNum, 17:18 )
# mutate, adds new data to a dataset (adds columns)
# NOTE(review): h1 is never defined in this script — this line is a course
# snippet kept for reference and will error if run as-is.
h2 <- mutate(h1, loss = ArrDelay - DepDelay)
# hflights and dplyr are loaded and ready to serve you.
# Add the new variable ActualGroundTime to a copy of hflights and save the result as g1.
g1 <- mutate(hflights, ActualGroundTime = ActualElapsedTime - AirTime)
# Add the new variable GroundTime to g1. Save the result as g2.
g2 <- mutate(g1,GroundTime = TaxiIn + TaxiOut )
# Add the new variable AverageSpeed to g2. Save the result as g3.
g3 <- mutate(g2, AverageSpeed = Distance / AirTime * 60)
# Print out g3
g3
# hflights and dplyr are ready, are you?
# Add a second variable loss_ratio to the dataset: m1
# (mutate can reference a column created earlier in the same call)
m1 <- mutate(hflights, loss = ArrDelay - DepDelay, loss_ratio = loss/DepDelay)
# Add the three variables as described in the third instruction: m2
m2 <- mutate(hflights, TotalTaxi = TaxiIn + TaxiOut,
             ActualGroundTime = ActualElapsedTime - AirTime,
             Diff = TotalTaxi - ActualGroundTime)
View(m2)
# Filter and arrange
# R comes with a set of logical operators that you can use inside filter():
#
# x < y, TRUE if x is less than y
# x <= y, TRUE if x is less than or equal to y
# x == y, TRUE if x equals y
# x != y, TRUE if x does not equal y
# x >= y, TRUE if x is greater than or equal to y
# x > y, TRUE if x is greater than y
# x %in% c(a, b, c), TRUE if x is in the vector c(a, b, c)
# hflights is at your disposal as a tbl, with clean carrier names
# All flights that traveled 3000 miles or more
filter(hflights, Distance >= 3000)
# All flights flown by one of JetBlue, Southwest, or Delta
filter(hflights, UniqueCarrier %in% c("JetBlue","Southwest","Delta"))
# All flights where taxiing took longer than flying
filter(hflights,TaxiIn + TaxiOut > AirTime )
# hflights is at your service as a tbl!
# All flights that departed before 5am or arrived after 10pm
filter(hflights, DepTime < 500 | ArrTime > 2200)
# All flights that departed late but arrived ahead of schedule
filter(hflights, DepDelay > 0 & ArrDelay < 0)
# All flights that were cancelled after being delayed
filter(hflights, DepDelay > 0 & Cancelled == 1)
# hflights is already available in the workspace
# Select the flights that had JFK as their destination: c1
c1 <- filter(hflights, Dest == "JFK")
# Combine the Year, Month and DayofMonth variables to create a Date column: c2
c2 <- mutate(c1, Date = paste(Year,Month,DayofMonth, sep="-"))
# Print out a selection of columns of c2
select(c2, Date, DepTime, ArrTime, TailNum)
# Weekend (Sat/Sun) long-haul flights with short total taxi time.
c1 <- filter(hflights, DayOfWeek %in% c(6,7), Distance > 1000, TaxiIn + TaxiOut < 15)
str(c1)
View(c1)
# dplyr and the hflights tbl are available
# Definition of dtc
dtc <- filter(hflights, Cancelled == 1, !is.na(DepDelay))
# Arrange dtc by departure delays
arrange(dtc, DepDelay)
# Arrange dtc so that cancellation reasons are grouped
arrange(dtc,CancellationCode )
# Arrange dtc according to carrier and departure delays
arrange(dtc,UniqueCarrier, DepDelay)
# dplyr and the hflights tbl are available
# Arrange according to carrier and decreasing departure delays
arrange(hflights, UniqueCarrier, desc(DepDelay))
# Arrange flights by total delay (normal order).
arrange(hflights,ArrDelay + DepDelay)
# Summarise and the pipe operator
# hflights and dplyr are loaded in the workspace
# Print out a summary with variables min_dist and max_dist
summarise(hflights, min_dist = min(Distance), max_dist = max(Distance))
# Print out a summary with variable max_div
h2 <- filter(hflights, Diverted == 1)
summarise(h2, max_div = max(Distance))
# Aggregate functions
#
# You can use any function you like in summarise() so long as the function can take a vector of data and return a single number. R contains many aggregating functions, as dplyr calls them:
#
# min(x) - minimum value of vector x.
# max(x) - maximum value of vector x.
# mean(x) - mean value of vector x.
# median(x) - median value of vector x.
# quantile(x, p) - pth quantile of vector x.
# sd(x) - standard deviation of vector x.
# var(x) - variance of vector x.
# IQR(x) - Inter Quartile Range (IQR) of vector x.
# diff(range(x)) - total range of vector x.
#
# hflights is available
# Remove rows that have NA ArrDelay: temp1
temp1 <- filter(hflights, !is.na(ArrDelay))
# Generate summary about ArrDelay column of temp1
summarise(temp1, earliest = min(ArrDelay), average = mean(ArrDelay), latest = max(ArrDelay), sd = sd(ArrDelay))
# Keep rows that have no NA TaxiIn and no NA TaxiOut: temp2
temp2 <- filter(hflights, !is.na(TaxiIn), !is.na(TaxiOut))
# Print the maximum taxiing difference of temp2 with summarise()
summarise(temp2, max_taxi_diff = max(abs(TaxiIn-TaxiOut)))
# dplyr aggregate functions
#
# dplyr provides several helpful aggregate functions of its own, in addition to the ones that are already defined in R. These include:
#
# first(x) - The first element of vector x.
# last(x) - The last element of vector x.
# nth(x, n) - The nth element of vector x.
# n() - The number of rows in the data.frame or group of observations that summarise() describes.
# n_distinct(x) - The number of unique values in vector x.
#
# Next to these dplyr-specific functions, you can also turn a logical test into an aggregating function
# with sum() or mean(). A logical test returns a vector of TRUE's and FALSE's.
# When you apply sum() or mean() to such a vector, R coerces each TRUE to a 1 and each FALSE to a 0. sum()
# then represents the total number of observations that passed the test; mean() represents the proportion.
# hflights is available with full names for the carriers
# Generate summarizing statistics for hflights
summarise(hflights,
          n_obs = n(),
          n_carrier = n_distinct(UniqueCarrier),
          n_dest = n_distinct(Dest)
)
# All American Airline flights
aa <- filter(hflights, UniqueCarrier == "American")
# Generate summarizing statistics for aa
summarise(aa,
          n_flights = n(),
          n_canc = sum(Cancelled == 1),
          avg_delay = mean(ArrDelay, na.rm = TRUE)
)
# Chain together mutate(), filter() and summarise()
hflights %>% mutate(RealTime = ActualElapsedTime + 100, mph = Distance / RealTime * 60) %>%
  filter(!is.na(mph) & mph < 70) %>%
  summarise(n_less = n(), n_dest = n_distinct(Dest), min_dist = min(Distance), max_dist = max(Distance))
# Finish the command with a filter() and summarise() call
hflights %>%
  mutate(RealTime = ActualElapsedTime + 100, mph = Distance / RealTime * 60) %>%
  filter(mph < 105 | Cancelled == 1 | Diverted == 1) %>%
  summarise(n_non = n(), n_dest = n_distinct(Dest), min_dist = min(Distance), max_dist = max(Distance))
# Group_by and working with databases
# hflights is in the workspace as a tbl, with translated carrier names
# Make an ordered per-carrier summary of hflights
hflights %>%
  group_by(UniqueCarrier) %>%
  summarise(p_canc = mean(Cancelled == 1) * 100,
            avg_delay = mean(ArrDelay, na.rm = TRUE)) %>%
  arrange(avg_delay, p_canc)
# dplyr is loaded, hflights is loaded with translated carrier names
# Ordered overview of average arrival delays per carrier
hflights %>% filter(ArrDelay > 0) %>%
  group_by(UniqueCarrier) %>%
  summarise(avg = mean(ArrDelay)) %>%
  mutate(rank = rank(avg)) %>%
  arrange(rank)
# dplyr and hflights (with translated carrier names) are pre-loaded
# How many airplanes only flew to one destination?
hflights %>%
  group_by(TailNum) %>%
  summarise(ndest = n_distinct(Dest)) %>%
  filter(ndest == 1) %>%
  summarise(nplanes = n())
# Find the most visited destination for each carrier
hflights %>%
  group_by(UniqueCarrier, Dest) %>%
  summarise(n = n()) %>%
  mutate(rank = rank(desc(n))) %>%
  filter(rank == 1)
# Connect to database
# library(data.table)
# hflights2 <- as.data.table(hflights)
# hflights2 is pre-loaded as a data.table
# Use summarise to calculate n_carrier
# summarise(hflights2, n_carrier = n_distinct(UniqueCarrier))
# Set up a connection to the mysql database
# (public DataCamp demo credentials; dplyr pushes the verbs below down to SQL)
my_db <- src_mysql(dbname = "dplyr",
                   host = "courses.csrrinzqubik.us-east-1.rds.amazonaws.com",
                   port = 3306,
                   user = "student",
                   password = "datacamp")
# Reference a table within that source: nycflights
nycflights <- tbl(my_db, "dplyr")
# glimpse at nycflights
glimpse(nycflights)
# Ordered, grouped summary of nycflights
nycflights %>% group_by(carrier) %>%
  summarise(n_flights = n(), avg_delay = mean(arr_delay)) %>%
  arrange(avg_delay)
# new version; rename, mute, count, group_by short hand, non ep joins, multi table, distinct,
# test on local sql
# Set up a connection to the mysql database
# NOTE(review): hardcoded local credentials in source — fine for a personal
# sandbox, but should be moved to environment variables for anything shared.
my_db <- src_mysql(dbname = "book_other",
                   host = "127.0.0.1",
                   port = 3307,
                   user = "root",
                   password = "admin1")
# Reference a table within that source: nycflights
t_tweets <- tbl(my_db, "trumpstweets")
# glimpse at nycflights
glimpse(t_tweets)
# Ordered, grouped summary of nycflights
t_tweets %>%
  select(row_names, source, text, created_at, retweet_count, favorite_count, is_retweet, id_str) %>%
  filter(row_names >=1 & row_names < 20)
|
826d384bc266b70e78e268ec859c17e756caead2 | 5b75e203e114bbf720c0476868bce4f8a38ed52c | /Análisis Discriminante.R | de65391f798155250bdc402a463f55057d86f123 | [] | no_license | DataLabUsal/BloodPrediction | 67a6228393b5e3ecdeeaa7fece12fdcca6b04524 | 78fa77960a751fec0d74baa68e36cc9094bf06a7 | refs/heads/master | 2021-01-01T04:35:00.671037 | 2016-05-25T09:47:26 | 2016-05-25T09:47:26 | 59,094,843 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 384 | r | Análisis Discriminante.R | #Anális discriminante
# Linear discriminant analysis of the March-2007 donation outcome, followed
# by an ROC curve of the fitted posterior probabilities.
library(MASS)
# NOTE(review): the model is fit on `Training` but predicted on
# `Training_Data` — confirm both objects exist and that this split is intended.
rld <- lda(dona_Marzo2007~n+ultima_don+primera_don,data = Training)
plda <- predict(object = rld, newdata = Training_Data)
# Posterior probability of the positive class (second column).
res <- plda$posterior[,2]
library("ROCR")
# Prediction object pairing scores with observed labels
pred <- prediction(res,Training_Data$dona_Marzo2007)
# Performance: true-positive rate vs false-positive rate
perf <- performance(pred, "tpr","fpr")
# ROC plot
# Bug fix: the original called plot(perf1), but no object named perf1 is
# created anywhere in this script, so the first plot call errored at runtime.
plot(perf)
plot(perf,add=TRUE,col='red')
5284b3fa757225a507a067b2415a884ef8fc8447 | 04ec8bd151b04d9836122bc2d088d04c3164e7ca | /Player_Effect_Model.r | e577499bf7cc2a5ad0e49483082ab896598a631f | [] | no_license | dcbasley18/xGoals | c366d5c872a3f558a37fb5b5cfbc5735fb41cfdb | ca764558b28924df4391fdbb0b7c6757447404be | refs/heads/main | 2023-06-25T17:41:49.757867 | 2021-07-30T02:18:18 | 2021-07-30T02:18:18 | 385,971,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,299 | r | Player_Effect_Model.r | data <- read.csv("shot_ages.csv")
# Mixed-effects logistic models of goal probability with shooter/goalie
# random intercepts and natural-spline age effects.
# NOTE(review): ns() comes from the splines package and %>%/ggplot from
# dplyr/ggplot2 — none are loaded here; presumably attached elsewhere.
# NOTE(review): the csv at the top of this file is read into `data`, but the
# models use `shots_ages` and `shots_5v5`, which are not created in this
# file — confirm where they come from.
library(lme4)
player_effects_ages <- glmer(goal ~ arenaAdjustedShotDistance + shotAngleAdjusted +
                       shotGeneratedRebound + shotRush +
                       (1|shooterName) + (1|goalieName) +
                       ns(shooterAge, df = 6) + ns(goalieAge, df = 6),
                     data = shots_ages, family = "binomial")
# Creating data frame to visualize spline (shooter age) ---------------------
# Hold every covariate fixed and sweep shooter age across 18-42.
df_shooter = data.frame(shooterAge = seq(from = 18, to = 42, by=0.1),
                        goalieAge = 30,
                        arenaAdjustedShotDistance = 30,
                        shotAngleAdjusted = 30,
                        shotGeneratedRebound = 1,
                        shotRush = 1,
                        shooterName = "Shooter A",
                        goalieName = "Goalie A")
#get predicted values from model (allow.new.levels: unseen player names get
#the population-level prediction)
df_shooter$pred = predict(player_effects_ages, newdata = df_shooter, type = "response", allow.new.levels = TRUE)
#plot predicted values by age
df_shooter %>%
  ggplot(aes(x= shooterAge, y= pred))+
  geom_line()+
  labs(x = "Shooter Age",
       y = "Goal Probability") +
  theme_bw()
# Creating data frame to visualize spline (goalie age) -----------------------
df_goalie = data.frame(shooterAge = 30,
                       goalieAge = seq(from = 18, to = 42, by=0.1),
                       arenaAdjustedShotDistance = 30,
                       shotAngleAdjusted = 30,
                       shotGeneratedRebound = 0,
                       shotRush = 0,
                       shooterName = "Shooter A",
                       goalieName = "Goalie A")
#get predicted values from model
df_goalie$pred = predict(player_effects_ages, newdata = df_goalie, type = "response", allow.new.levels = TRUE)
#plot predicted values by age (1 - goal probability = save percentage)
df_goalie %>%
  ggplot(aes(x= goalieAge, y= 1-pred))+
  geom_line()+
  labs(x = "Goalie Age",
       y = "Save Percentage") +
  theme_bw()
# Simpler model without age splines, on 5-on-5 shots only.
player_effects <- glmer(goal ~ arenaAdjustedShotDistance + shotAngleAdjusted +
                          shotGeneratedRebound + shotRush +
                          (1|shooterName) + (1|goalieNameForShot),
                        data = shots_5v5, family = "binomial")
|
ddab5b201dc41e8a11a7714b56d22fb1db2f8b7d | e9d90b9b16a7ce9571f9d37cfb289e2791dbdd67 | /R/bank.R | 297699972c8913a4ac2dfe389cb54b94de16edd7 | [
"MIT"
] | permissive | Jgosr/multiserver | 0eec73a2656f7892fa25611a116a04c4f98df393 | 1cfc82b0e5484f48f586f8f839620c56e2fe6538 | refs/heads/master | 2023-09-06T00:18:11.468077 | 2021-10-15T09:20:08 | 2021-10-15T09:20:08 | 415,495,012 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 301 | r | bank.R | #' Arrival time and service duration of customers
#'
#' A dataset containing the arrival time and service duration of each customer.
#'
#' @format A data frame with 100 rows and 2 variables:
#' \describe{
#'   \item{arrival_time}{Arrival time of customer}
#'   \item{service_time}{duration of service}
#'   ...
#' }
"bank"
|
527a98b9c22ea4dd37750220ecabe9ac27c1d21a | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query49_query48_1344n/query49_query48_1344n.R | 21a48484996ea56b24ea1a4c403b2229ae4eba3a | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 73 | r | query49_query48_1344n.R | 8b15f16c9c726c1fedaac9b72204fe86 query49_query48_1344n.qdimacs 8207 46574 |
64e601ef35029f8aa7138e7d1b80dc43187a191d | 184d33fbe6d0ab73a260d0db9d3849df00d33786 | /rcmdr.temis/man/specificTermsDlg.rd | 6018689f65a432217fd2853a65f76744b5ab50c6 | [] | no_license | nalimilan/R.TeMiS | 65660d9fbe4c8ca7253aeba5571eab4445736c99 | 3a8398038595807790087c36375bb26417ca606a | refs/heads/master | 2023-04-30T18:04:49.721122 | 2023-04-25T19:45:04 | 2023-04-25T19:45:04 | 81,315,737 | 25 | 7 | null | 2020-06-29T21:45:06 | 2017-02-08T10:07:16 | C | UTF-8 | R | false | false | 2,348 | rd | specificTermsDlg.rd | \name{specificTermsDlg}
\alias{specificTermsDlg}
\title{List terms specific to a document or level}
\description{List terms most associated (positively or negatively) with each document or each
of a variable's levels.}
\details{Specific terms reported here are those whose observed frequency in the document or level has
the lowest probability under an hypergeometric distribution, based on their global frequencies
in the corpus and on the number of occurrences in the document or variable level considered.
The positive or negative character of the association is visible from the sign of the t value,
or by comparing the value of the \dQuote{\% Term/Level} column with that of the \dQuote{Global \%}
column.
All terms with a probability below the value chosen using the first slider are reported, ignoring
terms with fewer occurrences in the whole corpus than the value of the second slider (these terms
can often have a low probability but are too rare to be of interest). The last slider allows limiting
the number of terms that will be shown for each level.
The result is a list of matrices, one for each level of the chosen variable, with seven columns:
\describe{
\item{\dQuote{\% Term/Level}:}{the percent of the term's occurrences in all terms occurrences in the level.}
\item{\dQuote{\% Level/Term}:}{the percent of the term's occurrences that appear in the level
(rather than in other levels).}
\item{\dQuote{Global \%}:}{the percent of the term's occurrences in all terms occurrences in the corpus.}
\item{\dQuote{Level}:}{the number of occurrences of the term in the level (\dQuote{internal}).}
\item{\dQuote{Global}:}{the number of occurrences of the term in the corpus.}
\item{\dQuote{t value}:}{the quantile of a normal distribution corresponding the probability \dQuote{Prob.}.}
\item{\dQuote{Prob.}:}{the probability of observing such an extreme (high or low) number of occurrences of
the term in the level, under an hypergeometric distribution.}
}
}
\seealso{\code{\link{specificTerms}}, \code{\link{setCorpusVariables}}, \code{\link{meta}},
\code{\link{restrictTermsDlg}}, \code{\link{termsDictionary}} }
|
5206ec9585445422d95eab6fbba3cb31b4e6db83 | 5095e821a79abf1784b4b3909f183621139af4b7 | /cachematrix.R | 2cb1484f13fb46a815b0b5aa8ba1cecadd7ddd53 | [] | no_license | efrainhr/ProgrammingAssignment2 | 1e424eb471e1af399d36c28e092e581700993c93 | ef053ebbe11f8165fcc7672f65428c365ddb4f88 | refs/heads/master | 2020-04-30T12:14:18.507554 | 2015-04-26T18:29:58 | 2015-04-26T18:29:58 | 34,590,360 | 0 | 0 | null | 2015-04-25T23:32:57 | 2015-04-25T23:32:56 | null | UTF-8 | R | false | false | 1,581 | r | cachematrix.R | ## This is an adapted version of the vector-mean function provided by Prof Peng.
## This R function was written as a requirement of the R Programming course from
## Coursera's Data Science Specialization (programming assignment 2). The
## purpose of the exercise was to develop a function that could cache the
## inverse of a matrix, allowing for improved code performance, i.e. faster
## matrix inverse operations.
## Sets up a list that allows for the caching of the inverse of a matrix.
### The makeCacheMatrix function makes a special ``vector'' (list) that enables
### the caching of the x-matrix inverse. The list contains functions that: (1)
### set the matrix; (2) get the matrix; (3) set the inverse of the matrix; and
### (4) get the inverse of the matrix.
## Construct a cache-enabled matrix wrapper.  The returned list exposes four
## closures that share this function's environment:
##   set(y)     -- replace the stored matrix and drop any cached inverse
##   get()      -- return the stored matrix
##   setinv(v)  -- store a computed inverse in the cache
##   getinv()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    list(
        set = function(y) {
            x <<- y
            cached_inverse <<- NULL  # matrix changed; invalidate the cache
        },
        get = function() x,
        setinv = function(value) cached_inverse <<- value,
        getinv = function() cached_inverse
    )
}
## Return a matrix that is the inverse of 'x'
### First try to get the cached inverse of the matrix. If inverse has been
### calculated, the cache is accessed and returned. Otherwise, the inverse is
### calculated and then cached, which takes longer than accessing a cached
### result.
## Return the inverse of the matrix held by a makeCacheMatrix wrapper `x`.
## On a cache hit the stored inverse is returned (with a message); otherwise
## the inverse is computed with solve(), cached via x$setinv(), and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x = matrix(), ...) {
    cached <- x$getinv()
    if (is.null(cached)) {
        ## Cache miss: compute, store, and return the fresh inverse.
        inv <- solve(x$get(), ...)
        x$setinv(inv)
        return(inv)
    }
    message("getting cached data")
    cached
}
|
281a4b4603826b4b6726ff541418bd55c033b2a4 | 93356416c6941126c3627edccabb08696d10dd05 | /phospho_network/regression/plot/overlap_multi_single_model.R | c4df889d204cf07088c7c0dc5eeeee7becc3b9f8 | [] | no_license | ding-lab/phosphoproteomics | 22c9a69127e7397c54dddba044d4588b495f21c5 | 00538a56143be08c0fffae8df6dd54f2bfdd4734 | refs/heads/master | 2021-03-19T06:18:15.490109 | 2019-04-25T21:36:45 | 2019-04-25T21:36:45 | 90,892,479 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,257 | r | overlap_multi_single_model.R | # Yige Wu @ WashU 2017 Feb
# choose kinase/phosphotase, significance level, outlier threshold and least sample number-------------------------
protein <- "kinase"
# protein <- "phosphotase"
# library -----------------------------------------------------------------
library(stringr)
library(ggplot2)
library(reshape)
library(grid)
require(plyr)
# # for working on Kuan's mac
# baseD = "/Users/khuang/Box\ Sync/PhD/proteogenomics/CPTAC_pan3Cancer/"
# for working on Yige's mac
baseD = "/Users/yigewu/Box\ Sync/"
setwd(paste(baseD,"pan3can_analysis/phospho_network",sep=""))
source("../pan3can_aes.R") # aes for general purposes; it should be one directory out of the working directory
# input processed data for single-model and multi-model -----------------------------------
table_single <- read.delim(paste(baseD,"pan3can_shared_data/analysis_results/tables/",protein,"_substrate_regression_trans_edited.txt",sep = ""))
table_multi <- read.delim(paste(baseD,"pan3can_shared_data/analysis_results/tables/multi_",protein,"_substrate_regression.txt",sep = ""))
# qqplot ------------------------------------------------------------------
# Merge the non-self (trans) regression pairs of the multi-cancer and
# single-cancer result tables for one cancer type.
#
# Reads the globals table_multi and table_single (loaded above).
#
# Args:
#   c: cancer type code (e.g. "BRCA"), matched against the Cancer column.
#      (Name kept for backward compatibility with named calls; it shadows
#      base::c inside the body, but R's function lookup still resolves the
#      c(...) calls below to base::c.)
# Returns:
#   A data.frame of pairs present in both tables, with multi-model columns
#   suffixed ".x" and single-model columns suffixed ".y".
multi_single_overlap <- function(c) {
  # Bug fix: the original indexed table_multi without the trailing comma
  # (table_multi[cond]), which subsets *columns* of a data.frame by a
  # recycled logical vector instead of filtering rows.
  table_multi_temp <- table_multi[table_multi$Cancer==c & !table_multi$self,]
  table_can_temp <- table_single[table_single$Cancer == c & !table_single$self,]
  merge_id <- c("KINASE","SUBSTRATE","SUB_MOD_RSD","pair")
  merge_cols <- c(merge_id,"scale_FDR","FDR_pho_kin","coef_pho_kin","model","Size")
  table_overlap <- merge(table_multi_temp[,merge_cols],table_can_temp[,merge_cols], by = merge_id)
  return(table_overlap)
}
# For each cancer/direction combination, merge multi- vs single-model results
# and scatter their -log10(FDR) values against each other, labelling pairs
# significant only in the single model.
#for (cancer in c("BRCA","OV")) {
for (cancer in "BRCA") {
  for (iscis in "trans") { # loop around cis and trans
    if (iscis == "trans") {
      var <- "pho_kin"
    }
    fdr_var <- paste("FDR_",var,sep = "")
    coef_var <- paste("coef_",var,sep = "")
    # Row-filter both tables to this cancer and direction.
    table_multi_temp <- table_multi[table_multi$Cancer==cancer & table_multi$SELF == iscis,]
    table_can_temp <- table_single[table_single$Cancer == cancer & table_single$SELF == iscis,]
    # -log10 transform so larger = more significant on the plot axes.
    table_multi_temp$scale_FDR <- -log10(table_multi_temp$FDR_pho_kin)
    table_can_temp$scale_FDR <- -log10(table_can_temp$FDR_pho_kin)
    merge_id <- c("KINASE","SUBSTRATE","SUB_MOD_RSD","pair")
    merge_cols <- c(merge_id,"scale_FDR","FDR_pho_kin","coef_pho_kin","model","Size")
    # .x suffix = multi-model columns, .y suffix = single-model columns.
    table_overlap <- merge(table_multi_temp[,merge_cols],table_can_temp[,merge_cols], by = merge_id)
    cat(paste("multi-model overlap with single-model ",cancer," ",iscis," regression result:\n",
              nrow(table_overlap)," kinase:substrate:phosphosite pairs (",
              nrow(table_multi_temp)," PDX pairs vs ",
              nrow(table_can_temp)," BRCA pairs)\n\n",
              sep = ""))
    p = ggplot(table_overlap,aes(x=scale_FDR.x, y=scale_FDR.y))
    p = p + geom_point(alpha=0.05)
    #p = p + geom_text(aes(label= ifelse( ( scale_FDR.x >= -log10(0.01) & scale_FDR.y >= -log10(0.01)) , pair, NA)),size=2,alpha=0.5)
    # Label pairs significant in the single model only (y >= cutoff, x below).
    p = p + geom_text(aes(label= ifelse( ( scale_FDR.y >= -log10(0.05) & scale_FDR.x < -log10(0.05) ) , pair, NA)),size=2,alpha=0.2)
    #p = p + geom_text(aes(label= ifelse( top & scale_FDR.x >= -log10(0.05) , pair, NA)),size=2,alpha=0.5)
    p = p + theme_bw() #+ theme_nogrid()
    p = p + geom_abline(slope=1)
    p = p + theme(axis.title = element_text(size=10), axis.text.x = element_text(colour="black", size=6,angle=90, vjust=0.5), axis.text.y = element_text(colour="black", size=10))#element_text(colour="black", size=14))
    p = p + labs(x = "-log(FDR) in multi-model", y="-log(FDR) in single-model")
    p = p + xlim(0,20) + ylim(0,20)
    p
  }
}
# Spot-check individual phosphosites, then list pairs significant in the
# single model but not the multi model.
# NOTE(review): `table_overlap` and `cancer` are leftovers from the loop
# above, and the significance cutoff `sig` is not defined in this chunk —
# confirm it is set earlier in the script.
# substrate <- "APC";rsd <- "S2129"
# substrate <- "RAF1";rsd <- "S29"
substrate <- "NPM1";rsd <- "S70"
old <- table_single[table_single$SUBSTRATE==substrate & table_single$SUB_MOD_RSD==rsd & !table_single$self,]
new <- table_multi[table_multi$SUBSTRATE==substrate & table_multi$SUB_MOD_RSD==rsd,]
# pairs below the cutoff in the single model (.y) but not the multi model (.x)
change10 <- table_overlap[table_overlap$FDR_pho_kin.y<sig & table_overlap$FDR_pho_kin.x>=sig,]
phosphosites10 <- unique(change10[,c("SUBSTRATE","SUB_MOD_RSD")])
substrates <- as.vector(phosphosites10$SUBSTRATE)
rsds <- as.vector(phosphosites10$SUB_MOD_RSD)
# NOTE(review): 1:nrow(...) iterates c(1, 0) when phosphosites10 is empty;
# seq_len(nrow(phosphosites10)) would be safe.
for (i in 1:nrow(phosphosites10)) {
  substrate <- substrates[i]
  rsd <- rsds[i]
  #table_old <- table_single[table_single$SUBSTRATE==substrate & table_single$SUB_MOD_RSD==rsd & !table_single$self,]
  table_new <- table_multi[table_multi$SUBSTRATE==substrate & table_multi$SUB_MOD_RSD==rsd & table_multi$Cancer==cancer,]
  # kinases called significant for this phosphosite in the multi-model results
  kin_sig <- table_new$KINASE[table_new$FDR_pho_kin<sig]
  if (length(kin_sig) > 0) {
    # print(phosphosites10[i,])
    for (k in kin_sig) {
      # all trans substrates of this kinase found in the single-model table
      k_subs <- unique(table_single$SUBSTRATE[table_single$KINASE==k & !table_single$self])
      print(k_subs)
      # if (length(k_subs)==1) {
      #   cat(k,substrate,rsd,sep = ":")
      # }
    }
  }
}
# NOTE(review): this loop is a near-duplicate of the one above; the only
# difference is the additional (unreached, since iscis is always "trans")
# cis branch setting var <- "pro_kin". Consider de-duplicating.
#for (cancer in c("BRCA","OV")) {
for (cancer in "BRCA") {
  for (iscis in "trans") { # loop around cis and trans
    if (iscis == "cis") {
      var <- "pro_kin"
    }
    if (iscis == "trans") {
      var <- "pho_kin"
    }
    fdr_var <- paste("FDR_",var,sep = "")
    coef_var <- paste("coef_",var,sep = "")
    # subset each result table to this cancer / self-vs-trans combination
    table_multi_temp <- table_multi[table_multi$Cancer==cancer & table_multi$SELF == iscis,]
    table_can_temp <- table_single[table_single$Cancer == cancer & table_single$SELF == iscis,]
    table_multi_temp$scale_FDR <- -log10(table_multi_temp$FDR_pho_kin)
    table_can_temp$scale_FDR <- -log10(table_can_temp$FDR_pho_kin)
    merge_id <- c("KINASE","SUBSTRATE","SUB_MOD_RSD","pair")
    merge_cols <- c(merge_id,"scale_FDR","FDR_pho_kin","coef_pho_kin","model","Size")
    # pairs present in both tables; .x = multi-model, .y = single-model
    table_overlap <- merge(table_multi_temp[,merge_cols],table_can_temp[,merge_cols], by = merge_id)
    cat(paste("multi-model overlap with single-model ",cancer," ",iscis," regression result:\n",
              nrow(table_overlap)," kinase:substrate:phosphosite pairs (",
              nrow(table_multi_temp)," PDX pairs vs ",
              nrow(table_can_temp)," BRCA pairs)\n\n",
              sep = ""))
    p = ggplot(table_overlap,aes(x=scale_FDR.x, y=scale_FDR.y))
    p = p + geom_point(alpha=0.05)
    #p = p + geom_text(aes(label= ifelse( ( scale_FDR.x >= -log10(0.01) & scale_FDR.y >= -log10(0.01)) , pair, NA)),size=2,alpha=0.5)
    p = p + geom_text(aes(label= ifelse( ( scale_FDR.y >= -log10(0.05) & scale_FDR.x < -log10(0.05) ) , pair, NA)),size=2,alpha=0.2)
    #p = p + geom_text(aes(label= ifelse( top & scale_FDR.x >= -log10(0.05) , pair, NA)),size=2,alpha=0.5)
    p = p + theme_bw() #+ theme_nogrid()
    p = p + geom_abline(slope=1)
    p = p + theme(axis.title = element_text(size=10), axis.text.x = element_text(colour="black", size=6,angle=90, vjust=0.5), axis.text.y = element_text(colour="black", size=10))#element_text(colour="black", size=14))
    p = p + labs(x = "-log(FDR) in multi-model", y="-log(FDR) in single-model")
    p = p + xlim(0,20) + ylim(0,20)
    # NOTE(review): bare `p` inside a for loop does not auto-print the plot
    p
  }
}
# NOTE(review): exact duplicate of the spot-check/change10 block earlier in
# this file; consider removing one copy. As above, `sig`, `cancer` and
# `table_overlap` must already exist in the workspace.
# substrate <- "APC";rsd <- "S2129"
# substrate <- "RAF1";rsd <- "S29"
substrate <- "NPM1";rsd <- "S70"
old <- table_single[table_single$SUBSTRATE==substrate & table_single$SUB_MOD_RSD==rsd & !table_single$self,]
new <- table_multi[table_multi$SUBSTRATE==substrate & table_multi$SUB_MOD_RSD==rsd,]
# pairs below the cutoff in the single model (.y) but not the multi model (.x)
change10 <- table_overlap[table_overlap$FDR_pho_kin.y<sig & table_overlap$FDR_pho_kin.x>=sig,]
phosphosites10 <- unique(change10[,c("SUBSTRATE","SUB_MOD_RSD")])
substrates <- as.vector(phosphosites10$SUBSTRATE)
rsds <- as.vector(phosphosites10$SUB_MOD_RSD)
# NOTE(review): 1:nrow(...) misbehaves on an empty table; prefer seq_len()
for (i in 1:nrow(phosphosites10)) {
  substrate <- substrates[i]
  rsd <- rsds[i]
  #table_old <- table_single[table_single$SUBSTRATE==substrate & table_single$SUB_MOD_RSD==rsd & !table_single$self,]
  table_new <- table_multi[table_multi$SUBSTRATE==substrate & table_multi$SUB_MOD_RSD==rsd & table_multi$Cancer==cancer,]
  kin_sig <- table_new$KINASE[table_new$FDR_pho_kin<sig]
  if (length(kin_sig) > 0) {
    # print(phosphosites10[i,])
    for (k in kin_sig) {
      k_subs <- unique(table_single$SUBSTRATE[table_single$KINASE==k & !table_single$self])
      print(k_subs)
      # if (length(k_subs)==1) {
      #   cat(k,substrate,rsd,sep = ":")
      # }
    }
  }
}
708b96832b23e590d4c8309d575ed66de558f2e0 | 9e34d50d9ebcc34732a5aa987966f080cbb980dc | /plot1.R | 30a2f3037755ec0631ad05f07e2a9fbfaef3369e | [] | no_license | KathrynAlexander/Exploratory_Data_Analysis_Plotting | a8c691a846ebf8ac8d84fea59c236a1b308739b6 | 0f334521a65d4deeb4471aafb96307cbac9faaaf | refs/heads/master | 2022-01-31T02:19:13.244137 | 2019-06-24T00:00:49 | 2019-06-24T00:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,235 | r | plot1.R | ## Course 4 Week 1 Project 1 Plot 1 Histogram
## Data at https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
setwd("C:\\Users\\Kathy\\Desktop\\RRR\\datasciencecoursera\\Course4Week1") ## Set working directory
if (!file.exists("data")) {
dir.create("data")
} ## Create directory of data if it does not exist
## Download file
temp <- tempfile()
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL,destfile = "./data/temp")
powerdata <- read.table(unz("./data/temp", "household_power_consumption.txt"),header = TRUE, sep = ";")
unlink(temp)
##List file and retrieve date downloaded
list.files("./data")
dateDownloaded <- date()
dateDownloaded
## exlore data
is.data.frame(powerdata)
ncol(powerdata)
nrow(powerdata)
row.names(powerdata)
str(powerdata)
head(powerdata)
tail(powerdata)
## Put date in date format
powerdata$Date <- as.Date(powerdata$Date, format="%d/%m/%Y")
## Put time in time format
powerdata$Time <- format(strptime(powerdata$Time, "%H:%M:%S"),"%H:%M:%S")
## Put Global Active Power in correct format
powerdata$Global_active_power <- as.numeric(as.character(powerdata$Global_active_power))
## filter on the dates of interest and combine them in one dataframe
library(dplyr)
powerdata2 <- select(filter(powerdata, ReadingDate == "2007-02-01"),c(Date,Time,Global_active_power,Global_reactive_power,Voltage,Global_intensity,Sub_metering_1,Sub_metering_2,Sub_metering_3))
powerdata3 <- select(filter(powerdata, ReadingDate == "2007-02-02"),c(Date,Time,Global_active_power,Global_reactive_power,Voltage,Global_intensity,Sub_metering_1,Sub_metering_2,Sub_metering_3))
powerdata4 <- rbind(powerdata2,powerdata3)
## Create histogram and plot on screen
hist(powerdata4$Global_active_power, main="Global Active Power",xlab="Global Active Power (kilowatts)", col = "red")
## create histogram and plot to png file
png(file = "plot1.png", width = 480, height = 480, units = "px") ## Open device; create file in working directory
hist(powerdata4$Global_active_power, main="Global Active Power",xlab="Global Active Power (kilowatts)", col = "red") ## Create plot
dev.off() ## Close the file device
|
941b1ef02f32ac95eecda60fa5f87a4777d28256 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rsampletrees/examples/estimateHap.Rd.R | 74a41050504dc5fe2fb8ea727df4d3f49f7a942f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 902 | r | estimateHap.Rd.R | library(Rsampletrees)
### Name: estimateHap
### Title: Estimate and write haplotype probabilities and initial values to
### files
### Aliases: estimateHap
### ** Examples
#\dontrun{
#system.file("Examples/geno_Theta8_Rho8.txt",package="Rsampletrees")
#system.file("Examples/locations_Theta8_Rho8.txt",package="Rsampletrees")
#system.file("Examples/weights-g.txt",package="Rsampletrees")
datname=paste(path.package("Rsampletrees"),"/extdata/geno_Theta8_Rho8.txt",sep="")
locname=paste(path.package("Rsampletrees"),"/extdata/ocations_Theta8_Rho8.txt",sep="")
weightname=paste(path.package("Rsampletrees"),"/extdata/weights-g.txt",sep="")
runpars=newArgs(DataFile=datname, DataType="g", LocationFile=locname, WeightFile="weights-g.txt",
RunName="Test-g",FocalPoint=10000)
runpars=estimateHap(runpars,"EM-hapfreqs",InitialHaploFile="EM-initial.dat",
HaploListFile="EM-known_haplotypes")
#}
|
dc0ee9e45a3940f0992438ba786f88999cfc35c5 | 3aaa4d40f3592f5df07f3cd33b961c9028b8b2ed | /R/src/batch_effects_old.R | 2b0585cfcf2c81f4acedccf945a5d6e6f516d58d | [] | no_license | alzel/kinase_swath | c25132be35a79039536bccf7083152d47a6cea7c | 24a3c0a5d7c6264448c7e3ea4f06bab817c98f8c | refs/heads/master | 2020-12-25T17:24:48.200444 | 2017-07-05T12:57:30 | 2017-07-05T12:57:30 | 39,252,993 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,940 | r | batch_effects_old.R | #!/usr/bin/env Rscript
rm(list=ls())
source("./R/functions.R")
source("./R/boot.R")
plots.list = list()
fun_name = "batch_effects"
## ---- data_load ----
load("./R/objects/peptides.data.RData") # clean.R::prepare_peptides()
load("./R/objects/experiment.map.RData")# load.R::load_batch_map()
load("./R/objects/dates_map.RData") # load.R::load_dates_map()
load("./R/objects/sample.map.RData") # load.R::load_sample_map()
load("./R/objects/sample_exp.map.RData") # load.R::load_sample_map()
load("./R/objects/peptides2orfs.RData")
#peptides.data$batch.exp.n = factor(as.numeric(peptides.data$batch.exp))
## ---- selecting peptides based on spectronaut Q-value
peptides.data$EG.StrippedSequence = factor(peptides.data$EG.StrippedSequence)
peptides.data$R.Label = factor(peptides.data$R.Label)
peptides.peak_sums <- group_by(peptides.data, batch_date, batch, batch.exp.n, R.Label,sample, replicate, EG.StrippedSequence) %>%
dplyr::summarise(count = n(),
signal = FG.TotalPeakArea[1],
EG.Qvalue = EG.Qvalue[1],
FG.PrecursorMz = FG.PrecursorMz[1])
qvalues.stats.batch <- peptides.peak_sums %>% group_by(EG.StrippedSequence, batch.exp.n) %>% dplyr::summarise(qvalue.median = median(EG.Qvalue))
p = ggplot(qvalues.stats.batch, aes(x=batch.exp.n, y=log(qvalue.median))) +
geom_boxplot() +
geom_hline(yintercept = log(0.01))
plots.list = lappend(plots.list, p)
qvalues.stats <- peptides.peak_sums %>% group_by(EG.StrippedSequence) %>% dplyr::summarise(qvalue.median = median(EG.Qvalue))
p = ggplot(qvalues.stats, aes(x = log(qvalue.median))) +
geom_density() +
geom_vline(xintercept = log(0.01))
plots.list = lappend(plots.list, p)
#Qvalue is based on median per batch
#peptides.peak_sums = merge(peptides.peak_sums, qvalues.stats.batch, by = c("R.Label", "EG.StrippedSequence", "batch.exp.n") )
peptides.peak_sums = merge(peptides.peak_sums, qvalues.stats, by = c("EG.StrippedSequence"))
peptides.peak_sums = tbl_df(peptides.peak_sums)
peptides.peak_sums = filter(peptides.peak_sums, qvalue.median <= 0.01)
## ---- transforming data to normal distribution ----
# Log-transform raw peak areas; optionally trim the most extreme T_signal
# values per sample (thr_remove/2 from each tail). With thr_remove = 0 the
# trimmed table is just a copy of the input.
peptides.peak_sums$T_signal = with(peptides.peak_sums, log(signal))
thr_remove = 0 #removing thr_remove/2% from each side of data
peptides.peak_sums.trimmed = peptides.peak_sums
if (thr_remove > 0) {
  message(paste("Removing ", thr_remove," fraction of dataset", sep=""))
  peptides.peak_sums.trimmed = ddply(peptides.peak_sums, .(R.Label),
                                   .fun = function(z) {
                                     # Fix: removed leftover debug assignment `v<<-z`,
                                     # which leaked every per-sample chunk into the global env.
                                     # Keep rows strictly between the lower/upper quantiles.
                                     tmp.data = z[z$T_signal < quantile(z$T_signal, 1 - thr_remove/2) &
                                                    z$T_signal > quantile(z$T_signal, 0 + thr_remove/2),]
                                     return(droplevels(tmp.data))
                                   })
}
# Kolmogorov-Smirnov normality check of log-transformed signal on a random
# subset of 50 samples: for each sample, compare T_signal against a normal
# distribution with the sample's own mean/sd, and record the qq-line
# slope/intercept (same quartile construction as qqline()) plus the KS
# p-value and label-placement coordinates for plotting.
set.seed(123)
peptides.peak_sums.subset = peptides.peak_sums.trimmed[peptides.peak_sums.trimmed$R.Label %in% sample(unique(peptides.peak_sums.trimmed$R.Label),50),]
peptides.peak_sums.subset.norm_stats = ddply(peptides.peak_sums.subset, .(R.Label, batch.exp.n, sample),
                                             .fun = function(z) {
                                               # Fix: removed leftover debug assignment `v<<-z`
                                               # (global-env side effect from development).
                                               tmp.data = z$T_signal
                                               # tmp.data = z$T_signal[z$T_signal < quantile(z$T_signal,0.975) &
                                               #                         z$T_signal > quantile(z$T_signal,0.025)]
                                               tmp.dist = rnorm(n=length(tmp.data), mean=mean(tmp.data), sd=sd(tmp.data))
                                               tmp = ks.test(tmp.data, tmp.dist)
                                               # qq-line through the first and third quartiles
                                               y <- quantile(na.omit(tmp.data), c(0.25, 0.75))
                                               x <- qnorm(c(0.25, 0.75))
                                               slope = diff(y)/diff(x)
                                               int = y[1L] - slope * x[1L]
                                               ret = data.frame(slope = slope,
                                                                intercept = int,
                                                                pval = tmp$p.value,
                                                                x_min = -3,
                                                                y_max = max(tmp.data)
                                               )
                                               return(ret)
                                             })
# Fraction of samples consistent with normality, with a Bonferroni-corrected
# and an uncorrected 0.05 threshold (printed to the console).
1 - sum(peptides.peak_sums.subset.norm_stats$pval < (0.05/length(peptides.peak_sums.subset.norm_stats$pval)))/length(peptides.peak_sums.subset.norm_stats$pval)
1 - sum(peptides.peak_sums.subset.norm_stats$pval < 0.05)/length(peptides.peak_sums.subset.norm_stats$pval)
message("Plotting QQ plots")
# Per-sample QQ plots of T_signal with the fitted qq-line and KS p-value label
p = ggplot(peptides.peak_sums.subset) +
  geom_point(aes(sample=T_signal), alpha=0.1, stat="qq") +
  geom_abline(data=peptides.peak_sums.subset.norm_stats, aes(intercept=intercept, slope=slope)) +
  geom_text(data=peptides.peak_sums.subset.norm_stats, aes(x=x_min, y=y_max, label=round(pval,2))) +
  facet_wrap(~R.Label, scales="free") +
  theme(axis.title=element_text(size=20))
file_name = "qqplots.T_all_samples.png"
file_path = paste(figures_dir, file_name, sep="/")
ggsave(filename=file_path, plot=p, height=8.27, width=11.7)
plots.list = lappend(plots.list, p)
# Matching per-sample density plots of T_signal
p = ggplot(peptides.peak_sums.subset) +
  geom_density(aes(x=T_signal)) +
  #geom_abline(data=peptides.peak_sums.subset.norm_stats, aes(intercept=intercept, slope=slope)) +
  #geom_text(data=peptides.peak_sums.subset.norm_stats, aes(x=x_min, y=y_max, label=round(pval,2))) +
  facet_wrap(~R.Label, scales="free") +
  theme(axis.title=element_text(size=20), aspect.ratio = 1)
file_name = "density_plots.T_all_samples.png"
file_path = paste(figures_dir, file_name, sep="/")
ggsave(filename=file_path, plot=p, height=8.27, width=11.7)
plots.list = lappend(plots.list, p)
## QC statistics CV
message("Performing QC stats")
# Coefficient of variation per peptide within each batch/sample group, on the
# raw and log-transformed scales.
peptides.peak_stats = group_by(peptides.peak_sums, batch.exp.n, sample, EG.StrippedSequence) %>%
  dplyr::summarise(count = n(),
                   mean.signal = mean(signal),
                   CV = sd(signal)/mean.signal,
                   mean.T = mean(T_signal),
                   CV.T = sd(T_signal)/mean.T)
# CV is only meaningful with at least 2 observations
to_plot = filter(peptides.peak_stats, count>=2)
p = ggplot(to_plot, aes(x=as.numeric(EG.StrippedSequence), y=CV.T)) +
  geom_point(alpha=0.1) +
  facet_wrap(~sample, scales="free") +
  ggtitle(paste("Grouped by", paste(attr(to_plot, which="vars"), collapse="."))) +
  stat_smooth()
plots.list = lappend(plots.list, p)
# NOTE(review): this file name is reused by the next ggsave below, so this
# faceted CV.T figure gets overwritten on disk (it survives in plots.list).
file_name = "CV_all_samples.png"
file_path = paste(figures_dir, file_name, sep="/")
ggsave(filename=file_path, plot=p, height=8.27, width=11.7)
p = ggplot(to_plot, aes(x=as.numeric(EG.StrippedSequence), y=CV)) +
  geom_point(alpha=0.1) +
  ggtitle(paste("Grouped by", paste(attr(to_plot, which="vars"), collapse="."))) +
  stat_smooth() +
  theme(text = element_text(size=20))
plots.list = lappend(plots.list, p)
file_name = "CV_all_samples.png"
file_path = paste(figures_dir, file_name, sep="/")
ggsave(filename=file_path, plot=p, height=8.27, width=11.7)
# Same plot on the log scale (CV.T), saved under its own file name
p = ggplot(to_plot, aes(x=as.numeric(EG.StrippedSequence), y=CV.T)) +
  geom_point(alpha=0.1) +
  ggtitle(paste("Grouped by", paste(attr(to_plot, which="vars"), collapse="."))) +
  stat_smooth() +
  theme(text = element_text(size=20))
plots.list = lappend(plots.list, p)
file_name = "CV.T_all_samples.png"
file_path = paste(figures_dir, file_name, sep="/")
ggsave(filename=file_path, plot=p, height=8.27, width=11.7)
# ---- using ComBat to correct for Batch effects ----
message("correcting for batch effects")
# Peptide x sample matrix of log signals; column names encode
# sample_replicate_batchdate_expbatch_batch and are parsed back out below.
peptides.df = dcast(data=peptides.peak_sums.trimmed, formula=EG.StrippedSequence~sample+replicate+batch_date+batch.exp.n+batch, value.var="T_signal")
peptides.matrix = as.matrix(peptides.df[,-1])
rownames(peptides.matrix) = peptides.df$EG.StrippedSequence
# Recover the batch annotations from the composite column names
sp_batches = sub(x=colnames(peptides.matrix), pattern=".*?_([A-Za-z0-9]+)$", perl=T, replacement="\\1")
exp_batches = sub(x=colnames(peptides.matrix), pattern=".*?_([A-Za-z0-9]+)_[A-Za-z0-9]+$", perl=T, replacement="\\1")
batch_date = sub(x=colnames(peptides.matrix), pattern=".*?_([0-9]+_[0-9]+_[0-9]+)_[A-Za-z0-9]+_[A-Za-z0-9]+$", perl=T, replacement="\\1")
sample_name = sub(x=colnames(peptides.matrix), pattern="(.*?)_[0-9]+_[0-9]+_[0-9]+_[A-Za-z0-9]+_[A-Za-z0-9]+$", perl=T, replacement="\\1")
pheno = data.frame(exp_batches = exp_batches,
                   sample_name = sample_name,
                   batch_date = batch_date)
rownames(pheno) = colnames(peptides.matrix)
pheno$ORF = droplevels(sample_exp.map$ORF[match(pheno$sample_name, sample_exp.map$SampleName)])
# Hard-coded correction for one mislabelled sample
pheno$ORF[pheno$sample_name == "KL_Try_027_c"] = "WT"
pheno$exp_batches[pheno$sample_name == "KL_Try_027_c"] = 5
pheno = droplevels(pheno[which(!is.na(pheno$ORF)),])
# Keep only batch_date/ORF combinations observed at least twice, restricted
# to batch dates that contain a "mix" control sample
tmp.factor_size = ddply(pheno, .(batch_date, ORF), summarise, factor_size = length(ORF))
tmp.factor_size$batch_date.ORF = paste(tmp.factor_size$batch_date, tmp.factor_size$ORF, sep=".")
pheno$batch_date.ORF = paste(pheno$batch_date, pheno$ORF, sep=".")
pheno = droplevels(pheno[pheno$batch_date.ORF %in% tmp.factor_size$batch_date.ORF[tmp.factor_size$factor_size >=2],])
pheno = droplevels(pheno[pheno$batch_date %in% levels(droplevels(pheno$batch_date[grep(pattern="mix", pheno$sample_name, ignore.case=T)])),])
peptides.matrix.f = peptides.matrix[,match(rownames(pheno), colnames(peptides.matrix))]
# saving raw data ##########################
message("Tidying up batch raw data", appendLF=F)
# NOTE(review): id.vars=rownames passes the *function* rownames to melt();
# for a matrix melt() works off dimnames anyway -- confirm this argument is
# intentional.
peptides.long = melt(peptides.matrix.f, id.vars=rownames)
names(peptides.long) = c("EG.StrippedSequence", "variable", "value")
peptides.long = peptides.long %>% extract(variable,
                                          into=c("R.Label", "batch_date", "batch.exp.n", "batch"),
                                          regex="(.*?)_([0-9]+_[0-9]+_[0-9]+)_([A-Za-z0-9]+)_([A-Za-z0-9]+)$")
file_name = "peptides.long.RData"
file_path = paste(output_dir, file_name, sep="/")
save(peptides.long, file=file_path)
message("...Done")
#############################################
######## NOTE(review): the script effectively restarts here (rm(list=ls()))
######## and switches from peptide-level to protein-level processing.
rm(list=ls())
source("./R/functions.R")
source("./R/boot.R")
plots.list = list()
fun_name = "batch_effects"
load("./R/objects/peptides.peak_sums.trimmed.RData")
load("./R/objects/protein_annotations.RData")
load("./R/objects/peptides.cor.stats.top.RData")
peptides.long = peptides.peak_sums.trimmed
# Keep only the top-3 best-correlating peptides per protein, then average
# their log signals into one value per protein and sample.
peptides.selected = tbl_df(droplevels(filter(peptides.cor.stats.top, top == "3")))
peptides.long.selected = tbl_df(peptides.long[peptides.long$EG.StrippedSequence %in% peptides.selected$EG.StrippedSequence,])
peptides.long.selected$ORF = peptides.selected$ORF[match(peptides.long.selected$EG.StrippedSequence, peptides.selected$EG.StrippedSequence)]
proteins.long = peptides.long.selected %>% group_by(ORF, R.Label, batch_date, batch.exp.n, batch) %>% summarise(mean_signal = mean(T_signal, na.rm=T),
                                                                                                                sum_signal = sum(T_signal, na.rm=T))
proteins.df = dcast(data=proteins.long, formula=ORF~R.Label+batch_date+batch.exp.n+batch, value.var="mean_signal")
proteins.matrix = proteins.df[,-1]
rownames(proteins.matrix) = proteins.df$ORF
# Parse the composite column names back into annotation columns
pattern.p = "(.*?)_([0-9]+_[0-9]+_[0-9]+|[A-Za-z]?|[A-Za-z]+)_([A-Za-z0-9]+)_([A-Za-z0-9]+)$"
matches = stringr::str_match_all(pattern=pattern.p, colnames(proteins.matrix))
stopifnot(sum(lapply(matches,length)!=0) == ncol(proteins.matrix)) # every column must parse
pheno = data.frame(matrix(unlist(matches), ncol=length(matches[[1]]), byrow=T))
colnames(pheno) = c("name", "R.Label", "batch_date", "batch.exp.n", "batch" )
rownames(pheno) = colnames(proteins.matrix)
pheno$ORF = droplevels(sample_exp.map$ORF[match(pheno$R.Label, sample_exp.map$SampleName)])
# Hard-coded correction for one mislabelled sample
pheno$ORF[pheno$R.Label == "KL_Try_027_c"] = "WT"
pheno$batch.exp.n[pheno$R.Label == "KL_Try_027_c"] = 5
pheno = droplevels(pheno[which(!is.na(pheno$ORF)),])
pheno$group = pheno$batch.exp.n #grouping variable to estimate batch effects
# Keep only group/ORF combinations observed at least twice, restricted to
# groups that contain a "mix" control sample
tmp.factor_size = ddply(pheno, .(group, ORF), summarise, factor_size = length(ORF))
tmp.factor_size$group.ORF = paste(tmp.factor_size$group, tmp.factor_size$ORF, sep=".")
pheno$group.ORF = paste(pheno$group, pheno$ORF, sep=".")
pheno = droplevels(pheno[pheno$group.ORF %in% tmp.factor_size$group.ORF[tmp.factor_size$factor_size >=2],])
pheno = droplevels(pheno[pheno$group %in% levels(droplevels(pheno$group[grep(pattern="mix", pheno$R.Label, ignore.case=T)])),])
proteins.matrix.f = proteins.matrix[,match(rownames(pheno), colnames(proteins.matrix))]
# Model matrix preserving the knockout (ORF) effect while ComBat removes the
# batch (group) effect.
mod = model.matrix(~as.factor(ORF), data=pheno)
# DESeq size-factor normalization on the linear scale, then back to log2
tmp.size_factors = DESeq::estimateSizeFactorsForMatrix(exp(proteins.matrix.f))
proteins.matrix.f.deseq = log(exp(proteins.matrix.f)/tmp.size_factors, base=2)
#proteins.matrix.f.deseq = log(exp(proteins.matrix.f), base=2)
# ComBat on the raw and on the DESeq-normalized matrices
proteins.matrix.f.combat = ComBat(na.omit(proteins.matrix.f), batch=pheno$group, mod=mod)
proteins.matrix.f.deseq.combat = ComBat(na.omit(proteins.matrix.f.deseq), batch=pheno$group, mod=mod)
file_name = "proteins.matrix.f.combat.RData"
file_path = paste(output_dir, file_name, sep="/")
save(proteins.matrix.f.combat,file=file_path)
file_name = "proteins.matrix.f.deseq.combat.RData"
file_path = paste(output_dir, file_name, sep="/")
save(proteins.matrix.f.deseq.combat,file=file_path)
# pca
message("plotting PCA results")
# Side-by-side PCA of samples before correction, after ComBat, and after
# DESeq+ComBat; points are coloured by batch date and labelled by group.
# pca with quantile normalization
file_name = "PCA_batch_effects.png"
file_path = paste(figures_dir, file_name, sep="/")
png(file_path, width=297, height=210, units="mm", res=150)
par(pty="s", mfrow=c(1,3), cex=0.75)
pca = prcomp(t(proteins.matrix.f[complete.cases(proteins.matrix.f),]), scale.=T)
# percentage of variance explained by PC1/PC2 for the axis labels
x_var = round(pca$sdev[1]^2/sum(pca$sdev^2)*100,2)
y_var = round(pca$sdev[2]^2/sum(pca$sdev^2)*100,2)
plot(pca$x[,1], pca$x[,2], col=pheno$batch_date, pch=16, main="Before adjustments for batch effects",
     xlab=paste("PC1,", x_var),
     ylab=paste("PC2,", y_var))
text(pca$x[,1], pca$x[,2], labels=pheno$group, cex=0.66)
pca = prcomp(t(proteins.matrix.f.combat), scale.=T)
x_var = round(pca$sdev[1]^2/sum(pca$sdev^2)*100,2)
y_var = round(pca$sdev[2]^2/sum(pca$sdev^2)*100,2)
plot(pca$x[,1], pca$x[,2], col=pheno$batch_date, pch=16, main="After adjustments for batch effects",
     xlab=paste("PC1,", x_var),
     ylab=paste("PC2,", y_var))
text(pca$x[,1], pca$x[,2], labels=pheno$group, cex=0.66)
pca = prcomp(t(proteins.matrix.f.deseq.combat), scale.=T)
x_var = round(pca$sdev[1]^2/sum(pca$sdev^2)*100,2)
y_var = round(pca$sdev[2]^2/sum(pca$sdev^2)*100,2)
plot(pca$x[,1], pca$x[,2], col=pheno$batch_date, pch=16, main="After normalization and adjustments for batch effects",
     xlab=paste("PC1,", x_var),
     ylab=paste("PC2,", y_var))
text(pca$x[,1], pca$x[,2], labels=pheno$group, cex=0.66)
# capture the base-graphics figure so it can be re-emitted with the others
p = recordPlot()
plots.list = lappend(plots.list, p)
dev.off()
# dendrograms
# Hierarchical clustering of samples before vs after batch correction;
# labels are the batch groups, so good correction should stop samples from
# clustering by group.
file_name = "Clustering_batch_effects.png"
file_path = paste(figures_dir, file_name, sep="/")
png(file_path, width=297, height=210, units="mm", res=150)
resetPar()
par(mfrow=c(2,1))
h = hclust(dist(scale(t(proteins.matrix.f))))
plot(h, labels=pheno$group, cex=0.5, main="Before batch correction")
h = hclust(dist(scale(t(proteins.matrix.f.deseq.combat))))
plot(h, labels=pheno$group, cex=0.5, main="After batch correction")
p = recordPlot()
plots.list = lappend(plots.list, p)
dev.off()
#tidying batch corrected data
message("Tidying up batch corrected data", appendLF=F)
# Melt the DESeq+ComBat protein matrix into long format and split the
# composite column names back into sample/batch annotations.
# Bug fixes: (1) ComBat returns a matrix, and `$<-` is invalid on an atomic
# matrix, so convert to a data.frame before adding the ORF column;
# (2) this table is named *deseq*.combat but the original melted
# proteins.matrix.f.combat -- use the DESeq-normalized+ComBat matrix.
tmp.wide = as.data.frame(proteins.matrix.f.deseq.combat)
tmp.wide$ORF = rownames(proteins.matrix.f.deseq.combat)
proteins.deseq.combat.long = melt(tmp.wide, id.vars="ORF")
names(proteins.deseq.combat.long) = c("ORF", "variable", "value")
proteins.deseq.combat.long = proteins.deseq.combat.long %>% extract(variable,
                                                      into=c("R.Label", "batch_date", "batch.exp.n", "batch"),
                                                      regex="(.*?)_([0-9]+_[0-9]+_[0-9]+|[A-Za-z]?|[A-Za-z]+)_([A-Za-z0-9]+)_([A-Za-z0-9]+)$")
# Everything except the measurement becomes a factor
col_names <- names(proteins.deseq.combat.long)[names(proteins.deseq.combat.long) != "value"]
proteins.deseq.combat.long[,col_names] <- lapply(proteins.deseq.combat.long[,col_names] , factor)
proteins.deseq.combat.long = tbl_df(proteins.deseq.combat.long)
file_name = "proteins.deseq.combat.long.RData"
file_path = paste(output_dir, file_name, sep="/")
save(proteins.deseq.combat.long, file=file_path)
message("...Done")
# Same long-format tidy-up for the ComBat-only (non-DESeq) matrix.
# Bug fix: ComBat returns a matrix, and `$<-` is invalid on an atomic
# matrix; convert to a data.frame before adding the ORF column.
tmp.wide = as.data.frame(proteins.matrix.f.combat)
tmp.wide$ORF = rownames(proteins.matrix.f.combat)
proteins.combat.long = melt(tmp.wide, id.vars="ORF")
names(proteins.combat.long) = c("ORF", "variable", "value")
proteins.combat.long = proteins.combat.long %>% extract(variable,
                                                      into=c("R.Label", "batch_date", "batch.exp.n", "batch"),
                                                      regex="(.*?)_([0-9]+_[0-9]+_[0-9]+|[A-Za-z]?|[A-Za-z]+)_([A-Za-z0-9]+)_([A-Za-z0-9]+)$")
# Everything except the measurement becomes a factor
col_names <- names(proteins.combat.long)[names(proteins.combat.long) != "value"]
proteins.combat.long[,col_names] <- lapply(proteins.combat.long[,col_names] , factor)
proteins.combat.long = tbl_df(proteins.combat.long)
file_name = "proteins.combat.long.RData"
file_path = paste(output_dir, file_name, sep="/")
save(proteins.combat.long, file=file_path)
message("...Done")
# Long-format tidy-up of the *uncorrected* protein table, for comparison.
# (proteins.matrix.f is still a data.frame here, so `$<-` is valid.)
tmp.wide = proteins.matrix.f
tmp.wide$ORF = rownames(proteins.matrix.f)
proteins.long = melt(tmp.wide, id.vars="ORF")
names(proteins.long) = c("ORF", "variable", "value")
proteins.long = proteins.long %>% extract(variable,
                                          into=c("R.Label", "batch_date", "batch.exp.n", "batch"),
                                          regex="(.*?)_([0-9]+_[0-9]+_[0-9]+|[A-Za-z]?|[A-Za-z]+)_([A-Za-z0-9]+)_([A-Za-z0-9]+)$")
# Everything except the measurement becomes a factor
col_names <- names(proteins.long)[names(proteins.long) != "value"]
proteins.long[,col_names] <- lapply(proteins.long[,col_names] , factor)
proteins.long = tbl_df(proteins.long)
file_name = "proteins.long.RData"
file_path = paste(output_dir, file_name, sep="/")
save(proteins.long, file=file_path)
message("...Done")
# Distribution checks on the corrected data: per-sample histograms with a
# fitted normal overlay, plus before/after boxplots by batch.
set.seed(123)
toSelect = droplevels(sample(size=20 , x=unique(proteins.deseq.combat.long$R.Label)))
toPlot = proteins.deseq.combat.long[proteins.deseq.combat.long$R.Label %in% toSelect,]
# NOTE(review): this histogram `p` is built but never saved/added to
# plots.list -- confirm whether that is intentional.
p = ggplot(toPlot, aes(x=value)) +
  geom_histogram(aes(y=..density..),
                 breaks=seq(1,10, 0.2),
                 colour="black",
                 fill="white") +
  stat_function(fun=dnorm, args=list(mean=mean(toPlot$value), sd=sd(toPlot$value)))+
  facet_wrap(~R.Label, scales="free")
# Boxplots by experimental batch: uncorrected (p1) vs corrected (p3)
p1 = ggplot(proteins.long, aes(x=batch.exp.n, y=value, fill=batch.exp.n)) +
  geom_boxplot() +
  theme(legend.position="none")
p3 = ggplot(proteins.deseq.combat.long, aes(x=batch.exp.n, y=value, fill=batch.exp.n)) +
  geom_boxplot() +
  theme(legend.position="top")
g = arrangeGrob(p1, p3) # stack the two boxplot panels into one figure
plots.list = lappend(plots.list, g)
cafed3f63616a9f029c1e3c6a216afd739fc5778 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/611_44/rinput.R | 0d71a590d20d4181a9651a6abd79eb042114ea96 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("611_44.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="611_44_unrooted.txt") |
025daf9f30c416cf02e6e87450161d745216ecb4 | acb3220d40b4244819c3515e5e69613dcb710ebc | /DataPartition.R | 8a4059a2ed428338369b9242b4768c409e3264e7 | [] | no_license | IncharaManjunatha/analytics1 | cad0b59a3c252f7ee2e3a5d206b46a3de9f46eb0 | 5df2d9fcf55468af812dcbd791f0c82b1c13fa02 | refs/heads/master | 2020-03-26T20:06:41.913479 | 2018-08-21T18:06:03 | 2018-08-21T18:06:03 | 145,305,301 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 890 | r | DataPartition.R | # partition the given data into train and test data
mtcars
dim(mtcars)
library(caret) # helps partition data for classification
#index-createDataPartition(y=mtcars$am, p=0.7, list=F) # mention probability
index = sample(x=1:nrow(mtcars), size=.7 * nrow(mtcars)) # .7 *32
index
train=mtcars[index,]
test=mtcars[-index,]
nrow(train)
nrow(test)
nrow(train) + nrow(test)
library(olsrr)
fit = lm(mpg~ disp + hp + wt + qsec, data = train)
k = ols_step_all_possible(fit)
plot(k)
k # display combinations of variables with some assumptions
summary(lm(mpg ~ wt, data= train))
summary(lm(mpg ~ wt + hp, data= train))
library(gvlma)
gvmodel= gvlma(fit) # checking for assumptions
gvmodel # assumptions are acceptable or not
summary(fit)
finalmodel = lm(mpg ~ wt + hp, data= train)
summary(finalmodel)
(predictedvalues=predict(finalmodel,ndata = test))
cbind(test$mpg,predictedvalues)
|
15fb819ff9a9df761fdfd48d1f2b1aa59ccf01d1 | 04bf444bf40498ba6672d8558b6aac2e7b2c8031 | /vectors-basic-operations-and-subsetting.R | 3fdd96c5205083ecd7d2a3b180b3b414a0355a87 | [] | no_license | triadicaxis/quickr | dea5bb836dce9ece41c614db002bf7477f0a70e2 | 784723c3ac9a43304257788abcd7d0a2dc2e066a | refs/heads/master | 2020-03-11T11:10:19.392031 | 2019-10-21T14:54:12 | 2019-10-21T14:54:12 | 129,503,198 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 753 | r | vectors-basic-operations-and-subsetting.R | ## A vector sequence can be any data type, and run in both directions and over zero
# Example vectors: integer sequences (ascending, descending, crossing
# zero), short numeric vectors, and character vectors.
v1 <- c(1:10)
v2 <- c(10:1)
v3 <- c(-5:4)
v4 <- c(5:-4)
v5 <- c(1,2)
v6 <- c(-1,4,-7)
v7 <- c("Joe", "Alice", "Bill")
v8 <- 3
v9 <- c("a", "b", "c")
v10 <- c("male", "female")
## Multiply a vector by a constant (works for any operation)
# Arithmetic is vectorized: each operation applies to every element of v1.
v1 * 3
v1 + 2
v1 - 3
v1 / 2
sqrt(v1)
## Multiply each element in a vector by each element in another vector (works for any operation)
# v1 and v2 have equal length, so elements are paired by position.
v1 + v2
v1 - v2
v1 * v2
v1 / v2
## Add two vectors of different lengths, element by element (recycling)
# The shorter vector is repeated (recycled) to match the longer one.
v1 + v5
v5 + v6 # still works but gives a warning that the shorter vector is not an exact multiple of the longer one
## Subset a particular element by its position
# Indexing is 1-based in R.
v3[2]
v3[2:5]
v3[c(2,5)] |
bf290c79a70672985a6c786ca4bba139a98aedcd | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/13042_0/rinput.R | 3b9263755eec106a44d35db4b1336977b5e1e586 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the phylogenetic tree (ape) and strip its root; the unrooted tree
# is written back out for downstream codeml processing.
testtree <- read.tree("13042_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="13042_0_unrooted.txt") |
f0b66875b74d23f48363858c02ac02a3359d0f39 | 0d1ac132e88e9e2640405c2503279bdaf660ce3b | /man/st_stars.Rd | c3554bd8e2361cc9bacd4e52f6c9da862c52e7fe | [] | no_license | nemochina2008/stars | ac3a7ead343f4cf2333e995afe8bdb20ee114507 | 3925bd04d73005834d6f7723bba3ee6bc7928c6b | refs/heads/master | 2021-01-23T11:40:00.991962 | 2017-09-05T15:02:28 | 2017-09-05T15:02:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 612 | rd | st_stars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stars.R
\name{st_stars}
\alias{st_stars}
\title{Read raster/array dataset from file or connection}
\usage{
st_stars(file, options = character(0), driver = character(0))
}
\arguments{
\item{file}{character; file name to read}
\item{options}{character; opening options}
\item{driver}{character; driver to use for opening file}
}
\value{
object of class \code{stars}
}
\description{
Read a raster/array dataset from a file or connection.
}
|
7a8c7d492ae94a588315fe30cfb7619d6a0d77f4 | b867e556609b6530ccb820922c98a2612a30d9f1 | /Data analysis/R_Scripts/R_Script_22_Identify_Asian_hot_pop_by_nation_GRUMP.R | fe010854c8a744d4c4d7cf94ca002eb6d93d24b4 | [] | no_license | crstn/CISC | 3b2bf07dbdb6bd60908dc039e7ce028c4e4a43f8 | 03cda57e7c29309e31547f2d159468b242792f6b | refs/heads/master | 2021-03-30T16:52:56.148550 | 2020-07-15T09:00:48 | 2020-07-15T09:00:48 | 41,560,065 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,800 | r | R_Script_22_Identify_Asian_hot_pop_by_nation_GRUMP.R | # =======================================================================================================
# DESCRIPTION: This script collects all the 15 day heat wave national population files and selects only
#              nations in Asia and the population experiencing very warm heat indices (>42C). It outputs
#              these data by nation, SSP, region and sub-region. It then takes these individual tables
#              and combines them into 5 final output (Summary) files by SSP.
#
# DATE: 21 July 2018
# Developer: Peter J. Marcotullio
# =======================================================================================================
# Get libraries
library(dplyr)
library(stringr)
library(reshape2)
# Make lists
years <-c(2010, 2030, 2070, 2100)
ssps<-c("SSP1", "SSP2", "SSP3", "SSP4", "SSP5")
# Put R in appropriate folder
start_folder<-"E:/July_2017/Pop_&_Temp/GRUMP_HI/GRUMP_HI_Final/Summary_tables/Population/15_day"
setwd(start_folder)
# make path to final folder (created this folder previously)
final_path<-"E:/July_2017/Pop_&_Temp/GRUMP_HI/GRUMP_HI_Final/Summary_tables/Population/15_day/Asia"
# Get all the files we want
pat<-"Nation_w_UHI"
all_files<-list.files(start_folder, pattern = pat)
# Loop through the files
for(p in 1:length(all_files)){
  # Get the RCP and the SSP from the name
  # NOTE(review): the RCP/SSP positions (tokens 7 and 9) are hard-coded to
  # the expected file-name layout; confirm if the naming scheme changes.
  file_name_list<-strsplit(all_files[p], "_")
  rcp<-file_name_list[[1]][7]
  ssp_n<-file_name_list[[1]][9]
  ssp<-paste("SSP", ssp_n, sep="")
  # Read in the data
  df<-read.csv(all_files[p])
  # select only Asian countries (UN M49 region code 142 = Asia)
  df_asia<-df[ which(df$region == 142), ]
  # select only hot populations
  df_asia_1<-df_asia[ which(df_asia$Heat_Index_cat_UHI == ">42 & <=55" | df_asia$Heat_Index_cat_UHI == ">55"),]
  # Create new datasets by looping through years
  for(i in 1:length(years)){
    # Select only for the current year
    df_asia_y<-df_asia_1[which(df_asia_1$Year == years[i]), ]
    # Create new variables
    df_asia_y$SSP<-ssp
    df_asia_y$RCP<-rcp
    # Create a new variable for heat index categories to help with the summary
    # (both hot categories selected above are collapsed into a single ">42" class)
    df_asia_y$HI_cat<-">42"
    # Get year in characters and make new name
    Asia_year<-as.character(years[i])
    label_y<-paste("Nation", "hotPop", rcp, ssp, Asia_year, sep = "_")
    # Summarize data by country iso and new heat index category
    df_1<-df_asia_y%>%
      group_by(ISO, HI_cat, SSP, RCP, Year)%>%
      summarize(Region = mean(region),
                Sub_region = mean(sub_region),
                Population = sum(as.numeric(Population)))
    # Create final name and path for the file
    final_file_name<-paste(label_y, ".csv", sep = "")
    final_file_path<-file.path(final_path, final_file_name)
    # save the file
    write.csv(df_1, final_file_path, row.names =FALSE)
  }
}
# re-set the working folder
setwd(final_path)
# Loop through the new csv files by SSP and combine them into one
# "AC_Summary_<SSP>.csv" file per SSP
for(q in 1:length(ssps)){
  # Collect all the new files into a list
  list.data<-list()
  # create a pattern of SSP
  patt<-ssps[q]
  # Select all files by ssp
  all_new_files<-list.files(final_path, pattern = patt)
  # loop through the files and put them into a new list of dataframes
  for(m in 1:length(all_new_files)){
    list.data[[m]]<-read.csv(all_new_files[m])
  }
  # bind the files from the dataframe list
  big_file<-do.call(rbind, list.data)
  # Create a Summary_file_name and path
  new_file_name<-paste("AC_Summary_", ssps[q], ".csv", sep="")
  new_final_file_path<-file.path(final_path, new_file_name)
  # save file
  write.csv(big_file, new_final_file_path, row.names = FALSE)
}
# DONE! |
5f4512fffbeaa5b3defa3c2d7a3b6de7c302d412 | cdc0504ea03ec5c439006f1e47bbc618fb983ba0 | /man/allo.Rd | 7e7a0310712fcb8d9556e14cc182915f4101ac7f | [] | no_license | jvanschalkwyk/corona | a0ae3df8ff81199b848747f2133d685c40b5f1d4 | 5d4621092cc8bb3772595ea5b50390cfcd564098 | refs/heads/master | 2022-12-24T20:43:37.738883 | 2020-10-01T01:34:55 | 2020-10-01T01:34:55 | 270,596,873 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 507 | rd | allo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{allo}
\alias{allo}
\title{Allometric scaling data.}
\format{
A data frame with 455 rows.
\describe{
\item{Species}{}
\item{Mass}{}
\item{Temperature}{ }
\item{MR}{Metabolic rate}
\item{AvgMass}{}
\item{Q10SMR}{ }
\item{Reference}{}
}
}
\source{
\url{https://royalsocietypublishing.org/doi/suppl/10.1098/rsbl.2005.0378}
}
\usage{
allo
}
\description{
Used to introduce power laws.
}
\keyword{datasets}
|
ae4555bb59785a9c96d9b043439aa4797e2132bc | ab7890a5fcc4becdb084fad38a46fd8afee0fd90 | /man/read_Licor.Rd | 62164adf574ff91e5f67563e09694d79aa5f9b02 | [] | no_license | erikerhardt/RLicor | 521f8889d8fdc0489841cf15384eedc9202a8cbf | 82d8274f76a921fe503fb979661ab1b69361a7aa | refs/heads/master | 2020-06-03T19:17:44.796011 | 2019-07-03T00:00:46 | 2019-07-03T00:00:46 | 191,699,209 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 579 | rd | read_Licor.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_Licor.R
\name{read_Licor}
\alias{read_Licor}
\title{Read Licor 6400 and 6800 files}
\usage{
read_Licor(Licor_fn, sw_model = c("autodetect", 6800, 6400)[1],
n_header_rows_min = 100)
}
\arguments{
\item{Licor_fn}{Licor file to read}
\item{sw_model}{Model of Licor or autodetect}
\item{n_header_rows_min}{Minimum number of rows to read in order to detect where header ends and data begins}
}
\value{
Licor is a list including the header and data
}
\description{
Read Licor 6400 and 6800 files
}
|
46987f232521005b614c2d404e17eaf31257b06c | 8bd212daf8d8af52df1d2c0f8cd12c2d7cb404cf | /man/apa.regression.Rd | 1897d316c11b119b4aec795118cf557df65402d6 | [] | no_license | cran/apaStyle | dfaa541b607740db073c600b2e1625a888f88b6e | 96b638e2c93e376ca180b865884c13152dbf1e4c | refs/heads/master | 2020-04-07T06:40:12.359664 | 2017-03-29T20:27:31 | 2017-03-29T20:27:31 | 48,076,638 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,440 | rd | apa.regression.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apaRegression.R
\name{apa.regression}
\alias{apa.regression}
\title{Generic method to generate an APA style table with regression parameters for MS Word.}
\usage{
apa.regression(..., variables = NULL, number = "XX", title = "APA Table",
filename = "APA Table.docx", note = NULL, landscape = FALSE,
save = TRUE, type = "wide")
}
\arguments{
\item{...}{Regression (i.e., lm) result objects.}
\item{variables}{The variable names for use in the table.}
\item{number}{(optional) The table number in the document.}
\item{title}{(optional) Name of the table.}
\item{filename}{(optional) Specify the filename (including valid '\code{.docx}' extension).}
\item{note}{(optional) Add a footnote to the bottom of the table.}
\item{landscape}{(optional) Set (\code{TRUE}) if the table should be generated in landscape mode.}
\item{save}{(optional) Set (\code{FALSE}) if the table should not be saved in a document.}
\item{type}{(optional) Not implemented.}
}
\value{
\code{apa.regression} object; a list consisting of
\item{succes}{message in case of an error}
\item{save}{flag which indicates whther the document is saved}
\item{data}{dataset with regression statistics}
\item{table}{\code{FlexTable {ReporteRs}} object}
}
\description{
Generic method to generate an APA style table with regression parameters for MS Word.
}
|
107d4e9b4fb074a9cc8612a40ab63e7f4142596e | 9423c8fe8a160eaebf5f47d428631c4a4eac109c | /tests/testthat/test-locations.R | 7ef8596d0db40b3355092ba5c5a8f7c992241f61 | [] | no_license | cran/spew | 2cfd5f555b4a8a501cc26e9349f8362e5b3ff767 | ae60d628773c832be0c290a079bf55aad390bab7 | refs/heads/master | 2021-07-24T20:57:16.344473 | 2017-11-03T22:28:38 | 2017-11-03T22:28:38 | 109,451,817 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,510 | r | test-locations.R | context("Location Sampling")
test_that("Single and Multiple Polygons", {
  library(sp)
  data(delaware)

  # A place composed of multiple polygons must still yield a sample.
  row <- delaware$pop_table[17, ]
  multiple_polygons <- sample_locations(method = "uniform",
                                        place_id = row[, "place_id"],
                                        n_house = row[, "n_house"],
                                        shapefile = delaware$shapefiles$shapefile)
  # Idiom fix: expect_false() instead of expect_equal(is.null(...), FALSE).
  expect_false(is.null(multiple_polygons))

  # For a randomly chosen place, the number of sampled locations must
  # equal the number of houses requested.
  num_samples <- floor(runif(1, min = 1, max = 200))
  rand_row <- floor(runif(1, min = 1, max = nrow(delaware$pop_table)))
  single_polygon <- sample_locations(method = "uniform",
                                     place_id = delaware$pop_table[rand_row, "place_id"],
                                     n_house = num_samples,
                                     shapefile = delaware$shapefiles$shapefile)
  expect_equal(length(single_polygon), num_samples)
})
test_that("IPUMS Shapefiles work", {
  library(sp)
  data(uruguay)

  # Each of the first six IPUMS places must yield exactly the requested
  # number of uniformly sampled locations.
  num_samples <- 100
  place_names <- uruguay$shapefiles$place_id
  for (idx in 1:6) {
    samp <- sample_locations(method = "uniform", place_names[idx],
                             num_samples, uruguay$shapefiles)
    expect_equal(length(samp), num_samples)
  }
})
test_that("Road sampling works", {
  skip_if_not_installed("rgeos")
  library(rgeos)
  library(sp)
  library(maptools)
  data(delaware)

  # Fixture: one known county from the Delaware test data, plus the
  # bundled road network for that region.
  row <- 17
  pid <- delaware$pop_table$place_id[row]
  number_houses <- delaware$pop_table$n_house[row]
  data_path <- system.file("extdata/10/input", package = "spew")
  roads_path <- paste0(data_path, "/roads/natstat/2010/county")
  roads_shapefile <- list(regions = delaware$shapefiles$shapefile, roads = roads_path)

  # Sample from the roads shapefile
  road_locs <- sample_locations(method = "roads",
                                place_id = pid,
                                n_house = number_houses,
                                shapefile = roads_shapefile,
                                noise = .01)

  # Sample Uniformly
  uniform_locs <- sample_locations(method = "uniform",
                                   place_id = pid,
                                   n_house = number_houses,
                                   shapefile = delaware$shapefiles$shapefile,
                                   noise = .01)

  # Both methods must return one location per requested house.
  expect_true(length(uniform_locs) == length(road_locs))

  # Verify sampling from roads works with a small number of houses
  small_number_houses <- 10
  small_road_locs <- sample_locations(method = "roads",
                                      place_id = pid,
                                      n_house = small_number_houses,
                                      shapefile = roads_shapefile,
                                      noise = .01)
  expect_true(length(small_road_locs) == small_number_houses)

  # Verify the Spatial Points class works for road sampling
  road_pts <- sp::spsample(roads_shapefile[[1]], n = 100, type = "random")
  road_pts_locs <- samp_roads(100, road_pts, .01)
  expect_true(class(road_pts_locs) == "SpatialPoints")

  # Degenerate input: just two road points must still yield 100 locations.
  road_pts2 <- road_pts[1:2, ]
  road_pts_locs2 <- samp_roads(100, road_pts2, .01)
  # BUG FIX: this assertion previously re-checked road_pts_locs; it is the
  # class of the second result, road_pts_locs2, that must be verified here.
  expect_true(class(road_pts_locs2) == "SpatialPoints")
  expect_true(length(road_pts_locs) == length(road_pts_locs2))
})
test_that("Uniform Large Households", {
  # Load the Delaware fixture data (the original comment said "South
  # Dakota", but the dataset used is delaware).
  library(maptools)
  library(sp)
  data(delaware)

  # Standard-sized request: 1000 households from one known place.
  number_houses <- 1000
  pid <- delaware$pop_table[17, "place_id"]
  uniform_locations <- sample_locations(method = "uniform",
                                        place_id = pid,
                                        n_house = number_houses,
                                        shapefile = delaware$shapefiles$shapefile,
                                        noise = .01)

  # Stress request: three million points from the same place.
  large_num_houses <- 3000000
  uniform_locations_large <- sample_locations(method = "uniform",
                                              place_id = pid,
                                              n_house = large_num_houses,
                                              shapefile = delaware$shapefiles$shapefile,
                                              noise = .001)

  # The results must be SpatialPoints and honour the requested count.
  expect_true(class(uniform_locations) == "SpatialPoints")
  expect_true(length(uniform_locations_large) == large_num_houses)
})
|
544d0d18d8f8dc0eaad58fe1c1c911a082976888 | 31a52f916e5e09f5153389b6d8addde83e0776ed | /hospital_data.R | 75f39e443176e9f370f8fb8fcd5339b60964dcb9 | [] | no_license | kostask84/snakebite | 119bc42fe460639a245543512ba78dbb15c68589 | e15bb2aae3f31d8728c3f4c1a3b0f4d1e739a69d | refs/heads/master | 2020-03-28T14:39:06.094507 | 2018-04-25T20:38:58 | 2018-04-25T20:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,932 | r | hospital_data.R | # compare hospitals
# clear workspace
rm(list = ls())
# load required packages
pacman::p_load(raster, foreign, reshape2, ggplot2, scales, RColorBrewer, seegSDM, rowr)
# set working directory
setwd('H:/Work/Snakebite')
# load in travel time surface
travel_surface <- raster('rasters/accessibility/accessibility_50k+_2017-07-31_aggregate_5k_2017_08_09.tif')
# load in hospital network
hospitals <- read.csv('africa hospitals/Ouma_Okiro_Snow_Africa_Hospitals_Data.tab.csv',
stringsAsFactors = FALSE)
# get coordinates
hospital_locs <- hospitals[c('Long', 'Lat')]
# get travel times at each of these points
hospital_vals <- extract(travel_surface, hospital_locs)
# convert to hours
hospital_vals <- hospital_vals/60
# add back into dataset
hospitals$distance <- hospital_vals
breaks <- seq(0, 143, 0.5)
hospital_cut <- cut(hospital_vals, breaks, right=FALSE)
hospital_freq <- table(hospital_cut)
# randomly sample some points in Africa
# read in Africa shapefile
africa <- shapefile('World shapefiles/Africa.shp')
africa_raster <- rasterize(africa, travel_surface)
# generate samples (100 iterations)
for(i in 1:100){
africa_samples <- bgSample(africa_raster, n = 4908)
# generate values
random_distances <- extract(travel_surface, africa_samples)
# convert to hours
random_distance_vals <- random_distances/60
if(i == 1){
random_samples <- random_distance_vals
} else {
random_samples <- cbind.fill(random_samples,
random_distance_vals, fill = 0)
}
}
random_samples <- as.data.frame(random_samples)
for(i in 1:100){
breaks <- seq(0, 143, 0.5)
random_sample <- random_samples[c(i)]
random_sample <- unlist(random_sample)
random_cut <- cut(random_sample, breaks, right=FALSE)
random_freq <- table(random_cut)
cumfreq0 <- c(0, cumsum(random_freq))
if(i == 1){
plot(breaks, cumfreq0,
main ="Cumulative Frequency",
xlab ="Duration (Hours)",
ylab ="Cumulative points", type = 'l')
} else {
lines(breaks, cumfreq0, col = 'black')
}
}
abline(a = 4908, b = 0, lty = 2, col = 'red')
cumfreq0 <- c(0, cumsum(hospital_freq))
lines(breaks, cumfreq0, col = 'blue')
# loop through and plot based on country
hosp_duplicate <- hospitals
countries <- sort(unique(hosp_duplicate$Country))
pdf('africa hospitals/comparison_plots.pdf',
width = 8.27,
height = 11.29)
par(mfrow = c(4, 3))
br <- seq(0, 143, by = 0.5)
for (s in seq_along(countries)) {
sub <- hosp_duplicate[which(hosp_duplicate$Country == countries[s]), ]
# set title for the plot
title <- unique(sub$Country)
max_m <- round(max((sub$distance), na.rm = TRUE))
hist(sub$distance,
col = grey(0.4),
xlim = c(0, max_m),
border = 'white',
breaks = br,
xlab = 'Duration (Hours)',
main = title)
}
dev.off()
|
fe03c3be4b4debbdf42a18b0a46dbb7bf9699475 | 3fc85cb0c150aa24803fea713a0c611b70cc6bde | /Scripts/1_RIntro_basic.R | cc9355af8c405d079117f63ad47cfa0b5e2e04a5 | [] | no_license | xime377/IIAP | 4e3dbba3648f1837aa913f62dee66652286b0c5a | 45387a1545821a0f1077ddf0218be8f27f4dab53 | refs/heads/master | 2021-05-02T17:34:02.612566 | 2017-12-08T23:04:30 | 2017-12-08T23:04:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,992 | r | 1_RIntro_basic.R | ##%######################################################%##
#                                                          #
####             Our first R script                      ####
####        Mirza Cengic | mirzaceng@gmail.com           ####
#                                                          #
##%######################################################%##
# Add # to write a comment (notes about code, code description).
# It is important to write comments, your future self will be thankful!
# This is a function.
# Use keyboard shortcut Ctrl + Enter to run a line of code (or code selection)
data()
# Get function help. In R Studio you can also press F1 button when cursor is in the function.
# Function data() lists data sets that already come with R or with one of the loaded packages.
?data()
# There can be multiple ways to do something in R
# Use help function to get help
help(data)
# This is an object - in R everything is an object.
# Iris is a dataset that comes with R
iris
?iris
# Inspecting data ---------------------------------------------------------
# Show first six rows
head(iris)
head(iris, 3)
# Show last six rows
tail(iris)
# Show object's dimension
dim(iris)
# Show number of rows
nrow(iris)
# Object class
class(iris)
# Object structure
str(iris)
# View() opens the spreadsheet-style data viewer (interactive sessions only)
View(iris)
# Subsetting
# NOTE:
# In R you can use 3 types of brackets - (), [] and {}
# Function use ()
# Use [] to subset objects
# Use {} to control flow (write loops) and write functions
# Other useful symbols - () [] {} $ @ %>% <- ? > <
# When subsetting object with rows and columns, rows are before the comma -- [x, ], and columns after comma -- [, x]
iris[1, 1]
# Use <- or = to assign value to an object
# If you use R Studio, use keyboard shortcut alt + - to insert assignment operator
# (this modifies only the session-local copy of iris, not the built-in data)
iris[1, 1] <- 2
# Data types --------------------------------------------------------------
## Numeric
# Printing one number or sequence of numbers
1
1:10
# Combining values
c(1, 5)
# Create number sequence and store into variable
numbers <- seq(0, 10, 2.5)
numbers
# Mathematical operations
numbers + 1
numbers * 1
# Statistical operations
mean(numbers)
sum(numbers)
sd(numbers)
# Check object class
class(numbers)
# In R float and integer are both numeric
class(1)
class(1.0)
## String
string <- "numbers"
# BUG FIX: this previously called class(strings), referencing an object
# that is only created on the next line (an error when run top-to-bottom).
class(string)
strings <- c("These", "are", "strings")
str(strings)
levels(strings) # character vectors have no levels, so this returns NULL
## Factors
factors <- factor(c("These", "are", "factors"))
str(factors)
levels(factors)
## Boolean
# Less than
3 < 5
# Equals
3 == 3
# Not equals
3 != 3
## Logical with boolean
# NOTE: comparison binds tighter than &, so this is 3 & (5 < 8);
# any nonzero number is treated as TRUE in a logical context.
3 & 5 < 8
3 & 10 < 8
# Strings compare lexicographically.
"a" < "b"
"c" < "b"
# Special
# Doesn't exist
?NULL
# Missing value
?NA
# Not a number
?NaN
#
iris
# See data structure
str(iris)
# See basic statistical summary for the data
summary(iris)
# You can use dollar sign $ to access a column in a dataframe
# Get table - frequency of values
table(iris$Species)
# See unique values
unique(iris$Species)
# See length of an object
length(iris$Species)
|
b97a26c86e9f957bf63ac51356bcab4e3a8bcbab | b624e44d581e5bf57d80002e418c5e74ab2861ef | /mtcars_automatic_vs_manual_transmission/ui.R | ce9374421db5c395844288c0f2d5eb6b996965e3 | [] | no_license | delta-peg/data-products-shiny | ab1aad4b43c022f177527b5cd445e99896ff8b12 | ded072a53f5fb0dd56e816802c4f339b0381aeab | refs/heads/main | 2023-08-28T07:55:27.177873 | 2021-11-15T12:42:48 | 2021-11-15T12:42:48 | 428,160,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,984 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
# Define the UI: a title, two checkboxes choosing which model's
# predictions to display, and a main panel with usage instructions
# plus the prediction and residual plots.
shinyUI(fluidPage(
    # Application title
    titlePanel("Comparing two Prediction Models for the Efficiency (Miles per Gallon) of a Car"),
    # Sidebar with one checkbox per model (both off by default)
    sidebarLayout(
        sidebarPanel(
            checkboxInput("naiveModel", "Show prediction for naive model", FALSE),
            checkboxInput("betterModel", "Show prediction for better model", FALSE)
        ),
        # Main panel: explanatory text and the two output plots rendered
        # by the matching server.R ("predict_vs_real" and "residuals").
        mainPanel(
            h2("How to use:"),
            p("This Application builds two linear prediction models based on the", code("mtcars"), "data set."),
            p("Both models predict the value of Miles per Gallon", code("mpg"), "."),
            p("The first", code("naive"), "model uses the formula", code("lm(mpg ~ transmission)."), "The second", code("better"), "model uses the formula", code("lm(mpg ~ transmission + hp + wt)")),
            p(strong("You may select with the checkboxes on the left, which model you would like to see predictions for.")),
            p("Note that the predicted values for the better model are, overall, indeed better. This is especially obvious in the residual plot. The blue dots
              are closer around the value 0 than the green dots (0 represents the real value in the residual plot)"),
            p("Also note that the ", code("naive"), "model only predicts two distinct values for mpg. This is because it predicts solely based
              on the transmission type of a car, which takes only two distinc values: ", code("automatic"), "and", code("manual"), "."),
            plotOutput("predict_vs_real"),
            plotOutput("residuals"))
    )
))
|
3e4d847f7f62371634ac2a5770024090d621fc88 | 036846d7f4c99713352b6d18907504148927b442 | /two-file-analysis_funcs.R | 2f394cacee341e3064d590d8ea90a63369175112 | [
"CC0-1.0"
] | permissive | jdnewmil/FunctionsTalk | 4a5dc7d9d4768e0d22d153cca78a6a0485687f15 | d05cb56cab25ace2db6fe8c4326494f33c89f050 | refs/heads/master | 2023-08-14T02:05:30.252282 | 2021-09-15T01:12:58 | 2021-09-15T01:12:58 | 399,001,864 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,713 | r | two-file-analysis_funcs.R | # two-file-analysis_funcs.R
my_spacy_reader <- function( fname ) {
  # Read a "spacy" CSV: a comma-separated file whose second line is junk
  # (e.g. a units row) and must be discarded before parsing.
  #
  # fname: path of the file to read.
  # Returns a data.frame built from the header line plus all data lines.
  lns <- readLines( fname )
  lns <- lns[ -2 ] # removes the second element
  connection <- textConnection( lns )
  # ROBUSTNESS FIX: close the connection even if read.table() errors;
  # the original closed it only on the success path, leaking it on error.
  on.exit( close( connection ), add = TRUE )
  read.table( connection
            , header = TRUE
            , sep = ","
            )
}
my_bent_qc <- function( dta, y_break_maxfrac ) {
  # Label every row of `dta` as belonging to the sloped or the horizontal
  # segment of a "bent" curve, splitting at y_break_maxfrac times the
  # maximum Y value. Adds a factor column `segment` and returns `dta`.
  cutoff <- y_break_maxfrac * max( dta$Y )
  seg_labels <- character( nrow( dta ) )
  seg_labels[ dta$Y <  cutoff ] <- "Sloped"
  seg_labels[ dta$Y >= cutoff ] <- "Horizontal"
  dta$segment <- factor( seg_labels, levels = c( "Sloped", "Horizontal" ) )
  dta
}
my_analysis <- function( dta ) {
  # Summarise a "bent" curve: a straight-line fit over the sloped segment
  # plus the mean level of the horizontal segment.
  # Expects columns X, Y and the factor `segment` added by my_bent_qc().
  # Returns a one-row data.frame with Slope, Offset and Mean.
  sloped_rows <- which( dta$segment == "Sloped" )
  line_fit <- lm( Y ~ X, data = dta[ sloped_rows, ] )
  line_coefs <- coef( line_fit )
  data.frame( Slope  = unname( line_coefs[ "X" ] )
            , Offset = unname( line_coefs[ "(Intercept)" ] )
            , Mean   = mean( dta$Y[ "Horizontal" == dta$segment ] )
            )
}
my_collect_results <- function( result1, result_label1
                              , result2, result_label2 ) {
  # Stack two single-analysis result frames into one table, tagging each
  # row with its source label in a leading `Source` column.
  labelled1 <- cbind( Source = result_label1, result1 )
  labelled2 <- cbind( Source = result_label2, result2 )
  rbind( labelled1, labelled2 )
}
my_write_results <- function( results, file ) {
  # Write the combined results table to `file` as a minimal CSV:
  # no row names and no quoting, so the output diffs cleanly.
  # Called for its side effect only.
  utils::write.csv( x = results, file = file, row.names = FALSE, quote = FALSE )
}
|
47ac9c65f92251925311b4f81672edacc8eb9af0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/stampr/examples/stamp.stgroup.summary.Rd.R | 5db97fa108b87e86bc7c9c50117371192a26430b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 392 | r | stamp.stgroup.summary.Rd.R | library(stampr)
### Name: stamp.stgroup.summary
### Title: Compile stamp summary statistics by space-time group
### Aliases: stamp.stgroup.summary
### Keywords: stamp
### ** Examples
library(sp)
data("katrina")
# The stamp functions expect an "ID" column; the katrina example data
# ships with "Id", so copy it across first.
katrina$ID <- katrina$Id
ch <- stamp.multichange(katrina, changeByRow = TRUE, dc = 0, distance = TRUE, direction = FALSE)
STGroup <- stamp.stgroup.summary(ch)
head(STGroup)
|
e8e9a5bede16c1c8a8dd340308f711cba21a5da7 | 5ffbed587f1de0db8dc8ebb910e71c77f083fc41 | /uit.R | 29c081e5b1143ebc76e020c69fdbd259d68af07c | [
"MIT"
] | permissive | wanruofenfang123/MVNtest_SpatialDependence | 4aafbb281a4cf00ea09d99c8aa6b7c789312fe0a | 41ed4c0e14860d46626e933dc547bf858c623781 | refs/heads/main | 2023-08-14T13:53:42.911813 | 2021-09-26T01:15:32 | 2021-09-26T01:15:32 | 302,851,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | uit.R | ## union-intersection test
## Union-intersection test for multivariate normality under spatial
## dependence.
##
## coord: spatial coordinates, forwarded to the univariate test (defined
##        elsewhere in this project).
## x:     n x p data matrix.
## K:     number of random one-dimensional projections to test.
## alpha: overall significance level.
##
## Returns 1 if multivariate normality is rejected, 0 otherwise.
uit <- function(coord, x, K, alpha) {
  p <- ncol(x)
  pp <- numeric(K)
  for (i in seq_len(K)) {
    # Under H0 every linear projection of x is univariate normal, so test
    # a random direction built from uniform angles.
    theta <- runif(p, 0, 2 * pi)
    a <- cos(theta)
    y <- x %*% a
    pp[i] <- univariate_normality_test_spatial(coord, y)
  }
  # Benjamini-Yekutieli-style combination over the K dependent p-values:
  # reject when any ordered p-value p_(i) < i * alpha / (K * sum_j 1/j).
  # (Idiom cleanup of the original scalar ifelse(... == TRUE, 1, 0);
  # the unused n <- nrow(x) was dropped.)
  p_ordered <- sort(pp)
  by_constant <- K * sum(1 / seq_len(K))
  as.integer(any(p_ordered / seq_len(K) < alpha / by_constant))
}
f3b90581f7ad992833cc13617fd590187246f416 | f209b0dd46cac6e125bea6e4edb20e199c1bd264 | /R/pkg-wrapper-shinyWidgets.R | bbf42d861dc294be7df245993c742d22df79e9ee | [] | no_license | hzongyao/confuns | 6618c8936c5673eb06130549865446ffd3c960ab | 79ea2e376029bbd976191a136c4247cde119c7d3 | refs/heads/master | 2023-05-14T19:46:24.487730 | 2021-06-04T05:07:53 | 2021-06-04T05:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,492 | r | pkg-wrapper-shinyWidgets.R |
#' @title Picker inputs
#'
across_picker_input <- function(ns,
id = "across",
choices,
selected = "condition",
...){
shinyWidgets::pickerInput(inputId = ns("across"),
label = "Compare across:",
choices = choices,
selected = selected,
...)
}
#' @rdname across_picker_input
across_subset_picker_input <- function(ns,
id = "across_subset",
choices,
selected,
multiple = TRUE,
...){
shinyWidgets::pickerInput(inputId = ns(id),
label = "Subset Groups:",
choices = choices,
multiple = multiple,
selected = selected)
}
#' @rdname across_picker_input
colorpanel_picker_input <- function(ns, id = "pt_clrp"){
shinyWidgets::pickerInput(inputId = ns(id),
choices = pretty_colorpanels_list,
label = "Color Panel:",
multiple = FALSE,
selected = "milo")
}
#' @rdname across_picker_input
color_picker_input <- function(ns, id = "pt_color"){
shinyWidgets::pickerInput(
inputId = ns(id),
label = "Color:",
choices = grDevices::colors(),
selected = "black"
)
}
#' @rdname across_picker_input
bar_position_picker_input <- function(ns, id = "bar_position", selected = "fill", ...){
shinyWidgets::pickerInput(inputId = ns(id),
label = "Bar Position:",
choices = pretty_bar_positions,
selected = selected,
...)
}
#' @rdname across_picker_input
include_variables_picker_input <- function(ns, id = "variables", choices, selected, options = list()){
shinyWidgets::pickerInput(inputId = ns(id),
label = "Include Variables:",
choices = choices,
selected = selected,
options = base::append(list(`actions-box`= TRUE), values = options),
multiple = TRUE)
}
|
6270a5cf6299125d1491d324186a8051bc2897fa | bc55076d2ca0537d0ce209eaaf4d436ab4a54fcc | /man/genNeighbors.Rd | d8398e37a11aa68e327f6ec238e69804310e9b6c | [] | no_license | heike/peptider | a2aed62468b07ca975b0226a0dfd326582b1b821 | 13cdd69ad9fc6dbc3079e48466af28d31ccd886c | refs/heads/master | 2021-01-18T23:33:14.189324 | 2016-06-28T11:48:59 | 2016-06-28T11:48:59 | 10,589,876 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 804 | rd | genNeighbors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peptider.r
\name{genNeighbors}
\alias{genNeighbors}
\title{Calculate neighborhood distribution}
\usage{
genNeighbors(sch, k)
}
\arguments{
\item{sch}{library scheme}
\item{k}{length of the peptide sequences}
}
\value{
dataset of peptide sequences: AA are amino acid sequences,
c0 are codons for self representation,
cr is the ratio of #neighbors in first degree neighborhood (not counting self representations) and #codons in self representation
N1 is the number of neighbors in codon representation (including self representation)
}
\description{
Calculate distribution of neighbors under library scheme lib for peptide sequences of length k.
}
\examples{
genNeighbors(scheme("NNK"), 2)
genNeighbors(scheme("2020"), 2)
}
|
fe3783c8006b097901bcf1cceea808a6b62399ed | 7c210bf1d85690915e99065a269336126914971e | /iris_practice.R | a43560bef343b09d54678f20a3be41a8f0ad5446 | [] | no_license | ariasmiguel/tidyverse | 4c5e1411fa87dc3cd221c999ef4fd20d9411fb4d | ea1b7b40e0e0e18797fc087ab1f14e86eab18163 | refs/heads/master | 2021-01-21T13:35:03.597505 | 2017-10-04T15:30:40 | 2017-10-04T15:30:40 | 102,133,802 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,067 | r | iris_practice.R | library(tidyverse)
library(stringr)

#------------------
# CONVERT TO TIBBLE
#------------------
# iris is an old-school data.frame, which prints every row by default.
# As a tibble it prints compactly (head() etc. show a few records).
df.iris <- as_tibble(iris)

#-----------------
# RENAME VARIABLES
#-----------------
# Rename columns to the common R "snake_case" style: all lower case,
# with dots replaced by underscores.
colnames(df.iris) <- df.iris %>%
  colnames() %>%
  str_to_lower() %>%
  str_replace_all("\\.", "_")

# INSPECT
df.iris %>%
  select(species, everything()) %>%
  head()

# NEST ----------------------------------------------------------
# Nesting stores each species' measurements as one data frame inside a
# list-column, which is readable and efficient for per-group iteration.
nested <- df.iris %>%
  group_by(species) %>%
  nest()

# Per-species column means of the numeric measurements.
nested$data %>%
  map(colMeans)

# BUG FIX: the original `nested %>% map(data, colMeans)` iterated over the
# columns of `nested` itself with `.f = data`, which is meaningless (the
# bare `data` resolves to utils::data). The correct single-pipeline
# equivalent extracts the list-column first:
nested %>% pull(data) %>% map(colMeans)
|
e54b4234ba76947e57171ff04e2b2abf9d276668 | 36ee8ef39604c54293f736c27ebfe9dc056fc9a9 | /man/bestcub.Rd | dcc67efdb5af232112bfa139bab92f9833e3a4f7 | [] | no_license | CristianPachacama/FastCUB | 1ff026db7c67d742333ecbe90f2a89198a74ea5a | 8a121f3d581ca08d097a8367dc1d5679d767b229 | refs/heads/master | 2021-01-03T01:49:28.489962 | 2020-01-31T16:00:17 | 2020-01-31T16:00:17 | 239,865,798 | 2 | 0 | null | 2020-02-11T21:13:56 | 2020-02-11T21:13:55 | null | UTF-8 | R | false | true | 3,081 | rd | bestcub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bestcub.R
\name{bestcub}
\alias{bestcub}
\title{Best-subset variable selection for CUB models via fast EM algorithm}
\usage{
bestcub(ordinal,m,Y,W,toler=1e-4,maxiter=200,iterc=5,alpha=0.05,mix=FALSE,
tolmix=1e+2,fmix=NULL,invgen=TRUE,verbose=FALSE)
}
\arguments{
\item{ordinal}{Vector of ordinal responses}
\item{m}{Number of ordinal categories}
\item{Y}{Matrix of selected covariates for the uncertainty parameter}
\item{W}{Matrix of selected covariates for the feeling parameter}
\item{toler}{Fixed convergence tolerance for final estimates}
\item{maxiter}{Maximum number of iterations allowed for running the optimization algorithm}
\item{iterc}{Iteration from which the acceleration strategy starts}
\item{alpha}{Significant level for Wald test}
\item{mix}{Logical: should a first preliminary standard EM be run at toler equal to tolmix? (default is FALSE)}
\item{tolmix}{Convergence tolerance for first preliminary EM (if mix=TRUE).}
\item{fmix}{Fraction of iteration needed for first preliminary EM (if mix=TRUE). Default is null.}
\item{invgen}{Logical: should the recursive formula for the inverse of the information matrix be considered? (Default is TRUE)}
\item{verbose}{Logical: should messages about acceleration steps be printed out? (Default is FALSE)}
}
\value{
A list containing the following results:
\item{vsel}{List of all estimated models (with the accelerated EM) as FastCUB objects}
\item{bestmodel}{FastCUB object corresponding to the best CUB model (according to BIC), if not null}
\item{bestW}{Names of covariates for feeling in the best model with all significant effect}
\item{bestY}{Names of covariates for feeling in the best model with all significant effect}
\item{param}{ML estimates of the best model}
\item{se}{Estimated standard errors for the best model}
\item{bic}{BIC index of the best (significant) model}
\item{mattime}{Matrix of computational time for each of the estimated model}
\item{matiter}{Matrix of number of iterations occurred for each of the estimated model}
}
\description{
Perform a best-subset search for CUB models on the basis of the BIC index, by combining all possible covariates'
specification for feeling and for uncertainty parameters
}
\examples{
\donttest{
library(FastCUB)
data(univer)
ordinal<-univer$global
m<-7
Y<-univer[,c(2,3,4)]
W<-univer[,8:11]
## Search for the best CUB model with covariates only for feeling
best0q<-bestcub(ordinal,m,Y=NULL,W,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE)
## Search for the best CUB model with covariates only for uncertainty
bestp0<-bestcub(ordinal,m,Y,W=NULL,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE)
## Search for the best CUB model with covariates for both parameters
bestpq<-bestcub(ordinal,m,Y,W,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE,
mix=TRUE,tolmix=1e+3,fmix=1)
final<-bestpq$bestmodel; summary(final)
}
}
\seealso{
\code{\link{fastCUB}}
}
\keyword{stats}
|
e03f8ebf9b0d9c798f73859fb9daa1bce45e8744 | da66a906009814737731f41805d220976737d80b | /Code/meRcury mass download.R | 463f962bc9b9fb68beea8fe522fd25daddfa4fbe | [] | no_license | hwalbert/GiveMercuryAMoon | 7b22bbcdf72c0cac8110647362bb28dafe8c1874 | da796b2e519c8c56701f30c75fe0c3b46b265b2f | refs/heads/master | 2020-03-30T05:24:05.829964 | 2018-12-14T05:15:39 | 2018-12-14T05:15:39 | 150,796,680 | 0 | 0 | null | 2018-12-09T22:11:09 | 2018-09-28T21:33:03 | R | UTF-8 | R | false | false | 10,231 | r | meRcury mass download.R | # USER INPUT
# set directory
location<-'/Mercury Challenge/code and data/Data/'
# install.packages('lubridate')
# install.packages('icesTAF')
# install.packages('purrr')
library("meRcury", lib.loc="C:/Program Files/R/R-3.5.0/library")
library(lubridate)
library(icesTAF)
library(purrr)
library(stringr)
library(tidyr)
library(gdeltr2)
# rewrite some functions
ExtractEGYPT <- function(GDELTData){
#Location filter
vec.EgyptLocation <-
c("egypt")
#Look for Organizations:####
vec.EgyptOrganizations <-
c("muslim brotherhood", "egyptian", "Harakat Sawa'd Misr", "Harakat Sawad Misr",
"HASM", "ISIS-Sinai", "ISIS Sinai", "Liwa al-Thawra","sisi")
#Look for Themes:####
vec.EgyptThemes <- c('revolution','strike','econ_inflation','grievance','arrest','rebellion',
'corruption','conflict','violence','protest','terror','unrest')
#Look for domains:
vec.EgyptDomains <-
c("almasryalyoum.com", "ahram.org.eg", "alwafd.org", "youm7.com",
"egyptindependent.com", "dailynewsegypt.com","alrai.com",
'wsj.com','al-monitor.com')
filtered.gdelt<-
(data.table(GDELTData)
[grepl(paste0(vec.EgyptLocation,collapse = "|"), tolower(locations))]
[grepl(paste0(vec.EgyptOrganizations,collapse = "|"), tolower(organizations))]
[grepl(paste0(vec.EgyptThemes,collapse = "|"), tolower(themes))]
[grepl(paste0(vec.EgyptDomains,collapse = "|"), tolower(domainSource))])
return(filtered.gdelt)
}
GetGDELT <- function(DaysBack = 1, writeResults = T, SpecificDates = NULL, location){
if(is.null(SpecificDates)){
dates = DatesForGDELT(daysBack = DaysBack)
}else{
dates = SpecificDates
}
#This code grabs the Global Knowledge Graph data for the dates specified by the DatesForGDELT() function
GDELTData <-
get_data_gkg_days_detailed(
dates = dates,
# dates = c("2018-09-08"),
table_name = 'gkg',
return_message = T
)
if(nrow(GDELTData)>0){
GDELTData<-ExtractEGYPT(GDELTData)
#dates <- gsub("-", "_", dates)
if(writeResults==T){
FolderForResults <- location
fwrite(GDELTData, file = paste(FolderForResults, "/GDELTDataDate", dates, ".csv", sep = ""))
}
}
}
ExpandEgypt <- function(EgyptData, writeResults = T){
AllData <- EgyptData
cat("Parsing Tone, Names, Locations, and Organizations... \n")
ToneData <- data.frame(t(data.frame(strsplit(AllData$tone, ","))))
names(ToneData) <- c("Tone","PositiveScore", "NegativeScore", "Polarity", "ActivityReferenceDensity", "Self_GroupReferenceDensity", "WordCount")
ToneData[] <- lapply(ToneData, as.character)
ToneData[] <- lapply(ToneData, as.numeric)
AllData <- cbind(AllData, ToneData)
#dates <- data.frame(parse_gkg_mentioned_dates(AllData))
locations <- data.frame(parse_gkg_mentioned_locations(AllData))
# names <- data.frame(parse_gkg_mentioned_names(AllData))
organizations <- data.frame(parse_gkg_mentioned_organizations(AllData))
#people <- data.frame(parse_gkg_mentioned_people(AllData))
AllData <- left_join(AllData, locations, by = "idGKG")
AllData <- left_join(AllData, names, by = "idGKG")
AllData <- left_join(AllData, organizations, by = "idGKG")
EgyptDataExpanded <<- AllData
if(writeResults==T){
FolderForResults <- choose.dir(caption = "Select where to store Egypt specific GDELT Results")
fwrite(EgyptDataExpanded, file = paste(FolderForResults, "/EgyptExpandedDataDate", dates, ".csv", sep = ""))
}
}
# folder structure
icesTAF::mkdir(paste0(getwd(),location,'/Master'))
# a giant for loop that downloads gdelt data for a given month,
# extracts the egypt related info, combines it, saves it to a master folder,
# and deletes them if all of the above is done successfully
# Enter in start date and end date
for (Month in seq.Date(from=as.Date('2018-12-01'),to=as.Date('2018-12-01'),by='month')){
#create folder
icesTAF::mkdir(paste0(getwd(),location,as_date(Month)))
for (i in seq.Date(from=as_date(Month),as_date(Month)+months(1)-days(1),by='day')){
print(as_date(i))
suppressWarnings(suppressMessages(GetGDELT(SpecificDates = as_date(i),
location=paste0(getwd(),location,as_date(Month)))))
}
#once downloaded, let's combine
lst.egypt<-
lapply(X = dir(paste0(getwd(),location,as_date(Month))),
FUN = function(X){
print(paste0("Read GDELT data from ",X))
egypt<-fread(paste0(paste0(getwd(),location,as_date(Month)),'/',X))
#filter broken entries
egypt<-egypt[is.Date(as_date(substr(dateTimeDocument,0,10)))]
}
)
egypt_gdelt <- rbindlist(lst.egypt)
#perform sanity checks
message<-c()
#check filter outputed non-zero items
if(nrow(egypt_gdelt)!=0){
message<-c(message,'Output check is good')
#check # of columns (needs 29 columns)
if(ncol(egypt_gdelt)==29){
message<-c(message,'Column check is good')
#check dates on dateTimeDocument are in deed from that month
if(sum(format(as_date(substr(egypt_gdelt$dateTimeDocument,0,10)),
'%Y-%m-01')!=as_date(Month))==0){
message<-c(message,'Date check is good')
#write to master folder
fwrite(egypt_gdelt, file = paste0(getwd(),location,'/Master/',as_date(Month),'-filtered_gdelt.csv'))
#delete original month (for size reasons)
sapply(X = paste0(paste0(getwd(),location,as_date(Month)),'/',
dir(paste0(getwd(),location,as_date(Month)))),FUN = file.remove)
}else{
message<-c(message,'Date check failed')
}
}else{
message<-c(message,'Column check failed')
}
}else{
message<-c(message,'Output check failed')
}
# write messages
message<-paste0(message,collapse='\n')
writeLines(message, paste0(getwd(),location,as_date(Month),'.txt'))
# delete temp files or the hard drive will explode
unlink(paste0(tempdir(),"\\",dir(tempdir())),recursive=T)
}
# once everything is downloaded, let's combine, expand, and date each article
lst.master <- lapply(X = paste0(paste0(getwd(),location,'/Master/'),grep('.csv',dir(paste0(getwd(),location,'/Master/')),value=T)),
FUN = fread)
dt.master <- rbindlist(lst.master)
dt.master <- cbind(Date=as_date(substr(dt.master$dateTimeDocument,1,10)),dt.master)
fwrite(dt.master,
file = 'GitHub/GiveMercuryAMoon/Data/GDELT_EGYPT.csv')
dt.master <- ExpandEgypt(dt.master,writeResults = F)
parse_gkg_mentioned_names<-function (gdelt_data, filter_na = T, return_wide = T) {
parse_mentioned_names_counts <- function(field = "Interior Minister Chaudhry Nisar Ali Khan,47;Mullah Mansour,87;Afghan Taliban,180;Mullah Mansour,382;Mullah Mansor,753;Mullah Mansour,815;Mullah Mansour,1025",
return_wide = return_wide) {
options(scipen = 99999)
if (field %>% is.na()|field=="") {
if (return_wide) {
field_data <- data_frame(nameMentionedName1 = NA,
charLoc1 = NA)
}
else {
field_data <- data_frame(nameMentionedName = NA,
charLoc = NA, idArticleMentionedName = 1)
}
}
else {
fields <- field %>% str_split("\\;") %>% flatten_chr() %>%
.[!. %in% ""]
fields_df <- data_frame(field = fields) %>% dplyr::mutate(idArticleMentionedName = 1:n()) %>%
separate(field, into = c("nameMentionedName",
"charLoc"), sep = "\\,") %>% mutate(charLoc = charLoc %>%
as.numeric()) %>% suppressMessages() %>% suppressWarnings()
if (return_wide) {
fields_df <- fields_df %>% gather(item, value,
-c(idArticleMentionedName, charLoc)) %>% arrange(idArticleMentionedName) %>%
unite(item, item, idArticleMentionedName, sep = "")
order_fields <- fields_df$item
field_data <- fields_df %>% dplyr::select(-matches("charLoc")) %>%
spread(item, value) %>% dplyr::select_(.dots = order_fields)
field_data <- field_data %>% mutate_at(field_data %>%
dplyr::select(matches("charLoc")) %>% names(),
funs(. %>% as.character() %>% readr::parse_number()))
}
else {
field_data <- fields_df
field_data <- field_data %>% dplyr::select(idArticleMentionedName,
charLoc, nameMentionedName)
}
}
return(field_data)
}
if (!"mentionedNamesCounts" %in% names(gdelt_data)) {
stop("Sorry missing metioned name column")
}
counts_data <- gdelt_data %>% dplyr::select(idGKG, mentionedNamesCounts)
all_counts <- 1:length(counts_data$mentionedNamesCounts) %>%
purrr::map_df(function(x) {
parse_mentioned_names_counts(field = counts_data$mentionedNamesCounts[x],
return_wide = F) %>% dplyr::mutate(idGKG = counts_data$idGKG[x])
}) %>% dplyr::select(idGKG, everything())
if (filter_na) {
if ("nameMentionedName" %in% names(all_counts)) {
all_counts <- all_counts %>% dplyr::filter(!nameMentionedName %>%
is.na())
}
}
all_counts <- all_counts %>% gdeltr2::get_clean_count_data(count_col = "idArticleMentionedName",
return_wide = F) %>% separate(idGKG, into = c("GKG",
"dateTime"), sep = "\\-", remove = F) %>% mutate(dateTime = dateTime %>%
as.numeric()) %>% select(-matches("charLoc")) %>% arrange(dateTime) %>%
dplyr::select(-c(dateTime, GKG)) %>% suppressWarnings()
if (return_wide) {
all_counts <- all_counts %>% spread(item, value)
}
if (!return_wide) {
all_counts <- all_counts %>% .resolve_long_names()
}
return(all_counts)
}
ExpandEgypt
|
bad3a047996f5e1eb8ad4168635ed14e245001c8 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/higlasso/R/higlasso.R | 9fb1afc7e91c459569837edc8d26840379496341 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,481 | r | higlasso.R | #' Hierarchical Integrative Group LASSO
#'
#' HiGLASSO is a regularization based selection method designed to detect
#' non-linear interactions between variables, particularly exposures in
#' environmental health studies.
#'
#'
#' There are a few things to keep in mind when using \code{higlasso}
#' \itemize{
#' \item{\code{higlasso} uses the strong heredity principle. That is,
#' \code{X_1} and \code{X_2} must included as main effects before the
#' interaction \code{X_1 X_2} can be included.
#' }
#' \item{While \code{higlasso} uses integrative weights to help with
#' estimation, \code{higlasso} is more of a selection method.
#' As a result, \code{higlasso} does not output coefficient estimates,
#' only which variables are selected.
#' }
#' \item{Simulation studies suggest that `higlasso` is a very
#' conservative method when it comes to selecting interactions.
#' That is, \code{higlasso} has a low false positive rate and the
#' identification of a nonlinear interaction is a good indicator that
#' further investigation is worthwhile.
#' }
#' \item{\code{higlasso} can be slow, so it may may be beneficial to
#' tweak some of its settings (for example, \code{nlambda1} and
#' \code{nlambda2}) to get a handle on how long the method will take
#' before running the full model.
#' }}
#' @param Y A length n numeric response vector
#' @param X A n x p numeric matrix of covariates to basis expand
#' @param Z A n x m numeric matrix of non basis expanded and non
#' regularized covariates
#' @param method Type of initialization to use. Possible choices are \code{gglasso}
#' for group LASSO and \code{aenet} for adaptive elastic net. Default is
#' \code{aenet}
#' @param lambda1 A numeric vector of main effect penalties on which to tune
#' By default, \code{lambda1 = NULL} and higlasso generates a length
#' \code{nlambda1} sequence of lambda1s based off of the data and
#' \code{min.lambda.ratio}
#' @param lambda2 A numeric vector of interaction effects penalties on which to
#' tune. By default, \code{lambda2 = NULL} and generates a sequence (length
#' \code{nlambda2}) of lambda2s based off of the data and
#' \code{min.lambda.ratio}
#' @param nlambda1 The number of lambda1 values to generate. Default is 10,
#' minimum is 2. If \code{lambda1 != NULL}, this parameter is ignored
#' @param nlambda2 The number of lambda2 values to generate. Default is 10,
#' minimum is 2. If \code{lambda2 != NULL}, this parameter is ignored
#' @param lambda.min.ratio Ratio that calculates min lambda from max lambda.
#' Ignored if 'lambda1' or 'lambda2' is non NULL. Default is 0.05
#' @param sigma Scale parameter for integrative weights. Technically a third
#' tuning parameter but defaults to 1 for computational tractability
#' @param degree Degree of \code{bs} basis expansion. Default is 2
#' @param maxit Maximum number of iterations. Default is 5000
#' @param tol Tolerance for convergence. Default is 1e-5
#' @author Alexander Rix
#' @references
#' A Hierarchical Integrative Group LASSO (HiGLASSO) Framework for Analyzing
#' Environmental Mixtures. Jonathan Boss, Alexander Rix, Yin-Hsiu Chen, Naveen N.
#' Narisetty, Zhenke Wu, Kelly K. Ferguson, Thomas F. McElrath, John D. Meeker,
#' Bhramar Mukherjee. 2020.
#' arXiv:2003.12844
#' @return
#' An object of type "higlasso" with 4 elements:
#' \describe{
#' \item{lambda}{An \code{nlambda1 x nlambda2 x 2} array containing each
#' pair \code{(lambda1, lambda2)} pair.}
#' \item{selected}{An \code{nlambda1 x nlambda2 x ncol(X)} array containing
#' higlasso's selections for each lambda pair.}
#' \item{df}{The number of nonzero selections for each lambda pair.}
#' \item{call}{The call that generated the output.}
#' }
#' @examples
#' library(higlasso)
#'
#' X <- as.matrix(higlasso.df[, paste0("V", 1:7)])
#' Y <- higlasso.df$Y
#' Z <- matrix(1, nrow(X))
#'
#' \donttest{
#' # This can take a bit of time
#' higlasso.fit <- higlasso(Y, X, Z)
#' }
#' @export
higlasso <- function(Y, X, Z, method = c("aenet", "gglasso"), lambda1 = NULL,
lambda2 = NULL, nlambda1 = 10, nlambda2 = 10,
lambda.min.ratio = .05, sigma = 1, degree = 2,
maxit = 5000, tol = 1e-5)
{
call <- match.call()
check.Y(Y)
check.XZ(X, Y)
check.XZ(Z, Y)
method <- match.arg(method)
if (!is.numeric(sigma) || sigma < 0)
stop("'sigma' must be a nonnegative number.")
if (length(sigma) > 1)
stop("'sigma' must have unit length.")
if (!is.numeric(degree) || degree < 1)
stop("'degree' should be an integer >= 1.")
if (!is.numeric(maxit) || maxit < 1)
stop("'maxit' should be an integer >= 1.")
if (!is.numeric(tol) || tol <= 0)
stop("'tol' should be a positive number.")
# get number of main effect variables.
matrices <- generate_design_matrices(X, degree)
Xm <- matrices$Xm
Xi <- matrices$Xi
X.xp <- matrices$X.xp
groups <- matrices$groups
igroups <- matrices$igroups
# X.xp <- do.call("cbind", c(unname(Xm), Xi[j]))
# generate lambda sequences if user does not pre-specify them
p <- ncol(X) * degree
YtX <- abs(Y %*% X.xp)[1,] / nrow(X.xp)
if (!is.null(lambda1)) {
if (!is.numeric(lambda1) || any(lambda1 <= 0))
stop("'lambda1' must be a nonnegative numeric array.")
nlambda1 <- length(lambda1)
} else {
lambda1.max <- max(YtX[1:p])
lambda1.min <- lambda.min.ratio * lambda1.max
lambda1 <- exp(seq(log(lambda1.max), log(lambda1.min), length.out =
nlambda1))
}
if (!is.null(lambda2)) {
if (!is.numeric(lambda2) || any(lambda2 <= 0))
stop("'lambda2' must be a nonnegative numeric array.")
nlambda2 <- length(lambda2)
} else {
lambda2.max <- max(YtX[-(1:p)])
lambda2.min <- lambda.min.ratio * lambda2.max
lambda2 <- exp(seq(log(lambda2.max), log(lambda2.min), len = nlambda2))
}
px <- ncol(X.xp)
pz <- ncol(Z)
X.xp <- cbind(X.xp, Z)
fit <- higlasso.fit(Y, Xm, Xi, Z, X.xp, px, pz, method, lambda1, lambda2,
sigma, groups, igroups, maxit, tol, call)
fit$coef <- NULL
return(fit)
}
initialise_gglasso <- function(X.xp, pz, Y, tol, maxit, groups, igroups)
{
groups <- c(groups, seq(pz) + max(groups))
fit <- gglasso::cv.gglasso(X.xp, Y, group = groups)
i <- which.min(fit$cvm)
purrr::map(igroups, ~ fit$gglasso.fit$beta[.x, i])
}
initialise_aenet <- function(X.xp, px, pz, Y, lambda2, tol, maxit, groups,
igroups)
{
# penalty factor for enet. Z contains unregularized coefficents so we set
# those weights to 0
pf <- c(rep(1, px), rep(0, pz))
enet <- gcdnet::cv.gcdnet(X.xp, Y, method = "ls", lambda2 = lambda2,
pf = pf, pf2 = pf, eps = tol,
maxit = max(maxit, 1e6))
# get the best scoring lambda from gcdnet and use that to generate
# inital weights for the adpative elastic net
i <- which.min(enet$cvm)
weights <- enet$gcdnet.fit$beta[1:px, i]
weights <- 1 / abs(weights + 1 / nrow(X.xp)) ^ 2
weights <- c(weights, rep(0, pz))
aenet <- gcdnet::cv.gcdnet(X.xp, Y, method = "ls", lambda2 = lambda2,
pf = weights, pf2 = pf, eps = tol,
maxit = max(maxit, 1e6))
i <- which.min(aenet$cvm)
purrr::map(igroups, ~ aenet$gcdnet.fit$beta[.x, i])
}
higlasso.fit <- function(Y, Xm, Xi, Z, X.xp, px, pz, method, lambda1, lambda2,
sigma, groups, igroups, maxit, tol, call)
{
ngroups <- length(Xm)
nlambda1 <- length(lambda1)
nlambda2 <- length(lambda2)
lambda <- array(0, c(nlambda1, nlambda2, 2))
coef <- array(0, c(nlambda1, nlambda2, ncol(X.xp)))
selected <- array(0, c(nlambda1, nlambda2, choose(ngroups, 2) + ngroups))
nm <- names(Xm)
k <- purrr::map_lgl(Xi, ~ ncol(.x) > 0)
nmi <- purrr::map_chr(purrr::cross2(nm, nm),
~ paste(.x[[1]], .x[[2]], sep = "."))
dimnames(selected) <- list(NULL, NULL, c(nm, nmi[k]))
df <- matrix(0, nlambda1, nlambda2)
if (method == "gglasso")
start <- initialise_gglasso(X.xp, pz, Y, tol, maxit, groups, igroups)
for (j in seq_along(lambda2)) {
if (method == "aenet")
start <- initialise_aenet(X.xp, px, pz, Y, lambda2[j], tol, maxit,
groups, igroups)
beta <- start[1:ngroups]
eta <- start[-(1:ngroups)]
for (i in seq_along(lambda1)) {
fit <- higlasso_internal(Y, Xm, Xi, Z, beta, eta, lambda1[i],
lambda2[j], sigma, maxit, tol)
lambda[i, j,] <- c(lambda1[i], lambda2[j])
coef[i, j, ] <- unlist(c(fit$beta, fit$gamma[k], fit$alpha))
selected[i, j,] <- purrr::map_lgl(c(fit$beta, fit$gamma[k]),
~ any(.x != 0))
df[i, j] <- sum(selected[i, j,])
}
}
structure(list(lambda = lambda, coef = coef, selected = selected,
df = df, call = call), class = "higlasso")
}
|
5da19da943100613915a2b2b92a344829a9a6d85 | a213908885c74c682746d16fe6dd1ff796f0f5db | /INCADataBr/R/plotGraficoComSlider.R | d74d89dcde00d916ae9ed70f7436b05cf06bffcc | [] | no_license | costawilliam/INCADataBr | b7ae59f3fdff74d91182472902ed3df0d7964d2c | ec5503f09e0d443749f00cf9b46559dc215f5746 | refs/heads/master | 2020-04-30T05:11:04.832364 | 2019-06-17T21:12:42 | 2019-06-17T21:12:42 | 176,623,100 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 529 | r | plotGraficoComSlider.R | #' Title
#'
#' @param df
#' @param params
#'
#' @return
#' @export
#'
#' @examples
plotGraficoComSlider <- function(df, params) {
if (!require("plotly", character.only = TRUE)) {
install.packages("plotly", dep = TRUE)
}
library(plotly)
p <- plotly::plot_ly(df, x = ~ df$var) %>%
add_lines(y = ~ df$nrocasos) %>%
layout(
title = params$title,
xaxis = list(
title = params$titleX,
rangeslider = list(type = "date")
),
yaxis = list(title = params$titleY)
)
p
}
|
b234b3b863e1d5d067ed19b20536f1d0d8189ea8 | e81df8e395bf870c6b4a5b728fc293a00a029127 | /0_settings.R | 4a6e28e42dd53679c9830d93acd7f149fc09aaff | [] | no_license | Barakwiye/GRP | 50fcbfdea623d3cc122fa7e29337b981d344b0ae | 0b107bdea4892daad17e64028dcd291ccd9a93ec | refs/heads/master | 2020-12-25T21:34:49.071355 | 2015-05-07T20:39:15 | 2015-05-07T20:39:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,116 | r | 0_settings.R | PLOT_WIDTH <- 6.5
PLOT_HEIGHT <- 6.5
PLOT_DPI <- 300
prefixes <- c('C:/Data', # HP current work
'S:/Data', # Shared blue drive
'D:/azvoleff/Data', # CI-TEAM
'H:/Data', # Buffalo drive
'O:/Data', # Blue drive
'/localdisk/home/azvoleff/Data') # vertica1
prefix <- prefixes[match(TRUE, unlist(lapply(prefixes, function(x) file_test('-d', x))))]
geogs <- read.csv(file.path(prefix, 'GRP', 'GRP_Countries.csv'))
temps <- c('C:/Temp', # Local
'H:/Temp', # Buffalo drive
'O:/Temp', # Blue drive (HP or thinkpad)
'/localdisk/home/azvoleff/Temp', # vertica1
'D:/Temp') # CI-TEAM
temp <- temps[match(TRUE, unlist(lapply(temps, function(x) file_test('-d', x))))]
library(raster)
rasterOptions(tmpdir=temp)
# Specify how many processors to use for parallel processing. On CI-TEAM, this
# should be set to 6. On your laptop, set it somewhere between 2 and 4.
if (Sys.info()[4] == 'CI-TEAM') {
n_cpus <- 8
} else if (Sys.info()[4] == 'vertica1.team.sdsc.edu') {
n_cpus <- 16
} else {
n_cpus <- 3
}
|
fd842a5b39b855b3d231085fb08ae8343ad3835c | 839d9fd750aa5513ad9cc6950239f01313befd88 | /scripts/utils.R | 4aa60c62be94840e011b3cb96ddebeb8bd8f65bc | [] | no_license | stats-learning-fall2019/final-project | 5727707b9cd5057910cafe229738d1362509af87 | 0c62a02544f8153a72cb24368c58444cc03fc2e1 | refs/heads/master | 2020-07-28T09:09:42.021331 | 2019-12-05T22:50:11 | 2019-12-05T22:50:11 | 209,375,278 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,029 | r | utils.R | #*********#
# ALIASES #
#*********#
`%notin%` <- Negate(`%in%`)
#*****************#
# SUPPORTING DATA #
#*****************#
if (! exists('country_codes')) {
library(readr)
country_codes = read_csv('data/gtdb_country_codes.csv')
}
if (! exists('region_codes')) {
region_codes = read_csv('data/gtdb_region_codes.csv')
}
#***********#
# FUNCTIONS #
#***********#
load_pkgs <- function(packages){
new.pkg <- packages[!(packages %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE)
sapply(packages, require, character.only = TRUE)
}
# Maps codes to their corresponding names
# Note: assumes the mapping dataframe (df) has "code" and "name" fields
map_code_to_name <- function(code, df) {
match = which(df$code==code)
if (length(match) > 0) {
return(df[match,]$name)
}
return("Other")
}
# Maps names to their corresponding codes
# Note: assumes the mapping dataframe (df) has "code" and "name" fields
map_name_to_code <- function(name, df) {
match = which(df$name==name)
if (length(match) > 0) {
return(df[match,]$code)
}
else {
warning(sprintf("No matching code found. Returning -1"))
return(-1)
}
}
cleanse_data <- function(data, drop_columns = FALSE) {
library(readr)
# This should not be needed again. It was used to prune the text fields from the
# data set initially, but the data was updated it github after running this command.
if (drop_columns) {
data = subset(
data,
select = -c(
addnotes,
alternative,
alternative_txt,
approxdate,
attacktype1_txt,
attacktype2_txt,
attacktype3_txt,
claimmode2_txt,
claimmode3_txt,
claimmode_txt,
corp1,
country_txt,
dbsource,
hostkidoutcome_txt,
location,
motive,
natlty1_txt,
natlty2_txt,
propcomment,
propextent_txt,
region_txt,
resolution,
scite1,
scite2,
scite3,
summary,
target1,
targsubtype1_txt,
targsubtype2_txt,
targsubtype3_txt,
targtype1_txt,
targtype2_txt,
targtype3_txt,
weapdetail,
weapsubtype1_txt,
weapsubtype2_txt,
weapsubtype3_txt,
weapsubtype4_txt,
weaptype1_txt,
weaptype2_txt,
weaptype3_txt,
weaptype4_txt
)
)
}
# Do Something
return(data)
}
distinct_values <- function(variable, data) {
return(data %>% select(variable) %>% filter(! is.na(variable)) %>% distinct())
}
random_colors <- function(n) {
load_pkgs(c('RColorBrewer'))
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
colors <- sample(col_vector, n)
return(colors)
}
load_windows_fonts <- function() {
load_pkgs(c('extrafont'))
font_import()
loadfonts(device = "win")
}
|
16860861919fafd73fbb213989e92ba6030b23b7 | bd52b89310371b4870998e71386df0ebfbc377ad | /R/packagecheck.R | 83d958be450dd6687b36e3af32c8c0a139bfe90b | [] | no_license | jonathon-love/jsq | f42559d729bb7c65d116add8c3fb0abba49cab45 | a17cda8dcdbd8e54837b702c51432698dbfbd6f3 | refs/heads/master | 2023-03-10T01:02:54.410039 | 2023-03-01T00:14:45 | 2023-03-01T00:14:45 | 181,661,233 | 5 | 3 | null | 2019-11-13T00:08:35 | 2019-04-16T09:47:01 | R | UTF-8 | R | false | false | 13,078 | r | packagecheck.R | #
# Copyright (C) 2013-2018 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
.checkPackages <- function() {
expected <- matrix(nrow=0, ncol=2, dimnames=list(NULL, c("Package", "Version")))
#--auto-generated
expected <- rbind(expected, c('BAS', '1.5.0'))
expected <- rbind(expected, c('BDgraph', '2.50'))
expected <- rbind(expected, c('BMS', '0.3.4'))
expected <- rbind(expected, c('BSDA', '1.2.0'))
expected <- rbind(expected, c('BayesFactor', '0.9.12-4.2'))
expected <- rbind(expected, c('Epi', '2.30'))
expected <- rbind(expected, c('Formula', '1.2-3'))
expected <- rbind(expected, c('GPArotation', '2014.11-1'))
expected <- rbind(expected, c('GeneNet', '1.2.13'))
expected <- rbind(expected, c('Hmisc', '4.1-1'))
expected <- rbind(expected, c('IsingFit', '0.3.1'))
expected <- rbind(expected, c('IsingSampler', '0.2'))
expected <- rbind(expected, c('KernSmooth', '2.23-15'))
expected <- rbind(expected, c('MASS', '7.3-50'))
expected <- rbind(expected, c('Matrix', '1.2-12'))
expected <- rbind(expected, c('MatrixModels', '0.4-1'))
expected <- rbind(expected, c('NetworkToolbox', '1.1.2'))
expected <- rbind(expected, c('R.matlab', '3.6.1'))
expected <- rbind(expected, c('R.methodsS3', '1.7.1'))
expected <- rbind(expected, c('R.oo', '1.22.0'))
expected <- rbind(expected, c('R.utils', '2.6.0'))
expected <- rbind(expected, c('R6', '2.2.2'))
expected <- rbind(expected, c('RColorBrewer', '1.1-2'))
expected <- rbind(expected, c('Rcpp', '0.12.17'))
expected <- rbind(expected, c('SparseM', '1.77'))
expected <- rbind(expected, c('SuppDists', '1.1-9.4'))
expected <- rbind(expected, c('TH.data', '1.0-8'))
expected <- rbind(expected, c('TTR', '0.23-3'))
expected <- rbind(expected, c('abind', '1.4-5'))
expected <- rbind(expected, c('acepack', '1.4.1'))
expected <- rbind(expected, c('afex', '0.20-2'))
expected <- rbind(expected, c('arm', '1.10-1'))
expected <- rbind(expected, c('assertthat', '0.2.0'))
expected <- rbind(expected, c('backports', '1.1.2'))
expected <- rbind(expected, c('base', '3.4.4'))
expected <- rbind(expected, c('base64enc', '0.1-3'))
expected <- rbind(expected, c('bindr', '0.1.1'))
expected <- rbind(expected, c('bindrcpp', '0.2.2'))
expected <- rbind(expected, c('boot', '1.3-20'))
expected <- rbind(expected, c('bootnet', '1.1.0'))
expected <- rbind(expected, c('ca', '0.70'))
expected <- rbind(expected, c('car', '3.0-0'))
expected <- rbind(expected, c('carData', '3.0-1'))
expected <- rbind(expected, c('cellranger', '1.1.0'))
expected <- rbind(expected, c('checkmate', '1.8.5'))
expected <- rbind(expected, c('class', '7.3-14'))
expected <- rbind(expected, c('cli', '1.0.0'))
expected <- rbind(expected, c('cluster', '2.0.6'))
expected <- rbind(expected, c('cmprsk', '2.2-7'))
expected <- rbind(expected, c('coda', '0.19-1'))
expected <- rbind(expected, c('codetools', '0.2-15'))
expected <- rbind(expected, c('coin', '1.2-2'))
expected <- rbind(expected, c('colorspace', '1.3-2'))
expected <- rbind(expected, c('colourpicker', '1.0'))
expected <- rbind(expected, c('compiler', '3.4.4'))
expected <- rbind(expected, c('contfrac', '1.1-12'))
expected <- rbind(expected, c('conting', '1.6.1'))
expected <- rbind(expected, c('corpcor', '1.6.9'))
expected <- rbind(expected, c('corrplot', '0.84'))
expected <- rbind(expected, c('crayon', '1.3.4'))
expected <- rbind(expected, c('curl', '3.2'))
expected <- rbind(expected, c('d3Network', '0.5.2.1'))
expected <- rbind(expected, c('data.table', '1.11.4'))
expected <- rbind(expected, c('datasets', '3.4.4'))
expected <- rbind(expected, c('deSolve', '1.21'))
expected <- rbind(expected, c('dichromat', '2.0-0'))
expected <- rbind(expected, c('digest', '0.6.15'))
expected <- rbind(expected, c('doSNOW', '1.0.16'))
expected <- rbind(expected, c('dplyr', '0.7.5'))
expected <- rbind(expected, c('e1071', '1.6-8'))
expected <- rbind(expected, c('ellipse', '0.4.1'))
expected <- rbind(expected, c('elliptic', '1.3-7'))
expected <- rbind(expected, c('emmeans', '1.2.1'))
expected <- rbind(expected, c('estimability', '1.3'))
expected <- rbind(expected, c('etm', '0.6-2'))
expected <- rbind(expected, c('evaluate', '0.10.1'))
expected <- rbind(expected, c('fdrtool', '1.2.15'))
expected <- rbind(expected, c('forcats', '0.3.0'))
expected <- rbind(expected, c('foreach', '1.4.4'))
expected <- rbind(expected, c('foreign', '0.8-69'))
expected <- rbind(expected, c('ggedit', '0.2.1'))
expected <- rbind(expected, c('ggm', '2.3'))
expected <- rbind(expected, c('ggplot2', '2.2.1'))
expected <- rbind(expected, c('ggrepel', '0.8.0'))
expected <- rbind(expected, c('glasso', '1.8'))
expected <- rbind(expected, c('glmnet', '2.0-16'))
expected <- rbind(expected, c('glue', '1.2.0'))
expected <- rbind(expected, c('gnm', '1.0-8'))
expected <- rbind(expected, c('grDevices', '3.4.4'))
expected <- rbind(expected, c('graphics', '3.4.4'))
expected <- rbind(expected, c('grid', '3.4.4'))
expected <- rbind(expected, c('gridExtra', '2.3'))
expected <- rbind(expected, c('gtable', '0.2.0'))
expected <- rbind(expected, c('gtools', '3.5.0'))
expected <- rbind(expected, c('haven', '1.1.1'))
expected <- rbind(expected, c('highr', '0.6'))
expected <- rbind(expected, c('hmeasure', '1.0'))
expected <- rbind(expected, c('hms', '0.4.2'))
expected <- rbind(expected, c('htmlTable', '1.12'))
expected <- rbind(expected, c('htmltools', '0.3.6'))
expected <- rbind(expected, c('htmlwidgets', '1.2'))
expected <- rbind(expected, c('httpuv', '1.4.3'))
expected <- rbind(expected, c('huge', '1.2.7'))
expected <- rbind(expected, c('hypergeo', '1.2-13'))
expected <- rbind(expected, c('igraph', '1.2.1'))
expected <- rbind(expected, c('iterators', '1.0.9'))
expected <- rbind(expected, c('jpeg', '0.1-8'))
expected <- rbind(expected, c('jsonlite', '1.5'))
expected <- rbind(expected, c('knitr', '1.20'))
expected <- rbind(expected, c('labeling', '0.3'))
expected <- rbind(expected, c('later', '0.7.2'))
expected <- rbind(expected, c('lattice', '0.20-35'))
expected <- rbind(expected, c('latticeExtra', '0.6-28'))
expected <- rbind(expected, c('lavaan', '0.6-1'))
expected <- rbind(expected, c('lazyeval', '0.2.1'))
expected <- rbind(expected, c('lme4', '1.1-17'))
expected <- rbind(expected, c('lmerTest', '3.0-1'))
expected <- rbind(expected, c('lmtest', '0.9-36'))
expected <- rbind(expected, c('logspline', '2.1.10'))
expected <- rbind(expected, c('longitudinal', '1.1.12'))
expected <- rbind(expected, c('magrittr', '1.5'))
expected <- rbind(expected, c('maptools', '0.9-2'))
expected <- rbind(expected, c('markdown', '0.8'))
expected <- rbind(expected, c('matrixStats', '0.54.0'))
expected <- rbind(expected, c('matrixcalc', '1.0-3'))
expected <- rbind(expected, c('metafor', '2.0-0'))
expected <- rbind(expected, c('methods', '3.4.4'))
expected <- rbind(expected, c('mgcv', '1.8-23'))
expected <- rbind(expected, c('mgm', '1.2-2'))
expected <- rbind(expected, c('mi', '1.0'))
expected <- rbind(expected, c('mime', '0.5'))
expected <- rbind(expected, c('miniUI', '0.1.1.1'))
expected <- rbind(expected, c('minqa', '1.2.4'))
expected <- rbind(expected, c('mitools', '2.3'))
expected <- rbind(expected, c('mnormt', '1.5-5'))
expected <- rbind(expected, c('modeltools', '0.2-21'))
expected <- rbind(expected, c('moments', '0.14'))
expected <- rbind(expected, c('multcomp', '1.4-8'))
expected <- rbind(expected, c('munsell', '0.4.3'))
expected <- rbind(expected, c('mvtnorm', '1.0-8'))
expected <- rbind(expected, c('network', '1.13.0.1'))
expected <- rbind(expected, c('nlme', '3.1-131.1'))
expected <- rbind(expected, c('nloptr', '1.0.4'))
expected <- rbind(expected, c('nnet', '7.3-12'))
expected <- rbind(expected, c('nortest', '1.0-4'))
expected <- rbind(expected, c('numDeriv', '2016.8-1'))
expected <- rbind(expected, c('onewaytests', '1.8'))
expected <- rbind(expected, c('openxlsx', '4.1.0'))
expected <- rbind(expected, c('parallel', '3.4.4'))
expected <- rbind(expected, c('parcor', '0.2-6'))
expected <- rbind(expected, c('pbapply', '1.3-4'))
expected <- rbind(expected, c('pbivnorm', '0.6.0'))
expected <- rbind(expected, c('pbkrtest', '0.4-7'))
expected <- rbind(expected, c('pillar', '1.2.3'))
expected <- rbind(expected, c('pkgconfig', '2.0.1'))
expected <- rbind(expected, c('plotrix', '3.7-2'))
expected <- rbind(expected, c('plyr', '1.8.4'))
expected <- rbind(expected, c('png', '0.1-7'))
expected <- rbind(expected, c('ppcor', '1.1'))
expected <- rbind(expected, c('ppls', '1.6-1'))
expected <- rbind(expected, c('promises', '1.0.1'))
expected <- rbind(expected, c('psych', '1.8.4'))
expected <- rbind(expected, c('purrr', '0.2.5'))
expected <- rbind(expected, c('pwr', '1.2-2'))
expected <- rbind(expected, c('qgraph', '1.5'))
expected <- rbind(expected, c('quadprog', '1.5-5'))
expected <- rbind(expected, c('quantmod', '0.4-13'))
expected <- rbind(expected, c('quantreg', '5.36'))
expected <- rbind(expected, c('qvcalc', '0.9-1'))
expected <- rbind(expected, c('readr', '1.1.1'))
expected <- rbind(expected, c('readxl', '1.1.0'))
expected <- rbind(expected, c('relaimpo', '2.2-3'))
expected <- rbind(expected, c('relimp', '1.0-5'))
expected <- rbind(expected, c('rematch', '1.0.1'))
expected <- rbind(expected, c('reshape2', '1.4.3'))
expected <- rbind(expected, c('rio', '0.5.10'))
expected <- rbind(expected, c('rjson', '0.2.19'))
expected <- rbind(expected, c('rlang', '0.2.1'))
expected <- rbind(expected, c('rpart', '4.1-13'))
expected <- rbind(expected, c('rstudioapi', '0.7'))
expected <- rbind(expected, c('sandwich', '2.4-0'))
expected <- rbind(expected, c('scales', '0.5.0'))
expected <- rbind(expected, c('sem', '3.1-9'))
expected <- rbind(expected, c('semTools', '0.4-14'))
expected <- rbind(expected, c('shiny', '1.1.0'))
expected <- rbind(expected, c('shinyAce', '0.3.1'))
expected <- rbind(expected, c('shinyBS', '0.61'))
expected <- rbind(expected, c('shinyjs', '1.0'))
expected <- rbind(expected, c('sna', '2.4'))
expected <- rbind(expected, c('snow', '0.4-2'))
expected <- rbind(expected, c('sourcetools', '0.1.7'))
expected <- rbind(expected, c('sp', '1.2-7'))
expected <- rbind(expected, c('spatial', '7.3-11'))
expected <- rbind(expected, c('splines', '3.4.4'))
expected <- rbind(expected, c('statnet.common', '4.0.0'))
expected <- rbind(expected, c('stats', '3.4.4'))
expected <- rbind(expected, c('stats4', '3.4.4'))
expected <- rbind(expected, c('stringi', '1.2.2'))
expected <- rbind(expected, c('stringr', '1.3.1'))
expected <- rbind(expected, c('survey', '3.33-2'))
expected <- rbind(expected, c('survival', '2.41-3'))
expected <- rbind(expected, c('tcltk', '3.4.4'))
expected <- rbind(expected, c('tibble', '1.4.2'))
expected <- rbind(expected, c('tidyr', '0.8.1'))
expected <- rbind(expected, c('tidyselect', '0.2.4'))
expected <- rbind(expected, c('tools', '3.4.4'))
expected <- rbind(expected, c('tseries', '0.10-44'))
expected <- rbind(expected, c('utf8', '1.1.3'))
expected <- rbind(expected, c('utils', '3.4.4'))
expected <- rbind(expected, c('vcd', '1.4-4'))
expected <- rbind(expected, c('vcdExtra', '0.7-1'))
expected <- rbind(expected, c('viridis', '0.5.1'))
expected <- rbind(expected, c('viridisLite', '0.3.0'))
expected <- rbind(expected, c('whisker', '0.3-2'))
expected <- rbind(expected, c('xtable', '1.8-2'))
expected <- rbind(expected, c('xts', '0.10-2'))
expected <- rbind(expected, c('yaml', '2.1.19'))
expected <- rbind(expected, c('zip', '1.0.0'))
expected <- rbind(expected, c('zoo', '1.8-1'))
#--auto-generated
# Compare the expected package/version table (built above by auto-generated
# rbind() calls) against what is actually installed in this R library.
expected.package.names <- expected[,1]
# Name the rows by package so rows can be looked up as expected[pkg, "Version"].
dimnames(expected) <- list(expected.package.names, c("Package", "Version"))
installed <- installed.packages()
installed.package.names <- dimnames(installed)[[1]]
# Collect one human-readable message per missing or version-mismatched package.
messages <- c()
for (package.name in expected.package.names) {
if (package.name %in% installed.package.names) {
installed.version <- installed[package.name, "Version"]
expected.version <- expected[package.name, "Version"]
# NOTE: exact string comparison — a newer installed version also counts as wrong.
if (installed.version != expected.version)
messages <- c(messages, paste("Package ", package.name, " is not the correct version; expected: ", expected.version, ", installed: ", installed.version, sep=""))
} else {
messages <- c(messages, paste("Package ", package.name, " not installed!", sep=""))
}
}
# Result (last expression): a one-element list on success, otherwise a list
# with official=FALSE and the collected messages.
if (length(messages) == 0) {
list("All R-packages are up-to-date!")
} else {
list(official=FALSE, messages=messages)
}
}
|
29472233917d177ca5d3544e90b05a647f65ef30 | a0f1cad740b09d49b7fda76403a3a49513ded2a9 | /tests/testthat/test-generate_igraph_inputs.R | fd586105ed370136e51529dd71d2220f627b6da3 | [
"MIT"
] | permissive | jandraor/readsdr | 2f2d15ba78337161d22d5727399acb1aa1c78271 | b92dc373eaba5625bd322e8da8a925a690e6f167 | refs/heads/master | 2023-06-23T15:51:22.659018 | 2023-06-09T12:05:48 | 2023-06-09T12:05:48 | 205,191,444 | 17 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,285 | r | test-generate_igraph_inputs.R | context("Generate igraph inputs")
# Minimal one-stock test model: the level "Population" accumulates the flow
# "net_growth", which equals Population * growth_rate (growth_rate = 0.01).
structure_m1 <- list(
  parameters = NULL,
  levels = list(list(name = "Population", equation = "net_growth", initValue = 100)),
  variables = list(list(name = "net_growth", equation = "Population*growth_rate")),
  constants = list(list(name = "growth_rate", value = 0.01))
)
#get_igraph_inputs()------------------------------------------------------------
test_that("get_igraph_inputs() returns the expected elements", {
  # The returned list must expose exactly a node table and an edge table.
  igraph_inputs <- get_igraph_inputs(structure_m1)
  expect_named(igraph_inputs, c("nodes", "edges"))
})
test_that("get_igraph_inputs() valid inputs for igraph", {
  # The two data frames must be directly consumable by igraph's constructor.
  graph_dfs <- get_igraph_inputs(structure_m1)
  # `directed = TRUE` spelled out (was the reassignable shorthand `T`).
  gr <- igraph::graph_from_data_frame(graph_dfs$edges, directed = TRUE,
                                      vertices = graph_dfs$nodes)
  # expect_s3_class() replaces the deprecated expect_is() (testthat 3e).
  expect_s3_class(gr, "igraph")
})
# Fixture 1: "births" depends on "birthRate", which is itself an auxiliary
# standing in for the constant "birthRate2".
stocks <- list(list(name = "population", equation = "births", initValue = 100))
variables <- list(
  list(name = "births", equation = "population*birthRate"),
  list(name = "birthRate", equation = "birthRate2")
)
constants <- list(list(name = "birthRate2", value = 0.1))

# Fixture 2: same stock, but "birthRate" is a constant used directly.
stocks2 <- stocks
variables2 <- list(list(name = "births", equation = "population*birthRate"))
constants2 <- list(list(name = "birthRate", value = 0.1))
# generate_edges_df()-----------------------------------------------------------
test_that("generate_edges_df() returns the correct number of edges", {
  # One flow edge plus one info-link are expected for the minimal model.
  edges <- generate_edges_df(structure_m1$levels, structure_m1$variables,
                             structure_m1$constants)
  expect_equal(nrow(edges), 2)
})
test_that("generate_edges_df() returns the correct number of flows", {
  edges <- generate_edges_df(structure_m1$levels, structure_m1$variables,
                             structure_m1$constants)
  # Only rows typed "flow" count here.
  flow_rows <- edges[which(edges$type == "flow"), ]
  expect_equal(nrow(flow_rows), 1)
})
test_that("generate_edges_df() ignores info-links whose tail is a constant", {
  # "birthRate2" is a constant, so its info-link must not appear: 3 edges, not 4.
  expect_equal(nrow(generate_edges_df(stocks, variables, constants)), 3)
})
# generate_nodes_df()-----------------------------------------------------------
test_that("generate_nodes_df() returns a df with the correct columns", {
  nodes <- generate_nodes_df(structure_m1$levels, structure_m1$variables,
                             structure_m1$constants)
  # Column order matters: name, type, equation.
  expect_named(nodes, c("name", "type", "equation"))
})
test_that("generate_nodes_df() returns the correct number of nodes", {
  # One level plus one variable; constants do not become nodes.
  nodes <- generate_nodes_df(structure_m1$levels, structure_m1$variables,
                             structure_m1$constants)
  expect_equal(nrow(nodes), 2)
})
test_that("generate_nodes_df() replaces auxiliar consts with their value in equations", {
  # The constant birthRate (0.1) should be substituted into the births equation.
  nodes_df <- generate_nodes_df(stocks2, variables2, constants2)
  expect_equal(nodes_df$equation[2], "population*0.1")
})
# The test description below is a single multi-line string literal (it contains
# an embedded newline), so it is kept byte-for-byte.
test_that("generate_nodes_df() throws an error should a variable directly
          depends on time", {
  # A variable whose equation references `time` must be rejected.
  # NOTE(review): the equation string "ifelse(time>1Population*growth_rate,0)"
  # looks malformed (missing comma after `1`) — presumably intentional for this
  # test, but worth confirming.
  variables <- list(
    list(name = "net_growth",
         equation = "ifelse(time>1Population*growth_rate,0)")
  )
  expect_error(
    generate_nodes_df(structure_m1$levels, variables, structure_m1$constants),
    "A variable depends on time")
})
# construct_var_edge()----------------------------------------------------------
test_that("construct_var_edge() ignores scalars in equations", {
  # "k" is declared as a constant and the literal 1 is a scalar; neither should
  # yield an edge — only x, y and z feed into w.
  input_var <- list(name = "w", equation = "x*y/(k+1/z)")
  result <- construct_var_edge(input_var, c("k"))
  reference <- data.frame(
    from = c("x", "y", "z"),
    to = "w",
    type = "info_link",
    stringsAsFactors = FALSE
  )
  expect_equal(result, reference)
})
test_that("construct_var_edge() accounts for repeated variables", {
  # x appears twice in the equation but must produce a single edge.
  input_var <- list(name = "z", equation = "(x+y)/x")
  result <- construct_var_edge(input_var, c("k"))
  reference <- data.frame(
    from = c("x", "y"),
    to = "z",
    type = "info_link",
    stringsAsFactors = FALSE
  )
  expect_equal(result, reference)
})
# construct_stock_edge()--------------------------------------------------------
test_that('construct_stock_edge returns NULL constant-alike stocks', {
  # A stock whose equation is the literal "0" behaves like a constant and
  # should produce no flow edge at all.
  stock_obj <- list(name = "test_stock",
                    equation = "0")
  # expect_null() is the idiomatic testthat assertion for a NULL result
  # (replaces expect_equal(x, NULL)).
  expect_null(construct_stock_edge(stock_obj))
})
|
6e9c8ae91657fc07010136b9013e95c4656f056d | 6077c6ffe370c6aacc8cfeab2de85ea69b7fec18 | /birth_weight.R | b81ef173c45993808da524711567c459e2d25f69 | [] | no_license | cmdoret/data_analysis | 2a74672505dfcb7ec35d6ac7eb05065bcee44cd4 | caf0657ab5ec7c5ec314ad47c09a777d75b22203 | refs/heads/master | 2021-06-10T12:45:52.116673 | 2017-01-09T01:23:40 | 2017-01-09T01:23:40 | 72,785,771 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 897 | r | birth_weight.R | # First report for advanced data analysis. Birth weight dataset.
# Cyril Matthey-Doret
# Thu Nov 3 10:34:26 2016 ------------------------------

# Loading data:
# NOTE(review): machine-specific absolute path — the script only runs as-is on
# the author's machine; consider a relative path or here::here().
setwd ("/home/cyril/Documents/Master/sem_1/Data_analysis/data/")
birth_weight<-read.table("birtweigth_study.txt", sep= " ")
row.names(birth_weight) <- NULL

#=======================================================
# WARNING:
# LWT == POUNDS
# BWT == GRAMS
# Transforming units: lwt pounds -> kg (factor 0.45359237), bwt grams -> kg
birth_weight$lwt <- birth_weight$lwt*0.45359237
birth_weight$bwt <- birth_weight$bwt/1000
#=======================================================
# Checking data: normality of birth weight (Shapiro-Wilk test + QQ plot)
shapiro.test(birth_weight$bwt)
qqnorm(birth_weight$bwt)
qqline(birth_weight$bwt)
#=======================================================
# Analysing which variable impacts birthweight
# NOTE(review): anova() on lm() uses sequential (Type I) sums of squares, so
# these results depend on the order of terms in the formula; the second model
# fits all interactions up to 5-way.
anova(lm(bwt ~ age+lwt+ht+smoke+ptd,birth_weight))
anova(lm(bwt ~ ht*smoke*lwt*ptd*age,birth_weight))
ad695e0355bb3299fd5cda35585b2489416c7f1e | 2e0d783a6efe476d9f3c51d255d61e2450a207b8 | /loop_script.R | ac5b6ba08cefb5c6eb562256d6b7ef793ede026d | [] | no_license | TomKellyGenetics/R-Parallel-Lesson | 93805a2ce15764627b7eefed8087192da462d873 | 083d5d3ec9a31d6d31936648807c8518dd77191e | refs/heads/master | 2020-05-23T08:03:11.694731 | 2017-02-07T10:25:16 | 2017-02-07T10:25:16 | 80,489,310 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 368 | r | loop_script.R | library("snow")
# Simulated data: 60 observations of 10 variables, processed in groups of
# 3 consecutive rows; each group is summarised by the first right-singular
# vector of its SVD.
dataset <- matrix(rnorm(600), 60, 10)
stopifnot(nrow(dataset) %% 3 == 0)  # the grouping below assumes divisibility
n_groups <- nrow(dataset) / 3

cl <- makeSOCKcluster(2) # number of worker processes (cores)
# Workers run in fresh R sessions, so export only what the task needs.
# (The original exported everything with ls() and also preallocated an
# mg_data matrix that was immediately overwritten by parLapply's result.)
clusterExport(cl, list = "dataset")
mg_rows <- parLapply(cl, seq_len(n_groups), function(ii) {
  svd(dataset[(ii - 1) * 3 + 1:3, ])$v[, 1]
})
stopCluster(cl)
# Bind the per-group vectors into an n_groups x ncol(dataset) matrix,
# using the same list -> data.frame -> transpose pipeline as before so the
# written row names are unchanged.
mg_data <- t(as.data.frame(mg_rows))
write.csv(mg_data, file = "mg_data.csv")
980567c7ad5b2102e48fa004072857358c43aa63 | 88f03f016156266d669af61fc437b410ba65433e | /2_script/calculos_enigh.R | 7048243b3b6e40aeea9aacdb6e4d7ad2c574d5b2 | [
"CC-BY-4.0"
] | permissive | pCobosAlcala/tesina | de778af29e4e8a1aab90949128a221428ff67696 | 0b4cbad4a8192af565e3801378c885e13ede80f2 | refs/heads/master | 2022-11-02T20:19:25.030276 | 2020-06-17T01:51:46 | 2020-06-17T01:51:46 | 268,036,321 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,473 | r | calculos_enigh.R | ### Configuración inicial ----
if (!require("pacman")) install.packages("pacman")
p_load(tidyverse, readr, doBy, survey, gt)
## Setup
Sys.setlocale("LC_ALL", "es_ES.UTF-8") # Mac
Sys.setlocale("LC_ALL", "Spanish_Mexico.1252") # Windows
options(scipen=999)
## Eliminar objetos
rm(list = ls())
### Base de datos ----
Conc <- read_csv("1_data/datos_abiertos_enigh_2018/conjunto_de_datos_concentradohogar_enigh_2018_ns/conjunto_de_datos/conjunto_de_datos_concentradohogar_enigh_2018_ns.csv")
### Código del INEGI ----
# 2.1 Ingreso corriente total promedio trimestral por hogar en deciles de hogares
# opción para tratar los casos de los estratos con una sola una UPM
options(survey.lonely.psu="adjust")
# selección de las variables de interés (añadí la de transporte público)
Conc <- Conc [ c("folioviv", "foliohog", "ing_cor", "ingtrab", "trabajo", "negocio", "otros_trab", "rentas", "utilidad",
"arrenda", "transfer", "jubilacion", "becas", "donativos", "remesas", "bene_gob", "transf_hog", "trans_inst",
"estim_alqu", "otros_ing","factor","upm","est_dis", "transporte", "publico")]
# se crea una variable para agregar la entidad federativa
Conc$entidad <- substr(Conc$folioviv,1,2)
## Añadí esto para filtrar la base y quedarme sólo con el Estado de México
Conc <- Conc %>%
filter(entidad == 15)
# se define la columna con el nombre de las entidades federativas
Numdec<-c("Total", "I", "II", "III","IV", "V", "VI", "VII", "VIII", "IX","X")
# se crea una bandera para numerar a los hogares
Conc$Nhog <- 1
# deja activa la tabla Conc
attach(Conc)
# ordena Conc de acuerdo a ing_cor, folioviv, foliohog.
Conc<- orderBy (~+ing_cor+folioviv+foliohog, data=Conc)
# suma todos los factores y guarda el valor en el vector tot_hogares.
tot_hogares <- sum(factor,to.data.frame=TRUE)
# se divide la suma de factores entre diez para sacar el tamaño del decil
# se debe de truncar el resultado quitando los decimales.
tam_dec<-trunc(tot_hogares/10)
# muestra la suma del factor en variable hog.
Conc$tam_dec=tam_dec
# se renombra la tabla concentrado a BD1.
BD1 <- Conc
# dentro de la tabla BD1 se crea la variable MAXT y se le asigna los valores que tienen el ing_cor.
BD1$MAXT<-BD1$ing_cor
# se ordena de menor a mayor según la variable MAXT.
BD1<-BD1[with(BD1, order(rank(MAXT))),]
# se aplica la función cumsum, suma acumulada a la variable factor.
BD1$ACUMULA<-cumsum(BD1$factor)
# entra a un ciclo donde iremos generando los deciles 1 a 10.
for(i in 1:9)
{
a1<-BD1[dim(BD1[BD1$ACUMULA<tam_dec*i,])[1]+1,]$factor
BD1<-rbind(BD1[1:(dim(BD1[BD1$ACUMULA<tam_dec*i,])[1]+1),],
BD1[(dim(BD1[BD1$ACUMULA<tam_dec*i,])[1]+1):dim(BD1[1])[1],])
b1<-tam_dec*i-BD1[dim(BD1[BD1$ACUMULA<tam_dec*i,])[1],]$ACUMULA
BD1[(dim(BD1[BD1$ACUMULA<tam_dec*i,])[1]+1),]$factor<-b1
BD1[(dim(BD1[BD1$ACUMULA<tam_dec*i,])[1]+2),]$factor<-(a1-b1)
}
BD1$ACUMULA2<-cumsum(BD1$factor)
BD1$DECIL<-0
BD1[(BD1$ACUMULA2<=tam_dec),]$DECIL<-1
for(i in 1:9)
{
BD1[((BD1$ACUMULA2>tam_dec*i)&(BD1$ACUMULA2<=tam_dec*(i+1))),]$DECIL<-(i+1)
}
BD1[BD1$DECIL%in%"0",]$DECIL<-10
x<-tapply(BD1$factor,BD1$Nhog,sum)
# DECILES
y<-tapply(BD1$factor,BD1$DECIL,sum)
# se calcula el promedio (ingreso entre los hogares) tanto para el total como para cada uno de los deciles
ingreso_promedio<-tapply(BD1$factor*BD1$ing_cor,BD1$Nhog,sum)/x
ingreso_deciles<-tapply(BD1$factor*BD1$ing_cor,BD1$DECIL,sum)/y
# Fin del código del INEGI y cálculo del gasto en transporte público
tp_promedio<-tapply(BD1$factor*BD1$publico,BD1$Nhog,sum)/x
tp_deciles<-tapply(BD1$factor*BD1$publico,BD1$DECIL,sum)/y
transporte_promedio<-tapply(BD1$factor*BD1$transporte,BD1$Nhog,sum)/x
transporte_deciles<-tapply(BD1$factor*BD1$transporte,BD1$DECIL,sum)/y
# Conformación de una nueva base
base <- as_tibble(cbind(ingreso_deciles, tp_deciles, transporte_deciles))
base <- base %>%
mutate(decil = factor(c("I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"),
order = T,
levels = c("I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"))) %>%
select(decil, ingreso_deciles, tp_deciles, transporte_deciles)
### Visualizations ----
# Share of income spent on transport, by decile: stacked bars split income
# into public transport (TP), other transport (-TP) and everything else.
base %>%
  mutate(publico_entre_ingreso = tp_deciles/ingreso_deciles,
         transporte_entre_ingreso = transporte_deciles/ingreso_deciles,
         transporte_menos_publico = transporte_entre_ingreso - publico_entre_ingreso,
         ingresos_menos_transporte = 1 - transporte_entre_ingreso) %>%
  select(decil, transporte_menos_publico, publico_entre_ingreso, ingresos_menos_transporte) %>%
  pivot_longer(-decil,
               names_to = "variable",
               values_to = "n") %>%
  ggplot() +
  geom_col(aes(x = decil,
               y = n,
               # `ordered` spelled out (the original `order = T` relied on
               # partial argument matching)
               fill = factor(variable,
                             ordered = TRUE,
                             levels = c("ingresos_menos_transporte",
                                        "transporte_menos_publico",
                                        "publico_entre_ingreso")))) +
  # Fix: `scales` is not attached by p_load() above, so the formatter must be
  # namespace-qualified or the plot errors with "object 'percent' not found".
  scale_y_continuous(labels = scales::percent,
                     breaks = seq(0, 1, .1),
                     limits = c(0, 1)) +
  scale_fill_manual(values = c("grey", "steelblue", "salmon"),
                    labels = c("Ingreso", "-TP", "TP")) +
  labs(title = "title",
       subtitle = "subtitle",
       x = "Decil",
       y = "Porcentaje",
       fill = "Rubro",
       caption = "Elaboración propia con datos de X (AÑO)") +
  theme_minimal() +
  theme(panel.grid.major.x = element_blank(),
        plot.caption = element_text(hjust = 0))
# Spending on transport vs public transport per decile (stacked, peso amounts).
base %>%
  # Only the public-transport amount and the remainder of transport spending
  # are plotted. The original also computed two identical, unused columns
  # (dif_publico_ingreso / dif_transporte_ingreso) that select() dropped;
  # they are removed here.
  mutate(dif_publico_transporte = transporte_deciles - tp_deciles) %>%
  select(decil, dif_publico_transporte, tp_deciles) %>%
  pivot_longer(-decil,
               names_to = "variable",
               values_to = "n") %>%
  ggplot() +
  geom_col(aes(x = decil, y = n, fill = variable)) +
  # Fix: `scales` is not attached by p_load() above, so the formatter must be
  # namespace-qualified or the plot errors with "object 'comma' not found".
  scale_y_continuous(labels = scales::comma) +
  scale_fill_manual(values = c("steelblue", "salmon"),
                    labels = c("-TP", "TP")) +
  labs(title = "title",
       subtitle = "subtitle",
       x = "Decil",
       y = "Porcentaje",
       fill = "Rubro",
       caption = "Elaboración propia con datos de X (AÑO)") +
  theme_minimal() +
  theme(legend.position = c(.4,.8),
        panel.grid.major.x = element_blank(),
        plot.caption = element_text(hjust = 0))
# Spending on public transport vs income per decile (stacked, peso amounts).
base %>%
  # Remainder of income after public-transport spending. The original also
  # computed two unused columns that select() dropped; removed here.
  mutate(dif_publico_ingreso = ingreso_deciles - tp_deciles) %>%
  select(decil, dif_publico_ingreso, tp_deciles) %>%
  pivot_longer(-decil,
               names_to = "variable",
               values_to = "n") %>%
  ggplot() +
  geom_col(aes(x = decil, y = n, fill = variable)) +
  # Fix: `scales` is not attached by p_load() above, so the formatter must be
  # namespace-qualified or the plot errors with "object 'comma' not found".
  scale_y_continuous(labels = scales::comma) +
  scale_fill_manual(values = c("steelblue", "salmon"),
                    labels = c("Gasto", "Gasto en transporte público")) +
  labs(title = "title",
       subtitle = "subtitle",
       x = "Decil",
       y = "Porcentaje",
       fill = "Rubro",
       caption = "Elaboración propia con datos de X (AÑO)") +
  theme_minimal() +
  theme(axis.text.x = element_blank(),
        legend.position = c(.3,.8),
        panel.grid.major.x = element_blank(),
        plot.caption = element_text(hjust = 0))
# Final calculations: quarterly public-transport spending per decile and its
# share of current income, rendered as a gt table.
q <- base %>%
  mutate(porcentaje = tp_deciles/ingreso_deciles) %>%
  select(decil, tp_deciles, porcentaje)

gt_tbl <- gt(data = q)

gt_tbl %>%
  tab_header(title = "Gasto trimestral en transporte público",
             subtitle = "Hogares por deciles de ingreso en el Estado de México") %>%
  tab_source_note(source_note = "Elaboración propia con datos de la ENIGH (INEGI 2018).") %>%
  # NOTE(review): the amounts come from ENIGH (Mexican pesos) but are
  # formatted with currency = "USD" — confirm whether "MXN" was intended.
  fmt_currency(columns = "tp_deciles", currency = "USD") %>%
  fmt_percent(columns = "porcentaje") %>%
  cols_label(decil = "Decil",
             tp_deciles = "Gasto",
             porcentaje = "% del ingreso")
|
457b932a45b95d2ea22599ef6322648ba9dfb19a | 8c50265b43add0e91e30245cc7af3c2558c248f5 | /R-package/tests/testthat/test_img_seg.R | 4af7d62cf533125a146484f7b9209c1b5ea9b2d7 | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel"
] | permissive | awslabs/dynamic-training-with-apache-mxnet-on-aws | 6a67f35d7e4b12fa8bba628bd03b2b031924e211 | 1063a979417fee8c820af73860eebd2a4f670380 | refs/heads/master | 2023-08-15T11:22:36.922245 | 2022-07-06T22:44:39 | 2022-07-06T22:44:39 | 157,440,687 | 60 | 19 | Apache-2.0 | 2022-11-25T22:23:19 | 2018-11-13T20:17:09 | Python | UTF-8 | R | false | false | 5,944 | r | test_img_seg.R | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require(mxnet)
source("get_data.R")
if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) ==
1) {
mx.ctx.default(new = mx.gpu())
message("Using GPU for testing.")
}
print_inferred_shape <- function(net) {
slist <- mx.symbol.infer.shape(symbol = net, data = c(168, 168, 1, 2))
print(slist$out.shapes)
}
convolution_module <- function(net, kernel_size, pad_size, filter_count, stride = c(1,
1), work_space = 2048, batch_norm = TRUE, down_pool = FALSE, up_pool = FALSE,
act_type = "relu", convolution = TRUE) {
if (up_pool) {
net <- mx.symbol.Deconvolution(net, kernel = c(2, 2), pad = c(0, 0), stride = c(2,
2), num_filter = filter_count, workspace = work_space)
net <- mx.symbol.BatchNorm(net)
if (act_type != "") {
net <- mx.symbol.Activation(net, act_type = act_type)
}
}
if (convolution) {
conv <- mx.symbol.Convolution(data = net, kernel = kernel_size, stride = stride,
pad = pad_size, num_filter = filter_count, workspace = work_space)
net <- conv
}
if (batch_norm) {
net <- mx.symbol.BatchNorm(net)
}
if (act_type != "") {
net <- mx.symbol.Activation(net, act_type = act_type)
}
if (down_pool) {
pool <- mx.symbol.Pooling(net, pool_type = "max", kernel = c(2, 2), stride = c(2,
2))
net <- pool
}
print_inferred_shape(net)
return(net)
}
get_unet <- function() {
data <- mx.symbol.Variable("data")
kernel_size <- c(3, 3)
pad_size <- c(1, 1)
filter_count <- 32
pool1 <- convolution_module(data, kernel_size, pad_size, filter_count = filter_count,
down_pool = TRUE)
net <- pool1
pool2 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
2, down_pool = TRUE)
net <- pool2
pool3 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4, down_pool = TRUE)
net <- pool3
pool4 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4, down_pool = TRUE)
net <- pool4
net <- mx.symbol.Dropout(net)
pool5 <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
8, down_pool = TRUE)
net <- pool5
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4, up_pool = TRUE)
net <- convolution_module(net, kernel_size, pad_size = c(2, 2), filter_count = filter_count *
4, up_pool = TRUE)
net <- mx.symbol.Crop(net, pool3, num.args = 2)
net <- mx.symbol.concat(c(pool3, net), num.args = 2)
net <- mx.symbol.Dropout(net)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4, up_pool = TRUE)
net <- mx.symbol.concat(c(pool2, net), num.args = 2)
net <- mx.symbol.Dropout(net)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4, up_pool = TRUE)
convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
4)
net <- mx.symbol.concat(c(pool1, net), num.args = 2)
net <- mx.symbol.Dropout(net)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
2)
net <- convolution_module(net, kernel_size, pad_size, filter_count = filter_count *
2, up_pool = TRUE)
net <- convolution_module(net, kernel_size, pad_size, filter_count = 1, batch_norm = FALSE,
act_type = "")
net <- mx.symbol.SoftmaxOutput(data = net, name = "sm")
return(net)
}
context("Image segmentation")
test_that("UNET", {
list.of.packages <- c("imager")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,
"Package"])]
if (length(new.packages))
install.packages(new.packages, repos = "https://cloud.r-project.org/")
GetISBI_data()
library(imager)
IMG_SIZE <- 168
files <- list.files(path = "data/ISBI/train-volume/")
a <- "data/ISBI/train-volume/"
filess <- paste(a, files, sep = "")
list_of_images <- lapply(filess, function(x) {
x <- load.image(x)
y <- resize(x, size_x = IMG_SIZE, size_y = IMG_SIZE)
})
train.x <- do.call("cbind", lapply(list_of_images, as.vector))
train.array <- train.x
dim(train.array) <- c(IMG_SIZE, IMG_SIZE, 1, 30)
files <- list.files(path = "data/ISBI/train-labels")
b <- "data/ISBI/train-labels/"
filess <- paste(b, files, sep = "")
list_of_images <- lapply(filess, function(x) {
x <- load.image(x)
y <- resize(x, size_x = IMG_SIZE, size_y = IMG_SIZE)
})
train.y <- do.call("cbind", lapply(list_of_images, as.vector))
train.y[which(train.y < 0.5)] <- 0
train.y[which(train.y > 0.5)] <- 1
train.y.array <- train.y
dim(train.y.array) <- c(IMG_SIZE, IMG_SIZE, 1, 30)
devices <- mx.ctx.default()
mx.set.seed(0)
net <- get_unet()
model <- mx.model.FeedForward.create(net, X = train.array, y = train.y.array,
ctx = devices, num.round = 2, initializer = mx.init.normal(sqrt(2/576)),
learning.rate = 0.05, momentum = 0.99, array.batch.size = 2)
})
|
8f6fdf0e5d7ba6c89c794f905d22d1bab95ffc94 | 940ef234ab31e206a4d94a607a93bdf0f33069b4 | /HGTS_processing.R | e22b44a54cccdb870bb3bbbaa97282030ff865a5 | [
"MIT"
] | permissive | salvatoreloguercio/RepSeqPipe | f7ffd7716afbf0f5b67945075144aea66452d434 | b647f923da792fa17fb2699f5603ef1d176c556f | refs/heads/master | 2021-09-10T15:43:08.803596 | 2018-03-28T19:19:16 | 2018-03-28T19:19:16 | 105,821,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,342 | r | HGTS_processing.R | #! /usr/bin/Rscript
# HTGS abstar processing — command-line setup.
# Example interactive values:
# input_file="EK-gDNA-053017_S22_mergedpairedend_15overlap.csv"
# input_type="gDNA"
# dedup=T
options(stringsAsFactors = FALSE)
# JVM heap limit for rJava-backed packages; must be set before the JVM starts.
# Fix: the flag string must not contain a space (was "- Xmx1024m", which the
# JVM rejects as an invalid option).
options(java.parameters = "-Xmx1024m")

library(optparse)

# CLI options. All three are effectively required by HGTS_processing() below,
# but optparse does not enforce this (defaults are NULL).
option_list <- list(
  make_option(c("-i", "--input_file"), type = "character", default = NULL,
              help = "Abstar output file name", metavar = "character"),
  make_option(c("-t", "--input_type"), type = "character", default = NULL,
              help = "input type (gDNA or RNA)", metavar = "character"),
  make_option(c("-d", "--dedup"), type = "logical", default = NULL,
              help = "deduplication", metavar = "logical"))

opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)

library(stringr)
library(Biostrings)
HGTS_processing<-function(input_file,input_type,dedup){
# Open a timestamped report log (append mode) next to the input file.
# NOTE(review): sub(".csv", ...) uses an unescaped dot, so it replaces the
# first occurrence of any-char + "csv" in the name — fine for typical names,
# but "\\.csv$" would be stricter.
logfile<-file(paste0(sub(".csv","",input_file),"_",format(Sys.time(), format = "%Y-%m-%d-%H%M"),"_dedup_",dedup,"_report.txt"),open="a")
cat(paste0("input file: ",input_file),file = logfile, sep="\n")
cat(paste0("input type: ",input_type),file = logfile, sep="\n")
cat(paste0("deduplication: ",dedup),file = logfile, sep="\n")
cat("-----",file = logfile, sep="\n")
#infile<-read.csv("EK-gDNA-053017_S22_mergedpairedend_15overlap_sample.csv")
infile<-read.csv(input_file)
cat(paste0("Input file read successfully. Number of reads: ",nrow(infile)),file = logfile, sep="\n")
cat("-----",file = logfile, sep="\n")
# filtering common to both gDNA and RNA
# filter I - keep kappa-chain reads only
# NOTE(review): grep() may match more than one column if several names contain
# "chain"; this assumes exactly one match.
chaincol<-grep("chain",colnames(infile))
infile_I<-infile[which(infile[,chaincol]=="kappa"),]
cat("Filter I - kappa chain only",file = logfile, sep="\n")
cat(paste0("Reads removed: ",length(which(infile[,chaincol]!="kappa"))),file = logfile, sep="\n")
cat(paste0("Infile I rows: ",dim(infile_I)[1]),file = logfile, sep="\n")
cat("-----",file = logfile, sep="\n")
# filter II - V-segment nucleotide identity ("_nt_identity_v") of 95% and above
nt_identity_v_col<-grep("_nt_identity_v",colnames(infile))
infile_II<-infile_I[which(infile_I[,nt_identity_v_col]>=95),]
cat("Filter II - nt_identity_v – 95% and above",file = logfile, sep="\n")
cat(paste0("Reads removed: ",length(which(infile_I[,nt_identity_v_col]<95))),file = logfile, sep="\n")
cat(paste0("Infile II rows: ",dim(infile_II)[1]),file = logfile, sep="\n")
cat("-----",file = logfile, sep="\n")
# filter III
# Vk gene length >= 150bp (length of the germline V alignment of the query).
Vk_gene_length_col<-grep("_germ_alignments_nt_var_query",colnames(infile_II))
glength<-apply(infile_II,1,function(x) nchar(x[Vk_gene_length_col]))
infile_III<-infile_II[which(glength>=150),]
cat("Filter III - Vk gene length >= 150bp",file = logfile, sep="\n")
cat(paste0("Reads removed: ",length(which(glength<150))),file = logfile, sep="\n")
cat(paste0("Infile III rows: ",dim(infile_III)[1]),file = logfile, sep="\n")
cat("-----",file = logfile, sep="\n")
### Deduplication - gDNA
# I - all VJ combos
dedup_gdna<-function(infile_III){
# Flag and remove PCR duplicates in gDNA reads.
# Reads are grouped by V/J gene combination and raw-read length; within each
# group, reads carrying the same adapter (GACTCGT or CTGCTCCT) and the same
# 6-nt sequence immediately upstream of the adapter match are treated as
# duplicates, and all but the first occurrence are dropped.
# Returns infile_III with duplicate rows removed.
None_raw_input_col<-grep("None_raw_input",colnames(infile_III))
None_seq_id_col<-grep("None_seq_id",colnames(infile_III))
dedup_out<-vector()
VJ<-unique(paste(infile_III$None_v_gene_gene,infile_III$None_j_gene_gene,sep="_"))
# Seq ids whose 6-nt tag (just before the adapter) was already seen in this
# group; the first occurrence of each tag is kept.
flagger<-function(VJ_adapt,ad_pattern){
  NNs<-sapply(VJ_adapt$None_raw_input,function(x) substr(x,str_locate(x,ad_pattern)[1]-6,str_locate(x,ad_pattern)[1]-1))
  VJ_adapt$None_seq_id[duplicated(NNs)]
}
# seq_along() is empty-safe, unlike the original 1:length() pattern, which
# iterates c(1, 0) when a vector is empty.
for(j in seq_along(VJ)){
  VJ_combo<-VJ[j]
  print(VJ_combo)  # progress indicator
  V<-strsplit(VJ_combo,"_")[[1]][1]
  J<-strsplit(VJ_combo,"_")[[1]][2]
  VJ_tab<-infile_III[which(infile_III$None_v_gene_gene==V&infile_III$None_j_gene_gene==J),c(None_seq_id_col,None_raw_input_col)]
  VJ_len<-nchar(VJ_tab$None_raw_input)
  n_occur<-data.frame(table(nchar(VJ_tab$None_raw_input)))
  # Keep only reads whose length occurs more than once (duplicate candidates).
  VJ_dups<-VJ_tab[VJ_len%in%n_occur$Var1[n_occur$Freq>1],]
  VJ_len<-nchar(VJ_dups$None_raw_input)
  # Iterating over the unique lengths directly skips cleanly when VJ_dups is
  # empty (the original indexed into unique(VJ_len) with 1:length(), which
  # produced spurious iterations for empty groups).
  for(len in unique(VJ_len)){
    VJ_len_dups<-VJ_dups[VJ_len%in%len,]
    adapt1<-str_detect(VJ_len_dups$None_raw_input,"GACTCGT")
    adapt2<-str_detect(VJ_len_dups$None_raw_input,"CTGCTCCT")
    VJ_adapt1<-VJ_len_dups[adapt1,]
    if(nrow(VJ_adapt1)>1){
      dedup<-flagger(VJ_adapt1,"GACTCGT")
      if(length(dedup)>0){dedup_out<-c(dedup_out,dedup)}
    }
    VJ_adapt2<-VJ_len_dups[adapt2,]
    if(nrow(VJ_adapt2)>1){
      dedup<-flagger(VJ_adapt2,"CTGCTCCT")
      if(length(dedup)>0){
        dedup_out<-c(dedup_out,dedup)
      }
    }
  }
}
# Drop the flagged duplicates and return the result explicitly (the original
# returned the last assignment's value invisibly; a stray debug print of the
# adapter-2 duplicates was also removed).
infile_IV<-infile_III[!infile_III$None_seq_id%in%dedup_out,]
infile_IV
}
# infile_IV<-dedup_gdna(infile_III)
### Deduplication - RNA
# I - all VJ combos
# Deduplicate RNA reads within each V/J gene combination.
#
# Unlike the gDNA variant, the adaptor is matched fuzzily: the first 15 bp of
# the adaptor ("CCACGCGTGCCCTAT") are located with Biostrings::vmatchPattern
# allowing up to one mismatch.  The 6 nt barcode immediately upstream of the
# match is extracted, and reads with a previously-seen barcode in the same
# V/J pool are flagged as duplicates.
#
# infile_rna_III: filtered annotation data.frame with None_* columns.
# Returns the data.frame with the flagged duplicate reads removed.
dedup_rna <- function(infile_rna_III) {
  None_raw_input_col <- grep("None_raw_input", colnames(infile_rna_III))
  None_seq_id_col <- grep("None_seq_id", colnames(infile_rna_III))
  dedup_out <- vector()
  VJ <- unique(paste(infile_rna_III$None_v_gene_gene, infile_rna_III$None_j_gene_gene, sep = "_"))
  # seq_along() rather than 1:length(): the latter iterates c(1, 0) when the
  # combination list is empty.
  for (j in seq_along(VJ)) {
    VJ_combo <- VJ[j]
    print(VJ_combo)
    V <- strsplit(VJ_combo, "_")[[1]][1]
    J <- strsplit(VJ_combo, "_")[[1]][2]
    VJ_tab <- infile_rna_III[which(infile_rna_III$None_v_gene_gene == V & infile_rna_III$None_j_gene_gene == J), c(None_seq_id_col, None_raw_input_col)]
    vjset <- DNAStringSet(VJ_tab$None_raw_input)
    # match first 15 bp of adaptor, one mismatch allowed
    adapt1_Ir <- vmatchPattern("CCACGCGTGCCCTAT", vjset, min.mismatch = 0, max.mismatch = 1)
    adapt1 <- unlist(lapply(adapt1_Ir, function(x) length(x)))
    # Keep only reads with at least one adaptor hit, then re-run the match on
    # that subset so indices line up with VJ_adapt1.
    VJ_adapt1 <- VJ_tab[adapt1 > 0, ]
    vjset <- DNAStringSet(VJ_adapt1$None_raw_input)
    adapt1f_Ir <- vmatchPattern("CCACGCGTGCCCTAT", vjset, min.mismatch = 0, max.mismatch = 1)
    adapt1f <- adapt1[adapt1 > 0]
    starts <- vector()
    NNs <- vector()
    if (nrow(VJ_adapt1) > 0) { # handle nrow(VJ_adapt1)==0 exception
      for (i in seq_len(dim(VJ_adapt1)[1])) {
        # Start of the read's final adaptor match; the 6 nt barcode sits
        # immediately upstream of it.
        st <- adapt1f_Ir[[i]][[adapt1f[i]]][1]
        NNs <- c(NNs, substr(VJ_adapt1$None_raw_input[i], st - 6, st - 1))
        starts <- c(starts, st)
      }
    } else NNs <- 0
    dedup <- VJ_adapt1$None_seq_id[duplicated(NNs)]
    if (length(dedup) > 0) { dedup_out <- c(dedup_out, dedup) }
  }
  # Returned (last evaluated expression): input minus the flagged reads.
  infile_rna_IV <- infile_rna_III[!infile_rna_III$None_seq_id %in% dedup_out, ]
}
#infile_rna<-read.csv("EK-RNA-sort041717-062717_S10_mergedpairedend_15overlap_sample.csv")
# filter I - kappa chain only
#chaincol<-grep("chain",colnames(infile_rna))
#infile_rna_I<-infile_rna[which(infile_rna[,chaincol]=="kappa"),]
# filter 2 - “_nt_identity_v" – 95% and above
#nt_identity_v_col<-grep("_nt_identity_v",colnames(infile_rna_I))
#infile_rna_II<-infile_rna_I[which(infile_rna_I[,nt_identity_v_col]>=95),]
# filter III
# Vk gene length >= 150bp.
#Vk_gene_length_col<-grep("_germ_alignments_nt_var_query",colnames(infile_rna_II))
#glength<-apply(infile_rna_II,1,function(x) nchar(x[Vk_gene_length_col]))
#infile_rna_III<-infile_rna_II[which(glength>=150),]
# Optionally deduplicate by input type.  isTRUE() replaces the original
# `dedup == T` comparison: `T` is a reassignable shortcut and `== T` is
# NA-unsafe, whereas isTRUE() only runs the branch for a genuine TRUE.
if (isTRUE(dedup)) {
  if (input_type == "gDNA") {
    infile_IV <- dedup_gdna(infile_III)
  } else if (input_type == "RNA") {
    infile_IV <- dedup_rna(infile_III)
  } else {
    stop("please specify a valid input type ('gDNA' or 'RNA')")
  }
  cat(paste0("Deduplication - ", input_type), file = logfile, sep = "\n")
  cat(paste0("Reads removed: ", nrow(infile_III) - nrow(infile_IV)), file = logfile, sep = "\n")
  cat(paste0("Infile IV rows: ", dim(infile_IV)[1]), file = logfile, sep = "\n")
  cat("-----", file = logfile, sep = "\n")
} else infile_IV <- infile_III
## Output stats
# I. All Jks combined
# 1. Percentage of each V gene from the total number of reads.
vgenes <- table(infile_IV$None_v_gene_full)
vgenes_percent <- (vgenes / dim(infile_IV)[1]) * 100
# 2. Percentage of productive and non-productive reads from the total.
prod <- table(infile_IV$None_prod)
prod_percent <- (prod / dim(infile_IV)[1]) * 100
# 3. V gene percentage within productive and non-productive pools
#    (e.g. % Vk1-88 productive within the productive pool or the total pool).
v_genes <- unique(infile_IV$None_v_gene_full)
v_genes_prod <- sapply(v_genes, function(x) length(which(infile_IV$None_v_gene_full == x & infile_IV$None_prod == "yes")))
v_genes_nonprod <- sapply(v_genes, function(x) length(which(infile_IV$None_v_gene_full == x & infile_IV$None_prod == "no")))
prod_total <- (v_genes_prod / dim(infile_IV)[1]) * 100
# NOTE(review): prod[1]/prod[2] rely on table()'s alphabetical level order
# ("no" before "yes") and on both levels being present -- confirm upstream.
prod_prod <- (v_genes_prod / prod[2]) * 100
nonprod_total <- (v_genes_nonprod / dim(infile_IV)[1]) * 100
nonprod_nonprod <- (v_genes_nonprod / prod[1]) * 100
outtab_allJks <- cbind(vgenes[v_genes], vgenes_percent[v_genes], v_genes_prod, prod_total, prod_prod, v_genes_nonprod, nonprod_total, nonprod_nonprod)
colnames(outtab_allJks)[1:2] <- c("total occurrences", "% total occurrences")
# Order rows by the numeric family number parsed from names like "IGKV1-88*01".
v_genes_ord <- sapply(v_genes, function(x) as.numeric(strsplit(x, "[-*]")[[1]][2]))
outtab_allJks <- outtab_allJks[order(v_genes_ord), ]
library(xlsx)
write.xlsx(outtab_allJks, file = paste0(sub(".csv", "", input_file), "_dedup_", dedup, "_output_stats_allJk.xlsx"))
# gDNA: keep only exact matches of Jk primer
if(input_type=="gDNA"){
# Jk primer sequences (Jk1/2/4/5).  The trailing [ACTG] character class
# requires at least one additional real base after the primer.
Jk_seqs<-c("CGTTCGGTGGAGGCACCAAGCTGGAAAT[ACTG]","ACGTTCGGAGGGGGGACCAAGCTGGAAATAAAACGTAAG[ACTG]","TTCACGTTCGGCTCGGGGACAAAGT[ACTG]","CGTTCGGTGCTGGGACCAAGCTGGAGCTGAAAC[ACTG]")
infile_exact<-sapply(infile_IV$None_raw_input,function(x) any(str_detect(x,Jk_seqs)))
infile_IV_exact<-infile_IV[infile_exact,]
cat(paste0("Jk exact matches: ",dim(infile_IV_exact)[1]),file = logfile, sep="\n")
# reassign Jks: reads matching one of the cross-amplification patterns below
# are re-labelled with the indicated Jk gene.
# Jk1 reassignment
Jk1_reassign<-c("CGTTCGGTGGAGGCACCAAGCTGGAAATAAAACGTAAG[ACTG]","CGTTCGGTGGAGGGACCAAGCTGGAAATAAAACGTAAG[ACTG]","TGGACGTTCGGCTCGGGGACAAAGT[ACTG]","CGTTCGGTGGAGGCACCAAGCTGGAGCTGAAAC[ACTG]","CGTTCGGTGGAGGGACCAAGCTGGAGCTGAAAC[ACTG]")
Jk2_reassign<-c("GTACACGTTCGGCTCGGGGACAAAGT[ACTG]","ACGTTCGGAGGGGGGACCAAGCTGGAGCTGAAAC[ACTG]")
Jk4_reassign<-"CGTTCGGCTCGGGGACCAAGCTGGAAATAAAA[ACTG]"
Jk5_reassign<-c("CGTTCGGTGCTGGGACCAAGCTGGAAATAAAA[ACTG]","CTCACGTTCGGCTCGGGGACAAAGT[ACTG]")
# Jk_reassign: subset reads matching any of Jk_reassign_seqs, overwrite their
# None_j_gene_gene with target_reassign, log the count, and return both the
# re-labelled block and the logical match vector.
Jk_reassign<-function(infile_IV,Jk_reassign_seqs,target_reassign){
None_raw_input_col<-grep("None_raw_input",colnames(infile_IV))
None_j_gene_gene_col<-grep("None_j_gene_gene",colnames(infile_IV))
Jk_reassign_count<-sapply(infile_IV$None_raw_input,function(x) any(str_detect(x,Jk_reassign_seqs)))
block<-infile_IV[Jk_reassign_count,]
if(nrow(block)>0) block$None_j_gene_gene<-target_reassign
#Jk_reassign_vec<-apply(infile_IV,1,function(x) ifelse(any(str_detect(x[None_raw_input_col],Jk_reassign_seqs)),target_reassign,x[None_j_gene_gene_col]))
cat(paste0(target_reassign," reassignments: ",length(which(Jk_reassign_count))),file = logfile, sep="\n")
return(list(block,Jk_reassign_count))
}
# Accumulate the re-labelled (cross-amplified) reads for each Jk gene.
infile_IV_crossampl<-vector()
Jk1_reassign_vec<-Jk_reassign(infile_IV,Jk1_reassign,"IGKJ1")
infile_IV_crossampl=rbind(infile_IV_crossampl,Jk1_reassign_vec[[1]])
Jk2_reassign_vec<-Jk_reassign(infile_IV,Jk2_reassign,"IGKJ2")
infile_IV_crossampl=rbind(infile_IV_crossampl,Jk2_reassign_vec[[1]])
Jk4_reassign_vec<-Jk_reassign(infile_IV,Jk4_reassign,"IGKJ4")
infile_IV_crossampl=rbind(infile_IV_crossampl,Jk4_reassign_vec[[1]])
Jk5_reassign_vec<-Jk_reassign(infile_IV,Jk5_reassign,"IGKJ5")
infile_IV_crossampl=rbind(infile_IV_crossampl,Jk5_reassign_vec[[1]])
# Total reassignments: the match vectors are looked up by name via get().
Jks_vec<-paste0("Jk",c(1,2,4,5),"_reassign_vec")
cat(paste0("Total: ", sum(sapply(Jks_vec,function(x) length(which(get(x)[[2]]))))),file = logfile, sep="\n")
# Exact-primer matches plus re-labelled cross-amplified reads, with duplicate
# rows dropped.
infile_IV_combined<-rbind(infile_IV_exact,infile_IV_crossampl)
infile_IV_combined=infile_IV_combined[!duplicated(infile_IV_combined),]
}
# Per-Jk summary statistics.
#
# For one target Jk gene, count reads per V gene and express the productive /
# non-productive counts as percentages of the relevant pools (all reads with
# the target Jk, its productive subset, its non-productive subset).
#
# infile:   annotation data.frame with None_v_gene_full, None_j_gene_gene and
#           None_prod ("yes"/"no") columns.
# targetJk: Jk gene name, e.g. "IGKJ1".
# Returns a numeric matrix with one row per V gene (ordered by the numeric
# family number parsed from names like "IGKV1-88*01") and eight columns.
individual_Jk_stats <- function(infile, targetJk) {
  v_genes <- unique(infile$None_v_gene_full)
  block <- vector()
  # seq_along() rather than 1:length(): the latter iterates c(1, 0) when the
  # V-gene list is empty.
  for (x in seq_along(v_genes)) {
    # Reads with this V gene AND the target Jk.
    total_Jk_gene <- infile[which(infile$None_v_gene_full == v_genes[x] & infile$None_j_gene_gene == targetJk), ]
    Jk_total <- length(which(infile$None_j_gene_gene == targetJk))
    total_Jk_gene_percent <- (nrow(total_Jk_gene) / Jk_total) * 100
    Jk_prod <- length(which(total_Jk_gene$None_prod == "yes"))
    Jk_total_prod <- length(which(infile$None_j_gene_gene == targetJk & infile$None_prod == "yes"))
    Jk_total_nonprod <- length(which(infile$None_j_gene_gene == targetJk & infile$None_prod == "no"))
    Jk_prod_prod_percent <- (Jk_prod / Jk_total_prod) * 100
    Jk_prod_total_percent <- (Jk_prod / Jk_total) * 100
    Jk_nonprod <- length(which(total_Jk_gene$None_prod == "no"))
    Jk_nonprod_nonprod_percent <- (Jk_nonprod / Jk_total_nonprod) * 100
    Jk_nonprod_total_percent <- (Jk_nonprod / Jk_total) * 100
    block <- rbind(block, c(nrow(total_Jk_gene), total_Jk_gene_percent, Jk_prod, Jk_prod_prod_percent, Jk_prod_total_percent, Jk_nonprod, Jk_nonprod_nonprod_percent, Jk_nonprod_total_percent))
  }
  colnames(block) <- c(targetJk, paste0(targetJk, " %"), paste0(targetJk, "_prod"), paste0(targetJk, "_prod/total_", targetJk, "_prod"), paste0(targetJk, "_prod/total_", targetJk), paste0(targetJk, "_nonprod"), paste0(targetJk, "_nonprod/total_", targetJk, "_nonprod"), paste0(targetJk, "_nonprod/total_", targetJk))
  rownames(block) <- v_genes
  v_genes_ord <- sapply(v_genes, function(x) as.numeric(strsplit(x, "[-*]")[[1]][2]))
  block <- block[order(v_genes_ord), ]
  block
}
# Slim per-read table kept for the simplified output workbook.
infile_IV_simple<-infile_IV[,c("None_v_gene_full","None_v_gene_gene","None_j_gene_full","None_j_gene_gene","None_seq_id","None_chain","None_cdr3_nt","None_cdr3_len","None_nt_identity_v","None_germ_alignments_nt_var_query","None_prod","None_raw_input")]
if(input_type=="RNA"){
# RNA: per-Jk stats on the deduplicated table only.
Jk_individual_stats_tab<-do.call("cbind",lapply(c("IGKJ1","IGKJ2","IGKJ4","IGKJ5"),function(x) individual_Jk_stats(infile_IV,x)))
write.xlsx(Jk_individual_stats_tab,file=paste0(sub(".csv","",input_file),"_dedup_",dedup,"_output_stats_individual_Jks.xlsx"))
write.xlsx2(infile_IV_simple,file=paste0(sub(".csv","",input_file),"_dedup_",dedup,"_Abstar_input_simplified.xlsx"))
}else if(input_type=="gDNA"){
# gDNA: per-Jk stats for the exact-primer-match reads and for the combined
# (exact + cross-amplified) reads.
Jk_stats_exact_tab<-do.call("cbind",lapply(c("IGKJ1","IGKJ2","IGKJ4","IGKJ5"),function(x) individual_Jk_stats(infile_IV_exact,x)))
write.xlsx(Jk_stats_exact_tab,file=paste0(sub(".csv","",input_file),"_dedup_",dedup,"_output_stats_individual_Jks_exact.xlsx"))
Jk_stats_combined_tab<-do.call("cbind",lapply(c("IGKJ1","IGKJ2","IGKJ4","IGKJ5"),function(x) individual_Jk_stats(infile_IV_combined,x)))
write.xlsx(Jk_stats_combined_tab,file=paste0(sub(".csv","",input_file),"_dedup_",dedup,"_output_stats_individual_Jks_combined.xlsx"))
# Per-read flags: matched a Jk primer exactly / via cross-amplification.
exact_match<-sapply(infile_IV$None_seq_id,function(x) ifelse(x%in%infile_IV_exact$None_seq_id,"yes","no"))
crossampl_match<-sapply(infile_IV$None_seq_id,function(x) ifelse(x%in%infile_IV_crossampl$None_seq_id,"yes","no"))
exact_or_crossampl_match<-sapply(infile_IV$None_seq_id,function(x) ifelse(x%in%infile_IV_exact$None_seq_id|x%in%infile_IV_crossampl$None_seq_id,"yes","no"))
infile_IV_simple=cbind(infile_IV_simple,exact_match,crossampl_match,exact_or_crossampl_match)
write.xlsx2(infile_IV_simple,file=paste0(sub(".csv","",input_file),"_dedup_",dedup,"_Abstar_input_simplified.xlsx"))
}
close(logfile)
}
# CLI entry point: run the pipeline with the parsed optparse arguments.
HGTS_processing(opt$input_file,opt$input_type,opt$dedup)
|
7c234bee58685eb27bcb6c6adadc43219714a6f9 | 3b573c294b0edca37dfc78dc836056132bf3cced | /tests/testthat/test_trinROC.R | b6e694cd188d5daa61044e007cfb0b8afe1b596e | [] | no_license | cran/trinROC | 86511c3cb3b640da01979f9fee58dc5fa7dd2e67 | a3c96fffe4ef2bf8b37fef7168bd3307944b7095 | refs/heads/master | 2022-11-10T03:44:34.633497 | 2022-10-27T22:40:02 | 2022-10-27T22:40:02 | 147,187,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,806 | r | test_trinROC.R | ###############################################################################
## test.R ---
## Author           : Samuel Noll
## Document created : 21/12/2017
## Last modified    : ././2018 Description
##                  : RF: 26/05/2019 adapt seed change
##                  : AC: 20/10/2022 adapt SD() and VAR() changes
###############################################################################
## Purpose: Test the trinROC package.
# Expected values differ by R version because the default RNG for sample()
# changed in R 3.6.0 ("Rounding" -> "Rejection" sample.kind).
# NOTE(review): R.version$minor/$major are character strings ("6.1" etc.), so
# `< 6` / `< 4` compare lexicographically after coercion -- confirm this
# branch still picks the right constants on current R versions.
seedvalues <- if (R.version$minor<6 & R.version$major<4) c(.24,.537,1.163) else c(.25,.534,1.145)
# In this file we test all functions of trinROC.
# Each function is embedded in one context().
# Order is alphabetically
context("Test boot.test, trinVUS.test and trinROC.test:")
# generate test data: three diagnostic classes (D-, D0, D+), two markers.
n<-5
set.seed(1)
x1 <-rnorm(n)
y1 <-rnorm(n, mean=.3)
z1 <-rnorm(n, mean=.6)
x2<-rnorm(n,mean=4,sd=1)
y2<-rnorm(n, mean=5,sd=1.5)
z2<-rnorm(n, mean=6,sd=2)
classes <- gl(3,n, labels = c("D-","D0","D+"))
# combine sampled data with cp (chance plane) and cs (complete separation) data:
dat <- data.frame(group=classes, FAC1=c(x1,y1,z1), FAC2=c(x2,y2,z2),
cp = as.numeric(c(1:5,1:5,1:5)),
cs = as.numeric(c(1:5,6:10,11:15)) )
test_that("boot.test() function", {
# test perfect discrimination (cs):
temp <- boot.test(dat = dat[,c(1,5)])
expect_equal(temp$p.value, 0)
# here emp.vus is tested:
expect_equal(unname(temp$estimate[1]), 1)
# test uniformative case (cp):
temp <- boot.test(dat = dat[,c(1,4)])
expect_equal(temp$p.value, 1)
expect_equal(unname(round(temp$estimate[1],3)), 0.167)
# test sampled data, single assessment:
temp <- boot.test(dat = dat[,1:2])
expect_equal(round(temp$p.value,2), seedvalues[1])
expect_equal(unname(round(temp$estimate[1],3)), 0.368)
# test sampled data, comparison of markers:
temp <- boot.test(dat = dat[,1:3])
expect_equal(round(temp$p.value,2), 0.59)
expect_equal(unname(round(temp$statistic,3)), seedvalues[2])
})
test_that("trinVUS.test() function", {
# test perfect discrimination (cs):
temp <- trinVUS.test(dat = dat[,c(1,5)])
expect_equal(round(temp$p.value,3), 0)
expect_equal(unname(round(temp$estimate[1],3)), 0.988)
# test uniformative case (cp):
temp <- trinVUS.test(dat = dat[,c(1,4)])
expect_equal(temp$p.value, 1)
expect_equal(unname(round(temp$estimate[1],3)), 0.167)
# test sampled data, single assessment:
temp <- trinVUS.test(dat = dat[,1:2])
expect_equal(round(temp$p.value,2), 0.3)
expect_equal(unname(round(temp$estimate[1],3)), 0.306)
# test sampled data, comparison of markers:
temp <- trinVUS.test(dat = dat[,1:3])
expect_equal(round(temp$p.value,2), 0.66)
expect_equal(unname(round(temp$statistic,3)), 0.445)
})
test_that("trinROC.test() function", {
# test perfect discrimination (cs):
temp <- trinROC.test(dat = dat[,c(1,5)])
expect_equal(round(temp$p.value,3), 0)
expect_equal(unname(round(temp$statistic,3)), 24.194)
# test uniformative case (cp):
temp <- trinROC.test(dat = dat[,c(1,4)])
expect_equal(temp$p.value, 1)
expect_equal(unname(round(temp$statistic,3)), 0)
# test sampled data, single assessment:
temp <- trinROC.test(dat = dat[,1:2])
expect_equal(round(temp$p.value,2), 0.07)
# check arbitrarily parameter A:
expect_equal(as.numeric(round(temp$estimate[2],3)), 0.696)
# test sampled data, comparison of markers:
temp <- trinROC.test(dat = dat[,1:3])
expect_equal(round(temp$p.value,2), 0.44)
# check arbitrarily parameter D2:
expect_equal(round(temp$estimate[2,5],3), 0.144)
})
context("Test EDA functions: roc.eda(), rocsurf.emp(), rocsurf.trin():")
test_that("roc.eda(), whole functionality", {
temp <- roc.eda(dat = dat[,1:2], plotVUS = FALSE)
expect_equal(unname(round(temp$statistic,3)), seedvalues[3])
expect_equal(unname(temp$VUS), 0.368)
expect_equal(unname(round(temp$dat.summary[1,3],3)), 0.86)
temp <- roc.eda(dat = dat[,1:2], type = "t", plotVUS = FALSE)
expect_equal(unname(round(temp$statistic,3)), c(8.690,1.041))
expect_equal(unname(round(temp$VUS,3)), 0.306)
expect_equal(unname(round(temp$dat.summary[1,3],3)), 0.86)
})
test_that("rocsurf.emp(), whole functionality", {
temp <- rocsurf.emp(x1, y1, z1, plot=FALSE)
expect_equal(temp$zVUS[1,], c(0,0,0,0,.2,.2,.4,.4,.4,.6,.8,.8,1,1,1,1))
temp <- rocsurf.emp(1:5, 1:5, 1:5, plot=FALSE)
expect_equal(temp$zVUS[1,], c(0,.2,.4,.6,.8,1))
})
test_that("rocsurf.trin(), whole functionality", {
temp <- rocsurf.trin(x1,y1,z1, plot=FALSE)
expect_equal(round(temp$zVUS[1,133],3), 0.904)
})
context("Test supplementory functions: emp.vus(), findmu(), boxcoxROC():")
test_that("emp.vus(), whole functionality", {
expect_true(suppressWarnings(is.na(emp.vus("a","v","c"))))
expect_error(emp.vus())
expect_error(emp.vus(x, y))
# generate test data:
n<-100
set.seed(1)
x<-rnorm(n); y<-rnorm(n, 0.4); z<-rnorm(n, .8)
classes1 <- gl(3,n, labels = c("D-","D0","D+"))
classes2 <- gl(3,n, labels = c("c","a","b"))
dat1 <- data.frame(group=classes1, FAC1=c(x,y,z))
dat2 <- data.frame(group=classes2, FAC1=c(x,y,z))
# compare output:
expect_equal(emp.vus(x,y,z), emp.vus(dat = dat1))
expect_equal(emp.vus(x,y,z), emp.vus(dat = dat2))
expect_equal(emp.vus(x,y,z), emp.vus(1,1,1, dat = dat1))
expect_error(emp.vus(x, y))
})
test_that("boxcoxROC(), whole functionality", {
temp <- boxcoxROC(x1,y1,z1, lambda2 = abs(min(c(x1,y1,z1)))+6,
lambda = seq(-2.01, 2.01, 0.02), eps = 0.03, verbose = F)
expect_equal(round(temp$xbc[1],3), 19,769)
expect_equal(round(temp$lambda,3), 1.87)
})
test_that("findmu(), whole functionality", {
temp <- findmu(mux = 2, sdx = 1, sdy = 2, sdz = 4, VUS = 0.5)
expect_equal(round(temp$Coeff,3), c(2,3.212,6.424, 0.5))
})
|
4e10a2000de2266531e1df688f2bd3363a48d6b9 | 5f3c70f720b234b2239c6bc5c9690767ec604df0 | /MarousisA_Inclass2_Part1.R | 784ed919409def85c0de82aa623e76fd825cf561 | [] | no_license | am8739/CMDA | efdd75da4dc32a7f89ac1cc235018a73dad0d903 | d22979143ab72a0eb081baf44489de4a214612d2 | refs/heads/master | 2021-01-02T22:51:45.822861 | 2014-09-15T15:15:51 | 2014-09-15T15:15:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | MarousisA_Inclass2_Part1.R | #Lecture 7
getwd()
setwd('C:\Users\amandamarousis\CMDA')
#The health insurance customer data
load('exampleData.rData')
#Examine data
names(custdata)
dim(custdata)
class(custdata)
#Summary statistics
summary(custdata) #for the entire data frame
#look at individual variables to spot problems
summary(custdata$is.employed)
#
summary(custdata$income)
#There are 328 missing values, which could point to an error because it is extreme
#A minimum income of zero might not be useful in the data
summary(custdata$age)
#The minimum is fairly useless to our analysis.
#The Maximum is an outlier that seems to be an entry error so we should delete it.
|
0b4be6857495b00eaa42f64d63b37e66b906716a | c9208d8e7a16364ef4a811bb16a53fa1ce27634f | /cachematrix.R | dc2215f145141ed16caf43114a69a726805e32ec | [] | no_license | jd2g/ProgrammingAssignment2 | adfb6d8ffd60a85d54995642174e4e48309aca70 | 4f888d2e5c8310ed86d8f8284bcfcd6c31a36a1e | refs/heads/master | 2021-01-22T11:29:09.383352 | 2015-05-24T23:10:35 | 2015-05-24T23:10:35 | 35,384,545 | 0 | 0 | null | 2015-05-10T19:20:21 | 2015-05-10T19:20:21 | null | UTF-8 | R | false | false | 1,563 | r | cachematrix.R | ## This code is for Coursera Data Science Specialization 2015.
## It is based in the Example: "Caching the Mean of a Vector"
## provided in the readme to introduce the assign operator `<<-`
## Contains a pair of functions to compute the inverse of a matrix
## To get the result the function includes a cache to store the value
## to speed calculations.
## This function creates a special "matrix" object
## that can cache its inverse.
## Build a matrix wrapper that memoises its inverse.
##
## Returns a list of four accessor closures that share the matrix `x` and a
## cached inverse through their enclosing environment:
##   set(y)          -- replace the matrix and invalidate the cache
##   get()           -- return the current matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(mInverse) {
    cached_inverse <<- mInverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If the wrapper already holds a cached inverse, it is returned directly
## (with a message).  Otherwise the inverse is computed with solve(), stored
## in the wrapper's cache, and returned.  Extra arguments in `...` are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached data...")
    # Early exit: reuse the stored inverse instead of recomputing it.
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
601030efaccad3bbabdacfbb79c1d8bdfce5d38e | 9517d726128e5a65541ffb253225e50b0ba14a46 | /fibbonaci.r | a77f56ad6e46bb7e28fc9f735729a2c6645a24d6 | [] | no_license | KiltzX/r-repository | 8095b39b79d021fd1b0651fa41fef00996f309e9 | 0af8c336f2275c278718ffb57a3e07298bd7079c | refs/heads/master | 2021-05-22T00:03:31.286623 | 2020-04-04T01:16:17 | 2020-04-04T01:16:17 | 252,872,649 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 118 | r | fibbonaci.r | #Fibbonaci
n1 <- 0
n2 <- 1
n3 <- 0
while (n3 <= 200){
n3 <- n2 + n1
n1 <- n2
n2 <- n3
print(n3)
} |
1346710eefcd9b1fc489d2af303d21d2f92b2daf | 1b149e5703bd6acff71f52ef5a880ee8461c7bbc | /Combine_MotifEvalue_with_Mismatch.R | 4a3f5fc37b93a0324e36ea6fca0571e355e6c686 | [] | no_license | Tlicknack/Paramecium_Motif | 8e02f192c8e25545137623d02e2f49eac8af98c8 | 20594156b15b43d2c5573a7f505adc6a424814f1 | refs/heads/master | 2020-03-14T19:18:57.666174 | 2018-11-26T21:52:53 | 2018-11-26T21:52:53 | 131,758,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,116 | r | Combine_MotifEvalue_with_Mismatch.R | #DONE
#This script will take a csv which has motif, evalue, wgd_family and another table attached to it that has file and mismatch per site... can change to include two separate tables
#It will combine them on the basis of the file from which the motif/evalue and mismatch count came
mismatch_table = read.csv("All_Aurelia_12ntMotif_MismatchPerSite.csv", sep=",", header=T)
motif_table = read.csv("All_Aurelias_12nt_Regexs_Evalues.csv", sep=",", header=T)
wgd_families = as.character(unique(motif_table$File))
final_df = data.frame(matrix(ncol = 4))
colnames(final_df) = c("Regex", "E_Value", "File", "rep_mismatch")
for(family in wgd_families){
family_df = motif_table[which(motif_table$File == family),]
file_df = mismatch_table[which(mismatch_table$File == family),]
if(nrow(file_df) > 0){
rep_mismatch = rep(file_df$Mismatch.Site, times=nrow(family_df))
new_df = as.data.frame(cbind(family_df, rep_mismatch))
final_df = as.data.frame(rbind(final_df, new_df))
}
}
final_df = final_df[-1,]
write.csv(final_df, file="All_Aurelias_12nt_Motif_Evalue_Mismatch.csv", sep=",", row.names=F)
|
55f13d79706e753fb186411503cebad7a22f1619 | 63caf4d9e0f4b9c9cb5ab101f5795a94f27d575d | /man/landsatAdd.Rd | 9bd6ef43890fe80d2e8c3cc24366d14bc311bfb0 | [] | no_license | marie-geissler/oce | b2e596c29050c5e2076d02730adfc0c4f4b07bb4 | 2206aaef7c750d6c193b9c6d6b171a1bdec4f93d | refs/heads/develop | 2021-01-17T20:13:33.429798 | 2015-12-24T15:38:23 | 2015-12-24T15:38:23 | 48,561,769 | 1 | 0 | null | 2015-12-25T01:36:30 | 2015-12-25T01:36:30 | null | UTF-8 | R | false | false | 1,779 | rd | landsatAdd.Rd | % vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
\name{landsatAdd}
\alias{landsatAdd}
\title{Add a band to a landsat image}
\description{Add a band to a landsat image}
\usage{landsatAdd(x, data, name, debug=getOption("oceDebug"))}
\arguments{
\item{x}{A \code{landsat} object, e.g. as read by \code{\link{read.landsat}}.}
\item{data}{A matrix of data, with dimensions matching that of entries already in \code{x}.}
\item{name}{Name to be used for the data.}
\item{debug}{a flag that turns on debugging. Set to 1 to get a moderate amount of debugging
information, or a higher value for more debugging.}
}
\details{This may be used to add new ``bands'' to a landsat object. These are
stored differently than the real bands. Real bands are stored in one or two
byte (raw) matrices (see \code{\link{landsat-class}}) but added bands are
conventional numerical matrices. See examples.}
\value{An object of \code{\link[base]{class}} \code{"landsat"}, with a new
``band''.}
\seealso{The documentation for \code{\link{landsat-class}} explains the
structure of landsat objects, and also outlines the other functions dealing
with them.}
\examples{
\dontrun{
library(oce)
## Winter-time Nova Scotia and surrounding seas
ns <- read.landsat("LC80080292014065LGN00", band="tirs1")
tirs1 <- ns[["tirs1"]]
ML <- ns@metadata$header$radiance_mult_band_10
AL <- ns@metadata$header$radiance_add_band_10
K1 <- ns@metadata$header$k1_constant_band_10
K2 <- ns@metadata$header$k2_constant_band_10
Llambda <- ML * tirs1 + AL
d <- K2 / log(K1 / Llambda + 1)
temperature <- d - 273.15
temperature[tirs1 == 0] <- NA
ns <- landsatAdd(ns, temperature, "temperature")
plot(ns, band="temperature")
title(ns[['time']])
}
}
\author{Dan Kelley}
\keyword{misc}
|
630ef80568712323826bfff3c77244f2ec947ea5 | 78dba4fac357dfd416a339b711452fecdc30cdd3 | /R/getMethylInfo.R | 744a19fe787b33d24ea3c144d53d013777cca73b | [] | no_license | pterzian/dmprocr | 55aadaa7db38ddaf065e6d5aa60499a3b5f9fd64 | 6ddfd34738b821f9df49f5aacbf053a9ca72890d | refs/heads/master | 2020-06-20T05:49:41.720514 | 2017-07-25T23:08:30 | 2017-07-25T23:08:30 | 94,194,039 | 0 | 0 | null | 2017-06-13T09:15:30 | 2017-06-13T09:15:30 | null | UTF-8 | R | false | false | 1,648 | r | getMethylInfo.R | #'getMethylInfo
#'
#'Return a subset of diff_meth_study list for a given gene bedline and a size of window
#'
#'@param diff_meth_study list composed of a methylation differential table, an exp_grp dataframe and a platform
#'@param bedline is a line of of the bedfile dataframe
#'@param win is the width of the window on the chromosome in bp where the function will fetch probes position and differential methylation value. Default is 5000 bp
#'@param pf_chr_colname string matching the name of the column in the platform that contain the chromosome on which we find a probes
#'@param pf_pos_colname string matching the name of the column in the platform that contain the position information of probes
#'
#'@example examples/example-dmRandomDataset.R
#'@example examples/example-dmTable.R
#'@example examples/example-getMethylInfo.R
#'
#'@export
getMethylInfo <- function(diff_meth_study, bedline, win = 5000, pf_chr_colname="Chromosome", pf_pos_colname="Start") {
strand <- bedline[[6]]
if(strand == "-"){
txstart <- as.numeric(bedline[[3]])
} else{
txstart <- as.numeric(bedline[[2]])
}
chrom <- bedline[[1]]
idx = diff_meth_study$platform[[pf_chr_colname]] == chrom & diff_meth_study$platform[[pf_pos_colname]] < txstart + 5000 & diff_meth_study$platform[[pf_pos_colname]] > txstart - 5000
probes = rownames(diff_meth_study$platform)[idx]
data <- diff_meth_study$data[idx, ]
data <- data[, !colSums(is.na(data)) > 0]
gene_study_info <- list(data = data, probes = diff_meth_study$platform[probes,], promoterPos = txstart, strand = strand, id = bedline[[4]])
return(gene_study_info)
} |
686bd05e9cfb98c14dbef95c6ffd9afb5abb5260 | 617a2b428dbee689943fbeb0d7058592666dc33b | /keyUtilities.R | f5896c09dbad49d0f099f9bcc90240cea0ec5fd8 | [] | no_license | mksaraf/finance-stockStrategy | b4c9aacbc562e3de01e530dc5500d11dbeddc722 | 6cb09e8a08bf62256eb5e0d2de144fb70fc0c840 | refs/heads/master | 2021-09-27T14:26:47.496263 | 2018-11-09T04:03:16 | 2018-11-09T04:03:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,430 | r | keyUtilities.R | library(XML)
stocks <- c("AXP","BA",'aapl','bb','cat','cost')
getKeyStats <- function(stk){
stk = toupper(stk)
url <- paste0("http://finviz.com/quote.ashx?t=", stk)
webpage <- readLines(url)
html <- htmlTreeParse(webpage, useInternalNodes = TRUE, asText = TRUE)
tableNodes <- getNodeSet(html, "//table")
# ASSIGN TO STOCK NAMED DFS
dt = data.table(readHTMLTable(tableNodes[[9]], header= c("data1", "data2", "data3", "data4", "data5", "data6",
"data7", "data8", "data9", "data10", "data11", "data12")))
dt = rbind(dt[,.(data1,data2)],dt[,.(data3,data4)],dt[,.(data5,data6)],dt[,.(data7,data8)],dt[,.(data9,data10)],dt[,.(data11,data12)],use.names=F )
dt = cbind(stk,dt)
colnames(dt)<- c('stk','key','value')
dt = dcast(dt,stk~key,value.var = 'value',fun=I,fill = NA)
return(dt)
}
slist = lapply(stocks,function(s) getKeyStats(s))
rbindlist(slist)
## Update Optionable Stocks: flag symbols with a liquid options chain in the
## USStocks database table.
library(data.table)
library(odbc)
# Read the full US stock universe from the "USStocks" ODBC DSN.
uss = DBI::dbReadTable(dbConnect(odbc::odbc(), "USStocks"),'USStocks')
# For each symbol, count option contracts; rbindlist(idcol=TRUE) keeps the
# symbol as the .id column.
# NOTE(review): getOptions() is not defined or loaded in this file
# (presumably quantmod::getOptions) -- confirm the package is attached
# before running this section.
optionable = rbindlist(sapply(uss$symbol,function(s) {data.table(nrow(getOptions(s))) }),idcol = T) ## to refresh optionalble
load(file = 'symbols.liq.rdata') ## Alternate
# Mark symbols with more than 30 option rows as optionable (one UPDATE each).
lapply(optionable[V1>30]$.id, function(x) {up = paste0("Update USStocks Set Options=1 where symbol ='",x,"'"); DBI::dbExecute(con, up )})
## END
297faa701afbbe665c95df1a5c1b8194fb2c9cb2 | 1cde4de03b724bc76fbc5d9baec0707088e70ac6 | /Notes/Slides/03_GettingData/Code/03_05_mergingData.R | f43e9e96bb429476fa6ad0b85a3c24be9a0665fc | [] | no_license | k173man/R | d4f937389e17dcacc29e6f70df7e54e1f2b54ccd | 893cc65b126c3289309c3d31762a6c34f5e49e2c | refs/heads/master | 2021-01-10T20:32:33.620837 | 2017-07-27T03:57:16 | 2017-07-27T03:57:16 | 31,445,963 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,008 | r | 03_05_mergingData.R | if(!file.exists("./data")) {
dir.create("./data")
}
fileUrl1 = "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
fileUrl2 = "https://dl.dropboxusercontent.com/u/7710864/data/solutions-apr29.csv"
download.file(fileUrl1,destfile="./data/reviews.csv",method="curl")
download.file(fileUrl2,destfile="./data/solutions.csv",method="curl")
reviews = read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
head(reviews,2)
head(solutions,2)
names(reviews)
names(solutions)
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
head(mergedData)
intersect(names(solutions),names(reviews))
mergedData2 = merge(reviews,solutions,all=TRUE)
head(mergedData2)
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
arrange(join(df1,df2),id)
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
df3 = data.frame(id=sample(1:10),z=rnorm(10))
dfList = list(df1,df2,df3)
join_all(dfList)
|
d6b6a40e508b5055713a67d80262372560412a79 | d1f82b18c7a6214872083d62f468fc95c34e4aab | /diss2/scripts/woodPrice.R | a4c29cb57d1faa401bd0b9180eb11a891fe92167 | [] | no_license | sbannist/publish | c3b40ee86ef4bcf24547f954d841e62f40867b8d | 8654b5ddfe3719998ac858399f8c48b59cfee229 | refs/heads/master | 2020-04-19T10:38:05.729098 | 2015-09-21T01:43:08 | 2015-09-21T01:43:08 | 9,862,659 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,048 | r | woodPrice.R | #woodPrice
## Plot English general vs wood price indices (1451-1702).
## Relies on top-level autoprinting of the ggplot chains and on ggsave()
## picking up last_plot(), so the statement order matters.
#wood = read.csv('../data/woodPrice.csv')
filepath <- c('../data/')
imagepath <- c('../images/')
## expects columns: midPoint (year), price (general index), wood (wood index)
wood <- read.csv(paste(filepath,'woodPrice.csv',sep=''),header=TRUE,stringsAsFactors=FALSE)
require(ggplot2)
## Figure 1: lines + distinct point shapes for the two series
ggplot(data=wood)+
  geom_line(aes(x=midPoint,y=price,colour='blue'),size=1)+
  # geom_point(aes(x=midPoint,y=price),shape=2,colour='blue',size=4)+
  geom_line(aes(x=midPoint,y=wood,colour='red'),size=1)+
  # geom_point(aes(x=midPoint,y=wood),shape=8,colour='red',size=4)+
  ## manual colour scale doubles as the legend for the two series
  scale_colour_manual(name='Price index',values=c('blue','red'),
                      # breaks=c('price','wood'),
                      labels=c('General prices','Wood prices'),
                      guide = guide_legend(reverse=TRUE))+
  # scale_shape_manual(values=c(2,8))+
  # scale_shape_identity()+
  geom_point(aes(x=midPoint,y=price),colour='blue',shape=2,size=4)+
  # geom_point(aes(shape=factor(price)))+
  # scale_shape(solid=FALSE)+
  geom_point(aes(x=midPoint,y=wood),colour='red',shape=8,size=4)+
  theme(legend.position=c(0.1,0.9),legend.background = element_rect(fill="gray90"))+
  labs(title='English general and wood price indices, 1451 - 1702,\n 1451 - 1500 = 100',
       y='Price indices, 1451-1500 = 100',x='Year')
## ggsave() with no plot= argument saves last_plot(), i.e. the chain above
ggsave(file=paste(imagepath,'woodPrice.png',sep='')) #to save file just printed, so change file name as appropriate
## Figure 2: points-only variant of the same comparison (not saved to disk)
ggplot(data=wood)+
  # geom_line(aes(x=midPoint,y=price,colour='blue'),size=1)+
  geom_point(aes(x=midPoint,y=price,colour='blue'),shape=2,size=4)+
  # geom_line(aes(x=midPoint,y=wood,colour='red'),size=1)+
  geom_point(aes(x=midPoint,y=wood,colour='red'),shape=8,size=4)+
  scale_colour_manual(name='Price index',values=c('blue','red'),
                      # breaks=c('price','wood'),
                      labels=c('General prices','Wood prices'),
                      guide = guide_legend(reverse=TRUE))+
  # scale_shape_manual(values=c('blue','red'))+
  theme(legend.position=c(0.1,0.9),legend.background = element_rect(fill="gray90"))+
  labs(title='English general and wood price indices, 1451 - 1702,\n 1451 - 1500 = 100',
       y='Price indices',x='Year')
|
c50bb56469124da75a3c692c0e1d4209dd650c68 | 2251dce56bd92b89ba4651baffccffdcd6a6f06c | /man/Logrank.stat.tie.Rd | 1611634e67237c1cf494625913983c2f42c0b444 | [] | no_license | cran/depend.truncation | c6d8bc5aa98f6fc0647cf18ebf42e95c2a7cd5da | ede799f2324e4ce0147b3b50456b97e67bf9caa6 | refs/heads/master | 2021-05-04T09:34:01.450749 | 2018-02-27T11:43:41 | 2018-02-27T11:43:41 | 17,695,460 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,931 | rd | Logrank.stat.tie.Rd | \name{Logrank.stat.tie}
\alias{Logrank.stat.tie}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
The weighted log-rank statistics for testing quasi-independence (with ties in data)
}
\description{
The three log-rank statistics (L_0, L_1, and L_log) corresponding to 3 different weights.
}
\usage{
Logrank.stat.tie(x.trunc, z.trunc, d)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x.trunc}{vector of variables satisfying x.trunc<=z.trunc}
\item{z.trunc}{vector of variables satisfying x.trunc<=z.trunc}
\item{d}{censoring indicator(0=censoring,1=failure) for z.trunc}
}
\details{
If there is no tie in the data, the function "Logrank.stat.tie" and "Logrank.stat" give identical results.
However, "Logrank.stat" is computationally more efficient. The simulations of Emura & Wang (2010) are
based on "Logrank.stat" since simulated data are generated from continuous distributions. The real data analyses
of Emura & Wang (2010) are based on "Logrank.stat.tie" since there are many ties in the data.
}
\value{
\item{L0}{Logrank statistics (most powerful to detect the Clayton copula type dependence)}
\item{L1}{Logrank statistics (most powerful to detect the Frank copula type dependence)}
\item{Llog}{Logrank statistics (most powerful to detect the Gumbel copula type dependence)}
}
\references{
Emura T, Wang W (2010) Testing quasi-independence for truncation data. Journal of Multivariate Analysis 101, 223-239
}
\author{Takeshi Emura}
\examples{
x.trunc=c(10,5,7,1,3,9)
z.trunc=c(12,11,8,6,4,13)
d=c(1,1,1,1,0,1)
Logrank.stat.tie(x.trunc,z.trunc,d)
Logrank.stat(x.trunc,z.trunc,d) ## since there is no tie, the results are the same.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Copula }
\keyword{ Quasi-independence test }% __ONLY ONE__ keyword per line
|
4e1f9d069d8f410178166163ee2a5eed9ad089fc | 4eb8d68d4dc3bf23ccdae7cc0b93082570321434 | /pkg/man/binned_tabulate.Rd | 61909299971625de95c9a7418fe35a58c9a02b74 | [] | no_license | edwindj/ffbase | 5d034d8d1ec65e94e7f4feec3da81e20841aa405 | 98236ab7501fc9765741300879f80baddfe991a3 | refs/heads/master | 2023-07-08T22:39:38.448615 | 2023-06-21T07:59:40 | 2023-06-21T07:59:40 | 8,315,596 | 26 | 8 | null | 2023-06-21T07:59:42 | 2013-02-20T15:15:55 | R | UTF-8 | R | false | true | 1,271 | rd | binned_tabulate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binned_tabulate.R
\name{binned_tabulate}
\alias{binned_tabulate}
\alias{binned_tabulate.default}
\alias{binned_tabulate.ff}
\title{Fast tabulating in different bins}
\usage{
binned_tabulate(x, bin, nbins = max(bin), nlevels = nlevels(x), ...)
\method{binned_tabulate}{default}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...)
\method{binned_tabulate}{ff}(x, bin, nbins = max(bin), nlevels = nlevels(x), ...)
}
\arguments{
\item{x}{\code{factor} or \code{integer} vector with the data to be tabulated}
\item{bin}{\code{integer} vector with the bin number for each data point}
\item{nbins}{\code{integer} maximum bin number}
\item{nlevels}{\code{integer} number of levels used in x}
\item{...}{used by binned_tabulate.ff}
}
\value{
\code{numeric} matrix where each row is a bin and each column a level
}
\description{
\code{binned_tabulate} implements fast tabulation for the given bins by calling C code.
It also returns the number of NA's per bin.
Please note that incorrect use of this function may crash your R-session.
the values of \code{bins} must be between \code{1} and \code{nbins} and may not contain \code{NA}.
The values of \code{x} must be between \code{1} and \code{nlevels}.
}
|
2e85cf41d42c8e0ad5c98005461d644993a7184b | d00d7eb6a2a1a784500dc61ec39fac3b9fe3dc0c | /data.R | 168ab643f822d20e659b554419a1f33ae196d970 | [] | no_license | 91Mrwu/SpectralImputation | 4a052a8f369364f4f34f37de2a008089a1d5ccfe | 62bd7930c28caef4b677072b654e39b497c2d86e | refs/heads/master | 2023-04-22T09:45:55.078739 | 2020-08-08T21:00:12 | 2020-08-08T21:00:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,824 | r | data.R | ## data selection and testing, explore several datasets to choose one as an approporaite validation set
## Load candidate 2017 ocean datasets for exploration
dat17 <- readRDS("../Downloads/OceanLocations17.rds") # one track all through 2017
dat177 <- readRDS("../Downloads/OceanLocations177_1.rds") # first part of July of 2017 in the region selected
dat <- readRDS("../2017July/17xiOrbits.rds") # first pc score in one track of 2017
## NOTE(review): this immediately overwrites the previous 'dat' -- only the
## July/selected-region PC scores are actually kept; confirm intended.
dat <- readRDS("../2017July/177xiOrbits.rds") # first PC score in July of 2017 in selected region
## tracks on pacific next to China and Australia: select from multiple tracks of 2017 set
rad17 <- readRDS("../Downloads/OceanRadiance17.rds")
wvs17 <- readRDS("../Downloads/OceanWavelength17.rds")
## print, per orbit, how many locations (wavelength records) it contains
for (obt in unique(dat17$orbit)) {
  idx <- which(dat17$orbit == obt)
  wvs <- wvs17[idx]
  cat(obt, "\n")
  #print(range(sapply(wvs, length)));
  print(length(wvs))
}
## 14793, 16191, 17123 have three most locations
## tracks on pacific ocean and next to United States: select from 5 days in 2017
# 15985, 15970, 15942 have three most locations
## explore the dataset, if there can be appropriate points for imputation
sets <- c(14793, 15985, 17123)
for (s in sets) {
  locs <- readRDS(paste0("./pacific_data/nasa_", s, "_locs.rds"))
  locs <- subset(locs, mix == 0 & lat < 70) # only water pixels
  # find center points where the 8x8 grid around each satisfies conditions: no track missing (if time gap is more than 0.5s)
  # and no more than 2 points missing each track
  # FPCA selected region around center point must have at least 200 points
  tks <- unique(locs$track)
  cpts <- which(locs$ftprint==4)   # candidate centre points (footprint 4)
  valids <- NULL                   # accumulator for points passing all checks
  for (idx in cpts) {
    ## latitude band of +/- 0.25 degrees around the candidate centre
    pcareg <- subset(locs, lat < locs$lat[idx]+0.25 & lat > locs$lat[idx]-0.25)
    if (nrow(pcareg) < 164) { # 164 = 100+8*8
      #cat("not enough points for doing FPCA", "\n")
      next
    }
    # check no track is missing
    tkidx <- which(tks == locs$track[idx])
    candtk <- tks[(tkidx-4):(tkidx+3)]   # the 8 tracks forming the grid
    ## decode track ids (HHMMSS.t strings) into seconds-of-day
    candsec <- sapply(candtk, function(x) {
      as.numeric(substr(x,1,2))*3600+as.numeric(substr(x,3,4))*60+as.numeric(substr(x,5,6))+as.numeric(substr(x,7,7))/10
    })
    if (any(diff(candsec) > 0.5)) {
      cat("tracks are not continuous", "\n")
      next
    }
    locsi <- subset(locs, track %in% candtk)
    ## an 8x8 grid can never hold more than 64 points
    if (nrow(locsi) > 64) {
      stop("something is wrong, check code!")
    }
    mincheck <- min(table(locsi$track)) < 6
    ## NOTE(review): maxcheck flags when even the fullest track has < 8
    ## points; verify this is the intended direction of the comparison.
    maxcheck <- max(table(locsi$track)) < 8
    if (mincheck || maxcheck) {
      cat("too much missing in the 8by8 grid", "\n")
      next
    }
    # if all above passed, record the centre point and its grid statistics
    valids <- rbind(valids, data.frame(idx = locs$id[idx], diff = max(diff(candsec)), num = nrow(locsi),
                                       total = nrow(pcareg), stringsAsFactors = FALSE))
  }
  saveRDS(valids, file = paste0("./pacific_data/nasa_", s, "_validpoints.rds"))
}
|
e3b145d9096ba5ad3b37cf921eda90e7f0f667b4 | 0db279202a70363e9df4b55c7dc07f2e6119bb83 | /3_visualize.R | dba41766b0288a1bf44b18f73749434a902ca6ca | [] | no_license | AndyMcAliley/ds-pipelines-targets-2 | 9594185b94c189bbb49c2e0cd954ed4bfc474212 | 0ff4bfecfcbd2cfa88072a874c7a870f860e7747 | refs/heads/main | 2023-08-02T17:04:15.370762 | 2021-09-21T18:56:57 | 2021-09-21T18:56:57 | 406,921,657 | 0 | 0 | null | 2021-09-21T18:56:57 | 2021-09-15T20:53:07 | R | UTF-8 | R | false | false | 342 | r | 3_visualize.R | source("3_visualize/src/plot_timeseries.R")
p_width <- 12
p_height <- 7
p_units <- "in"
p3_targets_list <- list(
tar_target(
p3_figure_1_png,
plot_nwis_timeseries(fileout = "3_visualize/out/figure_1.png", p2_site_data_clean,
width = p_width, height = p_height, units = p_units),
format = "file"
)
)
|
e84b704e3e7b18ad8d2f2eb7430362915b472b51 | ca17e23492efd1a8cf1af9ad9d950c62d0364e94 | /R/colorplot.R | e083b426df5b047d1769eb9e99b5b4cc6763677e | [] | no_license | thibautjombart/adegenet | a8b7f535510687dbf29c81f6da96eeacb442344e | bb8b9e89674adf55993d7d5abc5995485b38b8c9 | refs/heads/master | 2023-02-07T11:36:19.650854 | 2023-01-28T00:12:40 | 2023-01-28T00:12:40 | 31,032,458 | 169 | 70 | null | 2023-01-28T00:12:41 | 2015-02-19T19:25:14 | R | UTF-8 | R | false | false | 1,790 | r | colorplot.R | ##
## COLOR PLOT
##
## used to plot up to 3 variables in space using RGB system
##
## all coded in S3 method (arguments vary largely)
##
##########
# generic
##########
colorplot <- function(...) {
  ## S3 generic: dispatch to colorplot.<class> based on the first argument.
  UseMethod("colorplot")
}
#################
# default method
#################
#' @export
colorplot.default <- function(xy, X, axes=NULL, add.plot=FALSE, defaultLevel=0, transp=FALSE, alpha=.5, ...){
    ## Plot the points in 'xy', colouring each point by up to three numeric
    ## variables of 'X' mapped onto the red, green and blue channels.
    ##
    ## xy: two-column matrix-like object of coordinates (no NAs allowed)
    ## X: numeric matrix-like object; the columns selected by 'axes' feed
    ##    the R, G and B channels respectively
    ## axes: columns of X to use (defaults to the first min(ncol(X), 3))
    ## add.plot: if TRUE, add points to the current plot instead of opening one
    ## defaultLevel: value in [0, 1] used for absent green/blue channels
    ## transp: if TRUE, draw semi-transparent colours with opacity 'alpha'
    ## ...: further graphical parameters forwarded to plot()/points()
    ## Returns (invisibly) the character vector of hex colours used.

    ## -- input validation --
    if(any(is.na(xy))) stop("NAs exist in xy")
    xy <- as.matrix(xy)
    if(!is.numeric(xy)) stop("xy is not numeric")
    if(nrow(xy) != nrow(X)) stop("xy and X have different row numbers")
    if(is.null(axes)) {
        axes <- seq_len(min(ncol(X), 3))  # seq_len is safe if ncol(X) == 0
    }
    X <- as.matrix(X[,axes,drop=FALSE])
    if(any(is.na(X))) stop("NAs exist in X")
    if(!is.numeric(X)) stop("X is not numeric")
    ## scalar test: use short-circuit '||', not the vectorised '|'
    if(defaultLevel < 0 || defaultLevel > 1) stop("defaultLevel must be between 0 and 1")

    ## shift a column so its minimum is >= 0 (rgb() needs non-negative input)
    f1 <- function(x){
        if(any(x < 0)) {
            x <- x + abs(min(x))
        }
        return(x)
    }

    ## apply f1 to each selected column of X
    X <- apply(X, 2, f1)
    v1 <- X[,1]
    if(ncol(X) >= 2) {v2 <- X[,2]} else {v2 <- defaultLevel}
    if(ncol(X) >= 3) {v3 <- X[,3]} else {v3 <- defaultLevel}

    ## build one colour per point, scaling channels by the overall maximum.
    ## NOTE(review): if max(X) == 0 (all-zero input after shifting) this
    ## divides by zero / errors inside rgb() -- confirm whether a guard is
    ## wanted for that degenerate case.
    if(transp){
        col <- rgb(v1/max(X), v2/max(X), v3/max(X), alpha)
    } else {
        col <- rgb(v1, v2, v3, maxColorValue=max(X))
    }

    ## assemble the plot()/points() argument list; default pch = 20 unless
    ## the caller supplied one through '...'
    listArgs <- list(...)
    if(is.null(listArgs$pch)) {listArgs$pch <- 20}
    listArgs$x <- xy
    listArgs$col <- col

    ## draw a fresh plot, or add to the existing one
    if(!add.plot) {
        do.call(plot, listArgs)
    } else {
        do.call(points, listArgs)
    }
    return(invisible(col))
} # end colorplot.default
|
7837cccbf9b633abafce2f129b57d78ecd63db14 | 56fc93f3d1003af3c18e8e0c729a854412974439 | /man/TwoWayFEWeights.Rd | e8a981f3992bca0c9cf13a2f302d8dbe22e7d93a | [
"MIT"
] | permissive | econjoseph/twowayfeweights | 4942fb1022bd9dedfa1329f47a62a20c1f4ba852 | 9c0d42f19b55967e9d6c1166ec2929bbe18b8dd5 | refs/heads/main | 2023-05-05T11:35:33.401350 | 2021-05-16T07:53:35 | 2021-05-16T08:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,704 | rd | TwoWayFEWeights.Rd | \name{twowayfeweights}
\alias{twowayfeweights}
\title{Estimation of the weights attached to the two-way fixed effects regressions.}
\description{
Estimates the weights and measure of robustness to treatment effect heterogeneity attached to two-way fixed effects regressions.}
\details{
It estimates the weights attached to the two-way fixed effects regressions studied in de Chaisemartin & D'Haultfoeuille (2020a), as well as summary measures of these regressions' robustness to heterogeneous treatment effects.}
\usage{
twowayfeweights(df, Y, G, T, D, cmd_type, D0 = NULL, controls = c(), weights = NULL,
other_treatments = c(), test_random_weights = c())
}
\arguments{
\item{df}{ data frame or data matrix.}
\item{Y}{ the dependent variable in the regression. Y is the level of the outcome if one wants to estimate the weights attached to the fixed-effects regression, and Y is the first difference of the outcome if one wants to estimate the weights attached to the first-difference regression.}
\item{G}{ a variable identifying each group.}
\item{T}{ a variable identifying each period.}
\item{D}{ the treatment variable in the regression. D is the level of the treatment if one wants to estimate the weights attached to the fixed-effects regression, and D is the first difference of the treatment if one wants to estimate the weights attached to the first-difference regression.}
\item{cmd_type}{ a required option that can take four values: feTR, feS, fdTR, fdS.
If feTR is specified, the command estimates the weights and sensitivity measures attached to the fixed-effects regression under the common trends assumption. With feS, it estimates the weights and sensitivity measures attached to the fixed-effects regression under common trends and the assumption that groups' treatment effect does not change over time. With fdTR, it estimates the weights and sensitivity measures attached to the first-difference regression under the common trends assumption. Finally, with fdS it estimates the weights and sensitivity measures attached to the first-difference regression under common trends and the assumption that groups' treatment effect does not change over time.
}
\item{D0}{If \code{cmd_type} is specified in the option type below, then the command requires a fifth argument, D0. D0 is the mean of the treatment in group g and at period t. It should be non-missing at the first period when a group appears in the data (e.g. at t=1 for the groups that are in the data from the beginning), and for all observations for which the first-difference of the group-level mean outcome and treatment are non missing.}
\item{controls}{ a list of control variables that are included in the regression. Controls should not vary within each group*period cell, because the results in in de Chaisemartin & D'Haultfoeuille (2020a) apply to two-way fixed effects regressions with group*period level controls. If a control does vary within a group*period cell, the command will replace it by its average value within each group*period cell, default to \code{c()} if not specified.}
\item{weights}{ if the regression is weighted, the weight variable can be specified in weight. If type(fdTR) is specified, then the weight variable should be non-missing at the first period when a group appears in the data (e.g. at t=1 for the groups that are in the data from the beginning), and for all observations for which the first-difference of the group-level mean outcome and treatment are non missing.}
\item{other_treatments}{ a list of other treatment variables that are included in the regression. While the results in de Chaisemartin & D'Haultfoeuille (2020a) do not cover two-way fixed effects regressions with several treatments, those in de Chaisemartin & D'Haultfoeuille(2020b) do, so the command follows results from that second paper when other_treatments is specified. This option can only be used when type(feTR) is specified. When it is specified, the command reports the number and sum of positive and negative weights attached to the treatment, but it does not report the summary measures of the regression's robustness to heterogeneous treatment effects, as these summary measures are no longer applicable when the regression has several treatment variables. The command also reports the weights attached to the other treatments. The weights reported by the command are those in Corollary 1 in de Chaisemartin & D'Haultfoeuille (2020b). See de Chaisemartin & D'Haultfoeuille (2020b) for further details.}
\item{test_random_weights}{ when this option is specified, the command estimates the correlation between each variable in the supplied list and the weights. Testing if those correlations significantly differ from zero is a way to assess whether the weights are as good as randomly assigned to groups and time periods.}
}
\value{
\item{saved results}{ it saves the results in a dataframe containing 3 variables (Group, Time, Weight). This option allows the user to see the weight attached to each group*time cell. If the other_treatments option is specified, the weights attached to the other treatments are also saved.}
}
\section{FAQ}{
How can one interpret the summary measures of the regression's robustness to heterogeneous treatment effects?
When the two-way fixed effects regression has only one treatment variable, the command reports two summary measures of the robustness of the treatment coefficient beta to treatment heterogeneity across groups and over time. The first one is defined in point (i) of Corollary 1 in de Chaisemartin & D'Haultfoeuille (2020a). It corresponds to the minimal value of the standard deviation of the treatment effect across the treated groups and time periods under which beta and the average treatment effect on the treated (ATT) could be of opposite signs. When that number is large, this means that beta and the ATT can only be of opposite signs if there is a lot of treatment effect heterogeneity across groups and time periods. When that number is low, this means that beta and the ATT can be of opposite signs even if there is not a lot of treatment effect heterogeneity across groups and time periods. The second summary measure is defined in point (ii) of Corollary 1 in de Chaisemartin & D'Haultfoeuille (2020a). It corresponds to the minimal value of the standard deviation of the treatment effect across the treated groups and time periods under which beta could be of a different sign than the treatment effect in all the treated group and time periods.
How can I tell if the first summary measure is high or low?
Assume that the first summary measure is equal to x. How can you tell if x is a low or a high amount of treatment effect heterogeneity? This is not an easy question to answer, but here is one possibility. Let us assume that you find it a priori reasonable to assume that the treatment effect of every group and time period cannot be larger in absolute value than some real number B>0. If you are trying to assess beta's robustness to heterogeneous effects, beta presumably falls within your range of a priori plausible values for the treatment effect, so it seems fair to argue that B is at least as large as |beta|. Now let us also assume that the treatment effects of the treated groups and time periods are drawn from a uniform distribution. Then, to have that the mean of that distribution is 0 while its standard deviation is x, the treatment effects should be uniformly distributed on the [-sqrt(3)x,sqrt(3)x] interval. If |beta|>=sqrt(3)x, then uniformly distributed treatment effects with mean 0 and standard deviation x are compatible with your a priori plausible values for the treatment effect, so x may not be an implausibly high amount of treatment effect heterogeneity, and the ATT may be equal to 0. If on the other hand |beta|<sqrt(3)x, x may or may not be an implausibly high amount of treatment effect heterogeneity, depending on whether B<sqrt(3)x or B>=sqrt(3)x.
The previous reasoning relies on the assumption that treatment effects follow a uniform distribution. You may find it more reasonable to assume that they are, say, normally distributed. Then you can conduct the following, similar exercise. Let us assume that you find it a priori reasonable to assume that most, say 95\%, of the treatment effects are not larger in absolute value than some real number B>0. If the treatment effects of the treated groups and time periods are drawn from a mean 0 and standard deviation x normal distribution, then 95\% of them will fall within the [-1.96x,1.96x] interval. If B>=1.96x, N(0,x^2) distributed treatment effects don't seem incompatible with your prior, so x may not be an implausibly high amount of treatment effect heterogeneity.
How can I tell if the second summary measure is high or low?
Assume that the second summary measure is equal to x. Again, let us assume that you find it a priori reasonable to assume that the treatment effect of every group and time period cannot be larger in absolute value than some real number B>0. Again, it seems fair to argue that B is at least as large as |beta|. To fix ideas, let us assume that beta>0. Let us also assume that the treatment effects of the treated groups and time periods are drawn from a uniform distribution. Then, one could have that those effects are all negative, with a standard deviation equal to x, for instance if they are uniformly drawn from the [-2sqrt(3)x,0] interval. If |beta|>=2sqrt(3)x, then treatment effects distributed on the [-2sqrt(3)x,0] interval seem compatible with your a priori plausible range of values for the treatment effect, so x may not be an implausibly high amount of treatment effect heterogeneity. If on the other hand |beta|<2sqrt(3)x, x may or may not be an implausibly high amount of treatment effect heterogeneity, depending on whether B<2sqrt(3)x or B>=2sqrt(3)x. If the treatment effects of the treated groups and time periods are all negative, they cannot follow a normal distribution, so we do not discuss that possibility here.
}
\examples{
# using the same panel of workers as in Vella and Verbeek (1998)
library("wooldridge")
Y = "lwage"
G = "nr"
T = "year"
D = "union"
controls = c("hours")
twowayfeweights(wagepan, Y, G, T, D, cmd_type = "feTR", controls = controls)
}
\references{
de Chaisemartin, C and D'Haultfoeuille, X (2020a). American Economic Review, vol. 110, no. 9. Two-Way Fixed Effects Estimators with Heterogeneous Treatment Effects.
de Chaisemartin, C and D'Haultfoeuille, X (2020b). Two-way fixed effects regressions with several treatments.
}
|
555d24efa375f70fe973241df55208415d824e91 | 0a021f843670c168c4a212207c13be1b0a88ddbe | /man/add_bars.Rd | fc4b90b79060b19bba8e9ba5528310893a8e19cf | [] | no_license | cran/plotfunctions | ddc4dd741ad2a43d81deb0ef13fe2d7b37ca84bd | ebacdd83686e1a32a4432a35f244bf82015a19a5 | refs/heads/master | 2021-01-20T18:53:05.464513 | 2020-04-28T09:00:02 | 2020-04-28T09:00:02 | 59,847,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,781 | rd | add_bars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{add_bars}
\alias{add_bars}
\title{Adding bars to an existing plot.}
\usage{
add_bars(x, y, y0 = NULL, width = 1, horiz = FALSE, ...)
}
\arguments{
\item{x}{Numeric vector with x-positions of bars.}
\item{y}{Numeric vector with height of the bars.}
\item{y0}{Optional numeric value or vector with the onset(s) of the bars.
When \code{y0} is not specified, the lowest value of the y-axis is used.}
\item{width}{Numeric value, determining the width of the bars in units of
the x-axis.}
\item{horiz}{Logical value: whether or not to plot horizontal bars.
Defaults to FALSE.}
\item{...}{Other arguments for plotting, see
\code{\link[graphics]{par}}.}
}
\description{
Adding bars to an existing plot.
}
\examples{
# hypothetical experiment:
adults = stats::rpois(100, lambda = 5)
children = stats::rpois(100, lambda = 4)
newd <- data.frame(Adults = table( factor(adults, levels=0:15) ),
Children = table( factor(children, levels=0:15) ) )
newd <- newd[,c(1,2,4)]
names(newd)[1] <- 'value'
# barplot of Adults:
b <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value,
border=NA, ylim=c(0,30))
# overlay Children measures:
add_bars(b, newd$Children.Freq, col='red', density=25, xpd=TRUE)
# variants:
b <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value,
border=NA, ylim=c(0,30))
add_bars(b+.1, newd$Children.Freq, width=.85, col=alpha('red'),
border=NA, xpd=TRUE)
emptyPlot(c(-30,30), c(0,15), v0=0, ylab='Condition')
add_bars(-1*newd$Children.Freq, 0:15, y0=0, col=alpha('blue'),
border='blue', horiz=TRUE)
add_bars(newd$Adults.Freq, 0:15, y0=0, col=alpha('red'),
border='red', horiz=TRUE)
mtext(c('Children', 'Adults'), side=3, at=c(-15,15), line=1, cex=1.25,
font=2)
# adding shadow:
b <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value,
width=.9,
col='black', border=NA)
add_bars(b+.2, newd$Adults.Freq+.2, y0=.2, width=.9,
col=alpha('black', f=.2), border=NA, xpd=TRUE)
}
\seealso{
Other Functions for plotting:
\code{\link{addInterval}()},
\code{\link{add_n_points}()},
\code{\link{alphaPalette}()},
\code{\link{alpha}()},
\code{\link{check_normaldist}()},
\code{\link{color_contour}()},
\code{\link{dotplot_error}()},
\code{\link{drawDevArrows}()},
\code{\link{emptyPlot}()},
\code{\link{errorBars}()},
\code{\link{fill_area}()},
\code{\link{getCoords}()},
\code{\link{getFigCoords}()},
\code{\link{getProps}()},
\code{\link{gradientLegend}()},
\code{\link{legend_margin}()},
\code{\link{marginDensityPlot}()},
\code{\link{plot_error}()},
\code{\link{plot_image}()},
\code{\link{plotsurface}()},
\code{\link{sortBoxplot}()}
}
\author{
Jacolien van Rij
}
\concept{Functions for plotting}
|
39ca799dddd8186a64ed25c94bebbe9b61708f32 | a203b66dcc815505fbf69ab5057593481f296e83 | /R/MTFM.R | 3765c7da264d5a8f766d6720f6f2db69dc85b0a3 | [] | no_license | chr1swallace/MFM-analysis | f776108a21fa35ffc7c455c29f7e045a04c6cc42 | 741813c98fa9a7f49cfef3b68216da23b4de9e5e | refs/heads/master | 2020-04-23T02:29:59.812991 | 2019-02-15T10:46:52 | 2019-02-15T10:46:52 | 170,848,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,924 | r | MTFM.R | #!/usr/bin/env Rscript
# MTFM driver script: for one fine-mapping region, load GUESSFM results per
# disease and compute multi-trait marginal/shared posterior probabilities.
library(methods)
library(randomFunctions)
library(annotSnpStats)
library(magrittr)
source("~/DIRS.txt")  # defines path constants such as CVSROOT
setwd("~/Projects/cd4chic/GUESSFM")
#library(GUESSFM)
library(devtools)
# Load development checkouts in place of installed packages.
load_all("~/RP/GUESSFM")
load_all("~/RP/MTFM")
# Command-line arguments with defaults: d = region directory under CVSROOT
# (commented lines are alternative test regions), cppthr = cumulative
# posterior probability threshold used when trimming model lists.
args <- getArgs(default=list(d=file.path(CVSROOT,
## "14q-69168821-69318062" # JIA not found
## "15q-38814377-38972177" # int only
## "2q-100544954-101038647" # v slow
"10p-6030000-6220000" # even slower
## "18p-12738413-12924117"
## "10p-6030243-6169685"
## "2p-43310482-43362380"
),cppthr=0.99,keep=FALSE))
args$d <- sub("/GUESS$","",args$d)  # strip trailing /GUESS so d is the region root
args
## do diseases with minp < 1e-6
library(data.table)
# Conditional association results (one row per trait); used to keep only
# traits with a suggestive signal (p < 1e-6) in this region.
(load(file.path(args$d,"conditional.RData")))
conditional <- as.data.table(conditional)
conditional <- conditional[p<1e-6,]
source("~/Projects/cvs/runguess/R/functions.R")
##snpmod
thr=0.99 # or 0.9999
# Loads SM2, a named list of GUESSFM snpmod objects (one per trait).
(load(file.path(args$d,paste0("snpmod-",sub("0.","",args$cppthr),".RData"))))
## check - this should be uk only
# nd: apparent per-cohort sample counts (controls + UK case collections +
# international RA/CEL) - used below as N0/ND arguments to marginalpp().
nd <- structure(c(12747L, 2772L, 7728L, 1214L, 4461L, 3870L, 6681L,11475L, 12041L),names= c("CONTROL", "ATD", "CEL", "JIA", "MS", "RA", "T1D","iRA","iCEL"))
dis <- intersect(names(SM2),conditional$trait) # min p < 1e-6
dis <- intersect(dis,names(nd)) # uk only
# Need at least two distinct diseases; RA/CEL are excluded from the count so a
# UK + international pair of the same disease is not double-counted.  If the
# region fails this check, drop a marker file and exit (non-interactive runs).
if(length(setdiff(dis,c("RA","CEL")))<2) { # otherwise count CEL+iCEL=2, for example
system(paste("touch ",file.path(args$d,"skip-mtfm")))
if(!interactive())
q("no")
}
## why do this?
## if("iCEL" %in% dis && "CEL" %in% names(SM2) && !("CEL" %in% dis))
##     dis <- c(dis,"CEL")
## if("iRA" %in% dis && "RA" %in% names(SM2) && !("RA" %in% dis))
##     dis <- c(dis,"RA")
## dis <- c("GRAVES","ICOELIAC","MS","T1D")
message("found results for disease:")
print(dis)
## bestmod<- best.models(SM2[dis],cpp.thr=1.2)
## lapply(bestmod,dim)
# Keep, per trait, the smallest model set covering 99% cumulative posterior.
bestmod.thr <- best.models(SM2,cpp.thr=0.99)
M <- lapply(bestmod.thr, "[[", "str")       # model strings (SNP sets)
pr <- lapply(bestmod.thr, "[[", "prior")    # model priors
abf <- lapply(bestmod.thr, "[[", "logABF")  # log approximate Bayes factors
PP <- lapply(bestmod.thr, "[[", "PP")       # model posterior probabilities
names(M)
p0=snpprior(n=1000,expected=3)["0"]  # prior probability of the null model
STR=M[dis]
ABF=abf[dis]
PP <- PP[dis]
pr=pr[dis]
message("\n\nCPP threshold = ",thr, "\n\tn.each (",paste(dis,collapse="/"),") = ",paste(sapply(M[dis],length),collapse="/"))
## todo
## f.geno <- file.path(args$d ,"GUESS","all-data.RData")
## with(DATA@samples,table(phenotype))
N0 <- nd["CONTROL"]  # shared control count
# I0: control overlap per case collection - non-zero only for the
# international RA/CEL cohorts.
I0 <- structure(c(0L,0L,0L,0L,0L,0L,7443L,4814L),names= c("ATD", "CEL", "JIA", "MS", "RA", "T1D","iRA","iCEL"))
ND <- nd[dis]
ns <- nrow(SM2[[1]]@snps)  # number of SNPs in the region
diseases <- dis
library(parallel); options(mc.cores=3)
## Run MTFM::marginalpp() for a set of diseases.
##
## dis: character vector of disease names, used to index the script-level
##      globals STR, ABF, pr, ND, I0.  Also reads globals ns, SM2, p0, N0.
## Returns the marginalpp() result; for >2 diseases, results under three
## sharing priors (dep/mid/ind kappa) are concatenated and renamed so the
## disease label comes before the kappa label.
f <- function(dis) {
    ## kappa giving prior odds of sharing = 1 ("dependent" prior).
    kappa <- calckappa(nsnps=ns,p=2/ns,ndis=length(dis),target.odds=1)
    if(length(dis)>2) {
        ndis <- length(dis)
        ## Also compute intermediate and independent sharing priors.
        ## target.odds is p/(1-p) with p = 0.5^sqrt(ndis-1) ("mid") and
        ## p = 0.5^(ndis-1) ("ind").
        ## FIX: the original placed "/(1-0.5^(ndis-1))" OUTSIDE the "ind"
        ## calckappa() call, dividing the returned kappa instead of forming
        ## the odds as the parallel "mid" line does; paren moved inside.
        kappa <- c(dep=kappa,
                   mid=calckappa(nsnps=ns,p=2/ns,ndis=length(SM2),target.odds=0.5^sqrt(ndis-1)/(1-0.5^sqrt(ndis-1))),
                   ind=calckappa(nsnps=ns,p=2/ns,ndis=length(SM2),target.odds=0.5^(ndis-1)/(1-0.5^(ndis-1))))
    }
    if(length(kappa)>1) {
        ## One marginalpp() run per kappa, in parallel; flatten, then reverse
        ## the dot-separated name components ("kappa.disease" -> "disease.kappa").
        mpp <- mclapply(kappa,function(k) { marginalpp(STR[dis],ABF[dis],pr[dis],k,p0,N0=N0,ND=ND[dis],nsnps=ns,I0=I0[dis]) })
        mpp <- do.call("c",mpp)
        ss <- strsplit(names(mpp),"\\.") %>% lapply(.,rev) %>% lapply(.,paste,collapse=".") %>% unlist()
        names(mpp) <- ss
    } else {
        mpp <- marginalpp(STR[dis],ABF[dis],pr[dis],kappa,p0,N0=N0,ND=ND[dis],nsnps=ns,I0=I0[dis])
    }
    return(mpp)
}
## add groups
# SNP-group definitions for this region (loads `groups`), used to collapse
# model-level posteriors to group-level posteriors.
f.groups <- file.path(args$d,"snpmod-99-groups.RData")
(load(f.groups))
## no internationals
## if(!file.exists(file.path(args$d,"MTFM.RData"))) {
# UK-only analysis: drop the international cohorts and run f() if at least
# two diseases remain; sum shared.pp within each SNP group and save.
dis <- setdiff(diseases,c("iRA","iCEL"))
if(length(dis)>=2) {
mpp <- f(dis)
mpp <- lapply(mpp,function(x) {x$gr <- mod2group(rownames(x),groups=groups$groups); x})
gpp <- lapply(mpp, function(x) {
tapply(1:nrow(x),x$gr,function(i)
sum(x$shared.pp[i]))
})
message("saving to MTFM.RData")
save(mpp,gpp,file=file.path(args$d,"MTFM.RData"))
}
## }
## international
# International analysis: replace UK RA/CEL with their international
# counterparts where available, then repeat the group summarisation.
dis <- diseases
if("iRA" %in% dis)
dis <- setdiff(dis,"RA")
if("iCEL" %in% dis)
dis <- setdiff(dis,"CEL")
## if(!file.exists(file.path(args$d,"iMTFM.RData")) && any(c("iRA","iCEL") %in% diseases)) {
if(any(c("iRA","iCEL") %in% diseases)) {
message("running iMTFM for ",length(diseases)," diseases")
impp <- f(dis)
mpp <- lapply(impp,function(x) {x$gr <- mod2group(rownames(x),groups=groups$groups); x})
gpp <- lapply(mpp, function(x) {
tapply(1:nrow(x),x$gr,function(i)
sum(x$shared.pp[i]))
})
message("saving to iMTFM.RData")
save(mpp,gpp,file=file.path(args$d,"iMTFM.RData"))
}
|
7b476125d1a7d2b69d1da395b41adc12e97a8244 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612988243-test.R | 14461da1eca9659f45f6b4f431a29168e4d19861 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 141 | r | 1612988243-test.R | testlist <- list(mu = 2.81776900841821e-202, var = 2.81776900829939e-202)
# Auto-generated fuzz/regression check: call the internal routine on the
# fixed input list defined above and print the structure of the result.
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
436d400197815e7f88475512f5edc05c03e2f74e | 185eb75246acc598d15d43a6a487ef2ee0b3d231 | /R/geo/preprocessing/preprocess-GSE75330.R | de2dca825f6ffeee5f608531c28003fec89280db | [] | no_license | suzannejin/SCT-MoA | 4cd295da2252475d482905bbdfffa48aa9ca4c2d | bfd455479d94db92d30153b763d06f5732879606 | refs/heads/master | 2023-05-30T01:18:39.043455 | 2019-02-25T18:20:10 | 2019-02-25T18:20:10 | 362,417,400 | 0 | 0 | null | 2021-04-28T09:50:38 | 2021-04-28T09:50:37 | null | UTF-8 | R | false | false | 1,237 | r | preprocess-GSE75330.R | # Dataset: GSE75330
# Cells: single oligodendrocytes from mouse brain regions
# Values: counts
setwd("~/git/SCT-MoA")
options(stringsAsFactors = F)
source("R/functions.R")  # provides write_and_gzip() used below
# read data
# Molecule-count table: rows = genes (column 'cellid' holds gene symbols),
# remaining columns = single cells.
dat = read.delim(
  "data/geo/raw/GSE75330/GSE75330_Marques_et_al_mol_counts2.tab.gz",
  check.names = F)
# read SOFT files
lines = readLines("data/geo/raw/GSE75330/GSE75330_family.soft.gz")
samples = lines[grepl("Sample_title", lines)]
types = lines[grepl("inferred cell type", lines)]
# Named map: cell (sample title) -> inferred cell type, stripping the SOFT
# "key = value" / "key: value" prefixes.
cell_types = setNames(gsub("^.*\\: ", "", types), gsub("^.*= ", "", samples))
# match symbols to Ensembl
symbols = dat$cellid
map = AnnotationDbi::select(org.Mm.eg.db, keys = symbols, keytype = 'SYMBOL',
                            columns = 'ENSEMBL')
# First Ensembl ID per symbol (match() takes the first hit).
genes = map$ENSEMBL[match(symbols, map$SYMBOL)]
# convert to matrix
expr = t(dat[, -1])  # cells x genes
colnames(expr) = genes
# Drop genes without an Ensembl mapping; keep one column per Ensembl ID.
expr = expr[, !is.na(colnames(expr)) & !duplicated(colnames(expr))]
# Write one gzipped expression matrix per inferred cell type.
for (ct in unique(cell_types)) {
  matching_cells = names(which(cell_types == ct))
  expr_subset = expr[rownames(expr) %in% matching_cells, ]
  write_and_gzip(expr_subset, paste0("data/geo/processed/GSE75330_", ct, ".txt"))
  message("wrote cell type `", ct, "` with ", nrow(expr_subset), " cells")
}
|
b52c3aefabbe7401299031f2392268990cff3d7d | 25815be16bffa67b6f5c862a5829b0b2867a5efb | /draft2.R | 42ba1f8444bfcbe009a808c036cdadebd5d3d197 | [] | no_license | sayefi/wearableml | d578fc4220c869ec20f0fe9f19baec4c0fdf7d4c | c5cf3f0f1840cf7602766319db291750d631043a | refs/heads/master | 2021-08-08T20:38:04.313286 | 2017-11-11T04:15:02 | 2017-11-11T04:15:02 | 109,638,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,713 | r | draft2.R | url<-"https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
# Download the Practical Machine Learning (Weight Lifting Exercise) data sets
# and read them in.  `url` is assigned on the preceding line.
download.file(url,"data/training.csv")
url<-"https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
download.file(url,"data/testing.csv")
trainData<-read.csv("data/training.csv")
testData<-read.csv("data/testing.csv")
summary(trainData)
# trainData0<-read.csv("data/training.csv",nrows=500)
#
# colnames(trainData0)
# Raw accelerometer/gyroscope/magnetometer readings from the belt, arm,
# dumbbell and forearm sensors, plus the outcome ("classe").  The per-window
# summary columns (kurtosis, skewness, avg, var, ...) are excluded.
reqdCols<-c("roll_belt",
            "pitch_belt",
            "yaw_belt",
            "total_accel_belt",
            "gyros_belt_x",
            "gyros_belt_y",
            "gyros_belt_z",
            "accel_belt_x",
            "accel_belt_y",
            "accel_belt_z",
            "magnet_belt_x",
            "magnet_belt_y",
            "magnet_belt_z",
            "roll_arm",
            "pitch_arm",
            "yaw_arm",
            "total_accel_arm",
            "gyros_arm_x",
            "gyros_arm_y",
            "gyros_arm_z",
            "accel_arm_x",
            "accel_arm_y",
            "accel_arm_z",
            "magnet_arm_x",
            "magnet_arm_y",
            "magnet_arm_z",
            "roll_dumbbell",
            "pitch_dumbbell",
            "yaw_dumbbell",
            "gyros_dumbbell_x",
            "gyros_dumbbell_y",
            "gyros_dumbbell_z",
            "accel_dumbbell_x",
            "accel_dumbbell_y",
            "accel_dumbbell_z",
            "magnet_dumbbell_x",
            "magnet_dumbbell_y",
            "magnet_dumbbell_z",
            "roll_forearm",
            "pitch_forearm",
            "yaw_forearm",
            "gyros_forearm_x",
            "gyros_forearm_y",
            "gyros_forearm_z",
            "accel_forearm_x",
            "accel_forearm_y",
            "accel_forearm_z",
            "magnet_forearm_x",
            "magnet_forearm_y",
            "magnet_forearm_z",
            "classe")
library(dplyr)
library(caret)
# Keep only the raw sensor columns + outcome.
# FIX: wrap the external character vector in all_of(); passing `reqdCols`
# bare relies on deprecated tidyselect behaviour (same result, no warning).
trainDataX<-select(trainData,all_of(reqdCols))
# Drop rows with any missing value among the kept columns.
compCases<-complete.cases(trainDataX)
trainDataX<-trainDataX[compCases,]
summary(trainDataX)
# 80/20 stratified train/validation split on the outcome.
trainDataP<-createDataPartition(trainDataX$classe,p=0.8,list=FALSE)
trainDataPt<-trainDataX[trainDataP,]
validateDataPt<-trainDataX[-trainDataP,]
str(trainDataPt)
# Fit the full random forest (200 trees, centred/scaled PCA inputs) on a
# parallel doParallel backend and time the fit.
cluster <- makeCluster(detectCores()-1) # convention to leave 1 core for OS
registerDoParallel(cluster)
fitControl<-trainControl(method="oob",allowParallel = TRUE)
start_time <- Sys.time()
# FIX: caret::train takes its control object via `trControl`; the original
# `trainControl=fitControl` was silently absorbed by `...`, so the OOB /
# allowParallel settings were never applied.
modelFit<-train(classe~.,data=trainDataPt,method="rf",ntree = 200,
                preProcess=c("center","scale","pca"),
                trControl=fitControl)
end_time <- Sys.time()  # (a duplicate second assignment was removed)
start_time
end_time
end_time - start_time
stopCluster(cluster)
registerDoSEQ()
# Validation-set performance.
res<-predict(modelFit,validateDataPt)
confusionMatrix(res,validateDataPt$classe)
# NOTE: "warable" spelling kept deliberately - the file is read back below.
saveRDS(modelFit,"warableModel.rds")
library(randomForest)
model<-readRDS(file = "warableModel.rds")
res1<-predict(model,testData)
#-------------------------------------------------------------------
# Timing experiments: fit rf on increasing training fractions with varying
# pre-processing, recording duration and validation accuracy in timeChart.
trainDataP<-createDataPartition(trainDataX$classe,p=0.01,list=FALSE)
trainDataPt<-trainDataX[trainDataP,]
validateDataPt<-trainDataX[-trainDataP,]
dim(trainDataPt)[1]
timeChart<-data.frame()
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf")
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
# Row 1: rf, no pre-processing, 1% training fraction.
timeChart[1,1]<-"rf"
timeChart[1,2]<-"None"
timeChart[1,3]<-dim(trainDataPt)[1]
timeChart[1,4]<-round(end_time-start_time,2)
timeChart[1,5]<-round(cm$overall[1],3)
names(timeChart)<-c("Method","Pre-Processing/Parameters","No.Records",
                    "Duration","Accuracy")
timeChart
#-----------------------------------------------
# Row 2: centre/scale pre-processing, 1% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("scale","center"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[2,1]<-"rf"
timeChart[2,2]<-"scale,center"
timeChart[2,3]<-dim(trainDataPt)[1]
timeChart[2,4]<-round(end_time-start_time,2)
timeChart[2,5]<-round(cm$overall[1],3)
timeChart
#---------------------------------------------------
# Row 3: PCA pre-processing, 1% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("pca"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[3,1]<-"rf"
timeChart[3,2]<-"pca"
timeChart[3,3]<-dim(trainDataPt)[1]
timeChart[3,4]<-round(end_time-start_time,2)
timeChart[3,5]<-round(cm$overall[1],3)
timeChart
##------------------------------------------------
# Row 4: centre+scale+PCA, 1% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("center","scale","pca"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[4,1]<-"rf"
timeChart[4,2]<-"center,scale,pca"
timeChart[4,3]<-dim(trainDataPt)[1]
timeChart[4,4]<-round(end_time-start_time,2)
timeChart[4,5]<-round(cm$overall[1],3)
timeChart
#----------------------------------------------------
# Re-split with a 5% training fraction.
trainDataP<-createDataPartition(trainDataX$classe,p=0.05,list=FALSE)
trainDataPt<-trainDataX[trainDataP,]
validateDataPt<-trainDataX[-trainDataP,]
#-------------------------------------------------------------
# Row 5: centre/scale, 5% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("scale","center"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[5,1]<-"rf"
timeChart[5,2]<-"scale,center"
timeChart[5,3]<-dim(trainDataPt)[1]
timeChart[5,4]<-round(end_time-start_time,2)
timeChart[5,5]<-round(cm$overall[1],3)
timeChart
#----------------------------------------------------------------
# Row 6: PCA, 5% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("pca"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[6,1]<-"rf"
timeChart[6,2]<-"pca"
timeChart[6,3]<-dim(trainDataPt)[1]
timeChart[6,4]<-round(end_time-start_time,2)
timeChart[6,5]<-round(cm$overall[1],3)
timeChart
#----------------------------------------------------------------
# FIX: removed a verbatim duplicate of the row-6 experiment above.  It re-ran
# the identical set.seed(100) / train(..., preProcess = c("pca")) fit and
# overwrote timeChart[6, ] with the same values, doubling runtime for no
# additional information.
#--------------------------------------------------------------
# Row 7: centre+scale+PCA, 5% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("scale","center","pca"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[7,1]<-"rf"
timeChart[7,2]<-"center, scale, pca"
timeChart[7,3]<-dim(trainDataPt)[1]
timeChart[7,4]<-round(end_time-start_time,2)
timeChart[7,5]<-round(cm$overall[1],3)
timeChart
#--------------------------------------------------------------
# Re-split with a 10% training fraction.
trainDataP<-createDataPartition(trainDataX$classe,p=0.1,list=FALSE)
trainDataPt<-trainDataX[trainDataP,]
validateDataPt<-trainDataX[-trainDataP,]
#-----------------------------------------------------------------
# Row 8: centre/scale, 10% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("scale","center"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[8,1]<-"rf"
timeChart[8,2]<-"scale,center"
timeChart[8,3]<-dim(trainDataPt)[1]
timeChart[8,4]<-round(end_time-start_time,2)
timeChart[8,5]<-round(cm$overall[1],3)
timeChart
#----------------------------------------------------------------------
# Row 9: PCA, 10% training fraction.
set.seed(100)
start_time<-Sys.time()
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("pca"))
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[9,1]<-"rf"
timeChart[9,2]<-"pca"
timeChart[9,3]<-dim(trainDataPt)[1]
timeChart[9,4]<-round(end_time-start_time,2)
timeChart[9,5]<-round(cm$overall[1],3)
timeChart
#------------------------------------------------------------------------
# Row 10: PCA with out-of-bag resampling, 10% training fraction.
set.seed(100)
fitControl<-trainControl(method="oob")
start_time<-Sys.time()
# FIX: caret::train's control argument is `trControl`; the original
# `trainControl=fitControl` went into `...` and was ignored, so OOB
# resampling was never actually used.
modelFit<-train(classe~.,data=trainDataPt,method="rf",
                preProcess=c("pca"),
                trControl=fitControl)
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[10,1]<-"rf"
timeChart[10,2]<-"pca with oob resampling"
timeChart[10,3]<-dim(trainDataPt)[1]
timeChart[10,4]<-round(end_time-start_time,2)
timeChart[10,5]<-round(cm$overall[1],3)
timeChart
plot(modelFit$finalModel)
#------------------------------------------------------------------------
# Row 11: PCA + OOB resampling, forest limited to 200 trees.
set.seed(100)
fitControl<-trainControl(method="oob")
start_time<-Sys.time()
# FIX: `trControl` (not `trainControl`) is the caret::train argument name.
modelFit<-train(classe~.,data=trainDataPt,method="rf",ntree = 200,
                preProcess=c("pca"),
                trControl=fitControl)
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[11,1]<-"rf"
timeChart[11,2]<-"pca, oob resampling, 200 trees"
timeChart[11,3]<-dim(trainDataPt)[1]
timeChart[11,4]<-round(end_time-start_time,2)
timeChart[11,5]<-round(cm$overall[1],3)
timeChart
#--------------------------------------------------------------------------
# Row 12: as row 11, but trained on a multi-core doParallel backend.
library(parallel)
library(doParallel)
set.seed(100)
fitControl<-trainControl(method="oob",allowParallel = TRUE)
cluster <- makeCluster(detectCores()-1) # convention to leave 1 core for OS
registerDoParallel(cluster)
start_time<-Sys.time()
# FIX: `trControl` (not `trainControl`) - otherwise the OOB / allowParallel
# control settings are silently dropped.
modelFit<-train(classe~.,data=trainDataPt,method="rf",ntree = 200,
                preProcess=c("pca"),
                trControl=fitControl)
end_time<-Sys.time()
stopCluster(cluster)
registerDoSEQ()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
end_time-start_time
timeChart[12,1]<-"rf"
timeChart[12,2]<-"pca,oob, 200 trees, pp"
timeChart[12,3]<-dim(trainDataPt)[1]
timeChart[12,4]<-round(end_time-start_time,2)
timeChart[12,5]<-round(cm$overall[1],3)
timeChart
saveRDS(modelFit,"wearableModel2000.rds")
#-------------------------------------------------------------------------
# Final fit: 80% training fraction, PCA + OOB resampling, 200 trees.
trainDataP<-createDataPartition(trainDataX$classe,p=0.8,list=FALSE)
trainDataPt<-trainDataX[trainDataP,]
validateDataPt<-trainDataX[-trainDataP,]
set.seed(100)
fitControl<-trainControl(method="oob")
start_time<-Sys.time()
# FIX: `trControl` (not `trainControl`) is caret::train's control argument.
modelFit<-train(classe~.,data=trainDataPt,method="rf",ntree = 200,
                preProcess=c("pca"),
                trControl=fitControl)
end_time<-Sys.time()
res<-predict(modelFit,validateDataPt)
cm<-confusionMatrix(res,validateDataPt$classe)
timeChart[13,1]<-"rf"
timeChart[13,2]<-"pca,oob resampling, 200 trees"
timeChart[13,3]<-dim(trainDataPt)[1]
timeChart[13,4]<-round(end_time-start_time,2)
timeChart[13,5]<-round(cm$overall[1],3)
timeChart
cm
saveRDS(modelFit,"wearableModel.rds")
saveRDS(timeChart,"timechart.rds")
timeChart
|
4214ea45e4eb10a0c1aa060f4d2e5f88c31462d0 7d3b834c7eec5c5bf3bd1b1206347c8969ec1320 /working/R/power_gen_orig.R d08e75f39760b358ddbd7800f616cc75f2b4021 [] no_license camerynbrock/esm-262 ebd790815b5b008b478ba05ebb7bf1951f44fa17 b806700ff56945e30810e3d49efcceee076e7236 refs/heads/master 2023-03-22T19:54:49.713977 2021-03-03T01:08:18 2021-03-03T01:08:18 337192259 0 5 null null null null UTF-8 R false false 60 r power_gen_orig.R power_gen_orig <- function(a,b) {
  # Return the (elementwise) sum of a and b.
  # NOTE(review): despite the name, this simply adds its two arguments.
  # Idiom fixes: `<-` instead of `=` for assignment; dropped the needless
  # temporary and explicit return() - the last expression is returned.
  a + b
}
ca13e7490325c996e238491ce0e617ad3b87686a | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/metadynminer3d/inst/testfiles/hills3d2p13/hills3d2p13_output/log_c9adb054ca2d424da9297ff68fa8399f24de4220/hills3d2p13-test.R | aa5c1ada54f4058d8ddeab230068d81a08f3bdf2 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,977 | r | hills3d2p13-test.R | testlist <- list(cv1 = c(-5.26369149538019e+153, -5.77049913748844e+199, 8.94491153200418e-224, -1.13317236858005e-264, -1.71354221939578e-36, 1.06697892804715e+153, -5.69990451539225e+25, -6.28641323885007e+122, 6.5705330222511e-167, -2.21837045632867e+264, 3.65979666020449e+52, -3.43953413019155e-209, Inf, 8.05454989690644e-141, -9.09528889648417e+264, 1.02084571509291e-104, 5.65939043520958e+274, -8.15111492761405e-199, 12200002952962.1, -Inf, -2.50454323018927e-184, NaN, 2.83159998997568e+132, -1.11196894977889e+75, 1.3334457812707e+97, -3.85428468043869e-231, -1.95159450909329e-187, -5.99845380554052e+20, -7.71178496393173e-286, -1.20574248592534e+52, 2.20939862082753e+45, 8.23295321975463e-95, -1.72806622300374e-13, -3.18952441471396e+179, -2.61982703083468e+222, 1.22439580684673e-45, -3.61959151898159e+199, 1.34641789265334e-140, -1.64572034141002e-179, -3.51507229439945e+250, -145565462313.858, 4.68013068334992e+194, 8.50911114288852e+49, -Inf, -3.35252714572047e-261, 1.86196362146936e+219, 1.18971676575851e+121, -2.87368343494693e+277, 2.23225503832024e-274, 6.09956479980839e+145, -5.12602420957042e-287, -4.34571954661732e+170, -7.11424465661382e+137, 2.48242410736649e+259, 8.0617614964706e+276, -3.4036119743466e+213, -1.94753096646727e+33, 2.21648932093901e+271, -Inf, 2.54754436523087e+33, -1.37920571695422e+271, 1.34525113339739e+79, 2.80340544382357e-257, 
0), cv2 = c(7.34951572143252e+158, 1.18375568999115e-130, -1.25092225056581e+245, -1.11699184113772e-117, -1.30487845723826e-182, NaN, -4.48775401495829e-276, -1.24965648614887e-301, 5.39930222953523e+218, -6.02911428150894e-137, -2.1942874084634e-56, 1.70899548206101e+31, 1.26116460033554e-276, -3.48917627359598e+221, -8.35167614143772e+182, -4.89633370523665e-173, -4.28738151059298e+299, -4.01122633963098e+158, 2.42582640913626e-173, 4.42161837304758e-244, 7.61805680288738e-35, -6.75116948650055e+254, 1.72422541462329e+104, 1.44340248516645e-244, 4.04775251401572e+297, -2.89388497719056e-240, -Inf, -1.99550088869106e+202, 1.62878018644578e+299, 1.01394376395067e-272, 9.93681285843926e-17, NA, -1.3001631605813e+111, -1.02805515812197e+278, -1.72702986450365e-53, 5.5551523250861e-219, -2.94023996615734e+278, 5.5131234467839e+144, 9.39244729335219e+145, -7.88041271531231e+40, Inf, -2.50767757913564e+154, 5.18064178505099e+151, -8.84838719594906e-297, -1.80155663433081e-252, 4.44761957217924e+46, -1.22805115027332e-97, -1.20573316468017e+105, -1.23853512069189e-187, -9.01819044090388e-36, 1.67845066014699e+103, -1.22996095522869e-279, -7.3156806980772e-69, 1.74434947943935e-165, 1.17920268960171e+144, 1.23181060759517e+53, -1.00798807378577e+129, 1.53968020914095e-45, -2.17657020273772e-305, -2.54473153397724e+289, Inf, 4.61456932104957e-298, 9.92897189620668e+278, 0), cv3 = c(NA, -1.49172874797462e+144, -Inf, -9.57780604940828e-138, -Inf, -1.59151104272225e+42, 6.55138418534574e+47, -9.18652480116851e+217, NA, -8.39603684741746e-276, 4.09232975421505e-153, 11464947.7712968, NA), heights = c(2.84392561339988e+182, -2.53788149993427e-187, -8.3876908443239e-45, Inf, NA, -6.07326522141325e+29, NA), n = -2119843780L, tmax = -591994603L, tmin = 1801182665L, width1 = c(3.08642996067562e+212, 4.56284253531389e-86, 7.90532973805452e+176, -5.48204836881838e+109, 5.24245986338689e-151, 97481434.256936, -3.34417288264182e-28, -1.59808293577783e-167, 2.5838609625772e+197, 
4.27105163958115e+307, -6.30633286754132e+268, 4.78642910561015e-133, -3.99203581700451e+48, 1.0219403574459e+282, 1.89044858702399e+197, 2.21431264236537e-303, 1.78420571416929e+279, 2.55876000517699e-64, -4.11704511598269e+142, -3.29279188750298e-198, 1.56968622705247e-12, -2.29010657669107e-123, -2.10202134688047e+230, 2.22222401554921e-52, 3.76667715600505e+182, -8.66637386017802e+294, 2.59448656704022e+189, 1.35239364854671e-09, -2.16041220387097e-149, -7.9297626570166e-181, 9.33472302061354e-232, Inf, 9.21318837515976e+240, -3.50425533258333e-42, 2.47198172418025e-19, 1.11073183896523e+168, NA, -2.6745133920275e+279, -7.55182862430543e-69, 5.47254990189992e-128, -4.30500505913595e-60, 1.81284286590002e-163, 10395200100254260, 1.05940474806875e-63, -2.99938179415191e+143, -8.91363801062435e-141, 9.18464495353898e-103, 2.14482709162461e-128, -107.54708794823, -3.78578228876443e+138, 1.35101787455008e-43, 2.21431264236537e-303, 5.92317275273379e-294, -1252037950284522, -11631785507215202, -1.28635553828712e+167, 1.94737625294742e+253, 9.48648586707916e-196, 1.57726561459344e+60, -6.75257856715424e+248, -3.4582132340158e+91, 1.01780329227136e+280, -1.22499581467262e-14, 7.70219056079101e+158, -6.67793744243422e+234, 1.89212088482041e-129, -6.11840068936794e+296, -3.72964266018336e+171, -Inf, -2.97131426955786e-121, 6.32267114523839e-35, -1.33260105951241e+236, -1.59327427180302e+30, 1.4671738555026e-300, 1.92679331823182e-105, -1.03786355146214e+203, 3.46745294796453e+100, 3.12514642680892e-273, 1.50379911448062e+123, -2.39659673242424e+83, -8.81359804257384e-57, -3.0561870777883e+182, -4.29557373794155e+76, 8.45494164517191e-144, -1.27579198067571e-08, 6.11588105838544e+152, 4.05950717251524e-164, -4.91312860950957e-77, -9.12252786381326e-134, 3.30119619790783e+159, -1.94195747562281e-117, -8.29882108481319e+86, 0), width2 = c(1.44068654733168e+260, -2.21323896524717e+183, -4.58907545689838e+70, -8.96683722847603e-71, NA, 7.56366892548564e-274, 
4.32568463015961e-131, -7.05918339301913e+249, 6.65933836536177e+151, 2.18701625051693e-113, -9.90855140654005e-64, -8.91911992671584e+188, -3.81460402799034e-287, -4.54699786310988e-160, 2.53187227301659e+189, -928684222632761728, -1.09585303081409e-256, -1.30072844113694e-177, -4.83320126255432e+215, 3.99944700514334e+250, -4.00347037831965e-140, 1.53385355491474e+257, 7.05138880528257e+224, 3.70038910936668e+223, -2.0471116087117e+208, -9.3730839175399e+71, -2.77704291146483e+211, 6.13328408861836e+262, 1.47676253470499e+153, -1.08608476516543e+93, 1.96799505549822e+45, -7.39281774016534e-246, -5.04390858741212e+129, -8.66372665355723e-166, 1.01788507251646e+277, -3.61520670003187e+151, 1.29707596085309e+141, 4.93303141800338e+148, Inf, 5.28784996984715e+237, -8068663837.22821, -5.02366139603117e+72, 3.51291079304099e+65, -1.52455570724159e+143, 3.8023210403961e-153, -4.16163497019368e-224, -2.81244788052319e-110, -6.7566203216146e-222, -5.53069834599294e-79, NaN, 8.24421914172281e+281, -1.03884737305709e+179, NaN, 2.39356226902389e+41, 1.77615441966252e+137, -4.29869342082499e+104, -2.81528672462346e+154, 1.9544836077064e+75, 2.72769344233401e-210, 1.79891753622712e+300, 2.50666871473404e-194, 1.16851920307289e-177, -2.1097265126537e+192, -4.80934293816086e+56, 4.74416383556077e+120, -3.21989726616081e+177, 1.42539244369644e+260, -7140122993248829, 4.87287044726449e-97, 2.2062473189124e+290, -7.93243374865923e-94, -1.2817137700496e+272, -5.07815618123066e+22, -8.17277240348963e-21, -2.4225085999529e-278, -8.96158863913609e+213, 5.70848163966102e-192, -8.27201830418398e+64, -2.4503108024239e-276, 1.05051505465271e-25, NaN, -7.06445122259455e-166, -8.00290059189092e+170, -0.000163780981309219, -1.96262167632677e+282, -1.91176709945437e+197, 0), width3 = c(-4.32015044783676e-216, -1.23895177693332e+274, 1.10522068951221e+133, -5.47991424413849e-81, -6.61368830236864e-133, 1.48999792213864e+160, 5.22832623818043e+271, -5.30818745141301e+219, -4.39576243287973e-94, 
6.11681199353138e+143, -1.08814255297761e-09, -2.66273123752323e-265, -8.14024434631302e-166, 5.14606885623533e+181, -1.19247133187568e-71, -9.93403007210284e-249, -1.60724653490032e-229, 6.37552309751644e-281, -1.04642051232934e+24, 3.66137611984805e-183, -5.60225813081672e-292, NA, -6.55859524549322e+62, -3.367994720609e+18, -2.73427268939092e+160, 8.21935369737833e-142, -4.77059905511191e+239, 9.02237439046916e+98, -1.21602516835509e+221, -4.74612531214371e-100, Inf, -9.27560251379048e-126, -9.79711298683815e+34, -1.54968063568453e-156, -1.56778156081792e-73, -2.02745275913314e+87, -1.08220950542504e-166, -1.85089731382849e-144, -1.90279655215486e+180, 5.00311456142105e+121, -6.71200595164737e-12, -5.60305868546544e-288, -1.4927729001192e+282, 3.42962332387251e+258, 1.50531158887732e-145, -2.79591880263626e+284, 1.37255842097162e-303, 1.502799154461e-257, 1.12362128105803e+144, -4.70350892674298e-157, 2.33874060524537e-136, 4.13861162802599e+251, 2.65741165377901e+296, 1.0471592783432e-101, -5.99278251331776e-189, 1.13105198939141e-293, -1.91411676686426e+198, 1.81799799515026e-230, -Inf, NA, 0))
# Auto-generated fuzz/regression check: invoke the internal compiled routine
# on the fixed (largely pathological: NA/NaN/Inf-laden) input list defined
# above and print the structure of whatever it returns.
result <- do.call(metadynminer3d:::hills3d2p13,testlist)
str(result)
f07500d020bff107a51b4f5b94baed7d30098721 | bb8f6b2327f427d2874a6b2e7d112f2f883261e8 | /Trial2.R | cd0390506e66d735736703c97512f7cfde5113fd | [] | no_license | JuliaHarvie/CanislupusPopulationInvestigation | 6ab1d1dade92370e5b7163c8c5aa3f50b8d1a107 | d5e9af25ccc0628ce2ab26b4db5f8a674ee8d087 | refs/heads/main | 2023-01-28T22:25:37.348328 | 2020-12-13T19:24:07 | 2020-12-13T19:24:07 | 319,728,305 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,836 | r | Trial2.R |
# Population-genetic analysis of Canis COI-5P barcode sequences from BOLD.
library(Biostrings)
library(tidyverse)
library(muscle)
library(DECIPHER)
library(ape)
library(viridis)
library(cluster)
library(seqinr)
library(adegenet)
library(pegas)
library(apex)
library(mmod)
library(poppr)
# Download all public BOLD records for genus Canis and cache them locally.
Wolf <- read_tsv("http://www.boldsystems.org/index.php/API_Public/combined?taxon=Canis&format=tsv")
write_tsv(Wolf, "Wolf_BOLD_data.tsv")
Canis <- read_tsv("Wolf_BOLD_data.tsv")
#Reduced Canis
# Keep only the columns needed downstream, restrict to grey wolf / domestic
# dog records, and keep the standard COI-5P barcode marker.
Canis_rd <- Canis %>%
  select(processid, genus_name, species_name, subspecies_name, bin_uri, country, markercode, nucleotides, lon, lat, recordID) %>%
  filter(species_name == "Canis lupus" | species_name == "Canis familiaris") %>%
  filter(markercode == "COI-5P")
#Check
#count gets masked so do this
dplyr::count(Canis_rd, species_name)
dplyr::count(Canis_rd, subspecies_name)
dplyr::count(Canis_rd, markercode)
#Working on the null Canis familiaris is just a sub species of Canis and should be treated as such
#Relabel any Canis familiaris as such
# Idiom/performance fix: the original iterated row-by-row over
# 1:nrow(Canis_rd) with single-cell assignments; the vectorised form below
# produces the identical result.  species_name is safe to test with == here:
# the preceding filter restricts it to exactly two non-NA values.
is_fam <- Canis_rd$species_name == "Canis familiaris"
Canis_rd$species_name[is_fam] <- "Canis lupus"
Canis_rd$subspecies_name[is_fam] <- "Canis lupus familiaris"
# Remaining records with no recorded subspecies default to the species name.
no_subsp <- !is_fam & is.na(Canis_rd$subspecies_name)
Canis_rd$subspecies_name[no_subsp] <- "Canis lupus"
#Check
dplyr::count(Canis_rd, species_name)
dplyr::count(Canis_rd, subspecies_name)
#Increased by 4 as expected
#Quality checks and filtering
# Strip leading/trailing N runs and gap characters from the sequences, shorten
# the taxon labels, and build a unique per-record ID for the alignment names.
Canis_filtered <- Canis_rd %>%
  mutate(nucleotides2 = str_remove_all(nucleotides, "^N+|N+$|-")) %>%
  filter(!is.na(nucleotides2)) %>%
  mutate(species_name = str_replace(species_name, "Canis", "C.")) %>%
  mutate(subspecies_name = str_replace(subspecies_name, "Canis", "C.")) %>%
  mutate(ID = paste(subspecies_name, recordID, sep="_"))
summary(nchar(Canis_filtered$nucleotides2))
# Sequence-length distribution per subspecies.
# NOTE(review): xmin/xmax here are passed to ggplot() itself, not to a scale,
# and are therefore ignored.
ggplot(Canis_filtered,aes(x=nchar(nucleotides2), y = subspecies_name, colour = subspecies_name), xmin = 0, xmax = max(nchar(nucleotides2))+100) +
  scale_x_continuous(breaks =seq(0, max(nchar(Canis_filtered$nucleotides2))+100 , by = 200)) +
  geom_jitter(show.legend = F) +
  geom_boxplot(outlier.shape = NA, colour="black", show.legend = F, fill = NA) +
  xlab("Sequence Length")
#Going to filter over 600 - 1600
Canis_filtered <- Canis_filtered %>%
  filter(nchar(nucleotides2) >= 600) %>%
  filter(nchar(nucleotides2) <= 1600)
summary(nchar(Canis_filtered$nucleotides2))
dplyr::count(Canis_filtered, subspecies_name)
# Proportion of ambiguous (N) positions per sequence; drop sequences with >=1%.
Canis_filtered <- Canis_filtered %>%
  mutate(Undefined = str_count(nucleotides2, "N")/nchar(nucleotides2))
summary(Canis_filtered$Undefined)
sort(Canis_filtered$Undefined, decreasing = T)[1:20]
Canis_filtered <- Canis_filtered %>%
  filter(Undefined < 0.01)
dplyr::count(Canis_filtered, subspecies_name)
#rm(Canis_rd)
# Align time
Canis_filtered <- as.data.frame(Canis_filtered)
class(Canis_filtered$nucleotides2)
# Convert sequences to a named DNAStringSet for alignment.
Canis_filtered$nucleotides2 <- DNAStringSet(Canis_filtered$nucleotides2)
names(Canis_filtered$nucleotides2) <- Canis_filtered$ID
##Analysis with muscle. Dataset is small so will not adjust maxiters. Will run twice, with out setting a gap penalty and once with an intense gap penalty to see how that changes the alignment and the results
Canis_alignment <- DNAStringSet(muscle::muscle(Canis_filtered$nucleotides2, maxhours = 1), use.names = TRUE)
writeXStringSet(Canis_alignment, file = "CanisAlignment.fasta")
BrowseSeqs(Canis_alignment)
# Pairwise Kimura-2-parameter distances, then neighbour-joining clusters cut
# at 2% divergence (a common barcode species-delimitation threshold).
Canis_Bin <- as.DNAbin(Canis_alignment)
Canis_distanceMatrix <- dist.dna(Canis_Bin, model = "k80", as.matrix = TRUE, pairwise.deletion = TRUE)
Canis_clusters <- IdClusters(Canis_distanceMatrix, method = "NJ", cutoff = 0.02, showPlot = TRUE, type = "both", verbose = TRUE)
count(Canis_clusters[[1]], cluster)
Check <- filter(Canis_clusters[[1]], cluster == 1)
#Good might be a reverse
# Lets start analysis
#Will help later
# Re-read the alignment as a multidna object and convert to a genind for the
# population-genetic packages.
Canis_Multi <- read.multiFASTA("CanisAlignment.fasta")
Canis_genind <- multidna2genind(Canis_Multi)
#Set pop
# Population = subspecies label; relies on Canis_filtered rows being in the
# same order as the FASTA records written above.
strata(Canis_genind) <- data.frame("Pop" = Canis_filtered$subspecies_name)
head(strata(Canis_genind))
setPop(Canis_genind) <- ~Pop
#Set of outputs to analysis
# Differentiation statistics (Gst, D, phi_st) plus bootstrap CIs for Nei's Gst.
diff_stats(Canis_genind, phi_st = T)
bootstrap <- chao_bootstrap(Canis_genind, nreps = 100)
#Could change statistic used here, probs should and justify
summarise_bootstrap(bootstrap, Gst_Nei)
#amova
Canis_DistPair <- dist.multidna(Canis_Multi, pool = T)
#performing AMOVA
amova(Canis_DistPair ~ Pop, data = strata(Canis_genind), nperm = 100)
#PCA
# PCA on the mean-imputed allele table; plot eigenvalues and sample scores
# coloured by population.
X <- scaleGen(Canis_genind, NA.method="mean")
pca1 <- dudi.pca(X,cent=FALSE,scale=FALSE,scannf=FALSE,nf=3)
barplot(pca1$eig[1:50],main="PCA eigenvalues", col=heat.colors(50))
s.label(pca1$li)
col <- funky(15)
CanFac <- as.factor(Canis_filtered$subspecies_name)
s.class(pca1$li, CanFac,xax=1,yax=3, col=transp(col,.6), axesell=FALSE,
        cstar=0, cpoint=3, grid=FALSE)
class(pop(Canis_genind))
|
02909108f6e5dabd0eb4e340d3973e7de003a969 | e4f30066cfe4c6e12d9d1d1ec435289da64fb396 | /AES_set1/lm_simple_features.R | ae76f4e9b3368fbfdcdb7fd397991654cbd340bc | [] | no_license | dpalmasan/tesismagister | 8d79c6a98fad912905209ad9c0896f6a443f11c5 | e9579b72698d3693dda3c8cdd9fcdedf72b4cdc2 | refs/heads/master | 2020-04-05T23:47:48.895640 | 2016-09-13T22:18:18 | 2016-09-13T22:18:18 | 48,250,676 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | lm_simple_features.R | library(caret); library(Metrics); source("AgreementMeasures.R"); source("auxiliares.R"); library(ggplot2)
load("grades")
dataset <- read.csv("dataset_features.csv")
nzv <- nearZeroVar(dataset)
dataset <- dataset[, -nzv]
dataset$grades <- grades
# Crea particion de datos
trainIndex <- createDataPartition(dataset$grades, p = 0.8, list = FALSE)
training <- dataset[trainIndex, ]
test <- dataset[-trainIndex, ]
mod <- lm(grades ~ ., data=training)
pred_base <- predict(mod, test)
ScoreQuadraticWeightedKappa(round2(pred_base), test$grades, 2, 12)
exactAgreement(round2(pred_base), test$grades)
adjacentAgreement(round2(pred_base), test$grades)
# Testear Seleccion de Features
library(MASS)
step <- stepAIC(mod, direction="both")
processed <- step$model
mod2 <- train(grades ~ ., data=processed, method="rf", trControl=trainControl(method="cv",number=5),
prox=TRUE, allowParallel=TRUE)
pred_base <- predict(mod2, test[, names(processed)])
ScoreQuadraticWeightedKappa(round2(pred_base), test$grades, 2, 12)
exactAgreement(round2(pred_base), test$grades)
adjacentAgreement(round2(pred_base), test$grades)
|
461c88b1f133046d6cb16bee9eb04d84e65b8a71 | 963472d385805d4f266da785c9d9e798de2e99d3 | /Rossmann/Kaggle_Rossmann_2015_10_11.R | 5c6beadbdd2d48e119a598c2bd757d56bb3bc23c | [] | no_license | hanakysela/Kaggle | e4bf030dbdf9a9614a9ca00c36897bb755e37fb1 | 234c995703d76bfc6a2af348163495872a0caeb6 | refs/heads/master | 2021-01-18T05:02:41.688457 | 2016-04-22T14:29:08 | 2016-04-22T14:29:08 | 44,120,317 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,586 | r | Kaggle_Rossmann_2015_10_11.R | ### Kaggle Rossmann completition 2015_10_11 ###
#### Intro and packages ####
setwd("C:/Users/Hana/R/Kaggle_Rossmann")
## delete everything old
rm(list=ls())
#install.packages("ggplot2")
library("ggplot2")
#### Import and Clean Up ####
## import train and test data ##
train<-read.csv("train.csv")
test<-read.csv("test.csv")
store<-read.csv("store.csv")
str(train)
str(test)
## see what they are - change numbers to factors
train$Store<-as.factor(train$Store)
train$DayOfWeek<-as.factor(train$DayOfWeek)
train$Open<-as.factor(train$Open)
train$Promo<-as.factor(train$Promo)
train$SchoolHoliday<-as.factor(train$SchoolHoliday)
test$Store<-as.factor(test$Store)
test$DayOfWeek<-as.factor(test$DayOfWeek)
test$Open<-as.factor(test$Open)
test$Promo<-as.factor(test$Promo)
test$SchoolHoliday<-as.factor(test$SchoolHoliday)
## manage date format (adds year and month column)
train$Date<-as.Date(as.character(train$Date))
train$year<-as.factor(format(train$Date, "%Y"))
train$month<-as.factor(format(train$Date, "%m"))
test$Date<-as.Date(as.character(test$Date))
test$year<-as.factor(format(test$Date, "%Y"))
test$month<-as.factor(format(test$Date, "%m"))
## add store info to either train and test
train<-merge(train, store)
test<-merge(test, store)
# looking at only stores that were open in the train set
train <- train[ which(train$Open== "1"),]
#### Are some data missing? ####
summary(train$StateHoliday) #OK
summary(train$DayOfWeek) #OK
summary(train$Open) #OK
summary(test$Open) #11 NA's
summary(train$year) #OK |
6507b5778b3f10061750be842deb4c0dac5354e3 | 7ad9ae835d29ea5325775ed56a5ff4898820dafb | /R/weightSign.R | f6fc52ef2af63c99a2d8e965387672e901b7839b | [] | no_license | mronkko/matrixpls | 829528a76ee2c81ddcdd93aa3c9a39e8174cef1e | 7ae8b9c6e7b20d2503d452b328e12496278f5a70 | refs/heads/master | 2022-09-04T13:39:52.636494 | 2022-08-05T07:54:50 | 2022-08-05T07:54:50 | 7,702,950 | 6 | 8 | null | 2021-04-29T04:57:34 | 2013-01-19T12:31:41 | R | UTF-8 | R | false | false | 1,715 | r | weightSign.R | # =========== Weight sign corrections ===========
#'Sign ambiguity corrections
#'
#'Sign ambiguity corrections adjust the signs of the weights to satisfy a criterion.
#'
#'Instead of fixing a weight to a particular value, composite variables are typically provided a
#'scale by standardization. This leads to sign indeterminacy because standardized weights \code{W}
#'and \code{-W} both satisfy the scaling constraint. The sing ambiguity corrections add additional
#'constraints that make
#'
#'The sign indeterminacy
#'corrections should not be confused with sign chance corrections applied to boostrap samples
#'(See \code{\link{signChange}}).
#'
#'@inheritParams matrixpls-common
#'@return \code{W} after sign correction.
#'
#'@name weightSign
#'
#'@references
#'Wold, H. (1985). Partial Least Squares. In S. Kotz & N. L. Johnson (Eds.), Encyclopedia of
#'statistical sciences (Vol. 6, pp. 581–591). New York: Wiley.
#'@seealso
#'\code{\link{matrixpls}};
#
NULL
#'@describeIn weightSign Adjust the signs of W so that the majority of the indicators are positively
#'correlated with the composite as proposed by Wold (1985).
#'
#'@export
weightSign.Wold1985 <- function(W,S){
  # Covariance of every composite (row of W) with each indicator.
  composite_indicator_cov <- W %*% S
  # Per composite, balance of positively vs. negatively correlated indicators;
  # only cells where the composite actually has a nonzero weight are counted.
  sign_balance <- rowSums(sign(composite_indicator_cov * (W != 0)))
  # Flip the weight rows whose indicators are mostly negatively correlated,
  # so the majority of indicators load positively on their composite.
  flip <- ifelse(sign_balance < 0, -1, 1)
  sweep(W, 1, flip, "*")
}
#'@describeIn weightSign Adjust the signs of W so that the first indicator of each composite has positive
#'weight.
#'
#'@export
weightSign.dominantIndicator <- function(W,S){
  # Orient each composite (row of W) by the sign of its first nonzero weight,
  # i.e. the "dominant" indicator. S is unused; it is kept so the signature
  # matches the other weightSign.* functions.
  signs <- apply(W, 1, function(x){
    nz <- which(x != 0)
    # Guard against all-zero rows: previously min(which(x != 0)) returned Inf
    # (with a warning) and x[Inf] was NA, turning the whole row into NA after
    # the sweep. Leave such rows unchanged instead.
    if (length(nz) == 0L) return(1)
    sign(x[nz[1L]])
  })
  sweep(W, 1, signs, "*")
}
|
415f255acb31f1c8eb363e300046af07fa7d7c48 | 08c3bd0ad768c63a08cce512c16a3b1626983aa0 | /figure2.R | 4e8a2a3d91b07a341bf10de23a5689e1662c2939 | [] | no_license | cxhnaklih/ExData_Plotting1 | f7654c7312a10ba27cf6f471ff30f56ca79db1c0 | 36b00906e593519b1d43ebbce3328e91bea7ac0a | refs/heads/master | 2021-01-18T04:55:05.782152 | 2015-05-13T04:11:45 | 2015-05-13T04:11:45 | 35,474,615 | 0 | 0 | null | 2015-05-12T07:52:47 | 2015-05-12T07:52:46 | null | UTF-8 | R | false | false | 689 | r | figure2.R | household_power_consumption <- read.csv("../household_power_consumption.txt", sep=";", na.strings="?", colClasses = c("character","character", rep("numeric",7)))
household_power_consumption$dateTime <-with(household_power_consumption, as.POSIXlt(paste(Date, Time), format="%d/%m/%Y %H:%M:%S", tz='UTC'))
hpcSub <-household_power_consumption[household_power_consumption$dateTime >= as.POSIXlt('2007-02-01', '%Y-%m-%d', tz='UTC') & household_power_consumption$dateTime < as.POSIXlt('2007-02-03', '%Y-%m-%d', tz='UTC'),]
png(file="figure2.png",width=480,height=480)
plot(hpcSub$dateTime, hpcSub$Global_active_power, lty =1, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off() |
6054b8da2b283e3cc945f2dd932443f1612d4278 | fc49e481a679798e5aeaf999b82881b064118bb8 | /SupportVectorMachine.R | 2cc3981e1ab15c17f6bca706ff0726630ab0119c | [] | no_license | FranziskaDee/MachineLearning | 038a2374fe580c1f6da1ac123cd4ca9c49418f6d | 21389c513e6f51eba1b3597fd0d3b28761435827 | refs/heads/main | 2023-09-01T08:19:56.704580 | 2021-10-25T14:06:25 | 2021-10-25T14:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,210 | r | SupportVectorMachine.R | # Machine Learning with R - Chapter 7
## Second example - OCR data in support vector machine
#install.packages("kernlab")
library(kernlab)
letters <- read.csv("letterdata.csv", stringsAsFactors = TRUE)
str(letters)
letters_train <- letters[1:16000, ]
letters_test <- letters[16001:20000, ]
###Training the model
letter_classifier <- ksvm(letter ~ ., data = letters_train,
kernel = "vanilladot")
letter_classifier
###Test the model
letter_predictions <- predict(letter_classifier, letters_test)
head(letter_predictions)
table(letter_predictions, letters_test$letter)
agreement <- letter_predictions == letters_test$letter
table(agreement)
prop.table(table(agreement))
###Improving model performance
# Using the Gaussian RBF kernel function in the ksvm() -> actually always good to start out with this one
letter_classifier_rbf <- ksvm(letter ~ ., data = letters_train,
kernel = "rbfdot")
letter_predictions_rbf <- predict(letter_classifier_rbf,
letters_test)
agreement_rbf <- letter_predictions_rbf == letters_test$letter
table(agreement_rbf)
prop.table(table(agreement_rbf))
|
142231f388e37c1c644a399ef2d7ae4cee81acfe | 63e2218c1764408c21d4dbf7f31567bfe48ceade | /Inferència I/Inf_pr_04_codi.R | de0b4b7188ce2d578c392f636be9d9a09b1f0326 | [] | no_license | dessipat/Mentoria | b0d9793a5640ff0681dbef1fb83b395df0d48b8e | fef2679362016eb3b1d7a0745964be54e7bf5b28 | refs/heads/main | 2023-05-13T00:29:42.330230 | 2021-06-07T10:16:47 | 2021-06-07T10:16:47 | 368,145,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,950 | r | Inf_pr_04_codi.R |
# Ex 1
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
dades <- read.csv(file = 'HDI-Data.csv')
head(dades)
summary(dades)
dades$HDI_group <- factor(dades$HDI_group,
levels = c(1,2,3,4),
labels = c("Low", "Medium","High", "Very high"))
dades$GDI_group <- factor(dades$GDI_group,
levels = c(1,2,3,4,5),
labels = c("Very high", "High", "Medium","Low", "Very low"))
plot(dades$HDI_group, main="Human Development \n Index Group",
col="lightblue", ylim=c(0,50)); grid()
plot(dades$GDI_group, main="Gender Development \n Index Group",
col="lightblue", ylim=c(0,50)); grid()
hist(dades$MYS, main="Mean Years of Schooling", col="lightblue"); grid()
hist(dades$SWSP, main="Share of Women Sits in Parliament", col="lightblue"); grid()
hist(dades$MMR, main="Maternal Mortality Rate", col="lightblue"); grid()
hist(dades$ABR, main="Adolescent Birth Rate", col="lightblue"); grid()
hist(dades$GNI, main="National Income", col="lightblue"); grid()
# Ex 2
plot(dades$MYS, dades$GNI)
cor(dades$GNI, dades$MYS)
(ct <- cor.test(dades$GNI, dades$MYS))
ct$p.value
# Ex 3
x <- c(29.8, 30.1, 30.5, 30.6, 31.3, 31.7, 32.6, 33.1, 32.7, 32.8)
y <- c(327, 456, 509, 497, 596, 573, 661, 741, 809, 717)
cor.test(x, y)
# Ex 4
mod1<-lm(GNI~MYS, data=dades)
(smod1 <- summary(mod1))
smod1$r.squared
residus <- dades$GNI - mod1$fitted.values
mean(residus)
sum(residus)
sum(residus * dades$MYS)
plot(dades$MYS, dades$GNI, ylim = c(-10000, max(dades$GNI))); abline(mod1, col=2)
plot(dades$MYS, residus); abline(h=0, col=2)
predict(mod1, newdata = data.frame(MYS=9), interval = "prediction", level = 0.95)
cor.test(dades$GNI, dades$ABR)
mod2<-lm(GNI~ABR, data=dades)
(smod2 <- summary(mod2))
mod2<-lm(GNI~ABR, data=dades)
(smod2 <- summary(mod2))
mean(mod2$residuals)
sum(mod2$residuals)
sum(mod2$residuals * dades$ABR)
|
d4b110fb5b27bd3cf1da5747df5499ed8d1444b5 | 5c9a4259b32b72505f914b52777372771adfe6e8 | /scripts/env/8.14.18.MercuryInfo.R | 3e947561f45475210449140f46a8518ab11850ea | [] | no_license | lianwguo/FUIteam | b82d8b140d147d065ed8076f8911faebbaf4fef4 | 7e9d15a9a27fdfec8d79f8f8bf823c5ef92db800 | refs/heads/master | 2021-06-09T05:57:55.341333 | 2021-04-13T20:35:41 | 2021-04-13T20:35:41 | 142,158,910 | 0 | 0 | null | 2018-07-24T13:04:49 | 2018-07-24T12:59:30 | null | UTF-8 | R | false | false | 1,901 | r | 8.14.18.MercuryInfo.R | #looking at average mercury content
head(CloseMerc306)
str(CloseMerc306)
unique(CloseMerc306$Common.Name)
#aggregate mercury results and fish weight by common name and year
agg306 <- aggregate(cbind(Mercury.Results, Average.Fish.Weight..grams.)~Common.Name, data=CloseMerc306, mean)
agg306
write.csv(agg306, "~/FUIteam/PydioData/env/data_outputs/aggHg306.csv")
aggYr306 <- aggregate(cbind(Mercury.Results, Average.Fish.Weight..grams.)~Common.Name + CollectYear, data=CloseMerc306, mean)
aggYr306
write.csv(aggYr306, "~/FUIteam/PydioData/env/data_outputs/aggHgYr306.csv")
agg222 <- aggregate(cbind(Mercury.Results, Average.Fish.Weight..grams.)~Common.Name, data=CloseMerc222, mean)
agg222
write.csv(agg222, "~/FUIteam/PydioData/env/data_outputs/aggHg222.csv")
aggYr222 <- aggregate(cbind(Mercury.Results, Average.Fish.Weight..grams.)~Common.Name + CollectYear, data=CloseMerc222, mean)
aggYr222
write.csv(aggYr222, "~/FUIteam/PydioData/env/data_outputs/aggHgYr222.csv")
ggplot(data = CloseMerc306, aes(x = Average.Fish.Length..cm., y = Mercury.Results, color = Common.Name)) +
geom_point() +
geom_smooth(method="glm",
method.args=list(family=gaussian(link="log")), se = FALSE) +
labs(x="Total Length (cm)", y="Methylmercury content (ppm)") + #labels
font("xylab", size = 14, face = "bold") +
font("axis.text", size = 14) +
font("legend.title", size = 14, face = "bold") +
font("legend.text", size = 10)
ggplot(data = CloseMerc222, aes(x = Average.Fish.Length..cm., y = Mercury.Results, color = Common.Name)) +
geom_point() +
geom_smooth(method="glm",
method.args=list(family=gaussian(link="log")), se = FALSE) +
labs(x="Total Length (cm)", y="Methylmercury content (ppm)") + #labels
font("xylab", size = 14, face = "bold") +
font("axis.text", size = 14) +
font("legend.title", size = 14, face = "bold") +
font("legend.text", size = 10)
|
808690586204222114b56bb1170881ed8e7c956c | 14e3be6cbfddcc912a6705500f301d4e2159be7a | /run_analysis.R | 2e463923bac1ba1b0d686a896c4c55a2c41a75ae | [] | no_license | StreetsefeR/GettingCleaningDataCourseProject | 513fb159647e493a81787b0ab92e79b134b27a81 | 2eac8bf73103dbd821dc32976ee981f029b0a703 | refs/heads/master | 2020-06-04T03:43:39.534065 | 2015-03-22T15:13:39 | 2015-03-22T15:13:39 | 32,680,369 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,971 | r | run_analysis.R | # GettingCleaningDataCourseProject
# 1. Merges the training and the test sets to create one data set.
temp1 <- read.table("UCI HAR Dataset/train/X_train.txt")
temp2 <- read.table("UCI HAR Dataset/test/X_test.txt")
X <- rbind(temp1, temp2)
temp1 <- read.table("UCI HAR Dataset/train/subject_train.txt")
temp2 <- read.table("UCI HAR Dataset/test/subject_test.txt")
Subj <- rbind(temp1, temp2)
temp1 <- read.table("UCI HAR Dataset/train/y_train.txt")
temp2 <- read.table("UCI HAR Dataset/test/y_test.txt")
Y <- rbind(temp1, temp2)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("UCI HAR Dataset/features.txt")
good_featur_ind <- grep("-mean\\(\\)|-std\\(\\)", features[, 2])
X <- X[, good_featur_ind]
names(X) <- features[good_featur_ind, 2]
names(X) <- gsub("\\(|\\)", "", names(X))
# 3. Uses descriptive activity names to name the activities in the data set.
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
activities[, 2] = gsub("_", "", tolower(as.character(activities[, 2])))
Y[,1] = activities[Y[,1], 2]
names(Y) <- "activity"
# 4. Appropriately labels the data set with descriptive variable names.
names(Subj) <- "subject"
tidy_data <- cbind(Subj, Y, X)
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
uniqueSubj = unique(Subj)[,1]
qtySubj = length(unique(Subj)[,1])
qtyActiv = length(activities[,1])
qtyCols = dim(tidy_data)[2]
tidy2_data = tidy_data[1:(qtySubj*qtyActiv), ]
curRow = 1
for (i in 1:qtySubj) {
for (j in 1:qtyActiv) {
tidy2_data[curRow , 1] = uniqueSubj[i]
tidy2_data[curRow , 2] = activities[j, 2]
temp <- tidy_data[tidy_data$subject==i & tidy_data$activity==activities[j, 2], ]
tidy2_data [curRow , 3:qtyCols] <- colMeans(temp[, 3:qtyCols])
curRow = curRow +1
}
}
write.table(tidy2_data, "second_tidy_data_set.txt", row.name=FALSE)
|
8a399abdbf346af621c64d3e519db84b8c439524 | 4249ddaf7daa8381627889ba2c03208e53783566 | /r-package/prep_data/read_quilombo_area.R | c8bedcb1d3a6e62c8b169d9809da3b53c4f8ea34 | [] | no_license | ipeaGIT/geobr | 04d16b346398485c39788dca202b15ac65099c2a | 9d7e89efc88871378711086671a11b47940dad47 | refs/heads/master | 2023-09-01T19:13:11.811800 | 2023-08-31T01:49:56 | 2023-08-31T01:49:56 | 177,215,782 | 711 | 133 | null | 2023-09-14T12:14:05 | 2019-03-22T22:15:22 | R | UTF-8 | R | false | false | 1,377 | r | read_quilombo_area.R | #' Download spatial data of quilombo areas in Brazil
#'
#' @description
#' The data set covers the whole of Brazil and it includes quilombo areas. The original data
#' comes from the National Institute of Colonization and Agrarian Reform (INCRA) and can be found at
#' \url{ttps://certificacao.incra.gov.br/csv_shp/export_shp.py}.
#'
#'
#' @param simplified Logic `FALSE` or `TRUE`, indicating whether the function
#' returns the data set with original' resolution or a data set with 'simplified'
#' borders. Defaults to `TRUE`. For spatial analysis and statistics users should
#' set `simplified = FALSE`. Borders have been simplified by removing vertices of
#' borders using `sf::st_simplify()` preserving topology with a `dTolerance` of 100.
#' @param showProgress Logical. Defaults to `TRUE` display progress bar
#'
#' @return An `"sf" "data.frame"` object
#'
#' @export
#' @examples \dontrun{ if (interactive()) {
#' # Read all quilombo areas
#' i <- read_quilombo_area()
#' }}
read_quilombo_area <- function(simplified=TRUE, showProgress=TRUE){
# Get metadata with data url addresses
temp_meta <- select_metadata(geography="quilombo_area", simplified=simplified)
# list paths of files to download
file_url <- as.character(temp_meta$download_path)
# download files
temp_sf <- download_gpkg(file_url, progress_bar = showProgress)
return(temp_sf)
}
|
316ac62460e5d3c9cd2864ddb181f04d8337ee1e | 4ed306474b02cf5c074a8e8dcb35b98018671fe0 | /munging_results_cor.R | 717f13bb9067e11fe8a2cf6cb40709572151cd62 | [] | no_license | Karagul/ahp-scenarios | e999712e34f3c7f03a38255bb8a942e3d0b80f9d | cb2141c6e132630fb9c6fe44b36d43d1f4140a45 | refs/heads/master | 2020-06-17T20:09:30.690031 | 2018-11-06T18:30:23 | 2018-11-06T18:30:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,655 | r | munging_results_cor.R | # concatenate individual results files and save as one big file
library(tidyverse)
# result files from experiment
res_files <- list.files("output", pattern = "cor")
# names of the models used in the experiment
models <- c("allst", "sc1", "sc2", "sc3", "sc4")
# parameter combinations for experiment 1 & 2
pars <- expand.grid(Ni = c(9), J = c(10), Ns = 10, pNs_poscor = 1, poscor = c(0.01),
pcat = c(2), stddevbase = 0.01, stdevrange = c(0.04),
pNs_poscor_w = c(1), poscor_w = c(0.33, 0.66, 0.99), n_futures_used = c(1, 3, 5, 10))
pars <- pars %>% filter(!(pNs_poscor != 1 & poscor == 0.01)) %>% filter(!(pNs_poscor_w != 1 & poscor_w == 0.01))
pars <- pars %>% arrange(Ni, J)
#save(pars, file = "output/simstudy_pars.RData")
#load("output/simstudy_pars.RData")
# each results file contains results for N sim runs at all par combos, so
# repeat the parameters data frame N times
pars$id <- 1:nrow(pars)
mypars12 <- bind_rows(replicate(50, pars, simplify = FALSE)) %>% arrange(id) %>% select(-id)
# parameter combinations for experiment 3 & 4
# save.image(file = "output/AHPSP_simulation_results_wcor3/4.RData")
pars <- expand.grid(Ni = c(9), J = c(10), Ns = 10, pNs_poscor = 1, poscor = c(0.33, 0.66, 0.99),
pcat = c(2), stddevbase = 0.01, stdevrange = c(0.04),
pNs_poscor_w = c(1), poscor_w = c(0.01, 0.33, 0.66, 0.99), n_futures_used = c(1, 3, 5, 10))
pars <- pars %>% filter(!(pNs_poscor != 1 & poscor == 0.01)) %>% filter(!(pNs_poscor_w != 1 & poscor_w == 0.01))
pars <- pars %>% arrange(Ni, J)
# each results file contains results for N sim runs at all par combos, so
# repeat the parameters data frame N times
pars$id <- 1:nrow(pars)
mypars34 <- bind_rows(replicate(50, pars, simplify = FALSE)) %>% arrange(id) %>% select(-id)
# concatenate all the results files, and the parameter values that generated them
# first file (sets up the structure, colnames etc)
i <- 1
x <- load(paste0("output/",res_files[i]))
rm(pars) # x contains a pars df, remove to avoid confusion with above!
res_out <- data.frame(t(res_out), row.names = NULL)
colnames(res_out) <- c(paste0("Rtb_", models), paste0("Rmb_", models), paste0("cor_", models))
res_out[,11:15] <- 1 - res_out[,11:15] # make correls into 1 - correls, make range similar to other outcomes
res_cor <- cbind.data.frame(mypars12, res_out)
i <- 2
x <- load(paste0("output/",res_files[i]))
rm(pars) # x contains a pars df, remove to avoid confusion with above!
res_out <- data.frame(t(res_out), row.names = NULL)
colnames(res_out) <- c(paste0("Rtb_", models), paste0("Rmb_", models), paste0("cor_", models))
res_out[,11:15] <- 1 - res_out[,11:15] # make correls into 1 - correls, make range similar to other outcomes
res_cor <- rbind.data.frame(res_cor, cbind.data.frame(mypars12, res_out))
# rest of the files
for(i in 3:4){
x <- load(paste0("output/",res_files[i]))
rm(pars) # x contains a pars df, remove to avoid confusion with above!
res_out <- data.frame(t(res_out), row.names = NULL)
colnames(res_out) <- c(paste0("Rtb_", models), paste0("Rmb_", models), paste0("cor_", models))
res_out[,11:15] <- 1 - res_out[,11:15] # make correls into 1 - correls, make range similar to other outcomes
res_cor <- rbind.data.frame(res_cor, cbind.data.frame(mypars34, res_out))
}
# reshape res_all to tidy format
res_cor <- res_cor %>% gather(out_mod, value, 12:26)
# split out_mod variable into separate outcome and model variables
res_cor <- res_cor %>% separate(out_mod, sep = "_", c("outcome", "model"))
# save all simulation runs
save(res_cor, file = "output/AHPSP_simulation_corresults.rdata")
|
9d2777f94aeac988c27ac5afa8a660df1ad9c18f | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.end.user.computing/man/workdocs_describe_document_versions.Rd | e4ed7b430bc6117c4f518b01376152b1bec90f9e | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,018 | rd | workdocs_describe_document_versions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workdocs_operations.R
\name{workdocs_describe_document_versions}
\alias{workdocs_describe_document_versions}
\title{Retrieves the document versions for the specified document}
\usage{
workdocs_describe_document_versions(AuthenticationToken, DocumentId,
Marker, Limit, Include, Fields)
}
\arguments{
\item{AuthenticationToken}{Amazon WorkDocs authentication token. Not required when using AWS
administrator credentials to access the API.}
\item{DocumentId}{[required] The ID of the document.}
\item{Marker}{The marker for the next set of results. (You received this marker from a
previous call.)}
\item{Limit}{The maximum number of versions to return with this call.}
\item{Include}{A comma-separated list of values. Specify "INITIALIZED" to include
incomplete versions.}
\item{Fields}{Specify "SOURCE" to include initialized versions and a URL for the
source document.}
}
\value{
A list with the following syntax:\preformatted{list(
DocumentVersions = list(
list(
Id = "string",
Name = "string",
ContentType = "string",
Size = 123,
Signature = "string",
Status = "INITIALIZED"|"ACTIVE",
CreatedTimestamp = as.POSIXct(
"2015-01-01"
),
ModifiedTimestamp = as.POSIXct(
"2015-01-01"
),
ContentCreatedTimestamp = as.POSIXct(
"2015-01-01"
),
ContentModifiedTimestamp = as.POSIXct(
"2015-01-01"
),
CreatorId = "string",
Thumbnail = list(
"string"
),
Source = list(
"string"
)
)
),
Marker = "string"
)
}
}
\description{
Retrieves the document versions for the specified document.
By default, only active versions are returned.
}
\section{Request syntax}{
\preformatted{svc$describe_document_versions(
AuthenticationToken = "string",
DocumentId = "string",
Marker = "string",
Limit = 123,
Include = "string",
Fields = "string"
)
}
}
\keyword{internal}
|
0ea459260dbc6f21ff319a70db8e22c92d4ec46e | df21fb8b2efe7e9c7b753699dba066b0500aae55 | /rstudio/CustomerChurnDashboard/clientPanel.R | e188e4eb4b0d3bc4982f36b5bf2995421a228336 | [] | no_license | hunglb/Telco-churn | e92d91d0098c01cb1e55386148d16c9a3987cab7 | 26a2cfc448f849edb913f0877ad4cabc7d3bfa6b | refs/heads/master | 2022-07-08T19:49:46.895388 | 2022-06-30T06:15:23 | 2022-06-30T06:15:23 | 204,580,701 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,704 | r | clientPanel.R | # Sample Materials, provided under license.
# Licensed Materials - Property of IBM
# © Copyright IBM Corp. 2019. All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
# Load shap JavaScript
shapjs <- content(GET("https://github.com/slundberg/shap/raw/0849aa20551cf9825f9e294fcc29d7fbe7b9f932/shap/plots/resources/bundle.js"))
clientPanel <- function() {
tabPanel(
"Client View",
value = "clientPanel",
h1(textOutput("customerName")),
br(),
panel(
br(),br(),
fluidRow(
column(3,
div(id = "customerImage")),
column(9,
br(),
tableOutput("customerTable")
)
),
br(),br(),
fluidRow(
column(3,
div(h3("Financial Profile:"), style="text-align:center;")
),
column(9, tableOutput("customerFinancesTable"))
),
br()
),
panel(
h3("Account Details"),
br(),
tableOutput("customerAccountsTable"),
br(),
tableOutput("customerAccountSummaryTable")
),
panel(
h3("Churn Prediction "),
br(),
# Load shap JS
tags$script(HTML(shapjs)),
# Listen for responseInserted messages
tags$script("
Shiny.addCustomMessageHandler('responseInserted', function(elem) {
var checkExist = setInterval(function() {
if ($('#'+elem.id).length) {
if (window.SHAP)
SHAP.ReactDom.render(
SHAP.React.createElement(SHAP.AdditiveForceVisualizer, elem.data),
document.getElementById(elem.id)
);
clearInterval(checkExist);
}
}, 100); // check every 100ms
});
"),
tags$div(
id = "authPanel",
column(4,
panel(
h4("Connect to ICP for Data API"),
textInput("hostname", "ICP4D Hostname"),
textInput("username", "ICP4D Username"),
passwordInput("password", "ICP4D Password"),
actionButton("authBtn", "Authenticate API", class = "btn-primary btn-lg btn-block", style = "max-width:300px", disabled = TRUE),
tags$head(tags$style("#authError{color:red;}")),
verbatimTextOutput("authError")
),
style = "max-width:360px;"
)
),
hidden(
tags$div(
id = "deploymentPanel",
column(4,
panel(
tags$h4("Model Scoring Pipeline Deployment"),
pickerInput(
inputId = 'deploymentSelector',
label = 'Deployment Name:',
choices = list(),
options = pickerOptions(width = "auto", style = "btn-primary")
),
tags$p(
tags$strong("Scoring URL: "),
textOutput(outputId = "scoring_url", inline = TRUE),
style = "word-wrap: break-word"
),
tags$p(
tags$strong("Project Release: "),
textOutput(outputId = "release_name", inline = TRUE)
),
tags$p(
tags$strong("Script Name: "),
textOutput(outputId = "script_name", inline = TRUE)
),
tags$p(
tags$strong("Engine: "),
textOutput(outputId = "runtime", inline = TRUE)
))
),
tags$div(id = "scoreBtnSection",
column(4,
br(),
actionButton(
"scoreBtn",
"Predict Churn",
class = "btn-primary btn-lg btn-block",
disabled = TRUE
),
br(),
h4("Input JSON:"),
verbatimTextOutput("pipelineInput"),
br(),
tags$head(tags$style("#scoringError{color:red;}")),
verbatimTextOutput("scoringError"))
),
column(8,
hidden(
tags$div(id = "scoringResponse")
)
)
)
)
)
)
}
# Reactive store defined at the top level, so it is shared across ALL Shiny
# sessions: once one user authenticates, every session sees the deployments.
# (Fix: use `<-` instead of `=` for top-level assignment, per R style.)
serverVariables <- reactiveValues(deployments = list())
# Shiny module server for the single-customer view.
# Renders the selected customer's demographic/financial/account tables,
# handles ICP4D API authentication, deployment selection, and churn-model
# scoring (including the client-side SHAP force plot).
#
# Args:
#   input, output, session: standard Shiny server arguments.
#   sessionVars: reactiveValues carrying selectedClientId and pipelineInput.
clientServer <- function(input, output, session, sessionVars) {
  # Re-render the whole customer profile whenever the selected client changes.
  observe({
    client <- clients[[toString(sessionVars$selectedClientId)]]
    # Update client name & image
    output$customerName <- renderText(client$name)
    removeUI(selector = "#customerImage > *")
    insertUI(
      selector = "#customerImage",
      where = "beforeEnd",
      ui = img(src = paste0("profiles/",client$image), style = "display: block;margin-left: auto;margin-right: auto;", width=150, height=150)
    )
    # Load customer data for customer sessionVars$selectedClientId
    # (only the first matching row is used)
    selection <- customer[customer$CUSTOMER_ID == sessionVars$selectedClientId,][1,]
    # Table displays for Customer View
    output$customerTable <- renderTable({
      demoDeets <- selection[,c("CUSTOMER_ID", "AGE_RANGE", "MARITAL_STATUS", "FAMILY_SIZE", "PROFESSION", "EDUCATION_LEVEL")]
      demoDeets[["CUSTOMER_ID"]] <- as.integer(demoDeets[["CUSTOMER_ID"]])
      demoDeets[["FAMILY_SIZE"]] <- as.integer(demoDeets[["FAMILY_SIZE"]])
      demoDeets[["ADDRESS"]] <- paste(selection[,"ADDRESS_HOME_CITY"], selection[,"ADDRESS_HOME_STATE"], sep = ', ')
      demoDeets[,c("CUSTOMER_ID", "AGE_RANGE", "ADDRESS", "MARITAL_STATUS", "FAMILY_SIZE", "PROFESSION", "EDUCATION_LEVEL")]
    }, bordered = TRUE, align = 'l')
    output$customerFinancesTable <- renderTable({
      finDeets <- selection[,c("ANNUAL_INCOME", "HOME_OWNER_INDICATOR", "MONTHLY_HOUSING_COST", "CREDIT_SCORE", "CREDIT_AUTHORITY_LEVEL")]
      finDeets[["ANNUAL_INCOME"]] <- dollar(finDeets[["ANNUAL_INCOME"]])
      finDeets[["MONTHLY_HOUSING_COST"]] <- dollar(finDeets[["MONTHLY_HOUSING_COST"]])
      finDeets
    }, bordered = TRUE, align = 'l')
    # NOTE(review): only the first account row per customer is displayed.
    customerAccounts <- account[account$PRIMARY_CUSTOMER_ID == sessionVars$selectedClientId,][1,]
    output$customerAccountsTable <- renderTable(
      customerAccounts[,c("ACCOUNT_ID", "ACCOUNT_TYPE", "PRODUCT_ID", "BASE_CURRENCY", "INVESTMENT_OBJECTIVE", "LIFE_CYCLE_STATUS", "RISK_TOLERANCE", "TAX_ADVANTAGE_INDICATOR")],
      bordered = TRUE, align = 'l')
    customerAccountSummaries <- account_summary[account_summary$ACCOUNT_ID %in% customerAccounts$ACCOUNT_ID,][1,]
    output$customerAccountSummaryTable <- renderTable({
      accountDeets <- customerAccountSummaries[,c("CLOSING_BALANCE", "AMOUNT_OF_DEPOSITS", "AMOUNT_OF_INTEREST_EARNED", "NUMBER_OF_BUY_TRADES", "AMOUNT_OF_BUY_TRADES", "AMOUNT_OF_MARKET_CHANGE")] %>%
        mutate_at(vars(contains("AMOUNT_")), dollar) %>%
        mutate_at(vars(contains("_BALANCE")), dollar) %>%
        mutate_at(vars(contains("NUMBER_")), as.integer)
      accountDeets
    }, bordered = TRUE, align = 'l')
    # Reset scoring state when the selected customer changes.
    removeUI(selector = "#scoringResponse > *", multiple = TRUE)
    shinyjs::hide(id = "scoringResponse")
    shinyjs::show(id = "scoreBtnSection")
    output$scoringError <- renderText('')
    # Payload later sent to the scoring pipeline for this customer.
    sessionVars$pipelineInput <- list(dataset_name = 'customer_history.csv', cust_id = sessionVars$selectedClientId, sc_end_date = '2018-09-30')
    output$pipelineInput <- renderText(toJSON(sessionVars$pipelineInput, indent = 2))
  })
  # Set default hostname for ICP4D API from the URL the app was served on.
  observeEvent(session$clientData$url_hostname, {
    updateTextInput(session, "hostname", value = session$clientData$url_hostname)
  })
  # Enable buttons when inputs are provided
  observe({
    toggleState("authBtn", nchar(input$hostname) > 0 && nchar(input$username) > 0 && nchar(input$password) > 0)
    # NOTE(review): input$endpoint, input$token and
    # input$allCustomers_rows_selected are not defined in the UI visible in
    # this file -- confirm they exist elsewhere, otherwise this toggleState
    # call can never enable the score button.
    toggleState("scoreBtn", nchar(input$endpoint) > 0 && nchar(input$token) > 0 && length(input$allCustomers_rows_selected) > 0)
  })
  # Handle ICP4D API authentication button
  observeEvent(input$authBtn, {
    shinyjs::disable("authBtn")
    # Any warning/error from the API call is surfaced in the authError output.
    tryCatch({
      serverVariables$deployments <- collectDeployments(input$hostname, input$username, input$password, "Churn_Scoring_Pipeline.py")
    }, warning = function(w) {
      output$authError <- renderText(w$message)
    }, error = function(e) {
      output$authError <- renderText(e$message)
    })
    shinyjs::enable("authBtn")
  })
  # Once deployments are available, swap the auth panel for the deployment panel.
  observe({
    if(length(serverVariables$deployments) > 0) {
      # NOTE(review): "deploymentSelector" is a shinyWidgets pickerInput;
      # updatePickerInput() is the documented updater -- confirm that
      # updateSelectInput() refreshes the picker correctly here.
      updateSelectInput(session, "deploymentSelector", choices = names(serverVariables$deployments))
      shinyjs::hide(id = "authPanel")
      shinyjs::show(id = "deploymentPanel")
    }
  })
  # Handle model deployment dropdown switching
  observeEvent(input$deploymentSelector, {
    selectedDeployment <- serverVariables$deployments[[input$deploymentSelector]]
    output$release_name <- renderText(selectedDeployment$release_name)
    output$scoring_url <- renderText(selectedDeployment$scoring_url)
    output$script_name <- renderText(selectedDeployment$deployment_asset_name)
    output$runtime <- renderText(selectedDeployment$runtime_definition_name)
    toggleState("scoreBtn", nchar(selectedDeployment$deployment_url) > 0 && nchar(selectedDeployment$deployment_token) > 0)
  })
  # Handle model deployment scoring button
  observeEvent(input$scoreBtn, {
    shinyjs::disable("scoreBtn")
    selectedDeployment <- serverVariables$deployments[[input$deploymentSelector]]
    payload <- sessionVars$pipelineInput
    payload$to_drop <- colsToDrop
    response <- scoreModelDeployment(selectedDeployment$scoring_url, selectedDeployment$deployment_token, payload)
    if(length(response$error) > 0) {
      output$scoringError <- renderText(toString(response$error))
    }
    else if(length(response$result) > 0) {
      # Successful scoring: hide the button section, show the response panel.
      shinyjs::hide(id = "scoreBtnSection")
      shinyjs::show(id = "scoringResponse")
      result <- response$result
      insertUI(
        selector = "#scoringResponse",
        where = "beforeEnd",
        ui = panel(
          h3("Customer Churn Prediction:"),
          p(
            ggiraphOutput("probPlot", width = "600px", height = "300px")
          ),
          h4("Highest Impact Features: "),
          br(),
          p(
            tableOutput("explainTable")
          ),
          br(),
          h4("Explanation Plot: "),
          HTML(result$explain_plot_html)
        )
      )
      # Ask the client-side handler (registered in the UI) to render the SHAP
      # force plot once the placeholder element exists in the DOM.
      session$sendCustomMessage('responseInserted',
        list(
          id=result$explain_plot_elem_id,
          data=fromJSON(result$explain_plot_data))
      )
      # render high impact features table, sorted by decreasing impact
      vertical <- t(data.frame(result$explain))
      Impact <- vertical[order(vertical, decreasing = TRUE),]
      df <- data.frame(Impact)
      dispTable <- tibble::rownames_to_column(df, "Feature")
      output$explainTable <- renderTable(dispTable, bordered = TRUE)
      # generate probability pie (donut) for the churn prediction
      probDF <- data.frame(result$probabilities)
      colnames(probDF) <- "Probability"
      row.names(probDF) <- c("FALSE", "TRUE")
      probDF <- tibble::rownames_to_column(probDF, "Prediction")
      probDF <- probDF %>%
        mutate(percentage = paste0(round(100 * Probability, 1), "%")) %>%
        mutate(hover_text = paste0(Prediction, ": ", percentage))
      probPlot <- ggplot(probDF, aes(y = Probability, fill = Prediction)) +
        geom_bar_interactive(
          aes(x = 1, tooltip = hover_text),
          width = 0.4,
          stat = "identity",
          show.legend = TRUE
        ) +
        # Center label shows the churn (TRUE) probability.
        annotate("text", x = 0, y = 0, size = 12,
          label = probDF[["percentage"]][probDF[["Prediction"]] == "TRUE"]
        ) +
        coord_polar(theta = "y") +
        theme_void() +
        theme(legend.title=element_text(size=22),
          legend.text=element_text(size=16)) +
        guides(fill = guide_legend(reverse=TRUE))
      output$probPlot <- renderggiraph(ggiraph(ggobj = probPlot, width_svg=6, height_svg=4))
    } else {
      # Unexpected response shape: show the raw response as the error.
      output$scoringError <- renderText(response)
    }
    shinyjs::enable("scoreBtn")
  })
}
|
27da6a4b42c5a8023f180f8f4caabfe21ace964e | 7b842e47b36c5eccaee6b71c77e22519b49c0168 | /R/variables-webdata.R | 2cc94734296d932efd24ec21bf8cf87d9acab00f | [] | no_license | cran/geoknife | 5dc92ca0aa7e7afe2afac3fd848e3b7fc99c07c4 | e6dba004a958f5954317cfcd7faaec1d8d094ae9 | refs/heads/master | 2023-07-20T09:34:10.506446 | 2023-07-06T07:00:12 | 2023-07-06T07:00:12 | 48,080,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,072 | r | variables-webdata.R | #' the variables of a webdata object
#'
#' access or set the variables of a webdata object
#'
#' @param .Object a \code{\link{webdata}} object
#' @param value a character vector for variables
#' @docType methods
#' @usage
#' variables(.Object)
#' variables(.Object) <- value
#' @rdname variables-webdata
#' @aliases
#' variables<-
#' variables
#' @export
setGeneric(name="variables",def=function(.Object){standardGeneric("variables")})
#'@rdname variables-webdata
#'@docType methods
#'@export
setGeneric(name="variables<-",def=function(.Object, value){standardGeneric("variables<-")})
#'@rdname variables-webdata
#'@export
setMethod(f = "variables",signature(.Object = "webdata"),
definition = function(.Object){
return(.Object@variables)
})
#'@rdname variables-webdata
#'@export
setMethod(f = "variables<-",signature(.Object= "webdata", value = "ANY"),
definition = function(.Object, value){
return(initialize(.Object, variables = as.character(value)))
})
|
9a70a1297a6faab09b024254c163333d8dd2537d | 0a906cf8b1b7da2aea87de958e3662870df49727 | /borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609958224-test.R | f46ab3875416685366c9d6fd8748fd706298c81d | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 215 | r | 1609958224-test.R | testlist <- list(x = structure(c(6.15188497603355e-304, 7.00767890330898e-310, 3.23469718988723e-319, 7.29023541236781e-304, 0, 0), .Dim = c(1L, 6L)))
result <- do.call(borrowr:::matchesToCor,testlist)
str(result) |
142b8e3b0ab052390ff961d54c5ca4b72c520b6f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/assocInd/examples/CECI.Rd.R | 4deb88e589c0b210ee973b7f387b74fa7612a80d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | r | CECI.Rd.R | library(assocInd)
### Name: CECI
### Title: The Combined Error Corrected Index
### Aliases: CECI
### ** Examples
# Simulated values
x <- ya <- yb <- yab <- 10
ynull <- 0
# Set parameters to make equivalent to the SRI
psi <- 1.0
w <- 0
E <- 0
# Calculate the group location error corrected index
CECI(x,ya,yb,yab,ynull,w,psi,E)
|
8201510e02a2469cd288614853413e2b5c4bc17f | bd2ce17ca2fe9b630ef755b91639178bfe11f45c | /examples/beast.R | cd7c43a58d257c12cfe1f4b66f796ed47643198f | [] | no_license | DomBennett/om..beast | 96866656a5c4de1c4a5ef79a3423b31bb2e8356c | 586d605eaca51578720145af5f996b08f22b42fd | refs/heads/master | 2020-04-22T17:15:04.640758 | 2019-10-25T13:06:59 | 2019-10-25T13:06:59 | 170,534,357 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 96 | r | beast.R | library(outsider)
beast <- module_import('beast', repo = 'dombennett/om..beast')
beast('-help')
|
fb04bd4e172bff60913521f8154b04ad50db389d | 44e47ad78f8c4588a4b0dc5813a79eda6fa04a24 | /src/ESWA_2-clusteringValidation.R | 6f7c8ad220d4a58478748d728f1189b945bfa090 | [
"MIT"
] | permissive | vrodriguezf/ESWA-2017 | 895ec5e66e1592b9bef667e3a040e8b02846a5c0 | 9778cf54724b6c55f68dfe77bbfc206aab769730 | refs/heads/master | 2021-06-18T05:13:37.510292 | 2017-06-30T16:34:41 | 2017-06-30T16:34:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,582 | r | ESWA_2-clusteringValidation.R | #
#
# This script takes the object "simSeries", i.e., the processed performance
# measures from the simulations in DWR, and performs time-series clustering
# for every performance measure.
#
#
library('ProjectTemplate')
load.project()
# Output flags; not referenced later in this section -- presumably read by
# sourced/project code. TODO confirm.
SAVE_PLOT <- FALSE
SAVE_OUTPUT<- TRUE
# Check dependency on the previous script (metricsTimeSeries): script 1 must
# have produced `simSeries` before this one can run.
if (!exists("simSeries")) {
  source("./src/ESWA/ESWA_1-TimeSeriesMeasures.R")
}
# One list of univariate time series per performance measure, each extracted
# from the simSeries object produced by script 1.
TS <- list(
  score          = lapply(simSeries, function(s) s$scoreTS),
  agility        = lapply(simSeries, function(s) s$agilityTS),
  attention      = lapply(simSeries, function(s) s$attentionTS),
  cooperation    = lapply(simSeries, function(s) s$cooperationTS),
  aggressiveness = lapply(simSeries, function(s) s$aggressivenessTS),
  precision      = lapply(simSeries, function(s) s$precisionTS)
)

# Dissimilarity measures to evaluate (name = label, command = code for diss()).
dissMethods <- list(
  c(name = "FRECHET", command = "FRECHET", params = NULL),
  c(name = "DTWARP", command = "DTWARP", params = NULL)
)
# c(name="NCD",command="NCD",params=NULL))

# Clustering methods to evaluate (command holds the clustering function).
clustMethods <- list(
  c(name = "PAM", command = pam, params = NULL),
  c(name = "hclust", command = hclust, params = NULL)
)
# c(name="diana",command=diana,params=NULL))

# Candidate numbers of clusters tried for every combination.
K <- 2:8
# For every performance measure: compute each dissimilarity matrix, cluster it
# with every method and every k, collect internal validation statistics, and
# cast them into one summary data frame per measure.
validationResults <- list()
# Fix: keep the clustering results of EVERY measure. Previously only the
# last iteration's `clResults` survived the loop, so the "best clustering"
# extraction below silently used the last measure's clusterings for all
# measures.
clResultsPerMeasure <- list()
for (i in seq_along(TS)) {
  dataTS <- TS[[i]]
  clResults <- lapply(dissMethods, function (dissMethod) {
    print(paste("Applying dissimilarity measure: ", dissMethod["name"]))
    dissMatrix <- diss(SERIES = dataTS, METHOD = dissMethod["command"])
    dissMatrix <- as.dist(dissMatrix)
    list(
      method = dissMethod["name"],
      data = dissMatrix,
      results = lapply(clustMethods, function (clustMethod) {
        # (log-message typo "meethod" fixed)
        print(paste("\tApplying clustering method: ", clustMethod["name"]))
        list(
          clustMethod = clustMethod$name,
          results = lapply(K, function (k) {
            # Dispatch on the clustering method name; all methods consume the
            # precomputed dissimilarity matrix.
            if (clustMethod$name == "PAM")
              clustering <- pam(x = dissMatrix, k = k, diss = TRUE)$clustering
            else if (clustMethod$name == "hclust")
              clustering <- cutree(hclust(d = dissMatrix, method = "complete"), k = k)
            else if (clustMethod$name == "diana")
              clustering <- diana(x = dissMatrix) %>% as.hclust() %>% cutree(k = k)
            else
              stop(paste("Clustering Method [", clustMethod$name, "] not recognized"))
            list(
              k = k,
              clust = clustering,
              stats = cluster.stats(d = dissMatrix, clustering = clustering, G2 = TRUE, G3 = TRUE, wgap = TRUE, sepindex = TRUE, silhouette = TRUE)
            )
          })
        )
      })
    )
  })
  # Flatten the nested results into one long data frame of validation metrics.
  summary <- do.call(rbind, lapply(clResults, function (resultByDiss) {
    clustMethodDf <- do.call(rbind, lapply(resultByDiss$results, function (resultByClustMethod) {
      kValDf <- do.call(rbind, lapply(resultByClustMethod$results, function (resultByK) {
        c(
          #dunn=resultByK$stats$dunn2,
          clusteringResult = resultByK$clust,
          ASW = resultByK$stats$avg.silwidth,
          CH = resultByK$stats$ch,
          PH = resultByK$stats$pearsongamma
          #sindex=resultByK$stats$sindex,
          #widestgap=resultByK$stats$stats$widestgap
        )
      }))
      rownames(kValDf) <- K
      kValDf <- melt(as.matrix(kValDf), varnames = c("k", "valMetric"))
      kValDf <- cbind(clustMethod = resultByClustMethod$clustMethod, kValDf)
      kValDf
    }))
    clustMethodDf <- cbind(dissMeasure = resultByDiss$method, clustMethodDf)
    clustMethodDf
  }))
  # Cast the long data frame: one row per (dissMeasure, clustMethod, k).
  summary <- dcast(summary, dissMeasure + clustMethod + k ~ valMetric)
  validationResults[[i]] <- summary
  clResultsPerMeasure[[i]] <- clResults
}
names(validationResults) <- names(TS)
##
#
# Extract the best clusterization per measure: the row maximizing the combined
# (normalized) ASW + CH + PH rating.
#
##
TSClustering <- list()
for (i in seq_along(TS)) {
  aux <- validationResults[[i]] %>%
    transform(valRating = ASW/max(ASW) + CH/max(CH) + PH/max(PH)) %>%
    filter(valRating == max(valRating)) %>%
    head(n = 1)
  dissMethodIndex <- which((laply(dissMethods, function (dm) dm["name"])) == aux$dissMeasure)
  clustMethodIndex <- which((laply(clustMethods, function (cm) cm["name"])) == aux$clustMethod)
  kIndex <- which((laply(K, function (k) k == aux$k)))
  TSClustering[[i]] <- list(
    stats = as.list(aux),
    # Fix: index into THIS measure's own clustering results.
    clustering = clResultsPerMeasure[[i]][[dissMethodIndex]]$results[[clustMethodIndex]]$results[[kIndex]]$clust
  )
}
names(TSClustering) <- names(TS)
# Summary table of the best clusterization per performance measure.
summaryOfBestResults <- data.frame(lapply(TSClustering, function (tsClust) unlist(tsClust$stats)))
#rownames(summaryOfBestResults) <- names(TS)
# Write validation results to CSVs, one file per measure.
l_ply(names(validationResults), function (tsName) {
  # Fix: paste() inserted spaces into the file name ("./output/ESWA/ score .csv");
  # paste0() builds the intended path.
  write.csv(validationResults[[tsName]], file = paste0("./output/ESWA/", tsName, ".csv"), row.names = FALSE)
})
write.csv(summaryOfBestResults,file = "./output/ESWA/summaryOfBestResults.csv",row.names = TRUE)
# Save final clusterization
saveRDS(TSClustering, file = "./output/ESWA/performanceMeasuresClusterization.Rds") #Pass to script 3
saveRDS(TSClustering, file = "./output/ESWA/methodologyPMClusterization.Rds") #Special save
saveRDS(validationResults, file="output/ESWA/validationResults.Rds")
# NOTE(review): this saves only the last measure's clResults (loop variable).
saveRDS(clResults, file="output/ESWA/clResults.Rds")
# Final
print("############################### Script successfully executed :) ##################################")
3985234a5ef399593ace89db829e06716b8f7590 | 29585dff702209dd446c0ab52ceea046c58e384e | /IntClust/R/WeightedClust.R | df18a4882f0292a6055c757698eb4f87b95674fe | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,441 | r | WeightedClust.R | WeightedClust <- function(List,type=c("data","dist","clusters"),distmeasure=c("tanimoto","tanimoto"),normalize=FALSE,method=NULL,weight=seq(1,0,-0.1),WeightClust=0.5,clust="agnes",linkage="ward",alpha=0.625,StopRange=FALSE){ # weight = weight to data1
#Step 1: compute distance matrices:
type<-match.arg(type)
CheckDist<-function(Dist,StopRange){
if(StopRange==FALSE & !(0<=min(Dist) & max(Dist)<=1)){
message("It was detected that a distance matrix had values not between zero and one. Range Normalization was performed to secure this. Put StopRange=TRUE if this was not necessary")
Dist=Normalization(Dist,method="Range")
}
else{
Dist=Dist
}
}
if(type=="data"){
OrderNames=rownames(List[[1]])
for(i in 1:length(List)){
List[[i]]=List[[i]][OrderNames,]
}
Dist=lapply(seq(length(List)),function(i) Distance(List[[i]],distmeasure[i],normalize,method))
Dist=lapply(seq(length(Dist)),function(i) CheckDist(Dist[[i]],StopRange))
}
else if(type=="dist"){
OrderNames=rownames(List[[1]])
for(i in 1:length(List)){
List[[i]]=List[[i]][OrderNames,OrderNames]
}
Dist=List
Dist=lapply(seq(length(Dist)),function(i) CheckDist(Dist[[i]],StopRange))
}
else{
Dist=lapply(seq(length(List)),function(i) return(List[[i]]$Dist))
Dist=lapply(seq(length(Dist)),function(i) CheckDist(Dist[[i]],StopRange))
OrderNames=rownames(DistM[[1]])
for(i in 1:length(DistM)){
DistM[[i]]=DistM[[i]][OrderNames,OrderNames]
}
}
#Step 2: Weighted linear combination of the distance matrices:
if(is.null(weight)){
equalweights=1/length(List)
weight=list(rep(equalweights,length(List)))
}
else if(class(weight)=='list' & length(weight[[1]])!=length(List)){
stop("Give a weight for each data matrix or specify a sequence of weights")
}
if(class(weight)!="list"){
condition<-function(l){
l=as.numeric(l)
if( sum(l)==1 ){ #working with characters since with the numeric values of comb or permutations something goes not the way is should: 0.999999999<0.7+0.3<1??
#return(row.match(l,t1))
return(l)
}
else(return(0))
}
if(all(seq(1,0,-0.1)!=weight)){
for(i in 1:length(weight)){
rest=1-weight[i]
if(!(rest%in%weight)){
weight=c(weight,rest)
}
}
}
t1=gtools::permutations(n=length(weight),r=length(List),v=as.character(weight),repeats.allowed = TRUE)
t2=lapply(seq_len(nrow(t1)), function(i) if(sum(as.numeric(t1[i,]))==1) return(as.numeric(t1[i,])) else return(0)) #make this faster: lapply on a list or adapt permutations function itself: first perform combinations under restriction then perform permutations
t3=sapply(seq(length(t2)),function(i) if(!all(t2[[i]]==0)) return (i) else return(0))
t4=t2[which(t3!=0)]
weight=lapply(seq(length(t4)),function(i) rev(t4[[i]]))
}
if(class(weight)=="list" & "x" %in% weight[[1]]){ #x indicates a free weight
newweight=list()
for(k in 1:length(weight)){
w=weight[[k]]
weightsfordata=which(w!="x") #position of the provided weight = position of the data to which the weight is given
givenweights=as.numeric(w[weightsfordata])
stilltodistribute=1-sum(givenweights)
newweights=seq(stilltodistribute,0,-0.1)
t1=gtools::permutations(n=length(newweights),r=length(List)-length(weightsfordata),v=as.character(newweights),repeats.allowed = TRUE)
Input1=as.list(seq_len(nrow(t1)))
Input2=lapply(seq(length(Input1)),function(i) {Input1[[i]][length(Input1[[i]])+1]=stilltodistribute
return(Input1[[i]])})
t2=lapply(seq(length(Input2)), FUN=function(i){if(sum(as.numeric(t1[Input2[[i]][1],])+0.00000000000000002775)==Input2[[i]][2]) return(as.numeric(t1[i,])) else return(0)}) #make this faster: lapply on a list or adapt permutations function itself: first perform combinations under restriction then perform permutations
t3=sapply(seq(length(t2)),function(i) if(!all(t2[[i]]==0)) return (i) else return(0))
weightsforotherdata=t2[which(t3!=0)]
new=list()
for(i in 1:length(weightsforotherdata)){
w1=weightsforotherdata[[i]]
new[[i]]=rep(0,length(List))
new[[i]][weightsfordata]=givenweights
new[[i]][which(new[[i]]==0)]=w1
}
newweight[k]=new
}
weight=newweight
}
weightedcomb<-function(w,Dist){
temp=lapply(seq_len(length(Dist)),function(i) w[i]*Dist[[i]])
temp=Reduce("+",temp)
return(temp)
}
DistClust=NULL
Clust=NULL
DistM=lapply(seq(length(weight)),function(i) weightedcomb(weight[[i]],Dist=Dist))
namesweights=c()
WeightedClust=lapply(seq(length(weight)),function(i) cluster::agnes(DistM[[i]],diss=TRUE,method=linkage,par.method=alpha))
for(i in 1:length(WeightedClust)){
namesweights=c(namesweights,paste("Weight",weight[i],sep=" "))
if(all(weight[[i]]==WeightClust)){
Clust=WeightedClust[[i]]
DistClust=DistM[[i]]
}
}
if(is.null(DistClust)){
DistClust=weightedcomb(WeightClust,Dist=Dist)
Temp=cluster::agnes(DistClust,diss=TRUE,method=linkage,par.method=alpha)
Clust=Temp
}
Results=lapply(seq(1,length(WeightedClust)),function(i) return(c("DistM"=DistM[i],"Clust"=WeightedClust[i])))
names(Results)=namesweights
# return list with objects
out=list(Dist=Dist,Results=Results,Clust=list("DistM"=DistClust,"Clust"=Clust))
attr(out,'method')<-'Weighted'
return(out)
}
|
d39f2db8d842f9aef42c190ce4b635c6ab7ffe11 | acd55e98c04534f8100d6878c7baa863b818b8f1 | /man/text_to_upper.Rd | 9b08440e4bc37cf50d215ee1d28930ba4ced0427 | [] | no_license | cran/stringb | 28f1912ec4caad26560b51cf9bd2ff5e5657ea23 | ee0aa7e4a4fc594b03749bf9dd327dca863e3378 | refs/heads/master | 2021-07-25T10:18:26.950969 | 2021-01-25T21:10:02 | 2021-01-25T21:10:02 | 72,453,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 422 | rd | text_to_upper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_to_lower.R
\name{text_to_upper}
\alias{text_to_upper}
\alias{text_to_upper.default}
\title{Convert text to upper case}
\usage{
text_to_upper(x)
\method{text_to_upper}{default}(x)
}
\arguments{
\item{x}{text to be processed}
}
\description{
Convert text to upper case.

Default method for \code{text_to_upper()}.
}
|
ebcbe3c88f20bb38b7bf7b31b0ca0dabcb96fc78 | 6477bb5c57a8af47a2f025634a195910d25ebf39 | /Old/Class Projects/PS7/PS7/PS7-Problem 1.R | d18c888e91ebc5d83d8be2b60e9afabf922da88d | [
"MIT"
] | permissive | clintonTE/CCA | b011683673818fd9887d2767dd49049a14efa012 | a555cc1fa4b6d5f1464de44e2e322d32336d1e3a | refs/heads/master | 2023-05-11T20:43:35.457854 | 2021-06-03T23:00:23 | 2021-06-03T23:00:23 | 373,653,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,772 | r | PS7-Problem 1.R | set.seed(11); #allows for reproducibility
#The likelihood function for a multinomial distribution
#In: A set of parameters and a data set
#Out: The sum of the log likelihoods for each data point
logitLogLike = function(V, cMat) {
minLike = -10 ^ 9;
maxLike = 10 ^ 9;
denom = matrix(apply(V, 2, function(x) log(sum(exp(x)))), nrow = 1);
#calculate the denominator for each price
#calculate the log probability for each value of v
logPVec = t(apply(V, 1, function(x) x - denom));
#The below command looks up the logged probability for each entry in cMat and sums the total
return(sum(sapply(1:ncol(V), function(x) sum(logPVec[cMat[, x], x]))));
}
# Wrapper around logitLogLike() for the Problem 1 setup: builds the utility
# matrix V[j, w] = alpha[j] + beta * lPrice[j, w] and evaluates the
# multinomial-logit log-likelihood over the data set cMat.
# The null alternative is J = 1 (its alpha and log-price are normalized to
# zero by the caller).
multinomChoiceModelLike = function(alpha, beta, lPrice, cMat) {
  utilities <- apply(lPrice, 2, function(logPrice) alpha + beta * logPrice)
  return(logitLogLike(utilities, cMat))
}
# Gradient of the multinomial-logit log-likelihood for Problem 1.
# The first alternative is the normalized null (alpha[1] = 0, lPrice[1, ] = 0),
# so no gradient is returned for alpha[1]. Assumes at least two alternatives.
#
# Args:
#   alpha:  length-J vector of alternative intercepts.
#   beta:   scalar price coefficient.
#   lPrice: J x W matrix of log prices.
#   cMat:   L x W integer matrix of observed choices.
# Returns: (J) x 1 matrix: d/d alpha[2..J] stacked above d/d beta.
#
# Fixes: sapply -> vapply (type-stable), 1:J -> seq_len(J), documented.
multinomChoiceModelGrad <- function(alpha, beta, lPrice, cMat) {
  nObs <- nrow(cMat)      # L: observations per price vector
  nSit <- ncol(cMat)      # W: number of price vectors
  nAlt <- length(alpha)   # J: number of alternatives
  # eta[j, w] = number of observations choosing alternative j under prices w.
  eta <- matrix(vapply(seq_len(nAlt),
                       function(j) apply(cMat, 2, function(col) sum(col == j)),
                       numeric(nSit)),
                nrow = nAlt, ncol = nSit, byrow = TRUE)
  # Logit denominator per price vector.
  denom <- apply(lPrice, 2, function(p) sum(exp(alpha + p * beta)))
  # Score w.r.t. alpha[2..J]: observed counts minus expected counts.
  gradAlpha <- matrix(vapply(2:nAlt, function(j)
    sum(eta[j, ] - nObs * exp(alpha[j] + beta * lPrice[j, ]) / denom),
    numeric(1)), ncol = 1)
  # Score w.r.t. beta: price-weighted observed-minus-expected counts.
  gradBeta <- sum(vapply(seq_len(nAlt), function(j)
    sum(lPrice[j, ] * (eta[j, ] - nObs * exp(alpha[j] + beta * lPrice[j, ]) / denom)),
    numeric(1)))
  return(rbind(gradAlpha, gradBeta))
}
##########################Problem 1 Entry Point##########################
# Define the simulation parameters.
numSamples = 50;
numPriceVectrs = 20;
# alpha[1] = 0 is the normalized null alternative.
alpha = matrix(NA, 2);
alpha[1] = 0;
alpha[2] = 0;
beta = 500;
lowerBound = 0.1;
upperBound = 5000;
maxIter = 100;
numPrices = length(alpha);
# Draw log prices: row 1 (null alternative) fixed at 0, others uniform(-1, 1).
logPrices = rbind(matrix(rep(0, numPriceVectrs), nrow = 1),
  matrix(runif(numPriceVectrs * (numPrices - 1), -1, 1), nrow = numPrices - 1));
# Simulate choice data. The loop redraws until, within every price vector,
# all observations make the same choice as the first one (a fully "conformed"
# data set) or maxIter is reached.
unifPts = 0
iter=0
while ((unifPts != numSamples * numPriceVectrs) && (iter<maxIter)) {
  # Choice probabilities per price vector (columns normalized to sum to 1).
  popPVec = exp(rep(alpha, numPriceVectrs) + logPrices * beta);
  popPVec = apply(popPVec, 2, function(x) x / sum(x));
  sampMat = matrix(sapply(1:numPriceVectrs,
    function(x) sample.int(numPrices, numSamples, replace = TRUE, popPVec[, x])),
    nrow = numSamples, ncol = numPriceVectrs);
  # Count how many observations agree with the first row's choice per column.
  unifPts = sum(sapply(1:numPriceVectrs, function(x) sum(sampMat[1, x] == sampMat[, x])));
  iter = iter + 1;
}
#print(sampMat);
if (iter == maxIter) print("Warning: Conformed data set not generated");
# Maximum likelihood over beta only (alpha held fixed at its true value).
likeFunc = function(b)
  - multinomChoiceModelLike(alpha, b, logPrices, sampMat);
# Negative gradient w.r.t. beta (last element of the gradient vector).
gradFunc = function(b)
  -multinomChoiceModelGrad(alpha, b, logPrices, sampMat)[numPrices];
# Plot the negative log-likelihood over a grid of beta values.
# NOTE(review): "likelyhood" typo in the axis label below.
gMin = 100;
gMax = 1000;
xVec = gMin:gMax;
yVec = sapply(gMin:gMax, likeFunc)
plot(xVec, yVec, main = "Problem 1 Plot", xlab = "beta", ylab = "likelyhood");
# Box-constrained quasi-Newton optimization starting from beta = 700.
out = optim(700, likeFunc, gradFunc,
  method = "L-BFGS-B",
  lower = c(rep(lowerBound, numPrices)),
  upper = c(rep(upperBound, numPrices)));
print(out);
#multinomChoiceModelLike(out$par[1:numPrices], out$par[numPrices + 1], logPrices, sampVec)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.