content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaflet_map.R
\name{eq_create_label}
\alias{eq_create_label}
\title{Creates a label for leaflet map}
\usage{
eq_create_label(data)
}
\arguments{
\item{data}{A data frame containing cleaned NOAA earthquake data}
}
\value{
A character vector with labels
}
\description{
This function creates a label for the \code{leaflet} map based on location
name, magnitude and casualties from NOAA earthquake data
}
\details{
The input \code{data.frame} needs to include columns LOCATION_NAME,
EQ_PRIMARY and TOTAL_DEATHS with the earthquake location, magnitude and
total casualties respectively.
}
\examples{
\dontrun{
eq_create_label(data)
}
}
| /man/eq_create_label.Rd | permissive | JimMeister/capstoneJH | R | false | true | 711 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/leaflet_map.R
\name{eq_create_label}
\alias{eq_create_label}
\title{Creates a label for leaflet map}
\usage{
eq_create_label(data)
}
\arguments{
\item{data}{A data frame containing cleaned NOAA earthquake data}
}
\value{
A character vector with labels
}
\description{
This function creates a label for the \code{leaflet} map based on location
name, magnitude and casualties from NOAA earthquake data
}
\details{
The input \code{data.frame} needs to include columns LOCATION_NAME,
EQ_PRIMARY and TOTAL_DEATHS with the earthquake location, magnitude and
total casualties respectively.
}
\examples{
\dontrun{
eq_create_label(data)
}
}
|
# Fuzzer-generated (AFL/valgrind) regression input for
# multivariance:::match_rows. The matrix A holds extreme doubles (near the
# double range limits) to probe overflow/denormal handling; B is a 1x1 zero.
# FIX: dataset metadata that had been fused onto the last line (making the
# file unparseable) has been removed.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 2.44105655436418e-308, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows, testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# ---------------------------------------------------------------------------
# Transcript of the swirl "Exploratory Data Analysis" course.
# NOTE: most objects used below (pollution, ppm, state, mpg, xyplot, qplot...)
# are created/attached by the swirl lessons themselves; this is a session log,
# not a standalone analysis script.
# ---------------------------------------------------------------------------
library(swirl)
install_from_swirl("Exploratory Data Analysis")
swirl()

# --- One-dimensional summaries of the pollution data ----
head(pollution)
dim(pollution)
summary(pollution$pm25)
quantile(ppm)
boxplot(ppm,col="blue")
quantile(ppm)
# FIX: the next two lines were console OUTPUT pasted into the script; they
# were not valid R and made the file unparseable. Kept as comments:
#       0%       25%       50%       75%      100%
# 3.382626  8.548799 10.046697 11.356012 18.440731
abline(h=12)
abline(h=18)
hist(ppm,col="green")
rug(ppm)
hist(ppm,col="green",breaks = 100)
abline(v = 12, lwd=2)
abline(v = median(ppm),col="magenta", lwd=4)
names(pollution)
reg <- table(pollution$region)
barplot(reg,col="wheat",main="Number of Counties in Each Region")
boxplot(pm25~region,data=pollution,col="red")

# --- Region comparisons and scatterplots ----
par(mfrow=c(2,1),mar=c(4,4,2,1))
east <- subset(pollution,region=="east")
hist(subset(pollution,region=="west")$pm25, col = "green")
with(pollution,plot(latitude,pm25))
abline(h=12, lwd=2,lty=2)
plot(pollution$latitude,pollution$pm25, col=pollution$region)
plot(pollution$latitude, ppm, col = pollution$region)
abline(h=12, lwd=2,lty=2)
par(mfrow = c(1, 2), mar = c(5, 4, 2, 1))
west <- subset(pollution,region=="west")
plot(west$latitude,west$pm25,main="West")
plot(east$latitude,east$pm25,main="East")

# --- Graphics devices: screen, pdf, png ----
with(faithful, plot(eruptions, waiting))
title(main = "Old Faithful Geyser data")
dev.cur()
pdf(file="myplot.pdf")
with(faithful, plot(eruptions, waiting))
title(main = "Old Faithful Geyser data")
dev.cur()
dev.off()
dev.copy(png,"geyserplot.png")
# FIX: the copied png device must be closed, otherwise geyserplot.png is
# never actually written to disk.
dev.off()
getwd()
head(cars)
with(cars,plot(speed,dist))
text(mean(cars$speed),max(cars$dist),"SWIRL rules!")

# --- Lattice (xyplot) and ggplot2 (qplot) quick looks ----
head(state)
table(state$region)
xyplot(Life.Exp ~ Income | region, data = state, layout = c(4, 1))
xyplot(Life.Exp ~ Income | region, data = state, layout = c(2, 2))
head(mpg)
dim(mpg)
table(mpg$model)
qplot(displ,hwy,data=mpg)

# --- airquality: base plotting parameters and annotation ----
head(airquality)
range(airquality$Ozone,na.rm = TRUE)
hist(airquality$Ozone)
table(airquality$Month)
boxplot(Ozone~Month,airquality,xlab="Month",ylab="Ozone (ppb)",col.axis="blue",col.lab="Red")
boxplot(Ozone~Month, airquality, xlab="Month", ylab="Ozone (ppb)",col.axis="blue",col.lab="red")
title("Ozone and Wind in New York City")
with(airquality,plot(Wind,Ozone))
title(main="Ozone and Wind in New York City")
length(par())
names(par())
par()$pin
par("fg")
par()$bg
par("pch")
par("lty")
plot(airquality$Wind, type="n",airquality$Ozone)
title(main="Wind and Ozone in NYC")
may <- subset(airquality, Month==5)
points(may$Wind,may$Ozone,col="blue",pch=17)
notmay <-subset(airquality, Month!=5)
points(notmay$Wind,notmay$Ozone,col="red",pch=8)
legend("topright",pch=c(17,8),col=c("blue","red"),legend=c("May","Other Months"))
abline(v=median(airquality$Wind),lty=2,lwd=2)

# --- Multi-panel layouts ----
par(mfrow=c(1,2))
plot(airquality$Wind, airquality$Ozone, main = "Ozone and Wind")
plot(airquality$Ozone, airquality$Solar.R, main = "Ozone and Solar Radiation")
par(mfrow = c(1, 3), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(airquality$Wind, airquality$Ozone, main = "Ozone and Wind")
plot(airquality$Solar.R, airquality$Ozone, main = "Ozone and Solar Radiation")
plot(airquality$Temp, airquality$Ozone, main = "Ozone and Temperature")
mtext("Ozone and Weather in New York City", outer = TRUE)
| /Practical1_ExploratoryDataAnalysis.R | no_license | itforankit/datasciencecoursera | R | false | false | 3,071 | r | library(swirl)
install_from_swirl("Exploratory Data Analysis")
swirl()
head(pollution)
dim(pollution)
summary(pollution$pm25)
quantile(ppm)
boxplot(ppm,col="blue")
quantile(ppm)
0% 25% 50% 75% 100%
3.382626 8.548799 10.046697 11.356012 18.440731
abline(h=12)
abline(h=18)
hist(ppm,col="green")
rug(ppm)
hist(ppm,col="green",breaks = 100)
abline(v = 12, lwd=2)
abline(v = median(ppm),col="magenta", lwd=4)
names(pollution)
reg <- table(pollution$region)
barplot(reg,col="wheat",main="Number of Counties in Each Region")
boxplot(pm25~region,data=pollution,col="red")
par(mfrow=c(2,1),mar=c(4,4,2,1))
east <- subset(pollution,region=="east")
hist(subset(pollution,region=="west")$pm25, col = "green")
with(pollution,plot(latitude,pm25))
abline(h=12, lwd=2,lty=2)
plot(pollution$latitude,pollution$pm25, col=pollution$region)
plot(pollution$latitude, ppm, col = pollution$region)
abline(h=12, lwd=2,lty=2)
par(mfrow = c(1, 2), mar = c(5, 4, 2, 1))
west <- subset(pollution,region=="west")
plot(west$latitude,west$pm25,main="West")
plot(east$latitude,east$pm25,main="East")
with(faithful, plot(eruptions, waiting))
title(main = "Old Faithful Geyser data")
dev.cur()
pdf(file="myplot.pdf")
with(faithful, plot(eruptions, waiting))
title(main = "Old Faithful Geyser data")
dev.cur()
dev.off()
dev.copy(png,"geyserplot.png")
getwd()
head(cars)
with(cars,plot(speed,dist))
text(mean(cars$speed),max(cars$dist),"SWIRL rules!")
head(state)
table(state$region)
xyplot(Life.Exp ~ Income | region, data = state, layout = c(4, 1))
xyplot(Life.Exp ~ Income | region, data = state, layout = c(2, 2))
head(mpg)
dim(mpg)
table(mpg$model)
qplot(displ,hwy,data=mpg)
head(airquality)
range(airquality$Ozone,na.rm = TRUE)
hist(airquality$Ozone)
table(airquality$Month)
boxplot(Ozone~Month,airquality,xlab="Month",ylab="Ozone (ppb)",col.axis="blue",col.lab="Red")
boxplot(Ozone~Month, airquality, xlab="Month", ylab="Ozone (ppb)",col.axis="blue",col.lab="red")
title("Ozone and Wind in New York City")
with(airquality,plot(Wind,Ozone))
title(main="Ozone and Wind in New York City")
length(par())
names(par())
par()$pin
par("fg")
par()$bg
par("pch")
par("lty")
plot(airquality$Wind, type="n",airquality$Ozone)
title(main="Wind and Ozone in NYC")
may <- subset(airquality, Month==5)
points(may$Wind,may$Ozone,col="blue",pch=17)
notmay <-subset(airquality, Month!=5)
points(notmay$Wind,notmay$Ozone,col="red",pch=8)
legend("topright",pch=c(17,8),col=c("blue","red"),legend=c("May","Other Months"))
abline(v=median(airquality$Wind),lty=2,lwd=2)
par(mfrow=c(1,2))
plot(airquality$Wind, airquality$Ozone, main = "Ozone and Wind")
plot(airquality$Ozone, airquality$Solar.R, main = "Ozone and Solar Radiation")
par(mfrow = c(1, 3), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(airquality$Wind, airquality$Ozone, main = "Ozone and Wind")
plot(airquality$Solar.R, airquality$Ozone, main = "Ozone and Solar Radiation")
plot(airquality$Temp, airquality$Ozone, main = "Ozone and Temperature")
mtext("Ozone and Weather in New York City", outer = TRUE)
|
#' Random Forest Cross Validation Function
#'
#' This function uses Random Forest Cross Validation to predict the output of
#' a target variable (lifeExp from gdpPercap in \code{my_gapminder}) and
#' calculate the cross-validated MSE.
#'
#' @param k numeric input of the number of folds.
#' @keywords prediction
#'
#' @return numeric output of the CV MSE (mean of the k fold MSEs).
#'
#' @examples
#' my_rf_cv(k = 5)
#' my_rf_cv(k = 2)
#'
#' @import class magrittr gapminder stats dplyr
#' @importFrom randomForest randomForest
#' @export
my_rf_cv <- function(k) {
  my_gapminder <- my_gapminder
  n <- nrow(my_gapminder)
  # assign each row to one of k folds at random (balanced via rep + sample)
  folds <- sample(rep(seq_len(k), length = n))
  data <- data.frame(my_gapminder, "split" = folds)
  mse <- rep(NA, k)
  for (i in seq_len(k)) {
    # X_i, training data: every fold except the i-th
    data_train <- data %>% dplyr::filter(split != i)
    # X_i^*, testing data: the held-out i-th fold
    data_test <- data %>% dplyr::filter(split == i)
    # remove the bookkeeping column before modelling
    data_train$split <- NULL
    data_test$split <- NULL
    # fit a random forest predicting lifeExp from gdpPercap
    my_model <- randomForest(lifeExp ~ gdpPercap, data = data_train,
                             ntree = 100)
    # predict lifeExp for the held-out fold
    # (FIX: old comment said "Sepal.length", a leftover from an iris example;
    # the response column is also now referenced by name, not magic index 4)
    my_pred <- predict(my_model, data_test)
    # fold MSE: mean squared prediction error on the held-out rows
    mse[i] <- mean((my_pred - data_test$lifeExp)^2)
  }
  return(mean(mse))
}
| /R/my_rf_cv.R | no_license | alishaluo/STAT302package | R | false | false | 1,310 | r | #' Random Forest Cross Validation Function
#'
#' This function uses Random Forest Cross Validation to predict the output of
#' a target variable and calculate MSE.
#'
#' @param k numeric input of the number of folds.
#' @keywords prediction
#'
#' @return numeric output of the CV MSE.
#'
#' @examples
#' my_rf_cv(k = 5)
#' my_rf_cv(k = 2)
#'
#' @import class magrittr gapminder stats dplyr
#' @importFrom randomForest randomForest
#' @export
my_rf_cv <- function(k) {
my_gapminder <- my_gapminder
n <- nrow(my_gapminder)
# selects folds randomly and splits data
folds <- sample(rep(1:k, length = n))
data <- data.frame(my_gapminder, "split" = folds)
mse <- rep(NA, k)
for(i in 1:k) {
# X_i, training data
data_train <- data %>% dplyr::filter(split != i)
# X_i^*, testing data
data_test <- data %>% dplyr::filter(split == i)
# remove split columns
data_train$split <- NULL
data_test$split <- NULL
# predicts the outcomes of lifeExp
my_model <- randomForest(lifeExp ~ gdpPercap, data = data_train,
ntree = 100)
# predicts Sepal.length of the testing data
my_pred <- predict(my_model, data_test[, -4])
# calculates the average squared difference
mse[i] <- mean((my_pred - data_test[, 4])^2)
}
return(mean(mse))
}
|
CreateDirIfAbsent <- function(path) {
  # Create `path` (including any missing parent directories) only when it
  # does not already exist. The redundant file.path(path) wrapper and the
  # temporary flag variable of the original have been removed.
  if (!dir.exists(path)) {
    dir.create(path, recursive = TRUE)
  }
  invisible(NULL)
}
canonicalizeACNames <- function(name) {
  # Canonical form of an assembly-constituency name: trimmed, upper-cased,
  # internal spaces removed, and the reservation markers (SC)/(ST)/(BL)
  # stripped. Vectorized over `name`.
  canonical <- toupper(trimws(name))
  # drop internal spaces (same "( )+" pattern as before; tabs untouched)
  canonical <- gsub("( )+", "", canonical)
  # one regex replaces the original three fixed-string gsub calls; also
  # avoids shadowing base::t with a local named `t`
  gsub("\\((SC|ST|BL)\\)", "", canonical)
}
canonicalizePartyNames <- function(name) {
  # Canonical form of a party name: trimmed and upper-cased.
  # Internal spacing is intentionally preserved (unlike AC names).
  toupper(trimws(name))
}
| /Ashoka_TCPD/Data/AE/scripts/helper.R | no_license | akibmayadav/Data_Visualisation_Projects | R | false | false | 716 | r | CreateDirIfAbsent<-function(path){
#check if a directory exist , if not then create it
res<-dir.exists(path)
if(!res){
dir.create(file.path(path),recursive=TRUE)
}
}
canonicalizeACNames<-function(name){
#convert to upper case after trimming
t<-toupper(trimws(name))
#replace spaces with empty strings
t<-gsub("( )+","",t)
#replace (SC) or (ST) or (BL) with empty string
t<-gsub("(SC)","",t,fixed=TRUE)
t<-gsub("(ST)","",t,fixed=TRUE)
t<-gsub("(BL)","",t,fixed=TRUE)
}
canonicalizePartyNames<-function(name){
#convert to upper case after trimming
t<-toupper(trimws(name))
#replace spaces with empty strings
#t<-gsub("( )+","",t)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spm.impute.R
\name{func1}
\alias{func1}
\title{An internal function to compute m and gamma based on the
continuous-time model (Yashin et al., 2007)}
\usage{
func1(tt, y, a, f1, Q, f, b, theta)
}
\arguments{
\item{tt}{tt - time}
\item{y}{y}
\item{a}{a (see Yashin et. al, 2007)}
\item{f1}{f1 (see Yashin et. al, 2007)}
\item{Q}{Q (see Yashin et. al, 2007)}
\item{f}{f (see Yashin et. al, 2007)}
\item{b}{b (see Yashin et. al, 2007)}
\item{theta}{theta}
}
\value{
list(m, gamma) Next values of m and gamma (see Yashin et. al, 2007)
}
\description{
An internal function to compute m and gamma based on
continuous-time model (Yashin et. al., 2007)
}
| /fuzzedpackages/stpm/man/func1.Rd | no_license | akhikolla/testpackages | R | false | true | 730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spm.impute.R
\name{func1}
\alias{func1}
\title{An internal function to compute m and gamma based on
continuous-time model (Yashin et. al., 2007)}
\usage{
func1(tt, y, a, f1, Q, f, b, theta)
}
\arguments{
\item{tt}{tt - time}
\item{y}{y}
\item{a}{a (see Yashin et. al, 2007)}
\item{f1}{f1 (see Yashin et. al, 2007)}
\item{Q}{Q (see Yashin et. al, 2007)}
\item{f}{f (see Yashin et. al, 2007)}
\item{b}{b (see Yashin et. al, 2007)}
\item{theta}{theta}
}
\value{
list(m, gamma) Next values of m and gamma (see Yashin et. al, 2007)
}
\description{
An internal function to compute m and gamma based on
continuous-time model (Yashin et. al., 2007)
}
|
#' @title \code{tidy_lm_permute}
#' @description \code{permute} \code{lm} and output the results as a \code{tidy} table.
#' @author Ekarin Eric Pongpipat
#' @param data a data.frame to be analyzed
#' @param formula a formula to be analyzed as typically written for the \code{lm} function
#' @param n_permute = 1000 (default) the number of permutations to perform
#' @param var_permute variable(s) to unlink in the permutation
#'
#' @return outputs \code{tidy} table that includes a \code{p_permute_<n>}
#'   column with the one-sided permutation p-value of each term
#'
#' @examples
#' packages <- c("broom", "broomExtra", "dplyr", "modelr", "purrr", "tibble")
#' xfun::pkg_attach2(packages, message = F)
#'
#' data <- tibble(
#'   a = scale(sample.int(100), scale = F),
#'   b = scale(sample.int(100), scale = F),
#'   c = b^2,
#'   d = scale(sample.int(100), scale = F)
#' )
#'
#' tidy_lm_permute(data = data, formula = "a ~ b + c", n_permute = 100, var_permute = "a")
#' @export
tidy_lm_permute <- function(data, formula, n_permute = 1000, var_permute) {
  # load packages if not already ----
  packages <- c("broom", "dplyr", "modelr", "purrr", "tibble")
  xfun::pkg_attach2(packages, message = F)
  # validate inputs early
  if (n_permute <= 1) {
    stop("n_permute must be larger than 1")
  } else if (is.null(var_permute)) {
    stop("var_permute must be defined")
  }
  # observed fit and its tidy coefficient table
  fit <- lm(as.formula(formula), data)
  lm_tidy <- broom::tidy(fit)
  # BUG FIX: permute the supplied `data`; the original called permute(df, ...)
  # on a global `df` that was never an argument of this function.
  df_permute <- permute(data, n_permute, var_permute)
  df_lm_permute <- map(df_permute[["perm"]], ~ lm(as.formula(formula), data = .))
  df_lm_permute_tidy <- map_df(df_lm_permute, broom::tidy, .id = "id")
  # One-sided permutation p-value per term, in the direction of the observed
  # estimate. BUG FIX: the original computed estimate/estimate (always 1),
  # so negative estimates were tested in the wrong tail.
  terms <- unique(df_lm_permute_tidy$term)
  p_vals <- vapply(terms, function(term_name) {
    obs <- lm_tidy$estimate[lm_tidy$term == term_name]
    perm_est <- df_lm_permute_tidy$estimate[df_lm_permute_tidy$term == term_name]
    if (obs >= 0) {
      (sum(perm_est >= obs) + 1) / n_permute
    } else {
      (sum(perm_est <= obs) + 1) / n_permute
    }
  }, numeric(1))
  # build the p-value table in one go (the original grew it with rbind in a
  # loop, and misspelled the intermediate column name as "p_permuate")
  permute_table_full <- tibble(term = terms)
  permute_table_full[[paste0("p_permute_", n_permute)]] <- p_vals
  lm_tidy <- full_join(lm_tidy, permute_table_full, by = "term")
  return(lm_tidy)
}
| /R/tidy_lm_permute.R | permissive | epongpipat/bootPermBroom | R | false | false | 2,557 | r | #' @title \code{tidy_lm_permute}
#' @description \code{permute} \code{lm} and output the results as a \code{tidy} table.
#' @author Ekarin Eric Pongpipat
#' @param data a data.frame to be analyzed
#' @param formula a formula to be analyzed as typically written for the \code{lm} function
#' @param n_permute = 1000 (default) the number of permutations to perform
#' @param var_permute variable(s) to unlink in the permutation
#'
#' @return outputs \code{tidy} table that includes the p.value from the permutation of a \code{lm} test
#'
#' @examples
#' packages <- c("broom", "broomExtra", "dplyr", "modelr", "purrr", "tibble")
#' xfun::pkg_attach2(packages, message = F)
#'
#' data <- tibble(
#' a = scale(sample.int(100), scale = F),
#' b = scale(sample.int(100), scale = F),
#' c = b^2,
#' d = scale(sample.int(100), scale = F)
#' )
#'
#' tidy_lm_permute(data = data, formula = "a ~ b + c", n_permute = 100, var_permute = "a")
#' @export
tidy_lm_permute <- function(data, formula, n_permute = 1000, var_permute) {
# load packages if not already ----
packages <- c("broom", "dplyr", "modelr", "purrr", "tibble")
xfun::pkg_attach2(packages, message = F)
if (n_permute <= 1) {
stop(paste0("n_permute must be larger than 1"))
} else if (is.null(var_permute)) {
stop(paste0("var_permute must be defined"))
}
lm <- lm(as.formula(formula), data)
lm_tidy <- lm %>% tidy()
df_permute <- permute(df, n_permute, var_permute)
df_lm_permute <- map(df_permute[["perm"]], ~ lm(as.formula(formula), data = .))
df_lm_permute_tidy <- map_df(df_lm_permute, broom::tidy, .id = "id")
for (term_name in unique(df_lm_permute_tidy$term)) {
lm_tidy_name <- lm_tidy %>%
filter(term == term_name)
df_lm_permute_tidy_term <- df_lm_permute_tidy %>%
filter(term == term_name)
sign <- lm_tidy_name$estimate / lm_tidy_name$estimate
if (sign == 1) {
p_permute <- (sum(df_lm_permute_tidy_term$estimate >= lm_tidy_name$estimate) + 1) / n_permute
} else {
p_permute <- (sum(df_lm_permute_tidy_term$estimate <= lm_tidy_name$estimate) + 1) / n_permute
}
permute_table <- tibble(
term = term_name,
p_permuate = p_permute
)
if (term_name == unique(df_lm_permute_tidy$term)[1]) {
permute_table_full <- permute_table
} else {
permute_table_full <- rbind(permute_table_full, permute_table)
}
}
colnames(permute_table_full) <- c("term", paste0("p_permute_", n_permute))
lm_tidy <- full_join(lm_tidy, permute_table_full, by = "term")
return(lm_tidy)
}
|
# Fuzzer-generated (libFuzzer/valgrind) regression input for
# breakfast:::setBitNumber with a negative 32-bit integer argument.
# FIX: dataset metadata that had been fused onto the last line (making the
# file unparseable) has been removed.
testlist <- list(n = -1928462336L)
result <- do.call(breakfast:::setBitNumber, testlist)
str(result)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
#' PLCOm2012 risk prediction model for lung cancer
#'
#' @param age a vector of patient's age
#' @param race categorical variable of patient's race or ethnic group (White, Black, Hispanic,
#' Asian, American Indian, Alaskan Native, Native Hawaiian, Pacific Islander),
#' or the corresponding numeric code 1-5
#' @param education education was measured in six ordinal levels: less than high-school graduate (level 1),
#' high-school graduate (level 2), some training after high school (level 3), some college (level 4),
#' college graduate (level 5), and postgraduate or professional degree (level 6)
#' @param bmi a vector of patient's body mass index, per 1 unit of increase
#' @param copd binary variable of chronic obstructive pulmonary disease (yes as 1 or no as 0)
#' @param cancer_hist binary variable of patient's cancer history (yes as 1 or no as 0)
#' @param family_hist_lung_cancer binary variable of patient's family history of lung cancer (yes as 1 or no as 0)
#' @param smoking_status binary variable of patient's smoking status (current as 1 or former as 0)
#' @param smoking_intensity a vector of the number cigarettes patient smokes per day
#' @param duration_smoking a vector of patient's duration of smoking, per 1-yr increase
#' @param smoking_quit_time a vector of patient's smoking quit time, per 1-yr increase
#'
#' @return prob patient's 6-year probability of lung-cancer
#' @export
#'
#' @examples
#'plcom2012(age=62, race='White', education=4, bmi=27, copd=0, cancer_hist=0,
#'family_hist_lung_cancer=0, smoking_status=0, smoking_intensity=80,
#'duration_smoking=27, smoking_quit_time=10)
plcom2012 <- function(age, race, education, bmi, copd, cancer_hist, family_hist_lung_cancer, smoking_status, smoking_intensity, duration_smoking,
    smoking_quit_time) {
    race <- tolower(race)
    # Linear predictor shared by all race groups (the original repeated this
    # whole expression once per race branch). Coefficients and centering
    # constants are those of the PLCOm2012 model; the smoking-intensity term
    # enters through the inverse of (cigarettes/day)/10.
    base <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
        0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
        (duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506
    # Race-specific offset. Numeric codes ("1"-"5") are accepted because
    # tolower() coerces numbers to their character form, matching the
    # original's `race == 1` comparisons.
    race_adj <- if (race %in% c("white", "american indian", "alaskan native", "1")) {
        0
    } else if (race %in% c("black", "2")) {
        0.3944778
    } else if (race %in% c("hispanic", "3")) {
        -0.7434744
    } else if (race %in% c("asian", "4")) {
        -0.466585
    } else if (race %in% c("native hawaiian", "pacific islander", "5")) {
        1.027152
    } else {
        # FIX: the original left `model` undefined for unrecognized races,
        # producing the cryptic error "object 'model' not found".
        stop("Unrecognized race: ", race, call. = FALSE)
    }
    model <- base + race_adj
    # logistic transform of the linear predictor -> 6-year probability
    prob <- exp(model)/(1 + exp(model))
    results <- list()
    results$prob <- prob
    return(results)
}
| /R/plcom2012.R | no_license | resplab/PLCOm2012 | R | false | false | 4,049 | r | #' PLCOm2012 risk prediction model for lung cancer
#'
#' @param age a vector of patient's age
#' @param race categorical variable of patient's race or ethnic group (White, Black, Hispanic,
#' Asian, American Indian, Alaskan Native, Native Hawaiian, Pacific Islander)
#' @param education education was measured in six ordinal levels: less than high-school graduate (level 1),
#' high-school graduate (level 2), some training after high school (level 3), some college (level 4),
#' college graduate (level 5), and postgraduate or professional degree (level 6)
#' @param bmi a vector of patient's body mass index, per 1 unit of increase
#' @param copd binary variable of chronic obstructive pulmonary disease (yes as 1 or no as 0)
#' @param cancer_hist binary variable of patient's cancer history (yes as 1 or no as 0)
#' @param family_hist_lung_cancer binary variable of patient's family history of lung cancer (yes as 1 or no as 0)
#' @param smoking_status binary variable of patient's smoking status (current as 1 or former as 0)
#' @param smoking_intensity a vector of the number cigarettes patient smokes per day
#' @param duration_smoking a vector of patient's duration of smoking, per 1-yr increase
#' @param smoking_quit_time a vector of patient's smoking quit time, per 1-yr increase
#'
#' @return prob patient's 6-year probability of lung-cancer
#' @export
#'
#' @examples
#'plcom2012(age=62, race='White', education=4, bmi=27, copd=0, cancer_hist=0,
#'family_hist_lung_cancer=0, smoking_status=0, smoking_intensity=80,
#'duration_smoking=27, smoking_quit_time=10)
plcom2012 <- function(age, race, education, bmi, copd, cancer_hist, family_hist_lung_cancer, smoking_status, smoking_intensity, duration_smoking,
smoking_quit_time) {
race <- tolower(race)
if (race == "white" | race == "american indian" | race == "alaskan native" | race == 1) {
model <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
(duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506
}
if (race == "black" | race == 2) {
model <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
(duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506 + 0.3944778
}
if (race == "hispanic" | race == 3) {
model <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
(duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506 - 0.7434744
}
if (race == "asian" | race == 4) {
model <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
(duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506 - 0.466585
}
if (race == "native hawaiian" | race == "pacific islander" | race == 5) {
model <- 0.0778868 * (age - 62) - 0.0812744 * (education - 4) - 0.0274194 * (bmi - 27) + 0.3553063 * copd + 0.4589971 * cancer_hist +
0.587185 * family_hist_lung_cancer + 0.2597431 * smoking_status - 1.822606 * ((smoking_intensity/10)^(-1) - 0.4021541613) + 0.0317321 *
(duration_smoking - 27) - 0.0308572 * (smoking_quit_time - 10) - 4.532506 + 1.027152
}
prob <- exp(model)/(1 + exp(model))
results <- list()
results$prob <- prob
return(results)
}
|
# cpa: Consumers' Preferences Analysis.
# Clusters the consumers (columns of `hedo`) by Ward agglomeration + k-means,
# maps products and cluster centroids with a PCA of the hedonic scores, and
# then draws one product map per sensory descriptor, colouring every point by
# the correlation between that descriptor and the liking scores.
#
#   senso        products x sensory-descriptors data.frame
#   hedo         products x consumers data.frame of hedonic scores
#   coord        pair of PCA dimensions to plot (each must be < nrow(hedo))
#   center/scale standardisation applied to both tables before analysis
#   nb.clusters  number of consumer clusters; 0 = choose automatically from
#                the largest jump in the dendrogram heights
#   scale.unit   forwarded to PCA()
#   col          colour ramp coding correlations from -1 (first) to +1 (last)
#
# Returns a list: clusters (consumer -> cluster id), result (products-in-rows
# stacked data used for the PCA), prod.clusters (decat() description of the
# clusters), desc.clusters (descriptor x cluster correlation matrix).
# NOTE(review): relies on PCA() (FactoMineR), cluster::agnes and decat();
# presumably a SensoMineR function -- confirm against the package NAMESPACE.
cpa<- function(senso, hedo, coord=c(1,2),center=TRUE,scale=TRUE,nb.clusters=0,scale.unit=FALSE,col=terrain.colors(45)[1:41]) {
# colplot: internal helper. Scatters the rows of `mat` on dimensions `coord`,
# colouring each point by the value in column index `z`, mapped linearly from
# [-1, 1] onto the `col` ramp (`level` bins). The last `k` rows (the cluster
# centroids) are drawn as labelled filled squares instead of dots.
colplot<-function(mat, k=0,coord, z, level=41, col = terrain.colors(level+level%/%10)[1:level], xlab="", ylab="") { #heat.colors(level)
    abs <- coord[1]
    ord <- coord[2]
    x <- mat[,abs]
    y <- mat[,ord]
    z <- mat[,z]
    # the data-driven range below was replaced by the fixed correlation
    # range [-1, 1] so that colours are comparable across descriptors
    # x1 <- min(z)
    # x2 <- max(z)
    x1 <- -1
    x2 <- 1
    # empty frame first; points are added one by one with their own colour
    plot(mat[,abs],mat[,ord],xlab=xlab, ylab=ylab,asp=1,type="n")
    legend("topleft",legend=c(1,0.75,0.5,0.25,0,-0.25,-0.5,-0.75,-1),fill=c(col[level],col[(level%/%2)+(level%/%4)+(level%/%8)+1],col[(level%/%2)+(level%/%4)+1],col[(level%/%2)+(level%/%8)+1],col[(level%/%2)+1],col[(level%/%4)+(level%/%8)+1],col[(level%/%4)+1],col[(level%/%8)+1],col[1]),cex=0.7)
    abline(v=0,lty=2)
    abline(h=0,lty=2)
    ####rect(0, levels[-length(levels)], 1, levels[-1], col = col)
    n <- nrow(mat)
    h <- (x2-x1)/level
    # plain points for products, squares + labels for the k centroid rows
    for (ind in 1:(n-k)) points(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],pch=20)
    for (ind in (n-k+1):n) points(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],pch=15,cex=1)
    for (ind in (n-k+1):n) text(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],rownames(mat)[ind],cex=1,pos = 1, offset = 0.05)
}
### Main program
# guard: requested dimensions must exist in a PCA of nrow(hedo) products
if (max(coord) > (nrow(hedo)-1)) {
  print (paste("Problem with coord. Max (coord) must be less than",nrow(hedo)-1," Axes 1-2 will be taken",sep=""))
  coord=c(1,2)
}
# standardise both tables; the sqrt(n/(n-1)) factor converts scale()'s
# sample-sd scaling into population-sd scaling
senso <- scale(senso,center=center,scale=scale)[,]
hedo <- scale(hedo,center=center,scale=scale)[,]
if (scale) senso=senso*sqrt(nrow(senso)/(nrow(senso)-1))
if (scale) hedo=hedo*sqrt(nrow(hedo)/(nrow(hedo)-1))
# restore graphical parameters when the function exits
op <- par(no.readonly = TRUE)
on.exit(par(op))
senso <- data.frame(senso)
hedo <- data.frame(hedo)
nbjuge <- ncol(hedo)
nbdesc <- ncol(senso)
# Ward clustering of consumers on their hedonic profiles
classif <- cluster::agnes(dist(t(hedo)),method="ward")
plot(as.dendrogram(classif),main="Cluster Dendrogram",xlab="Panelists")
if (nb.clusters==0){
  # automatic cut: largest jump between successive agglomeration heights
  classif2 <- as.hclust(classif)
  nb.clusters = which.max(rev(diff(classif2$height))) + 1
#  classif=hopach(t(MatH),d="euclid",K=10,mss="mean")
#  nb.clusters=classif$clustering$k
}
# final partition refined by k-means
clusters=kmeans(t(hedo),centers=nb.clusters)$cluster
# centroid matrix: mean hedonic score of each cluster for every product
mat <- matrix(0,nb.clusters,nrow(hedo))
dimnames(mat) <- list(1:nb.clusters,rownames(hedo))
for (i in 1:nb.clusters){
  mat[i,] <- apply(t(hedo[,clusters==i]),2,mean)
  rownames(mat)[i] <- paste("cluster",i)
}
# correlation of every sensory descriptor with every cluster centroid
desc.clusters=cor(senso,t(mat),use="pairwise.complete.obs")
# stack consumers, centroids and descriptors as rows over the products
A <- rbind.data.frame(t(hedo),mat,t(senso))
colnames(A) <- row.names(hedo)
result <- A
# append the cluster factor (filler value 1 for centroid/descriptor rows)
auxil = cbind.data.frame(A,as.factor(c(clusters,rep(1,nrow(mat)+ncol(senso)))))
colnames(auxil)[ncol(A)+1]="cluster"
# PCA on the consumers; centroids and descriptors enter as supplementary rows
hedo.pca <- PCA(auxil,quali.sup=ncol(A)+1,ind.sup=(nbjuge+1):nrow(A),scale.unit=scale.unit,graph=FALSE,ncp = min(nbjuge-1,ncol(A)))
print(plot(hedo.pca,choix="ind",axes=coord,cex=0.7,habillage=ncol(A)+1))
print(plot(hedo.pca,choix="var",axes=coord))
TA <- t(A)
# correlations of each consumer / each centroid with each descriptor
coef <- matrix(NA,nbjuge+nb.clusters,nbdesc)
for (d in 1:nbdesc) {
  coef[1:nbjuge,d] <- cor(TA[,1:nbjuge],TA[,nbjuge+nb.clusters+d],use="pairwise.complete.obs")
  coef[(nbjuge+1):(nbjuge+nb.clusters),d] <- cor(TA[,(nbjuge+1):(nbjuge+nb.clusters)],TA[,nbjuge+nb.clusters+d],use="pairwise.complete.obs")
}
coef <- data.frame(coef)
colnames(coef) <- colnames(senso)
# PCA coordinates + descriptor correlations, one colour-coded map per
# descriptor; the descriptor itself is added in red as a supplementary point
B <- cbind.data.frame(rbind.data.frame(hedo.pca$ind$coord,hedo.pca$ind.sup$coord[1:nb.clusters,]),coef)
for (d in 1:nbdesc) {
  if (!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new()
  par(mar = c(4.2,4.1,3.5,2))
  colplot(as.matrix(B), k=nb.clusters,coord, (nrow(hedo)+d),col=col, xlab=paste("Dim",coord[1]," (",signif(hedo.pca$eig[coord[1],2],4),"%)",sep=""), ylab=paste("Dim",coord[2]," (",signif(hedo.pca$eig[coord[2],2],4),"%)",sep=""))
  points(hedo.pca$ind.sup$coord[nb.clusters+d,coord[1]],hedo.pca$ind.sup$coord[nb.clusters+d,coord[2]],col="red",pch=15,cex=0.8)
  text(hedo.pca$ind.sup$coord[nb.clusters+d,coord[1]],hedo.pca$ind.sup$coord[nb.clusters+d,coord[2]],col="red",labels=colnames(B)[nrow(hedo)+d],pos = 1, offset = 0.05)
  title(main = paste("Consumers' preferences analysed by",colnames(B)[nrow(hedo)+d]),cex.main = 1.1, font.main = 2)
}
# per-cluster description of product effects (products as factor levels)
don <- cbind.data.frame(as.factor(clusters),t(hedo))
colnames(don) <- c("clusters",paste("Prod",rownames(hedo),sep="."))
resdecat <- decat(don,formul="~clusters",firstvar=2,proba=1,graph=FALSE)
res <- list()
res$clusters <- clusters
res$result <- result
res$prod.clusters <- resdecat$resT
res$desc.clusters <- desc.clusters
return(res)
}
| /R/cpa.R | no_license | cran/SensoMineR | R | false | false | 4,798 | r | cpa<- function(senso, hedo, coord=c(1,2),center=TRUE,scale=TRUE,nb.clusters=0,scale.unit=FALSE,col=terrain.colors(45)[1:41]) {
colplot<-function(mat, k=0,coord, z, level=41, col = terrain.colors(level+level%/%10)[1:level], xlab="", ylab="") { #heat.colors(level)
# Scatter plot of the rows of `mat` on two factorial axes, each point coloured
# by the value in column `z` (assumed to lie in [-1, 1], e.g. a correlation),
# using a `level`-step colour ramp.
#   mat   : numeric matrix; columns indexed by `coord` give the plotting axes
#   k     : number of trailing rows drawn as labelled squares (cluster points)
#   coord : length-2 vector of column indices used as x and y axes
#   z     : index of the column of `mat` mapped to colour
#   level : number of colour bins; `col` is the palette itself
abs <- coord[1]
ord <- coord[2]
x <- mat[,abs]
y <- mat[,ord]
# Note: the name `z` is reused — from here on it holds the colour values,
# no longer the column index.
z <- mat[,z]
# x1 <- min(z)
# x2 <- max(z)
# Fixed colour range [-1, 1]; a data-driven range is kept commented out above.
x1 <- -1
x2 <- 1
# Empty frame with equal axis scaling; points are layered on below.
plot(mat[,abs],mat[,ord],xlab=xlab, ylab=ylab,asp=1,type="n")
# Legend: nine reference values in [-1, 1] mapped to their bin colours via
# integer division of the palette length.
legend("topleft",legend=c(1,0.75,0.5,0.25,0,-0.25,-0.5,-0.75,-1),fill=c(col[level],col[(level%/%2)+(level%/%4)+(level%/%8)+1],col[(level%/%2)+(level%/%4)+1],col[(level%/%2)+(level%/%8)+1],col[(level%/%2)+1],col[(level%/%4)+(level%/%8)+1],col[(level%/%4)+1],col[(level%/%8)+1],col[1]),cex=0.7)
abline(v=0,lty=2)
abline(h=0,lty=2)
####rect(0, levels[-length(levels)], 1, levels[-1], col = col)
n <- nrow(mat)
# Width of one colour bin.
h <- (x2-x1)/level
# First n-k rows: round points; last k rows: filled squares with row labels.
for (ind in 1:(n-k)) points(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],pch=20)
for (ind in (n-k+1):n) points(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],pch=15,cex=1)
for (ind in (n-k+1):n) text(x[ind],y[ind],col=col[max(1,(z[ind]-x1)%/%h)],rownames(mat)[ind],cex=1,pos = 1, offset = 0.05)
}
### Main program
# Consumer preference analysis: clusters panelists on their hedonic scores,
# runs a PCA with clusters and sensory descriptors as supplementary rows, and
# draws one preference map per descriptor via colplot().
# Guard: the requested axes must exist (at most nrow(hedo)-1 components).
if (max(coord) > (nrow(hedo)-1)) {
print (paste("Problem with coord. Max (coord) must be less than",nrow(hedo)-1," Axes 1-2 will be taken",sep=""))
coord=c(1,2)
}
# Centre/scale both tables; `[,]` drops the attributes added by scale().
senso <- scale(senso,center=center,scale=scale)[,]
hedo <- scale(hedo,center=center,scale=scale)[,]
# Rescale so the variance uses the population (1/n) convention.
if (scale) senso=senso*sqrt(nrow(senso)/(nrow(senso)-1))
if (scale) hedo=hedo*sqrt(nrow(hedo)/(nrow(hedo)-1))
# Restore graphical parameters on exit.
op <- par(no.readonly = TRUE)
on.exit(par(op))
senso <- data.frame(senso)
hedo <- data.frame(hedo)
nbjuge <- ncol(hedo)
nbdesc <- ncol(senso)
# Hierarchical clustering of panelists (columns of hedo) with Ward linkage.
classif <- cluster::agnes(dist(t(hedo)),method="ward")
plot(as.dendrogram(classif),main="Cluster Dendrogram",xlab="Panelists")
# If no cluster count was requested, pick it at the largest jump in the
# dendrogram heights.
if (nb.clusters==0){
classif2 <- as.hclust(classif)
nb.clusters = which.max(rev(diff(classif2$height))) + 1
# classif=hopach(t(MatH),d="euclid",K=10,mss="mean")
# nb.clusters=classif$clustering$k
}
# Final partition of panelists by k-means.
clusters=kmeans(t(hedo),centers=nb.clusters)$cluster
# `mat`: one row per cluster holding the mean hedonic profile over products.
mat <- matrix(0,nb.clusters,nrow(hedo))
dimnames(mat) <- list(1:nb.clusters,rownames(hedo))
for (i in 1:nb.clusters){
mat[i,] <- apply(t(hedo[,clusters==i]),2,mean)
rownames(mat)[i] <- paste("cluster",i)
}
# Correlation of each sensory descriptor with each cluster's mean profile.
desc.clusters=cor(senso,t(mat),use="pairwise.complete.obs")
# Stack panelists, cluster means and descriptors as rows of one table.
A <- rbind.data.frame(t(hedo),mat,t(senso))
colnames(A) <- row.names(hedo)
result <- A
# Extra factor column: real cluster for panelists, dummy level 1 for the
# supplementary rows (cluster means + descriptors).
auxil = cbind.data.frame(A,as.factor(c(clusters,rep(1,nrow(mat)+ncol(senso)))))
colnames(auxil)[ncol(A)+1]="cluster"
# PCA on panelists only; cluster means and descriptors projected as
# supplementary individuals (PCA is presumably FactoMineR's — confirm).
hedo.pca <- PCA(auxil,quali.sup=ncol(A)+1,ind.sup=(nbjuge+1):nrow(A),scale.unit=scale.unit,graph=FALSE,ncp = min(nbjuge-1,ncol(A)))
print(plot(hedo.pca,choix="ind",axes=coord,cex=0.7,habillage=ncol(A)+1))
print(plot(hedo.pca,choix="var",axes=coord))
TA <- t(A)
# Correlation of every panelist / cluster with every descriptor.
coef <- matrix(NA,nbjuge+nb.clusters,nbdesc)
for (d in 1:nbdesc) {
coef[1:nbjuge,d] <- cor(TA[,1:nbjuge],TA[,nbjuge+nb.clusters+d],use="pairwise.complete.obs")
coef[(nbjuge+1):(nbjuge+nb.clusters),d] <- cor(TA[,(nbjuge+1):(nbjuge+nb.clusters)],TA[,nbjuge+nb.clusters+d],use="pairwise.complete.obs")
}
coef <- data.frame(coef)
colnames(coef) <- colnames(senso)
# PCA coordinates of panelists + cluster means, side by side with the
# descriptor correlations used for colouring.
B <- cbind.data.frame(rbind.data.frame(hedo.pca$ind$coord,hedo.pca$ind.sup$coord[1:nb.clusters,]),coef)
# One preference map per descriptor: panelists coloured by their correlation
# with the descriptor, the descriptor itself overlaid in red.
for (d in 1:nbdesc) {
if (!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new()
par(mar = c(4.2,4.1,3.5,2))
colplot(as.matrix(B), k=nb.clusters,coord, (nrow(hedo)+d),col=col, xlab=paste("Dim",coord[1]," (",signif(hedo.pca$eig[coord[1],2],4),"%)",sep=""), ylab=paste("Dim",coord[2]," (",signif(hedo.pca$eig[coord[2],2],4),"%)",sep=""))
points(hedo.pca$ind.sup$coord[nb.clusters+d,coord[1]],hedo.pca$ind.sup$coord[nb.clusters+d,coord[2]],col="red",pch=15,cex=0.8)
text(hedo.pca$ind.sup$coord[nb.clusters+d,coord[1]],hedo.pca$ind.sup$coord[nb.clusters+d,coord[2]],col="red",labels=colnames(B)[nrow(hedo)+d],pos = 1, offset = 0.05)
title(main = paste("Consumers' preferences analysed by",colnames(B)[nrow(hedo)+d]),cex.main = 1.1, font.main = 2)
}
# Describe clusters by products: hedonic scores modelled against the cluster
# factor (decat is presumably SensoMineR's — confirm).
don <- cbind.data.frame(as.factor(clusters),t(hedo))
colnames(don) <- c("clusters",paste("Prod",rownames(hedo),sep="."))
resdecat <- decat(don,formul="~clusters",firstvar=2,proba=1,graph=FALSE)
# Returned value: panelist partition, stacked data table, per-product and
# per-descriptor cluster characterisations.
res <- list()
res$clusters <- clusters
res$result <- result
res$prod.clusters <- resdecat$resT
res$desc.clusters <- desc.clusters
return(res)
}
|
###############################################################################
### Performance monitor for Shinyapps.io ###
### Server ###
### Version: 1.0 ###
### Date: 09-04-2018 ###
### Author: Nicolai Simonsen ###
###############################################################################
library(shiny)
library(ggplot2)
library(plyr)
library(dplyr)
library(data.table)
library(lubridate)
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
# Server for a shinyapps.io performance dashboard: periodically pulls CPU /
# connection / worker metrics via rsconnect, appends them to an on-disk
# .Rdata file, and renders a faceted area chart of the recent window.
# `timer$start` records when the last refresh ran (used by the countdown).
timer <- reactiveValues(start = 0)
# Set data path
# NOTE(review): hard-coded, user-specific Windows path — breaks on any other
# machine; consider a relative path or a config option.
dataPath <- paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/DashboardShinyapps/monitoringData.Rdata")
# Render plot
# Initial render from the data already on disk; shows the last
# `input$period` hours, one facet per metric.
output$dashboardPlot <- renderPlot({
print("Rendering plot")
# Load data
# load() restores the `monitoring.data` data.table saved below.
load(dataPath)
monitoring.data <- melt(monitoring.data, id.vars = "timestamp")
ggplot(data = monitoring.data[timestamp>Sys.time()-60*60*as.numeric(input$period),]) +
geom_area(aes(y = value, x = timestamp, colour = variable, fill = variable), alpha = 0.3) +
facet_wrap(~variable,scales = "free_y") +
theme_gray(base_size = 20) +
theme(plot.background = element_rect(fill = "#2b3e50"),
axis.text = element_text(colour = "white" ),
legend.background = element_rect(fill = "#4e5d6c"),
legend.text = element_text(color = "white"))
})
# Periodic refresh: re-runs every `input$updateTime` minutes.
observe({
invalidateLater(as.numeric(input$updateTime)*60*1000,session)
timer$start <- Sys.time()
# Print to console
print(paste0("Updating data - ",Sys.time()))
# Update data
# NOTE(review): library(), source() and setwd() inside an observer mutate
# global state on every tick; safer to load once at startup and use
# absolute paths instead of setwd().
library(rsconnect)
source("authentication.R")
setwd(paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/experiment1"))
# Credentials (name/token/secret) come from authentication.R sourced above.
setAccountInfo(name = name,
token = token,
secret = secret)
# Pull each metric series from shinyapps.io and convert the epoch
# timestamps to POSIXct.
cpu.user <- showMetrics("container.cpu",c("cpu.user"), server="shinyapps.io") %>% data.table()
cpu.user <- cpu.user[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
cpu.system <- showMetrics("container.cpu",c("cpu.system"), server="shinyapps.io") %>% data.table()
cpu.system <- cpu.system[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
connections <- showMetrics("container.shiny.connections",c("shiny.connections.active"), server="shinyapps.io") %>% data.table()
connections <- connections[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
workers <- showMetrics("container.shiny.status",c("shiny.rprocs.count"), server="shinyapps.io") %>% data.table()
workers <- workers[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
setwd(paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/DashboardShinyapps"))
library(plyr); library(dplyr)
# Merge the four series on timestamp (right join keeps the workers grid).
new.data <- join_all(list(cpu.user,cpu.system,connections,workers), by = "timestamp", type = "right")
# Load old monitoring data
load(dataPath)
# Check which observations are new
have <- monitoring.data[,timestamp]
new <- new.data[!timestamp %in% have,]
# Append to old
monitoring.data <- rbind(monitoring.data,new)
setkey(monitoring.data, "timestamp")
# Save
# Drop rows with any missing metric before persisting.
# NOTE(review): comparing against the string "NA" — presumably these
# columns arrive as character; if numeric, is.na() is needed — confirm.
monitoring.data <- monitoring.data[cpu.user != "NA" &
cpu.system != "NA" &
shiny.connections.active != "NA" &
shiny.rprocs.count != "NA",]
save(monitoring.data, file = dataPath)
# Render Plot
# NOTE(review): re-assigning output$dashboardPlot inside observe()
# re-registers the render on every tick; a reactive data source with a
# single renderPlot would be the conventional pattern.
output$dashboardPlot <- renderPlot({
print("Rendering plot")
monitoring.data <- melt(monitoring.data, id.vars = "timestamp")
ggplot(data = monitoring.data[timestamp>Sys.time()-60*60*as.numeric(input$period),]) +
geom_area(aes(y = value, x = timestamp, colour = variable, fill = variable), alpha = 0.3) +
facet_wrap(~variable,scales = "free_y") +
theme_gray(base_size = 20) +
theme(plot.background = element_rect(fill = "#2b3e50"),
axis.text = element_text(colour = "white" ),
legend.background = element_rect(fill = "#4e5d6c"),
legend.text = element_text(color = "white"))
})
})
# Show time to next update
# Countdown refreshed every second from `timer$start`.
output$timeToUpdate <- renderText({
invalidateLater(1000, session)
paste0("Time to update: ",
seconds_to_period(round(as.numeric(input$updateTime)*60 - as.numeric(Sys.time()-timer$start, units = "secs"), digits = 0))
)
})
})
| /server.R | no_license | fink42/DashboardShinyapps | R | false | false | 4,988 | r | ###############################################################################
### Performance monitor for Shinyapps.io ###
### Server ###
### Version: 1.0 ###
### Date: 09-04-2018 ###
### Author: Nicolai Simonsen ###
###############################################################################
library(shiny)
library(ggplot2)
library(plyr)
library(dplyr)
library(data.table)
library(lubridate)
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
timer <- reactiveValues(start = 0)
# Set data path
dataPath <- paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/DashboardShinyapps/monitoringData.Rdata")
# Render plot
output$dashboardPlot <- renderPlot({
print("Rendering plot")
# Load data
load(dataPath)
monitoring.data <- melt(monitoring.data, id.vars = "timestamp")
ggplot(data = monitoring.data[timestamp>Sys.time()-60*60*as.numeric(input$period),]) +
geom_area(aes(y = value, x = timestamp, colour = variable, fill = variable), alpha = 0.3) +
facet_wrap(~variable,scales = "free_y") +
theme_gray(base_size = 20) +
theme(plot.background = element_rect(fill = "#2b3e50"),
axis.text = element_text(colour = "white" ),
legend.background = element_rect(fill = "#4e5d6c"),
legend.text = element_text(color = "white"))
})
observe({
invalidateLater(as.numeric(input$updateTime)*60*1000,session)
timer$start <- Sys.time()
# Print to console
print(paste0("Updating data - ",Sys.time()))
# Update data
library(rsconnect)
source("authentication.R")
setwd(paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/experiment1"))
setAccountInfo(name = name,
token = token,
secret = secret)
cpu.user <- showMetrics("container.cpu",c("cpu.user"), server="shinyapps.io") %>% data.table()
cpu.user <- cpu.user[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
cpu.system <- showMetrics("container.cpu",c("cpu.system"), server="shinyapps.io") %>% data.table()
cpu.system <- cpu.system[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
connections <- showMetrics("container.shiny.connections",c("shiny.connections.active"), server="shinyapps.io") %>% data.table()
connections <- connections[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
workers <- showMetrics("container.shiny.status",c("shiny.rprocs.count"), server="shinyapps.io") %>% data.table()
workers <- workers[,timestamp := as.POSIXct(timestamp, tz = "CET", origin = "1970-01-01")]
setwd(paste0("C:/Users/",Sys.info()[7],"/OneDrive - Syddansk Universitet/PhD/Projects/Forskningens dogn/Shiny-survey/DashboardShinyapps"))
library(plyr); library(dplyr)
new.data <- join_all(list(cpu.user,cpu.system,connections,workers), by = "timestamp", type = "right")
# Load old monitoring data
load(dataPath)
# Check which observations are new
have <- monitoring.data[,timestamp]
new <- new.data[!timestamp %in% have,]
# Append to old
monitoring.data <- rbind(monitoring.data,new)
setkey(monitoring.data, "timestamp")
# Save
monitoring.data <- monitoring.data[cpu.user != "NA" &
cpu.system != "NA" &
shiny.connections.active != "NA" &
shiny.rprocs.count != "NA",]
save(monitoring.data, file = dataPath)
# Render Plot
output$dashboardPlot <- renderPlot({
print("Rendering plot")
monitoring.data <- melt(monitoring.data, id.vars = "timestamp")
ggplot(data = monitoring.data[timestamp>Sys.time()-60*60*as.numeric(input$period),]) +
geom_area(aes(y = value, x = timestamp, colour = variable, fill = variable), alpha = 0.3) +
facet_wrap(~variable,scales = "free_y") +
theme_gray(base_size = 20) +
theme(plot.background = element_rect(fill = "#2b3e50"),
axis.text = element_text(colour = "white" ),
legend.background = element_rect(fill = "#4e5d6c"),
legend.text = element_text(color = "white"))
})
})
# Show time to next update
output$timeToUpdate <- renderText({
invalidateLater(1000, session)
paste0("Time to update: ",
seconds_to_period(round(as.numeric(input$updateTime)*60 - as.numeric(Sys.time()-timer$start, units = "secs"), digits = 0))
)
})
})
|
# Command-line regex tester: extracts every match of a regular expression
# from a text string and prints each match on its own line.
#
# Usage: Rscript R_regex_tester.R <text> <regex>
#
# library() errors immediately if stringr is missing (require() would only
# return FALSE and fail later with a less helpful message).
suppressPackageStartupMessages(library(stringr))

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  print(length(args))  # echo the argument count to help diagnose the failure
  # Original message ended in a literal ".n" — the newline escape was missing.
  stop("Two arguments must be supplied (text + regex).\n", call. = FALSE)
}
text <- args[1]
regex <- args[2]

# With simplify = TRUE, str_extract_all() returns a character matrix of all
# matches; iterate and print each one.
vals <- str_extract_all(text, regex, simplify = TRUE)
for (val in vals) {
  print(val)
}
require("stringr",quietly=TRUE)
args = commandArgs(trailingOnly=TRUE)
if (length(args)<2) {
print(length(args))
stop("Two arguments must be supplied (text + regex).n", call.=FALSE)
} else {
text = args[1]
regex = args[2]
}
# print("Got tex")
# print(text)
# print("Got regex")
# print(regex)
x <- c(text)
vals <- str_extract_all(x, regex, simplify = TRUE)
for (val in vals) {
print(val)
} |
testlist <- list(Beta = 0, CVLinf = -3.35916362954636e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827872-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -3.35916362954636e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rekognition_operations.R
\name{rekognition_get_celebrity_info}
\alias{rekognition_get_celebrity_info}
\title{Gets the name and additional information about a celebrity based on
their Amazon Rekognition ID}
\usage{
rekognition_get_celebrity_info(Id)
}
\arguments{
\item{Id}{[required] The ID for the celebrity. You get the celebrity ID from a call to the
\code{\link[=rekognition_recognize_celebrities]{recognize_celebrities}} operation,
which recognizes celebrities in an image.}
}
\description{
Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.
See \url{https://www.paws-r-sdk.com/docs/rekognition_get_celebrity_info/} for full documentation.
}
\keyword{internal}
| /cran/paws.machine.learning/man/rekognition_get_celebrity_info.Rd | permissive | paws-r/paws | R | false | true | 930 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rekognition_operations.R
\name{rekognition_get_celebrity_info}
\alias{rekognition_get_celebrity_info}
\title{Gets the name and additional information about a celebrity based on
their Amazon Rekognition ID}
\usage{
rekognition_get_celebrity_info(Id)
}
\arguments{
\item{Id}{[required] The ID for the celebrity. You get the celebrity ID from a call to the
\code{\link[=rekognition_recognize_celebrities]{recognize_celebrities}} operation,
which recognizes celebrities in an image.}
}
\description{
Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty.
See \url{https://www.paws-r-sdk.com/docs/rekognition_get_celebrity_info/} for full documentation.
}
\keyword{internal}
|
---
title: "Experiment Data Exploration"
author: "Jesús Vélez Santiago"
date: "`r format(Sys.Date(), '%Y-%m')`"
output:
html_document:
theme: readable
highlight: kate
toc: true
toc_float: true
toc_depth: 3
code_folding: show
self_contained: true
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
echo = TRUE,
message = FALSE,
fig.align = "center",
# dev = "svg",
fig.retina = 2
)
```
## Libraries
```{r libraries, message=FALSE}
library(tidyverse)
library(ggpubr)
library(ggwaffle)
library(here)
library(glue)
```
## Load Data
```{r load_data}
lineages_files <- here("data", "processed", "lineages.tsv")
lineages_df <- read_tsv(lineages_files, show_col_types = FALSE) %>%
glimpse()
```
## Minimal preprocessing
```{r minimal_preprocessing}
processed_lineages_df <- lineages_df %>%
mutate(
gfp = log10(gfp),
ds_red = log10(ds_red),
across(contains("filamentaded_"),~factor(.x, c(FALSE, TRUE), c("Not filamentaded", "Filamentaded")))
) %>%
add_count(experiment_id, trap_id, track_id, time) %>%
filter(n == 1) %>%
select(-n) %>%
glimpse()
```
## Exploratory Data Analysis
### Set default plot style
```{r default_plot_theme}
theme_set(theme_bw())
```
```{r donout_chart}
donout_df <- processed_lineages_df %>%
count(experiment_id, filamentaded_track) %>%
arrange(filamentaded_track) %>%
mutate(
experiment_id = case_when(
experiment_id == "Chromosome" ~ "C",
TRUE ~ "P"
),
percentage = n / sum(n) * 100,
ymax = cumsum(percentage),
ymin = c(0, head(ymax, -1)),
label = glue("{experiment_id}: {format(percentage, digits=2)}%"),
label_position = (ymax + ymin) / 2
) %>%
glimpse()
donout_total <- donout_df %>%
pull(n) %>%
sum()
donout_df %>%
ggplot(
aes(
ymax=ymax,
ymin=ymin,
xmax=4,
xmin=3
),
) +
geom_rect(
size = 1.5,
color = "white",
aes(
fill=filamentaded_track,
group=experiment_id
)
) +
geom_label(x = 2, aes(y = label_position, label = label), size=3.5) +
coord_polar(theta = "y") +
xlim(c(-1, 4)) +
labs(
fill = "Cell status",
caption = glue("Total: {format(donout_total, big.mark=',')} tracks")
) +
theme_void() +
theme(
legend.position = "top",
plot.caption = element_text(face = "bold", hjust = 0.5)
)
```
### Mean GFP
```{r gfp_distribution}
processed_lineages_df %>%
group_by(experiment_id, trap_id, track_id, filamentaded_track) %>%
summarize(mean_gfp = mean(gfp), .groups = "drop") %>%
gghistogram(
x = "mean_gfp",
facet.by = "experiment_id",
color = "filamentaded_track",
fill = "filamentaded_track",
alpha = 1/3,
add = "mean",
xlab = "Mean fluorescent intensity (log10)",
ylab = "Count of cells"
) +
labs(
color = "Cell status",
fill = "Cell status"
)
```
```{r,area_chart}
processed_lineages_df %>%
count(experiment_id, filamentaded_at_frame, time) %>%
group_by(experiment_id, time) %>%
summarize(
filamentaded_at_frame = filamentaded_at_frame,
percentage = n / sum(n),
.groups = "drop"
) %>%
ggplot(aes(x = time, y = percentage, fill = filamentaded_at_frame)) +
geom_area(size = 0.5, alpha = 1/1) +
geom_vline(xintercept = c(60, 140), linetype = "dashed") +
geom_text(
data = data.frame(
x = 73,
y = 0.8,
label = "Start",
experiment_id = "Plasmid"
),
mapping = aes(x = x, y = y, label = label),
size = 5.64,
colour = "white",
fontface = 2,
inherit.aes = FALSE
) +
geom_text(
data = data.frame(
x = 151,
y = 0.8,
label = "End",
experiment_id = "Plasmid"
),
mapping = aes(x = x, y = y, label = label),
size = 5.64,
colour = "white",
fontface = 2,
inherit.aes = FALSE
) +
facet_grid(experiment_id ~ .) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
theme_bw() +
theme(
legend.position = "top",
panel.spacing.y = unit(1, "lines")
) +
labs(
x = "Time (minutes)",
y = "Percentage of cells",
fill = "Cell status"
)
```
```{r rain_plot}
library(ggdist)
p_1 <- processed_lineages_df %>%
filter(time == 0) %>%
ggplot(
aes(
x = filamentaded_track,
y = length,
group = filamentaded_track,
fill = filamentaded_track,
color = filamentaded_track
)
) +
ggdist::stat_halfeye(
adjust = .5,
width = .6,
.width = 0,
justification = -.2,
point_colour = NA
) +
geom_boxplot(
width = .15,
outlier.shape = NA,
alpha = 1/3
) +
ggdist::geom_dots(side = "bottom", alpha = 1/10) +
facet_wrap(experiment_id ~ .) +
theme_bw() +
theme(legend.position = "top") +
labs(
x = "Cell status",
y = "Initial length",
color = "Cell status",
fill = "Cell status"
) +
scale_y_continuous(limits = c(0, 100)) +
coord_flip() +
stat_compare_means(label.y = 60, label.x = 1.5)
p_2 <- processed_lineages_df %>%
filter(time == 0) %>%
ggplot(
aes(
x = filamentaded_track,
y = gfp,
group = filamentaded_track,
fill = filamentaded_track,
color = filamentaded_track
),
side = "bottom"
) +
ggdist::stat_halfeye(
adjust = .5,
width = .6,
.width = 0,
justification = -.2,
point_colour = NA
) +
geom_boxplot(
width = .15,
outlier.shape = NA,
alpha = 1/3
) +
ggdist::geom_dots(side = "bottom", alpha = 1/10) +
facet_wrap(experiment_id ~ .) +
theme_bw() +
theme(legend.position = "top") +
labs(
x = "Cell status",
y = "Initial GFP",
color = "Cell status",
fill = "Cell status"
) +
coord_flip() +
stat_compare_means(label.y = 2.5, label.x = 1.5)
```
```{r rain_cloud_2}
library(patchwork)
library(ggpubr)
(p_1 / p_2) +
plot_layout(guides = 'collect')
```
```{r metric_charts}
processed_lineages_df %>%
select(experiment_id, filamentaded_track, time, length, gfp, ds_red) %>%
pivot_longer(
cols = c(length, gfp, ds_red),
names_to = "metric"
) %>%
mutate(
metric = case_when(
metric == "ds_red" ~ "DsRed",
metric == "gfp" ~ "GFP",
metric == "length" ~ "Length"
)
) %>%
group_by(experiment_id, filamentaded_track, time, metric) %>%
summarise(
ci = list(mean_cl_normal(value)),
.groups = "drop"
) %>%
unnest(cols = c(ci)) %>%
ggplot(aes(x = time, y = y, ymin= ymin, ymax=ymax, color = filamentaded_track)) +
annotate("rect", xmin=60, xmax=140, ymin=-Inf, ymax=Inf, alpha=1/2, color = "transparent", fill = "#FCB565") +
geom_smooth(method = "loess") +
facet_grid(metric ~ experiment_id, scales = "free_y") +
labs(
x = "Time (minutes)",
y = "Value",
color = "Cell status"
) +
theme_bw() +
theme(legend.position = "top")
```
```{r survival_probability}
gfp_control_hist <- processed_lineages_df %>%
ggplot(aes(x = gfp)) +
geom_histogram(bins = 100)
gfp_hist_data <- gfp_control_hist %>%
ggplot_build() %>%
pluck("data", 1) %>%
select(count, x, xmin, xmax) %>%
as_tibble()
gfp_breaks <- gfp_hist_data %>%
{c(.$xmin, last(.$xmax))}
survival_probability_df <- processed_lineages_df %>%
group_by(experiment_id, lineage_id, trap_id, filamentaded_track) %>%
summarise(
initial_gfp = first(gfp),
is_long_track = first(centered_frame) < unique(centered_antibiotic_start_frame) &&
last(centered_frame) > unique(centered_antibiotic_end_frame),
.groups = "drop"
) %>%
filter(is_long_track) %>%
group_by(experiment_id, filamentaded_track) %>%
group_modify(~{
tibble(
plot = list(
ggplot(data = .x, aes(x = initial_gfp)) +
geom_histogram(breaks = gfp_breaks)
)
)
}) %>%
mutate(
counts = map(plot, ggplot_build),
counts = map(counts, pluck, "data", 1),
counts = map(counts, add_column, control_count = gfp_hist_data$count),
counts = map(counts, select, gfp = x, control_count, count)
) %>%
unnest(counts) %>%
mutate(
survival_probability = count / control_count,
#survival_probability = survival_probability / max(survival_probability, na.rm = TRUE)
) %>%
filter(survival_probability != 0) %>%
glimpse()
```
```{r survival_probability_plot}
survival_probability_df %>%
ggplot(aes(x = gfp, y = survival_probability, color = filamentaded_track, linetype = experiment_id)) +
geom_point() +
geom_line() +
scale_y_continuous(labels = scales::percent) +
theme_bw() +
theme(
legend.position = "top"
) +
labs(
x = "Initial GFP",
y = "Survival probability",
color = "Cell status",
linetype = "Experiment"
)
```
```{r survival_probability_plot_area}
survival_probability_df %>%
group_by(filamentaded_track, gfp) %>%
ggplot(aes(x = gfp, y = count, fill = filamentaded_track)) +
geom_area(position = "fill", stat="identity") +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
theme_bw() +
theme(
legend.position = "top"
) +
labs(
x = "Initial GFP (log10)",
y = "Percentage of cells",
fill = "Cell status"
)
```
```{r}
status_points_df <- processed_lineages_df %>%
group_by(experiment_id, trap_id, track_id, filamentaded_track) %>%
summarize(
first_false = which.min(filamentaded_at_frame),
first_true = which.max(filamentaded_at_frame),
initial_gfp = first(gfp),
sos_gfp = gfp[first_true],
end_gfp = last(gfp),
#diff_sos_initial_gfp = sos_gfp -initial_gfp,
#diff_end_sos_gfp = end_gfp - sos_gfp,
diff_end_intial_gfp = end_gfp - initial_gfp,
initial_length = first(length),
sos_length = length[first_true],
end_length = last(length),
#diff_sos_initial_length = sos_length - initial_length,
#diff_end_sos_length = end_length - sos_length,
diff_end_intial_length = end_length - initial_length,
initial_time = first(centered_frame) * 10,
sos_time = centered_frame[first_true] * 10,
end_time = last(centered_frame) * 10,
life_time = end_time - initial_time,
#diff_sos_intial_time = sos_time - initial_time,
#diff_end_sos_time = end_time - sos_time,
#diff_end_intial_time = end_time - initial_time,
is_survivor = initial_time < unique(centered_antibiotic_end_frame) * 10 &&
end_time > unique(centered_antibiotic_end_frame) * 10,
is_survivor = ifelse(is_survivor, "Survived", "Dit not survive"),
is_survivor = factor(is_survivor),
.groups = "drop"
) %>%
#filter(initial_frame <= sos_frame, sos_frame <= end_frame) %>%
glimpse()
```
```{r}
status_points_df %>%
count(experiment_id, filamentaded_track, life_time) %>%
ggplot(aes(x = as.factor(life_time), y = n, fill = filamentaded_track)) +
geom_bar(position = "fill", stat="identity", width = 1) +
scale_x_discrete(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
facet_grid(experiment_id ~ .) +
theme_bw() +
theme(
legend.position = "top",
panel.spacing.y = unit(1, "lines")
) +
labs(
x = "Cell life time",
y = "Percentage of cells",
fill = "Cell status"
)
```
```{r}
status_points_df %>%
group_by(experiment_id, filamentaded_track, life_time) %>%
summarize(
n = n(),
initial_length = median(initial_length),
sos_length = median(sos_length),
end_length = median(end_length),
.groups = "drop"
) %>%
pivot_longer(
cols = contains("length"),
names_to = "length_type"
) %>%
mutate(
length_type = factor(length_type, levels = c("initial_length", "sos_length", "end_length"), labels = c("Initial", "SOS", "End"))
) %>%
ggplot(aes(x = life_time, y = value)) +
geom_line(aes(group = life_time)) +
geom_point(aes(color = length_type), alpha = 1/1) +
facet_grid(filamentaded_track ~ experiment_id) +
theme_bw() +
theme(
legend.position = "top"
) +
labs(
x = "Cell life time",
y = "Length value",
color = "Length type"
)
```
```{r}
status_points_df %>%
group_by(experiment_id, filamentaded_track, life_time) %>%
summarize(
n = n(),
initial_gfp = median(initial_gfp),
sos_gfp = median(sos_gfp),
end_gfp = median(end_gfp),
.groups = "drop"
) %>%
pivot_longer(
cols = contains("gfp"),
names_to = "gfp_type"
) %>%
mutate(
gfp_type = factor(gfp_type, levels = c("initial_gfp", "sos_gfp", "end_gfp"), labels = c("Initial", "SOS", "End"))
) %>%
ggplot(aes(x = life_time, y = value)) +
geom_line(aes(group = life_time)) +
geom_point(aes(color = gfp_type), alpha = 1/1) +
facet_grid(filamentaded_track ~ experiment_id) +
theme_bw() +
theme(
legend.position = "top"
) +
labs(
x = "Cell life time",
y = "GFP value",
color = "GFP type"
)
```
```{r}
library(tidymodels)
model_data <- status_points_df %>%
select(is_survivor, filamentaded_track, contains("gfp"), contains("length"), -contains("diff"), -contains("sos")) %>%
mutate(
out = interaction(is_survivor, filamentaded_track),
out = as.character(out),
out = as.factor(out)
) %>%
select(-is_survivor, -filamentaded_track) %>%
glimpse()
count(model_data, out)
```
```{r}
# Reproducible 75/25 train/test split, stratified on the outcome `out` so
# both partitions keep the same class proportions.
set.seed(123)
model_data_split <- initial_split(model_data, prop = 0.75, strata = out)
training_data <- training(model_data_split)
testing_data <- testing(model_data_split)
# Print the split summary. (The original printed `model_split`, a name never
# defined in this document, which errors at knit time.)
model_data_split
```
```{r}
data_folds <- vfold_cv(training_data, v = 20, strata = out)
data_folds
```
```{r}
library(themis)
data_recipe <- recipe(out ~ ., data = training_data) %>%
#step_corr(all_numeric(), threshold = 0.8) %>%
step_normalize(all_numeric(), -contains("time")) %>%
step_zv(all_predictors()) %>%
step_dummy(all_nominal(), -all_outcomes()) %>%
step_downsample(out)
summary(data_recipe)
```
```{r}
dt_tune_model <- decision_tree(
mode = "classification",
engine = "rpart",
cost_complexity = tune(),
tree_depth = tune(),
min_n = tune()
)
dt_tune_model <- rand_forest(
mode = "classification",
engine = "ranger",
mtry = tune(),
trees = tune(),
min_n = tune()
)
dt_tune_model
```
```{r}
set.seed(123)
dt_grid <- grid_random(
mtry() %>% range_set(c(2, 3)),
trees(),
min_n(),
size = 10
)
dt_grid
```
```{r}
data_wkfl <- workflow() %>%
add_model(dt_tune_model) %>%
add_recipe(data_recipe)
data_wkfl
```
```{r}
dt_tuning <- data_wkfl %>%
tune_grid(
resamples = data_folds,
grid = dt_grid
)
dt_tuning %>%
show_best(metric = "roc_auc", n = 5)
```
```{r}
best_dt_model <- dt_tuning %>%
select_best(metric = "roc_auc")
best_dt_model
```
```{r}
final_data_wkfl <- data_wkfl %>%
finalize_workflow(best_dt_model)
final_data_wkfl
```
```{r}
data_wf_fit <- final_data_wkfl %>%
fit(data = training_data)
tree_fit <- data_wf_fit %>%
extract_fit_parsnip()
```
```{r}
vip::vip(tree_fit)
```
```{r}
data_final_fit <- final_data_wkfl %>%
last_fit(split = model_data_split)
data_final_fit %>%
collect_metrics()
```
```{r}
data_final_fit %>%
collect_predictions() %>%
roc_curve(truth = is_survivor, .estimate = .pred_Survived) %>%
identity() %>%
autoplot()
```
```{r}
tree_predictions <- data_final_fit %>% collect_predictions()
conf_mat(tree_predictions, truth = is_survivor, estimate = .pred_class) %>%
autoplot()
```
```{r}
status_points_df %>%
count(is_survivor) %>%
identity()
```
| /Rmarkdown/tmp.R | no_license | jvelezmagic/CellFilamentation | R | false | false | 15,409 | r | ---
title: "Experiment Data Exploration"
author: "Jesús Vélez Santiago"
date: "`r format(Sys.Date(), '%Y-%m')`"
output:
html_document:
theme: readable
highlight: kate
toc: true
toc_float: true
toc_depth: 3
code_folding: show
self_contained: true
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(
echo = TRUE,
message = FALSE,
fig.align = "center",
# dev = "svg",
fig.retina = 2
)
```
## Libraries
```{r libraries, message=FALSE}
library(tidyverse)
library(ggpubr)
library(ggwaffle)
library(here)
library(glue)
```
## Load Data
```{r load_data}
lineages_files <- here("data", "processed", "lineages.tsv")
lineages_df <- read_tsv(lineages_files, show_col_types = FALSE) %>%
glimpse()
```
## Minimal preprocessing
```{r minimal_preprocessing}
processed_lineages_df <- lineages_df %>%
mutate(
gfp = log10(gfp),
ds_red = log10(ds_red),
across(contains("filamentaded_"),~factor(.x, c(FALSE, TRUE), c("Not filamentaded", "Filamentaded")))
) %>%
add_count(experiment_id, trap_id, track_id, time) %>%
filter(n == 1) %>%
select(-n) %>%
glimpse()
```
## Exploratory Data Analysis
### Set default plot style
```{r default_plot_theme}
theme_set(theme_bw())
```
```{r donout_chart}
# Donut chart of cell status per experiment.
# NOTE(review): `count()` here tallies observation rows (time points), not
# unique tracks, but the caption reads "tracks" — confirm which is intended.
donout_df <- processed_lineages_df %>%
  count(experiment_id, filamentaded_track) %>%
  arrange(filamentaded_track) %>%
  mutate(
    # Abbreviate experiment labels: Chromosome -> C, anything else -> P.
    experiment_id = case_when(
      experiment_id == "Chromosome" ~ "C",
      TRUE ~ "P"
    ),
    # Cumulative shares define each ring segment's angular extent.
    percentage = n / sum(n) * 100,
    ymax = cumsum(percentage),
    ymin = c(0, head(ymax, -1)),
    label = glue("{experiment_id}: {format(percentage, digits=2)}%"),
    label_position = (ymax + ymin) / 2
  ) %>%
  glimpse()
donout_total <- donout_df %>%
  pull(n) %>%
  sum()
# Rectangles in polar coordinates form the donut; xlim(-1, 4) opens the hole.
donout_df %>%
  ggplot(
    aes(
      ymax=ymax,
      ymin=ymin,
      xmax=4,
      xmin=3
    ),
  ) +
  geom_rect(
    size = 1.5,
    color = "white",
    aes(
      fill=filamentaded_track,
      group=experiment_id
    )
  ) +
  geom_label(x = 2, aes(y = label_position, label = label), size=3.5) +
  coord_polar(theta = "y") +
  xlim(c(-1, 4)) +
  labs(
    fill = "Cell status",
    caption = glue("Total: {format(donout_total, big.mark=',')} tracks")
  ) +
  theme_void() +
  theme(
    legend.position = "top",
    plot.caption = element_text(face = "bold", hjust = 0.5)
  )
```
### Mean GFP
```{r gfp_distribution}
# Histogram of per-track mean GFP (already log10-transformed), faceted by
# experiment and colored by filamentation status; vertical line marks the
# group mean (ggpubr's add = "mean").
processed_lineages_df %>%
  group_by(experiment_id, trap_id, track_id, filamentaded_track) %>%
  summarize(mean_gfp = mean(gfp), .groups = "drop") %>%
  gghistogram(
    x = "mean_gfp",
    facet.by = "experiment_id",
    color = "filamentaded_track",
    fill = "filamentaded_track",
    alpha = 1/3,
    add = "mean",
    xlab = "Mean fluorescent intensity (log10)",
    ylab = "Count of cells"
  ) +
  labs(
    color = "Cell status",
    fill = "Cell status"
  )
```
```{r,area_chart}
# Stacked area chart: fraction of cells filamented at each frame over time,
# per experiment. Dashed lines at t = 60 and t = 140 mark the antibiotic
# window, annotated "Start"/"End" on the Plasmid facet only.
# NOTE(review): summarise() returning multiple rows per group (the bare
# `filamentaded_at_frame = filamentaded_at_frame` line) is deprecated in
# dplyr >= 1.1 — consider mutate() + distinct() or reframe().
processed_lineages_df %>%
  count(experiment_id, filamentaded_at_frame, time) %>%
  group_by(experiment_id, time) %>%
  summarize(
    filamentaded_at_frame = filamentaded_at_frame,
    percentage = n / sum(n),   # within-time share of each status
    .groups = "drop"
  ) %>%
  ggplot(aes(x = time, y = percentage, fill = filamentaded_at_frame)) +
  geom_area(size = 0.5, alpha = 1/1) +
  geom_vline(xintercept = c(60, 140), linetype = "dashed") +
  geom_text(
    data = data.frame(
      x = 73,
      y = 0.8,
      label = "Start",
      experiment_id = "Plasmid"
    ),
    mapping = aes(x = x, y = y, label = label),
    size = 5.64,
    colour = "white",
    fontface = 2,
    inherit.aes = FALSE
  ) +
  geom_text(
    data = data.frame(
      x = 151,
      y = 0.8,
      label = "End",
      experiment_id = "Plasmid"
    ),
    mapping = aes(x = x, y = y, label = label),
    size = 5.64,
    colour = "white",
    fontface = 2,
    inherit.aes = FALSE
  ) +
  facet_grid(experiment_id ~ .) +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
  theme_bw() +
  theme(
    legend.position = "top",
    panel.spacing.y = unit(1, "lines")
  ) +
  labs(
    x = "Time (minutes)",
    y = "Percentage of cells",
    fill = "Cell status"
  )
```
```{r rain_plot}
# Raincloud plots (half-eye density + boxplot + dot strip) comparing the
# two cell-status groups at time == 0:
#   p_1 — initial cell length, p_2 — initial GFP.
# stat_compare_means() adds a Wilcoxon-style group-comparison p-value.
library(ggdist)
p_1 <- processed_lineages_df %>%
  filter(time == 0) %>%   # first observed time point per track
  ggplot(
    aes(
      x = filamentaded_track,
      y = length,
      group = filamentaded_track,
      fill = filamentaded_track,
      color = filamentaded_track
    )
  ) +
  ggdist::stat_halfeye(
    adjust = .5,
    width = .6,
    .width = 0,
    justification = -.2,
    point_colour = NA
  ) +
  geom_boxplot(
    width = .15,
    outlier.shape = NA,   # outliers already shown by the dot strip
    alpha = 1/3
  ) +
  ggdist::geom_dots(side = "bottom", alpha = 1/10) +
  facet_wrap(experiment_id ~ .) +
  theme_bw() +
  theme(legend.position = "top") +
  labs(
    x = "Cell status",
    y = "Initial length",
    color = "Cell status",
    fill = "Cell status"
  ) +
  scale_y_continuous(limits = c(0, 100)) +
  coord_flip() +
  stat_compare_means(label.y = 60, label.x = 1.5)
# Same layout for initial GFP (note: `side = "bottom"` inside ggplot() here
# is not an aesthetic and has no effect; the dots layer sets it itself).
p_2 <- processed_lineages_df %>%
  filter(time == 0) %>%
  ggplot(
    aes(
      x = filamentaded_track,
      y = gfp,
      group = filamentaded_track,
      fill = filamentaded_track,
      color = filamentaded_track
    ),
    side = "bottom"
  ) +
  ggdist::stat_halfeye(
    adjust = .5,
    width = .6,
    .width = 0,
    justification = -.2,
    point_colour = NA
  ) +
  geom_boxplot(
    width = .15,
    outlier.shape = NA,
    alpha = 1/3
  ) +
  ggdist::geom_dots(side = "bottom", alpha = 1/10) +
  facet_wrap(experiment_id ~ .) +
  theme_bw() +
  theme(legend.position = "top") +
  labs(
    x = "Cell status",
    y = "Initial GFP",
    color = "Cell status",
    fill = "Cell status"
  ) +
  coord_flip() +
  stat_compare_means(label.y = 2.5, label.x = 1.5)
```
```{r rain_cloud_2}
# Stack the two raincloud panels vertically and merge their legends.
library(patchwork)
library(ggpubr)
(p_1 / p_2) +
  plot_layout(guides = 'collect')
```
```{r metric_charts}
# Time courses of DsRed, GFP and length by cell status and experiment.
# The shaded rectangle (t = 60..140) marks the antibiotic window.
# NOTE(review): mean_cl_normal() CIs are computed into ymin/ymax, but
# geom_smooth(method = "loess") fits and shades its own band and does not
# use those aesthetics — the ci columns may be redundant; confirm intent.
processed_lineages_df %>%
  select(experiment_id, filamentaded_track, time, length, gfp, ds_red) %>%
  pivot_longer(
    cols = c(length, gfp, ds_red),
    names_to = "metric"
  ) %>%
  mutate(
    # Human-readable facet labels.
    metric = case_when(
      metric == "ds_red" ~ "DsRed",
      metric == "gfp" ~ "GFP",
      metric == "length" ~ "Length"
    )
  ) %>%
  group_by(experiment_id, filamentaded_track, time, metric) %>%
  summarise(
    ci = list(mean_cl_normal(value)),   # mean + normal-approx CI per cell
    .groups = "drop"
  ) %>%
  unnest(cols = c(ci)) %>%
  ggplot(aes(x = time, y = y, ymin= ymin, ymax=ymax, color = filamentaded_track)) +
  annotate("rect", xmin=60, xmax=140, ymin=-Inf, ymax=Inf, alpha=1/2, color = "transparent", fill = "#FCB565") +
  geom_smooth(method = "loess") +
  facet_grid(metric ~ experiment_id, scales = "free_y") +
  labs(
    x = "Time (minutes)",
    y = "Value",
    color = "Cell status"
  ) +
  theme_bw() +
  theme(legend.position = "top")
```
```{r survival_probability}
# Survival-probability estimate per initial-GFP bin:
#  1. Build a reference histogram of ALL gfp observations (100 bins) and
#     recover its bin counts/edges via ggplot_build().
#  2. Keep only "long" tracks that span the whole antibiotic window.
#  3. Re-histogram each (experiment, status) group's initial GFP on the SAME
#     bin edges, then divide group counts by the reference counts.
# NOTE(review): the denominator counts all time points while the numerator
# counts tracks, so "survival_probability" is a relative enrichment, not a
# true probability — confirm this is intended.
gfp_control_hist <- processed_lineages_df %>%
  ggplot(aes(x = gfp)) +
  geom_histogram(bins = 100)
gfp_hist_data <- gfp_control_hist %>%
  ggplot_build() %>%
  pluck("data", 1) %>%
  select(count, x, xmin, xmax) %>%
  as_tibble()
# Shared bin edges: every left edge plus the final right edge.
gfp_breaks <- gfp_hist_data %>%
  {c(.$xmin, last(.$xmax))}
survival_probability_df <- processed_lineages_df %>%
  group_by(experiment_id, lineage_id, trap_id, filamentaded_track) %>%
  summarise(
    initial_gfp = first(gfp),
    # Track must start before the antibiotic and end after it.
    is_long_track = first(centered_frame) < unique(centered_antibiotic_start_frame) &&
      last(centered_frame) > unique(centered_antibiotic_end_frame),
    .groups = "drop"
  ) %>%
  filter(is_long_track) %>%
  group_by(experiment_id, filamentaded_track) %>%
  group_modify(~{
    tibble(
      plot = list(
        ggplot(data = .x, aes(x = initial_gfp)) +
          geom_histogram(breaks = gfp_breaks)
      )
    )
  }) %>%
  mutate(
    # Extract per-group bin counts and align them with the reference bins.
    counts = map(plot, ggplot_build),
    counts = map(counts, pluck, "data", 1),
    counts = map(counts, add_column, control_count = gfp_hist_data$count),
    counts = map(counts, select, gfp = x, control_count, count)
  ) %>%
  unnest(counts) %>%
  mutate(
    survival_probability = count / control_count,
    #survival_probability = survival_probability / max(survival_probability, na.rm = TRUE)
  ) %>%
  filter(survival_probability != 0) %>%   # drop empty bins
  glimpse()
```
```{r survival_probability_plot}
# Survival probability as a function of initial GFP, colored by cell status
# and distinguished by experiment via line type.
survival_probability_df %>%
  ggplot(aes(x = gfp, y = survival_probability, color = filamentaded_track, linetype = experiment_id)) +
  geom_point() +
  geom_line() +
  scale_y_continuous(labels = scales::percent) +
  theme_bw() +
  theme(
    legend.position = "top"
  ) +
  labs(
    x = "Initial GFP",
    y = "Survival probability",
    color = "Cell status",
    linetype = "Experiment"
  )
```
```{r survival_probability_plot_area}
# Normalized stacked area: share of each cell status per initial-GFP bin.
# NOTE: the group_by() before ggplot() has no effect on the plot — ggplot2
# ignores dplyr grouping; it could be removed.
survival_probability_df %>%
  group_by(filamentaded_track, gfp) %>%
  ggplot(aes(x = gfp, y = count, fill = filamentaded_track)) +
  geom_area(position = "fill", stat="identity") +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
  theme_bw() +
  theme(
    legend.position = "top"
  ) +
  labs(
    x = "Initial GFP (log10)",
    y = "Percentage of cells",
    fill = "Cell status"
  )
```
```{r}
# Per-track summary: GFP/length/time at three reference points (initial,
# first SOS-positive frame, end) plus a survival flag.
# NOTE(review): `filamentaded_at_frame` was recoded to a factor during
# preprocessing; base which.min()/which.max() require numeric input —
# confirm this column's type here, otherwise these lines error.
# NOTE(review): `first_false` is computed but never used below; the factor
# label "Dit not survive" contains a typo ("Did") but is a runtime string
# other code may match on, so it is left unchanged here.
status_points_df <- processed_lineages_df %>%
  group_by(experiment_id, trap_id, track_id, filamentaded_track) %>%
  summarize(
    first_false = which.min(filamentaded_at_frame),
    first_true = which.max(filamentaded_at_frame),   # index of first SOS frame
    initial_gfp = first(gfp),
    sos_gfp = gfp[first_true],
    end_gfp = last(gfp),
    #diff_sos_initial_gfp = sos_gfp -initial_gfp,
    #diff_end_sos_gfp = end_gfp - sos_gfp,
    diff_end_intial_gfp = end_gfp - initial_gfp,
    initial_length = first(length),
    sos_length = length[first_true],
    end_length = last(length),
    #diff_sos_initial_length = sos_length - initial_length,
    #diff_end_sos_length = end_length - sos_length,
    diff_end_intial_length = end_length - initial_length,
    # Frames are spaced 10 minutes apart, hence the * 10 conversions.
    initial_time = first(centered_frame) * 10,
    sos_time = centered_frame[first_true] * 10,
    end_time = last(centered_frame) * 10,
    life_time = end_time - initial_time,
    #diff_sos_intial_time = sos_time - initial_time,
    #diff_end_sos_time = end_time - sos_time,
    #diff_end_intial_time = end_time - initial_time,
    # A track "survives" if it starts before and ends after the end of the
    # antibiotic window.
    is_survivor = initial_time < unique(centered_antibiotic_end_frame) * 10 &&
      end_time > unique(centered_antibiotic_end_frame) * 10,
    is_survivor = ifelse(is_survivor, "Survived", "Dit not survive"),
    is_survivor = factor(is_survivor),
    .groups = "drop"
  ) %>%
  #filter(initial_frame <= sos_frame, sos_frame <= end_frame) %>%
  glimpse()
```
```{r}
# Proportion of filamented vs. non-filamented tracks at each observed
# track lifetime, per experiment (100%-stacked bars).
status_points_df %>%
  count(experiment_id, filamentaded_track, life_time) %>%
  ggplot(aes(x = as.factor(life_time), y = n, fill = filamentaded_track)) +
  geom_bar(position = "fill", stat="identity", width = 1) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
  facet_grid(experiment_id ~ .) +
  theme_bw() +
  theme(
    legend.position = "top",
    panel.spacing.y = unit(1, "lines")
  ) +
  labs(
    x = "Cell life time",
    y = "Percentage of cells",
    fill = "Cell status"
  )
```
```{r}
# Median initial / SOS / end LENGTH against track lifetime, faceted by
# status and experiment; a vertical line connects the three medians at
# each lifetime.
status_points_df %>%
  group_by(experiment_id, filamentaded_track, life_time) %>%
  summarize(
    n = n(),
    initial_length = median(initial_length),
    sos_length = median(sos_length),
    end_length = median(end_length),
    .groups = "drop"
  ) %>%
  pivot_longer(
    cols = contains("length"),
    names_to = "length_type"
  ) %>%
  mutate(
    length_type = factor(length_type, levels = c("initial_length", "sos_length", "end_length"), labels = c("Initial", "SOS", "End"))
  ) %>%
  ggplot(aes(x = life_time, y = value)) +
  geom_line(aes(group = life_time)) +
  geom_point(aes(color = length_type), alpha = 1/1) +
  facet_grid(filamentaded_track ~ experiment_id) +
  theme_bw() +
  theme(
    legend.position = "top"
  ) +
  labs(
    x = "Cell life time",
    y = "Length value",
    color = "Length type"
  )
```
```{r}
# Same layout as the length plot above, but for median GFP at the three
# reference points (initial / SOS / end) per track lifetime.
status_points_df %>%
  group_by(experiment_id, filamentaded_track, life_time) %>%
  summarize(
    n = n(),
    initial_gfp = median(initial_gfp),
    sos_gfp = median(sos_gfp),
    end_gfp = median(end_gfp),
    .groups = "drop"
  ) %>%
  pivot_longer(
    cols = contains("gfp"),
    names_to = "gfp_type"
  ) %>%
  mutate(
    gfp_type = factor(gfp_type, levels = c("initial_gfp", "sos_gfp", "end_gfp"), labels = c("Initial", "SOS", "End"))
  ) %>%
  ggplot(aes(x = life_time, y = value)) +
  geom_line(aes(group = life_time)) +
  geom_point(aes(color = gfp_type), alpha = 1/1) +
  facet_grid(filamentaded_track ~ experiment_id) +
  theme_bw() +
  theme(
    legend.position = "top"
  ) +
  labs(
    x = "Cell life time",
    y = "GFP value",
    color = "GFP type"
  )
```
```{r}
# Build the modelling table. The outcome `out` is the 4-level interaction
# of survival status and filamentation status; the two source columns are
# then dropped so they cannot leak into the predictors.
# The as.character() -> as.factor() round trip drops any unused
# interaction levels before modelling.
library(tidymodels)
model_data <- status_points_df %>%
  select(is_survivor, filamentaded_track, contains("gfp"), contains("length"), -contains("diff"), -contains("sos")) %>%
  mutate(
    out = interaction(is_survivor, filamentaded_track),
    out = as.character(out),
    out = as.factor(out)
  ) %>%
  select(-is_survivor, -filamentaded_track) %>%
  glimpse()
count(model_data, out)   # class balance of the outcome
```
```{r}
# Stratified 75/25 train/test split on the 4-class outcome.
set.seed(123)
model_data_split <- initial_split(model_data, prop = 0.75, strata = out)
training_data <- training(model_data_split)
testing_data <- testing(model_data_split)
# BUG FIX: this line previously printed `model_split`, an object that does
# not exist (the split above is named `model_data_split`), which aborted
# the chunk with "object 'model_split' not found".
model_data_split
```
```{r}
# 20-fold cross-validation on the training set, stratified by the outcome.
data_folds <- vfold_cv(training_data, v = 20, strata = out)
data_folds
```
```{r}
# Preprocessing recipe: normalize numeric predictors (except time-based
# ones), drop zero-variance predictors, one-hot encode nominal predictors,
# and downsample to balance the outcome classes (themis::step_downsample).
library(themis)
data_recipe <- recipe(out ~ ., data = training_data) %>%
  #step_corr(all_numeric(), threshold = 0.8) %>%
  step_normalize(all_numeric(), -contains("time")) %>%
  step_zv(all_predictors()) %>%
  step_dummy(all_nominal(), -all_outcomes()) %>%
  step_downsample(out)
summary(data_recipe)
```
```{r}
# Model specification used for tuning: a random forest (ranger engine)
# with tunable mtry, trees and min_n.
#
# BUG FIX (dead code): the original chunk first built a decision_tree()
# spec and then immediately overwrote it with this rand_forest() spec, so
# the tree spec was never used; it has been removed. The (now slightly
# misleading) name `dt_tune_model` is kept because later chunks reference
# it.
# NOTE(review): vip() on the ranger fit later in the document may require
# set_engine("ranger", importance = "impurity") — confirm.
dt_tune_model <- rand_forest(
  mode = "classification",
  engine = "ranger",
  mtry = tune(),
  trees = tune(),
  min_n = tune()
)
dt_tune_model
```
```{r}
# Random hyperparameter grid of 10 candidates; mtry is restricted to 2-3
# predictors per split.
set.seed(123)
dt_grid <- grid_random(
  mtry() %>% range_set(c(2, 3)),
  trees(),
  min_n(),
  size = 10
)
dt_grid
```
```{r}
# Bundle the model specification and the preprocessing recipe.
data_wkfl <- workflow() %>%
  add_model(dt_tune_model) %>%
  add_recipe(data_recipe)
data_wkfl
```
```{r}
# Tune the workflow over the random grid using the CV folds, then show
# the five best candidates by ROC AUC.
dt_tuning <- data_wkfl %>%
  tune_grid(
    resamples = data_folds,
    grid = dt_grid
  )
dt_tuning %>%
  show_best(metric = "roc_auc", n = 5)
```
```{r}
# Keep the single best hyperparameter combination by ROC AUC.
best_dt_model <- dt_tuning %>%
  select_best(metric = "roc_auc")
best_dt_model
```
```{r}
# Pin the tuned hyperparameters into the workflow.
final_data_wkfl <- data_wkfl %>%
  finalize_workflow(best_dt_model)
final_data_wkfl
```
```{r}
# Fit the finalized workflow on the training data and pull out the
# underlying parsnip model object (for variable-importance inspection).
data_wf_fit <- final_data_wkfl %>%
  fit(data = training_data)
tree_fit <- data_wf_fit %>%
  extract_fit_parsnip()
```
```{r}
# Variable-importance plot of the fitted model.
# NOTE(review): ranger computes no importance scores by default — this may
# error unless the spec sets importance (e.g. "impurity"); confirm.
vip::vip(tree_fit)
```
```{r}
# Final evaluation: refit on the full training set and score once on the
# held-out test set, then report the test metrics.
data_final_fit <- final_data_wkfl %>%
  last_fit(split = model_data_split)
data_final_fit %>%
  collect_metrics()
```
```{r}
# ROC curves (one-vs-all per class) for the final test-set predictions.
# BUG FIX: the model's outcome is the 4-level factor `out`, not
# `is_survivor`, and no `.pred_Survived` column exists — the probability
# columns are named after the levels of `out`. Select all `.pred_*`
# probability columns (excluding the hard-class `.pred_class`) via
# tidyselect, which yardstick's roc_curve() supports.
data_final_fit %>%
  collect_predictions() %>%
  roc_curve(truth = out, starts_with(".pred_"), -.pred_class) %>%
  autoplot()
```
```{r}
# Confusion matrix of predicted vs. true class on the test set.
# BUG FIX: the outcome column in the predictions is `out` (the 4-level
# interaction factor), not `is_survivor`, so `truth = is_survivor` failed.
tree_predictions <- data_final_fit %>% collect_predictions()
conf_mat(tree_predictions, truth = out, estimate = .pred_class) %>%
  autoplot()
```
```{r}
# Tally how many tracks fall into each survival category.
count(status_points_df, is_survivor)
```
|
############################################################################
# Input file download, unzip and load
# Downloads the UCI household power consumption data (if run end-to-end),
# unzips it into ./data and reads the semicolon-separated file.
# NOTE: method="curl" requires curl on the PATH (may fail on Windows).
############################################################################
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
filePath <- file.path("./data")
hpcMaster <- read.table(file.path(filePath, "household_power_consumption.txt" ),
                        header = TRUE,
                        sep = ";",
                        stringsAsFactors = FALSE,
                        dec=".")
############################################################################
# Subset to 1 Feb 2007 and 2 Feb 2007 (Date column is in d/m/Y format,
# hence the strings "1/2/2007" and "2/2/2007")
############################################################################
hpc2007 <- hpcMaster[hpcMaster$Date %in% c("1/2/2007","2/2/2007"),]
############################################################################
# Create timestamp and convert variable to numeric
# (Global_active_power is read as character; "?" markers become NA)
############################################################################
datetimeStamp <- strptime(paste(hpc2007$Date, hpc2007$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
hpc2007$Global_active_power <- as.numeric(hpc2007$Global_active_power)
############################################################################
# Create Chart: 480x480 PNG line plot of global active power over time
###########################################################################
png("plot2.png", width=480, height=480)
plot(datetimeStamp, hpc2007$Global_active_power,
     type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | charaje/ExData_Plotting1 | R | false | false | 1,759 | r | ############################################################################
# Input file download, unzip and load
# Downloads the UCI household power consumption data (if run end-to-end),
# unzips it into ./data and reads the semicolon-separated file.
# NOTE: method="curl" requires curl on the PATH (may fail on Windows).
############################################################################
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
filePath <- file.path("./data")
hpcMaster <- read.table(file.path(filePath, "household_power_consumption.txt" ),
                        header = TRUE,
                        sep = ";",
                        stringsAsFactors = FALSE,
                        dec=".")
############################################################################
# Subset to 1 Feb 2007 and 2 Feb 2007 (Date column is in d/m/Y format,
# hence the strings "1/2/2007" and "2/2/2007")
############################################################################
hpc2007 <- hpcMaster[hpcMaster$Date %in% c("1/2/2007","2/2/2007"),]
############################################################################
# Create timestamp and convert variable to numeric
# (Global_active_power is read as character; "?" markers become NA)
############################################################################
datetimeStamp <- strptime(paste(hpc2007$Date, hpc2007$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
hpc2007$Global_active_power <- as.numeric(hpc2007$Global_active_power)
############################################################################
# Create Chart: 480x480 PNG line plot of global active power over time
###########################################################################
png("plot2.png", width=480, height=480)
plot(datetimeStamp, hpc2007$Global_active_power,
     type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spagi2_master.R
\name{generate_pathway_ppi_data_frame}
\alias{generate_pathway_ppi_data_frame}
\title{generate_pathway_ppi_data_frame}
\usage{
generate_pathway_ppi_data_frame(active.pathway.path)
}
\arguments{
\item{active.pathway.path}{A list of sublist containing active pathway path data for each cell / tissue.}
}
\value{
This function returns a pathway PPI data frame built from the active pathway data, for drawing pathway figures in Cytoscape.
}
\description{
This function generates pathway PPI data frame from the active pathway data to draw pathway figures using cytoscape.
}
\details{
This function generates pathway PPI data frame from the active pathway data to draw pathway figures using cytoscape.
}
\examples{
#Pre-process the 'tooth.epi.E13.5' data
tooth.epi.E13.5.processed.data<-preprocess_querydata_new(cell.tissue.data = tooth.epi.E13.5, exp.cutoff.th = 5.0, species="mmusculus")
#Generate the mouse homology pathway path data
mouse.homology.pathway.path<-generate_homology_pathways(species1 = "hsapiens", species2 = "mmusculus", pathway.path = pathway.path.new)
#Identify active pathway paths of the processed query data
tooth.epi.E13.5.active.pathway<-identify_active_pathway_path_new(pathway.path = mouse.homology.pathway.path, processed.query.data = tooth.epi.E13.5.processed.data)
#Generate the active pathway paths data frame
tooth.epi.E13.5.active.pathway.df<-generate_pathway_ppi_data_frame(active.pathway.path = tooth.epi.E13.5.active.pathway)
tooth.epi.E13.5.active.pathway.df[[1]][1]
}
| /man/generate_pathway_ppi_data_frame.Rd | no_license | humayun2017/SPAGI2 | R | false | true | 1,586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spagi2_master.R
\name{generate_pathway_ppi_data_frame}
\alias{generate_pathway_ppi_data_frame}
\title{generate_pathway_ppi_data_frame}
\usage{
generate_pathway_ppi_data_frame(active.pathway.path)
}
\arguments{
\item{active.pathway.path}{A list of sublist containing active pathway path data for each cell / tissue.}
}
\value{
This function returns a pathway PPI data frame built from the active pathway data, for drawing pathway figures in Cytoscape.
}
\description{
This function generates pathway PPI data frame from the active pathway data to draw pathway figures using cytoscape.
}
\details{
This function generates pathway PPI data frame from the active pathway data to draw pathway figures using cytoscape.
}
\examples{
#Pre-process the 'tooth.epi.E13.5' data
tooth.epi.E13.5.processed.data<-preprocess_querydata_new(cell.tissue.data = tooth.epi.E13.5, exp.cutoff.th = 5.0, species="mmusculus")
#Generate the mouse homology pathway path data
mouse.homology.pathway.path<-generate_homology_pathways(species1 = "hsapiens", species2 = "mmusculus", pathway.path = pathway.path.new)
#Identify active pathway paths of the processed query data
tooth.epi.E13.5.active.pathway<-identify_active_pathway_path_new(pathway.path = mouse.homology.pathway.path, processed.query.data = tooth.epi.E13.5.processed.data)
#Generate the active pathway paths data frame
tooth.epi.E13.5.active.pathway.df<-generate_pathway_ppi_data_frame(active.pathway.path = tooth.epi.E13.5.active.pathway)
tooth.epi.E13.5.active.pathway.df[[1]][1]
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/generics.R, R/msgfPar-getters.R
\docType{methods}
\name{chargeRange}
\alias{chargeRange}
\alias{chargeRange,msgfPar-method}
\alias{chargeRange<-}
\alias{chargeRange<-,msgfPar,msgfParChargeRange-method}
\alias{chargeRange<-,msgfPar,numeric-method}
\title{Get and set the charge range in msgfPar objects}
\usage{
chargeRange(object)
chargeRange(object) <- value
\S4method{chargeRange}{msgfPar}(object)
\S4method{chargeRange}{msgfPar,numeric}(object) <- value
\S4method{chargeRange}{msgfPar,msgfParChargeRange}(object) <- value
}
\arguments{
\item{object}{An msgfPar object}
\item{value}{Either a numeric vector of length 2 or an msgfParChargeRange
object}
}
\value{
In case of the getter a numeric vector with the named elements 'min'
and 'max'
}
\description{
These functions allow you to retrieve and set the charge range in the msgfPar
object of interest
}
\section{Methods (by class)}{
\itemize{
\item \code{msgfPar}: Get the charge range
\item \code{object = msgfPar,value = numeric}: Set the charge range using lower and upper bounds
\item \code{object = msgfPar,value = msgfParChargeRange}: Set the charge range using a dedicated
msgfParChargeRange object
}}
\examples{
parameters <- msgfPar(system.file(package='MSGFplus', 'extdata', 'milk-proteins.fasta'))
chargeRange(parameters) <- c(2, 4)
chargeRange(parameters)
}
\seealso{
Other msgfPar-getter_setter: \code{\link{db}},
\code{\link{db,msgfPar-method}}, \code{\link{db<-}},
\code{\link{db<-,msgfPar,character-method}};
\code{\link{enzyme}},
\code{\link{enzyme,msgfPar-method}},
\code{\link{enzyme<-}},
\code{\link{enzyme<-,msgfPar,character-method}},
\code{\link{enzyme<-,msgfPar,msgfParEnzyme-method}},
\code{\link{enzyme<-,msgfPar,numeric-method}};
\code{\link{fragmentation}},
\code{\link{fragmentation,msgfPar-method}},
\code{\link{fragmentation<-}},
\code{\link{fragmentation<-,msgfPar,character-method}},
\code{\link{fragmentation<-,msgfPar,msgfParFragmentation-method}},
\code{\link{fragmentation<-,msgfPar,numeric-method}};
\code{\link{instrument}},
\code{\link{instrument,msgfPar-method}},
\code{\link{instrument<-}},
\code{\link{instrument<-,msgfPar,character-method}},
\code{\link{instrument<-,msgfPar,msgfParInstrument-method}},
\code{\link{instrument<-,msgfPar,numeric-method}};
\code{\link{isotopeError}},
\code{\link{isotopeError,msgfPar-method}},
\code{\link{isotopeError<-}},
\code{\link{isotopeError<-,msgfPar,msgfParIsotopeError-method}},
\code{\link{isotopeError<-,msgfPar,numeric-method}};
\code{\link{lengthRange}},
\code{\link{lengthRange,msgfPar-method}},
\code{\link{lengthRange<-}},
\code{\link{lengthRange<-,msgfPar,msgfParLengthRange-method}},
\code{\link{lengthRange<-,msgfPar,numeric-method}};
\code{\link{matches}},
\code{\link{matches,msgfPar-method}},
\code{\link{matches<-}},
\code{\link{matches<-,msgfPar,msgfParMatches-method}},
\code{\link{matches<-,msgfPar,numeric-method}};
\code{\link{mods}}, \code{\link{mods,msgfPar-method}},
\code{\link{mods<-}},
\code{\link{mods<-,msgfPar,msgfParModificationList-method}},
\code{\link{nMod}}, \code{\link{nMod,msgfPar-method}},
\code{\link{nMod<-}},
\code{\link{nMod<-,msgfPar,numeric-method}};
\code{\link{ntt}}, \code{\link{ntt,msgfPar-method}},
\code{\link{ntt<-}},
\code{\link{ntt<-,msgfPar,msgfParNtt-method}},
\code{\link{ntt<-,msgfPar,numeric-method}};
\code{\link{protocol}},
\code{\link{protocol,msgfPar-method}},
\code{\link{protocol<-}},
\code{\link{protocol<-,msgfPar,character-method}},
\code{\link{protocol<-,msgfPar,msgfParProtocol-method}},
\code{\link{protocol<-,msgfPar,numeric-method}};
\code{\link{tda}}, \code{\link{tda,msgfPar-method}},
\code{\link{tda<-}},
\code{\link{tda<-,msgfPar,logical-method}},
\code{\link{tda<-,msgfPar,msgfParTda-method}};
\code{\link{tolerance}},
\code{\link{tolerance,msgfPar-method}},
\code{\link{tolerance<-}},
\code{\link{tolerance<-,msgfPar,character-method}},
\code{\link{tolerance<-,msgfPar,msgfParTolerance-method}},
\code{\link{toleranceRange}},
\code{\link{toleranceRange,msgfPar-method}},
\code{\link{toleranceRange<-}},
\code{\link{toleranceRange<-,msgfPar,numeric-method}},
\code{\link{toleranceUnit}},
\code{\link{toleranceUnit,msgfPar-method}},
\code{\link{toleranceUnit<-}},
\code{\link{toleranceUnit<-,msgfPar,character-method}}
}
| /man/chargeRange.Rd | no_license | jgmeyerucsd/MSGFplus | R | false | false | 4,466 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/generics.R, R/msgfPar-getters.R
\docType{methods}
\name{chargeRange}
\alias{chargeRange}
\alias{chargeRange,msgfPar-method}
\alias{chargeRange<-}
\alias{chargeRange<-,msgfPar,msgfParChargeRange-method}
\alias{chargeRange<-,msgfPar,numeric-method}
\title{Get and set the charge range in msgfPar objects}
\usage{
chargeRange(object)
chargeRange(object) <- value
\S4method{chargeRange}{msgfPar}(object)
\S4method{chargeRange}{msgfPar,numeric}(object) <- value
\S4method{chargeRange}{msgfPar,msgfParChargeRange}(object) <- value
}
\arguments{
\item{object}{An msgfPar object}
\item{value}{Either a numeric vector of length 2 or an msgfParChargeRange
object}
}
\value{
In case of the getter a numeric vector with the named elements 'min'
and 'max'
}
\description{
These functions allow you to retrieve and set the charge range in the msgfPar
object of interest
}
\section{Methods (by class)}{
\itemize{
\item \code{msgfPar}: Get the charge range
\item \code{object = msgfPar,value = numeric}: Set the charge range using lower and upper bounds
\item \code{object = msgfPar,value = msgfParChargeRange}: Set the charge range using a dedicated
msgfParChargeRange object
}}
\examples{
parameters <- msgfPar(system.file(package='MSGFplus', 'extdata', 'milk-proteins.fasta'))
chargeRange(parameters) <- c(2, 4)
chargeRange(parameters)
}
\seealso{
Other msgfPar-getter_setter: \code{\link{db}},
\code{\link{db,msgfPar-method}}, \code{\link{db<-}},
\code{\link{db<-,msgfPar,character-method}};
\code{\link{enzyme}},
\code{\link{enzyme,msgfPar-method}},
\code{\link{enzyme<-}},
\code{\link{enzyme<-,msgfPar,character-method}},
\code{\link{enzyme<-,msgfPar,msgfParEnzyme-method}},
\code{\link{enzyme<-,msgfPar,numeric-method}};
\code{\link{fragmentation}},
\code{\link{fragmentation,msgfPar-method}},
\code{\link{fragmentation<-}},
\code{\link{fragmentation<-,msgfPar,character-method}},
\code{\link{fragmentation<-,msgfPar,msgfParFragmentation-method}},
\code{\link{fragmentation<-,msgfPar,numeric-method}};
\code{\link{instrument}},
\code{\link{instrument,msgfPar-method}},
\code{\link{instrument<-}},
\code{\link{instrument<-,msgfPar,character-method}},
\code{\link{instrument<-,msgfPar,msgfParInstrument-method}},
\code{\link{instrument<-,msgfPar,numeric-method}};
\code{\link{isotopeError}},
\code{\link{isotopeError,msgfPar-method}},
\code{\link{isotopeError<-}},
\code{\link{isotopeError<-,msgfPar,msgfParIsotopeError-method}},
\code{\link{isotopeError<-,msgfPar,numeric-method}};
\code{\link{lengthRange}},
\code{\link{lengthRange,msgfPar-method}},
\code{\link{lengthRange<-}},
\code{\link{lengthRange<-,msgfPar,msgfParLengthRange-method}},
\code{\link{lengthRange<-,msgfPar,numeric-method}};
\code{\link{matches}},
\code{\link{matches,msgfPar-method}},
\code{\link{matches<-}},
\code{\link{matches<-,msgfPar,msgfParMatches-method}},
\code{\link{matches<-,msgfPar,numeric-method}};
\code{\link{mods}}, \code{\link{mods,msgfPar-method}},
\code{\link{mods<-}},
\code{\link{mods<-,msgfPar,msgfParModificationList-method}},
\code{\link{nMod}}, \code{\link{nMod,msgfPar-method}},
\code{\link{nMod<-}},
\code{\link{nMod<-,msgfPar,numeric-method}};
\code{\link{ntt}}, \code{\link{ntt,msgfPar-method}},
\code{\link{ntt<-}},
\code{\link{ntt<-,msgfPar,msgfParNtt-method}},
\code{\link{ntt<-,msgfPar,numeric-method}};
\code{\link{protocol}},
\code{\link{protocol,msgfPar-method}},
\code{\link{protocol<-}},
\code{\link{protocol<-,msgfPar,character-method}},
\code{\link{protocol<-,msgfPar,msgfParProtocol-method}},
\code{\link{protocol<-,msgfPar,numeric-method}};
\code{\link{tda}}, \code{\link{tda,msgfPar-method}},
\code{\link{tda<-}},
\code{\link{tda<-,msgfPar,logical-method}},
\code{\link{tda<-,msgfPar,msgfParTda-method}};
\code{\link{tolerance}},
\code{\link{tolerance,msgfPar-method}},
\code{\link{tolerance<-}},
\code{\link{tolerance<-,msgfPar,character-method}},
\code{\link{tolerance<-,msgfPar,msgfParTolerance-method}},
\code{\link{toleranceRange}},
\code{\link{toleranceRange,msgfPar-method}},
\code{\link{toleranceRange<-}},
\code{\link{toleranceRange<-,msgfPar,numeric-method}},
\code{\link{toleranceUnit}},
\code{\link{toleranceUnit,msgfPar-method}},
\code{\link{toleranceUnit<-}},
\code{\link{toleranceUnit<-,msgfPar,character-method}}
}
|
#######################################################################################################################
# delete old results ##################################################################################################
# Remove any previously computed recovery tables (both ionization modes)
# so stale results cannot survive a rerun.
those<-list(0)
those[[1]]<-file.path(logfile[[1]],"quantification","target_recov_table_pos")
those[[2]]<-file.path(logfile[[1]],"quantification","target_recov_table_neg")
for(n in 1:length(those)){
	if(file.exists(those[[n]])){
		file.remove(those[[n]])
	}
}
rm(those)
# Load the project's measurements table; everything is read as character
# and converted explicitly where needed below.
measurements<-read.csv(file=file.path(logfile[[1]],"dataframes","measurements"),colClasses = "character");
######################################################################################################################
######################################################################################################################
# POSITIVE ###########################################################################################################
# POSITIVE-MODE RECOVERY TABLE
# For every included "spiked" positive-mode file, compare its target
# quantities against the related unspiked file (linked via tag2) and store
# the pairwise differences ("recoveries") in target_recov_table_pos.
# Only proceeds if spiked positive files and a quantitation table exist.
if(
	any(measurements[,"Mode"]=="positive" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE") &
	file.exists(file.path(logfile[[1]],"quantification","target_quant_table_pos"))
){
	load(file.path(logfile[[1]],"quantification","target_quant_table_pos"))
	those_files<-measurements[(measurements[,"Mode"]=="positive" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE"),,drop=FALSE]
	# Sort files newest-first by date, time, then ID.
	# NOTE(review): columns 6 and 7 are addressed by position — presumably
	# Date and Time (they match the named access further down); confirm.
	atdate<-those_files[,6]
	atdate<-as.Date(atdate);
	attime<-those_files[,7]
	attime<-as.difftime(attime);
	ord<-order(as.numeric(atdate),as.numeric(attime),as.numeric(those_files[,1]),decreasing=TRUE);
	those_files<-those_files[ord,,drop=FALSE]
	# Optionally cap the number of (most recent) files considered.
	if(logfile$parameters$recov_files_included!="FALSE"){
		if(as.numeric(logfile$parameters$recov_files_included)<length(those_files[,1])){
			those_files<-those_files[1:as.numeric(logfile$parameters$recov_files_included),,drop=FALSE]
		}
	}
	# Rows 1-5 of the quantitation table hold header metadata; targets
	# start at row 6 (ID in column 1, name in column 2).
	those_targets<-target_quant_table_pos[6:length(target_quant_table_pos[,1]),1:2,drop=FALSE]
	# Result matrix: 4 header rows (Name/Type/Date/Time) + one row per
	# target; 2 ID columns + one column per spiked file.
	target_recov_table_pos<-matrix(nrow=(length(those_targets[,1])+4),ncol=(length(those_files[,1])+2),"")
	colnames(target_recov_table_pos)<-c("Target ID","Target name",those_files[,"ID"])
	rownames(target_recov_table_pos)<-c("Name","Type","Date","Time",those_targets[,1])
	target_recov_table_pos[1,]<-c("","",as.character(those_files[,"Name"]))
	target_recov_table_pos[2,]<-c("","",as.character(those_files[,"Type"]))
	target_recov_table_pos[3,]<-c("","",as.character(those_files[,"Date"]))
	target_recov_table_pos[4,]<-c("","",as.character(those_files[,"Time"]))
	target_recov_table_pos[,1]<-c("","","","",those_targets[,1])
	target_recov_table_pos[,2]<-c("","","","",those_targets[,2])
	##################################################################################################################
	# For each spiked file, diff its quantities against its related file.
	for(i in 1:length(those_files[,"ID"])){
		from_ID<-those_files[i,"ID"]
		to_ID<-those_files[i,"tag2"]	# ID of the related (unspiked) file
		if(!any(measurements[measurements[,"Mode"]=="positive","ID"]==to_ID)){ # this should not happen anyway - included in check_project
			cat("\n WARNING: Missing relation for spiked file detected! Please revise");
			next;
		}
		if(!any(colnames(target_quant_table_pos)==from_ID)){
			next;	# spiked file has no quantitation column
		}
		for(j in 5:length(target_recov_table_pos[,1])){
			target_ID<-target_recov_table_pos[j,1]
			# Quantities are stored as comma-separated strings; entries
			# containing "!" are flagged/invalid and skipped.
			from_quant<-target_quant_table_pos[
				target_quant_table_pos[,1]==target_ID,
				colnames(target_quant_table_pos)==from_ID
			]
			if(grepl("!",from_quant)){next}
			to_quant<-target_quant_table_pos[
				target_quant_table_pos[,1]==target_ID,
				colnames(target_quant_table_pos)==to_ID
			]
			if(grepl("!",to_quant)){next}
			from_quant<-as.numeric(strsplit(from_quant,",")[[1]])
			to_quant<-as.numeric(strsplit(to_quant,",")[[1]])
			# All pairwise differences spiked - unspiked.
			recov<-c()
			for(n in 1:length(from_quant)){
				for(m in 1:length(to_quant)){
					recov<-c(recov,
						from_quant[n]-to_quant[m]
					)
				}
			}
			recov<-recov[recov>=0] # cannot be negatively concentrated!
			if(length(recov)==0){next}
			recov<-paste(as.character(recov),collapse=",")
			target_recov_table_pos[
				target_recov_table_pos[,1]==target_ID,
				colnames(target_recov_table_pos)==from_ID
			]<-recov
		}
	}
	##################################################################################################################
	save(target_recov_table_pos,file=file.path(logfile[[1]],"quantification","target_recov_table_pos"))
	rm(target_quant_table_pos,target_recov_table_pos)
}
######################################################################################################################
######################################################################################################################
# NEGATIVE ###########################################################################################################
if(
any(measurements[,"Mode"]=="negative" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE") &
file.exists(file.path(logfile[[1]],"quantification","target_quant_table_neg"))
){
load(file.path(logfile[[1]],"quantification","target_quant_table_neg"))
those_files<-measurements[(measurements[,"Mode"]=="negative" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE"),,drop=FALSE]
atdate<-those_files[,6]
atdate<-as.Date(atdate);
attime<-those_files[,7]
attime<-as.difftime(attime);
ord<-order(as.numeric(atdate),as.numeric(attime),as.numeric(those_files[,1]),decreasing=TRUE);
those_files<-those_files[ord,,drop=FALSE]
if(logfile$parameters$recov_files_included!="FALSE"){
if(as.numeric(logfile$parameters$recov_files_included)<length(those_files[,1])){
those_files<-those_files[1:as.numeric(logfile$parameters$recov_files_included),,drop=FALSE]
}
}
those_targets<-target_quant_table_neg[6:length(target_quant_table_neg[,1]),1:2,drop=FALSE]
target_recov_table_neg<-matrix(nrow=(length(those_targets[,1])+4),ncol=(length(those_files[,1])+2),"")
colnames(target_recov_table_neg)<-c("Target ID","Target name",those_files[,"ID"])
rownames(target_recov_table_neg)<-c("Name","Type","Date","Time",those_targets[,1])
target_recov_table_neg[1,]<-c("","",as.character(those_files[,"Name"]))
target_recov_table_neg[2,]<-c("","",as.character(those_files[,"Type"]))
target_recov_table_neg[3,]<-c("","",as.character(those_files[,"Date"]))
target_recov_table_neg[4,]<-c("","",as.character(those_files[,"Time"]))
target_recov_table_neg[,1]<-c("","","","",those_targets[,1])
target_recov_table_neg[,2]<-c("","","","",those_targets[,2])
##################################################################################################################
for(i in 1:length(those_files[,"ID"])){
from_ID<-those_files[i,"ID"]
to_ID<-those_files[i,"tag2"]
if(!any(measurements[measurements[,"Mode"]=="negative","ID"]==to_ID)){ # this should not happen anyway - included in check_project
cat("\n WARNING: Missing relation for spiked file detected! Please revise");
next;
}
if(!any(colnames(target_quant_table_neg)==from_ID)){
next;
}
for(j in 5:length(target_recov_table_neg[,1])){
target_ID<-target_recov_table_neg[j,1]
from_quant<-target_quant_table_neg[
target_quant_table_neg[,1]==target_ID,
colnames(target_quant_table_neg)==from_ID
]
if(grepl("!",from_quant)){next}
to_quant<-target_quant_table_neg[
target_quant_table_neg[,1]==target_ID,
colnames(target_quant_table_neg)==to_ID
]
if(grepl("!",to_quant)){next}
from_quant<-as.numeric(strsplit(from_quant,",")[[1]])
to_quant<-as.numeric(strsplit(to_quant,",")[[1]])
recov<-c()
for(n in 1:length(from_quant)){
for(m in 1:length(to_quant)){
recov<-c(recov,
from_quant[n]-to_quant[m]
)
}
}
recov<-recov[recov>=0] # cannot be negatively concentrated!
if(length(recov)==0){next}
recov<-paste(as.character(recov),collapse=",")
target_recov_table_neg[
target_recov_table_neg[,1]==target_ID,
colnames(target_recov_table_neg)==from_ID
]<-recov
}
}
##################################################################################################################
save(target_recov_table_neg,file=file.path(logfile[[1]],"quantification","target_recov_table_neg"))
rm(target_quant_table_neg,target_recov_table_neg)
}
######################################################################################################################
######################################################################################################################
rm(measurements)
| /inst/webMass/do_recovery.r | no_license | uweschmitt/enviMass | R | false | false | 8,753 | r |
#######################################################################################################################
# delete old results ##################################################################################################
those<-list(0)
those[[1]]<-file.path(logfile[[1]],"quantification","target_recov_table_pos")
those[[2]]<-file.path(logfile[[1]],"quantification","target_recov_table_neg")
for(n in 1:length(those)){
if(file.exists(those[[n]])){
file.remove(those[[n]])
}
}
rm(those)
measurements<-read.csv(file=file.path(logfile[[1]],"dataframes","measurements"),colClasses = "character");
######################################################################################################################
######################################################################################################################
# POSITIVE ###########################################################################################################
if(
any(measurements[,"Mode"]=="positive" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE") &
file.exists(file.path(logfile[[1]],"quantification","target_quant_table_pos"))
){
load(file.path(logfile[[1]],"quantification","target_quant_table_pos"))
those_files<-measurements[(measurements[,"Mode"]=="positive" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE"),,drop=FALSE]
atdate<-those_files[,6]
atdate<-as.Date(atdate);
attime<-those_files[,7]
attime<-as.difftime(attime);
ord<-order(as.numeric(atdate),as.numeric(attime),as.numeric(those_files[,1]),decreasing=TRUE);
those_files<-those_files[ord,,drop=FALSE]
if(logfile$parameters$recov_files_included!="FALSE"){
if(as.numeric(logfile$parameters$recov_files_included)<length(those_files[,1])){
those_files<-those_files[1:as.numeric(logfile$parameters$recov_files_included),,drop=FALSE]
}
}
those_targets<-target_quant_table_pos[6:length(target_quant_table_pos[,1]),1:2,drop=FALSE]
target_recov_table_pos<-matrix(nrow=(length(those_targets[,1])+4),ncol=(length(those_files[,1])+2),"")
colnames(target_recov_table_pos)<-c("Target ID","Target name",those_files[,"ID"])
rownames(target_recov_table_pos)<-c("Name","Type","Date","Time",those_targets[,1])
target_recov_table_pos[1,]<-c("","",as.character(those_files[,"Name"]))
target_recov_table_pos[2,]<-c("","",as.character(those_files[,"Type"]))
target_recov_table_pos[3,]<-c("","",as.character(those_files[,"Date"]))
target_recov_table_pos[4,]<-c("","",as.character(those_files[,"Time"]))
target_recov_table_pos[,1]<-c("","","","",those_targets[,1])
target_recov_table_pos[,2]<-c("","","","",those_targets[,2])
##################################################################################################################
for(i in 1:length(those_files[,"ID"])){
from_ID<-those_files[i,"ID"]
to_ID<-those_files[i,"tag2"]
if(!any(measurements[measurements[,"Mode"]=="positive","ID"]==to_ID)){ # this should not happen anyway - included in check_project
cat("\n WARNING: Missing relation for spiked file detected! Please revise");
next;
}
if(!any(colnames(target_quant_table_pos)==from_ID)){
next;
}
for(j in 5:length(target_recov_table_pos[,1])){
target_ID<-target_recov_table_pos[j,1]
from_quant<-target_quant_table_pos[
target_quant_table_pos[,1]==target_ID,
colnames(target_quant_table_pos)==from_ID
]
if(grepl("!",from_quant)){next}
to_quant<-target_quant_table_pos[
target_quant_table_pos[,1]==target_ID,
colnames(target_quant_table_pos)==to_ID
]
if(grepl("!",to_quant)){next}
from_quant<-as.numeric(strsplit(from_quant,",")[[1]])
to_quant<-as.numeric(strsplit(to_quant,",")[[1]])
recov<-c()
for(n in 1:length(from_quant)){
for(m in 1:length(to_quant)){
recov<-c(recov,
from_quant[n]-to_quant[m]
)
}
}
recov<-recov[recov>=0] # cannot be negatively concentrated!
if(length(recov)==0){next}
recov<-paste(as.character(recov),collapse=",")
target_recov_table_pos[
target_recov_table_pos[,1]==target_ID,
colnames(target_recov_table_pos)==from_ID
]<-recov
}
}
##################################################################################################################
save(target_recov_table_pos,file=file.path(logfile[[1]],"quantification","target_recov_table_pos"))
rm(target_quant_table_pos,target_recov_table_pos)
}
######################################################################################################################
######################################################################################################################
# NEGATIVE ###########################################################################################################
if(
any(measurements[,"Mode"]=="negative" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE") &
file.exists(file.path(logfile[[1]],"quantification","target_quant_table_neg"))
){
load(file.path(logfile[[1]],"quantification","target_quant_table_neg"))
those_files<-measurements[(measurements[,"Mode"]=="negative" & measurements[,"Type"]=="spiked" & measurements[,"include"]=="TRUE"),,drop=FALSE]
atdate<-those_files[,6]
atdate<-as.Date(atdate);
attime<-those_files[,7]
attime<-as.difftime(attime);
ord<-order(as.numeric(atdate),as.numeric(attime),as.numeric(those_files[,1]),decreasing=TRUE);
those_files<-those_files[ord,,drop=FALSE]
if(logfile$parameters$recov_files_included!="FALSE"){
if(as.numeric(logfile$parameters$recov_files_included)<length(those_files[,1])){
those_files<-those_files[1:as.numeric(logfile$parameters$recov_files_included),,drop=FALSE]
}
}
those_targets<-target_quant_table_neg[6:length(target_quant_table_neg[,1]),1:2,drop=FALSE]
target_recov_table_neg<-matrix(nrow=(length(those_targets[,1])+4),ncol=(length(those_files[,1])+2),"")
colnames(target_recov_table_neg)<-c("Target ID","Target name",those_files[,"ID"])
rownames(target_recov_table_neg)<-c("Name","Type","Date","Time",those_targets[,1])
target_recov_table_neg[1,]<-c("","",as.character(those_files[,"Name"]))
target_recov_table_neg[2,]<-c("","",as.character(those_files[,"Type"]))
target_recov_table_neg[3,]<-c("","",as.character(those_files[,"Date"]))
target_recov_table_neg[4,]<-c("","",as.character(those_files[,"Time"]))
target_recov_table_neg[,1]<-c("","","","",those_targets[,1])
target_recov_table_neg[,2]<-c("","","","",those_targets[,2])
##################################################################################################################
for(i in 1:length(those_files[,"ID"])){
from_ID<-those_files[i,"ID"]
to_ID<-those_files[i,"tag2"]
if(!any(measurements[measurements[,"Mode"]=="negative","ID"]==to_ID)){ # this should not happen anyway - included in check_project
cat("\n WARNING: Missing relation for spiked file detected! Please revise");
next;
}
if(!any(colnames(target_quant_table_neg)==from_ID)){
next;
}
for(j in 5:length(target_recov_table_neg[,1])){
target_ID<-target_recov_table_neg[j,1]
from_quant<-target_quant_table_neg[
target_quant_table_neg[,1]==target_ID,
colnames(target_quant_table_neg)==from_ID
]
if(grepl("!",from_quant)){next}
to_quant<-target_quant_table_neg[
target_quant_table_neg[,1]==target_ID,
colnames(target_quant_table_neg)==to_ID
]
if(grepl("!",to_quant)){next}
from_quant<-as.numeric(strsplit(from_quant,",")[[1]])
to_quant<-as.numeric(strsplit(to_quant,",")[[1]])
recov<-c()
for(n in 1:length(from_quant)){
for(m in 1:length(to_quant)){
recov<-c(recov,
from_quant[n]-to_quant[m]
)
}
}
recov<-recov[recov>=0] # cannot be negatively concentrated!
if(length(recov)==0){next}
recov<-paste(as.character(recov),collapse=",")
target_recov_table_neg[
target_recov_table_neg[,1]==target_ID,
colnames(target_recov_table_neg)==from_ID
]<-recov
}
}
##################################################################################################################
save(target_recov_table_neg,file=file.path(logfile[[1]],"quantification","target_recov_table_neg"))
rm(target_quant_table_neg,target_recov_table_neg)
}
######################################################################################################################
######################################################################################################################
rm(measurements)
|
## Code for creating plot to answer question 2:
#
# "Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips=="24510") from 1999 to 2008? Use the base plotting system to make a plot
# answering this question."
#
library(dplyr)
# Get data
dataurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(dataurl, "data.zip")
unzip("data.zip")
# Load data
NEI <- tbl_df(readRDS("summarySCC_PM25.rds"))
SCC <- tbl_df(readRDS("Source_Classification_Code.rds"))
# Extract data for Baltimore City only
baltdat <- filter(NEI, fips == "24510")
# Calculate sum of emissions from all sources per year in Baltimore City
emsum <- tapply(baltdat$Emissions, baltdat$year, sum)
# Create and open PNG graphic device
png(filename = "q2plot.png", width = 480, height = 480)
# Plot
plot(names(emsum), emsum, col = "red", xlab = "Year", ylab = "Total Yearly Emissions [tons]", main = "Total Yearly Emissions in Baltimore City 1999-2008")
lines(names(emsum),emsum)
# Close PNG device
dev.off()
print("plot for question 2 created!")
| /Exploratory-Data-Analysis-Course-Project/Question2.R | no_license | adamjos/datasciencecoursera | R | false | false | 1,080 | r | ## Code for creating plot to answer question 2:
#
# "Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips=="24510") from 1999 to 2008? Use the base plotting system to make a plot
# answering this question."
#
library(dplyr)
# Get data
dataurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(dataurl, "data.zip")
unzip("data.zip")
# Load data
NEI <- tbl_df(readRDS("summarySCC_PM25.rds"))
SCC <- tbl_df(readRDS("Source_Classification_Code.rds"))
# Extract data for Baltimore City only
baltdat <- filter(NEI, fips == "24510")
# Calculate sum of emissions from all sources per year in Baltimore City
emsum <- tapply(baltdat$Emissions, baltdat$year, sum)
# Create and open PNG graphic device
png(filename = "q2plot.png", width = 480, height = 480)
# Plot
plot(names(emsum), emsum, col = "red", xlab = "Year", ylab = "Total Yearly Emissions [tons]", main = "Total Yearly Emissions in Baltimore City 1999-2008")
lines(names(emsum),emsum)
# Close PNG device
dev.off()
print("plot for question 2 created!")
|
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
library(customLayout)
library(officer)
library(magrittr)
library(ggplot2)
lay <- lay_new(matrix(1:4,nc=2),widths=c(3,2),heights=c(2,1))
lay2 <- lay_new(matrix(1:3))
titleLay <- lay_new(1, widths = 1, heights = 1)
lay3 <- lay_bind_col(lay,lay2, widths=c(3,1))
layout <- lay_bind_row(titleLay, lay3, heights = c(1,7))
lay_show(layout)
## ------------------------------------------------------------------------
## create officer layout
offLayout <- phl_layout(layout,
margins = c(0.25, 0.25, 0.25, 0.25),
innerMargins = rep(0.15,4))
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(master = "Office Theme", layout = "Title and Content")
### fill first placeholder
plot1 <- qplot(mpg, wt, data = mtcars)
plot3 <- qplot(mpg, qsec, data = mtcars)
pptx <- phl_with_gg(pptx, offLayout, 2, plot1)
pptx <- phl_with_gg(pptx, offLayout, 4, plot3)
## ------------------------------------------------------------------------
pl5 <- function() {
par(mar = rep(0.1, 4))
pie(c(3, 4, 6), col = 2:4)
}
pl6 <- function() {
par(mar = rep(0.1, 4))
pie(c(3, 2, 7), col = 2:4 + 3)
}
pl7 <- function() {
par(mar = rep(0.1, 4))
pie(c(5, 4, 2), col = 2:4 + 6)
}
pptx <- phl_with_plot(pptx, offLayout, 6, pl5)
pptx <- phl_with_plot(pptx, offLayout, 7, pl6)
pptx <- phl_with_plot(pptx, offLayout, 8, pl7)
## ------------------------------------------------------------------------
pptx <- phl_with_table(pptx, offLayout, 3, head(iris, 2))
## ------------------------------------------------------------------------
pptx <- phl_with_text(pptx, offLayout, 1, "Custom Layout")
style <- fp_text(font.size = 24, color = "red")
pptx <- phl_with_text(pptx, offLayout, 5,
"Lorem ipsum", type = "body", style = style)
## ---- eval=FALSE---------------------------------------------------------
# file <- tempfile(fileext = ".pptx")
# print(pptx, file)
## ------------------------------------------------------------------------
library(customLayout)
library(flextable)
library(dplyr)
library(officer)
lay <- lay_new(matrix(1:4,nc=2),widths=c(3,2),heights=c(2,1))
lay2 <- lay_new(matrix(1:3))
layout <- lay_bind_col(lay,lay2, widths=c(3,1))
lay_show(layout)
offLayout <- phl_layout(layout,
margins = c(0.25, 0.25, 0.25, 0.25),
innerMargins = rep(0.15,4))
pptx <- read_pptx() %>%
add_slide(master = "Office Theme", layout = "Title and Content")
table <- mtcars %>%
group_by(cyl) %>%
summarise(Mean =round(mean(qsec), 2))
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTableRaw <- flextable(table)
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTableRaw)
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTable <- phl_adjust_table(table, olay = offLayout, id = 1)
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTable)
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTable <- phl_adjust_table(table, olay = offLayout, id = 1)
flTable <- bg(flTable, bg = "#E4C994", part = "header")
flTable <- bg(flTable, bg = "#333333", part = "body")
flTable <- color(flTable, color = "#E4C994")
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTable)
## ---- results='hide'-----------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
lapply(seq_len(length(offLayout)), function(i) {
tbl <- phl_adjust_table(table, offLayout, i)
phl_with_flextable(pptx, olay = offLayout, i, tbl)
invisible()
})
| /data/genthat_extracted_code/customLayout/vignettes/layouts-for-officer-power-point-document.R | no_license | surayaaramli/typeRrh | R | false | false | 4,140 | r | ## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
library(customLayout)
library(officer)
library(magrittr)
library(ggplot2)
lay <- lay_new(matrix(1:4,nc=2),widths=c(3,2),heights=c(2,1))
lay2 <- lay_new(matrix(1:3))
titleLay <- lay_new(1, widths = 1, heights = 1)
lay3 <- lay_bind_col(lay,lay2, widths=c(3,1))
layout <- lay_bind_row(titleLay, lay3, heights = c(1,7))
lay_show(layout)
## ------------------------------------------------------------------------
## create officer layout
offLayout <- phl_layout(layout,
margins = c(0.25, 0.25, 0.25, 0.25),
innerMargins = rep(0.15,4))
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(master = "Office Theme", layout = "Title and Content")
### fill first placeholder
plot1 <- qplot(mpg, wt, data = mtcars)
plot3 <- qplot(mpg, qsec, data = mtcars)
pptx <- phl_with_gg(pptx, offLayout, 2, plot1)
pptx <- phl_with_gg(pptx, offLayout, 4, plot3)
## ------------------------------------------------------------------------
pl5 <- function() {
par(mar = rep(0.1, 4))
pie(c(3, 4, 6), col = 2:4)
}
pl6 <- function() {
par(mar = rep(0.1, 4))
pie(c(3, 2, 7), col = 2:4 + 3)
}
pl7 <- function() {
par(mar = rep(0.1, 4))
pie(c(5, 4, 2), col = 2:4 + 6)
}
pptx <- phl_with_plot(pptx, offLayout, 6, pl5)
pptx <- phl_with_plot(pptx, offLayout, 7, pl6)
pptx <- phl_with_plot(pptx, offLayout, 8, pl7)
## ------------------------------------------------------------------------
pptx <- phl_with_table(pptx, offLayout, 3, head(iris, 2))
## ------------------------------------------------------------------------
pptx <- phl_with_text(pptx, offLayout, 1, "Custom Layout")
style <- fp_text(font.size = 24, color = "red")
pptx <- phl_with_text(pptx, offLayout, 5,
"Lorem ipsum", type = "body", style = style)
## ---- eval=FALSE---------------------------------------------------------
# file <- tempfile(fileext = ".pptx")
# print(pptx, file)
## ------------------------------------------------------------------------
library(customLayout)
library(flextable)
library(dplyr)
library(officer)
lay <- lay_new(matrix(1:4,nc=2),widths=c(3,2),heights=c(2,1))
lay2 <- lay_new(matrix(1:3))
layout <- lay_bind_col(lay,lay2, widths=c(3,1))
lay_show(layout)
offLayout <- phl_layout(layout,
margins = c(0.25, 0.25, 0.25, 0.25),
innerMargins = rep(0.15,4))
pptx <- read_pptx() %>%
add_slide(master = "Office Theme", layout = "Title and Content")
table <- mtcars %>%
group_by(cyl) %>%
summarise(Mean =round(mean(qsec), 2))
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTableRaw <- flextable(table)
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTableRaw)
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTable <- phl_adjust_table(table, olay = offLayout, id = 1)
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTable)
## ------------------------------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
flTable <- phl_adjust_table(table, olay = offLayout, id = 1)
flTable <- bg(flTable, bg = "#E4C994", part = "header")
flTable <- bg(flTable, bg = "#333333", part = "body")
flTable <- color(flTable, color = "#E4C994")
pptx <- phl_with_flextable(pptx,
olay = offLayout, 1, flTable)
## ---- results='hide'-----------------------------------------------------
pptx <- read_pptx() %>%
add_slide(
master = "Office Theme",
layout = "Title and Content")
lapply(seq_len(length(offLayout)), function(i) {
tbl <- phl_adjust_table(table, offLayout, i)
phl_with_flextable(pptx, olay = offLayout, i, tbl)
invisible()
})
|
## Name: Elizabeth Lee
## Date: 7/6/16
## Function: explore distributions of disease burden metrics for ilinDt at the county level
## Results: magnitude metrics could be truncated and shifted normals, but timing metrics don't appear to be normally distributed
### disease burden metrics: sum ILI across epidemic weeks, cumulative difference in ILI and baseline, cumulative difference in ILI and epidemic threshold, rate of ILI at epidemic peak, epidemic duration, time to epidemic from start of flu period, time to epidemic peak from start of epidemic
## Filenames: sprintf('dbMetrics_periodicReg_%silinDt%s_analyzeDB.csv', code, code2)
## Data Source: IMS Health
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
#### header ####################################
require(ggplot2)
require(readr)
require(dplyr)
require(tidyr)
setwd(dirname(sys.frame(1)$ofile))
source("source_clean_response_functions_cty.R") # functions to clean response and IMS coverage data (cty)
#### set these! ####################################
code <-"" # linear time trend term
code2 <- "_Octfit" # fit = Apr to Oct and fluseason = Oct to Apr
dbCodeStr <- "_ilinDt_Octfit_span0.4_degree2"
# uncomment when running script separately
spatial <- list(scale = "county", stringcode = "County", stringabbr = "_cty")
span.var <- 0.4 # 0.4, 0.6
degree.var <- 2
code.str <- sprintf('_span%s_degree%s', span.var, degree.var)
#### FILEPATHS #################################
setwd('../reference_data')
path_abbr_st <- paste0(getwd(), "/state_abbreviations_FIPS.csv")
path_latlon_cty <- paste0(getwd(), "/cty_pop_latlon.csv")
setwd("../R_export")
path_response_cty <- paste0(getwd(), sprintf("/dbMetrics_periodicReg%s_analyzeDB_cty.csv", dbCodeStr))
# put all paths in a list to pass them around in functions
path_list <- list(path_abbr_st = path_abbr_st,
path_latlon_cty = path_latlon_cty,
path_response_cty = path_response_cty)
#### import data ####################################
iliSum <- cleanR_iliSum_cty(path_list)
iliPeak <- cleanR_iliPeak_cty(path_list)
#### plot formatting ####################################
w <- 9; h <- 6
#### plot distribution of dbMetrics ####################################
print(sprintf('plotting db metrics %s', code.str))
# 7/6/16 - saved figures
setwd(sprintf('../graph_outputs/EDA_IMS_burden_iliSum%s', spatial$stringabbr))
# total ILI plot
plt.distr.iliSum <- ggplot(iliSum, aes(x=y, group=season)) +
geom_histogram(aes(y=..density..), binwidth=10) + geom_density() +
# coord_cartesian(xlim=c(0, 250)) +
facet_wrap(~season) + ggtitle("Sum ilinDt during flu season")
ggsave(sprintf("distr_ILITot_%silinDt%s%s%s.png", code, code2, code.str, spatial$stringabbr), plt.distr.iliSum, width=w, height=h)
# ili peak case count plot
setwd(sprintf('../EDA_IMS_burden_iliPeak%s', spatial$stringabbr))
plt.distr.pkCount <- ggplot(iliPeak, aes(x=y, group=season)) +
geom_histogram(aes(y=..density..), binwidth=5) + geom_density() +
# coord_cartesian(xlim=c(0, 50)) +
facet_wrap(~season) + ggtitle("peak ilinDt count during flu season")
ggsave(sprintf("distr_pkCount_%silinDt%s%s%s.png", code, code2, code.str, spatial$stringabbr), plt.distr.pkCount, width=w, height=h)
print('finished plotting db metrics')
####################################
# compare the mean and variance for each metric by season
iliSum.summ <- iliSum %>% group_by(season) %>% summarise(MN = mean(y, na.rm=TRUE), VAR = var(y, na.rm=TRUE))
iliPk.summ <- iliPeak %>% group_by(season) %>% summarise(MN = mean(y, na.rm=TRUE), VAR = var(y, na.rm=TRUE))
print(sprintf('span %s degree %s', span.var, degree.var))
print(iliSum.summ)
print(iliPk.summ)
| /programs/explore_dbMetricsDistribution_ilinDt_cty.R | no_license | Qasim-1develop/flu-SDI-dzBurden-drivers | R | false | false | 3,824 | r | ## Name: Elizabeth Lee
## Date: 7/6/16
## Function: explore distributions of disease burden metrics for ilinDt at the county level
## Results: magnitude metrics could be truncated and shifted normals, but timing metrics don't appear to be normally distributed
### disease burden metrics: sum ILI across epidemic weeks, cumulative difference in ILI and baseline, cumulative difference in ILI and epidemic threshold, rate of ILI at epidemic peak, epidemic duration, time to epidemic from start of flu period, time to epidemic peak from start of epidemic
## Filenames: sprintf('dbMetrics_periodicReg_%silinDt%s_analyzeDB.csv', code, code2)
## Data Source: IMS Health
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
#### header ####################################
require(ggplot2)
require(readr)
require(dplyr)
require(tidyr)
setwd(dirname(sys.frame(1)$ofile))
source("source_clean_response_functions_cty.R") # functions to clean response and IMS coverage data (cty)
#### set these! ####################################
code <-"" # linear time trend term
code2 <- "_Octfit" # fit = Apr to Oct and fluseason = Oct to Apr
dbCodeStr <- "_ilinDt_Octfit_span0.4_degree2"
# uncomment when running script separately
spatial <- list(scale = "county", stringcode = "County", stringabbr = "_cty")
span.var <- 0.4 # 0.4, 0.6
degree.var <- 2
code.str <- sprintf('_span%s_degree%s', span.var, degree.var)
#### FILEPATHS #################################
setwd('../reference_data')
path_abbr_st <- paste0(getwd(), "/state_abbreviations_FIPS.csv")
path_latlon_cty <- paste0(getwd(), "/cty_pop_latlon.csv")
setwd("../R_export")
path_response_cty <- paste0(getwd(), sprintf("/dbMetrics_periodicReg%s_analyzeDB_cty.csv", dbCodeStr))
# put all paths in a list to pass them around in functions
path_list <- list(path_abbr_st = path_abbr_st,
path_latlon_cty = path_latlon_cty,
path_response_cty = path_response_cty)
#### import data ####################################
iliSum <- cleanR_iliSum_cty(path_list)
iliPeak <- cleanR_iliPeak_cty(path_list)
#### plot formatting ####################################
w <- 9; h <- 6
#### plot distribution of dbMetrics ####################################
print(sprintf('plotting db metrics %s', code.str))
# 7/6/16 - saved figures
setwd(sprintf('../graph_outputs/EDA_IMS_burden_iliSum%s', spatial$stringabbr))
# total ILI plot
plt.distr.iliSum <- ggplot(iliSum, aes(x=y, group=season)) +
geom_histogram(aes(y=..density..), binwidth=10) + geom_density() +
# coord_cartesian(xlim=c(0, 250)) +
facet_wrap(~season) + ggtitle("Sum ilinDt during flu season")
ggsave(sprintf("distr_ILITot_%silinDt%s%s%s.png", code, code2, code.str, spatial$stringabbr), plt.distr.iliSum, width=w, height=h)
# ili peak case count plot
setwd(sprintf('../EDA_IMS_burden_iliPeak%s', spatial$stringabbr))
plt.distr.pkCount <- ggplot(iliPeak, aes(x=y, group=season)) +
geom_histogram(aes(y=..density..), binwidth=5) + geom_density() +
# coord_cartesian(xlim=c(0, 50)) +
facet_wrap(~season) + ggtitle("peak ilinDt count during flu season")
ggsave(sprintf("distr_pkCount_%silinDt%s%s%s.png", code, code2, code.str, spatial$stringabbr), plt.distr.pkCount, width=w, height=h)
print('finished plotting db metrics')
####################################
# compare the mean and variance for each metric by season
iliSum.summ <- iliSum %>% group_by(season) %>% summarise(MN = mean(y, na.rm=TRUE), VAR = var(y, na.rm=TRUE))
iliPk.summ <- iliPeak %>% group_by(season) %>% summarise(MN = mean(y, na.rm=TRUE), VAR = var(y, na.rm=TRUE))
print(sprintf('span %s degree %s', span.var, degree.var))
print(iliSum.summ)
print(iliPk.summ)
|
# FUNCTIONS FOR CLEANING RAW DATA FILES
#### efficacy_function cleans raw efficacy data in Shiny app
# Function Title: Cleaning Efficacy Dataframe
# This function uses the file input from the fileInput widget for "efficacy" as the argument.
# The dataframe explores lung and spleen efficacies by drug, days of treatment, and dosage. The function
# cleans the plasma dataframe by first removing columns that are repeating (i.e., units) and putting
# the efficacy values into a log value for easier comprehension. Further, the dosage and days_treatment columns
# were cleaned by changing the factor names in order to compare by dosage and include controls in this analysis.
library(dplyr)
efficacy_function <- function(efficacy_df){
efficacy_clean <- efficacy_df %>%
select(Protocol_Animal, Compound, Group, Drug_Dose, Days_Treatment,
Treatment_Interval,Elung,Espleen) %>%
rename(lung_efficacy = Elung,
spleen_efficacy = Espleen,
dosage = Drug_Dose,
days_treatment = Days_Treatment,
dose_interval = Treatment_Interval,
drug = Compound) %>%
mutate(lung_efficacy = as.numeric(lung_efficacy)) %>%
mutate(spleen_efficacy = as.numeric(spleen_efficacy)) %>%
mutate(dose_interval = as.factor(dose_interval)) %>%
mutate(days_treatment = as.factor(days_treatment)) %>%
group_by(Protocol_Animal, drug, Group, dosage, days_treatment, dose_interval) %>%
summarize(lung_efficacy_log = log10(lung_efficacy),
spleen_efficacy_log = log10(spleen_efficacy))
levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="Pre Rx 9 week"] <- "_Baseline"
levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="M-F"] <- "_QD"
levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="4 wk"] <- "20_Control"
levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="8 wk"] <- "40_Control"
levels(efficacy_clean$drug)[levels(efficacy_clean$drug)==""] <- "Baseline"
efficacy_clean <- efficacy_clean %>%
unite(days_dose, days_treatment, dose_interval, sep = "") %>%
separate(days_dose, c("days", "dose"), sep = "_") %>%
rename("days_treatment" = days,
"dose_interval" = dose) %>%
mutate(days_treatment = as.numeric(days_treatment))
return(efficacy_clean)
}
#### plasma_function cleans raw plasma data in Shiny app
#Function Title: Cleaning Plasma Dataframe
#This function has a dataframe as an argument. The dataframe contains data on plasma
#concentrations. The function cleans the plasma dataframe by selecting only the needed
#variables, renaming variables, and changing the group column to a character.
# Cleaning Plasma Dataframe (used by the Shiny app).
#
# Keeps the identifier/dosing columns plus the plasma concentration,
# renames them to the tidy template names, and stores Group as character.
#
# Args:
#   plasma_df: raw plasma data with columns MouseID, Compound, Group,
#     Protocol_Animal, Dosing, Timepoint and Plasma_Parent.
# Returns: a data frame with columns mouse_number, drug, Group,
#   Protocol_Animal, Dosing, Timepoint and plasma_concentration.
plasma_function <- function(plasma_df){
  # select() renames while picking columns, keeping the pipeline to two
  # steps; column order matches the listed order below.
  plasma_df %>%
    select(mouse_number = MouseID,
           drug = Compound,
           Group,
           Protocol_Animal,
           Dosing,
           Timepoint,
           plasma_concentration = Plasma_Parent) %>%
    mutate(Group = as.character(Group))
}
##### Clean the tissue laser data into a tidy format
#
# Clean the raw laser-capture tissue data into one row per mouse.
#
# Each mouse contributes 4 consecutive rows (one per lesion compartment:
# uninvolved lung, rim, outer caseum, inner caseum).  Bookkeeping columns
# are dropped, a MouseID is assigned to each group of 4 rows, the
# compartments are spread into their own columns, and the measurements
# are converted to numeric.
#
# Args:
#   tissue_laser_df: raw laser data with columns StudyID, Metabolite,
#     Units, Collection, `Sample ID`, Compartment and Parent; rows must
#     arrive in groups of 4 per mouse.
# Returns: a data frame with one row per mouse and numeric ULU, RIM,
#   OCS and ICS columns.
tissue_laser_function <- function(tissue_laser_df) {
  tissue_laser_clean <- tissue_laser_df %>%
    rename(`Parent [ng/ml]` = Parent) %>%
    select(-StudyID, -Metabolite, - Units, - Collection, - `Sample ID`)
  n <- nrow(tissue_laser_clean)
  # Fail loudly if the 4-rows-per-mouse assumption is violated, instead of
  # letting mutate() stop on a confusing length-mismatch error below.
  if (n == 0 || n %% 4 != 0) {
    stop("tissue_laser_function: expected a non-empty input with a multiple of 4 rows (4 compartments per mouse), got ", n, " rows.", call. = FALSE)
  }
  # seq_len() is used rather than 1:(n/4), which misbehaves when n is 0.
  mice_ids <- rep(seq_len(n / 4), each = 4)
  tissue_laser_clean <- mutate(tissue_laser_clean, MouseID = mice_ids) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(ULU = `uninvolved lung`, RIM = rim,
           OCS = `outer caseum`, ICS = `inner caseum`) %>%
    mutate(ULU = as.numeric(ULU), RIM = as.numeric(RIM),
           OCS = as.numeric(OCS), ICS = as.numeric(ICS))
  return(tissue_laser_clean)
}
##### tissue_std_pk_function cleans raw tissue std pk data in Shiny app
#Function Title: Clean STD PK Dataframe
#The argument for this function contains information on pharmacokinetic properties of the
#drugs tested on a mouse-by-mouse level. A mouse id was created as a new column to the
#dataset. Additionally, only the necessary columns were included in the dataframe. The spread
#function was used to convert the Compartment column into columns for each compartment,
#containing the respective Parent values. These new columns were then renamed to match the
#SLE and SLU variable names in the tidy data templates and recoded as numerical values.
# Clean STD PK Dataframe (used by the Shiny app).
#
# The input holds pharmacokinetic measurements on a mouse-by-mouse level,
# two consecutive rows per mouse (Lung and Lesion compartments).  A mouse
# number is assigned to each pair of rows, the Compartment column is
# spread into per-compartment columns, and those are renamed to the
# SLU / SLE tidy-template names and converted to numeric.
#
# Args:
#   tissue_std_pk_df: raw std PK data with columns Compound, Group,
#     Protocol_Animal, Dosing, Timepoint, Compartment and Parent; rows
#     must arrive in pairs per mouse.
# Returns: a data frame with one row per mouse and numeric SLU (lung)
#   and SLE (lesion) columns.
tissue_std_pk_function <- function(tissue_std_pk_df){
  n <- nrow(tissue_std_pk_df)
  # Fail loudly if the 2-rows-per-mouse assumption is violated, instead of
  # letting mutate() stop on a confusing length-mismatch error below.
  if (n == 0 || n %% 2 != 0) {
    stop("tissue_std_pk_function: expected a non-empty input with an even number of rows (2 compartments per mouse), got ", n, " rows.", call. = FALSE)
  }
  # seq_len() is used rather than 1:(n/2), which misbehaves when n is 0.
  mice_ids <- rep(seq_len(n / 2), each = 2)
  tissue_std_pk_clean <- tissue_std_pk_df %>%
    mutate(mouse_number = mice_ids) %>%
    select(Compound, mouse_number, Group, Protocol_Animal, Dosing, Timepoint, Compartment, Parent) %>%
    rename(drug = Compound,
           `Parent [ng/ml]` = Parent) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(SLU = Lung,
           SLE = Lesion) %>%
    mutate(SLU = as.numeric(SLU),
           SLE = as.numeric(SLE))
  return(tissue_std_pk_clean)
}
| /Shiny_App/helper.R | no_license | KatieKey/input_output_shiny_group | R | false | false | 5,150 | r |
# FUNCTIONS FOR CLEANING RAW DATA FILES
#### efficacy_function cleans raw efficacy data in Shiny app
# Function Title: Cleaning Efficacy Dataframe
# This function uses the file input from the fileInput widget for "efficacy" as the argument.
# The dataframe explores lung and spleen efficacies by drug, days of treatment, and dosage. The function
# cleans the plasma dataframe by first removing columns that are repeating (i.e., units) and putting
# the efficacy values into a log value for easier comprehension. Further, the dosage and days_treatment columns
# were cleaned by changing the factor names in order to compare by dosage and include controls in this analysis.
library(dplyr)
# Cleaning Efficacy Dataframe (used by the Shiny app).
#
# Takes the raw efficacy data frame (the "efficacy" fileInput upload) and
# returns a cleaned, grouped tibble with log10-transformed lung and spleen
# efficacy, plus recoded dosing columns so that controls and baseline
# animals can be compared with treated groups.
#
# Expected input columns: Protocol_Animal, Compound, Group, Drug_Dose,
# Days_Treatment, Treatment_Interval, Elung, Espleen.
# NOTE(review): unite()/separate() below come from tidyr, which is not
# loaded in this file (only dplyr is) -- presumably attached elsewhere in
# the Shiny app; confirm.
efficacy_function <- function(efficacy_df){
  # Select and rename to snake_case, coerce measurement/dosing types, then
  # take log10 of the efficacy values.  The grouping includes the
  # per-animal ID, so summarize() keeps one row per animal and acts as a
  # row-wise log10 transform here.
  efficacy_clean <- efficacy_df %>%
    select(Protocol_Animal, Compound, Group, Drug_Dose, Days_Treatment,
           Treatment_Interval,Elung,Espleen) %>%
    rename(lung_efficacy = Elung,
           spleen_efficacy = Espleen,
           dosage = Drug_Dose,
           days_treatment = Days_Treatment,
           dose_interval = Treatment_Interval,
           drug = Compound) %>%
    mutate(lung_efficacy = as.numeric(lung_efficacy)) %>%
    mutate(spleen_efficacy = as.numeric(spleen_efficacy)) %>%
    mutate(dose_interval = as.factor(dose_interval)) %>%
    mutate(days_treatment = as.factor(days_treatment)) %>%
    group_by(Protocol_Animal, drug, Group, dosage, days_treatment, dose_interval) %>%
    summarize(lung_efficacy_log = log10(lung_efficacy),
              spleen_efficacy_log = log10(spleen_efficacy))
  # Recode factor levels: baseline/QD intervals get a leading "_" and the
  # control intervals get a leading "<days>_", which the unite()/separate()
  # step below splits apart again.
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="Pre Rx 9 week"] <- "_Baseline"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="M-F"] <- "_QD"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="4 wk"] <- "20_Control"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="8 wk"] <- "40_Control"
  levels(efficacy_clean$drug)[levels(efficacy_clean$drug)==""] <- "Baseline"
  # Combine days_treatment with the recoded dose_interval and split on "_"
  # so control rows pick up their treatment days (20/40) and
  # days_treatment can be stored as a number again.
  efficacy_clean <- efficacy_clean %>%
    unite(days_dose, days_treatment, dose_interval, sep = "") %>%
    separate(days_dose, c("days", "dose"), sep = "_") %>%
    rename("days_treatment" = days,
           "dose_interval" = dose) %>%
    mutate(days_treatment = as.numeric(days_treatment))
  return(efficacy_clean)
}
#### plasma_function cleans raw plasma data in Shiny app
#Function Title: Cleaning Plasma Dataframe
#This function has a dataframe as an argument. The dataframe contains data on plasma
#concentrations. The function cleans the plasma dataframe by selecting only the needed
#variables, renaming variables, and changing the group column to a character.
# Cleaning Plasma Dataframe (used by the Shiny app).
#
# Keeps the identifier/dosing columns plus the plasma concentration,
# renames them to the tidy template names, and stores Group as character.
#
# Args:
#   plasma_df: raw plasma data with columns MouseID, Compound, Group,
#     Protocol_Animal, Dosing, Timepoint and Plasma_Parent.
# Returns: a data frame with columns mouse_number, drug, Group,
#   Protocol_Animal, Dosing, Timepoint and plasma_concentration.
plasma_function <- function(plasma_df){
  # select() renames while picking columns, keeping the pipeline to two
  # steps; column order matches the listed order below.
  plasma_df %>%
    select(mouse_number = MouseID,
           drug = Compound,
           Group,
           Protocol_Animal,
           Dosing,
           Timepoint,
           plasma_concentration = Plasma_Parent) %>%
    mutate(Group = as.character(Group))
}
##### Clean the tissue laser data into a tidy format
#
# Clean the raw laser-capture tissue data into one row per mouse.
#
# Each mouse contributes 4 consecutive rows (one per lesion compartment:
# uninvolved lung, rim, outer caseum, inner caseum).  Bookkeeping columns
# are dropped, a MouseID is assigned to each group of 4 rows, the
# compartments are spread into their own columns, and the measurements
# are converted to numeric.
#
# Args:
#   tissue_laser_df: raw laser data with columns StudyID, Metabolite,
#     Units, Collection, `Sample ID`, Compartment and Parent; rows must
#     arrive in groups of 4 per mouse.
# Returns: a data frame with one row per mouse and numeric ULU, RIM,
#   OCS and ICS columns.
tissue_laser_function <- function(tissue_laser_df) {
  tissue_laser_clean <- tissue_laser_df %>%
    rename(`Parent [ng/ml]` = Parent) %>%
    select(-StudyID, -Metabolite, - Units, - Collection, - `Sample ID`)
  n <- nrow(tissue_laser_clean)
  # Fail loudly if the 4-rows-per-mouse assumption is violated, instead of
  # letting mutate() stop on a confusing length-mismatch error below.
  if (n == 0 || n %% 4 != 0) {
    stop("tissue_laser_function: expected a non-empty input with a multiple of 4 rows (4 compartments per mouse), got ", n, " rows.", call. = FALSE)
  }
  # seq_len() is used rather than 1:(n/4), which misbehaves when n is 0.
  mice_ids <- rep(seq_len(n / 4), each = 4)
  tissue_laser_clean <- mutate(tissue_laser_clean, MouseID = mice_ids) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(ULU = `uninvolved lung`, RIM = rim,
           OCS = `outer caseum`, ICS = `inner caseum`) %>%
    mutate(ULU = as.numeric(ULU), RIM = as.numeric(RIM),
           OCS = as.numeric(OCS), ICS = as.numeric(ICS))
  return(tissue_laser_clean)
}
##### tissue_std_pk_function cleans raw tissue std pk data in Shiny app
#Function Title: Clean STD PK Dataframe
#The argument for this function contains information on pharmacokinetic properties of the
#drugs tested on a mouse-by-mouse level. A mouse id was created as a new column to the
#dataset. Additionally, only the necessary columns were included in the dataframe. The spread
#function was used to convert the Compartment column into columns for each compartment,
#containing the respective Parent values. These new columns were then renamed to match the
#SLE and SLU variable names in the tidy data templates and recoded as numerical values.
# Clean STD PK Dataframe (used by the Shiny app).
#
# The input holds pharmacokinetic measurements on a mouse-by-mouse level,
# two consecutive rows per mouse (Lung and Lesion compartments).  A mouse
# number is assigned to each pair of rows, the Compartment column is
# spread into per-compartment columns, and those are renamed to the
# SLU / SLE tidy-template names and converted to numeric.
#
# Args:
#   tissue_std_pk_df: raw std PK data with columns Compound, Group,
#     Protocol_Animal, Dosing, Timepoint, Compartment and Parent; rows
#     must arrive in pairs per mouse.
# Returns: a data frame with one row per mouse and numeric SLU (lung)
#   and SLE (lesion) columns.
tissue_std_pk_function <- function(tissue_std_pk_df){
  n <- nrow(tissue_std_pk_df)
  # Fail loudly if the 2-rows-per-mouse assumption is violated, instead of
  # letting mutate() stop on a confusing length-mismatch error below.
  if (n == 0 || n %% 2 != 0) {
    stop("tissue_std_pk_function: expected a non-empty input with an even number of rows (2 compartments per mouse), got ", n, " rows.", call. = FALSE)
  }
  # seq_len() is used rather than 1:(n/2), which misbehaves when n is 0.
  mice_ids <- rep(seq_len(n / 2), each = 2)
  tissue_std_pk_clean <- tissue_std_pk_df %>%
    mutate(mouse_number = mice_ids) %>%
    select(Compound, mouse_number, Group, Protocol_Animal, Dosing, Timepoint, Compartment, Parent) %>%
    rename(drug = Compound,
           `Parent [ng/ml]` = Parent) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(SLU = Lung,
           SLE = Lesion) %>%
    mutate(SLU = as.numeric(SLU),
           SLE = as.numeric(SLE))
  return(tissue_std_pk_clean)
}
|
# Name: Guilherme de Araujo
# Getting & Cleaning Data course project:
# 1 Merges the training and the test sets to create one data set.
# 2 Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 Uses descriptive activity names to name the activities in the data set
# 4 Appropriately labels the data set with descriptive variable names.
# 5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Load Library
library(data.table)
# Create the data directory if it does not already exist
if (!file.exists("coletadados")) {
  dir.create("coletadados")
}
# Download the dataset once; mode = "wb" is required so the zip archive is
# not corrupted by text-mode translation on Windows.
if (!file.exists("coletadados/dados.zip")) {
  download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                destfile = "coletadados/dados.zip", mode = "wb")
}
# Unzip Dataset
unzip("coletadados/dados.zip", exdir = "coletadados")
# Load the datasets.  The label files inside the archive are named
# y_test.txt / y_train.txt (lower case); the upper-case "Y_..." spelling
# only works on case-insensitive file systems.
x_test <- read.table(file = "coletadados/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table(file = "coletadados/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("coletadados/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table(file = "coletadados/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table(file = "coletadados/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("coletadados/UCI HAR Dataset/train/subject_train.txt")
features <- read.table("coletadados/UCI HAR Dataset/features.txt")
# Merge the test and training measurements / activity labels
dadosx <- rbind(x_test, x_train)
dadosy <- rbind(y_test, y_train)
# Mean and standard deviation of the second measurement column
media <- mean(dadosx$V2)
desviopadrao <- sd(dadosx$V2)
# Label the measurement columns with the descriptive feature names
features$V2 <- as.character(features$V2)
dadosx <- setnames(dadosx, features[,2])
# Write the labelled data set to disk
write.table(dadosx,"coletadados/UCI HAR Dataset/Data.txt")
| /run_analysis.R | no_license | guilhermevfc/Getting-and-Cleaning-Data | R | false | false | 1,885 | r |
# Name: Guilherme de Araujo
# Getting & Cleaning Data course project:
# 1 Merges the training and the test sets to create one data set.
# 2 Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 Uses descriptive activity names to name the activities in the data set
# 4 Appropriately labels the data set with descriptive variable names.
# 5 From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Load Library
library(data.table)
# Create the data directory if it does not already exist
if (!file.exists("coletadados")) {
  dir.create("coletadados")
}
# Download the dataset once; mode = "wb" is required so the zip archive is
# not corrupted by text-mode translation on Windows.
if (!file.exists("coletadados/dados.zip")) {
  download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                destfile = "coletadados/dados.zip", mode = "wb")
}
# Unzip Dataset
unzip("coletadados/dados.zip", exdir = "coletadados")
# Load the datasets.  The label files inside the archive are named
# y_test.txt / y_train.txt (lower case); the upper-case "Y_..." spelling
# only works on case-insensitive file systems.
x_test <- read.table(file = "coletadados/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table(file = "coletadados/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("coletadados/UCI HAR Dataset/test/subject_test.txt")
x_train <- read.table(file = "coletadados/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table(file = "coletadados/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("coletadados/UCI HAR Dataset/train/subject_train.txt")
features <- read.table("coletadados/UCI HAR Dataset/features.txt")
# Merge the test and training measurements / activity labels
dadosx <- rbind(x_test, x_train)
dadosy <- rbind(y_test, y_train)
# Mean and standard deviation of the second measurement column
media <- mean(dadosx$V2)
desviopadrao <- sd(dadosx$V2)
# Label the measurement columns with the descriptive feature names
features$V2 <- as.character(features$V2)
dadosx <- setnames(dadosx, features[,2])
# Write the labelled data set to disk
write.table(dadosx,"coletadados/UCI HAR Dataset/Data.txt")
|
source("./read_data.R")
source("./plot2.R")
source("./plot3.R")
#Set the working directory to the one this script is in, to run this.
# Render plot4.png: loads the power-consumption data, temporarily switches
# the time locale to English so date-axis labels are not localized, draws
# the 2x2 panel via create_plot_4(), then restores device and locale.
plot_4 <- function(){
  data <- read_data()
  #Retrieves the current locale and changes it to English temporarily
  user_lang <- Sys.getlocale("LC_TIME")
  # One of these two calls fails with a warning depending on the OS
  # (locale names differ between platforms); the other one takes effect.
  Sys.setlocale("LC_TIME", "English") #Windows
  Sys.setlocale("LC_TIME", "en_US.UTF-8") #linux
  #Opens file device.
  png("plot4.png")
  create_plot_4(data)
  #Closes device and sets locale back to original
  dev.off()
  Sys.setlocale("LC_TIME", user_lang)
}
# Draw the four panels of plot 4 on the current graphics device.
# The first two panels reuse the plots defined in plot2.R / plot3.R; the
# remaining two are line charts of Voltage and Global_reactive_power.
create_plot_4 <- function(data){
  # 2 x 2 panel layout, filled column by column
  par(mfcol = c(2, 2))
  # Panels from the companion scripts plot2.R and plot3.R
  create_plot_2(data)
  create_plot_3(data)
  # Voltage over time
  plot(data$Time, data$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  # Global reactive power over time
  plot(data$Time, data$Global_reactive_power, type = "l",
       xlab = "datetime", ylab = "Global_reactive_power")
}
| /plot4.R | no_license | sideral/ExData_Plotting1 | R | false | false | 1,150 | r | source("./read_data.R")
source("./plot2.R")
source("./plot3.R")
#Set the working directory to the one this script is in, to run this.
# Render plot4.png: loads the power-consumption data, temporarily switches
# the time locale to English so date-axis labels are not localized, draws
# the 2x2 panel via create_plot_4(), then restores device and locale.
plot_4 <- function(){
  data <- read_data()
  #Retrieves the current locale and changes it to English temporarily
  user_lang <- Sys.getlocale("LC_TIME")
  # One of these two calls fails with a warning depending on the OS
  # (locale names differ between platforms); the other one takes effect.
  Sys.setlocale("LC_TIME", "English") #Windows
  Sys.setlocale("LC_TIME", "en_US.UTF-8") #linux
  #Opens file device.
  png("plot4.png")
  create_plot_4(data)
  #Closes device and sets locale back to original
  dev.off()
  Sys.setlocale("LC_TIME", user_lang)
}
# Draw the four panels of plot 4 on the current graphics device.
# The first two panels reuse the plots defined in plot2.R / plot3.R; the
# remaining two are line charts of Voltage and Global_reactive_power.
create_plot_4 <- function(data){
  # 2 x 2 panel layout, filled column by column
  par(mfcol = c(2, 2))
  # Panels from the companion scripts plot2.R and plot3.R
  create_plot_2(data)
  create_plot_3(data)
  # Voltage over time
  plot(data$Time, data$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  # Global reactive power over time
  plot(data$Time, data$Global_reactive_power, type = "l",
       xlab = "datetime", ylab = "Global_reactive_power")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pretty_output_functions.R
\name{run_pretty_km_output}
\alias{run_pretty_km_output}
\title{Wrapper for KM Model Output, with Log-Rank p value}
\usage{
run_pretty_km_output(strata_in = NA, model_data, time_in, event_in,
event_level = NULL, time_est = NULL, group_name = NULL,
title_name = NULL, conf_level = 0.95, surv_est_prefix = "Time",
surv_est_digits = 2, median_est_digits = 1, p_digits = 4,
output_type = NULL, sig_alpha = 0.05, background = "yellow", ...)
}
\arguments{
\item{strata_in}{name of strata variable, or NA (default) if no strata desired}
\item{model_data}{dataset that contains \code{strata_in}, \code{time_in}, and \code{event_in} variables}
\item{time_in}{name of time variable component of outcome measure}
\item{event_in}{name of event status variable. If \code{event_level} = NULL then this must be the name of a FALSE/TRUE or 0/1 variable, where FALSE or 0 are considered the censored level, respectively}
\item{event_level}{event level for event status variable.}
\item{time_est}{numerical vector of time estimates. If NULL (default) no time estimates are calculated}
\item{group_name}{strata variable name. If NULL and strata exists then using variable}
\item{title_name}{title to use}
\item{conf_level}{the confidence level required (default is 0.95).}
\item{surv_est_prefix}{prefix to use in survival estimate names. Default is Time (i.e. Time:5, Time:10,...)}
\item{surv_est_digits}{number of digits to round p values for survival estimates for specified times}
\item{median_est_digits}{number of digits to round p values for Median Survival Estimates}
\item{p_digits}{number of digits to round p values for Log-Rank p value}
\item{output_type}{output type, either NULL (default), "latex", or "html" (making special characters latex friendly)}
\item{sig_alpha}{the defined significance level. Default = 0.05}
\item{background}{background color of significant values, or no highlighting if NULL. Default is "yellow"}
\item{...}{other params to pass to \code{pretty_pvalues} (i.e. \code{bold} or \code{italic})}
}
\value{
A tibble with: \code{Name} (if provided), \code{Group} (if strata variable in fit), \code{Level} (if strata variable in fit), \code{Time:X} (Survival estimates for each time provided), \code{Median Estimate}. If there is no strata variable the tibble has one row; otherwise it has one row per strata level.
}
\description{
This function takes a dataset, along with variables names for time and event status for KM fit, and possibly strata
}
\examples{
# Basic survival model examples
set.seed(542542522)
ybin <- sample(0:1, 100, replace = TRUE)
ybin2 <- sample(0:1, 100, replace = TRUE)
ybin3 <- sample(c('Dead','Alive'), 100, replace = TRUE)
y <- rexp(100,.1)
x1 <- factor(sample(LETTERS[1:2],100,replace = TRUE))
x2 <- factor(sample(letters[1:4],100,replace = TRUE))
my_data <- data.frame(y, ybin, ybin2, ybin3, x1, x2)
Hmisc::label(my_data$x1) <- "X1 Variable"
# Single runs
run_pretty_km_output(strata_in = 'x1', model_data = my_data,
time_in = 'y', event_in = 'ybin', time_est = NULL)
run_pretty_km_output(strata_in = 'x1', model_data = my_data,
time_in = 'y', event_in = 'ybin', time_est = c(5,10))
run_pretty_km_output(strata_in = 'x2', model_data = my_data,
time_in = 'y', event_in = 'ybin3', event_level = 'Dead', time_est = c(5,10))
# Multiple runs for different variables
library(dplyr)
vars_to_run = c(NA, 'x1', 'x2')
purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data,
time_in = 'y', event_in = 'ybin', event_level = '0', time_est = NULL) \%>\%
select(Group, Level, everything())
km_info <- purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data, time_in = 'y',
event_in = 'ybin3', event_level = 'Dead', time_est = c(5,10), surv_est_prefix = 'Year',
title_name = 'Overall Survival') \%>\%
select(Group, Level, everything())
km_info2 <- purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data, time_in = 'y',
event_in = 'ybin2', time_est = c(5,10), surv_est_prefix = 'Year',
title_name = 'Cancer Specific Survival') \%>\%
select(Group, Level, everything())
options(knitr.kable.NA = '')
kableExtra::kable(bind_rows(km_info, km_info2), escape = FALSE, longtable = FALSE, booktabs = TRUE, linesep = '',
caption = 'Survival Percentage Estimates at 5 and 10 Years') \%>\%
kableExtra::collapse_rows(c(1:2), row_group_label_position = 'stack', headers_to_remove = 1:2)
# Real World Example
data(Bladder_Cancer)
vars_to_run = c(NA, 'Gender', 'Clinical_Stage_Grouped', 'PT0N0', 'Any_Downstaging')
purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = Bladder_Cancer,
time_in = 'Survival_Months', event_in = 'Vital_Status', event_level = 'Dead',
time_est = c(24,60), surv_est_prefix = 'Month', p_digits=5) \%>\%
select(Group, Level, everything())
}
| /man/run_pretty_km_output.Rd | no_license | CarvajalRodrigo/MoffittFunctions | R | false | true | 4,969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pretty_output_functions.R
\name{run_pretty_km_output}
\alias{run_pretty_km_output}
\title{Wrapper for KM Model Output, with Log-Rank p value}
\usage{
run_pretty_km_output(strata_in = NA, model_data, time_in, event_in,
event_level = NULL, time_est = NULL, group_name = NULL,
title_name = NULL, conf_level = 0.95, surv_est_prefix = "Time",
surv_est_digits = 2, median_est_digits = 1, p_digits = 4,
output_type = NULL, sig_alpha = 0.05, background = "yellow", ...)
}
\arguments{
\item{strata_in}{name of strata variable, or NA (default) if no strata desired}
\item{model_data}{dataset that contains \code{strata_in}, \code{time_in}, and \code{event_in} variables}
\item{time_in}{name of time variable component of outcome measure}
\item{event_in}{name of event status variable. If \code{event_level} = NULL then this must be the name of a FALSE/TRUE or 0/1 variable, where FALSE or 0 are considered the censored level, respectively}
\item{event_level}{event level for event status variable.}
\item{time_est}{numerical vector of time estimates. If NULL (default) no time estimates are calculated}
\item{group_name}{strata variable name. If NULL and strata exists then using variable}
\item{title_name}{title to use}
\item{conf_level}{the confidence level required (default is 0.95).}
\item{surv_est_prefix}{prefix to use in survival estimate names. Default is Time (i.e. Time:5, Time:10,...)}
\item{surv_est_digits}{number of digits to round p values for survival estimates for specified times}
\item{median_est_digits}{number of digits to round p values for Median Survival Estimates}
\item{p_digits}{number of digits to round p values for Log-Rank p value}
\item{output_type}{output type, either NULL (default), "latex", or "html" (making special characters latex friendly)}
\item{sig_alpha}{the defined significance level. Default = 0.05}
\item{background}{background color of significant values, or no highlighting if NULL. Default is "yellow"}
\item{...}{other params to pass to \code{pretty_pvalues} (i.e. \code{bold} or \code{italic})}
}
\value{
A tibble with: \code{Name} (if provided), \code{Group} (if strata variable in fit), \code{Level} (if strata variable in fit), \code{Time:X} (Survival estimates for each time provided), \code{Median Estimate}. If there is no strata variable the tibble has one row; otherwise it has one row per strata level.
}
\description{
This function takes a dataset, along with variables names for time and event status for KM fit, and possibly strata
}
\examples{
# Basic survival model examples
set.seed(542542522)
ybin <- sample(0:1, 100, replace = TRUE)
ybin2 <- sample(0:1, 100, replace = TRUE)
ybin3 <- sample(c('Dead','Alive'), 100, replace = TRUE)
y <- rexp(100,.1)
x1 <- factor(sample(LETTERS[1:2],100,replace = TRUE))
x2 <- factor(sample(letters[1:4],100,replace = TRUE))
my_data <- data.frame(y, ybin, ybin2, ybin3, x1, x2)
Hmisc::label(my_data$x1) <- "X1 Variable"
# Single runs
run_pretty_km_output(strata_in = 'x1', model_data = my_data,
time_in = 'y', event_in = 'ybin', time_est = NULL)
run_pretty_km_output(strata_in = 'x1', model_data = my_data,
time_in = 'y', event_in = 'ybin', time_est = c(5,10))
run_pretty_km_output(strata_in = 'x2', model_data = my_data,
time_in = 'y', event_in = 'ybin3', event_level = 'Dead', time_est = c(5,10))
# Multiple runs for different variables
library(dplyr)
vars_to_run = c(NA, 'x1', 'x2')
purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data,
time_in = 'y', event_in = 'ybin', event_level = '0', time_est = NULL) \%>\%
select(Group, Level, everything())
km_info <- purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data, time_in = 'y',
event_in = 'ybin3', event_level = 'Dead', time_est = c(5,10), surv_est_prefix = 'Year',
title_name = 'Overall Survival') \%>\%
select(Group, Level, everything())
km_info2 <- purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = my_data, time_in = 'y',
event_in = 'ybin2', time_est = c(5,10), surv_est_prefix = 'Year',
title_name = 'Cancer Specific Survival') \%>\%
select(Group, Level, everything())
options(knitr.kable.NA = '')
kableExtra::kable(bind_rows(km_info, km_info2), escape = FALSE, longtable = FALSE, booktabs = TRUE, linesep = '',
caption = 'Survival Percentage Estimates at 5 and 10 Years') \%>\%
kableExtra::collapse_rows(c(1:2), row_group_label_position = 'stack', headers_to_remove = 1:2)
# Real World Example
data(Bladder_Cancer)
vars_to_run = c(NA, 'Gender', 'Clinical_Stage_Grouped', 'PT0N0', 'Any_Downstaging')
purrr::map_dfr(vars_to_run, run_pretty_km_output, model_data = Bladder_Cancer,
time_in = 'Survival_Months', event_in = 'Vital_Status', event_level = 'Dead',
time_est = c(24,60), surv_est_prefix = 'Month', p_digits=5) \%>\%
select(Group, Level, everything())
}
|
## cachematrix.R: a pair of functions that cache the inverse of a matrix
## so that repeated calls to cacheSolve() do not recompute solve().
## makeCacheMatrix builds a special "matrix": a list of accessor functions
## (set/get/setInverse/getInverse) closed over the matrix `x` and its
## cached inverse.  Storing a new matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replace the stored matrix and invalidate any cached inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy (with a message) afterwards.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    # cache miss: compute, store for next time, then return
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
| /cachematrix.R | no_license | mahmoudsaeed99/ProgrammingAssignment2 | R | false | false | 960 | r | ## Put comments here that give an overall description of what your
## functions do: they cache the inverse of a matrix so repeated solves are avoided.
## makeCacheMatrix builds a special "matrix": a list of accessor functions
## (set/get/setInverse/getInverse) closed over the matrix `x` and its
## cached inverse.  Storing a new matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replace the stored matrix and invalidate any cached inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy (with a message) afterwards.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    # cache miss: compute, store for next time, then return
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
source('getDataSetFunction.R')
# getDataSet function is defined on the file 'getDataSetFunction'
data <- getDataSet()
# plot 4: four panels written to plot4.png
png(file='plot4.png', width = 480, height = 480, units = 'px')
# Indicates that the plot will be formed as a 2x2 table (filled row-wise)
par(mfrow = c(2,2))
# top left plot: global active power over time
plot(data$timestamp, data$Global_active_power, type = 'l', ylab='Global Active Power', xlab = "")
# top right plot: voltage over time
plot(data$timestamp, data$Voltage, type = 'l', ylab='Voltage', xlab = "datetime")
# bottom left plot: the three sub-metering series plus a borderless legend
plot(data$timestamp, data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(data$timestamp, data$Sub_metering_2, type = 'l', col='red')
lines(data$timestamp, data$Sub_metering_3, type = 'l', col='blue')
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1, 1, 1), col = c('black', 'red','blue'), bty = "n")
# bottom right plot: global reactive power over time
plot(data$timestamp, data$Global_reactive_power, type = 'l', ylab='Global_reactive_power', xlab = "datetime")
dev.off() | /plot4.R | no_license | pig4tti/ExData_Plotting1 | R | false | false | 1,008 | r | source('getDataSetFunction.R')
# getDataSet function is defined on the file 'getDataSetFunction'
data <- getDataSet()
# plot 4: four panels written to plot4.png
png(file='plot4.png', width = 480, height = 480, units = 'px')
# Indicates that the plot will be formed as a 2x2 table (filled row-wise)
par(mfrow = c(2,2))
# top left plot: global active power over time
plot(data$timestamp, data$Global_active_power, type = 'l', ylab='Global Active Power', xlab = "")
# top right plot: voltage over time
plot(data$timestamp, data$Voltage, type = 'l', ylab='Voltage', xlab = "datetime")
# bottom left plot: the three sub-metering series plus a borderless legend
plot(data$timestamp, data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(data$timestamp, data$Sub_metering_2, type = 'l', col='red')
lines(data$timestamp, data$Sub_metering_3, type = 'l', col='blue')
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1, 1, 1), col = c('black', 'red','blue'), bty = "n")
# bottom right plot: global reactive power over time
plot(data$timestamp, data$Global_reactive_power, type = 'l', ylab='Global_reactive_power', xlab = "datetime")
# close the png device, flushing plot4.png to disk
dev.off()
# to run: R CMD BATCH fullSimScript.R &
# Runs the full SCCA simulation in parallel over num.runs1 simulated
# datasets and writes the per-run results plus a timing record to disk.
date()
library(foreach)
library(doParallel)
source("Final_funcs/build_B.R")
source("Final_funcs/sim_setup.R")
source("Final_funcs/Cov_suped.R")
source("Final_funcs/sample_sigma12_function.R")
source("Final_funcs/scca_function.R")
source("Final_funcs/scca_CVperm.R")
source("Final_funcs/result_helpers.R")
source("Final_funcs/determine_true_vals.R")
source("Final_funcs/results.R")
source("Final_funcs/interpret_results_curveonly.R")
start <- date()
start <- strptime(start, "%a %b %d %H:%M:%S %Y")
## We first set the parameters for running in parallel as well as the
## population parameter set-up
num.cluster1 = 25            # number of parallel worker processes
num.runs1 = 4*num.cluster1   # number of simulated datasets
k = 1
p = 500
q = 1000
Btype = 2
num.obs = 50
n.pair = 10 # should be at least 10
nperm=100
# cutoff.perc tells where to cutoff for permutation values
cutoff.perc = 0.9
cor.suped = .2 # the cor of internal X and internal Y
noise = "t"
# options are clean, t, sym, and asym (with t, sym, and asym you need noise.level)
# t uses df=2, we might want a lower df? 1?
B <- build.B(k,p,q,Btype)
#set.seed(47)
# (An unused placeholder list `run1` from an earlier draft was removed;
# the foreach loop below collects results directly via .combine.)
c1 <- makeCluster(num.cluster1)
registerDoParallel(c1)
## This loop runs the entire simulation in parallel for each dataset.
results.sim <- foreach(i=1:num.runs1, .combine='rbind') %dopar%{
  library(mvnfast)
  simdata = sim.setup(num.obs, B, var.cor=cor.suped, noisetype = noise, Btype=Btype)
  sim.output = scca.CVperm(simdata, n.pair, nperm)
  # using the permuted correlations to create a curve to determine significance cutoffs
  perm.cor.s = sim.output$perm.cor.s
  perm.s.curve = apply(perm.cor.s, 2, quantile, probs=cutoff.perc)
  perm.cor.p = sim.output$perm.cor.p
  perm.p.curve = apply(perm.cor.p, 2, quantile, probs=cutoff.perc)
  # mapping new output to the same form as the previous output
  res.s = list()
  res.s$sp.coef.u = data.frame(matrix(unlist(sim.output$alphas.s), nrow=length(sim.output$alphas.s[[1]]), byrow=F))
  res.s$sp.coef.v = data.frame(matrix(unlist(sim.output$betas.s), nrow=length(sim.output$betas.s[[1]]), byrow=F))
  res.s$sp.cor = sim.output$cor.test.s
  res.p = list()
  res.p$sp.coef.u = data.frame(matrix(unlist(sim.output$alphas.p), nrow=length(sim.output$alphas.p[[1]]), byrow=F))
  res.p$sp.coef.v = data.frame(matrix(unlist(sim.output$betas.p), nrow=length(sim.output$betas.p[[1]]), byrow=F))
  res.p$sp.cor = sim.output$cor.test.p
  # counting false positives, false negatives, etc.
  output.s <- results(res.s, B, n.pair)
  output.p <- results(res.p, B, n.pair)
  c( interpret.results.curve(output.s, perm.s.curve ),
     interpret.results.curve(output.p, perm.p.curve), sim.output$lambda1.s, sim.output$lambda1.p)
}
# Release the worker processes now that the parallel section is finished
# (the original script leaked the cluster).
stopCluster(c1)
fname = paste("secondsimB",Btype,".n",num.obs,".p",p,".q",q,".",noise,".txt",sep="")
write.table(results.sim, file=fname, row.names=F, quote=F, col.names=F, sep="\t")
end1 <- date()
end1 <- strptime(end1, "%a %b %d %H:%M:%S %Y")
dif1 <- as.numeric(difftime(end1,start,units="mins")) # how long the first loop takes, in minutes
# NOTE(review): dif1 is written twice below (kept as-is to preserve the
# times.txt format); confirm whether the second column was meant to be a
# different timing value.
write.table(cbind(dif1, dif1, fname), file="times.txt", row.names=F, col.names=F, quote=F, sep="\t", append=T)
| /fullSimScript.R | no_license | hardin47/rmscca | R | false | false | 3,255 | r |
# to run: R CMD BATCH fullSimScript.R &
# Simulation-study driver: generates SCCA data sets in parallel, fits sparse
# CCA with two tuning strategies (suffix ".s" and ".p"), and scores each fit
# against permutation-based significance curves.
date()
library(foreach)
library(doParallel)
source("Final_funcs/build_B.R")
source("Final_funcs/sim_setup.R")
source("Final_funcs/Cov_suped.R")
source("Final_funcs/sample_sigma12_function.R")
source("Final_funcs/scca_function.R")
source("Final_funcs/scca_CVperm.R")
source("Final_funcs/result_helpers.R")
source("Final_funcs/determine_true_vals.R")
source("Final_funcs/results.R")
source("Final_funcs/interpret_results_curveonly.R")
start <- date()
start <- strptime(start, "%a %b %d %H:%M:%S %Y")
## We first set the parameters for running in parallel as well as the
## population parameter set-up
num.cluster1 = 25
num.runs1 = 4*num.cluster1
k = 1
p = 500
q = 1000
Btype = 2
num.obs = 50
n.pair = 10 # should be at least 10
nperm=100
# cutoff.perc tells where to cutoff for permutation values
cutoff.perc = 0.9
cor.suped = .2 # the cor of internal X and internal Y
noise = "t"
# options are clean, t, sym, and asym (with t, sym, and asym you need noise.level)
# t uses df=2, we might want a lower df? 1?
B <- build.B(k,p,q,Btype)
#set.seed(47)
run1 <- list()
length(run1)<- num.runs1 #need this val
# Spin up the worker pool used by %dopar% below.
c1 <- makeCluster(num.cluster1)
registerDoParallel(c1)
## This loop runs the entire simulation in parallel for each dataset.
# Each iteration returns one row; rbind stacks them into results.sim.
results.sim <- foreach(i=1:num.runs1, .combine='rbind') %dopar%{
library(mvnfast)
simdata = sim.setup(num.obs, B, var.cor=cor.suped, noisetype = noise, Btype=Btype)
sim.output = scca.CVperm(simdata, n.pair, nperm)
# using the permuted correlations to create a curve to determine significance cutoffs
perm.cor.s = sim.output$perm.cor.s
perm.s.curve = apply(perm.cor.s, 2, quantile, probs=cutoff.perc)
perm.cor.p = sim.output$perm.cor.p
perm.p.curve = apply(perm.cor.p, 2, quantile, probs=cutoff.perc)
# mapping new output to the same form as the previous output
res.s = list()
res.s$sp.coef.u = data.frame(matrix(unlist(sim.output$alphas.s), nrow=length(sim.output$alphas.s[[1]]), byrow=F))
res.s$sp.coef.v = data.frame(matrix(unlist(sim.output$betas.s), nrow=length(sim.output$betas.s[[1]]), byrow=F))
res.s$sp.cor = sim.output$cor.test.s
res.p = list()
res.p$sp.coef.u = data.frame(matrix(unlist(sim.output$alphas.p), nrow=length(sim.output$alphas.p[[1]]), byrow=F))
res.p$sp.coef.v = data.frame(matrix(unlist(sim.output$betas.p), nrow=length(sim.output$betas.p[[1]]), byrow=F))
res.p$sp.cor = sim.output$cor.test.p
# counting false positives, false negatives, etc.
output.s <- results(res.s, B, n.pair)
output.p <- results(res.p, B, n.pair)
# Row returned by this iteration: scores for both strategies plus their
# first tuning parameters.
c( interpret.results.curve(output.s, perm.s.curve ),
interpret.results.curve(output.p, perm.p.curve), sim.output$lambda1.s, sim.output$lambda1.p)
}
# Persist results; file name encodes the run parameters.
fname = paste("secondsimB",Btype,".n",num.obs,".p",p,".q",q,".",noise,".txt",sep="")
write.table(results.sim, file=fname, row.names=F, quote=F, col.names=F, sep="\t")
end1 <- date()
end1 <- strptime(end1, "%a %b %d %H:%M:%S %Y")
dif1 <- as.numeric(difftime(end1,start,units="mins")) # how long the first loop takes, in minutes
write.table(cbind(dif1, dif1, fname), file="times.txt", row.names=F, col.names=F, quote=F, sep="\t", append=T)
|
# Auto-generated fuzzer input for distr6's internal C routine: "data" is a
# 1x1 zero matrix, "x" a 4x3 matrix of extreme doubles (column-major fill).
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61349105591925e+281, 0, 0, 0, 0, 0), .Dim = 4:3))
# Invoke the non-exported C pdf implementation with the generated arguments.
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610035816-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 330 | r | testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61276249046606e+281, 9.61349105591925e+281, 0, 0, 0, 0, 0), .Dim = 4:3))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) |
library(leaflet)
library(RColorBrewer)
library(lattice)
library(dplyr)
library(scales)
library(shiny)
library(tidyverse)
library(leaflet.extras)
library(DT)
library(shiny)
library(viridisLite)
library(wordcloud)
library(wordcloud2)
library(textdata)
library(tidytext)
# Pre-processed Yelp extracts read once at app startup.
business <- read.csv("business.csv")
att <- read.csv("attribute.csv")
# Numeric attribute table for the PCA tab (state is categorical, so drop it).
att1 <- select(att, -state)
review <- read.csv("review.csv")
# Server logic for the Yelp restaurant explorer: an interactive leaflet map,
# a filterable table, PCA of attributes, category preferences per state, and
# word-cloud / sentiment views of 5-star review text.
shinyServer(function(input, output){
  ###create the map
  # One viridis colour per city in the business table.
  pal <- colorFactor(
    palette = viridis(100),
    domain = business$city)
  # Base map: a circle per restaurant; hover label shows its name.
  output$map <- renderLeaflet({
    leaflet(business)%>%
      addTiles() %>%
      setView(lng = -80.85, lat = 39.5, zoom = 7) %>%
      addCircles(data = business, lat = ~ latitude, lng = ~ longitude, weight = 1, radius = 100,
                 label = ~as.character(paste0("Restaurant: ", sep = " ", name)), labelOptions = labelOptions(textsize = "20px"), color = ~pal(city), fillOpacity = 0.5)
  })
  # The three observers below redraw the "Markers" group on the existing map
  # whenever the restaurant-name, state, or star-rating input changes.
  observeEvent(input$Res, {
    leafletProxy("map")%>%
      clearGroup("Markers") %>%
      addMarkers(data = business[business$name == input$Res, ], ~longitude, ~latitude, group = "Markers")
  })
  observeEvent(input$state1, {
    leafletProxy("map")%>%
      clearGroup("Markers") %>%
      addMarkers(data = business[business$state == input$state1, ], ~longitude, ~latitude, group = "Markers")
  })
  observeEvent(input$star, {
    leafletProxy("map", data = business[business$stars == input$star, ])%>%
      clearGroup("Markers") %>%
      addMarkers( ~longitude, ~latitude, group = "Markers")
  })
  ###### business exploration
  # Filterable data table; selecting "All" in a control disables that filter.
  output$inf <- DT::renderDataTable(DT::datatable({
    data <- business
    if(input$Re != "All"){
      data <- data[data$name == input$Re, ]
    }
    if(input$stat != "All"){
      data <- data[data$state == input$stat, ]
    }
    if(input$sta != "All"){
      data <- data[data$stars == input$sta, ]
    }
    data
  }))
  #####EDA
  # output$eda <- renderPlotly({
  #   ggplotly(
  #     ggplot(att) +
  #       geom_point(aes(x=stars, y=reviewcount, color=state), stat = "identity",alpha=0.3) +
  #       facet_grid(.~state) +scale_y_log10()
  #   )
  # })
  ####PCA
  # Varimax-rotated principal components of the numeric attribute table.
  # NOTE(review): principal()/fa.diagram() come from the psych package, which
  # is not attached in the preamble above -- confirm it is loaded elsewhere.
  pc3 <- reactive({
    principal(att1, nfactors = input$factor, rotate = "varimax")
  })
  output$result <- renderPlot({
    fa.diagram(pc3(),simple=TRUE)
  })
  ##### Explore preference
  # Most common restaurant category per state, among categories with > 10
  # businesses. NOTE(review): renderPlotly/ggplotly require the plotly
  # package, not attached above -- confirm it is loaded elsewhere.
  output$explore <- renderPlotly({
    a <- business %>% filter(str_detect(categories, "Restaurant")) %>%
      unnest(categories) %>%
      filter(categories != "Restaurants") %>%
      count(state, categories) %>%
      filter(n > 10) %>%
      group_by(state) %>%
      top_n(1, n)
    a$categories[a$categories == "Restaurants, Pizza"] <- "Pizza, Restaurants"
    ggplotly(ggplot(a, aes(x=state, y=n, fill=categories)) +
               geom_bar(stat = "identity") +
               labs(y="Number of restaurants"))
  })
  ######Wordcloud
  # Bigrams of 5-star reviews, split into word pairs, precomputed per state
  # once at session start.
  PA <- subset(review, stars > 4 & state == 'PA')
  PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
  seperate2 <- PAr %>%
    count(bigram, sort = TRUE) %>%
    separate(bigram, c("word1", "word2"), sep = " ")
  OH <- subset(review, stars > 4 & state == 'OH')
  OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
  seperate3 <- OHr %>%
    count(bigram, sort = TRUE) %>%
    separate(bigram, c("word1", "word2"), sep = " ")
  # Word cloud of the top input$fre stop-word-free bigrams for the state the
  # user toggled (the branches recompute the bigrams defined above).
  output$wordcloud <- renderWordcloud2({
    if(input$pa){
      PA <- subset(review, stars > 4 & state == 'PA')
      PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
      seperate2 <- PAr %>%
        count(bigram, sort = TRUE) %>%
        separate(bigram, c("word1", "word2"), sep = " ")
      unite2 <- seperate2 %>%
        filter(!word1 %in% stop_words$word) %>% #remove cases where either is a stop-word.
        filter(!word2 %in% stop_words$word) %>%
        unite(bigram, word1, word2, sep = " ") %>%
        head(input$fre)
      wordcloud2(unite2, shape = 'circle' ,color = "random-light", size = 0.3, backgroundColor = "white")
    }
    else if(input$oh){
      OH <- subset(review, stars > 4 & state == 'OH')
      OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
      seperate3 <- OHr %>%
        count(bigram, sort = TRUE) %>%
        separate(bigram, c("word1", "word2"), sep = " ")
      unite3 <- seperate3 %>%
        filter(!word1 %in% stop_words$word) %>% #remove cases where either is a stop-word.
        filter(!word2 %in% stop_words$word) %>%
        unite(bigram, word1, word2, sep = " ") %>%
        head(input$fre)
      wordcloud2(unite3, shape = "circle",color = "random-light", size = 0.3, backgroundColor = "white")
    }
  })
  ######Sentiment Analysis
  # Contribution (AFINN value * count) of words that follow a negation word,
  # so phrases like "not good" surface as negative evidence.
  output$sentiment <- renderPlot({
    Afinn <- get_sentiments("afinn")
    negation_words <- c("not", "no", "never", "without","none","bad")
    if(input$state == "PA"){
      # PA <- subset(review, stars > 4 & state == 'PA')
      # PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
      # seperate2 <- PAr %>%
      #   count(bigram, sort = TRUE) %>%
      #   separate(bigram, c("word1", "word2"), sep = " ")
      not_words <- seperate2 %>%
        filter(word1 %in% negation_words) %>%
        inner_join(Afinn, by = c(word2 = "word")) %>%
        count(word1,word2, value, sort = TRUE)
      not_words %>%
        mutate(contribution = n * value) %>%
        arrange(desc(abs(contribution))) %>%
        head(20) %>%
        mutate(word2 = reorder(word2, contribution)) %>%
        ggplot(aes(word2, n * value, fill = n * value > 0)) +
        geom_col(show.legend = FALSE) +
        xlab("Words preceded by \"not,no,never,without,none and bad\"") +
        ylab("Sentiment value * number of occurrences in state PA") +
        coord_flip()
    }
    else if(input$state == "OH"){
      # OH <- subset(review, stars > 4 & state == 'OH')
      # OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
      # seperate3 <- OHr %>%
      #   count(bigram, sort = TRUE) %>%
      #   separate(bigram, c("word1", "word2"), sep = " ")
      not_words1 <- seperate3 %>%
        filter(word1 %in% negation_words) %>%
        inner_join(Afinn, by = c(word2 = "word")) %>%
        count(word1,word2, value, sort = TRUE)
      not_words1 %>%
        mutate(contribution = n * value) %>%
        arrange(desc(abs(contribution))) %>%
        head(20) %>%
        mutate(word2 = reorder(word2, contribution)) %>%
        ggplot(aes(word2, n * value, fill = n * value > 0)) +
        geom_col(show.legend = FALSE) +
        xlab("Words preceded by \"not,no,never,without,none and bad\"") +
        ylab("Sentiment value * number of occurrences in state OH") +
        coord_flip()
    }
  })
  ###### sentiment compare
  # Comparison cloud of positive vs negative second words (Bing lexicon).
  # NOTE(review): acast() is from reshape2, which is not attached above --
  # confirm it is loaded elsewhere.
  output$compare <- renderPlot({
    if(input$state2 == "PA"){
      seperate2 %>%
        inner_join(get_sentiments("bing"), by=c(word2="word")) %>%
        count(word1, word2, sentiment, sort = TRUE) %>%
        acast(word2 ~ sentiment, value.var = "n", fill = 0) %>%
        comparison.cloud(colors = c("blue", "red"),
                         max.words = 50)
    }
    else if(input$state2 == "OH"){
      # OH <- subset(review, stars > 4 & state == 'OH')
      # OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
      # seperate3 <- OHr %>%
      #   count(bigram, sort = TRUE) %>%
      #   separate(bigram, c("word1", "word2"), sep = " ")
      seperate3 %>%
        inner_join(get_sentiments("bing"), by=c(word2="word")) %>%
        count(word1, word2, sentiment, sort = TRUE) %>%
        acast(word2 ~ sentiment, value.var = "n", fill = 0) %>%
        comparison.cloud(colors = c("blue", "red"),
                         max.words = 50)
    }
  })
})
| /server.R | no_license | JingningYang/615-Final | R | false | false | 8,727 | r | library(leaflet)
library(RColorBrewer)
library(lattice)
library(dplyr)
library(scales)
library(shiny)
library(tidyverse)
library(leaflet.extras)
library(DT)
library(shiny)
library(viridisLite)
library(wordcloud)
library(wordcloud2)
library(textdata)
library(tidytext)
business <- read.csv("business.csv")
att <- read.csv("attribute.csv")
att1 <- select(att, -state)
review <- read.csv("review.csv")
shinyServer(function(input, output){
###create the map
pal <- colorFactor(
palette = viridis(100),
domain = business$city)
output$map <- renderLeaflet({
leaflet(business)%>%
addTiles() %>%
setView(lng = -80.85, lat = 39.5, zoom = 7) %>%
addCircles(data = business, lat = ~ latitude, lng = ~ longitude, weight = 1, radius = 100,
label = ~as.character(paste0("Restaurant: ", sep = " ", name)), labelOptions = labelOptions(textsize = "20px"), color = ~pal(city), fillOpacity = 0.5)
})
observeEvent(input$Res, {
leafletProxy("map")%>%
clearGroup("Markers") %>%
addMarkers(data = business[business$name == input$Res, ], ~longitude, ~latitude, group = "Markers")
})
observeEvent(input$state1, {
leafletProxy("map")%>%
clearGroup("Markers") %>%
addMarkers(data = business[business$state == input$state1, ], ~longitude, ~latitude, group = "Markers")
})
observeEvent(input$star, {
leafletProxy("map", data = business[business$stars == input$star, ])%>%
clearGroup("Markers") %>%
addMarkers( ~longitude, ~latitude, group = "Markers")
})
###### business exploration
output$inf <- DT::renderDataTable(DT::datatable({
data <- business
if(input$Re != "All"){
data <- data[data$name == input$Re, ]
}
if(input$stat != "All"){
data <- data[data$state == input$stat, ]
}
if(input$sta != "All"){
data <- data[data$stars == input$sta, ]
}
data
}))
#####EDA
# output$eda <- renderPlotly({
# ggplotly(
# ggplot(att) +
# geom_point(aes(x=stars, y=reviewcount, color=state), stat = "identity",alpha=0.3) +
# facet_grid(.~state) +scale_y_log10()
# )
# })
####PCA
pc3 <- reactive({
principal(att1, nfactors = input$factor, rotate = "varimax")
})
output$result <- renderPlot({
fa.diagram(pc3(),simple=TRUE)
})
##### Explore preference
output$explore <- renderPlotly({
a <- business %>% filter(str_detect(categories, "Restaurant")) %>%
unnest(categories) %>%
filter(categories != "Restaurants") %>%
count(state, categories) %>%
filter(n > 10) %>%
group_by(state) %>%
top_n(1, n)
a$categories[a$categories == "Restaurants, Pizza"] <- "Pizza, Restaurants"
ggplotly(ggplot(a, aes(x=state, y=n, fill=categories)) +
geom_bar(stat = "identity") +
labs(y="Number of restaurants"))
})
######Wordcloud
PA <- subset(review, stars > 4 & state == 'PA')
PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
seperate2 <- PAr %>%
count(bigram, sort = TRUE) %>%
separate(bigram, c("word1", "word2"), sep = " ")
OH <- subset(review, stars > 4 & state == 'OH')
OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
seperate3 <- OHr %>%
count(bigram, sort = TRUE) %>%
separate(bigram, c("word1", "word2"), sep = " ")
output$wordcloud <- renderWordcloud2({
if(input$pa){
PA <- subset(review, stars > 4 & state == 'PA')
PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
seperate2 <- PAr %>%
count(bigram, sort = TRUE) %>%
separate(bigram, c("word1", "word2"), sep = " ")
unite2 <- seperate2 %>%
filter(!word1 %in% stop_words$word) %>% #remove cases where either is a stop-word.
filter(!word2 %in% stop_words$word) %>%
unite(bigram, word1, word2, sep = " ") %>%
head(input$fre)
wordcloud2(unite2, shape = 'circle' ,color = "random-light", size = 0.3, backgroundColor = "white")
}
else if(input$oh){
OH <- subset(review, stars > 4 & state == 'OH')
OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
seperate3 <- OHr %>%
count(bigram, sort = TRUE) %>%
separate(bigram, c("word1", "word2"), sep = " ")
unite3 <- seperate3 %>%
filter(!word1 %in% stop_words$word) %>% #remove cases where either is a stop-word.
filter(!word2 %in% stop_words$word) %>%
unite(bigram, word1, word2, sep = " ") %>%
head(input$fre)
wordcloud2(unite3, shape = "circle",color = "random-light", size = 0.3, backgroundColor = "white")
}
})
######Sentiment Analysis
output$sentiment <- renderPlot({
Afinn <- get_sentiments("afinn")
negation_words <- c("not", "no", "never", "without","none","bad")
if(input$state == "PA"){
# PA <- subset(review, stars > 4 & state == 'PA')
# PAr <- PA %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
# seperate2 <- PAr %>%
# count(bigram, sort = TRUE) %>%
# separate(bigram, c("word1", "word2"), sep = " ")
not_words <- seperate2 %>%
filter(word1 %in% negation_words) %>%
inner_join(Afinn, by = c(word2 = "word")) %>%
count(word1,word2, value, sort = TRUE)
not_words %>%
mutate(contribution = n * value) %>%
arrange(desc(abs(contribution))) %>%
head(20) %>%
mutate(word2 = reorder(word2, contribution)) %>%
ggplot(aes(word2, n * value, fill = n * value > 0)) +
geom_col(show.legend = FALSE) +
xlab("Words preceded by \"not,no,never,without,none and bad\"") +
ylab("Sentiment value * number of occurrences in state PA") +
coord_flip()
}
else if(input$state == "OH"){
# OH <- subset(review, stars > 4 & state == 'OH')
# OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
# seperate3 <- OHr %>%
# count(bigram, sort = TRUE) %>%
# separate(bigram, c("word1", "word2"), sep = " ")
not_words1 <- seperate3 %>%
filter(word1 %in% negation_words) %>%
inner_join(Afinn, by = c(word2 = "word")) %>%
count(word1,word2, value, sort = TRUE)
not_words1 %>%
mutate(contribution = n * value) %>%
arrange(desc(abs(contribution))) %>%
head(20) %>%
mutate(word2 = reorder(word2, contribution)) %>%
ggplot(aes(word2, n * value, fill = n * value > 0)) +
geom_col(show.legend = FALSE) +
xlab("Words preceded by \"not,no,never,without,none and bad\"") +
ylab("Sentiment value * number of occurrences in state OH") +
coord_flip()
}
})
###### sentiment compare
output$compare <- renderPlot({
if(input$state2 == "PA"){
seperate2 %>%
inner_join(get_sentiments("bing"), by=c(word2="word")) %>%
count(word1, word2, sentiment, sort = TRUE) %>%
acast(word2 ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("blue", "red"),
max.words = 50)
}
else if(input$state2 == "OH"){
# OH <- subset(review, stars > 4 & state == 'OH')
# OHr <- OH %>% unnest_tokens(bigram, text, token = "ngrams", n = 2)
# seperate3 <- OHr %>%
# count(bigram, sort = TRUE) %>%
# separate(bigram, c("word1", "word2"), sep = " ")
seperate3 %>%
inner_join(get_sentiments("bing"), by=c(word2="word")) %>%
count(word1, word2, sentiment, sort = TRUE) %>%
acast(word2 ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("blue", "red"),
max.words = 50)
}
})
})
|
# roxygen2 namespace directives: import selected helpers from 'dat', the
# magrittr pipe, two stats generics, and all of 'methods'. The trailing NULL
# gives roxygen an object to attach the block to; nothing is evaluated.
#' @importFrom dat flatmap map extract extract2
#' @importFrom magrittr %>%
#' @importFrom stats update as.formula
#' @import methods
NULL
| /R/NAMESPACE.R | no_license | billdenney/templates | R | false | false | 139 | r | #' @importFrom dat flatmap map extract extract2
#' @importFrom magrittr %>%
#' @importFrom stats update as.formula
#' @import methods
NULL
|
# Example script extracted from the cocktailApp help page. The app launch is
# inside a "Not run" guard, so sourcing this file only attaches the package.
library(cocktailApp)
### Name: cocktailApp
### Title: cocktailApp .
### Aliases: cocktailApp
### Keywords: shiny
### ** Examples
## Not run:
##D cocktailApp()
## End(Not run)
| /data/genthat_extracted_code/cocktailApp/examples/cocktailApp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 183 | r | library(cocktailApp)
### Name: cocktailApp
### Title: cocktailApp .
### Aliases: cocktailApp
### Keywords: shiny
### ** Examples
## Not run:
##D cocktailApp()
## End(Not run)
|
#' Drop constant columns
#'
#' Removes every column of `df` that holds only a single unique value.
#'
#' @param df A data frame.
#' @return `df` restricted to its non-constant columns; always a data frame,
#'   even when only one column survives.
remove_uniq_cols <- function(df) {
  # vapply over the columns avoids apply()'s coercion of the whole data
  # frame to a (possibly character) matrix before comparing values.
  keep <- vapply(df, function(col) length(unique(col)) != 1, logical(1))
  # drop = FALSE keeps the result a data.frame instead of collapsing a
  # single remaining column to a bare vector.
  df[, keep, drop = FALSE]
}
# Download the Johns Hopkins CSSE global time-series CSVs (confirmed,
# deaths, recovered), melt them to long form, then cast back to one row per
# date/country/province with one column per case type plus an "active"
# column (confirmed - recovered - deaths). Requires network access.
read_jh_ts <- function() {
  file_names <- c("confirmed", "deaths", "recovered")
  url <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/"
  url <- paste0(url, "master/csse_covid_19_data/csse_covid_19_time_series/")
  url <- paste0(url, "time_series_covid19_%s_global.csv")
  # One long data frame per case type: the per-date columns become rows.
  dfl <- lapply(file_names, function(f) {
    df <- read.csv(sprintf(url, f), stringsAsFactors = FALSE,
                   strip.white = TRUE, na.strings = "")
    df <- reshape2::melt(df, measure.vars = colnames(df)[-(1:4)],
                         variable.name = "date", value.name = "cases")
    df$type <- f
    return(df)
  })
  df <- do.call(rbind, dfl)
  colnames(df) <- tolower(colnames(df))
  colnames(df) <- gsub(".", "_", colnames(df), fixed = TRUE)
  # Melted date headers look like "X1.22.20".
  df$date <- as.Date(df$date, "X%m.%d.%y")
  # substring(df$type, 1) <- toupper(substring(df$type, 1, 1))
  df[,c(1,2,7)] <- lapply(df[,c(1,2,7)], factor)
  df <- df[,c("date", "country_region", "province_state", "lat", "long",
              "type", "cases")]
  df <- with(df, df[order(country_region, province_state, date, type),])
  # Wide form: one column per case type.
  df <- reshape2::dcast(df, date + country_region + province_state ~ type,
                        value.var = "cases")
  df$active <- with(df, confirmed - recovered - deaths)
  # Zeros in "recovered" are turned into NA -- presumably meaning
  # "unreported" in this feed; confirm against the upstream data notes.
  df$recovered[which(df$recovered == 0)] <- NA
  # NOTE(review): is.finite() is FALSE for non-numeric columns; verify this
  # does not blank the factor/character columns of the result.
  df[!sapply(df, is.finite)] <- NA
  return(df)
}
# Download the Johns Hopkins daily-report CSVs for every date in
# [from, to], harmonise their changing column layouts, back-fill missing
# coordinates from any day that reports them, and collapse duplicates to one
# row per date/location with the maximum reported counts. Returns NULL when
# no file could be fetched. Requires network access.
read_jh_daily <- function(from = "2020-01-22",
                          to = as.character(Sys.Date())) {
  # Canonical column order used for every daily file after normalisation.
  cn <- c("date", "fips", "country_region", "province_state", "admin2",
          "lat", "long", "confirmed", "deaths", "recovered", "active")
  from <- as.Date(from)
  to <- as.Date(to)
  url <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/"
  url <- paste0(url, "master/csse_covid_19_data/csse_covid_19_daily_reports/")
  url <- paste0(url, "%s.csv")
  # Fetch one CSV per day; failed downloads yield NULL and are dropped below.
  df <- lapply(strftime(seq.Date(from, to, 1), format = "%m-%d-%Y"),
               function(dt) {
                 cat("read: ", dt, "\n")
                 tryCatch({read.csv(sprintf(url, dt), stringsAsFactors = FALSE,
                                    strip.white = TRUE, na.strings = "")},
                          error = function(e){}, warning = function(w){})
               })
  df[sapply(df, is.null)] <- NULL
  if (length(df) == 0) return(NULL)
  # Normalise each day's frame: lower-case names, unified lat/long/date
  # column names, add columns that older report layouts lack, and parse the
  # date from whichever of the three historical timestamp formats it uses.
  df <- lapply(df, function(x) {
    colnames(x) <- tolower(colnames(x))
    colnames(x) <- gsub(".", "_", colnames(x), fixed = TRUE)
    colnames(x)[startsWith(colnames(x), "lat")] <- "lat"
    colnames(x)[startsWith(colnames(x), "long")] <- "long"
    colnames(x)[startsWith(colnames(x), "last")] <- "date"
    if (!("fips" %in% colnames(x))) x$fips <- NA
    if (!("admin2" %in% colnames(x))) x$admin2 <- NA
    if (!("active" %in% colnames(x))) x$active <- NA
    if (!("lat" %in% colnames(x))) x$lat <- NA
    if (!("long" %in% colnames(x))) x$long <- NA
    if ("combined_key" %in% colnames(x)) x <- subset(x, select = -combined_key)
    x <- x[,cn]
    if (all(grepl("/", x$date, fixed = TRUE))) {
      fmt <- "%m/%d/%Y %H:%M"
    } else if (all(grepl("T", x$date, fixed = TRUE))) {
      fmt <- "%Y-%m-%dT%H:%M:%S"
    } else {
      fmt <- "%Y-%m-%d %H:%M:%S"
    }
    x$date <- as.POSIXct(as.POSIXlt(x$date, "UTC", fmt))
    x$province_state[x$province_state == "None"] <- NA
    return(x)
  })
  # Build a lookup of unique locations, preferring a row that actually has
  # coordinates when the same place appears with and without them.
  loc <- do.call(rbind, lapply(df, function(x) x[c(2:7)]))
  loc <- loc[!duplicated(loc),]
  by <- lapply(as.list(loc[,1:4]), factor, exclude = NULL)
  a <- aggregate(1:nrow(loc), by, function(i) {
    x <- loc[i,, drop = FALSE]
    i <- which(!(is.na(x$lat) & is.na(x$long)))
    if (length(i) > 0) {
      y <- x[i,, drop = FALSE]
      x <- y[1,, drop = FALSE]
    }
    return(x)
  }, simplify = FALSE)
  loc <- do.call(rbind, a$x)
  df2 <- do.call(rbind, df)
  # df2 <- merge(df2, loc)
  # df2 <- df2[,cn]
  # Back-fill lat/long by matching on the concatenated location key columns.
  x <- apply(df2[,2:5], 1, paste0, collapse = "")
  y <- apply(loc[,1:4], 1, paste0, collapse = ""); names(y) <- NULL
  for (i in y) {
    df2[which(i == x), "lat"] <- loc[which(i == y), "lat"]
    df2[which(i == x), "long"] <- loc[which(i == y), "long"]
  }
  df2 <- with(df2, df2[order(country_region, province_state, admin2, date),])
  for (i in 3:5) df2[,i] <- factor(df2[,i])
  # Repair two-digit years ("20") that strptime parsed literally.
  lubridate::year(df2[lubridate::year(df2$date) == 20, 1]) <- 2020
  df2 <- df2[!duplicated(df2[,-(6:7)]),]
  # Collapse remaining duplicate date/location rows to the maximum of each
  # count column, ignoring NAs unless every value is NA.
  by <- lapply(df2[,c(1:7)], factor, exclude = NULL)
  a <- aggregate(1:nrow(df2), by, function(i) {
    x <- df2[i, 8:11]
    if (length(i) > 1) {
      return(apply(x, 2, function(r) {
        if (all(is.na(r))) return(NA)
        return(max(r, na.rm = TRUE))
      }))
    }
    return(x)
  })
  # aggregate() returns factors; restore the proper column types.
  b <- t(apply(a$x, 1, unlist))
  b <- cbind(a[,1:7], b)
  b$date <- as.POSIXct(b$date)
  b$fips <- as.integer(as.character(b$fips))
  b$lat <- as.numeric(as.character(b$lat))
  b$long <- as.numeric(as.character(b$long))
  b$province_state <- factor(b$province_state)
  b$admin2 <- factor(b$admin2)
  b <- with(b, b[order(country_region, province_state, admin2, date),])
  return(b)
}
# Read the coronavirus dataset from one of two community mirrors and
# normalise it to long form: one row per date/country/province/case-type,
# with duplicate records summed. Requires network access.
read_data <- function(from = c("dworld", "ramikrispin")) {
  from <- match.arg(from, c("dworld", "ramikrispin"))
  url <- switch(
    from,
    "dworld" = "https://query.data.world/s/igmopqfux3jq3omp6tl6fsabldvcnf",
    "ramikrispin" = "https://raw.githubusercontent.com/RamiKrispin/coronavirus-csv/master/coronavirus_dataset.csv")
  df <- read.csv(url, stringsAsFactors = FALSE, strip.white = TRUE)
  colnames(df) <- tolower(colnames(df))
  colnames(df) <- gsub(".", "_", colnames(df), fixed = TRUE)
  colnames(df) <- gsub("case_type", "type", colnames(df), fixed = TRUE)
  df <- remove_uniq_cols(df)
  df$province_state[df$province_state == "N/A"] <- ""
  df <- df[, c("date", "country_region", "province_state", "type", "cases",
               "lat", "long")]
  # handle duplicated records
  df <- df[!duplicated(df[,c("date", "country_region", "province_state", "type", "cases")]),]
  # Remaining duplicates differ only in "cases": sum them into one row.
  by <- df[, c("date", "country_region", "province_state", "type")]
  a <- aggregate(1:nrow(df), by, function(i) {
    df2 <- df[i,,drop = FALSE]
    if (nrow(df2) > 1) {
      df2[1,5] <- sum(df2[,5])
      return(df2[1,, drop = FALSE])
    } else return(df2)
  }, simplify = FALSE)
  df <- do.call(rbind, a$x)
  # Move "date" to the first column if it is not already there.
  i <- which(colnames(df) == "date")
  if (length(i) == 1 && i > 1) df <- cbind(df[,i, drop = FALSE], df[,-i])
  # The two mirrors use different date formats.
  if (from == "dworld") {
    df$date <- as.Date(df$date, "%m/%d/%Y")
  } else {
    df$date <- as.Date(df$date)
  }
  # Capitalise the first letter of the case type (e.g. "confirmed" -> "Confirmed").
  substring(df$type, 1) <- toupper(substring(df$type, 1, 1))
  df$country_region <- factor(df$country_region)
  df$province_state <- factor(df$province_state)
  df$type <- factor(df$type)
  df <- df[order(df$country_region, df$province_state, df$date, df$type),]
  rownames(df) <- NULL
  return(df)
}
#' Download Covid19 data
#'
#' Fetches the Johns Hopkins CSSE global time series via \code{read_jh_ts()}.
#'
#' @param from Data source identifier; only "jh" (Johns Hopkins) is
#'   currently supported.
#' @return A data.frame with one row per date/country/province and columns
#'   for confirmed, deaths, recovered and active counts.
#' @export
download.c19 <- function(from = "jh") {
  from <- match.arg(from, c("jh"))
  df <- read_jh_ts()
  return(df)
}
| /R/download.R | permissive | isezen/covid19data | R | false | false | 6,951 | r |
# Keep only the columns of `df` that take more than one distinct value.
remove_uniq_cols <- function(df) {
  varies <- apply(df, 2, function(column) length(unique(column)) != 1)
  df[, varies]
}
read_jh_ts <- function() {
file_names <- c("confirmed", "deaths", "recovered")
url <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/"
url <- paste0(url, "master/csse_covid_19_data/csse_covid_19_time_series/")
url <- paste0(url, "time_series_covid19_%s_global.csv")
dfl <- lapply(file_names, function(f) {
df <- read.csv(sprintf(url, f), stringsAsFactors = FALSE,
strip.white = TRUE, na.strings = "")
df <- reshape2::melt(df, measure.vars = colnames(df)[-(1:4)],
variable.name = "date", value.name = "cases")
df$type <- f
return(df)
})
df <- do.call(rbind, dfl)
colnames(df) <- tolower(colnames(df))
colnames(df) <- gsub(".", "_", colnames(df), fixed = TRUE)
df$date <- as.Date(df$date, "X%m.%d.%y")
# substring(df$type, 1) <- toupper(substring(df$type, 1, 1))
df[,c(1,2,7)] <- lapply(df[,c(1,2,7)], factor)
df <- df[,c("date", "country_region", "province_state", "lat", "long",
"type", "cases")]
df <- with(df, df[order(country_region, province_state, date, type),])
df <- reshape2::dcast(df, date + country_region + province_state ~ type,
value.var = "cases")
df$active <- with(df, confirmed - recovered - deaths)
df$recovered[which(df$recovered == 0)] <- NA
df[!sapply(df, is.finite)] <- NA
return(df)
}
read_jh_daily <- function(from = "2020-01-22",
to = as.character(Sys.Date())) {
cn <- c("date", "fips", "country_region", "province_state", "admin2",
"lat", "long", "confirmed", "deaths", "recovered", "active")
from <- as.Date(from)
to <- as.Date(to)
url <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/"
url <- paste0(url, "master/csse_covid_19_data/csse_covid_19_daily_reports/")
url <- paste0(url, "%s.csv")
df <- lapply(strftime(seq.Date(from, to, 1), format = "%m-%d-%Y"),
function(dt) {
cat("read: ", dt, "\n")
tryCatch({read.csv(sprintf(url, dt), stringsAsFactors = FALSE,
strip.white = TRUE, na.strings = "")},
error = function(e){}, warning = function(w){})
})
df[sapply(df, is.null)] <- NULL
if (length(df) == 0) return(NULL)
df <- lapply(df, function(x) {
colnames(x) <- tolower(colnames(x))
colnames(x) <- gsub(".", "_", colnames(x), fixed = TRUE)
colnames(x)[startsWith(colnames(x), "lat")] <- "lat"
colnames(x)[startsWith(colnames(x), "long")] <- "long"
colnames(x)[startsWith(colnames(x), "last")] <- "date"
if (!("fips" %in% colnames(x))) x$fips <- NA
if (!("admin2" %in% colnames(x))) x$admin2 <- NA
if (!("active" %in% colnames(x))) x$active <- NA
if (!("lat" %in% colnames(x))) x$lat <- NA
if (!("long" %in% colnames(x))) x$long <- NA
if ("combined_key" %in% colnames(x)) x <- subset(x, select = -combined_key)
x <- x[,cn]
if (all(grepl("/", x$date, fixed = TRUE))) {
fmt <- "%m/%d/%Y %H:%M"
} else if (all(grepl("T", x$date, fixed = TRUE))) {
fmt <- "%Y-%m-%dT%H:%M:%S"
} else {
fmt <- "%Y-%m-%d %H:%M:%S"
}
x$date <- as.POSIXct(as.POSIXlt(x$date, "UTC", fmt))
x$province_state[x$province_state == "None"] <- NA
return(x)
})
loc <- do.call(rbind, lapply(df, function(x) x[c(2:7)]))
loc <- loc[!duplicated(loc),]
by <- lapply(as.list(loc[,1:4]), factor, exclude = NULL)
a <- aggregate(1:nrow(loc), by, function(i) {
x <- loc[i,, drop = FALSE]
i <- which(!(is.na(x$lat) & is.na(x$long)))
if (length(i) > 0) {
y <- x[i,, drop = FALSE]
x <- y[1,, drop = FALSE]
}
return(x)
}, simplify = FALSE)
loc <- do.call(rbind, a$x)
df2 <- do.call(rbind, df)
# df2 <- merge(df2, loc)
# df2 <- df2[,cn]
x <- apply(df2[,2:5], 1, paste0, collapse = "")
y <- apply(loc[,1:4], 1, paste0, collapse = ""); names(y) <- NULL
for (i in y) {
df2[which(i == x), "lat"] <- loc[which(i == y), "lat"]
df2[which(i == x), "long"] <- loc[which(i == y), "long"]
}
df2 <- with(df2, df2[order(country_region, province_state, admin2, date),])
for (i in 3:5) df2[,i] <- factor(df2[,i])
lubridate::year(df2[lubridate::year(df2$date) == 20, 1]) <- 2020
df2 <- df2[!duplicated(df2[,-(6:7)]),]
by <- lapply(df2[,c(1:7)], factor, exclude = NULL)
a <- aggregate(1:nrow(df2), by, function(i) {
x <- df2[i, 8:11]
if (length(i) > 1) {
return(apply(x, 2, function(r) {
if (all(is.na(r))) return(NA)
return(max(r, na.rm = TRUE))
}))
}
return(x)
})
b <- t(apply(a$x, 1, unlist))
b <- cbind(a[,1:7], b)
b$date <- as.POSIXct(b$date)
b$fips <- as.integer(as.character(b$fips))
b$lat <- as.numeric(as.character(b$lat))
b$long <- as.numeric(as.character(b$long))
b$province_state <- factor(b$province_state)
b$admin2 <- factor(b$admin2)
b <- with(b, b[order(country_region, province_state, admin2, date),])
return(b)
}
read_data <- function(from = c("dworld", "ramikrispin")) {
from <- match.arg(from, c("dworld", "ramikrispin"))
url <- switch(
from,
"dworld" = "https://query.data.world/s/igmopqfux3jq3omp6tl6fsabldvcnf",
"ramikrispin" = "https://raw.githubusercontent.com/RamiKrispin/coronavirus-csv/master/coronavirus_dataset.csv")
df <- read.csv(url, stringsAsFactors = FALSE, strip.white = TRUE)
colnames(df) <- tolower(colnames(df))
colnames(df) <- gsub(".", "_", colnames(df), fixed = TRUE)
colnames(df) <- gsub("case_type", "type", colnames(df), fixed = TRUE)
df <- remove_uniq_cols(df)
df$province_state[df$province_state == "N/A"] <- ""
df <- df[, c("date", "country_region", "province_state", "type", "cases",
"lat", "long")]
# handle duplicated records
df <- df[!duplicated(df[,c("date", "country_region", "province_state", "type", "cases")]),]
by <- df[, c("date", "country_region", "province_state", "type")]
a <- aggregate(1:nrow(df), by, function(i) {
df2 <- df[i,,drop = FALSE]
if (nrow(df2) > 1) {
df2[1,5] <- sum(df2[,5])
return(df2[1,, drop = FALSE])
} else return(df2)
}, simplify = FALSE)
df <- do.call(rbind, a$x)
i <- which(colnames(df) == "date")
if (length(i) == 1 && i > 1) df <- cbind(df[,i, drop = FALSE], df[,-i])
if (from == "dworld") {
df$date <- as.Date(df$date, "%m/%d/%Y")
} else {
df$date <- as.Date(df$date)
}
substring(df$type, 1) <- toupper(substring(df$type, 1, 1))
df$country_region <- factor(df$country_region)
df$province_state <- factor(df$province_state)
df$type <- factor(df$type)
df <- df[order(df$country_region, df$province_state, df$date, df$type),]
rownames(df) <- NULL
return(df)
}
#' Download Covid19 data
#'
#' Pulls the Johns Hopkins CSSE global time series via read_jh_ts().
#'
#' @export
download.c19 <- function(from = "jh") {
  from <- match.arg(from, c("jh"))
  read_jh_ts()
}
|
# Correlation between peak infections and peak deaths across countries.
library(pracma)
fileDB <- './../Podatki/db.csv'
db<- read.csv(fileDB, header=TRUE, sep=",")
# Predictor: infections up to the epidemic peak; response: deaths to peak.
N <- db$Infected_to_peak
P <- db$Deaths_to_peak
# Scatter plot; labels are Slovenian ("impact of the number of infected on
# the number of dead" / "number infected" / "number dead").
plot(N,P,
     main = "Vpliv števila okužencev na število mrtvih",
     xlab = "Število okužencev",
     ylab = "Število mrtvih")
# Pearson and Spearman correlation coefficients and the Pearson test.
r <- cor(N, P, method = "pearson")
s <- cor(N, P, method = "spearman")
rtest <- cor.test(N,P, method = "pearson")
stest <- cor.test(N,P, method = "spearman") | /Programi/cor_analisys_infected_and_deaths.R | no_license | KalcMatej99/Seminarska-VS-Covid-19 | R | false | false | 473 | r | library(pracma)
# Duplicate copy of the peak-infections vs. peak-deaths correlation script.
fileDB <- './../Podatki/db.csv'
db<- read.csv(fileDB, header=TRUE, sep=",")
# Infections to the epidemic peak (predictor) and deaths to the peak (response).
N <- db$Infected_to_peak
P <- db$Deaths_to_peak
# Scatter plot; axis labels are Slovenian ("number infected" / "number dead").
plot(N,P,
     main = "Vpliv števila okužencev na število mrtvih",
     xlab = "Število okužencev",
     ylab = "Število mrtvih")
# Pearson/Spearman correlations plus the Pearson significance test.
r <- cor(N, P, method = "pearson")
s <- cor(N, P, method = "spearman")
rtest <- cor.test(N,P, method = "pearson")
stest <- cor.test(N,P, method = "spearman") |
# (Translated from Portuguese:) Do not change any of the code below. Just
# type submit() when you think you understand it. If you find it
# confusing, you are absolutely right!
# Per-package summary of the CRAN download log: download count, distinct
# IPs, distinct countries and mean size; keep packages downloaded from more
# than 60 countries, sorted by country count (desc) then average size.
result2 <-
  arrange(
    filter(
      summarize(
        group_by(cran,
                 package
        ),
        count = n(),
        unique = n_distinct(ip_id),
        countries = n_distinct(country),
        avg_bytes = mean(size)
      ),
      countries > 60
    ),
    desc(countries),
    avg_bytes
  )
print(result2)
| /Agrupando_e_estruturando_dados_com_dplr/scripts/summarize3.R | no_license | Murilojunqueira/Obtencao_e_Limpeza_de_Dados | R | false | false | 494 | r | # Não altere nenhum dos códigos abaixo. Basta digitar submit ()
# (Translated from Portuguese:) ...when you think you understand it. If you
# find it confusing, you are absolutely right!
# Same per-package CRAN-log summary as above: count, distinct IPs, distinct
# countries, mean size; filtered to > 60 countries and sorted.
result2 <-
  arrange(
    filter(
      summarize(
        group_by(cran,
                 package
        ),
        count = n(),
        unique = n_distinct(ip_id),
        countries = n_distinct(country),
        avg_bytes = mean(size)
      ),
      countries > 60
    ),
    desc(countries),
    avg_bytes
  )
print(result2)
|
# Reproducible-research analysis of the "activity.csv" step-count data:
# (A) load, (B) daily-total histogram, (C) mean daily pattern, (D) impute
# missing values from per-interval means, (E) weekday vs weekend panels.
library(lattice)
# (A) Loading and preprocessing the data
activity <- read.csv("activity.csv")
totalByDate <- aggregate(steps ~ date, data = activity, sum, na.rm = TRUE)
# (B) Mean total number of steps taken per day
png("Histogram of the total number of steps taken each day.png", height = 480, width = 480)
hist( totalByDate$steps,
      col = "red",
      main = "Histogram of the total number of steps taken each day",
      xlab = "Number of steps taken per day",
      ylab = "Frequency" )
dev.off()
mean(totalByDate$steps)
median(totalByDate$steps)
# (C) Average daily activity pattern
meanByInterval <- aggregate(steps ~ interval, data = activity, mean, na.rm = TRUE)
png("Average daily activity pattern.png", height = 480, width = 480)
plot( steps ~ interval,
      data = meanByInterval,
      type = "l",
      main = "Average daily activity pattern",
      xlab = "Interval",
      ylab = "Mean")
dev.off()
# 5-minute interval containing the maximum recorded step count.
activity[which.max(activity$steps), ]$interval
# (D) Imputing missing values
sum(is.na(activity$steps))
# Impute each missing step count with the mean for its interval.
# Vectorized with match() instead of the original row-by-row loop.
activitySimulated <- activity
missing <- is.na(activitySimulated$steps)
activitySimulated$steps[missing] <-
  meanByInterval$steps[match(activitySimulated$interval[missing], meanByInterval$interval)]
totalByDateSimulated <- aggregate(steps ~ date, data = activitySimulated, sum, na.rm = TRUE)
png("Histogram of the total number of SIMULATED steps taken each day.png", height = 480, width = 480)
# Fixed: plot the imputed totals (the original plotted totalByDate$steps here).
hist( totalByDateSimulated$steps,
      col = "red",
      main = "Histogram of the total number of steps taken each day",
      xlab = "Number of steps taken per day",
      ylab = "Frequency" )
dev.off()
mean(totalByDateSimulated$steps)
median(totalByDateSimulated$steps)
# (E) Differences in activity patterns between weekdays and weekends
# Fixed: POSIXlt $wday is 0 (Sunday) .. 6 (Saturday). The original test
# "(wday-1 %% 7) >= 5" parsed as "wday - 1 >= 5" because %% binds tighter
# than binary "-", so Sundays were labelled weekdays.
activitySimulated$wday <- ifelse(as.POSIXlt(as.Date(activitySimulated$date))$wday %in% c(0, 6),
                                 "weekend",
                                 "weekday")
totalByWeekdaySimulated <- aggregate(steps ~ interval + wday, data = activitySimulated, mean, na.rm = TRUE)
png("Panel plot of simulated data.png", height = 480, width = 480)
xyplot( steps ~ interval | factor(wday),
        data = totalByWeekdaySimulated,
        ylab = "Number of steps",
        type = "l",
        aspect = 1/2
        )
dev.off() | /RepData.R | no_license | datascience0001/RepData_PeerAssessment1 | R | false | false | 2,471 | r | library(lattice)
# (A) Loading and preprocessing the data
# Expects "activity.csv" (columns: steps, date, interval) in the working
# directory.  Total steps per day; days with only NAs are dropped by
# aggregate().
activity<-read.csv("activity.csv")
totalByDate<-aggregate(steps~date, data=activity, sum, na.rm=TRUE)
# (B) mean total number of steps taken per day
# Create Histogram of the daily totals
png("Histogram of the total number of steps taken each day.png", height = 480, width = 480)
hist( totalByDate$steps,
      col = "red",
      main = "Histogram of the total number of steps taken each day",
      xlab = "Number of steps taken per day",
      ylab = "Frequency" )
dev.off()
mean(totalByDate$steps)
median(totalByDate$steps)
# (C) Average daily activity pattern: mean steps per 5-minute interval
meanByInterval<-aggregate(steps~interval, data=activity, mean, na.rm=TRUE)
png("Average daily activity pattern.png", height = 480, width = 480)
plot( steps~interval,
      data = meanByInterval,
      type = "l",
      main = "Average daily activity pattern",
      xlab = "Interval",
      ylab = "Mean")
dev.off()
maxsteps = max(activity$steps, na.rm=TRUE)  # largest single-interval count (not used below)
# Interval that contains the maximum number of steps.
activity[which.max(activity$steps),]$interval
# (D) Inputing missing values
# Number of rows with a missing step count.
sum(is.na(activity$steps))
# Replacement value for an NA: the mean step count for that interval.
GetSimulatedData<-function(interval) {
  meanByInterval[meanByInterval$interval==interval,]$steps
}
activitySimulated<-activity
for(i in seq_len(nrow(activitySimulated))){
  if(is.na(activitySimulated[i,]$steps)){
    activitySimulated[i,]$steps<-GetSimulatedData(activitySimulated[i,]$interval)
  }
}
totalByDateSimulated<-aggregate(steps~date, data=activitySimulated, sum, na.rm=TRUE)
png("Histogram of the total number of SIMULATED steps taken each day.png", height = 480, width = 480)
# BUG FIX: this histogram previously plotted totalByDate$steps (the raw,
# un-imputed totals), so the "SIMULATED" png showed the wrong data.
hist( totalByDateSimulated$steps,
      col = "red",
      main = "Histogram of the total number of steps taken each day",
      xlab = "Number of steps taken per day",
      ylab = "Frequency" )
dev.off()
mean(totalByDateSimulated$steps)
median(totalByDateSimulated$steps)
# (E) Differences in activity patterns between weekdays and weekends
# BUG FIX: the original test ((wday - 1 %% 7) >= 5) reduces to wday - 1 >= 5
# because %% binds tighter than "-", which classified only Saturday as a
# weekend.  POSIXlt $wday is 0 (Sunday) .. 6 (Saturday).
activitySimulated$wday <- ifelse( as.POSIXlt(as.Date(activitySimulated$date))$wday %in% c(0, 6),
                                  "weekend",
                                  "weekday" )
totalByWeekdaySimulated<-aggregate(steps~interval+wday, data=activitySimulated, mean, na.rm=TRUE)
png("Panel plot of simulated data.png", height = 480, width = 480)
xyplot( steps~interval|factor(wday),
        data=totalByWeekdaySimulated,
        ylab="Number of steps",
        type="l",
        aspect=1/2
)
dev.off()
# Set the working directory.
# NOTE(review): hard-coded, machine-specific path -- the script only runs
# as-is on the original author's machine.
setwd("C:/Users/frost/Documents/DataScience/hpc")
getwd() # CHECK UP
library(dplyr)
# Read the data ("?" marks missing values in this dataset)
data<-read.table("household_power_consumption.txt",header = TRUE,sep = ";",na.strings = "?")
data<-as.data.frame(data)
# Convert date and time into the right formats
data$Date<-as.Date(data$Date, "%d/%m/%Y")
data$Time<- format(strptime(data$Time,"%H:%M:%S"),"%H:%M:%S")
# Add a column with abbreviated weekday names.
# NOTE(review): weekdays() output is locale-dependent; the "Fri"
# comparison below assumes an English locale -- confirm.
data$days<-weekdays(data$Date,abbreviate=TRUE)
# Get the required subset of data from 2007-02-01 till 2007-02-02
sample<-filter(data,data$Date>=as.Date("2007-02-01")&data$Date<=as.Date("2007-02-02"))
# Plot 3
## open a png device
png(file="plot3.png",width=480,height=480)
## draw the three sub-metering series as lines, suppressing the default
## x axis (xaxt = 'n') so custom day labels can be added below
plot(sample$Sub_metering_1, ylab = "Energy sub metering",xlab="",type = "l",xaxt = 'n',col="black")
lines(sample$Sub_metering_2,col="red",type="l")
lines(sample$Sub_metering_3,col="blue",type="l")
# Annotate the x-axis with the days of the week.
# step = observations in one full day minus one, used as the tick spacing
step<-sum(sample$days == "Fri")-1
lengthframe<-nrow(sample)
axis(1,at=seq(1, lengthframe, by=step),labels=list("Thu","Fri","Sat"))
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),lty=c(1,1,1), ncol=1,trace = TRUE)
## close the png device
dev.off()
| /plot3.R | no_license | AnastasiaMorozova/ExData_Plotting1 | R | false | false | 1,399 | r | # Set the working directory
setwd("C:/Users/frost/Documents/DataScience/hpc")
getwd() # CHECK UP
library(dplyr)
# read tha data
data<-read.table("household_power_consumption.txt",header = TRUE,sep = ";",na.strings = "?")
data<-as.data.frame(data)
# convert date and time in the right format
data$Date<-as.Date(data$Date, "%d/%m/%Y")
data$Time<- format(strptime(data$Time,"%H:%M:%S"),"%H:%M:%S")
#add a column
data$days<-weekdays(data$Date,abbreviate=TRUE)
# get a required subset of data from 2007-02-01 till 2007-02-02
sample<-filter(data,data$Date>=as.Date("2007-02-01")&data$Date<=as.Date("2007-02-02"))
# Plot 3
## open a png device
png(file="plot3.png",width=480,height=480)
## call a function hist() to create a histogram with y-axis label and no x-axis labels
plot(sample$Sub_metering_1, ylab = "Energy sub metering",xlab="",type = "l",xaxt = 'n',col="black")
lines(sample$Sub_metering_2,col="red",type="l")
lines(sample$Sub_metering_3,col="blue",type="l")
# annotating the plot by adding the days of the week on the x-axis of the plot
step<-sum(sample$days == "Fri")-1
lengthframe<-nrow(sample)
axis(1,at=seq(1, lengthframe, by=step),labels=list("Thu","Fri","Sat"))
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),lty=c(1,1,1), ncol=1,trace = TRUE)
## close the png device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_occ.r
\name{calc_occ}
\alias{calc_occ}
\alias{calc_tot_occ}
\title{Calculate abundance}
\usage{
calc_occ(comm)
calc_tot_occ(comm)
}
\arguments{
\item{comm}{(required) site x microsite matrix with integers representing
the associations present in each microsite}
}
\value{
vector of number of occupied microsites at each site or the total
number of microsites occupied in the entire metacommunity
}
\description{
Calculates the number of occupied microsites at each site or across the
entire metacommunity
}
\section{Functions}{
\itemize{
\item \code{calc_occ}: Calculate abundance at each site
\item \code{calc_tot_occ}: Calculate total abundance for the metacommunity
}}
| /CAMM/man/calc_occ.Rd | no_license | jescoyle/CAMM | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_occ.r
\name{calc_occ}
\alias{calc_occ}
\alias{calc_tot_occ}
\title{Calculate abundance}
\usage{
calc_occ(comm)
calc_tot_occ(comm)
}
\arguments{
\item{comm}{(required) site x microsite matrix with integers representing
the associations present in each microsite}
}
\value{
vector of number of occupied microsites at each site or the total
number of microsites occupied in the entire metacommunity
}
\description{
Calculates the number of occupied microsites at each site or across the
entire metacommunity
}
\section{Functions}{
\itemize{
\item \code{calc_occ}: Calculate abundance at each site
\item \code{calc_tot_occ}: Calculate total abundance for the metacommunity
}}
|
# Download the zipped UCI HAR Dataset for the Getting and Cleaning Data
# course project into the working directory.
# NOTE(review): on some Windows R versions, https downloads may need
# method = "curl" and zip files mode = "wb" -- confirm before relying on it.
download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip','getdata-projectfiles-UCI HAR Dataset.zip')
| /getData.R | no_license | bprs/getdata-011 | R | false | false | 147 | r | download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip','getdata-projectfiles-UCI HAR Dataset.zip')
|
# Import only the rows for Feb 1 and Feb 2, 2007 (rows 66638-69517 of
# household_power_consumption.txt; the file is ";"-separated).
usage <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", skip = 66637, nrows = 2880, col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Combine the Date and Time columns into a single POSIXlt date-time.
usage$Date <- strptime(paste(usage$Date, usage$Time), "%d/%m/%Y %H:%M:%S")
# Draw the three sub-metering series into plot3.png.
png("plot3.png")
plot(usage$Date, usage$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(usage$Date, usage$Sub_metering_2, type = "l", col = "red")
lines(usage$Date, usage$Sub_metering_3, type = "l", col = "blue")
# BUG FIX: legend colors now match the line colors above
# (Sub_metering_2 = red, Sub_metering_3 = blue); the original legend
# listed col = c("black", "blue", "red") and mislabeled the two series.
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# BUG FIX: close the device png() just opened instead of the hard-coded
# device number 6, which errors if that device does not exist.
dev.off()
| /plot3.R | no_license | Dayve42/ExData_Plotting1 | R | false | false | 818 | r | #only import the lines for Feb 1 and Feb 2
usage <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", skip = 66637, nrows = 2880, col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#convert date column to date/time format
usage$Date <- strptime(paste(usage$Date, usage$Time), "%d/%m/%Y %H:%M:%S")
#make the plot
png("plot3.png")
plot(usage$Date, usage$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(usage$Date, usage$Sub_metering_2, type = "l", col = "red")
lines(usage$Date, usage$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "blue", "red"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off(6)
|
# Unit test: init_score_matrix() must build an nrow x ncol score matrix
# whose first row/column hold cumulative gap penalties for global
# alignment (local = FALSE) and zeros for local alignment (local = TRUE).
test.init_score_matrix <- function() {
  gap <- -2
  n_row <- 4
  n_col <- 12
  # Global alignment: border cells accumulate the gap penalty.
  mat <- init_score_matrix(nrow = n_row, ncol = n_col, FALSE, gap)
  checkEquals(c(n_row, n_col), dim(mat), "Wrong dimensions in the score matrix")
  checkEquals(gap * (seq_len(n_col) - 1), mat[1, ], "Wrong initial values in the upper row for local=F")
  checkEquals(gap * (seq_len(n_row) - 1), mat[, 1], "Wrong initial values in the left column for local=F")
  # Local alignment: border cells (beyond the origin) are all zero.
  mat <- init_score_matrix(nrow = n_row, ncol = n_col, TRUE, gap)
  checkEquals(rep(0, n_col - 1), mat[1, ][-1], "Wrong initial values in the upper row for local=T")
  checkEquals(rep(0, n_row - 1), mat[, 1][-1], "Wrong initial values in the left column for local=T")
}
test.init_path_matrix = function() {
test_mat = init_path_matrix(nrow=4,ncol=12, F)
checkEquals(c(4,12), dim(test_mat), "Wrong dimensions in the path matrix for local=F")
checkEquals(rep("left", 11), test_mat[1,][-1], "Wrong initial values in the upper row of the path matrix for local=F")
checkEquals(rep("up", 3), test_mat[,1][-1], "Wrong initial values in the left column of the path matrix for local=F")
checkEquals(sum(test_mat[-1,-1] == "left") + sum(test_mat[-1,-1] == "up") + sum(test_mat[-1,-1] == "diag"), 0, "Wrong initial values (not empty string) outside of upper row and left column for local=F")
test_mat = init_path_matrix(nrow=4,ncol=12, T)
checkEquals(c(4,12), dim(test_mat), "Wrong dimensions in the path matrix for local=T")
checkEquals(sum(test_mat == "left") + sum(test_mat == "up") + sum(test_mat == "diag"), 0, "Path matrix is not empty for local=T")
} | /assignments/assignment_1/student_test_suite/runit.get_init_matrices.R | no_license | pjhartout/Computational_Biology | R | false | false | 1,586 | r | test.init_score_matrix = function() {
score_gap = -2
test_mat = init_score_matrix(nrow=4,ncol=12,F,score_gap)
checkEquals(c(4,12), dim(test_mat), "Wrong dimensions in the score matrix")
checkEquals(seq(0, score_gap * 11, score_gap), test_mat[1,], "Wrong initial values in the upper row for local=F")
checkEquals(seq(0, score_gap * 3, score_gap), test_mat[,1], "Wrong initial values in the left column for local=F")
test_mat = init_score_matrix(nrow=4,ncol=12,T,score_gap)
checkEquals(rep(0, 11), test_mat[1,][-1], "Wrong initial values in the upper row for local=T")
checkEquals(rep(0, 3), test_mat[,1][-1], "Wrong initial values in the left column for local=T")
}
# Unit test: init_path_matrix() must build an nrow x ncol traceback
# matrix.  Global alignment pre-fills the borders ("left" along the top,
# "up" along the left edge); local alignment leaves everything empty.
test.init_path_matrix <- function() {
  pm <- init_path_matrix(nrow = 4, ncol = 12, FALSE)
  checkEquals(c(4, 12), dim(pm), "Wrong dimensions in the path matrix for local=F")
  checkEquals(rep("left", 11), pm[1, ][-1], "Wrong initial values in the upper row of the path matrix for local=F")
  checkEquals(rep("up", 3), pm[, 1][-1], "Wrong initial values in the left column of the path matrix for local=F")
  # No direction markers anywhere inside the matrix.
  interior <- pm[-1, -1]
  checkEquals(sum(interior == "left") + sum(interior == "up") + sum(interior == "diag"), 0, "Wrong initial values (not empty string) outside of upper row and left column for local=F")
  # Local alignment: no direction markers at all, borders included.
  pm <- init_path_matrix(nrow = 4, ncol = 12, TRUE)
  checkEquals(c(4, 12), dim(pm), "Wrong dimensions in the path matrix for local=T")
  checkEquals(sum(pm == "left") + sum(pm == "up") + sum(pm == "diag"), 0, "Path matrix is not empty for local=T")
}
## Install and load needed packages
##install.packages("bibliometrix", dependencies = TRUE)
##install.packages("splitstackshape")
##install.packages("tidyverse")
library(bibliometrix)
library(splitstackshape)
library(tidyverse)
library(stringr)
## Read in the downloaded .bib exports under ./Dalhousie and convert to a
## bibliometrix data frame (ISI/Web of Science, bibtex format)
filePathsDal = dir("./Dalhousie", pattern = "*.bib", recursive = TRUE, full.names = TRUE)
DDal <- do.call("readFiles", as.list(filePathsDal))
MDal <- convert2df(DDal, dbsource = "isi", format = "bibtex")
## Keep only selected columns: UT, C1, DT, PY, TC
mydataDal <- select(MDal, UT, C1, DT, PY, TC)
## Split the semicolon-delimited author addresses (C1) into one row each
tidy_dataDal <- cSplit(mydataDal, "C1", sep = ";", direction = "long")
## Test that there were no unintended drops: after the split the row
## count should equal the original rows plus the number of ";" separators
count <- sum(str_count(mydataDal$C1, ";"))
ifelse(count + nrow(mydataDal) == nrow(tidy_dataDal), "No drops", "Warning")
## Remove non-Dalhousie addresses, then keep only engineering ("ENGN") ones
DalData <- tidy_dataDal[grep("DALHOUSIE UNIV", tidy_dataDal$C1), ]
engDataDal <- DalData[grep("ENGN", DalData$C1), ]
## Department lookup table (published Google Sheet CSV; needs network access)
deptURL <- "https://docs.google.com/spreadsheets/d/e/2PACX-1vTMpIJn2N9pV13zRhYKRdOOAUfvHhKF6dqUzMWhnk3_eaBgPD8XT6UJBuAXfyoWfA0qfvaO4LyQpfJA/pub?gid=134374484&single=true&output=csv"
depts <- read.csv(deptURL)
abs <- as.character(depts$Abbreviation)
## For each address, find which department abbreviation(s) it contains.
## NOTE(review): if an address matches several abbreviations only the
## first is kept by the [,1] below, and the cbind result is assumed to
## land in column 6 -- confirm both against real data.
dept_test <- sapply(engDataDal$C1, function(x) abs[str_detect(x, abs)])
engDataDal<-cbind(engDataDal,plyr::ldply(dept_test,rbind)[,1])
names(engDataDal)[6]<-"Abbreviation"
engDeptData <- merge(engDataDal, depts, all.x = TRUE) ##keeps nonmatches and enters NA
## check the "other"s (rows with no department match) for articles that should be kept
Other <- filter(engDeptData, is.na(Department))
View(Other)
## Keep only eng departments (drops "Truro Campus"; filter() also drops
## the NA-department rows here)
engDeptData <- filter(engDeptData, Department !="Truro Campus")
finalEngData <- engDeptData[complete.cases(engDeptData), ]
## Remove departmental duplicates (leave institutional duplicates)
engDataDD <- unique(select(finalEngData, UT, DT, TC, PY, Department))
write.csv(engDataDD, "Dalhousie.csv", quote = TRUE, row.names = FALSE)
| /Dalhousie.R | no_license | athenry/2017ROA | R | false | false | 2,028 | r | ##Install and load needed packages
##install.packages("bibliometrix", dependencies = TRUE)
##install.packages("splitstackshape")
##install.packages("tidyverse")
library(bibliometrix)
library(splitstackshape)
library(tidyverse)
library(stringr)
## Read in downloaded files and convert to dataframe
filePathsDal = dir("./Dalhousie", pattern = "*.bib", recursive = TRUE, full.names = TRUE)
DDal <- do.call("readFiles", as.list(filePathsDal))
MDal <- convert2df(DDal, dbsource = "isi", format = "bibtex")
## Keep only selected columns: UT, DT, C1, DT, TC, PY
mydataDal <- select(MDal, UT, C1, DT, PY, TC)
## Separate authors into single observations
tidy_dataDal <- cSplit(mydataDal, "C1", sep = ";", direction = "long")
##Test that there were no unintended drops
count <- sum(str_count(mydataDal$C1, ";"))
ifelse(count + nrow(mydataDal) == nrow(tidy_dataDal), "No drops", "Warning")
## Remove non-Dalhousie addresses
DalData <- tidy_dataDal[grep("DALHOUSIE UNIV", tidy_dataDal$C1), ]
engDataDal <- DalData[grep("ENGN", DalData$C1), ]
deptURL <- "https://docs.google.com/spreadsheets/d/e/2PACX-1vTMpIJn2N9pV13zRhYKRdOOAUfvHhKF6dqUzMWhnk3_eaBgPD8XT6UJBuAXfyoWfA0qfvaO4LyQpfJA/pub?gid=134374484&single=true&output=csv"
depts <- read.csv(deptURL)
abs <- as.character(depts$Abbreviation)
dept_test <- sapply(engDataDal$C1, function(x) abs[str_detect(x, abs)])
engDataDal<-cbind(engDataDal,plyr::ldply(dept_test,rbind)[,1])
names(engDataDal)[6]<-"Abbreviation"
engDeptData <- merge(engDataDal, depts, all.x = TRUE) ##keeps nonmatches and enters NA
## check the "other"s for articles that should be kept
Other <- filter(engDeptData, is.na(Department))
View(Other)
##Keep only eng departments
engDeptData <- filter(engDeptData, Department !="Truro Campus")
finalEngData <- engDeptData[complete.cases(engDeptData), ]
##Remove departmental duplicates (leave institutional duplicates)
engDataDD <- unique(select(finalEngData, UT, DT, TC, PY, Department))
write.csv(engDataDD, "Dalhousie.csv", quote = TRUE, row.names = FALSE)
|
#' sample_fluidigm
#'
#' Reads .xlsx and .csv files from the fluidigm/rawdata folder of a study
#' and maps SampleIDs to file names and file paths.  Results are written
#' (or appended) to <study>_sample_to_file.csv in the working directory,
#' files without a recognisable SampleID column are logged via
#' sample_missed_files(), and a text report summarising unmapped files is
#' produced.
#'
#' @param studypath Path to the study root, for example
#'   studypath <- "/gne/data/obdroot/PDL1mab/go29294"
#' @return Called for its side effects (report files); no useful value.
#' @export
bo29337_sample_fluidigm <- function(studypath){
  report <- NULL
  report <- as_tibble(report)
  # Collect all fluidigm rawdata files
  fluidigm_raw <- paste(studypath, "/fluidigm/rawdata", sep="")
  fluidigm_files <- fluidigm_raw %>% dir(full.names=TRUE, recursive=TRUE)
  # Subset all .csv and .xlsx/.xls fluidigm files (excluding .tif files):
  fluidigm_files_excel <- fluidigm_files %>% str_subset(".csv|.xlsx|.xls")
  # ---- Map .xlsx files ----
  fluidigm_files_xlsx <- fluidigm_files_excel %>% str_subset(".xlsx")
  # Column headers under which a sample identifier may appear.
  Sampleid_colnames <- c("Specimen_Name", "FMI SAMPLE ID", "FMID", "Specimen Name",
                         "Specimen", "FMI", "specimenName", "SpecimenName", "specimen_name",
                         "specimen name", "Sample", "SAMPLE", "sampleID", "Specimen ID", "Name", "SMPID")
  for (i in seq_along(fluidigm_files_xlsx)){
    # BUG FIX: reset the parse result every iteration; previously a failed
    # read_excel() left the prior file's contents in `xlsx_file`, so a
    # broken file could be reported with the previous file's SampleIDs.
    xlsx_file <- NULL
    tryCatch({xlsx_file <- read_excel(fluidigm_files_xlsx[i], col_names=TRUE, skip=3)},
             error=function(e) {cat("Bad file pattern:\n", fluidigm_files_xlsx[i], "\n")}
    )
    if (is.null(xlsx_file)) next
    # Check whether any recognised SampleID column name is present.
    if(any(colnames(xlsx_file) %in% Sampleid_colnames)){
      # Extract SampleIDs plus file name/path into the report table.
      SampleID <- xlsx_file %>%
        select(one_of(Sampleid_colnames)) %>% pull() %>% toupper()
      File_Path <- fluidigm_files_xlsx[i]
      File_Name <- basename(File_Path)
      combine <- cbind(File_Path, File_Name, SampleID)
      report <- rbind2(report, combine) %>% distinct()
    } else {
      # No SampleID column: log the file in the sample_missed_files report
      # and move to the next iteration.
      file <- fluidigm_files_xlsx[i]
      samp <- "sample_fluidigm"
      sample_missed_files(file, studypath, samp)
      cat("File ", fluidigm_files_xlsx[i], "\n could not be processed - see sample_missed_files report")
      next
    }
  }
  if(nrow(report)!=0){
    # Print the sample-to-file mapping report or append to an existing one.
    study <- basename(studypath)
    if (!file.exists(paste0(getwd(), "/", study, "_sample_to_file.csv"))){
      write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=TRUE, col_names=TRUE)
    } else {
      write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=TRUE, col_names=FALSE)
    }
    print("Report on fluidigm xlsx files in /fluidigm/rawdata was generated in your home directory")
  }
  # ---- Map .csv files ----
  fluidigm_files_csv <- fluidigm_files_excel %>% str_subset(".csv")
  for (i in seq_along(fluidigm_files_csv)){
    # Same stale-result guard as in the xlsx loop above.
    csv_file <- NULL
    tryCatch({csv_file <- read_csv(fluidigm_files_csv[i], col_names=TRUE, skip=11)},
             error=function(e) {cat("Bad file pattern:\n", fluidigm_files_csv[i], "\n")}
    )
    if (is.null(csv_file)) next
    if(any(colnames(csv_file) %in% Sampleid_colnames)){
      # NOTE(review): this branch takes SampleIDs from column 2 rather than
      # from the matched column -- confirm fluidigm csv exports always
      # carry the SampleID in the second column.
      SampleID <- csv_file[,2] %>% pull() %>% toupper()
      File_Path <- fluidigm_files_csv[i]
      File_Name <- basename(File_Path)
      combine <- cbind(File_Path, File_Name, SampleID)
      report <- rbind2(report, combine) %>% distinct()
    } else {
      # BUG FIX: report the unprocessable *csv* file; the original logged
      # fluidigm_files_xlsx[i] here, i.e. the wrong file.
      file <- fluidigm_files_csv[i]
      samp <- "sample_fluidigm"
      sample_missed_files(file, studypath, samp)
      cat("File ", fluidigm_files_csv[i], "\n could not be processed - see sample_missed_files report")
      next
    }
  }
  # Print the final mapping report or append to the existing one.
  study <- basename(studypath)
  if (!file.exists(paste0(getwd(), "/", study, "_sample_to_file.csv"))){
    write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=TRUE, col_names=TRUE)
  } else {
    write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=TRUE, col_names=FALSE)
  }
  print("Report on fluidigm csv files in /fluidigm/rawdata was generated in your home directory")
  # Summarise which candidate files were NOT mapped, ignoring known
  # non-sample files (summaries, manifests, etc.).
  Report_File_Path <- report$File_Path %>% unique()
  total_files <- fluidigm_files_excel
  difference <- setdiff(total_files, Report_File_Path) %>%
    str_ignore("summary|Summary|erroneous|contents|Contents|Analysis|analysis|standard|annotated|Thumbs|thumbs|readme|manifest|Manifest|sas7bdat")
  # Temporarily raise max.print so the full file list reaches the report;
  # restored via options(backup_options) below.
  backup_options <- options()
  options(max.print=999999)
  currentDate <- Sys.Date() %>% str_replace_all("-", "_")
  study <- basename(studypath)
  file.name <- paste(study, "_sample_to_file_report_", currentDate, ".txt", sep="")
  sink(file.name, append=TRUE, split=TRUE)
  cat("\n\n", study, "_sample to file mapping report\n\n")
  print(as.data.frame(Sys.info()))
  cat("\n########################################\n\n")
  # Print the percentage of mapped files and list the excluded ones.
  if(length(difference)==0){
    print("All files were included")} else{
    percent <- round((length(Report_File_Path)/length(total_files))*100, digits=1)
    cat(percent, "% of files were mapped in ", study, "fluidigm rawdata folders\n\n")
    cat("The following files were excluded from the sample to file mapping report for study ", study, ":\n\n")
    cat("The list below excludes files that do not contain SampleIDs, such as files containing\n")
    cat("summary, contents, analysis, standard, annotated, thumbs, readme - in the file or folder name\n\n")
    print(difference)}
  cat("###########################################")
  while (sink.number()>0) sink()
  options(backup_options)
}
################################
# Cong modified the original function by changing the argument skip = 10 to skip = 3 (line 30) to make it useful for study bo29337.
| /bo29337_sample_fluidigm-5-1-19.R | permissive | CongChen2017/Rscript4Work | R | false | false | 6,385 | r | #' sample_fluidigm
#'
#' sample_fluidigm function reads excel and .csv files from the fluidigm/ folders
#' and maps SampleIDs to file names and file paths
#'
#' @param studypath for example
#' studypath <- "/gne/data/obdroot/PDL1mab/go29294"
#' @export
bo29337_sample_fluidigm <- function(studypath){
report <- NULL
report <- as_tibble(report)
# Collect all fluidigm rawdata files
fluidigm_raw <- paste(studypath, "/fluidigm/rawdata", sep="")
fluidigm_files <- fluidigm_raw %>% dir(full.names=T, recursive=T)
# Subset all .csv and .xlsx fluidigm files (excluding .tif files):
fluidigm_files_excel <- fluidigm_files %>% str_subset(".csv|.xlsx|.xls")
# Map .xlsx files
fluidigm_files_xlsx <- fluidigm_files_excel %>% str_subset(".xlsx")
Sampleid_colnames <- c("Specimen_Name", "FMI SAMPLE ID", "FMID", "Specimen Name",
"Specimen", "FMI", "specimenName", "SpecimenName", "specimen_name",
"specimen name", "Sample", "SAMPLE", "sampleID", "Specimen ID", "Name", "SMPID")
for (i in seq_along(fluidigm_files_xlsx)){
tryCatch({xlsx_file <- read_excel(fluidigm_files_xlsx[i], col_names=T, skip=3)},
error=function(e) {cat("Bad file pattern:\n", fluidigm_files_xlsx[i], "\n")}
)
#check if column name of the 1st column of the text file is contained
# in the Sampleid_colnames and the format of SampleIDs in the first
# column consists of three letters followed by six numbers
if(any(colnames(xlsx_file) %in% Sampleid_colnames)){
# if the above condition is true - extract SampleIDs and File names
# and paths into the report table
SampleID <- xlsx_file %>%
select(one_of(Sampleid_colnames)) %>% pull() %>% toupper()
File_Path <- fluidigm_files_xlsx[i]
File_Name <- basename(File_Path)
combine <- cbind(File_Path, File_Name, SampleID)
report <- rbind2(report, combine) %>% distinct()
} else {
# if above condition in not true - add the file to the sample_missed_files
# report and move to the next iteration
file <- fluidigm_files_xlsx[i]
samp <- "sample_fluidigm"
sample_missed_files(file, studypath, samp)
cat("File ", fluidigm_files_xlsx[i], "\n could not be processed - see sample_missed_files report")
next
}
}
if(nrow(report)!=0){
# print sample to file mapping report or append to the existing one
study <- basename(studypath)
if (!file.exists(paste0(getwd(), "/", study, "_sample_to_file.csv"))){
write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=T, col_names=T)
} else {
write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=T, col_names=F)
}
print("Report on fluidigm xlsx files in /fluidigm/rawdata was generated in your home directory")
}
# Map .csv files
fluidigm_files_csv <- fluidigm_files_excel %>% str_subset(".csv")
for (i in seq_along(fluidigm_files_csv)){
  # Fluidigm csv exports carry an 11-line preamble before the header row
  tryCatch({csv_file <- read_csv(fluidigm_files_csv[i], col_names=T, skip=11)},
           error=function(e) {cat("Bad file pattern:\n", fluidigm_files_csv[i], "\n")}
  )
  # Check whether any column name of the csv file is one of the recognised
  # SampleID column names collected in Sampleid_colnames
  if(any(colnames(csv_file) %in% Sampleid_colnames)){
    # Recognised layout: extract SampleIDs (2nd column by convention in
    # these exports) plus file name/path into the report table
    SampleID <- csv_file[,2] %>% pull() %>% toupper()
    File_Path <- fluidigm_files_csv[i]
    File_Name <- basename(File_Path)
    combine <- cbind(File_Path, File_Name, SampleID)
    report <- rbind2(report, combine) %>% distinct()
  } else {
    # Unrecognised layout: record the file in the sample_missed_files
    # report and move to the next iteration.
    # BUG FIX: this branch previously indexed fluidigm_files_xlsx[i]
    # (copy/paste from the xlsx loop above), so the *wrong* file was
    # logged as missed; it now logs the csv file being processed.
    file <- fluidigm_files_csv[i]
    samp <- "sample_fluidigm"
    sample_missed_files(file, studypath, samp)
    cat("File ", fluidigm_files_csv[i], "\n could not be processed - see sample_missed_files report")
    next
  }
}
# print sample to file mapping report or append to the existing one
study <- basename(studypath)
if (!file.exists(paste0(getwd(), "/", study, "_sample_to_file.csv"))){
write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=T, col_names=T)
} else {
write_csv(report, paste0(getwd(), "/", study, "_sample_to_file.csv"), append=T, col_names=F)
}
print("Report on fluidigm csv files in /fluidigm/rawdata was generated in your home directory")
Report_File_Path <- report$File_Path %>% unique()
total_files <- fluidigm_files_excel
difference <- setdiff(total_files, Report_File_Path) %>%
str_ignore("summary|Summary|erroneous|contents|Contents|Analysis|analysis|standard|annotated|Thumbs|thumbs|readme|manifest|Manifest|sas7bdat")
backup_options <- options()
options(max.print=999999)
currentDate <- Sys.Date() %>% str_replace_all("-", "_")
study <- basename(studypath)
file.name <- paste(study, "_sample_to_file_report_", currentDate, ".txt", sep="")
sink(file.name, append=T, split=T)
cat("\n\n", study, "_sample to file mapping report\n\n")
print(as.data.frame(Sys.info()))
cat("\n########################################\n\n")
# print the difference between all files in the folders and mapped files
# and precentage of files mapped
if(length(difference)==0){
print("All files were included")} else{
percent <- round((length(Report_File_Path)/length(total_files))*100, digits=1)
cat(percent, "% of files were mapped in ", study, "fluidigm rawdata folders\n\n")
cat("The following files were excluded from the sample to file mapping report for study ", study, ":\n\n")
cat("The list below excludes files that do not contain SampleIDs, such as files containing\n")
cat("summary, contents, analysis, standard, annotated, thumbs, readme - in the file or folder name\n\n")
print(difference)}
cat("###########################################")
while (sink.number()>0) sink()
options(backup_options)
}
################################
# Cong modified the original function by changing the argument skip = 10 to skip = 3 (line 30) to make it useful for study bo29337.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{acm}
\alias{acm}
\title{AWS Certificate Manager}
\usage{
acm(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Welcome to the AWS Certificate Manager (ACM) API documentation.
You can use ACM to manage SSL/TLS certificates for your AWS-based
websites and applications. For general information about using ACM, see
the \href{https://docs.aws.amazon.com/acm/latest/userguide/}{\emph{AWS Certificate Manager User Guide}} .
}
\section{Service syntax}{
\preformatted{svc <- acm(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=acm_add_tags_to_certificate]{add_tags_to_certificate} \tab Adds one or more tags to an ACM certificate \cr
\link[=acm_delete_certificate]{delete_certificate} \tab Deletes a certificate and its associated private key \cr
\link[=acm_describe_certificate]{describe_certificate} \tab Returns detailed metadata about the specified ACM certificate \cr
\link[=acm_export_certificate]{export_certificate} \tab Exports a private certificate issued by a private certificate authority (CA) for use anywhere \cr
\link[=acm_get_certificate]{get_certificate} \tab Retrieves a certificate specified by an ARN and its certificate chain \cr
\link[=acm_import_certificate]{import_certificate} \tab Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM\cr
\link[=acm_list_certificates]{list_certificates} \tab Retrieves a list of certificate ARNs and domain names \cr
\link[=acm_list_tags_for_certificate]{list_tags_for_certificate} \tab Lists the tags that have been applied to the ACM certificate \cr
\link[=acm_remove_tags_from_certificate]{remove_tags_from_certificate} \tab Remove one or more tags from an ACM certificate \cr
\link[=acm_renew_certificate]{renew_certificate} \tab Renews an eligible ACM certificate \cr
\link[=acm_request_certificate]{request_certificate} \tab Requests an ACM certificate for use with other AWS services \cr
\link[=acm_resend_validation_email]{resend_validation_email} \tab Resends the email that requests domain ownership validation \cr
\link[=acm_update_certificate_options]{update_certificate_options} \tab Updates a certificate
}
}
\examples{
\donttest{svc <- acm()
svc$add_tags_to_certificate(
Foo = 123
)}
}
| /cran/paws/man/acm.Rd | permissive | ryanb8/paws | R | false | true | 2,640 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{acm}
\alias{acm}
\title{AWS Certificate Manager}
\usage{
acm(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Welcome to the AWS Certificate Manager (ACM) API documentation.
You can use ACM to manage SSL/TLS certificates for your AWS-based
websites and applications. For general information about using ACM, see
the \href{https://docs.aws.amazon.com/acm/latest/userguide/}{\emph{AWS Certificate Manager User Guide}} .
}
\section{Service syntax}{
\preformatted{svc <- acm(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=acm_add_tags_to_certificate]{add_tags_to_certificate} \tab Adds one or more tags to an ACM certificate \cr
\link[=acm_delete_certificate]{delete_certificate} \tab Deletes a certificate and its associated private key \cr
\link[=acm_describe_certificate]{describe_certificate} \tab Returns detailed metadata about the specified ACM certificate \cr
\link[=acm_export_certificate]{export_certificate} \tab Exports a private certificate issued by a private certificate authority (CA) for use anywhere \cr
\link[=acm_get_certificate]{get_certificate} \tab Retrieves a certificate specified by an ARN and its certificate chain \cr
\link[=acm_import_certificate]{import_certificate} \tab Imports a certificate into AWS Certificate Manager (ACM) to use with services that are integrated with ACM\cr
\link[=acm_list_certificates]{list_certificates} \tab Retrieves a list of certificate ARNs and domain names \cr
\link[=acm_list_tags_for_certificate]{list_tags_for_certificate} \tab Lists the tags that have been applied to the ACM certificate \cr
\link[=acm_remove_tags_from_certificate]{remove_tags_from_certificate} \tab Remove one or more tags from an ACM certificate \cr
\link[=acm_renew_certificate]{renew_certificate} \tab Renews an eligible ACM certificate \cr
\link[=acm_request_certificate]{request_certificate} \tab Requests an ACM certificate for use with other AWS services \cr
\link[=acm_resend_validation_email]{resend_validation_email} \tab Resends the email that requests domain ownership validation \cr
\link[=acm_update_certificate_options]{update_certificate_options} \tab Updates a certificate
}
}
\examples{
\donttest{svc <- acm()
svc$add_tags_to_certificate(
Foo = 123
)}
}
|
## This function plots all graphs: for every response ~ predictor pair it
## writes one PNG into `dirPath` showing the raw data with every
## successfully fitted functional form overlaid, plus a legend.
##' @importFrom 'graphics' 'par' 'points' 'legend'
##' @importFrom 'stats' 'as.formula' 'predict'
##' @importFrom 'grDevices' 'dev.off'
plotAll <- function(x, dirPath = file.path(".", "figs"),
                    figArgs = list(res = 150, units = "in", height = 8, width = 8)){
    ## showWarnings = FALSE: re-running with an existing figs dir is fine
    dir.create(dirPath, showWarnings = FALSE)
    ## ---------------
    ## Set colors
    ## ---------------
    ## Colors from "set1" of RColorBrewer.
    mycols <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
                "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
    ## One spec per candidate functional form: the element name inside
    ## `allForms`, the fitted-object class that marks a successful fit,
    ## the legend label, and the line/legend color.
    formSpecs <- list(
      list(key = "SL",    class = "lm",  label = "Simple Linear",    col = mycols[1]),
      list(key = "Quad",  class = "lm",  label = "Quadratic",        col = mycols[2]),
      list(key = "SQuad", class = "lm",  label = "Simple Quadratic", col = mycols[3]),
      list(key = "Exp",   class = "lm",  label = "Exponential",      col = mycols[4]),
      list(key = "Log",   class = "lm",  label = "Logarithm",        col = mycols[5]),
      ## BUG FIX: the nls line used to be drawn in mycols[5] (same color
      ## as the Logarithm fit) while its legend entry used mycols[6];
      ## both now consistently use mycols[6].
      list(key = "nls",   class = "nls", label = "a + b * exp(c * x)", col = mycols[6]))
    ## ---------------
    ## Plots
    ## ---------------
    allres <- x$all
    ## predictors (drop the response column)
    prds <- rownames(allres[,,1])
    prds <- prds[prds != x$response]
    ## Responses (drop the stressor column)
    rsps <- colnames(allres[,,1])
    rsps <- rsps[rsps != x$stressor]
    ## Go through pair by pair
    for(i in seq_along(prds)){
      for(j in seq_along(rsps)){
        ## NOTE(review): this skips matching *positions* of two different
        ## name vectors (prds vs rsps); kept as-is to preserve behavior.
        if(i == j) next
        ## list of results of functional forms on this pair
        allForms <- allres[prds[i], rsps[j],]
        ## initiate a png file
        pngArgs <- list(filename = file.path(dirPath,
                                             paste0(rsps[j], "~", prds[i], ".png")))
        pngArgs <- c(pngArgs, figArgs)
        do.call(what = "png", args = pngArgs)
        ## plot original data; wide right margin leaves room for the legend
        par(mar = c(5.1, 4.1, 4.1, 11.1))
        cat("Plotting", rsps[j], "~", prds[i], "\n")
        plotargs <- list(formula = as.formula(paste0(rsps[j], "~", prds[i])),
                         data = x$data,
                         main = paste0(rsps[j], "~", prds[i],
                                       paste0("\nBest fit: ", x$best[prds[i], rsps[j]])))
        do.call(what = "plot", args = plotargs)
        ## Generate x values for fitted lines.
        ## BUG FIX: the original used ifelse(nrow(x$data > 100), ...),
        ## which takes nrow() of a *logical matrix* (always the row count,
        ## hence always truthy); the intended value is
        ## max(nrow(x$data), 100), i.e. at least 100 points.
        xs.dense <- seq(min(x$data[prds[i]], na.rm = TRUE),
                        max(x$data[prds[i]], na.rm = TRUE),
                        length.out = max(nrow(x$data), 100))
        newdata <- list(xs.dense)
        names(newdata) <- prds[i]
        ## Draw each successfully fitted form and collect its legend entry.
        ## (Replaces six near-identical copy/paste branches.)
        lgd.label <- character(0)
        lgd.col <- character(0)
        for (spec in formSpecs) {
          fit <- allForms[[spec$key]]
          if (inherits(fit, spec$class)) {
            ys <- predict(fit, newdata = newdata)
            points(xs.dense, ys, type = "l", col = spec$col)
            lgd.label <- c(lgd.label, spec$label)
            lgd.col <- c(lgd.col, spec$col)
          }
        }
        if(length(lgd.label) > 0)
          legend("right", inset=c(-0.4,0),
                 legend = lgd.label, col = lgd.col, lty = 1, lwd = 1.5,
                 title = "Fittings", xpd = TRUE)
        dev.off()
      }
    }
    invisible(NULL)
}
| /gSEM/R/plotAll.R | no_license | ingted/R-Examples | R | false | false | 4,773 | r | ## This function plots all graphs
##' Plot every response ~ predictor pair: one PNG per pair in `dirPath`,
##' raw data plus every successfully fitted functional form and a legend.
##' @importFrom 'graphics' 'par' 'points' 'legend'
##' @importFrom 'stats' 'as.formula' 'predict'
##' @importFrom 'grDevices' 'dev.off'
plotAll <- function(x, dirPath = file.path(".", "figs"),
                    figArgs = list(res = 150, units = "in", height = 8, width = 8)){
    ## showWarnings = FALSE: re-running with an existing figs dir is fine
    dir.create(dirPath, showWarnings = FALSE)
    ## ---------------
    ## Set colors
    ## ---------------
    ## Colors from "set1" of RColorBrewer.
    mycols <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
                "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
    ## One spec per candidate functional form: the element name inside
    ## `allForms`, the fitted-object class that marks a successful fit,
    ## the legend label, and the line/legend color.
    formSpecs <- list(
      list(key = "SL",    class = "lm",  label = "Simple Linear",    col = mycols[1]),
      list(key = "Quad",  class = "lm",  label = "Quadratic",        col = mycols[2]),
      list(key = "SQuad", class = "lm",  label = "Simple Quadratic", col = mycols[3]),
      list(key = "Exp",   class = "lm",  label = "Exponential",      col = mycols[4]),
      list(key = "Log",   class = "lm",  label = "Logarithm",        col = mycols[5]),
      ## BUG FIX: the nls line used to be drawn in mycols[5] (same color
      ## as the Logarithm fit) while its legend entry used mycols[6];
      ## both now consistently use mycols[6].
      list(key = "nls",   class = "nls", label = "a + b * exp(c * x)", col = mycols[6]))
    ## ---------------
    ## Plots
    ## ---------------
    allres <- x$all
    ## predictors (drop the response column)
    prds <- rownames(allres[,,1])
    prds <- prds[prds != x$response]
    ## Responses (drop the stressor column)
    rsps <- colnames(allres[,,1])
    rsps <- rsps[rsps != x$stressor]
    ## Go through pair by pair
    for(i in seq_along(prds)){
      for(j in seq_along(rsps)){
        ## NOTE(review): this skips matching *positions* of two different
        ## name vectors (prds vs rsps); kept as-is to preserve behavior.
        if(i == j) next
        ## list of results of functional forms on this pair
        allForms <- allres[prds[i], rsps[j],]
        ## initiate a png file
        pngArgs <- list(filename = file.path(dirPath,
                                             paste0(rsps[j], "~", prds[i], ".png")))
        pngArgs <- c(pngArgs, figArgs)
        do.call(what = "png", args = pngArgs)
        ## plot original data; wide right margin leaves room for the legend
        par(mar = c(5.1, 4.1, 4.1, 11.1))
        cat("Plotting", rsps[j], "~", prds[i], "\n")
        plotargs <- list(formula = as.formula(paste0(rsps[j], "~", prds[i])),
                         data = x$data,
                         main = paste0(rsps[j], "~", prds[i],
                                       paste0("\nBest fit: ", x$best[prds[i], rsps[j]])))
        do.call(what = "plot", args = plotargs)
        ## Generate x values for fitted lines.
        ## BUG FIX: the original used ifelse(nrow(x$data > 100), ...),
        ## which takes nrow() of a *logical matrix* (always the row count,
        ## hence always truthy); the intended value is
        ## max(nrow(x$data), 100), i.e. at least 100 points.
        xs.dense <- seq(min(x$data[prds[i]], na.rm = TRUE),
                        max(x$data[prds[i]], na.rm = TRUE),
                        length.out = max(nrow(x$data), 100))
        newdata <- list(xs.dense)
        names(newdata) <- prds[i]
        ## Draw each successfully fitted form and collect its legend entry.
        ## (Replaces six near-identical copy/paste branches.)
        lgd.label <- character(0)
        lgd.col <- character(0)
        for (spec in formSpecs) {
          fit <- allForms[[spec$key]]
          if (inherits(fit, spec$class)) {
            ys <- predict(fit, newdata = newdata)
            points(xs.dense, ys, type = "l", col = spec$col)
            lgd.label <- c(lgd.label, spec$label)
            lgd.col <- c(lgd.col, spec$col)
          }
        }
        if(length(lgd.label) > 0)
          legend("right", inset=c(-0.4,0),
                 legend = lgd.label, col = lgd.col, lty = 1, lwd = 1.5,
                 title = "Fittings", xpd = TRUE)
        dev.off()
      }
    }
    invisible(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-vpc.R
\name{pmx_vpc_rug}
\alias{pmx_vpc_rug}
\title{Sets vpc rug layer}
\usage{
pmx_vpc_rug(show = TRUE, color = "#000000", linewidth = 1, alpha = 0.7, size)
}
\arguments{
\item{show}{\code{logical} If TRUE show bin separators}
\item{color}{\code{character} Color of the rug. Default: "#000000".}
\item{linewidth}{\code{numeric} Thickness of the rug. Default: 1.}
\item{alpha}{\code{numeric} Transparency of the rug. Default: 0.7.}
\item{size}{\code{numeric} Deprecated thickness of the rug; use \code{linewidth} instead. Default: 1.}
}
\description{
Sets vpc rug layer
}
\details{
When the vpc confidence interval layer method is rectangles we don't show rug separators.
}
\seealso{
Other vpc:
\code{\link{pmx_plot_vpc}()},
\code{\link{pmx_vpc_bin}()},
\code{\link{pmx_vpc_ci}()},
\code{\link{pmx_vpc_obs}()},
\code{\link{pmx_vpc_pi}()},
\code{\link{pmx_vpc}()}
}
\concept{vpc}
| /man/pmx_vpc_rug.Rd | no_license | ggPMXdevelopment/ggPMX | R | false | true | 939 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-vpc.R
\name{pmx_vpc_rug}
\alias{pmx_vpc_rug}
\title{Sets vpc rug layer}
\usage{
pmx_vpc_rug(show = TRUE, color = "#000000", linewidth = 1, alpha = 0.7, size)
}
\arguments{
\item{show}{\code{logical} If TRUE show bin separators}
\item{color}{\code{character} Color of the rug. Default: "#000000".}
\item{linewidth}{\code{numeric} Thickness of the rug. Default: 1.}
\item{alpha}{\code{numeric} Transparency of the rug. Default: 0.7.}
\item{size}{\code{numeric} Depreciated thickness of the rug. Default: 1.}
}
\description{
Sets vpc rug layer
}
\details{
When the vpc confidence interval layer method is rectangles we don't show rug separators.
}
\seealso{
Other vpc:
\code{\link{pmx_plot_vpc}()},
\code{\link{pmx_vpc_bin}()},
\code{\link{pmx_vpc_ci}()},
\code{\link{pmx_vpc_obs}()},
\code{\link{pmx_vpc_pi}()},
\code{\link{pmx_vpc}()}
}
\concept{vpc}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-eta-pairs.R
\name{plot_pmx.eta_pairs}
\alias{plot_pmx.eta_pairs}
\title{Plot random effect correlation plot}
\usage{
\method{plot_pmx}{eta_pairs}(x, dx, ...)
}
\arguments{
\item{x}{distribution object}
\item{dx}{data set}
\item{...}{not used for the moment}
}
\value{
ggpairs plot
}
\description{
Plot random effect correlation plot
}
\seealso{
\code{\link{distrib}}
Other plot_pmx:
\code{\link{distrib}()},
\code{\link{eta_cov}()},
\code{\link{eta_pairs}()},
\code{\link{individual}()},
\code{\link{plot_pmx.distrib}()},
\code{\link{plot_pmx.eta_cov}()},
\code{\link{plot_pmx.individual}()},
\code{\link{plot_pmx.pmx_dens}()},
\code{\link{plot_pmx.pmx_gpar}()},
\code{\link{plot_pmx.pmx_qq}()},
\code{\link{plot_pmx.residual}()},
\code{\link{plot_pmx}()}
}
\concept{plot_pmx}
| /man/plot_pmx.eta_pairs.Rd | no_license | csetraynor/ggPMX | R | false | true | 864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-eta-pairs.R
\name{plot_pmx.eta_pairs}
\alias{plot_pmx.eta_pairs}
\title{Plot random effect correlation plot}
\usage{
\method{plot_pmx}{eta_pairs}(x, dx, ...)
}
\arguments{
\item{x}{distribution object}
\item{dx}{data set}
\item{...}{not used for the moment}
}
\value{
ggpairs plot
}
\description{
Plot random effect correlation plot
}
\seealso{
\code{\link{distrib}}
Other plot_pmx:
\code{\link{distrib}()},
\code{\link{eta_cov}()},
\code{\link{eta_pairs}()},
\code{\link{individual}()},
\code{\link{plot_pmx.distrib}()},
\code{\link{plot_pmx.eta_cov}()},
\code{\link{plot_pmx.individual}()},
\code{\link{plot_pmx.pmx_dens}()},
\code{\link{plot_pmx.pmx_gpar}()},
\code{\link{plot_pmx.pmx_qq}()},
\code{\link{plot_pmx.residual}()},
\code{\link{plot_pmx}()}
}
\concept{plot_pmx}
|
## These functions define a matrix which caches it's own inverse
## Written for the Coursera R programming course.
## Two functions:
## makeCacheMatrix created a cached matrix from a normal matrix
## cacheSolve - returns the inverse, computed using solve() and cached for subsequent calls
## makeCacheMatrix(x) creates a cached Matrix from a normal matrix
## E.g., c = rbind(c(1, -1/4), c(-1/4, 1))
## m = makeCacheMatrix(c)
## Construct a "cache matrix": a list of closures wrapping a matrix `x`
## together with a cached inverse. The cache starts empty (NULL) and is
## invalidated whenever the wrapped matrix is replaced via set().
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        list(
                set = function(y) {
                        # Replacing the matrix invalidates the cached inverse
                        x <<- y
                        cached_inverse <<- NULL
                },
                get = function() x,                       ## returns matrix
                getinv = function() cached_inverse,       ## NULL until cached
                setinv = function(i) cached_inverse <<- i ## store inverse
        )
}
## Return the inverse of a cache matrix created by makeCacheMatrix().
## On the first call the inverse is computed with solve() and stored via
## x$setinv(); subsequent calls return the cached value.
## Usage: cacheSolve(m)
##
## x   - a cache-matrix list exposing get/getinv/setinv
## ... - further arguments forwarded to solve() (previously accepted but
##       silently ignored; now forwarded, matching cachemean's pattern)
cacheSolve <- function(x, ...) {
        inv <- x$getinv()
        if (is.null(inv)) {
                message("Computing inverse")
                inv <- solve(x$get(), ...) # the actual inverse computation
                x$setinv(inv)
        } else {
                message("returning cached value")
        }
        ## NOTE: a stray trailing bare `return` (which evaluated to the
        ## `return` function itself and was dead code) has been removed.
        inv
}
## Mean of a cached-vector object: return the stored mean when present,
## otherwise compute it with mean() (forwarding `...`) and store it via
## x$setmean() before returning.
cachemean <- function(x, ...) {
        cached <- x$getmean()
        if (is.null(cached)) {
                cached <- mean(x$get(), ...)
                x$setmean(cached)
        } else {
                message("getting cached data")
        }
        cached
}
| /cachematrix.R | no_license | AndyCWB/ProgrammingAssignment2 | R | false | false | 1,342 | r | ## These functions define a matrix which caches it's own inverse
## Written for the Coursera R programming course.
## Two functions:
## makeCacheMatrix created a cached matrix from a normal matrix
## cacheSolve - returns the inverse, computed using solve() and cached for subsequent calls
## makeCacheMatrix(x) creates a cached Matrix from a normal matrix
## E.g., c = rbind(c(1, -1/4), c(-1/4, 1))
## m = makeCacheMatrix(c)
## Build a cache-aware matrix wrapper: a list of closures holding the
## matrix `x` and a lazily-filled inverse cache that is cleared whenever
## a new matrix is stored with set().
makeCacheMatrix <- function(x = matrix()) {
        inverse_cache <- NULL
        store <- function(y) {
                x <<- y
                inverse_cache <<- NULL  # new matrix -> old inverse is stale
        }
        list(set = store,
             get = function() x,
             getinv = function() inverse_cache,
             setinv = function(i) inverse_cache <<- i)
}
## Return the inverse of a cache matrix created by makeCacheMatrix().
## Computes with solve() on the first call and caches the result through
## x$setinv(); later calls are served from the cache.
## Usage: cacheSolve(m)
##
## x   - cache-matrix list (get/getinv/setinv closures)
## ... - forwarded to solve() (previously accepted but ignored)
cacheSolve <- function(x, ...) {
        inv <- x$getinv()
        if (is.null(inv)) {
                message("Computing inverse")
                inv <- solve(x$get(), ...) # the actual inverse computation
                x$setinv(inv)
        } else {
                message("returning cached value")
        }
        ## A dead trailing bare `return` statement has been removed.
        inv
}
## Cached mean: if the wrapped object already stores a mean, return it;
## otherwise compute mean(x$get(), ...), persist it with x$setmean(),
## and return the freshly computed value.
cachemean <- function(x, ...) {
        stored <- x$getmean()
        if (!is.null(stored)) {
                message("getting cached data")
                return(stored)
        }
        result <- mean(x$get(), ...)
        x$setmean(result)
        result
}
|
# read data to a table
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?",
colClasses = c('character','character','numeric','numeric',
'numeric','numeric','numeric','numeric','numeric'))
# format date
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# select a subset of complete cases from data between 2007-2-1 and 2007-2-2
data <- subset(data,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
data <- data[complete.cases(data),]
# concatenate date and time together, format correctly and add to the table
date_time <- paste(data$Date, data$Time)
date_time <- setNames(date_time, "Date and time")
data <- data[ ,!(names(data) %in% c("Date","Time"))]
data <- cbind(date_time, data)
data$date_time <- as.POSIXct(date_time)
# plot energy sub metering on different weekdays with legend
plot(data$Sub_metering_1~data$date_time, type="l", ylab="Energy sub metering", xlab="")
lines(data$Sub_metering_2~data$date_time,col='Red')
lines(data$Sub_metering_3~data$date_time,col='Blue')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# save the plot to a PNG file
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
| /plot3.R | no_license | bviikmae/ExData_Plotting1 | R | false | false | 1,510 | r | # read data to a table
# Exploratory Data Analysis - Plot 3
# Downloads the UCI "household power consumption" dataset, subsets it to
# 2007-02-01..2007-02-02, plots the three energy sub-metering series over
# time, and saves the figure as plot3.png.
#
# Side effects: network download, files written to the working directory,
# and an open graphics device -- statements must run in this order.
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
              destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
# "?" encodes missing values in the raw file; explicit colClasses keeps
# the two date/time columns as character for manual parsing below
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?",
                   colClasses = c('character','character','numeric','numeric',
                                  'numeric','numeric','numeric','numeric','numeric'))
# format date
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# select a subset of complete cases from data between 2007-2-1 and 2007-2-2
data <- subset(data,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
data <- data[complete.cases(data),]
# concatenate date and time together, format correctly and add to the table
date_time <- paste(data$Date, data$Time)
date_time <- setNames(date_time, "Date and time")
# drop the original Date/Time columns, then prepend the combined timestamp
data <- data[ ,!(names(data) %in% c("Date","Time"))]
data <- cbind(date_time, data)
data$date_time <- as.POSIXct(date_time)
# plot energy sub metering on different weekdays with legend
plot(data$Sub_metering_1~data$date_time, type="l", ylab="Energy sub metering", xlab="")
lines(data$Sub_metering_2~data$date_time,col='Red')
lines(data$Sub_metering_3~data$date_time,col='Blue')
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# save the plot to a PNG file
# dev.copy re-renders the current screen device into a 480x480 PNG
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
|
####################################################################
#' Dataset columns and rows structure
#'
#' This function lets the user quickly check the structure of a
#' dataset (data.frame). It returns multiple counters for useful metrics,
#' a plot, and a list of column names for each of the column metrics.
#'
#' @family Exploratory
#' @param df Dataframe
#' @param return Character. Return "skimr" for skim report, "numbers" for
#' stats and numbers, "names" for a list with the column names of each of
#' the class types, "plot" for a nice plot with "numbers" output, "distr"
#' for an overall summary plot showing categorical, numeric, and missing
#' values by using \code{plot_df}
#' distributions
#' @param subtitle Character. Add subtitle to plot
#' @param quiet Boolean. Keep quiet or show other options available?
#' @examples
#' options("lares.font" = NA) # Temporal
#' data(dft) # Titanic dataset
#'
#' # List with the names of the columns classified by class
#' df_str(dft, "names")
#'
#' # Dataframe with numbers: total values, row, columns, complete rows....
#' df_str(dft, "numbers", quiet = TRUE)
#'
#' # Now, some visualizations
#' df_str(dft, "plot", quiet = TRUE)
#' df_str(dft, "distr", quiet = TRUE)
#' @export
df_str <- function(df,
                   return = "plot",
                   subtitle = NA,
                   quiet = FALSE){
  # Advertise the alternative 'return' modes unless the caller asked for quiet
  if (!quiet) {
    rets <- c("skimr","numbers","names","distr","plot")
    message(paste("Other available 'return' options:", vector2text(rets[rets != return])))
  }
  # Coerce (e.g. tibbles) to a plain data.frame before inspection
  df <- data.frame(df)
  # "skimr": delegate the full summary to the optional skimr package
  if (return == "skimr") {
    try_require("skimr")
    return(skim(df))
  }
  # "distr": overall distributions overview built by plot_df()
  if (return == "distr") {
    p <- plot_df(df)
    return(p)
  }
  # Classify column names by their class
  names <- list(
    cols = colnames(df),
    nums = colnames(df)[unlist(lapply(df, is.numeric))],
    char = colnames(df)[unlist(lapply(df, is.character))],
    factor = colnames(df)[unlist(lapply(df, is.factor))],
    logic = colnames(df)[unlist(lapply(df, is.logical))])
  # Whatever is not numeric/character/factor/logical is treated as time/date
  names[["time"]] <- names$cols[!colnames(df) %in% c(
    names$nums, names$char, names$factor, names$logic)]
  # Columns containing nothing but NA
  names[["allnas"]] <- names$cols[unlist(lapply(df, function(x) all(is.na(x))))]
  if (return == "names")
    return(names)
  # One-row data.frame with the overall counters
  numbers <- data.frame(
    "Total Values" = nrow(df) * ncol(df),
    "Total Rows" = nrow(df),
    "Total Columns" = ncol(df),
    "Numeric Columns" = length(names$nums),
    "Character Columns" = length(names$char),
    "Factor Columns" = length(names$factor),
    "Logical Columns" = length(names$logic),
    "Time/Date Columns" = length(names$time),
    "All Missing Columns" = length(names$allnas),
    "Missing Values" = sum(is.na(df)),
    "Complete Rows" = sum(complete.cases(df)),
    "Memory Usage" = as.numeric(object.size(df)))
  # Long format: one row per metric, tagged by whether it counts
  # Columns, Rows, or Values, plus its percentage of the matching total
  intro2 <- data.frame(counter = t(numbers)) %>%
    mutate(metric = row.names(.),
           type = ifelse(grepl("Column", colnames(numbers)), "Columns",
                         ifelse(grepl("Rows", colnames(numbers)), "Rows", "Values")),
           p = ifelse(.data$type == "Columns", 100*.data$counter/numbers$Total.Columns,
                      ifelse(.data$type == "Rows", 100*.data$counter/numbers$Total.Rows,
                             100*.data$counter/numbers$Total.Values)),
           p = round(.data$p, 2),
           type = factor(.data$type, levels = c("Values", "Columns", "Rows"))) %>%
    select(.data$metric, .data$counter, .data$type, .data$p)
  if (return == "numbers") return(select(intro2, -.data$type))
  if (return == "plot") {
    # Horizontal bar chart of the counters, faceted by metric type;
    # `x` places each count label inside or outside the bar depending
    # on how close the bar is to 100%
    p <- intro2 %>%
      filter(!.data$metric %in% "Memory.Usage") %>%
      mutate(x = ifelse(.data$p < 75, -0.15, 1.15)) %>%
      ggplot(aes(x = reorder(.data$metric, as.integer(.data$counter)),
                 y = .data$p, fill = .data$type,
                 label = formatNum(.data$counter, 0))) +
      geom_col() + coord_flip() + ylim(0, 100) +
      theme_minimal() + guides(fill = FALSE) +
      labs(title = "Dataset overall structure",
           x = "", y = "% of total", fill = "",
           caption = paste("Memory Usage:", formatNum(numbers$Memory.Usage/(1024*1024)),"Mb")) +
      facet_grid(type ~., scales = "free", space = "free") +
      geom_text(aes(hjust = .data$x), size = 3) +
      theme_lares2(pal = 1)
    if (!is.na(subtitle)) p <- p + labs(subtitle = subtitle)
    return(p)
  }
}
####################################################################
#' Plot All Numerical Features (Boxplots)
#'
#' This function filters numerical columns and plots boxplots.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#' options("lares.font" = NA) # Temporal
#' data(dft) # Titanic dataset
#' plot_nums(dft)
#' @export
plot_nums <- function(df) {
  # geom_jitter is random; fix the seed so repeated calls draw the same plot
  set.seed(0)
  numeric_cols <- select_if(df, is.numeric)
  if (length(numeric_cols) == 0) {
    message("No numerical variables found!")
    return(invisible(NULL))
  }
  # One boxplot (with jittered points) per numeric column, free scales
  gather(numeric_cols) %>%
    filter(!is.na(.data$value)) %>%
    ggplot(aes(x = .data$key, y = .data$value)) +
    geom_jitter(alpha = 0.2, size = 0.8) +
    geom_boxplot(alpha = 0.8, outlier.shape = NA, width = 1) +
    facet_wrap(.data$key~., scales = "free") +
    labs(title = "Numerical Features Boxplots", x = NULL, y = NULL) +
    theme_lares2() +
    theme(axis.text.y = element_blank(),
          axis.text.x = element_text(vjust = 2, size = 8),
          panel.spacing.y = unit(-.5, "lines"),
          strip.text = element_text(size = 10, vjust = -1.3)) +
    coord_flip()
}
####################################################################
#' Plot All Categorical Features (Frequencies)
#'
#' This function filters categorical columns and plots the frequency
#' for each value on every feature.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#'
#' data(dft) # Titanic dataset
#' plot_cats(dft)
#' @export
plot_cats <- function(df) {
  # Keep only the non-numeric (categorical-like) columns
  categorical <- select_if(df, Negate(is.numeric))
  if (length(categorical) == 0) {
    message("No categorical variables found!")
    return(invisible(NULL))
  }
  # freqs() draws the per-value frequency plot; just retitle it
  freqs(categorical, plot = TRUE) +
    labs(title = "Categorical Features Frequencies")
}
####################################################################
#' Plot Summary of Numerical and Categorical Features
#'
#' This function plots all columns frequencies and boxplots, for
#' categorical and numerical respectively.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#'
#' data(dft) # Titanic dataset
#' plot_df(dft)
#' @export
plot_df <- function(df) {
  # Shared theme tweak applied to every sub-plot title
  shrink_title <- theme(plot.title = element_text(size = 12))
  panels <- list()
  # Categorical frequencies panel (skipped when plot_cats returns nothing)
  categorical <- plot_cats(df)
  if (length(categorical) != 0) panels[["cats"]] <- categorical + shrink_title
  # Numerical boxplots panel
  numerical <- plot_nums(df)
  if (length(numerical) != 0) panels[["nums"]] <- numerical + shrink_title
  # Missingness panel, with its legend suppressed
  missing <- missingness(df, plot = TRUE, summary = FALSE)
  if (length(missing) != 0)
    panels[["miss"]] <- missing + shrink_title + guides(fill = FALSE)
  # Relative panel heights depend on how many panels survived
  n_panels <- length(panels)
  if (n_panels == 3) heights <- c(4/12, 1/2, 3/12)
  if (n_panels == 2) heights <- c(0.5, 0.5)
  if (n_panels == 1) heights <- NULL
  margin <- theme(plot.margin = unit(c(0.1,0.5,0.1,0.5), "cm"))
  wrap_plots(lapply(panels, "+", margin), heights = heights)
}
| /R/dataframe_str.R | no_license | alexandereric995/lares | R | false | false | 7,308 | r | ####################################################################
#' Dataset columns and rows structure
#'
#' This function lets the user quickly check the structure of a
#' dataset (data.frame). It returns multiple counters for useful metrics,
#' a plot, and a list of column names for each of the column metrics.
#'
#' @family Exploratory
#' @param df Dataframe
#' @param return Character. Return "skimr" for skim report, "numbers" for
#' stats and numbers, "names" for a list with the column names of each of
#' the class types, "plot" for a nice plot with "numbers" output, "distr"
#' for an overall summary plot showing categorical, numeric, and missing
#' values by using \code{plot_df}
#' distributions
#' @param subtitle Character. Add subtitle to plot
#' @param quiet Boolean. Keep quiet or show other options available?
#' @examples
#' options("lares.font" = NA) # Temporal
#' data(dft) # Titanic dataset
#'
#' # List with the names of the columns classified by class
#' df_str(dft, "names")
#'
#' # Dataframe with numbers: total values, row, columns, complete rows....
#' df_str(dft, "numbers", quiet = TRUE)
#'
#' # Now, some visualizations
#' df_str(dft, "plot", quiet = TRUE)
#' df_str(dft, "distr", quiet = TRUE)
#' @export
df_str <- function(df,
                   return = "plot",
                   subtitle = NA,
                   quiet = FALSE){
  # List the other available output formats unless the user asked to keep quiet
  if (!quiet) {
    rets <- c("skimr","numbers","names","distr","plot")
    message(paste("Other available 'return' options:", vector2text(rets[rets != return])))
  }
  df <- data.frame(df)
  # "skimr": delegate the whole summary to the (optional) skimr package
  if (return == "skimr") {
    try_require("skimr")
    return(skim(df))
  }
  # "distr": overall distributions plot built by plot_df()
  if (return == "distr") {
    p <- plot_df(df)
    return(p)
  }
  # Bucket column names by class; columns that are not numeric, character,
  # factor or logical are assumed to be time/date columns
  names <- list(
    cols = colnames(df),
    nums = colnames(df)[unlist(lapply(df, is.numeric))],
    char = colnames(df)[unlist(lapply(df, is.character))],
    factor = colnames(df)[unlist(lapply(df, is.factor))],
    logic = colnames(df)[unlist(lapply(df, is.logical))])
  names[["time"]] <- names$cols[!colnames(df) %in% c(
    names$nums, names$char, names$factor, names$logic)]
  names[["allnas"]] <- names$cols[unlist(lapply(df, function(x) all(is.na(x))))]
  if (return == "names")
    return(names)
  # One-row data.frame of summary counters (memory usage is in bytes here)
  numbers <- data.frame(
    "Total Values" = nrow(df) * ncol(df),
    "Total Rows" = nrow(df),
    "Total Columns" = ncol(df),
    "Numeric Columns" = length(names$nums),
    "Character Columns" = length(names$char),
    "Factor Columns" = length(names$factor),
    "Logical Columns" = length(names$logic),
    "Time/Date Columns" = length(names$time),
    "All Missing Columns" = length(names$allnas),
    "Missing Values" = sum(is.na(df)),
    "Complete Rows" = sum(complete.cases(df)),
    "Memory Usage" = as.numeric(object.size(df)))
  # Reshape to long format: one row per metric, with p = percentage of the
  # relevant total (values, columns or rows) for plotting
  intro2 <- data.frame(counter = t(numbers)) %>%
    mutate(metric = row.names(.),
           type = ifelse(grepl("Column", colnames(numbers)), "Columns",
                         ifelse(grepl("Rows", colnames(numbers)), "Rows", "Values")),
           p = ifelse(.data$type == "Columns", 100*.data$counter/numbers$Total.Columns,
                      ifelse(.data$type == "Rows", 100*.data$counter/numbers$Total.Rows,
                             100*.data$counter/numbers$Total.Values)),
           p = round(.data$p, 2),
           type = factor(.data$type, levels = c("Values", "Columns", "Rows"))) %>%
    select(.data$metric, .data$counter, .data$type, .data$p)
  if (return == "numbers") return(select(intro2, -.data$type))
  # "plot": horizontal bars faceted by metric type; bar length is the
  # percentage of the corresponding total, text label is the raw counter
  if (return == "plot") {
    p <- intro2 %>%
      filter(!.data$metric %in% "Memory.Usage") %>%
      # x controls label placement: outside the bar for short bars (p < 75),
      # inside for long bars
      mutate(x = ifelse(.data$p < 75, -0.15, 1.15)) %>%
      ggplot(aes(x = reorder(.data$metric, as.integer(.data$counter)),
                 y = .data$p, fill = .data$type,
                 label = formatNum(.data$counter, 0))) +
      geom_col() + coord_flip() + ylim(0, 100) +
      theme_minimal() + guides(fill = FALSE) +
      labs(title = "Dataset overall structure",
           x = "", y = "% of total", fill = "",
           caption = paste("Memory Usage:", formatNum(numbers$Memory.Usage/(1024*1024)),"Mb")) +
      facet_grid(type ~., scales = "free", space = "free") +
      geom_text(aes(hjust = .data$x), size = 3) +
      theme_lares2(pal = 1)
    if (!is.na(subtitle)) p <- p + labs(subtitle = subtitle)
    return(p)
  }
}
####################################################################
#' Plot All Numerical Features (Boxplots)
#'
#' This function filters numerical columns and plots boxplots.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#' options("lares.font" = NA) # Temporal
#' data(dft) # Titanic dataset
#' plot_nums(dft)
#' @export
plot_nums <- function(df) {
  # Fixed seed so geom_jitter() produces the same plot on every call.
  # NOTE(review): this mutates the global RNG state as a side effect.
  set.seed(0)
  # Renamed local from `which` to avoid shadowing base::which()
  nums <- df %>% select_if(is.numeric)
  if (length(nums) > 0) {
    # Long format (one key/value pair per cell), NAs dropped before plotting
    p <- gather(nums) %>%
      filter(!is.na(.data$value)) %>%
      ggplot(aes(x = .data$key, y = .data$value)) +
      geom_jitter(alpha = 0.2, size = 0.8) +
      geom_boxplot(alpha = 0.8, outlier.shape = NA, width = 1) +
      facet_wrap(.data$key~., scales = "free") +
      labs(title = "Numerical Features Boxplots", x = NULL, y = NULL) +
      theme_lares2() +
      theme(axis.text.y = element_blank(),
            axis.text.x = element_text(vjust = 2, size = 8),
            panel.spacing.y = unit(-.5, "lines"),
            strip.text = element_text(size = 10, vjust = -1.3)) +
      coord_flip()
    return(p)
  } else {
    # Nothing numeric to plot; inform the user and return NULL invisibly
    message("No numerical variables found!")
  }
}
####################################################################
#' Plot All Categorical Features (Frequencies)
#'
#' This function filters categorical columns and plots the frequency
#' for each value on every feature.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#'
#' data(dft) # Titanic dataset
#' plot_cats(dft)
#' @export
plot_cats <- function(df) {
  # Renamed local from `plot` to avoid shadowing base::plot()
  cats <- df %>% select_if(Negate(is.numeric))
  if (length(cats) > 0) {
    # freqs() builds the frequency plot; retitle it for this context
    p <- cats %>% freqs(plot = TRUE) +
      labs(title = "Categorical Features Frequencies")
    return(p)
  } else {
    # Nothing categorical to plot; inform the user and return NULL invisibly
    message("No categorical variables found!")
  }
}
####################################################################
#' Plot Summary of Numerical and Categorical Features
#'
#' This function plots all columns frequencies and boxplots, for
#' categorical and numerical respectively.
#'
#' @family Exploratory
#' @param df Dataframe
#' @examples
#'
#' data(dft) # Titanic dataset
#' plot_df(dft)
#' @export
plot_df <- function(df) {
  plots <- list()
  # Categorical frequencies panel (NULL when there are no categorical columns)
  cats <- plot_cats(df)
  if (length(cats) != 0) plots[["cats"]] <- cats +
      theme(plot.title = element_text(size = 12))
  # Numerical boxplots panel (NULL when there are no numeric columns)
  nums <- plot_nums(df)
  if (length(nums) != 0) plots[["nums"]] <- nums +
      theme(plot.title = element_text(size = 12))
  # Missing-values panel
  mis <- missingness(df, plot = TRUE, summary = FALSE)
  if (length(mis) != 0) plots[["miss"]] <- mis +
      theme(plot.title = element_text(size = 12)) + guides(fill = FALSE)
  # Fix: guard the no-panels case. The original left `heights` undefined
  # here, so wrap_plots() failed with "object 'heights' not found".
  if (length(plots) == 0) {
    message("No plots to show!")
    return(invisible(NULL))
  }
  # Relative panel heights depend on how many panels were produced
  heights <- NULL
  if (length(plots) == 3) heights <- c(4/12, 1/2, 3/12)
  if (length(plots) == 2) heights <- c(0.5, 0.5)
  margin <- theme(plot.margin = unit(c(0.1,0.5,0.1,0.5), "cm"))
  plots <- lapply(plots, "+", margin)
  p <- wrap_plots(plots, heights = heights)
  return(p)
}
|
\name{skat.uniqtl.simple.C}
\alias{skat.uniqtl.simple.C}
\title{SKAT Test for Population-based Studies of Quantitative Trait
}
\description{
This function implements the sequence kernel association test.
}
\usage{
skat.uniqtl.simple.C(dat.ped, par.dat, maf, maf.cutoff, no.perm =
1000, alternative = "two.sided" , out.type="C")
}
\arguments{
\item{dat.ped}{
A list of ped files.
}
\item{par.dat}{
A list of parameters for ascertainment. The default in an empty list.
}
\item{maf}{
User specified minor allele frequency vector
}
\item{maf.cutoff}{
Upper minor allele frequency cutoff for rare variant analysis
}
\item{no.perm}{
The number of permutations. The default is 1000 for the SKAT
test. Adaptive permutation is implemented.
}
\item{alternative}{
Alternative hypothesis, default choice is two.sided. Other options
include greater or less.
}
\item{out.type}{C for continuous trait}
}
\value{
\item{p.value}{P-value as determined by the alternative hypothesis tested}
\item{statistic}{Statistic value for the SKAT test}
}
\author{
Dajiang Liu
}
| /man/skat.uniqtl.simple.C.Rd | no_license | cran/STARSEQ | R | false | false | 1,103 | rd | \name{skat.uniqtl.simple.C}
\alias{skat.uniqtl.simple.C}
\title{SKAT Test for Population-based Studies of Quantitative Trait
}
\description{
This function implements the sequence kernel association test.
}
\usage{
skat.uniqtl.simple.C(dat.ped, par.dat, maf, maf.cutoff, no.perm =
1000, alternative = "two.sided" , out.type="C")
}
\arguments{
\item{dat.ped}{
A list of ped files.
}
\item{par.dat}{
A list of parameters for ascertainment. The default in an empty list.
}
\item{maf}{
User specified minor allele frequency vector
}
\item{maf.cutoff}{
Upper minor allele frequency cutoff for rare variant analysis
}
\item{no.perm}{
The number of permutations. The default is 1000 for the SKAT
test. Adaptive permutation is implemented.
}
\item{alternative}{
Alternative hypothesis, default choice is two.sided. Other options
include greater or less.
}
\item{out.type}{C for continuous trait}
}
\value{
\item{p.value}{P-value as determined by the alternative hypothesis tested}
\item{statistic}{Statistic value for the SKAT test}
}
\author{
Dajiang Liu
}
|
# Setup script: installs the packages swirl needs and launches the
# interactive "Aprenda R no R" course.
# Package installation
install.packages("swirl")
install.packages("curl")
install.packages("dplyr")
install.packages("openssl")
install.packages("samplingbook")
library(swirl)
# Use swirl's Portuguese interface
select_language(language = 'portuguese')
# Install the course from GitHub
#library(swirl)
#uninstall_course('Aprenda_R_no_R')
install_course_github('elthonf','Aprenda_R_no_R')
# Start the interactive courses
swirl()
| /Codigos/Instalacao de pacotes e Aprenda R no R/Instalando_Swirl.R | no_license | MattPina/PortfolioR | R | false | false | 379 | r | # Instalação de pacotes
install.packages("swirl")
install.packages("curl")
install.packages("dplyr")
install.packages("openssl")
install.packages("samplingbook")
library(swirl)
select_language(language = 'portuguese')
# Instala curso
#library(swirl)
#uninstall_course('Aprenda_R_no_R')
install_course_github('elthonf','Aprenda_R_no_R')
# Inicia os cursos interativos
swirl()
|
# For compatibility with 2.2.21
swirl_options(swirl_logging = TRUE)
# Locate the swirl courses directory. Uses the internal
# swirl:::swirl_courses_dir() when available; if that call errors, falls
# back to the "Courses" folder inside the installed swirl package.
.get_course_path <- function(){
  tryCatch(swirl:::swirl_courses_dir(),
           error = function(c) {file.path(find.package("swirl"),"Courses")}
  )
}
# Path to installed lesson
lessonpath <- file.path(.get_course_path(), "Graphics",
                        "Plotting_Colour")
# Reset the graphics state so the lesson starts from a clean device:
# close any open device (silently, in case none is open), open a fresh
# plot, and restore the default palette and a single-panel layout.
try(dev.off(),silent=TRUE)
plot.new()
palette("default")
par(mfrow = c(1,1))
| /Graphics/Plotting_Colours/initLesson.R | no_license | Hachemi-CRSTRA/swirl_courses | R | false | false | 433 | r | # For compatibility with 2.2.21
swirl_options(swirl_logging = TRUE)
.get_course_path <- function(){
tryCatch(swirl:::swirl_courses_dir(),
error = function(c) {file.path(find.package("swirl"),"Courses")}
)
}
# Path to installed lesson
lessonpath <- file.path(.get_course_path(), "Graphics",
"Plotting_Colour")
try(dev.off(),silent=TRUE)
plot.new()
palette("default")
par(mfrow = c(1,1))
|
# Naive Bayes classification of the CAD dataset using the feature set
# selected in `nb.features`, with 10-fold cross-validation via caret.
library(caret)
library(rsample)
library(klaR)
# Load the balanced CAD data. NOTE(review): this .RData is also assumed to
# provide `train.df`, `test.df` and `nb.features`, which are used below but
# never created in this script -- confirm they are stored in the file.
load(file = "/Users/wangyunxuan/Downloads/caddata (3).RData")
# Inspect the selected feature object. Fix: moved after load(); the original
# referenced `nb.features` before anything had been loaded, which errors when
# the object only exists inside the .RData file.
nb.features
df <- as.data.frame(cad.df.balanced)
#head(df)
#which( colnames(df)=="Cath" )
#n<-ncol(df)
#c(1:54)
#c(1:42,44:54)
set.seed(123)
# Keep only the selected predictors plus the outcome column "Cath"
train <- train.df[, c(predictors(nb.features), "Cath")]
test <- test.df[, c(predictors(nb.features), "Cath")]
# 10-fold repeated cross-validation setup
control <- trainControl(method = "repeatedcv", number = 10)
# Fix: removed the stray empty argument (", ,") from the original train()
# call, which would be passed as a missing argument
train_model <- train(Cath ~ ., data = train, method = "nb", trControl = control)
train_model$results
pred <- predict(train_model, test)
# Test-set accuracy: proportion of correct predictions
mean(pred == test$Cath)
| /new nb.R | no_license | 123saaa/Hello | R | false | false | 565 | r | library(caret)
library(rsample)
library(klaR)
nb.features
load(file = "/Users/wangyunxuan/Downloads/caddata (3).RData")
df=as.data.frame(cad.df.balanced)
#head(df)
#which( colnames(df)=="Cath" )
#n<-ncol(df)
#c(1:54)
#c(1:42,44:54)
set.seed(123)
train <- train.df[,c(predictors(nb.features),"Cath")]
test <- test.df[,c(predictors(nb.features),"Cath")]
control <- trainControl(method="repeatedcv", number=10)
train_model<-train(Cath ~., data = train, method="nb", ,trControl=control)
train_model$results
pred=predict(train_model,test)
mean(pred== test$Cath)
|
# pdx.owl.kernel.smooth.R
# OWL with Gaussian kernel decision function with random forest smoothed outcomes
library(caret)
library(stringi)
# need to supply to the function a cancer type, one of
# "BRCA", "CM", "CRC", "NSCLC", or "PDAC"
# and an outcome, one of "BAR" for best average response
# or "Surv" for time to doubling
# gene.data.file is the name of a csv file with gene data
# numgenes is the number of genes to use for smoothing
# k is the number of folds for cross-validation
# also need to supply a seed to make training/testing sets for cross-validation
# c1s and c2s are the tuning parameters to try; if c2s is not specified, the max number for each c1 are tried
# if strip = T the first column of gene.data.file is assumed to be rownames and is stripped off
# outstring is an identifier for the output csv file
# Estimates an OWL (outcome-weighted learning) treatment rule with a Gaussian
# kernel on PDX data, using random-forest-smoothed outcomes for training and
# raw outcomes for validation. Writes a one-row CSV of results to output_dir
# and returns a list with the selected tuning parameters, cross-validated
# value functions, their covariance, and observed/optimal benchmarks.
# NOTE(review): has heavy side effects -- changes the working directory
# (setwd), writes temporary CSVs, and shells out to python3.
pdx.owl.kernel.smooth = function(cancer.type, outcome, gene.data.file, numgenes, input_dir, output_dir, c1s = c(0), c2s = NA, k = 5, seed = 1, strip = T, outstring = "_owlkernelsmooth.csv") {
  setwd(input_dir)
  load("split.cm.data.rda")
  load("trts.by.cancer.rda")
  # random forest predicted values -- use these as outcomes when estimating decision rule
  load("pred_vals_rf.rda")
  # extract clinical data for given cancer type
  if (cancer.type == "BRCA") {dat = split.cm.data$BRCA; clinical = dat[ , 1:17]}
  if (cancer.type == "CM") {dat = split.cm.data$CM; clinical = dat[ , 1:17]}
  if (cancer.type == "CRC") {dat = split.cm.data$CRC; clinical = dat[ , 1:17]}
  if (cancer.type == "NSCLC") {dat = split.cm.data$NSCLC; clinical = dat[ , 1:17]}
  if (cancer.type == "PDAC") {dat = split.cm.data$PDAC; clinical = dat[ , 1:17]}
  if (cancer.type == "overall") {
    load("full.data.rda")
    clinical = dat[ , 1:5]
  }
  clinical$RespScaled = -clinical$RespScaled # reverse sign of best average response -- this way larger values are better
  ind1 = which(cancer.type == names(trts.by.cancer))
  ind2 = which(numgenes == c(50, 100, 500, 1000))
  clinical$new.resp = pred_vals_rf[[ind1]][[ind2]][[1]]
  clinical$new.surv = pred_vals_rf[[ind1]][[ind2]][[2]]
  biomarkers = read.csv(gene.data.file)
  if (strip) biomarkers = biomarkers[ , -1]
  ngene = dim(biomarkers)[2]
  # remove duplicated columns from biomarkers
  if (sum(duplicated(as.matrix(biomarkers), MARGIN = 2)) > 0) biomarkers = biomarkers[ , -which(duplicated(as.matrix(biomarkers), MARGIN = 2))]
  # center/scale biomarkers
  center.scale = function(x) return((x - mean(x)) / sd(x))
  biomarker.temp = apply(biomarkers, 2, center.scale)
  biomarkers = biomarker.temp
  # format data
  biomarkers = biomarkers[!duplicated(biomarkers), ] # here biomarkers contains one observation per line
  # reshape raw outcomes to line x treatment matrices
  new.resp.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  new.surv.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  rownames(new.resp.mat) = unique(clinical$Model); colnames(new.resp.mat) = unique(clinical$Treatment)
  rownames(new.surv.mat) = unique(clinical$Model); colnames(new.surv.mat) = unique(clinical$Treatment)
  for (dim1 in 1:nrow(new.resp.mat)) {
    for (dim2 in 1:ncol(new.resp.mat)) {
      row = which(clinical$Model == rownames(new.resp.mat)[dim1] & clinical$Treatment == colnames(new.resp.mat)[dim2])
      if (length(row) != 0) {
        new.resp.mat[dim1, dim2] = clinical$RespScaled[row]
        new.surv.mat[dim1, dim2] = clinical$logSurvScaled[row]
      }
    }
  } # end format of clinical data
  # RF predicted values, same line x treatment layout
  smooth.resp.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  smooth.surv.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  rownames(smooth.resp.mat) = unique(clinical$Model); colnames(smooth.resp.mat) = unique(clinical$Treatment)
  rownames(smooth.surv.mat) = unique(clinical$Model); colnames(smooth.surv.mat) = unique(clinical$Treatment)
  for (dim1 in 1:nrow(smooth.resp.mat)) {
    for (dim2 in 1:ncol(smooth.resp.mat)) {
      row = which(clinical$Model == rownames(smooth.resp.mat)[dim1] & clinical$Treatment == colnames(smooth.resp.mat)[dim2])
      if (length(row) != 0) {
        smooth.resp.mat[dim1, dim2] = clinical$new.resp[row]
        smooth.surv.mat[dim1, dim2] = clinical$new.surv[row]
      }
    }
  } # end format of clinical data
  # clinical.other contains outcomes not specified for estimation
  if (outcome == "BAR") {
    clinical = new.resp.mat
    clinical.other = new.surv.mat
  }
  if (outcome == "Surv") {
    clinical = new.surv.mat
    clinical.other = new.resp.mat
  }
  # smooth.clinical.other contains outcomes not specified for estimation
  if (outcome == "BAR") {
    smooth.clinical = smooth.resp.mat
    smooth.clinical.other = smooth.surv.mat
  }
  if (outcome == "Surv") {
    smooth.clinical = smooth.surv.mat
    smooth.clinical.other = smooth.resp.mat
  }
  # create folds for cross-validation
  set.seed(seed)
  folds = createFolds(1:dim(biomarkers)[1], k = k, list = TRUE, returnTrain = FALSE)
  # c1 is the number of trt's to group with untreated, c2 is the number of steps to take down the tree
  avg.main.outs = NULL # store primary value functions across c1 and c2
  avg.other.outs = NULL # store secondary value functions across c1 and c2
  parameters = matrix(NA, nrow = length(c1s) * ncol(clinical), ncol = 2) # store pairs of c1 and c2
  colnames(parameters) = c("c1", "c2")
  cov.list = list() # to store covariances of value functions
  ctr = 1 # count number of times through inner loop
  print ("Begin the loops of ntrt and nodes")
  # loop through parameters
  for (c1 in c1s) {
    # Fix: compute the candidate c2 values per c1 in a local variable.
    # The original did `if (is.na(c2s)) c2s = seq(...)`, which (a) errors on
    # vector c2s in modern R and (b) permanently overwrote c2s in the first
    # iteration, so later c1 values never got their own (larger/smaller)
    # maximum number of steps as documented.
    c2s.use = c2s
    if (anyNA(c2s.use)) c2s.use = seq(1, (ncol(clinical) - c1 - 2))
    print(sprintf(" c1 = %d", c1))
    for (c2 in c2s.use) {
      print(sprintf(" c2 = %d", c2))
      # store primary and secondary value functions across folds
      main.folds = NULL
      other.folds = NULL
      # loop through folds
      for (f in 1:length(folds)) {
        # select training and testing sets; train on smoothed outcomes,
        # validate on raw outcomes
        train.bio = biomarkers[-folds[[f]], ]
        train.clin = smooth.clinical[-folds[[f]], ]
        test.bio = biomarkers[folds[[f]], ]
        test.clin = clinical[folds[[f]], ]
        test.clin.other = clinical.other[folds[[f]], ]
        # find the c1 closest treatments to "untreated"
        dist_mat = as.matrix(dist(t(train.clin)))
        col_ind = which(colnames(dist_mat) == "untreated")
        ordered_dist_mat = dist_mat[order(dist_mat[ , col_ind]) , col_ind]
        untrt = names(ordered_dist_mat[1:(1 + c1)])
        # average outcomes across "No treatment group"
        untrt.ind = which(colnames(train.clin) %in% untrt)
        means = apply(as.matrix(train.clin[ , untrt.ind]), 1, mean, na.rm = T)
        train.clin = train.clin - means # subtract off mean of untreated group in each line
        train.clin = train.clin[ , -untrt.ind]
        # same for the test set
        # replace na by the mean untreated value
        for(i in 1:nrow(test.clin)){
          if (is.na(test.clin[i,"untreated"]))
            test.clin[i,"untreated"] = mean(test.clin[ ,"untreated"], na.rm = T)
        }
        test.clin = test.clin - test.clin[,"untreated"]
        untrt.ind = which(colnames(test.clin) %in% untrt)
        test.clin = test.clin[ , -untrt.ind]
        # same for the test set of the secondary outcome
        for(i in 1:nrow(test.clin.other)){
          if (is.na(test.clin.other[i,"untreated"]))
            test.clin.other[i,"untreated"] = mean(test.clin.other[ ,"untreated"], na.rm = T)
        }
        test.clin.other = test.clin.other - test.clin.other[,"untreated"]
        untrt.ind = which(colnames(test.clin.other) %in% untrt)
        test.clin.other = test.clin.other[ , -untrt.ind]
        # create treatment tree by clustering
        clusters = hclust(dist(t(train.clin))) # repeat distance matrix after removing untreated columns
        # full mouse level data
        rownames(train.bio) = rownames(train.clin)
        full.bio = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = ncol(train.clin)), ncol = 1), ]
        full.clin = matrix(as.numeric(t(train.clin)), ncol = 1)
        rownames(full.clin) = rownames(full.bio)
        full.trt = matrix(rep(colnames(train.clin), nrow(train.clin)))
        full.clin = cbind(full.clin, full.trt)
        # create treatment variables for each step of the tree
        num.steps = dim(clusters$merge)[1]
        merge.steps = clusters$merge
        trt.list = clusters$labels
        new.trt.vars = NULL # new treatment variables
        for (j in 1:num.steps) {
          temp = rep(NA, dim(full.clin)[1])
          merge1 = merge.steps[j, 1]; merge2 = merge.steps[j, 2]
          if (merge1 < 0) {
            temp[which(full.clin[ , 2] == trt.list[-merge1])] = 1
          }
          if (merge2 < 0) {
            temp[which(full.clin[ , 2] == trt.list[-merge2])] = -1
          }
          if (merge1 > 0) {
            temp[which(!is.na(new.trt.vars[ , merge1]))] = 1
          }
          if (merge2 > 0) {
            temp[which(!is.na(new.trt.vars[ , merge2]))] = -1
          }
          new.trt.vars = cbind(new.trt.vars, temp)
        } # end creation of trt variables
        # select trt variables for c2 steps down tree
        row.names(new.trt.vars) = row.names(full.clin)
        trt.vars = new.trt.vars[ , rev(rev(1:dim(new.trt.vars)[2])[1:c2])]
        trt.vars = as.matrix(trt.vars)
        # OWL up the tree
        moves.mat = NULL # this will be nrow(test.bio) by ncol(trt.vars) -- each row contains the moves up the tree for one line in test set
        list.pred = list()
        for (d in 1:dim(trt.vars)[2]) {
          X = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = 2), ncol = 1), ]
          Y = matrix(NA, nrow = nrow(X), ncol = 2)
          Y[ , 2] = rep(c(-1, 1), nrow(train.bio))
          rownames(Y) = rownames(X)
          # for first step, outcomes are mean outcomes in root nodes
          if (d == 1) {
            for (g in 1:dim(Y)[1]) {
              Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d] == Y[g, 2] & rownames(trt.vars) == rownames(Y)[g]), 1]), na.rm = T)
            }
          }
          # for other steps, outcomes are the maximum over outcomes on lower steps of the tree (if a previous decision has been made)
          if (d > 1) {
            for (g in 1:dim(Y)[1]) {
              # most recent previous step of tree where decision was made
              index = max(which(!is.na(as.matrix(trt.vars[which(rownames(Y)[g] == rownames(trt.vars) & trt.vars[ , d] == Y[g, 2]), 1:(d - 1)])[1,])))
              if (is.infinite(index)) Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d] == Y[g, 2] & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
              if (!is.infinite(index)){
                # follow previously-made decisions down the tree until a leaf
                while(!is.infinite(index)){
                  d.temp = index
                  choice = list.pred[[d.temp]][which(list.pred[[d.temp]][ , 1] == rownames(Y)[g]), 3]
                  if(d.temp == 1){
                    index = -Inf
                  }else{
                    index = max(which(!is.na(as.matrix(trt.vars[which(rownames(Y)[g] == rownames(trt.vars) & trt.vars[ , d.temp] == choice), 1:(d.temp - 1)])[1,])))
                  }
                }
                Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d.temp] == choice & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
              }
            }
          }
          A = Y[ , 2]
          Y = Y[ , 1]
          # fit model for OWL (delegated to an external python script)
          temp.mat = cbind(Y, A, X)
          trainname = sprintf("smoothkernel_train_%s_%s_%i.csv", cancer.type, outcome, ngene)
          testname = sprintf("smoothkernel_test_%s_%s_%i.csv", cancer.type, outcome, ngene)
          write.table(temp.mat, file = trainname, col.names = FALSE, row.names = FALSE, sep = ",")
          write.table(test.bio, testname, col.names = F, row.names = F, sep = ",")
          cmdstring = sprintf("python3 owl.temp.kernel.py %s %s", trainname, testname)
          owl.res = system(cmdstring, intern = T)
          temp.res = read.csv(sprintf("temp_owl_%s", testname), header = F) # this is the file that python.owl.temp returns
          temp.res = as.matrix(temp.res * 2 - 3)
          moves.mat = cbind(moves.mat, temp.res)
          # predicted best treatment for training set
          # we're borrowing code from the QL files -- predicted treatments are in column 3
          traindatmoves = read.csv(sprintf("temp_owl_%s", trainname), header=F)
          traindatmoves = as.matrix(traindatmoves * 2 - 3)
          traindatmoves = traindatmoves[which(1:nrow(traindatmoves) %% 2 == 1), ]
          traindatmoves = as.matrix(traindatmoves)
          preds = matrix(NA, nrow = nrow(train.bio), ncol = 3)
          preds[ , 1] = rownames(train.bio)
          preds[ , 3] = traindatmoves
          list.pred[[d]] = preds
        } # end loop through steps in tree
        # test on validation set
        main.outs = NULL # save mean outcomes for each line among mice treated consistent with treatment rule
        other.outs = NULL
        rownames(test.bio) = rownames(test.clin)
        # moves for each row in test set
        moves.all = moves.mat
        if (ncol(moves.mat) > 1) moves.all = t(apply(moves.mat, 1, rev)) # when we fill moves.mat we are doing it from the bottom of the tree up, not top down
        moves.all[which(moves.all == -1)] = 0
        # loop through lines in test set
        for (t in 1:dim(test.bio)[1]) {
          # moves for line t in test set (these are the moves that would be taken under decision rule)
          moves = moves.all[t, ]
          cur.step = merge.steps[nrow(merge.steps), ]
          trt.ind = NULL # save indices (in trt.list) of those trts that are consistent with decision rule
          temp = rep(NA, nrow(merge.steps) - length(moves))
          tmoves = c(temp, rev(moves))
          # determine root node for each line in testing set
          keeplooping = T
          cur.move = tmoves[length(tmoves)]
          while (keeplooping) {
            if (cur.move == 1) next.step = cur.step[1]
            if (cur.move == 0) next.step = cur.step[2]
            if (next.step < 0) {
              trt.ind = c(trt.ind, -next.step)
              keeplooping = F
            }
            if (next.step > 0) {
              cur.step = merge.steps[next.step, ]
              cur.move = tmoves[next.step]
              if (is.na(cur.move)) keeplooping = F
            }
          }
          # recursive function to construct list of indices for trts in root node
          get.trt.list = function(cur.step) {
            trt.ind = NULL
            if (cur.step[1] < 0) trt.ind = c(trt.ind, -cur.step[1])
            if (cur.step[1] > 0) trt.ind = c(trt.ind, get.trt.list(merge.steps[cur.step[1], ]))
            if (cur.step[2] < 0) trt.ind = c(trt.ind, -cur.step[2])
            if (cur.step[2] > 0) trt.ind = c(trt.ind, get.trt.list(merge.steps[cur.step[2], ]))
            return(trt.ind)
          } # end recursive function
          # if root node is not a single trt, get list of trts in root node
          if (length(trt.ind) == 0) trt.ind = get.trt.list(cur.step)
          # list of trts in root node
          treatments = trt.list[trt.ind]
          # take mean outcome in root node
          main.outs = c(main.outs, mean(test.clin[which(rownames(test.clin) == rownames(test.bio)[t]), which(colnames(test.clin) %in% treatments)]))
          other.outs = c(other.outs, mean(test.clin.other[which(rownames(test.clin.other) == rownames(test.bio)[t]), which(colnames(test.clin.other) %in% treatments)]))
        } # end loop through lines in test set
        # determine if any lines should have been left untreated by fitting OWL one more time
        X = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = 2), ncol = 1), ]
        Y = matrix(NA, nrow = nrow(X), ncol = 2)
        Y[ , 2] = rep(c(0, 1), nrow(train.bio))
        rownames(Y) = rownames(X)
        for (g in 1:dim(Y)[1]) {
          if (Y[g, 2] == 0) Y[g, 1] = 0 # 0 is untreated
          if (Y[g, 2] == 1) Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , ncol(trt.vars)] == list.pred[[ncol(trt.vars)]][which(list.pred[[ncol(trt.vars)]][ , 1] == rownames(Y)[g]), 3] & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
        }
        A = Y[ , 2]
        Y = Y[ , 1]
        A[which(A == 0)] = -1 # trt coded as 1/-1 for OWL
        # fit model for OWL
        temp.mat = cbind(Y, A, X)
        trainname = sprintf("smoothkernel_train_%s_%s_%i.csv", cancer.type, outcome, ngene)
        testname = sprintf("smoothkernel_test_%s_%s_%i.csv", cancer.type, outcome, ngene)
        write.table(temp.mat, file=trainname, col.names=FALSE, row.names=FALSE, sep=",")
        write.table(test.bio, testname, col.names=F, row.names=F, sep=",")
        cmdstring = sprintf("python3 owl.temp.kernel.py %s %s", trainname, testname)
        owl.res = system(cmdstring, intern=T)
        temp.res = read.csv(sprintf("temp_owl_%s", testname), header=F) # this is the file that python.owl.temp returns
        temp.res = as.matrix(temp.res*2 - 3)
        # for any mice who should have been left untreated, replace outcome with mean in untreated group
        main.outs[which(temp.res == -1)] = 0
        # save mean outcomes from this fold
        main.folds = c(main.folds, mean(main.outs, na.rm = T))
        other.folds = c(other.folds, mean(other.outs, na.rm = T))
      } # end loop through folds
      # save means, variances, and covariances of outcomes across folds for one choice of c1/c2
      avg.main.outs = c(avg.main.outs, mean(main.folds, na.rm = T))
      avg.other.outs = c(avg.other.outs, mean(other.folds, na.rm = T))
      cov.mat = cov(matrix(c(main.folds, other.folds), ncol = 2), use = "complete.obs")
      cov.list[[ctr]] = cov.mat
      parameters[ctr, ] = c(c1, c2)
      ctr = ctr + 1
    } # end loop through c2s
  } # end loop through c1s
  # determine which c1/c2 maximize main outcome
  opt.ind = which(avg.main.outs == max(avg.main.outs, na.rm = T))
  # Fix: break ties by the secondary outcome, keeping indices into the
  # ORIGINAL vectors. The original `which(avg.other.outs[opt.ind] == ...)`
  # returned positions within the subset, so ties selected the wrong
  # parameter pair.
  if (length(opt.ind) > 1) opt.ind = opt.ind[avg.other.outs[opt.ind] == max(avg.other.outs[opt.ind])]
  if (length(opt.ind) > 1) opt.ind = sample(opt.ind, 1)
  # select value functions at optimal c1/c2
  if (outcome == "BAR") {
    final.resp = avg.main.outs[opt.ind]
    final.surv = avg.other.outs[opt.ind]
  }
  if (outcome == "Surv") {
    final.resp = avg.other.outs[opt.ind]
    final.surv = avg.main.outs[opt.ind]
  }
  # select optimal c1/c2
  final.param = parameters[opt.ind, ]
  # select covariance matrix at optimal c1/c2
  final.covariance = cov.list[[opt.ind]] # note that covariance matrix always has main outcome in upper left
  # observed mean for primary outcome
  for(i in 1:nrow(clinical)){
    if (is.na(clinical[i,"untreated"]))
      clinical[i,"untreated"] = mean(clinical[ ,"untreated"], na.rm = T)
  }
  clinical = clinical - clinical[,"untreated"] # subtract off mean of untreated group in each line
  # observed mean for secondary outcome
  for(i in 1:nrow(clinical.other)){
    if (is.na(clinical.other[i,"untreated"]))
      clinical.other[i,"untreated"] = mean(clinical.other[ ,"untreated"], na.rm = T)
  }
  clinical.other = clinical.other - clinical.other[,"untreated"] # subtract off mean of untreated group in each line
  if (outcome == "BAR") {
    obs.resp = mean(clinical, na.rm = T)
    obs.surv = mean(clinical.other, na.rm = T)
    opt.resp = mean(apply(clinical, 1, max, na.rm = T)[!is.infinite(apply(clinical, 1, max, na.rm = T))], na.rm = T)
    opt.surv = mean(apply(clinical.other, 1, max, na.rm = T)[!is.infinite(apply(clinical.other, 1, max, na.rm = T))], na.rm = T)
    var.resp = final.covariance[1, 1]
    var.surv = final.covariance[2, 2]
    cov = final.covariance[1, 2]
  }
  if (outcome == "Surv") {
    obs.resp = mean(clinical.other, na.rm = T)
    obs.surv = mean(clinical, na.rm = T)
    opt.resp = mean(apply(clinical.other, 1, max, na.rm = T)[!is.infinite(apply(clinical.other, 1, max, na.rm = T))], na.rm = T)
    opt.surv = mean(apply(clinical, 1, max, na.rm = T)[!is.infinite(apply(clinical, 1, max, na.rm = T))], na.rm = T)
    var.resp = final.covariance[2, 2]
    var.surv = final.covariance[1, 1]
    cov = final.covariance[1, 2]
  }
  # assemble the one-row results table written to output_dir
  res = data.frame(c1 = final.param[1], c2 = final.param[2], mean.response = final.resp, mean.survival = final.surv,
                   var.response = var.resp, var.survival = var.surv, covariance = cov, observed.resp = obs.resp,
                   observed.surv = obs.surv, optimal.resp = opt.resp, optimal.surv = opt.surv)
  rownames(res) = NULL
  setwd(output_dir)
  output.name = paste(cancer.type, "_", outcome, "_", stri_sub(gene.data.file, 1, -5), outstring, sep = "")
  write.csv(res, output.name)
  # return list of results
  return(list(parameters = final.param, mean.response = final.resp, mean.survival = final.surv,
              covariance = final.covariance, observed.resp = obs.resp, observed.surv = obs.surv,
              optimal.resp = opt.resp, optimal.surv = opt.surv))
} # end pdx.owl.kernel.smooth function
| /PDX.Code/pdx.owl.kernel.smooth.R | no_license | jasa-acs/High-Dimensional-Precision-Medicine-From-Patient-Derived-Xenografts | R | false | false | 22,537 | r | # pdx.owl.kernel.smooth.R
# OWL with Gaussian kernel decision function with random forest smoothed outcomes
library(caret)
library(stringi)
# need to supply to the function a cancer type, one of
# "BRCA", "CM", "CRC", "NSCLC", or "PDAC"
# and an outcome, one of "BAR" for best average response
# or "Surv" for time to doubling
# gene.data.file is the name of a csv file with gene data
# numgenes is the number of genes to use for smoothing
# k is the number of folds for cross-validation
# also need to supply a seed to make training/testing sets for cross-validation
# c1s and c2s are the tuning parameters to try; if c2s is not specified, the max number for each c1 are tried
# if strip = T the first column of gene.data.file is assumed to be rownames and is stripped off
# outstring is an identifier for the output csv file
# ---------------------------------------------------------------------------
# pdx.owl.kernel.smooth
#
# Estimates a tree-structured treatment decision rule for PDX (patient-derived
# xenograft) data via kernel outcome-weighted learning (OWL). Random-forest
# smoothed outcomes (pred_vals_rf) are used when *estimating* the rule; the
# observed outcomes are used when *validating* it. Two tuning parameters are
# selected by k-fold cross-validation:
#   c1 - number of treatments grouped together with "untreated"
#   c2 - number of steps taken down the treatment clustering tree
#
# Arguments:
#   cancer.type    one of "BRCA", "CM", "CRC", "NSCLC", "PDAC", "overall"
#   outcome        "BAR" (best average response) or "Surv" (survival); the
#                  other outcome is tracked as a secondary value function
#   gene.data.file CSV of biomarker (gene expression) data, one row per line
#   numgenes       50, 100, 500 or 1000; selects the element of pred_vals_rf
#   input_dir      directory holding split.cm.data.rda, trts.by.cancer.rda,
#                  pred_vals_rf.rda (and full.data.rda for "overall")
#   output_dir     directory where the summary CSV is written
#   c1s, c2s       candidate tuning grids; c2s = NA means "derive from tree"
#   k              number of cross-validation folds
#   seed           RNG seed used when creating folds
#   strip          drop the first column of the biomarker CSV (row names)?
#   outstring      suffix appended to the output CSV file name
#
# Returns a list with the selected parameters, cross-validated mean response
# and survival value functions, their covariance matrix, and the observed and
# optimal outcome means.
#
# Side effects: calls setwd() twice without restoring the previous working
# directory, writes temporary CSV files, and shells out to
# "python3 owl.temp.kernel.py" which must be on the path together with the
# files it writes back ("temp_owl_<name>.csv").
# External helpers assumed loaded by the surrounding script: createFolds
# (caret) and stri_sub (stringi) -- TODO confirm.
# ---------------------------------------------------------------------------
pdx.owl.kernel.smooth = function(cancer.type, outcome, gene.data.file, numgenes, input_dir, output_dir, c1s = c(0), c2s = NA, k = 5, seed = 1, strip = T, outstring = "_owlkernelsmooth.csv") {
  # NOTE(review): setwd() mutates global state and is never restored on exit.
  setwd(input_dir)
  load("split.cm.data.rda")
  load("trts.by.cancer.rda")
  # random forest predicted values -- use these as outcomes when estimating decision rule
  load("pred_vals_rf.rda")
  # extract clinical data for given cancer type
  # if (cancer.type == "BRCA") dat = split.cm.data$BRCA
  # if (cancer.type == "CM") dat = split.cm.data$CM
  # if (cancer.type == "CRC") dat = split.cm.data$CRC
  # if (cancer.type == "NSCLC") dat = split.cm.data$NSCLC
  # if (cancer.type == "PDAC") dat = split.cm.data$PDAC
  # clinical = dat[ , 1:17]
  if (cancer.type == "BRCA") {dat = split.cm.data$BRCA; clinical = dat[ , 1:17]}
  if (cancer.type == "CM") {dat = split.cm.data$CM;clinical = dat[ , 1:17]}
  if (cancer.type == "CRC") {dat = split.cm.data$CRC; clinical = dat[ , 1:17]}
  if (cancer.type == "NSCLC") {dat = split.cm.data$NSCLC; clinical = dat[ , 1:17]}
  if (cancer.type == "PDAC") {dat = split.cm.data$PDAC; clinical = dat[ , 1:17]}
  if (cancer.type == "overall") {
    # NOTE(review): full.data.rda is assumed to define `dat` -- confirm, since
    # `dat` is otherwise unset on this branch.
    load("full.data.rda")
    clinical = dat[ , 1:5]
  }
  clinical$RespScaled = -clinical$RespScaled # reverse sign of best average response -- this way larger values are better
  ind1 = which(cancer.type == names(trts.by.cancer))
  ind2 = which(numgenes == c(50, 100, 500, 1000))
  clinical$new.resp = pred_vals_rf[[ind1]][[ind2]][[1]]
  clinical$new.surv = pred_vals_rf[[ind1]][[ind2]][[2]]
  biomarkers = read.csv(gene.data.file)
  if (strip) biomarkers = biomarkers[ , -1]
  ngene = dim(biomarkers)[2]
  # remove duplicated columns from biomarkers
  if (sum(duplicated(as.matrix(biomarkers), MARGIN = 2)) > 0) biomarkers = biomarkers[ , -which(duplicated(as.matrix(biomarkers), MARGIN = 2))]
  # center/scale biomarkers
  center.scale = function(x) return((x - mean(x)) / sd(x))
  biomarker.temp = apply(biomarkers, 2, center.scale)
  biomarkers = biomarker.temp
  # format data
  biomarkers = biomarkers[!duplicated(biomarkers), ] # here biomarkers contains one observation per line
  # Observed outcomes in wide (line x treatment) form; NA where a line/trt
  # combination was not observed.
  new.resp.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  new.surv.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  rownames(new.resp.mat) = unique(clinical$Model); colnames(new.resp.mat) = unique(clinical$Treatment)
  rownames(new.surv.mat) = unique(clinical$Model); colnames(new.surv.mat) = unique(clinical$Treatment)
  for (dim1 in 1:nrow(new.resp.mat)) {
    for (dim2 in 1:ncol(new.resp.mat)) {
      row = which(clinical$Model == rownames(new.resp.mat)[dim1] & clinical$Treatment == colnames(new.resp.mat)[dim2])
      if (length(row) != 0) {
        new.resp.mat[dim1, dim2] = clinical$RespScaled[row]
        new.surv.mat[dim1, dim2] = clinical$logSurvScaled[row]
      }
    }
  } # end format of clinical data
  # RF predicted values, same wide layout as above
  smooth.resp.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  smooth.surv.mat = matrix(NA, nrow = length(unique(clinical$Model)), ncol = length(unique(clinical$Treatment)))
  rownames(smooth.resp.mat) = unique(clinical$Model); colnames(smooth.resp.mat) = unique(clinical$Treatment)
  rownames(smooth.surv.mat) = unique(clinical$Model); colnames(smooth.surv.mat) = unique(clinical$Treatment)
  for (dim1 in 1:nrow(smooth.resp.mat)) {
    for (dim2 in 1:ncol(smooth.resp.mat)) {
      row = which(clinical$Model == rownames(smooth.resp.mat)[dim1] & clinical$Treatment == colnames(smooth.resp.mat)[dim2])
      if (length(row) != 0) {
        smooth.resp.mat[dim1, dim2] = clinical$new.resp[row]
        smooth.surv.mat[dim1, dim2] = clinical$new.surv[row]
      }
    }
  } # end format of clinical data
  # clinical.other contains outcomes not specified for estimation
  if (outcome == "BAR") {
    clinical = new.resp.mat
    clinical.other = new.surv.mat
  }
  if (outcome == "Surv") {
    clinical = new.surv.mat
    clinical.other = new.resp.mat
  }
  # smooth.clinical.other contains outcomes not specified for estimation
  if (outcome == "BAR") {
    smooth.clinical = smooth.resp.mat
    smooth.clinical.other = smooth.surv.mat
  }
  if (outcome == "Surv") {
    smooth.clinical = smooth.surv.mat
    smooth.clinical.other = smooth.resp.mat
  }
  # create folds for cross-validation
  set.seed(seed)
  folds = createFolds(1:dim(biomarkers)[1], k = k, list = TRUE, returnTrain = FALSE)
  # c1 is the number of trt's to group with untreated, c2 is the number of steps to take down the tree
  avg.main.outs = NULL # store primary value functions across c1 and c2
  avg.other.outs = NULL # store secondary value functions across c1 and c2
  parameters = matrix(NA, nrow = length(c1s) * ncol(clinical), ncol = 2) # store pairs of c1 and c2
  colnames(parameters) = c("c1", "c2")
  cov.list = list() # to store covariances of value functions
  ctr = 1 # count number of times through inner loop
  print ("Begin the loops of ntrt and nodes")
  # loop through parameters
  for (c1 in c1s) {
    # NOTE(review): if c2s has length > 1, is.na(c2s) yields a length > 1
    # condition (an error in R >= 4.2). Also, once c2s is derived for the
    # first c1, it is reused unchanged for later c1 values -- confirm intended.
    if (is.na(c2s)) c2s = seq(1, (ncol(clinical) - c1 - 2))
    print(sprintf(" c1 = %d", c1))
    for (c2 in c2s) {
      print(sprintf(" c2 = %d", c2))
      # store primary and secondary value functions across folds
      main.folds = NULL
      other.folds = NULL
      # loop through folds
      for (f in 1:length(folds)) {
        # select training and testing sets; training uses smoothed outcomes,
        # testing uses the observed outcomes
        train.bio = biomarkers[-folds[[f]], ]
        train.clin = smooth.clinical[-folds[[f]], ]
        test.bio = biomarkers[folds[[f]], ]
        test.clin = clinical[folds[[f]], ]
        test.clin.other = clinical.other[folds[[f]], ]
        # find the c1 closest treatments to "untreated"
        dist_mat = as.matrix(dist(t(train.clin)))
        col_ind = which(colnames(dist_mat) == "untreated")
        ordered_dist_mat = dist_mat[order(dist_mat[ , col_ind]) , col_ind]
        untrt = names(ordered_dist_mat[1:(1 + c1)])
        # average outcomes aross "No treatment group"
        untrt.ind = which(colnames(train.clin) %in% untrt)
        means = apply(as.matrix(train.clin[ , untrt.ind]), 1, mean, na.rm = T)
        train.clin = train.clin - means # subtract off mean of untreated group in each line
        train.clin = train.clin[ , -untrt.ind]
        # same for the test set
        # replace na by the mean untreated value
        for(i in 1:nrow(test.clin)){
          if (is.na(test.clin[i,"untreated"]))
            test.clin[i,"untreated"] = mean(test.clin[ ,"untreated"], na.rm = T)
        }
        test.clin = test.clin - test.clin[,"untreated"]
        untrt.ind = which(colnames(test.clin) %in% untrt)
        test.clin = test.clin[ , -untrt.ind]
        # same for the test set of the secondary outcome
        for(i in 1:nrow(test.clin.other)){
          if (is.na(test.clin.other[i,"untreated"]))
            test.clin.other[i,"untreated"] = mean(test.clin.other[ ,"untreated"], na.rm = T)
        }
        test.clin.other = test.clin.other - test.clin.other[,"untreated"]
        untrt.ind = which(colnames(test.clin.other) %in% untrt)
        test.clin.other = test.clin.other[ , -untrt.ind]
        # create treatment tree by clustering
        clusters = hclust(dist(t(train.clin))) # repeat distance matrix after removing untreated columns
        # full mouse level data: one row per line x treatment combination
        rownames(train.bio) = rownames(train.clin)
        full.bio = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = ncol(train.clin)), ncol = 1), ]
        full.clin = matrix(as.numeric(t(train.clin)), ncol = 1)
        rownames(full.clin) = rownames(full.bio)
        full.trt = matrix(rep(colnames(train.clin), nrow(train.clin)))
        full.clin = cbind(full.clin, full.trt)
        # create treatment variables for each step of the tree
        # (hclust merge convention: negative entries are leaves, positive
        # entries reference earlier merge steps)
        num.steps = dim(clusters$merge)[1]
        merge.steps = clusters$merge
        trt.list = clusters$labels
        new.trt.vars = NULL # new treatment variables
        for (j in 1:num.steps) {
          temp = rep(NA, dim(full.clin)[1])
          merge1 = merge.steps[j, 1]; merge2 = merge.steps[j, 2]
          if (merge1 < 0) {
            temp[which(full.clin[ , 2] == trt.list[-merge1])] = 1
          }
          if (merge2 < 0) {
            temp[which(full.clin[ , 2] == trt.list[-merge2])] = -1
          }
          if (merge1 > 0) {
            temp[which(!is.na(new.trt.vars[ , merge1]))] = 1
          }
          if (merge2 > 0) {
            temp[which(!is.na(new.trt.vars[ , merge2]))] = -1
          }
          new.trt.vars = cbind(new.trt.vars, temp)
        } # end creation of trt variables
        # select trt variables for c2 steps down tree
        row.names(new.trt.vars) = row.names(full.clin)
        trt.vars = new.trt.vars[ , rev(rev(1:dim(new.trt.vars)[2])[1:c2])]
        trt.vars = as.matrix(trt.vars)
        # OWL up the tree
        moves.mat = NULL # this will be nrow(test.bio) by ncol(trt.vars) -- each row contains the moves up the tree for one line in test set
        list.pred = list()
        for (d in 1:dim(trt.vars)[2]) {
          # duplicate each training line once per branch (A = -1 / +1)
          X = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = 2), ncol = 1), ]
          Y = matrix(NA, nrow = nrow(X), ncol = 2)
          Y[ , 2] = rep(c(-1, 1), nrow(train.bio))
          rownames(Y) = rownames(X)
          # for first step, outcomes are mean outcomes in root nodes
          if (d == 1) {
            for (g in 1:dim(Y)[1]) {
              Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d] == Y[g, 2] & rownames(trt.vars) == rownames(Y)[g]), 1]), na.rm = T)
            }
          }
          # for other steps, outcomes are the maximum over outcomes on lower steps of the tree (if a previous decision has been made)
          if (d > 1) {
            for (g in 1:dim(Y)[1]) {
              # most recent previous step of tree where decision was made
              index = max(which(!is.na(as.matrix(trt.vars[which(rownames(Y)[g] == rownames(trt.vars) & trt.vars[ , d] == Y[g, 2]), 1:(d - 1)])[1,])))
              if (is.infinite(index)) Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d] == Y[g, 2] & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
              if (!is.infinite(index)){
                # follow earlier predicted choices down the tree until no
                # earlier decision exists, then score that terminal choice
                while(!is.infinite(index)){
                  d.temp = index
                  choice = list.pred[[d.temp]][which(list.pred[[d.temp]][ , 1] == rownames(Y)[g]), 3]
                  if(d.temp == 1){
                    index = -Inf
                  }else{
                    index = max(which(!is.na(as.matrix(trt.vars[which(rownames(Y)[g] == rownames(trt.vars) & trt.vars[ , d.temp] == choice), 1:(d.temp - 1)])[1,])))
                  }
                }
                Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , d.temp] == choice & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
              }
            }
          }
          A = Y[ , 2]
          Y = Y[ , 1]
          # fit model for OWL (delegated to an external python script)
          temp.mat = cbind(Y, A, X)
          trainname = sprintf("smoothkernel_train_%s_%s_%i.csv", cancer.type, outcome, ngene)
          testname = sprintf("smoothkernel_test_%s_%s_%i.csv", cancer.type, outcome, ngene)
          write.table(temp.mat, file = trainname, col.names = FALSE, row.names = FALSE, sep = ",")
          write.table(test.bio, testname, col.names = F, row.names = F, sep = ",")
          cmdstring = sprintf("python3 owl.temp.kernel.py %s %s", trainname, testname)
          owl.res = system(cmdstring, intern = T)
          temp.res = read.csv(sprintf("temp_owl_%s", testname), header = F) # this is the file that python.owl.temp returns
          temp.res = as.matrix(temp.res * 2 - 3) # recode 1/2 predictions to -1/+1
          moves.mat = cbind(moves.mat, temp.res)
          # predicted best treatment for training set
          # we're borrowing code from the QL files -- predicted treatments are in column 3
          traindatmoves = read.csv(sprintf("temp_owl_%s", trainname), header=F)
          traindatmoves = as.matrix(traindatmoves * 2 - 3)
          traindatmoves = traindatmoves[which(1:nrow(traindatmoves) %% 2 == 1), ]
          traindatmoves = as.matrix(traindatmoves)
          preds = matrix(NA, nrow = nrow(train.bio), ncol = 3)
          preds[ , 1] = rownames(train.bio)
          preds[ , 3] = traindatmoves
          list.pred[[d]] = preds
        } # end loop through steps in tree
        # test on validation set
        main.outs = NULL # save mean outcomes for each line among mice treated consistent with treatment rule
        other.outs = NULL
        rownames(test.bio) = rownames(test.clin)
        # moves for each row in test set
        moves.all = moves.mat
        if (ncol(moves.mat) > 1) moves.all = t(apply(moves.mat, 1, rev)) # when we fill moves.mat we are doing it from the bottom of the tree up, not top down
        moves.all[which(moves.all == -1)] = 0
        # loop through lines in test set
        for (t in 1:dim(test.bio)[1]) {
          # moves for line t in test set (these are the moves that would be taken under decision rule)
          moves = moves.all[t, ]
          cur.step = merge.steps[nrow(merge.steps), ]
          trt.ind = NULL # save indices (in trt.list) of those trts that are consistent with decision rule
          temp = rep(NA, nrow(merge.steps) - length(moves))
          tmoves = c(temp, rev(moves))
          # determine root node for each line in testing set
          keeplooping = T
          cur.move = tmoves[length(tmoves)]
          while (keeplooping) {
            if (cur.move == 1) next.step = cur.step[1]
            if (cur.move == 0) next.step = cur.step[2]
            if (next.step < 0) {
              # reached a leaf: a single treatment
              trt.ind = c(trt.ind, -next.step)
              keeplooping = F
            }
            if (next.step > 0) {
              cur.step = merge.steps[next.step, ]
              cur.move = tmoves[next.step]
              if (is.na(cur.move)) keeplooping = F # rule makes no decision below this node
            }
          }
          # recursive function to construct list of indices for trts in root node
          get.trt.list = function(cur.step) {
            trt.ind = NULL
            if (cur.step[1] < 0) trt.ind = c(trt.ind, -cur.step[1])
            if (cur.step[1] > 0) trt.ind = c(trt.ind, get.trt.list(merge.steps[cur.step[1], ]))
            if (cur.step[2] < 0) trt.ind = c(trt.ind, -cur.step[2])
            if (cur.step[2] > 0) trt.ind = c(trt.ind, get.trt.list(merge.steps[cur.step[2], ]))
            return(trt.ind)
          } # end recursive function
          # if root node is not a single trt, get list of trts in root node
          if (length(trt.ind) == 0) trt.ind = get.trt.list(cur.step)
          # list of trts in root node
          treatments = trt.list[trt.ind]
          # take mean outcome in root node
          main.outs = c(main.outs, mean(test.clin[which(rownames(test.clin) == rownames(test.bio)[t]), which(colnames(test.clin) %in% treatments)]))
          other.outs = c(other.outs, mean(test.clin.other[which(rownames(test.clin.other) == rownames(test.bio)[t]), which(colnames(test.clin.other) %in% treatments)]))
        } # end loop through lines in test set
        # determine if any lines should have been left untreated by fitting OWL one more time
        X = train.bio[matrix(apply(matrix(1:nrow(train.bio), ncol = 1), 1, rep, times = 2), ncol = 1), ]
        Y = matrix(NA, nrow = nrow(X), ncol = 2)
        Y[ , 2] = rep(c(0, 1), nrow(train.bio))
        rownames(Y) = rownames(X)
        for (g in 1:dim(Y)[1]) {
          if (Y[g, 2] == 0) Y[g, 1] = 0 # 0 is untreated
          if (Y[g, 2] == 1) Y[g, 1] = mean(as.numeric(full.clin[which(trt.vars[ , ncol(trt.vars)] == list.pred[[ncol(trt.vars)]][which(list.pred[[ncol(trt.vars)]][ , 1] == rownames(Y)[g]), 3] & rownames(full.clin) == rownames(Y)[g]), 1]), na.rm = T)
        }
        A = Y[ , 2]
        Y = Y[ , 1]
        A[which(A == 0)] = -1 # trt coded as 1/-1 for OWL
        # fit model for OWL
        temp.mat = cbind(Y, A, X)
        trainname = sprintf("smoothkernel_train_%s_%s_%i.csv", cancer.type, outcome, ngene)
        testname = sprintf("smoothkernel_test_%s_%s_%i.csv", cancer.type, outcome, ngene)
        write.table(temp.mat, file=trainname, col.names=FALSE, row.names=FALSE, sep=",")
        write.table(test.bio, testname, col.names=F, row.names=F, sep=",")
        cmdstring = sprintf("python3 owl.temp.kernel.py %s %s", trainname, testname)
        owl.res = system(cmdstring, intern=T)
        temp.res = read.csv(sprintf("temp_owl_%s", testname), header=F) # this is the file that python.owl.temp returns
        temp.res = as.matrix(temp.res*2 - 3)
        # for any mice who should have been left untreated, replace outcome with mean in untreated group
        main.outs[which(temp.res == -1)] = 0
        # save mean outcomes from this fold
        main.folds = c(main.folds, mean(main.outs, na.rm = T))
        other.folds = c(other.folds, mean(other.outs, na.rm = T))
      } # end loop through five folds
      # save means, variances, and covariances of outcomes across folds for one choice of c1/c2
      avg.main.outs = c(avg.main.outs, mean(main.folds, na.rm = T))
      avg.other.outs = c(avg.other.outs, mean(other.folds, na.rm = T))
      cov.mat = cov(matrix(c(main.folds, other.folds), ncol = 2), use = "complete.obs")
      cov.list[[ctr]] = cov.mat
      parameters[ctr, ] = c(c1, c2)
      ctr = ctr + 1
    } # end loop through c2s
  } # end loop through c1s
  # determine which c1/c2 maximize main outcome
  opt.ind = which(avg.main.outs == max(avg.main.outs, na.rm = T))
  # break ties by the secondary outcome, then at random
  if (length(opt.ind) > 1) opt.ind = which(avg.other.outs[opt.ind] == max(avg.other.outs[opt.ind]))
  if (length(opt.ind) > 1) opt.ind = sample(opt.ind, 1)
  # select value functions at optimal c1/c2
  if (outcome == "BAR") {
    final.resp = avg.main.outs[opt.ind]
    final.surv = avg.other.outs[opt.ind]
  }
  if (outcome == "Surv") {
    final.resp = avg.other.outs[opt.ind]
    final.surv = avg.main.outs[opt.ind]
  }
  # select optimal c1/c2
  final.param = parameters[opt.ind, ]
  # select covariance matrix at optimal c1/c2
  final.covariance = cov.list[[opt.ind]] # note that covariance matrix always has main outcome in upper left
  # observed mean for primary outcome
  for(i in 1:nrow(clinical)){
    if (is.na(clinical[i,"untreated"]))
      clinical[i,"untreated"] = mean(clinical[ ,"untreated"], na.rm = T)
  }
  clinical = clinical - clinical[,"untreated"] # subtract off mean of untreated group in each line
  # observed mean for secondary outcome
  for(i in 1:nrow(clinical.other)){
    if (is.na(clinical.other[i,"untreated"]))
      clinical.other[i,"untreated"] = mean(clinical.other[ ,"untreated"], na.rm = T)
  }
  clinical.other = clinical.other - clinical.other[,"untreated"] # subtract off mean of untreated group in each line
  if (outcome == "BAR") {
    obs.resp = mean(clinical, na.rm = T)
    obs.surv = mean(clinical.other, na.rm = T)
    opt.resp = mean(apply(clinical, 1, max, na.rm = T)[!is.infinite(apply(clinical, 1, max, na.rm = T))], na.rm = T)
    opt.surv = mean(apply(clinical.other, 1, max, na.rm = T)[!is.infinite(apply(clinical.other, 1, max, na.rm = T))], na.rm = T)
    var.resp = final.covariance[1, 1]
    var.surv = final.covariance[2, 2]
    cov = final.covariance[1, 2]
  }
  if (outcome == "Surv") {
    obs.resp = mean(clinical.other, na.rm = T)
    obs.surv = mean(clinical, na.rm = T)
    opt.resp = mean(apply(clinical.other, 1, max, na.rm = T)[!is.infinite(apply(clinical.other, 1, max, na.rm = T))], na.rm = T)
    opt.surv = mean(apply(clinical, 1, max, na.rm = T)[!is.infinite(apply(clinical, 1, max, na.rm = T))], na.rm = T)
    var.resp = final.covariance[2, 2]
    var.surv = final.covariance[1, 1]
    cov = final.covariance[1, 2]
  }
  # assemble and write the one-row results table
  res = data.frame(c1 = final.param[1], c2 = final.param[2], mean.response = final.resp, mean.survival = final.surv,
                   var.response = var.resp, var.survival = var.surv, covariance = cov, observed.resp = obs.resp,
                   observed.surv = obs.surv, optimal.resp = opt.resp, optimal.surv = opt.surv)
  rownames(res) = NULL
  setwd(output_dir)
  output.name = paste(cancer.type, "_", outcome, "_", stri_sub(gene.data.file, 1, -5), outstring, sep = "")
  write.csv(res, output.name)
  # return list of results
  return(list(parameters = final.param, mean.response = final.resp, mean.survival = final.surv,
              covariance = final.covariance, observed.resp = obs.resp, observed.surv = obs.surv,
              optimal.resp = opt.resp, optimal.surv = opt.surv))
} # end pdx.owl.kernel.smooth function
|
#' @title Specifications test-render.R
#' @section Last updated by: Tim Treis (tim.treis@@outlook.de)
#' @section Last update date: 2022-02-09T15:22:32
#'
#' @section List of tested specifications
#' T1. The function `render.tableone()` properly renders a `render.tableone` object.
#' T1.1 No error when `data` is a `tableone` object.
#' T1.2 An error when `data` is not a `tableone` object.
#' T1.3 An error when `title` is missing.
#' T1.4 No error when `title` is defined.
#' T1.5 An error when `datasource` is missing.
#' T1.6 No error when `datasource` is defined.
#' T1.7 No error when `footnote` is defined.
#' T1.8 No error when `output_format` is 'html' and `engine` is 'gt'.
#' T1.9 No error when `output_format` is 'html' and `engine` is 'kable'.
#' T1.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T1.11 An error when `output_format` is 'latex' and `engine` is not 'gt' or 'kable'.
#' T1.12 An error when `output_format` is an invalid parameter.
#' T1.13 An error when `engine` is an invalid parameter.
#' T1.14 No error when `output_format` is 'latex' and `engine` is 'kable'.
#' T1.16 No error when `engine` is in ['dt', 'datatable', 'datatables'] and `download_format` is in ['copy', 'csv', 'excel'].
#' T1.17 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and `download_format` is in ['copy', 'csv', 'excel'].
#' T1.18 A warning when `download_format` is not 'copy', 'csv' or 'excel'.
#' T2. The function `render.risktable()` properly renders a `risktable` object.
#' T2.1 No error when `data` is a `risktable` object.
#' T2.2 An error when `data` is not a `risktable` object.
#' T2.3 An error when `title` is missing.
#' T2.4 No error when `title` is defined.
#' T2.5 An error when `datasource` is missing.
#' T2.6 No error when `datasource` is defined.
#' T2.7 No error when `footnote` is defined.
#' T2.8 No error when `output_format` is 'html' and `engine` is 'gt'.
#' T2.9 No error when `output_format` is 'html' and `engine` is 'kable'.
#' T2.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T2.11 An error when `output_format` is an invalid parameter.
#' T2.12 An error when `engine` is an invalid parameter.
#' T2.13 No error when `output_format` is 'latex' and `engine` is 'kable'.
#' T2.14 An error when `output_format` is 'latex' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T2.15 No error when `engine` is in ['dt', 'datatable', 'datatables'] and `download_format` is in ['copy', 'csv', 'excel'].
#' T2.16 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and `download_format` is in ['copy', 'csv', 'excel'].
#' T2.17 The strata-colnames of the `risktable` object are used as rownames.
#' T2.18 The metric of the risktable is used in the rendered table.
#' T2.19 The values of the evaluated metric are pivoted wide.
#' T3. The function `render.data.frame()` properly renders a `data.frame` object.
#' T3.1 When `engine` is 'gt' and `output_format` is 'latex', a latex `knit_asis` object is returned.
#' T3.2 A warning when `engine` is 'dt', 'datatable' or 'datatables' and `output_format` is not 'html'.
#' T4. The function `check_rendering_input()` only permits valid `output_format` and `engine` options.
#' T4.1 No error when `output_format` is `html` or `latex` and `engine` is `kable`, `gt`, `dt`, `datatable` or `datatables`.
#' T4.2 An error when `output_format` and/or `engine` are missing, `NULL` or `NA`.
#' T4.3 An error when `output_format` is not `html` or `latex` and `engine` is a valid option.
#' T4.4 An error when `engine` is not `kable`, `gt`, `dt`, `datatables` or `datatable` and `output_format` is a valid option.
#' T5. The function `render_datatable.data.frame()` creates an `htmlwidget` of the table.
#' T5.1 No error when `data` is a `data.frame`.
#' T5.2 The returned object is of type `htmlwidget`.
#' T5.3 The `title` is passed along to the HTML widget.
#' T5.4 The `source_cap` is passed along to the HTML widget.
#' T5.5 When `download_format` is not `NULL`, a button is added.
#' T5.6 When `download_format` is `NULL`, no button is added.
#' T6. The function `get_gt.data.frame()` properly passes the input along to `gt::gt()`.
#' T6.1 No error when `data` is a `data.frame`.
#' T6.2 The returned object is of type `gt_tbl`.
# Requirement T1 ----------------------------------------------------------
testthat::context("render - T1. The function `render.tableone()` properly renders a `render.tableone` object.")
testthat::test_that("T1.1 No error when `data` is a `tableone` object.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  testthat::expect_true(inherits(adtte_tableone, "tableone"))
  # Assert the absence of an error explicitly (expect_error(NA)), matching
  # the pattern used by T2.1; the bare call previously asserted nothing.
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
})
# T1.2-T1.18: remaining specifications for render.tableone(). Each test builds
# a fresh `tableone` object from the shared `adtte` fixture (assumed loaded by
# the test setup -- TODO confirm) and pipes it into visR:::render.tableone();
# expect_error(NA) asserts "no error", expect_error() asserts an error.
testthat::test_that("T1.2 An error when `data` is not a `tableone` object.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # strip the "tableone" class so dispatch/validation must reject the object
  class(adtte_tableone) <- class(adtte_tableone)[class(adtte_tableone) != "tableone"]
  testthat::expect_false(inherits(adtte_tableone, "tableone"))
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.3 An error when `title` is missing.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.4 No error when `title` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # NULL, numeric, character and vector titles are all accepted
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = 1, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = "visR", datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = c(1, 2, 3), datasource = NULL) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.5 An error when `datasource` is missing.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(title = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.6 No error when `datasource` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # NULL, numeric, character and vector datasources are all accepted
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = 1) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = "visR") %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = c(1, 2, 3)) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.7 No error when `footnote` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = 1
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = "visR"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = c(1, 2, 3)
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.8 No error when `output_format` is 'html' and `engine` is 'gt'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "gt"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.9 No error when `output_format` is 'html' and `engine` is 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # all three aliases of the DT engine are accepted for html output
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "dt"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatable"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatables"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.11 An error when `output_format` is 'latex' and `engine` is not 'gt' or 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # expect_error matches this message as a regular expression
  expected_error <- "Currently, 'latex' output is only implemented with 'gt' or 'kable' as a table engine."
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "latex",
      engine = "dt"
    ) %>%
    testthat::expect_error(expected_error)
})
testthat::test_that("T1.12 An error when `output_format` is an invalid parameter.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = NULL
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = 1
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T1.13 An error when `engine` is an invalid parameter.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = NULL
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = 1
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T1.14 No error when `output_format` is 'latex' and `engine` is 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "latex",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
# NOTE(review): the spec numbering skips T1.15 here and in the file header --
# confirm whether a specification was removed intentionally.
testthat::test_that("T1.16 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # exercise the full cross product of DT engine aliases x download formats
  for (engine in c("dt", "datatable", "datatables")) {
    for (download_format in c("copy", "csv", "excel")) {
      adtte_tableone %>%
        visR:::render.tableone(
          title = NULL,
          datasource = NULL,
          engine = engine,
          download_format = download_format
        ) %>%
        testthat::expect_error(NA)
    }
  }
})
testthat::test_that("T1.17 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  for (engine in c("gt", "kable")) {
    for (download_format in c("copy", "csv", "excel")) {
      adtte_tableone %>%
        visR:::render.tableone(
          title = NULL,
          datasource = NULL,
          engine = engine,
          download_format = download_format
        ) %>%
        testthat::expect_warning()
    }
  }
})
testthat::test_that("T1.18 A warning when `download_format` is not 'copy', 'csv' or 'excel'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  expected_warning <- "Currently, only 'copy', 'csv' and 'excel' are supported as 'download_format'."
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = "dt",
      download_format = "visR"
    ) %>%
    testthat::expect_warning(expected_warning)
})
# Requirement T2 ---------------------------------------------------------------
# T2.1-T2.13: specifications for render.risktable(). Each test derives a
# `risktable` from a Kaplan-Meier fit stratified by SEX on the shared `adtte`
# fixture, then pipes it into visR:::render.risktable().
testthat::context("render - T2. The function `render.risktable()` properly renders a `risktable` object.")
testthat::test_that("T2.1 No error when `data` is a `risktable` object.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  testthat::expect_true(inherits(adtte_risktable, "risktable"))
  adtte_risktable %>%
    visR:::render.risktable(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.2 An error when `data` is not a `risktable` object.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  # strip the "risktable" class so dispatch/validation must reject the object
  class(adtte_risktable) <- class(adtte_risktable)[class(adtte_risktable) != "risktable"]
  testthat::expect_false(inherits(adtte_risktable, "risktable"))
  adtte_risktable %>%
    visR:::render.risktable(title = NULL, datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T2.3 An error when `title` is missing.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T2.4 No error when `title` is defined.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  # NULL, numeric, character and vector titles are all accepted
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = 1,
      datasource = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = "visR",
      datasource = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = c(1, 2, 3),
      datasource = NULL
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.5 An error when `datasource` is missing.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(title = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T2.6 No error when `datasource` is defined.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  # NULL, numeric, character and vector datasources are all accepted
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = 1
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = "visR"
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = c(1, 2, 3)
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.7 No error when `footnote` is defined.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      footnote = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      footnote = 1
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      footnote = "visR"
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      footnote = c(1, 2, 3)
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.8 No error when `output_format` is 'html' and `engine` is 'gt'.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "gt"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.9 No error when `output_format` is 'html' and `engine` is 'kable'.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  # all three aliases of the DT engine are accepted for html output
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "dt"
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatable"
    ) %>%
    testthat::expect_error(NA)
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatables"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.11 An error when `output_format` is an invalid parameter.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = NULL
    ) %>%
    testthat::expect_error()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = 1
    ) %>%
    testthat::expect_error()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T2.12 An error when `engine` is an invalid parameter.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      engine = NULL
    ) %>%
    testthat::expect_error()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      engine = 1
    ) %>%
    testthat::expect_error()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      engine = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T2.13 No error when `output_format` is 'latex' and `engine` is 'kable'.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  adtte_risktable %>%
    visR:::render.risktable(
      title = NULL,
      datasource = NULL,
      output_format = "latex",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T2.14 An error when `output_format` is 'latex' and `engine` is 'dt', 'datatable' or 'datatables'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "dt"
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "datatable"
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "datatables"
) %>%
testthat::expect_error()
})
testthat::test_that("T2.15 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
for (engine in c("dt", "datatable", "datatables")) {
for (download_format in c("copy", "csv", "excel")) {
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = engine,
download_format = download_format
) %>%
testthat::expect_error(NA)
}
}
})
testthat::test_that("T2.16 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
for (engine in c("gt", "kable")) {
for (download_format in c("copy", "csv", "excel")) {
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = engine,
download_format = download_format
) %>%
testthat::expect_warning()
}
}
})
testthat::test_that("T2.17 The strata-colnames of the `risktable` object are used as rownames.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
gg <- adtte_risktable %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_data <- gg["_data"] %>% as.data.frame()
strata_names <- colnames(adtte_risktable)[3:length(colnames(adtte_risktable))]
testthat::expect_identical(strata_names, gg_data[, 1])
})
testthat::test_that("T2.18 The metric of the risktable is used in the rendered table.", {
adtte_risktable_at_risk <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable(statlist = "n.risk")
adtte_risktable_censored <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable(statlist = "n.censor")
adtte_risktable_events <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable(statlist = "n.event")
gg_at_risk <- adtte_risktable_at_risk %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_at_risk_data <- gg_at_risk["_data"] %>% as.data.frame()
gg_censored <- adtte_risktable_censored %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_censored_data <- gg_censored["_data"] %>% as.data.frame()
gg_events <- adtte_risktable_events %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_events_data <- gg_events["_data"] %>% as.data.frame()
testthat::expect_identical(levels(gg_at_risk_data[, 2]), "At risk")
testthat::expect_identical(levels(gg_censored_data[, 2]), "Censored")
testthat::expect_identical(levels(gg_events_data[, 2]), "Events")
})
testthat::test_that("T2.19 The values of the evalutated metric are pivoted wide.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
gg <- adtte_risktable %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_data <- gg["_data"] %>% as.data.frame()
female_vals <- as.numeric(t(gg_data)[3:length(gg_data), 1])
male_vals <- as.numeric(t(gg_data)[3:length(gg_data), 2])
testthat::expect_identical(adtte_risktable[, "F"], female_vals)
testthat::expect_identical(adtte_risktable[, "M"], male_vals)
})
# Requirement T3 ---------------------------------------------------------------
# T3: render.data.frame() output-type checks. T3.1 verifies the gt/latex path
# returns a knitr 'knit_asis' object; T3.2 verifies the DT engines warn and
# fall back to html when a non-html output_format is requested.
testthat::context("render - T3. The function `render.data.frame()` properly renders a `data.frame` object.")
testthat::test_that("T3.1 When `engine` is 'gt' and `output_format` is 'latex', a latex `knit_asis` object is returned.", {
  latex_table <- adtte %>%
    visR:::render.data.frame(
      title = NULL,
      datasource = NULL,
      engine = "gt",
      output_format = "latex"
    )
  testthat::expect_true(inherits(latex_table, "knit_asis"))
})
testthat::test_that("T3.2 A warning when `engine` is 'dt', 'datatable' or 'datatables' and `output_format is not 'html'.`", {
  # The exact fallback message emitted by render.data.frame().
  expected_warning <- "DT engine only supports html output and not latex - falling back to html. Please pick a different engine to create other outputs"
  adtte %>%
    visR:::render.data.frame(
      title = NULL,
      datasource = NULL,
      engine = "dt",
      output_format = "latex"
    ) %>%
    testthat::expect_warning(expected_warning)
  adtte %>%
    visR:::render.data.frame(
      title = NULL,
      datasource = NULL,
      engine = "datatable",
      output_format = "latex"
    ) %>%
    testthat::expect_warning(expected_warning)
  adtte %>%
    visR:::render.data.frame(
      title = NULL,
      datasource = NULL,
      engine = "datatables",
      output_format = "latex"
    ) %>%
    testthat::expect_warning(expected_warning)
})
# Requirement T4 ---------------------------------------------------------------
# T4: argument validation in check_rendering_input(). T4.1 covers the accepted
# combinations; T4.2-T4.4 cover missing/NULL/NA and invalid values, asserting
# on the exact error messages (regex-escaped where needed).
testthat::context("render - T4. The function `check_rendering_input()` only permits valid `output_format` and `engine` options.")
testthat::test_that("T4.1 No error when `output_format` is `html` or `latex` and `engine` is `kable`, `gt`, `dt`, `datatable` or `datatables`.", {
  # expect_error(NA) asserts that no error is raised (testthat 2e idiom).
  for (output_format in c("html", "latex")) {
    for (engine in c("kable", "gt", "dt", "datatable", "datatables")) {
      visR:::check_rendering_input(
        output_format = output_format,
        engine = engine
      ) %>%
        testthat::expect_error(NA)
    }
  }
})
testthat::test_that("T4.2 An error when `output_format` and/or `engine` are missing, `NULL` or `NA`.", {
  # Renamed local from 'arg_missing_waring' (typo) to 'arg_missing_warning'.
  arg_missing_warning <- "Please provide an output_format and an engine."
  visR:::check_rendering_input(output_format = "visR") %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = "visR") %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(output_format = "html", engine = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = "kable", output_format = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = NULL, output_format = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  expected_error <- "Currently implemented output engines are kable, gt and jquery datatables \\(DT\\). NA is not yet supported."
  visR:::check_rendering_input(output_format = "html", engine = NA) %>%
    testthat::expect_error(expected_error)
  expected_error <- "Currently supported output formats are html and latex. NA is not yet supported."
  # BUG FIX: 'expected_error' was defined but not passed to expect_error(),
  # so *any* error (even an unrelated one) made this expectation pass.
  visR:::check_rendering_input(engine = "kable", output_format = NA) %>%
    testthat::expect_error(expected_error)
})
testthat::test_that("T4.3 An error when `output_format` is not `html` or `latex` and `engine` is a valid option.", {
  expected_error <- "Currently supported output formats are html and latex. visR is not yet supported."
  visR:::check_rendering_input(engine = "kable", output_format = "visR") %>%
    testthat::expect_error(expected_error)
})
testthat::test_that("T4.4 An error when `engine` is not `kable`, `gt`, `dt`, `datatables` or `datatable` and `output_format` is a valid option.", {
  expected_error <- "Currently implemented output engines are kable, gt and jquery datatables \\(DT\\). visR is not yet supported."
  visR:::check_rendering_input(output_format = "html", engine = "visR") %>%
    testthat::expect_error(expected_error)
})
# Requirement T5 ---------------------------------------------------------------
# T5: render_datatable.data.frame() htmlwidget construction. The widget's
# configuration lives in tmp$x (DT convention): caption, drawCallback and the
# Buttons extension are inspected directly rather than by rendering the HTML.
testthat::context("render - T5. The function `render_datatable.data.frame()` creates an `htmlwidget` of the table.")
testthat::test_that("T5.1 No error when `data` is a `data.frame`.", {
  adtte %>%
    visR:::render_datatable.data.frame(
      title = "visR",
      download_format = "csv",
      source_cap = "visR"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T5.2 The returned object is of type `htmlwidget`.", {
  tmp <- adtte %>%
    visR:::render_datatable.data.frame(
      title = "visR_title",
      download_format = "csv",
      source_cap = "visR_source_cap"
    )
  testthat::expect_true(inherits(tmp, "htmlwidget"))
})
testthat::test_that("T5.3 The `title` is passed along to the HTML widget.", {
  widget_title <- "visR_title"
  tmp <- adtte %>%
    visR:::render_datatable.data.frame(
      title = widget_title,
      download_format = "csv",
      source_cap = "visR_source_cap"
    )
  # The title ends up embedded in the widget's caption markup.
  testthat::expect_true(grepl(widget_title, tmp$x$caption))
})
testthat::test_that("T5.4 The `source_cap` is passed along to the HTML widget.", {
  source_cap <- "visR_source_cap"
  tmp <- adtte %>%
    visR:::render_datatable.data.frame(
      title = "visR_title",
      download_format = "csv",
      source_cap = source_cap
    )
  # The data source caption is injected via the DT drawCallback JS hook.
  testthat::expect_true(grepl(source_cap, tmp$x$options$drawCallback))
})
testthat::test_that("T5.5 When `download_format` is not `NULL`, a button is added.", {
  download_format <- "visR_csv"
  tmp <- adtte %>%
    visR:::render_datatable.data.frame(
      title = "visR_title",
      download_format = download_format,
      source_cap = "visR_source_cap"
    )
  testthat::expect_equal(tmp$x$options$buttons[[1]], download_format)
})
testthat::test_that("T5.6 When `download_format` is `NULL`, no button is added.", {
  tmp <- adtte %>%
    visR:::render_datatable.data.frame(
      title = "visR_title",
      download_format = NULL,
      source_cap = "visR_source_cap"
    )
  testthat::expect_false("buttons" %in% names(tmp$x$options))
})
# Requirement T6 ---------------------------------------------------------------
# T6: get_gt.data.frame() must hand a data.frame straight to gt::gt() and
# return the resulting gt table object.
testthat::context("render - T6. The function `get_gt.data.frame()` properly passes the input along to `gt::gt()`.")
testthat::test_that("T6.1 No error when `data` is a `data.frame`.", {
  testthat::expect_error(visR:::get_gt.data.frame(adtte), NA)
})
testthat::test_that("T6.2 The returned object is of type `gt_tbl`.", {
  gt_table <- visR:::get_gt.data.frame(adtte)
  testthat::expect_true(inherits(gt_table, "gt_tbl"))
})
# END OF CODE -------------------------------------------------------------
# Source file: tests/testthat/test-render.R (repo: bailliem/pharmavisR, permissive license)
#' @title Specifications test-render.R
#' @section Last updated by: Tim Treis (tim.treis@@outlook.de)
#' @section Last update date: 2022-02-09T15:22:32
#'
#' @section List of tested specifications
#' T1. The function `render.tableone()` properly renders a `render.tableone` object.
#' T1.1 No error when `data` is a `tableone` object.
#' T1.2 An error when `data` is not a `tableone` object.
#' T1.3 An error when `title` is missing.
#' T1.4 No error when `title` is defined.
#' T1.5 An error when `datasource` is missing.
#' T1.6 No error when `datasource` is defined.
#' T1.7 No error when `footnote` is defined.
#' T1.8 No error when `output_format` is 'html' and `engine` is 'gt'.
#' T1.9 No error when `output_format` is 'html' and `engine` is 'kable'.
#' T1.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T1.11 An error when `output_format` is 'latex' and `engine` is not 'gt' or 'kable'.
#' T1.12 An error when `output_format` is an invalid parameter.
#' T1.13 An error when `engine` is an invalid parameter.
#' T1.14 No error when `output_format` is 'latex' and `engine` is 'kable'.
#' T1.16 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].
#' T1.17 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].
#' T1.18 A warning when `download_format` is not 'copy', 'csv' or 'excel'.
#' T2. The function `render.risktable()` properly renders a `risktable` object.
#' T2.1 No error when `data` is a `risktable` object.
#' T2.2 An error when `data` is not a `risktable` object.
#' T2.3 An error when `title` is missing.
#' T2.4 No error when `title` is defined.
#' T2.5 An error when `datasource` is missing.
#' T2.6 No error when `datasource` is defined.
#' T2.7 No error when `footnote` is defined.
#' T2.8 No error when `output_format` is 'html' and `engine` is 'gt'.
#' T2.9 No error when `output_format` is 'html' and `engine` is 'kable'.
#' T2.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T2.11 An error when `output_format` is an invalid parameter.
#' T2.12 An error when `engine` is an invalid parameter.
#' T2.13 No error when `output_format` is 'latex' and `engine` is 'kable'.
#' T2.14 An error when `output_format` is 'latex' and `engine` is 'dt', 'datatable' or 'datatables'.
#' T2.15 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].
#' T2.16 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].
#' T2.17 The strata-colnames of the `risktable` object are used as rownames.
#' T2.18 The metric of the risktable is used in the rendered table.
#' T2.19 The values of the evaluated metric are pivoted wide.
#' T3. The function `render.data.frame()` properly renders a `data.frame` object.
#' T3.1 When `engine` is 'gt' and `output_format` is 'latex', a latex `knit_asis` object is returned.
#' T3.2 A warning when `engine` is 'dt', 'datatable' or 'datatables' and `output_format is not 'html'.`
#' T4. The function `check_rendering_input()` only permits valid `output_format` and `engine` options.
#' T4.1 No error when `output_format` is `html` or `latex` and `engine` is `kable`, `gt`, `dt`, `datatable` or `datatables`.
#' T4.2 An error when `output_format` and/or `engine` are missing, `NULL` or `NA`.
#' T4.3 An error when `output_format` is not `html` or `latex` and `engine` is a valid option.
#' T4.4 An error when `engine` is not `kable`, `gt`, `dt`, `datatables` or `datatable` and `output_format` is a valid option.
#' T5. The function `render_datatable.data.frame()` creates an `htmlwidget` of the table.
#' T5.1 No error when `data` is a `data.frame`.
#' T5.2 The returned object is of type `htmlwidget`.
#' T5.3 The `title` is passed along to the HTML widget.
#' T5.4 The `source_cap` is passed along to the HTML widget.
#' T5.5 When `download_format` is not `NULL`, a button is added.
#' T5.6 When `download_format` is `NULL`, no button is added.
#' T6. The function `get_gt.data.frame()` properly passes the input along to `gt::gt()`.
#' T6.1 No error when `data` is a `data.frame`.
#' T6.2 The returned object is of type `gt_tbl`.
# Requirement T1 ----------------------------------------------------------
# T1: render.tableone() input validation and rendering. Covers class checks
# (T1.1-T1.2), required/optional arguments (T1.3-T1.7), the output_format x
# engine matrix (T1.8-T1.14) and download-format handling (T1.16-T1.18).
# NOTE: expect_error(NA) asserts that *no* error is raised (testthat 2e idiom).
testthat::context("render - T1. The function `render.tableone()` properly renders a `render.tableone` object.")
testthat::test_that("T1.1 No error when `data` is a `tableone` object.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  testthat::expect_true(inherits(adtte_tableone, "tableone"))
  adtte_tableone %>% visR:::render.tableone(title = NULL, datasource = NULL)
})
testthat::test_that("T1.2 An error when `data` is not a `tableone` object.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  # Strip the "tableone" class to simulate an unsupported input object.
  class(adtte_tableone) <- class(adtte_tableone)[class(adtte_tableone) != "tableone"]
  testthat::expect_false(inherits(adtte_tableone, "tableone"))
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.3 An error when `title` is missing.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(datasource = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.4 No error when `title` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = 1, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = "visR", datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = c(1, 2, 3), datasource = NULL) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.5 An error when `datasource` is missing.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(title = NULL) %>%
    testthat::expect_error()
})
testthat::test_that("T1.6 No error when `datasource` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = NULL) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = 1) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = "visR") %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(title = NULL, datasource = c(1, 2, 3)) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.7 No error when `footnote` is defined.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = NULL
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = 1
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = "visR"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      footnote = c(1, 2, 3)
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.8 No error when `output_format` is 'html' and `engine` is 'gt'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "gt"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.9 No error when `output_format` is 'html' and `engine` is 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "dt"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatable"
    ) %>%
    testthat::expect_error(NA)
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "html",
      engine = "datatables"
    ) %>%
    testthat::expect_error(NA)
})
testthat::test_that("T1.11 An error when `output_format` is 'latex' and `engine` is not 'gt' or 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  expected_error <- "Currently, 'latex' output is only implemented with 'gt' or 'kable' as a table engine."
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "latex",
      engine = "dt"
    ) %>%
    testthat::expect_error(expected_error)
})
testthat::test_that("T1.12 An error when `output_format` is an invalid parameter.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = NULL
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = 1
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T1.13 An error when `engine` is an invalid parameter.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = NULL
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = 1
    ) %>%
    testthat::expect_error()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = "visR"
    ) %>%
    testthat::expect_error()
})
testthat::test_that("T1.14 No error when `output_format` is 'latex' and `engine` is 'kable'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      output_format = "latex",
      engine = "kable"
    ) %>%
    testthat::expect_error(NA)
})
# NOTE: there is no T1.15 in the specification; numbering jumps to T1.16.
testthat::test_that("T1.16 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  for (engine in c("dt", "datatable", "datatables")) {
    for (download_format in c("copy", "csv", "excel")) {
      adtte_tableone %>%
        visR:::render.tableone(
          title = NULL,
          datasource = NULL,
          engine = engine,
          download_format = download_format
        ) %>%
        testthat::expect_error(NA)
    }
  }
})
testthat::test_that("T1.17 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  for (engine in c("gt", "kable")) {
    for (download_format in c("copy", "csv", "excel")) {
      adtte_tableone %>%
        visR:::render.tableone(
          title = NULL,
          datasource = NULL,
          engine = engine,
          download_format = download_format
        ) %>%
        testthat::expect_warning()
    }
  }
})
testthat::test_that("T1.18 A warning when `download_format` is not 'copy', 'csv' or 'excel'.", {
  adtte_tableone <- adtte %>%
    visR::get_tableone()
  expected_warning <- "Currently, only 'copy', 'csv' and 'excel' are supported as 'download_format'."
  adtte_tableone %>%
    visR:::render.tableone(
      title = NULL,
      datasource = NULL,
      engine = "dt",
      download_format = "visR"
    ) %>%
    testthat::expect_warning(expected_warning)
})
# Requirement T2 ---------------------------------------------------------------
testthat::context("render - T2. The function `render.risktable()` properly renders a `risktable` object.")
testthat::test_that("T2.1 No error when `data` is a `risktable` object.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
testthat::expect_true(inherits(adtte_risktable, "risktable"))
adtte_risktable %>%
visR:::render.risktable(title = NULL, datasource = NULL) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.2 An error when `data` is not a `risktable` object.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
class(adtte_risktable) <- class(adtte_risktable)[class(adtte_risktable) != "risktable"]
testthat::expect_false(inherits(adtte_risktable, "risktable"))
adtte_risktable %>%
visR:::render.risktable(title = NULL, datasource = NULL) %>%
testthat::expect_error()
})
testthat::test_that("T2.3 An error when `title` is missing.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(datasource = NULL) %>%
testthat::expect_error()
})
testthat::test_that("T2.4 No error when `title` is defined.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = 1,
datasource = NULL
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = "visR",
datasource = NULL
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = c(1, 2, 3),
datasource = NULL
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.5 An error when `datasource` is missing.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(title = NULL) %>%
testthat::expect_error()
})
testthat::test_that("T2.6 No error when `datasource` is defined.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = 1
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = "visR"
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = c(1, 2, 3)
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.7 No error when `footnote` is defined.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
footnote = NULL
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
footnote = 1
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
footnote = "visR"
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
footnote = c(1, 2, 3)
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.8 No error when `output_format` is 'html' and `engine` is 'gt'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "html",
engine = "gt"
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.9 No error when `output_format` is 'html' and `engine` is 'kable'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "html",
engine = "kable"
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.10 No error when `output_format` is 'html' and `engine` is 'dt', 'datatable' or 'datatables'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "html",
engine = "dt"
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "html",
engine = "datatable"
) %>%
testthat::expect_error(NA)
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "html",
engine = "datatables"
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.11 An error when `output_format` is an invalid parameter.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = NULL
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = 1
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "visR"
) %>%
testthat::expect_error()
})
testthat::test_that("T2.12 An error when `engine` is an invalid parameter.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = NULL
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = 1
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = "visR"
) %>%
testthat::expect_error()
})
testthat::test_that("T2.13 No error when `output_format` is 'latex' and `engine` is 'kable'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "kable"
) %>%
testthat::expect_error(NA)
})
testthat::test_that("T2.14 An error when `output_format` is 'latex' and `engine` is 'dt', 'datatable' or 'datatables'.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "dt"
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "datatable"
) %>%
testthat::expect_error()
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
output_format = "latex",
engine = "datatables"
) %>%
testthat::expect_error()
})
testthat::test_that("T2.15 No error when `engine` is in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
for (engine in c("dt", "datatable", "datatables")) {
for (download_format in c("copy", "csv", "excel")) {
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = engine,
download_format = download_format
) %>%
testthat::expect_error(NA)
}
}
})
testthat::test_that("T2.16 A warning when `engine` is not in ['dt', 'datatable', 'datatables'] and download_format` is in ['copy', 'csv', 'excel'].", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
for (engine in c("gt", "kable")) {
for (download_format in c("copy", "csv", "excel")) {
adtte_risktable %>%
visR:::render.risktable(
title = NULL,
datasource = NULL,
engine = engine,
download_format = download_format
) %>%
testthat::expect_warning()
}
}
})
testthat::test_that("T2.17 The strata-colnames of the `risktable` object are used as rownames.", {
adtte_risktable <- adtte %>%
visR::estimate_KM("SEX") %>%
visR::get_risktable()
gg <- adtte_risktable %>%
visR:::render.risktable(title = NULL, datasource = NULL)
gg_data <- gg["_data"] %>% as.data.frame()
strata_names <- colnames(adtte_risktable)[3:length(colnames(adtte_risktable))]
testthat::expect_identical(strata_names, gg_data[, 1])
})
testthat::test_that("T2.18 The metric of the risktable is used in the rendered table.", {
  # Build, render and extract the underlying data for a single statistic.
  rendered_data_for <- function(statistic) {
    risktable <- adtte %>%
      visR::estimate_KM("SEX") %>%
      visR::get_risktable(statlist = statistic)
    rendered <- visR:::render.risktable(risktable, title = NULL, datasource = NULL)
    as.data.frame(rendered["_data"])
  }
  # The second column of the rendered data carries the metric label.
  testthat::expect_identical(levels(rendered_data_for("n.risk")[, 2]), "At risk")
  testthat::expect_identical(levels(rendered_data_for("n.censor")[, 2]), "Censored")
  testthat::expect_identical(levels(rendered_data_for("n.event")[, 2]), "Events")
})
# BUG FIX: test label typo "evalutated" -> "evaluated".
testthat::test_that("T2.19 The values of the evaluated metric are pivoted wide.", {
  adtte_risktable <- adtte %>%
    visR::estimate_KM("SEX") %>%
    visR::get_risktable()
  gg <- adtte_risktable %>%
    visR:::render.risktable(title = NULL, datasource = NULL)
  gg_data <- gg["_data"] %>% as.data.frame()
  # The rendered table is strata-by-row / time-by-column; transpose once so
  # each stratum's time course lines up with the matching risktable column.
  # (length() on a data.frame is its column count, hence 3:length(gg_data).)
  transposed <- t(gg_data)
  female_vals <- as.numeric(transposed[3:length(gg_data), 1])
  male_vals <- as.numeric(transposed[3:length(gg_data), 2])
  testthat::expect_identical(adtte_risktable[, "F"], female_vals)
  testthat::expect_identical(adtte_risktable[, "M"], male_vals)
})
# Requirement T3 ---------------------------------------------------------------
testthat::context("render - T3. The function `render.data.frame()` properly renders a `data.frame` object.")
testthat::test_that("T3.1 When `engine` is 'gt' and `output_format` is 'latex', a latex `knit_asis` object is returned.", {
  # gt + latex should hand back a knitr-compatible object, not raw HTML.
  latex_table <- visR:::render.data.frame(
    adtte,
    title = NULL,
    datasource = NULL,
    engine = "gt",
    output_format = "latex"
  )
  testthat::expect_true(inherits(latex_table, "knit_asis"))
})
testthat::test_that("T3.2 A warning when `engine` is 'dt', 'datatable' or 'datatables' and `output_format is not 'html'.`", {
  expected_warning <- "DT engine only supports html output and not latex - falling back to html. Please pick a different engine to create other outputs"
  # All three aliases of the DT engine must fall back to html with the
  # same warning when latex output is requested.
  for (dt_alias in c("dt", "datatable", "datatables")) {
    testthat::expect_warning(
      visR:::render.data.frame(
        adtte,
        title = NULL,
        datasource = NULL,
        engine = dt_alias,
        output_format = "latex"
      ),
      expected_warning
    )
  }
})
# Requirement T4 ---------------------------------------------------------------
testthat::context("render - T4. The function `check_rendering_input()` only permits valid `output_format` and `engine` options.")
testthat::test_that("T4.1 No error when `output_format` is `html` or `latex` and `engine` is `kable`, `gt`, `dt`, `datatable` or `datatables`.", {
  valid_formats <- c("html", "latex")
  valid_engines <- c("kable", "gt", "dt", "datatable", "datatables")
  # Every valid format/engine pairing must pass validation without error.
  for (fmt in valid_formats) {
    for (eng in valid_engines) {
      testthat::expect_error(
        visR:::check_rendering_input(output_format = fmt, engine = eng),
        NA
      )
    }
  }
})
testthat::test_that("T4.2 An error when `output_format` and/or `engine` are missing, `NULL` or `NA`.", {
  # Renamed from `arg_missing_waring` (typo).
  arg_missing_warning <- "Please provide an output_format and an engine."
  visR:::check_rendering_input(output_format = "visR") %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = "visR") %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(output_format = "html", engine = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = "kable", output_format = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  visR:::check_rendering_input(engine = NULL, output_format = NULL) %>%
    testthat::expect_error(arg_missing_warning)
  expected_error <- "Currently implemented output engines are kable, gt and jquery datatables \\(DT\\). NA is not yet supported."
  visR:::check_rendering_input(output_format = "html", engine = NA) %>%
    testthat::expect_error(expected_error)
  expected_error <- "Currently supported output formats are html and latex. NA is not yet supported."
  # BUG FIX: `expected_error` was defined but never passed to expect_error(),
  # so the original test accepted ANY error regardless of its message.
  visR:::check_rendering_input(engine = "kable", output_format = NA) %>%
    testthat::expect_error(expected_error)
})
testthat::test_that("T4.3 An error when `output_format` is not `html` or `latex` and `engine` is a valid option.", {
  # An invalid output format is rejected even when the engine is valid.
  testthat::expect_error(
    visR:::check_rendering_input(engine = "kable", output_format = "visR"),
    "Currently supported output formats are html and latex. visR is not yet supported."
  )
})
testthat::test_that("T4.4 An error when `engine` is not `kable`, `gt`, `dt`, `datatables` or `datatable` and `output_format` is a valid option.", {
  # An invalid engine is rejected even when the output format is valid.
  testthat::expect_error(
    visR:::check_rendering_input(output_format = "html", engine = "visR"),
    "Currently implemented output engines are kable, gt and jquery datatables \\(DT\\). visR is not yet supported."
  )
})
# Requirement T5 ---------------------------------------------------------------
testthat::context("render - T5. The function `render_datatable.data.frame()` creates an `htmlwidget` of the table.")
testthat::test_that("T5.1 No error when `data` is a `data.frame`.", {
  testthat::expect_error(
    visR:::render_datatable.data.frame(
      adtte,
      title = "visR",
      download_format = "csv",
      source_cap = "visR"
    ),
    NA
  )
})
testthat::test_that("T5.2 The returned object is of type `htmlwidget`.", {
  widget <- visR:::render_datatable.data.frame(
    adtte,
    title = "visR_title",
    download_format = "csv",
    source_cap = "visR_source_cap"
  )
  testthat::expect_true(inherits(widget, "htmlwidget"))
})
testthat::test_that("T5.3 The `title` is passed along to the HTML widget.", {
  widget_title <- "visR_title"
  widget <- visR:::render_datatable.data.frame(
    adtte,
    title = widget_title,
    download_format = "csv",
    source_cap = "visR_source_cap"
  )
  # The DT caption embeds the title verbatim.
  testthat::expect_true(grepl(widget_title, widget$x$caption))
})
testthat::test_that("T5.4 The `source_cap` is passed along to the HTML widget.", {
  source_cap <- "visR_source_cap"
  widget <- visR:::render_datatable.data.frame(
    adtte,
    title = "visR_title",
    download_format = "csv",
    source_cap = source_cap
  )
  # The source caption is injected through the drawCallback JS hook.
  testthat::expect_true(grepl(source_cap, widget$x$options$drawCallback))
})
testthat::test_that("T5.5 When `download_format` is not `NULL`, a button is added.", {
  download_format <- "visR_csv"
  widget <- visR:::render_datatable.data.frame(
    adtte,
    title = "visR_title",
    download_format = download_format,
    source_cap = "visR_source_cap"
  )
  # The requested format appears as the first configured button.
  testthat::expect_equal(widget$x$options$buttons[[1]], download_format)
})
testthat::test_that("T5.6 When `download_format` is `NULL`, no button is added.", {
  widget <- visR:::render_datatable.data.frame(
    adtte,
    title = "visR_title",
    download_format = NULL,
    source_cap = "visR_source_cap"
  )
  testthat::expect_false("buttons" %in% names(widget$x$options))
})
# Requirement T6 ---------------------------------------------------------------
testthat::context("render - T6. The function `get_gt.data.frame()` properly passes the input along to `gt::gt()`.")
testthat::test_that("T6.1 No error when `data` is a `data.frame`.", {
  testthat::expect_error(visR:::get_gt.data.frame(adtte), NA)
})
testthat::test_that("T6.2 The returned object is of type `gt_tbl`.", {
  gt_table <- visR:::get_gt.data.frame(adtte)
  testthat::expect_true(inherits(gt_table, "gt_tbl"))
})
# END OF CODE -------------------------------------------------------------
|
##################################################################
## Functions for analysing the interactome and estimating the
## p-values in a multi-threaded setting
##################################################################
## LICENSE:
## Copyright (C) <2012> <Vivek Jayaswal>
##
## This library is free software; you can redistribute it and/or modify it
## under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2.1 of the License, or (at
## your option) any later version.
##
## This library is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
## License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this library; if not, write to the Free Software Foundation Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#######################################################################
## For a given expression dataset and PPI dataset, estimate the p-value
## for each hub with >=5 interaction partners
##
## Input
## exprFile: Vector of file names corresponding to normalized expression data
## labelIndex: Row of the exprFile which contains the sample labels
## mapFile: File name corresponding to PPI/Mirnome
## outFile: Output file name
## hubSize: Minimum number of interactors in the expression dataset
## randomizeCount: Number of permutations to consider for estimating
## the p-values
## adjustMethod: Method for adjusting the p-values. Default:"BH"
## Possible values - "BH", "bonferroni"
## assocType: Type of correlation to calculate. Default:TCC
## TCC, PCC, FSTAT
## labelVect: Vector of conditions to test. If all conditions
## are to be tested, set to NULL. Default: NULL
## exprDataType: "ENTREZ" or "SYMB". Default:SYMB
## ppiDataType: "ENTREZ" or "SYMB". Default:SYMB
## outputDataType: "ENTREZ" or "SYMB". Default:SYMB
## species: Name of species. At present only "Human" is supported
## inputCores: Number of threads for executing the code. Default:4
## hubVect: A vector of hub genes to consider. Default: NULL
## interactomeVect: A vector of interactors to consider. Default: NULL
##
##
## Output
## Two output files -
## 1. outFile that contains the p-value for each hub
## 2. outFile with suffix "Cor.txt" that contains the CC value
## for each hub-interactor pair
#######################################################################
identifySignificantHubs = function(exprFile, labelIndex, mapFile, outFile
  , hubSize = 5, randomizeCount = 1000
  , adjustMethod="BH", assocType = "TCC"
  , labelVect = NULL
  , exprDataType="SYMB", ppiDataType="SYMB", outputDataType="SYMB"
  , species="Human", inputCores=4
  , hubVect = NULL, interactomeVect = NULL) {
  ## Top-level driver: for every hub with >= hubSize interactors, estimate the
  ## permutation p-value of a differential-association statistic between
  ## conditions. Writes `outFile` (per-hub p-values) and `outFile` with a
  ## "_Cor.txt" suffix (per hub-interactor correlation values).
  ## Create a multi-threaded system; never request more workers than cores.
  numCores = inputCores
  if(detectCores() < inputCores) numCores = detectCores()
  cl = makeCluster(numCores)
  registerDoParallel(cl)
  ## Two expression files means the hubs are regulators (e.g. microRNAs)
  ## with their own expression matrix.
  twoInputExprFiles = FALSE
  if(length(exprFile) == 2) twoInputExprFiles = TRUE
  ## Load the Bioconductor annotation file
  generateGeneSymEntrezMap(species)
  ## Read expression data for genes and check the association type
  srcExprMatrix = readExprData(exprFile[1], labelIndex)
  ## Ensure that the correct statistic is being calculated
  ## and all the labels are present in the expression matrix
  labelsToConsider = colnames(srcExprMatrix)
  if(!is.null(labelVect)) labelsToConsider = labelVect
  uniqueLabels = unique(labelsToConsider)
  correctAssocType = checkAssociation(uniqueLabels, assocType)
  stopifnot(correctAssocType == TRUE)
  ## Read mapping data (hubs in column 1, interactors in column 2)
  srcHubsMatrix = trimWhiteSpace(as.matrix(read.table(mapFile, sep="\t", header=TRUE)))
  ## Perform an internal conversion to Entrez IDs if expr and ppi data types
  ## are different
  if(exprDataType != ppiDataType) {
    naIndexesExpr = naIndexesPpiHubs = naIndexesPpiInt = NULL
    if(exprDataType != "ENTREZ") {
      entrezIdVect = geneSymbolToEntrez(rownames(srcExprMatrix))
      naIndexesExpr = which(is.na(entrezIdVect))
      naGeneSymbols = rownames(srcExprMatrix)[naIndexesExpr]
      rownames(srcExprMatrix) = entrezIdVect
      if(length(naIndexesExpr) > 0) {
        print("Missing Entrez IDs for expression data")
        generateErrorOutput(naGeneSymbols, "Expr")
        srcExprMatrix = srcExprMatrix[-naIndexesExpr, ]
      }
    }
    if(ppiDataType != "ENTREZ") {
      if(!twoInputExprFiles) {
        ## If two files are provided, then hubs are microRNAs and no conversion needed for hubs
        entrezIdVect = geneSymbolToEntrez(srcHubsMatrix[, 1])
        naIndexesPpiHubs = which(is.na(entrezIdVect))
        naGeneSymbols = srcHubsMatrix[naIndexesPpiHubs, 1]
        srcHubsMatrix[, 1] = entrezIdVect
        if(length(naIndexesPpiHubs) > 0) {
          print("Missing Entrez IDs in hubs")
          generateErrorOutput(naGeneSymbols, "PPI_Hubs")
        }
      }
      entrezIdVect = geneSymbolToEntrez(srcHubsMatrix[, 2])
      naIndexesPpiInt = which(is.na(entrezIdVect))
      naGeneSymbols = srcHubsMatrix[naIndexesPpiInt, 2]
      srcHubsMatrix[, 2] = entrezIdVect
      if(length(naIndexesPpiInt) > 0) {
        print("Missing Entrez IDs in interactors")
        generateErrorOutput(naGeneSymbols, "PPI_Interactors")
      }
    }
    ## BUG FIX: which() returns integer(0) -- not NULL -- when nothing is NA,
    ## so the old guard `!is.null(...)` passed and the subset
    ## `srcHubsMatrix[-integer(0), ]` deleted EVERY row (a negative empty
    ## index selects nothing). Only drop rows when there is something to drop.
    badPpiIndexes = c(naIndexesPpiHubs, naIndexesPpiInt)
    if(length(badPpiIndexes) > 0) srcHubsMatrix = srcHubsMatrix[-badPpiIndexes, ]
    ## NOTE(review): this prompt fires when any of the index vectors is NULL,
    ## i.e. when a conversion step was SKIPPED -- one would expect to ask the
    ## user only when IDs actually failed to map. Confirm the intended
    ## semantics before changing; behavior preserved here.
    if(is.null(naIndexesExpr) || is.null(naIndexesPpiHubs) || is.null(naIndexesPpiInt)) {
      print("Do you want to continue: (Y/N)? ")
      userInput = "X"
      while(is.na(match(userInput, c("Y", "N")))) userInput = readLines(n=1)
      if(userInput == "N") return(0)
    }
  }
  ## Obtain the list of hubs
  srcHubs = readMapData(srcHubsMatrix)
  elementsInHubs = sapply(srcHubs, length)
  ## Read regulator expression data (same matrix unless two files supplied)
  srcRegMatrix = srcExprMatrix
  if(twoInputExprFiles) srcRegMatrix = readExprData(exprFile[2], labelIndex)
  regNames = rownames(srcRegMatrix)
  ## Identify the hubs based on expression data
  exprHubs = filterHubs(srcHubs, rownames(srcExprMatrix), regNames, hubSize)
  elementsInExprHub = sapply(exprHubs, length)
  ## Obtain user-defined subset of hubs and interactomes
  if(!is.null(hubVect) || !is.null(interactomeVect)) {
    exprHubs = filterUserDefined(exprHubs, hubVect, interactomeVect, hubSize)
    elementsInExprHub = sapply(exprHubs, length)
  }
  exprIndexes = match(names(exprHubs), names(srcHubs))
  totalElementsInExprHub = elementsInHubs[exprIndexes]
  ## Save the hubs/interactors as gene symbols if the user explicitly asks for it
  ## and the input comprises Entrez IDs
  changeToGeneSymb = FALSE
  if(outputDataType == "SYMB" && (exprDataType == "ENTREZ" || ppiDataType == "ENTREZ")) changeToGeneSymb = TRUE
  ## Display the number of threads used for permutation tests
  print(paste("Number of threads = ", getDoParWorkers(), sep=""))
  ## Perform interactome analysis on the condition-filtered, standardized data
  filterSrcExprMatrix = filterMatrix(srcExprMatrix, uniqueLabels)
  filterSrcRegMatrix = filterMatrix(srcRegMatrix, uniqueLabels)
  probVect = interactomeAnalysis(filterSrcExprMatrix, filterSrcRegMatrix, exprHubs, randomizeCount, assocType, numCores
    , outFile, changeToGeneSymb, twoInputExprFiles, uniqueLabels)
  saveProbValues(probVect, elementsInExprHub, totalElementsInExprHub, outFile, changeToGeneSymb, twoInputExprFiles, adjustMethod)
  stopCluster(cl)
}
#######################################################################
## For a given normalized expression matrix and PPI list,
## estimate the p-value for each hub with >=5 interaction partners
##
## Input
## inExprMatrix: Normalized expression matrix for interactors
## inRegMatrix: Normalized expression matrix for regulators
## inHubList: PPI list with elements corresponding to the interactors
## inCount: Number of permutations to consider for estimating
## the p-values
## assocType: Type of correlation to calculate
## coreCount: Number of threads for executing the code
## fileName: Output file name
## getGeneSymb: TRUE/FALSE. If TRUE, convert Entrez IDs to gene symbols
## isMicro: TRUE/FALSE. If TRUE, then the regulator corresponds to microRNAs
## sampleGroups: Vector of unique conditions to evaluate
##
##
## Output
## A vector of p-values for each hub
#######################################################################
interactomeAnalysis = function(inExprMatrix, inRegMatrix, inHubList, inCount, assocType, coreCount, fileName
  , getGeneSymb, isMicro, sampleGroups) {
  ## For every hub, compute the observed association statistic, then estimate
  ## its permutation p-value by splitting `inCount` permutations across
  ## `coreCount` workers. Returns a named numeric vector of p-values
  ## (one per hub, named by hub).
  numUniqueLabels = length(sampleGroups)
  hubNames = names(inHubList)
  geneNames = rownames(inExprMatrix)
  regulatorNames = rownames(inRegMatrix)
  ## Create the file for saving <hub, interactor> values; with exactly two
  ## conditions an extra difference column ("A-B") is written.
  ## NOTE(review): "." in the gsub pattern is a regex wildcard, so any
  ## character before "txt" matches -- confirm fileName always ends ".txt".
  corFile = gsub(".txt", "_Cor.txt", fileName)
  if(numUniqueLabels == 2) {
    corLabel = paste(sampleGroups[1], sampleGroups[2], sep="-")
    write(c("Hub", "Interactor", sampleGroups, corLabel), corFile, sep="\t", ncolumns=5)
  }
  else write(c("Hub", "Interactor", sampleGroups), corFile, sep="\t", ncolumns=numUniqueLabels+2)
  print(paste("Number of hubs = ", length(inHubList), sep=""))
  ## Preallocate instead of growing the result with c() inside the loop;
  ## seq_along() also handles an empty hub list safely (1:length would not).
  probVect = numeric(length(inHubList))
  for(hubIndex in seq_along(inHubList)) {
    print(paste("Current hub = ", hubIndex, sep=""))
    currentHub = hubNames[hubIndex]
    currentInteractors = as.character(inHubList[[hubIndex]])
    currentHubIndex = match(currentHub, regulatorNames)
    currentIteratorIndexes = match(currentInteractors, geneNames)
    ## Obtain the avg correlation coefficient for real data
    ## along with the correlation coefficient per interactor
    origHubDiff = getCorrelation(inRegMatrix[currentHubIndex, ], inExprMatrix[currentIteratorIndexes, ]
      , sampleGroups, corType = assocType, corInfo = TRUE, permuteLabels = FALSE)
    saveCorValues(currentHub, currentInteractors, origHubDiff$corMatrix
      , corFile, getGeneSymb, isMicro)
    ## Split the permutations across workers; the last worker absorbs the
    ## remainder so the per-worker counts always sum to inCount.
    grpValue = floor(inCount/coreCount)
    grpValueVector = rep(grpValue, coreCount)
    if(sum(grpValueVector) < inCount) grpValueVector[coreCount] = grpValueVector[coreCount] + (inCount - sum(grpValueVector))
    exportFunctions = c("calculateProbDist", "getCorrelation"
      , "interactomeTaylorCorrelation", "getTaylorCor"
      , "interactomePearsonCorrelation", "getPearsonCor"
      , "getSampleIndexes", "getFStat")
    probTemp = foreach(i = 1:coreCount, .combine='c', .export=exportFunctions) %dopar% {
      calculateProbDist(inExprMatrix, inRegMatrix, currentHubIndex, currentIteratorIndexes
        , sampleGroups, assocType
        , grpValueVector[i], origHubDiff$avgData)
    }
    ## Permutation p-value: fraction of permuted statistics >= observed.
    probVect[hubIndex] = sum(probTemp)/inCount
  } ## All hubs have been considered
  names(probVect) = names(inHubList)
  return(probVect)
}
#######################################################################
## For a given hub and its interactor set, estimate the p-values
##
## Input
## exprMatrix: Normalized expression matrix for interactors
## regMatrix: Normalized expression matrix for regulators
## hubIndex: Row index of regMatrix
## interactorIndexes: Vector of row indexes in exprMatrix corresponding
## to the interactors
## sampleGroups: Vector of unique conditions to compare
## assocType: Type of correlation to calculate
## grpVal: Number of permutations
## origVal: Actual avg hub diff
##
##
## Output
## Number of times the bootstrap p-value > origVal
#######################################################################
calculateProbDist = function(exprMatrix, regMatrix, hubIndex, interactorIndexes, sampleGroups
  , assocType, grpVal, origVal) {
  ## Count how many of `grpVal` label permutations yield an association
  ## statistic at least as extreme as the observed value `origVal`.
  ## Returns the count (0..grpVal); the caller divides by the total
  ## permutation count to obtain the p-value.
  randomHubDiff = 0
  ## BUG FIX: seq_len() handles grpVal == 0 (possible when the worker count
  ## exceeds the permutation count, since grpVal = floor(inCount/coreCount));
  ## the old 1:grpVal would iterate over c(1, 0) and run two spurious
  ## permutations.
  for(j in seq_len(grpVal)) {
    permutedStat = getCorrelation(regMatrix[hubIndex, ], exprMatrix[interactorIndexes, ], sampleGroups
      , corType = assocType, corInfo = FALSE, permuteLabels = TRUE)
    if(permutedStat >= origVal) randomHubDiff = randomHubDiff + 1
  }
  return(randomHubDiff)
}
#######################################################################
## For a given hub, calculate the correlation for each hub-interactor pair
## and the average hub difference
##
## Input
## inVect: Vector of expression value for the hub
## inMatrix: Matrix of expression values for the interactors
## sampleGroups: Vector of unique conditions to compare
## corType: Type of correlation to calculate
## corInfo: TRUE -> Return a list of values
## FALSE -> Return only the average hub difference
## permuteLabels: TRUE/FALSE. If TRUE, then permute the samples for
## calculation of p-value
##
## Output
## If corInfo = TRUE, a list containing average hub difference and the
## pairwise hub-interactor values for both the conditions
## If corInfo = FALSE, average hub difference
#######################################################################
getCorrelation = function(inVect, inMatrix, sampleGroups, corType, corInfo, permuteLabels) {
  ## Compute the per-interactor correlation with the hub under each condition
  ## and collapse the correlation matrix into a single association statistic.
  ##
  ## inVect:        expression vector for the hub/regulator
  ## inMatrix:      expression matrix for the interactors (rows = interactors)
  ## sampleGroups:  vector of unique condition labels
  ## corType:       "TCC", "PCC" or "FSTAT"
  ## corInfo:       TRUE -> return list(avgData, corMatrix); FALSE -> statistic only
  ## permuteLabels: TRUE -> shuffle the sample-to-condition assignment (permutation test)
  numInteractors = nrow(inMatrix)
  avgDiffLabels = NULL
  ## One column per condition, one row per hub-interactor pair.
  corVectMatrix = matrix(0, nrow=nrow(inMatrix), ncol=length(sampleGroups))
  labelList = getSampleIndexes(sampleGroups, colnames(inMatrix), permuteLabels)
  if(corType == "TCC") { ## Taylor's CC
    ## Hard-coded 1:2 is safe only because checkAssociation() restricts
    ## TCC to exactly two conditions.
    for(i in 1:2) corVectMatrix[, i] = interactomeTaylorCorrelation(inVect, inMatrix, labelList[[i]])
  }
  if(corType == "PCC" || corType == "FSTAT") { ## Pearsons CC
    for(i in 1:length(sampleGroups)) corVectMatrix[, i] = interactomePearsonCorrelation(inVect[labelList[[i]]], inMatrix[, labelList[[i]]])
  }
  if(corType == "TCC" || corType == "PCC") {
    ## Sum of absolute changes in correlation between the two conditions.
    avgDiffLabels = sum(abs(corVectMatrix[, 1] - corVectMatrix[, 2]))
    ## NOTE(review): divides by (numInteractors - 1) rather than
    ## numInteractors -- presumably matches the published VAN statistic;
    ## confirm before changing.
    avgDiffLabels = avgDiffLabels/(numInteractors - 1)
  }
  ## For 3+ conditions, use the sum-of-squares ratio instead.
  if(corType == "FSTAT") avgDiffLabels = getFStat(corVectMatrix)
  if(corInfo) return(list(avgData = avgDiffLabels, corMatrix = corVectMatrix))
  return(avgDiffLabels)
}
#######################################################################
## Obtain the Taylor's CC for all interactors for a given condition
##
## Input
## hubVector: Vector of expression value for the hub
## geneMatrix: Matrix of expression values for the interactors
## currentLabel: Column indexes corresponding to a condition
##
## Output
## A vector of CC for all hub-interactor pairs
#######################################################################
interactomeTaylorCorrelation = function(hubVector, geneMatrix, currentLabel) {
  ## Apply Taylor's correlation row-wise: one value per hub-interactor pair
  ## for the samples in `currentLabel`.
  taylorValues = apply(geneMatrix, 1, getTaylorCor, hubVector, currentLabel)
  return(taylorValues)
}
#######################################################################
## Obtain the Taylor's CC for a given hub-interactor pair
##
## Input
## x: Vector of expression value for the interactor
## hubVector: Vector of expression value for the hub
## currentLabels: Column indexes corresponding to a condition
##
## Output
## Taylor's CC
#######################################################################
getTaylorCor = function(x, hubVector, currentLabels) {
  ## Taylor's correlation for one hub-interactor pair: a covariance-like
  ## sum of the condition-specific deviations from the GLOBAL (all-sample)
  ## means, scaled by the condition-specific standard deviations.
  hubSubset = hubVector[currentLabels]
  intSubset = x[currentLabels]
  scaleFactor = (length(currentLabels) - 1) * sd(hubSubset) * sd(intSubset)
  crossSum = sum((intSubset - mean(x)) * (hubSubset - mean(hubVector)))
  return(crossSum/scaleFactor)
}
#######################################################################
## Obtain the Pearsons CC for all interactors for a given condition
##
## Input
## hubVector: Vector of expression value for the hub
## geneMatrix: Matrix of expression values for the interactors
##
## Output
## A vector of CC for all hub-interactor pairs
#######################################################################
interactomePearsonCorrelation = function(hubVector, geneMatrix) {
  ## Pearson correlation between the hub profile and every interactor row.
  pearsonValues = apply(geneMatrix, 1, getPearsonCor, hubVector)
  return(pearsonValues)
}
#######################################################################
## Obtain the Pearsons CC for a given hub-interactor pair
##
## Input
## x: Vector of expression value for the interactor
## inVect: Vector of expression value for the hub
##
## Output
## Pearsons CC
#######################################################################
getPearsonCor = function(x, inVect) {
  ## Plain Pearson correlation, kept as a named wrapper so it can be passed
  ## to apply() alongside getTaylorCor().
  return(stats::cor(x, inVect))
}
#######################################################################
## Check that the association measure is appropriate for analyzing the
## number of conditions in the dataset
##
## Input
## uniqueLabels: Vector of unique conditions in the dataset
## assocType: Type of association
##
## Output
## TRUE/FALSE value. If the assocType is "TCC" or "PCC", then
## the number of unique conditions has to be two. Otherwise, the
## the number of unique conditions must be > 2
#######################################################################
checkAssociation = function(uniqueLabels, assocType) {
  ## Validate that the association statistic matches the number of
  ## biological conditions: TCC/PCC are pairwise statistics (exactly two
  ## groups), FSTAT handles three or more groups.
  ## Returns TRUE when the combination is valid, FALSE otherwise.
  numUniqueLabels = length(uniqueLabels)
  stopifnot(numUniqueLabels > 1)
  ## BUG FIX: match() returns NA (never NULL) on failure, so the original
  ## is.null(match(...)) test could not fire and invalid two-group
  ## statistics (e.g. FSTAT with 2 groups) were silently accepted.
  if(numUniqueLabels == 2 && !(assocType %in% c("TCC", "PCC"))) {
    print("Number of unique labels = 2. The valid options are TCC/PCC")
    return(FALSE)
  }
  if(numUniqueLabels > 2 && assocType != "FSTAT") {
    print("Number of unique labels > 2. The only valid option is FSTAT")
    return(FALSE)
  }
  return(TRUE)
}
#######################################################################
## Obtain a subset of the expression matrix corresponding to the
## conditions of interest
##
## Input
## inputMatrix: Input matrix (N x P)
## labelsOfInterest: Vector of biological conditions of interest
##
## Output
## An N x P1 matrix such that only the columns corresponding to
## the relevant conditions are retained
#######################################################################
filterMatrix = function(inputMatrix, labelsOfInterest) {
  ## Keep only the columns whose labels belong to the conditions of
  ## interest, then standardize each row (median 0, unit variance).
  keepCols = which(colnames(inputMatrix) %in% labelsOfInterest)
  subsetMatrix = inputMatrix[, keepCols]
  return(exprDataStd(subsetMatrix))
}
#######################################################################
## Convert a gene expression matrix with multiple rows corresponding
## to the same gene into a normalized matrix with one row per gene
## Also, the gene expression values are standardized with row median
## set to 0 and row var = 1
##
## Input
## summaryMatrix: Non-standardized input matrix
##
## Output
## Standardized matrix
#######################################################################
exprDataStd = function(summaryMatrix) {
  ## Standardize each row: subtract the row median and divide by the row
  ## standard deviation, giving median-centred, unit-variance rows.
  rowMedians = apply(summaryMatrix, 1, median)
  rowSds = apply(summaryMatrix, 1, sd)
  centredMatrix = sweep(summaryMatrix, 1, rowMedians, "-")
  return(sweep(centredMatrix, 1, rowSds, "/"))
}
#######################################################################
## Determine the column indexes of samples that correspond to different
## biological conditions of interest
##
## Input
## uniqueLabels: Vector of unique conditions
## allLabels: Original labels for the various columns
## permuteLabels: TRUE/FALSE
##
## Output
## A list with each element representing the samples that correspond
## to the biological condition of interest. The order of list
## elements corresponds to uniqueLabels
#######################################################################
getSampleIndexes = function(uniqueLabels, allLabels, permuteLabels) {
  ## Map each unique condition to the column indexes of its samples.
  ## The list order follows uniqueLabels.
  labelIndexesList = lapply(seq_along(uniqueLabels)
    , function(i) which(allLabels %in% uniqueLabels[i]))
  if(!permuteLabels) return(labelIndexesList)
  ## Permutation branch: shuffle the group memberships while preserving
  ## the per-group sample counts, then rebuild the index list.
  countPerGrp = sapply(labelIndexesList, length)
  grpData = rep(seq_along(uniqueLabels), countPerGrp)
  shuffled = sample(grpData, sum(countPerGrp), replace=FALSE)
  permutedList = lapply(seq_along(uniqueLabels), function(i) which(shuffled == i))
  return(permutedList)
}
#######################################################################
## Determine the ratio of between to within sum of squares for testing
## whether there is a difference between three or more conditions
## for a given hub
##
## Input
## inputMatrix: X x Y matrix where X corresponds to the TCC/PCC
## value for hub-interactor pairs and Y denotes
## the number of conditions
##
## Output
## Ratio
#######################################################################
getFStat = function(inputMatrix) {
  ## Sum-of-squares ratio statistic for comparing hub-interactor
  ## correlations across three or more conditions.
  ## inputMatrix: rows = hub-interactor pairs, columns = conditions.
  ## Total sum of squares over every cell of the matrix.
  inputData = as.vector(inputMatrix)
  totalSumSquare = var(inputData) * (length(inputData) - 1)
  ## NOTE(review): despite its name, this is the sum of WITHIN-column sums
  ## of squares (per-condition variance * (n - 1)); consequently
  ## `withinSumSquare` below holds the between-column SS, and the returned
  ## ratio is within/between rather than between/within as the header
  ## comment states. Verify against the published VAN statistic before
  ## renaming or "fixing" -- the permutation test applies the same statistic
  ## to observed and permuted data, so results are internally consistent
  ## either way.
  betweenSumSquare = sum(apply(inputMatrix, 2, var) * (nrow(inputMatrix) - 1))
  withinSumSquare = totalSumSquare - betweenSumSquare
  ratio = betweenSumSquare/withinSumSquare
  return(ratio)
}
| /VAN/R/InteractomeMultiThread_Func.R | no_license | kevinwang09/CPC_VAN_analysis | R | false | false | 21,505 | r | ##################################################################
## Functions for analysing the interactome and estimating the
## p-values in a multi-threaded setting
##################################################################
## LICENSE:
## Copyright (C) <2012> <Vivek Jayaswal>
##
## This library is free software; you can redistribute it and/or modify it
## under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2.1 of the License, or (at
## your option) any later version.
##
## This library is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
## License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this library; if not, write to the Free Software Foundation Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#######################################################################
## For a given expression dataset and PPI dataset, estimate the p-value
## for each hub with >=5 interaction partners
##
## Input
## exprFile: Vector of file names corresponding to normalized expression data
## labelIndex: Row of the exprFile which contains the sample labels
## mapFile: File name corresponding to PPI/Mirnome
## outFile: Output file name
## hubSize: Minimum number of interactors in the expression dataset
## randomizeCount: Number of permutations to consider for estimating
## the p-values
## adjustMethod: Method for adjusting the p-values. Default:"BH"
## Possible values - "BH", "bonferroni"
## assocType: Type of correlation to calculate. Default:TCC
## TCC, PCC, FSTAT
## labelVect: Vector of conditions to test. If all conditions
## are to be tested, set to NULL. Default: NULL
## exprDataType: "ENTREZ" or "SYMB". Default:SYMB
## ppiDataType: "ENTREZ" or "SYMB". Default:SYMB
## outputDataType: "ENTREZ" or "SYMB". Default:SYMB
## species: Name of species. At present only "Human" is supported
## inputCores: Number of threads for executing the code. Default:4
## hubVect: A vector of hub genes to consider. Default: NULL
## interactomeVect: A vector of interactors to consider. Default: NULL
##
##
## Output
## Two output files -
## 1. outFile that contains the p-value for each hub
## 2. outFile with suffix "Cor.txt" that contains the CC value
## for each hub-interactor pair
#######################################################################
identifySignificantHubs = function(exprFile, labelIndex, mapFile, outFile
, hubSize = 5, randomizeCount = 1000
, adjustMethod="BH", assocType = "TCC"
, labelVect = NULL
, exprDataType="SYMB", ppiDataType="SYMB", outputDataType="SYMB"
, species="Human", inputCores=4
, hubVect = NULL, interactomeVect = NULL) {
## Create a multi-threaded system
numCores = inputCores
if(detectCores() < inputCores) numCores = detectCores()
cl = makeCluster(numCores)
registerDoParallel(cl)
twoInputExprFiles = FALSE
if(length(exprFile) == 2) twoInputExprFiles = TRUE
## Load the Bioconductor annotation file
generateGeneSymEntrezMap(species)
## Read expression data for genes and check the association type
srcExprMatrix = readExprData(exprFile[1], labelIndex)
## Ensure that the correct statistic is being calculated
## and all the labels are present in the expression matrix
labelsToConsider = colnames(srcExprMatrix)
if(!is.null(labelVect)) labelsToConsider = labelVect
uniqueLabels = unique(labelsToConsider)
correctAssocType = checkAssociation(uniqueLabels, assocType)
stopifnot(correctAssocType == TRUE)
## Read mapping data
srcHubsMatrix = trimWhiteSpace(as.matrix(read.table(mapFile, sep="\t", header=TRUE)))
## Perform an internal conversion to Entrez IDs if expr and ppi data types
## are different
if(exprDataType != ppiDataType) {
naIndexesExpr = naIndexesPpiHubs = naIndexesPpiInt = NULL
if(exprDataType != "ENTREZ") {
entrezIdVect = geneSymbolToEntrez(rownames(srcExprMatrix))
naIndexesExpr = which(is.na(entrezIdVect))
naGeneSymbols = rownames(srcExprMatrix)[naIndexesExpr]
rownames(srcExprMatrix) = entrezIdVect
if(length(naIndexesExpr) > 0) {
print("Missing Entrez IDs for expression data")
generateErrorOutput(naGeneSymbols, "Expr")
srcExprMatrix = srcExprMatrix[-naIndexesExpr, ]
}
}
if(ppiDataType != "ENTREZ") {
if(!twoInputExprFiles) {
## If two files are provided, then hubs are microRNAs and no conversion needed for hubs
entrezIdVect = geneSymbolToEntrez(srcHubsMatrix[, 1])
naIndexesPpiHubs = which(is.na(entrezIdVect))
naGeneSymbols = srcHubsMatrix[naIndexesPpiHubs, 1]
srcHubsMatrix[, 1] = entrezIdVect
if(length(naIndexesPpiHubs) > 0) {
print("Missing Entrez IDs in hubs")
generateErrorOutput(naGeneSymbols, "PPI_Hubs")
}
}
entrezIdVect = geneSymbolToEntrez(srcHubsMatrix[, 2])
naIndexesPpiInt = which(is.na(entrezIdVect))
naGeneSymbols = srcHubsMatrix[naIndexesPpiInt, 2]
srcHubsMatrix[, 2] = entrezIdVect
if(length(naIndexesPpiInt) > 0) {
print("Missing Entrez IDs in interactors")
generateErrorOutput(naGeneSymbols, "PPI_Interactors")
}
}
if(!is.null(naIndexesPpiHubs) || !is.null(naIndexesPpiInt)) srcHubsMatrix = srcHubsMatrix[-c(naIndexesPpiHubs, naIndexesPpiInt), ]
if(is.null(naIndexesExpr) || is.null(naIndexesPpiHubs) || is.null(naIndexesPpiInt)) {
print("Do you want to continue: (Y/N)? ")
userInput = "X"
while(is.na(match(userInput, c("Y", "N")))) userInput = readLines(n=1)
if(userInput == "N") return(0)
}
}
## Obtain the list of hubs
srcHubs = readMapData(srcHubsMatrix)
elementsInHubs = sapply(srcHubs, length)
## Read regulator expression data
srcRegMatrix = srcExprMatrix
if(twoInputExprFiles) srcRegMatrix = readExprData(exprFile[2], labelIndex)
regNames = rownames(srcRegMatrix)
## Identify the hubs based on expression data
exprHubs = filterHubs(srcHubs, rownames(srcExprMatrix), regNames, hubSize)
elementsInExprHub = sapply(exprHubs, length)
## Obtain user-defined subset of hubs and interactomes
if(!is.null(hubVect) || !is.null(interactomeVect)) {
exprHubs = filterUserDefined(exprHubs, hubVect, interactomeVect, hubSize)
elementsInExprHub = sapply(exprHubs, length)
}
exprIndexes = match(names(exprHubs), names(srcHubs))
totalElementsInExprHub = elementsInHubs[exprIndexes]
## Save the hubs/interactors as gene symbols if the user explicitly asks for it
## and the input comprises Entrez IDs
changeToGeneSymb = FALSE
if(outputDataType == "SYMB" && (exprDataType == "ENTREZ" || ppiDataType == "ENTREZ")) changeToGeneSymb = TRUE
## Display the number of threads used for permutation tests
print(paste("Number of threads = ", getDoParWorkers(), sep=""))
## Perform interactome analysis
filterSrcExprMatrix = filterMatrix(srcExprMatrix, uniqueLabels)
filterSrcRegMatrix = filterMatrix(srcRegMatrix, uniqueLabels)
probVect = interactomeAnalysis(filterSrcExprMatrix, filterSrcRegMatrix, exprHubs, randomizeCount, assocType, numCores
, outFile, changeToGeneSymb, twoInputExprFiles, uniqueLabels)
saveProbValues(probVect, elementsInExprHub, totalElementsInExprHub, outFile, changeToGeneSymb, twoInputExprFiles, adjustMethod)
stopCluster(cl)
}
#######################################################################
## For a given normalized expression matrix and PPI list,
## estimate the p-value for each hub with >=5 interaction partners
##
## Input
## inExprMatrix: Normalized expression matrix for interactors
## inRegMatrix: Normalized expression matrix for regulators
## inHubList: PPI list with elements corresponding to the interactors
## inCount: Number of permutations to consider for estimating
## the p-values
## assocType: Type of correlation to calculate
## coreCount: Number of threads for executing the code
## fileName: Output file name
## getGeneSymb: TRUE/FALSE. If TRUE, convert Entrez IDs to gene symbols
## isMicro: TRUE/FALSE. If TRUE, then the regulator corresponds to microRNAs
## sampleGroups: Vector of unique conditions to evaluate
##
##
## Output
## A vector of p-values for each hub
#######################################################################
interactomeAnalysis = function(inExprMatrix, inRegMatrix, inHubList, inCount, assocType, coreCount, fileName
    , getGeneSymb, isMicro, sampleGroups) {
    ## Driver for the permutation test: for every hub, compute the observed
    ## average hub difference, then estimate its p-value as the fraction of
    ## `inCount` label permutations whose statistic reaches the observed one.
    ## Side effect: per-pair correlation values are written to "<fileName base>_Cor.txt".
    probVect = NULL
    allLabels = colnames(inExprMatrix)  ## NOTE(review): assigned but never used below
    numUniqueLabels = length(sampleGroups)
    hubNames = names(inHubList)
    geneNames = rownames(inExprMatrix)
    regulatorNames = rownames(inRegMatrix)
    ## Create the file for saving <hub, interactor> values; this write()
    ## call (re)creates the file with a single header row.
    corFile = gsub(".txt", "_Cor.txt", fileName)
    if(numUniqueLabels == 2) {
        ## Two-condition runs get an extra "<grp1>-<grp2>" difference column
        corLabel = paste(sampleGroups[1], sampleGroups[2], sep="-")
        write(c("Hub", "Interactor", sampleGroups, corLabel), corFile, sep="\t", ncolumns=5)
    }
    else write(c("Hub", "Interactor", sampleGroups), corFile, sep="\t", ncolumns=numUniqueLabels+2)
    print(paste("Number of hubs = ", length(inHubList), sep=""))
    for(hubIndex in 1:length(inHubList)) {
        print(paste("Current hub = ", hubIndex, sep=""))
        currentHub = hubNames[hubIndex]
        currentInteractors = as.character(inHubList[[hubIndex]])
        numInteractors = length(currentInteractors)  ## NOTE(review): unused here
        ## Row positions of the hub (regulator matrix) and of its
        ## interactors (interactor expression matrix)
        currentHubIndex = match(currentHub, regulatorNames)
        currentIteratorIndexes = match(currentInteractors, geneNames)
        ## Obtain the avg correlation coefficient for real (unpermuted) data
        ## along with the correlation coefficient per interactor
        origHubDiff = getCorrelation(inRegMatrix[currentHubIndex, ], inExprMatrix[currentIteratorIndexes, ]
            , sampleGroups, corType = assocType, corInfo = TRUE, permuteLabels = FALSE)
        saveCorValues(currentHub, currentInteractors, origHubDiff$corMatrix
            , corFile, getGeneSymb, isMicro)
        ## Split the inCount permutations as evenly as possible across the
        ## workers; any remainder is pushed onto the last worker.
        grpValue = floor(inCount/coreCount)
        grpValueVector = rep(grpValue, coreCount)
        if(sum(grpValueVector) < inCount) grpValueVector[coreCount] = grpValueVector[coreCount] + (inCount - sum(grpValueVector))
        ## Helper functions that must be shipped to the %dopar% workers
        exportFunctions = c("calculateProbDist", "getCorrelation"
            , "interactomeTaylorCorrelation", "getTaylorCor"
            , "interactomePearsonCorrelation", "getPearsonCor"
            , "getSampleIndexes", "getFStat")
        probTemp = foreach(i = 1:coreCount, .combine='c', .export=exportFunctions) %dopar% {
            calculateProbDist(inExprMatrix, inRegMatrix, currentHubIndex, currentIteratorIndexes
                , sampleGroups, assocType
                , grpValueVector[i], origHubDiff$avgData)
        }
        ## p-value = (permutations reaching the observed statistic) / inCount
        probVect = c(probVect, sum(probTemp)/inCount)
    } ## All hubs have been considered
    names(probVect) = names(inHubList)
    return(probVect)
}
#######################################################################
## For a given hub and its interactor set, estimate the p-values
##
## Input
## exprMatrix: Normalized expression matrix for interactors
## regMatrix: Normalized expression matrix for regulators
## hubIndex: Row index of regMatrix
## interactorIndexes: Vector of row indexes in exprMatrix corresponding
## to the interactors
## sampleGroups: Vector of unique conditions to compare
## assocType: Type of correlation to calculate
## grpVal: Number of permutations
## origVal: Actual avg hub diff
##
##
## Output
## Number of times the permuted statistic is >= origVal
#######################################################################
calculateProbDist = function(exprMatrix, regMatrix, hubIndex, interactorIndexes, sampleGroups
    , assocType, grpVal, origVal) {
    ## Run `grpVal` label permutations for one hub and count how many
    ## permuted average hub differences reach the observed value.
    ##
    ## Args:
    ##   exprMatrix: normalized expression matrix for interactors (rows = genes)
    ##   regMatrix: normalized expression matrix for regulators
    ##   hubIndex: row index of the hub within regMatrix
    ##   interactorIndexes: row indexes of the interactors within exprMatrix
    ##   sampleGroups: vector of unique conditions to compare
    ##   assocType: correlation type ("TCC", "PCC" or "FSTAT")
    ##   grpVal: number of permutations assigned to this worker
    ##   origVal: observed (unpermuted) average hub difference
    ##
    ## Returns: count of permutations with statistic >= origVal.
    randomHubDiff = 0
    ## BUG FIX: use seq_len(grpVal) instead of 1:grpVal -- when a worker is
    ## assigned zero permutations, 1:0 iterates over c(1, 0) and would run
    ## two spurious permutations, inflating the count.
    for(j in seq_len(grpVal)) {
        temp = getCorrelation(regMatrix[hubIndex, ], exprMatrix[interactorIndexes, ], sampleGroups
            , corType = assocType, corInfo = FALSE, permuteLabels = TRUE)
        if(temp >= origVal) randomHubDiff = randomHubDiff + 1
    }
    return(randomHubDiff)
}
#######################################################################
## For a given hub, calculate the correlation for each hub-interactor pair
## and the average hub difference
##
## Input
## inVect: Vector of expression value for the hub
## inMatrix: Matrix of expression values for the interactors
## sampleGroups: Vector of unique conditions to compare
## corType: Type of correlation to calculate
## corInfo: TRUE -> Return a list of values
## FALSE -> Return only the average hub difference
## permuteLabels: TRUE/FALSE. If TRUE, then permute the samples for
## calculation of p-value
##
## Output
## If corInfo = TRUE, a list containing average hub difference and the
## pairwise hub-interactor values for both the conditions
## If corInfo = FALSE, average hub difference
#######################################################################
getCorrelation = function(inVect, inMatrix, sampleGroups, corType, corInfo, permuteLabels) {
    ## Compute the hub-level statistic for one hub.
    ##   inVect: expression profile of the hub (all samples)
    ##   inMatrix: expression of its interactors (rows = interactors)
    ##   corType: "TCC"/"PCC" (two conditions) or "FSTAT" (>2 conditions)
    ##   corInfo: TRUE -> also return the per-pair correlation matrix
    ##   permuteLabels: TRUE -> shuffle sample/condition assignments first
    ## NOTE(review): a single-interactor hub would make inMatrix a vector and
    ## break nrow(); presumably prevented upstream by the hubSize filter.
    numInteractors = nrow(inMatrix)
    avgDiffLabels = NULL
    ## One column of pairwise correlations per condition
    corVectMatrix = matrix(0, nrow=nrow(inMatrix), ncol=length(sampleGroups))
    labelList = getSampleIndexes(sampleGroups, colnames(inMatrix), permuteLabels)
    if(corType == "TCC") { ## Taylor's CC
        ## Hard-coded 1:2 is consistent with checkAssociation(), which
        ## restricts TCC to exactly two conditions.
        for(i in 1:2) corVectMatrix[, i] = interactomeTaylorCorrelation(inVect, inMatrix, labelList[[i]])
    }
    if(corType == "PCC" || corType == "FSTAT") { ## Pearsons CC
        for(i in 1:length(sampleGroups)) corVectMatrix[, i] = interactomePearsonCorrelation(inVect[labelList[[i]]], inMatrix[, labelList[[i]]])
    }
    if(corType == "TCC" || corType == "PCC") {
        ## Average absolute change in correlation between the two conditions.
        ## NOTE(review): divides by (numInteractors - 1), not numInteractors --
        ## confirm this normalization is intentional.
        avgDiffLabels = sum(abs(corVectMatrix[, 1] - corVectMatrix[, 2]))
        avgDiffLabels = avgDiffLabels/(numInteractors - 1)
    }
    if(corType == "FSTAT") avgDiffLabels = getFStat(corVectMatrix)
    if(corInfo) return(list(avgData = avgDiffLabels, corMatrix = corVectMatrix))
    return(avgDiffLabels)
}
#######################################################################
## Obtain the Taylor's CC for all interactors for a given condition
##
## Input
## hubVector: Vector of expression value for the hub
## geneMatrix: Matrix of expression values for the interactors
## currentLabel: Column indexes corresponding to a condition
##
## Output
## A vector of CC for all hub-interactor pairs
#######################################################################
interactomeTaylorCorrelation = function(hubVector, geneMatrix, currentLabel) {
    ## Taylor's correlation of the hub profile against every interactor
    ## (one matrix row each), restricted to the samples in `currentLabel`.
    rowTaylor = function(interactorExpr) getTaylorCor(interactorExpr, hubVector, currentLabel)
    return(apply(geneMatrix, 1, rowTaylor))
}
#######################################################################
## Obtain the Taylor's CC for a given hub-interactor pair
##
## Input
## x: Vector of expression value for the interactor
## hubVector: Vector of expression value for the hub
## currentLabels: Column indexes corresponding to a condition
##
## Output
## Taylor's CC
#######################################################################
getTaylorCor = function(x, hubVector, currentLabels) {
    ## Taylor-style correlation for one hub-interactor pair within one
    ## condition (the sample indexes in `currentLabels`).
    ## NOTE: centering uses the mean over ALL samples while the standard
    ## deviations use only the condition's samples -- reproduced as-is.
    conditionSize = length(currentLabels)
    scaleFactor = (conditionSize - 1) * sd(hubVector[currentLabels]) * sd(x[currentLabels])
    centeredInteractor = x[currentLabels] - mean(x)
    centeredHub = hubVector[currentLabels] - mean(hubVector)
    sum(centeredInteractor * centeredHub) / scaleFactor
}
#######################################################################
## Obtain the Pearsons CC for all interactors for a given condition
##
## Input
## hubVector: Vector of expression value for the hub
## geneMatrix: Matrix of expression values for the interactors
##
## Output
## A vector of CC for all hub-interactor pairs
#######################################################################
interactomePearsonCorrelation = function(hubVector, geneMatrix) {
    ## Pearson correlation of the hub profile against every interactor row.
    return(apply(geneMatrix, 1, function(interactorExpr) getPearsonCor(interactorExpr, hubVector)))
}
#######################################################################
## Obtain the Pearsons CC for a given hub-interactor pair
##
## Input
## x: Vector of expression value for the interactor
## inVect: Vector of expression value for the hub
##
## Output
## Pearsons CC
#######################################################################
getPearsonCor = function(x, inVect) {
    ## Pearson correlation coefficient for one hub-interactor pair
    ## (method made explicit; "pearson" is cor()'s default).
    stats::cor(x, inVect, method = "pearson")
}
#######################################################################
## Check that the association measure is appropriate for analyzing the
## number of conditions in the dataset
##
## Input
## uniqueLabels: Vector of unique conditions in the dataset
## assocType: Type of association
##
## Output
## TRUE/FALSE value. If the assocType is "TCC" or "PCC", then
## the number of unique conditions has to be two. Otherwise,
## the number of unique conditions must be > 2
#######################################################################
checkAssociation = function(uniqueLabels, assocType) {
    ## Validate that the association measure matches the number of
    ## conditions: exactly two conditions -> "TCC" or "PCC"; more than
    ## two -> "FSTAT".
    ##
    ## Args:
    ##   uniqueLabels: vector of unique conditions in the dataset
    ##   assocType: requested association type
    ##
    ## Returns: TRUE when the combination is valid, FALSE otherwise
    ## (an explanatory message is printed on failure).
    numUniqueLabels = length(uniqueLabels)
    stopifnot(numUniqueLabels > 1)
    ## BUG FIX: the original tested is.null(match(...)), which is never TRUE
    ## because match() returns NA (not NULL) when there is no match, so any
    ## assocType was silently accepted for two-condition data. Use %in%.
    if(numUniqueLabels == 2 && !(assocType %in% c("TCC", "PCC"))) {
        print("Number of unique labels = 2. The valid options are TCC/PCC")
        return(FALSE)
    }
    if(numUniqueLabels > 2 && assocType != "FSTAT") {
        print("Number of unique labels > 2. The only valid option is FSTAT")
        return(FALSE)
    }
    return(TRUE)
}
#######################################################################
## Obtain a subset of the expression matrix corresponding to the
## conditions of interest
##
## Input
## inputMatrix: Input matrix (N x P)
## labelsOfInterest: Vector of biological conditions of interest
##
## Output
## An N x P1 matrix such that only the columns corresponding to
## the relevant conditions are retained
#######################################################################
filterMatrix = function(inputMatrix, labelsOfInterest) {
    ## Keep only the columns (samples) whose label belongs to
    ## `labelsOfInterest`, then row-standardize the reduced matrix.
    keep = colnames(inputMatrix) %in% labelsOfInterest
    return(exprDataStd(inputMatrix[, keep]))
}
#######################################################################
## Convert a gene expression matrix with multiple rows corresponding
## to the same gene into a normalized matrix with one row per gene
## Also, the gene expression values are standardized with row median
## set to 0 and row var = 1
##
## Input
## summaryMatrix: Non-standardized input matrix
##
## Output
## Standardized matrix
#######################################################################
exprDataStd = function(summaryMatrix) {
    ## Row-standardize an expression matrix: subtract each row's median and
    ## divide by each row's standard deviation, so every row has median 0
    ## and variance 1. sweep() makes the row-wise recycling explicit.
    rowMedians = apply(summaryMatrix, 1, median)
    rowSds = apply(summaryMatrix, 1, sd)
    centered = sweep(summaryMatrix, 1, rowMedians, "-")
    return(sweep(centered, 1, rowSds, "/"))
}
#######################################################################
## Determine the column indexes of samples that correspond to different
## biological conditions of interest
##
## Input
## uniqueLabels: Vector of unique conditions
## allLabels: Original labels for the various columns
## permuteLabels: TRUE/FALSE
##
## Output
## A list with each element representing the samples that correspond
## to the biological condition of interest. The order of list
## elements corresponds to uniqueLabels
#######################################################################
getSampleIndexes = function(uniqueLabels, allLabels, permuteLabels) {
    ## Map each condition in `uniqueLabels` to the column indexes of
    ## `allLabels` that belong to it. When permuteLabels is TRUE, group
    ## membership is shuffled across all samples (group sizes preserved),
    ## which is the randomization step of the permutation test.
    groupCount = length(uniqueLabels)
    observedIndexes = lapply(seq_len(groupCount),
                             function(g) which(allLabels %in% uniqueLabels[g]))
    if(!permuteLabels) return(observedIndexes)
    ## Proceed if labels are to be permuted: tag every sample with its
    ## group number, shuffle the tags, then regroup by tag.
    groupSizes = lengths(observedIndexes)
    groupTags = rep(seq_len(groupCount), groupSizes)
    shuffledTags = sample(groupTags, sum(groupSizes), replace=FALSE)
    return(lapply(seq_len(groupCount), function(g) which(shuffledTags == g)))
}
#######################################################################
## Determine the ratio of between to within sum of squares for testing
## whether there is a difference between three or more conditions
## for a given hub
##
## Input
## inputMatrix: X x Y matrix where X corresponds to the TCC/PCC
## value for hub-interactor pairs and Y denotes
## the number of conditions
##
## Output
## Ratio
#######################################################################
getFStat = function(inputMatrix) {
    ## Ratio of between-condition to within-condition sum of squares for a
    ## matrix whose rows are hub-interactor correlation values and whose
    ## columns are conditions (the >2-condition analogue of the hub diff).
    ##
    ## BUG FIX: the original assigned the within-condition sum of squares
    ## (sum of per-column variances * (nrow - 1)) to a variable named
    ## "betweenSumSquare" and therefore returned within/between -- the
    ## inverse of the documented (and conventional F-like) statistic.
    inputData = as.vector(inputMatrix)
    totalSumSquare = var(inputData) * (length(inputData) - 1)
    ## Within-condition SS: pooled squared deviations inside each column
    withinSumSquare = sum(apply(inputMatrix, 2, var) * (nrow(inputMatrix) - 1))
    ## Between-condition SS is the remainder of the total
    betweenSumSquare = totalSumSquare - withinSumSquare
    ratio = betweenSumSquare/withinSumSquare
    return(ratio)
}
|
#' getForecastAR.r
#' developed on www.alphien.com
#' predict gold future returns with AR model
#' @param pxs data points
#' @param lags the AR order
#' @param trainDataLen length of train data for one model fit
#' @param forecastStep do forecastStep-step ahead prediction for one model
#' @param rollStep interval between two model fits, =1 if no date is skipped
#' @param showGraph binary variable, whether to show (yPred, yTrue)
#'
#' @return an xts object of 4 columns
#' yPred: predicted returns
#' yTrue: true returns
#' mse: mean square error(s)
#' mae: mean absolute error(s)
#' @export
#'
#' @examples
#' # One-step ahead forecast
#' pxs = ROC(getBB("GC", start = "2019-11-11", end = "2019-11-19"), n = 1, type ="continuous", na.pad = FALSE)
#' res = getForecastAR(pxs, lags = 2, trainDataLen = 5, forecastStep = 1, rollStep = 1, showGraph = FALSE)
#'
#' # Multiple-step ahead forecast
#' pxs = ROC(getBB("GC", start = "2019-10-12", end = "2019-11-20"), n = 1, type ="continuous", na.pad = FALSE)
#' res = getForecastAR(pxs, lags = 2, trainDataLen = 5, forecastStep = 2, rollStep = 2, showGraph = TRUE)
#'
#' @seealso
#'* [https://www.alphien.com/mynotebooks/PRIMLOGIT/Library/Dongrui/AR_model_performance.ipynb Notebook that illustrates the use of this function, references are also provided]
#'* [https://www.alphien.com/mynotebooks/PRIMLOGIT/Library/Dongrui/AR_Model_Validation.ipynb Notebook that proposed some validation test cases related to this function]
getForecastAR = function(pxs, lags = 2, trainDataLen = 5, forecastStep = 1, rollStep = 1, showGraph = FALSE){
  ## NOTE(review): getBB/plotAl are Alphien-platform functions; rollapply,
  ## index and xts come from the zoo/xts packages.
  if(length(pxs)<(trainDataLen+forecastStep)){
    stop("Not enough data")
  }
  ## rollStep < forecastStep would make consecutive prediction windows
  ## overlap, producing two predictions for the same day
  if(rollStep < forecastStep){
    stop("Too small rollStep or too big forecastStep, cannot have 2 predictions for a same day")
  }
  ## Flatten the xts series into a two-column (date, return) data frame
  data=cbind(as.character(index(pxs)),data.frame(pxs))
  names(data)=c("date","y")
  # apply AR model and do forecastStep-ahead forecast on a rolling window:
  # each window = trainDataLen training points + forecastStep targets
  perfs=rollapply(data.frame(data), by=rollStep, width=(trainDataLen+forecastStep), by.column=FALSE,
                  FUN=function(df){
                    ## Fit AR(lags) by Yule-Walker on the training slice
                    ARmodel=ar(head(as.numeric(df[,2]), n=trainDataLen), aic = FALSE, order.max = lags, method = "yw", na.action = na.omit, demean = FALSE)
                    ## Last `lags` training points seed the forecast
                    Xpred = tail(head(as.numeric(df[,2]), n=trainDataLen), n=lags)
                    yPred = predict(ARmodel, Xpred, n.ahead = forecastStep, se.fit = FALSE)
                    ## Return dates, predictions, truths, and cumulative MSE/MAE
                    ## over the forecast horizon
                    return(list(df[(nrow(df)-forecastStep+1):nrow(df),"date"],
                                as.numeric(yPred),
                                as.numeric(df[(nrow(df)-forecastStep+1):nrow(df),"y"]),
                                as.numeric(cumsum((yPred - as.numeric(tail(df[,"y"], n = forecastStep)))^2)/c(1:forecastStep)),
                                as.numeric(cumsum(abs(yPred - as.numeric(tail(df[,"y"], n = forecastStep))))/c(1:forecastStep))))
                  })
  ## Reassemble the per-window lists into one xts object indexed by date
  res = xts(cbind("yPred"=unlist(perfs[,2]),
                  "yTrue"=unlist(perfs[,3]),
                  "MSE"=unlist(perfs[,4]),
                  "MAE"=unlist(perfs[,5])),
            order.by = as.POSIXct(unlist(perfs[,1])))
  # visualisation: predicted vs true returns on one chart
  if(showGraph){
    par(mfrow = c(1, 1))
    plotAl(cbind(res[,"yPred"], res[,"yTrue"]),
           color = c("#8DD3C7", "#BEBADA"),
           title = "AR(2) model",
           legendPlace = "bottom")
  }
  return(res)
}
| /getForecastAR.r | no_license | geng-lee/Gold-Future-Returns-Forecast | R | false | false | 3,487 | r | #' getForecastAR.r
#' developped on www.alphien.com
#' predict gold future returns with AR model
#' @param pxs data points
#' @param lags the AR order
#' @param trainDataLen length of train data for one model fit
#' @param forecastStep do forecastStep-step ahead prediction for one model
#' @param rollStep interval between two model fits, =1 if no date is skipped
#' @param showGraph binary variable, whether to show (yPred, yTrue)
#'
#' @return an xts object of 4 columns
#' yPred: predicted returns
#' yTrue: true returns
#' mse: mean square error(s)
#' mae: mean absolute error(s)
#' @export
#'
#' @examples
#' one-step ahead forecast
#' pxs = ROC(getBB("GC", start = "2019-11-11", end = "2019-11-19"), n = 1, type ="continuous", na.pad = FALSE)
#' res = getForecastAR(pxs, lags = 2, trainDataLen = 5, forecastStep = 1, rollStep = 1, showGraph = FALSE)
#'
#' multiple-step ahead forecast
#' pxs = ROC(getBB("GC", start = "2019-10-12", end = "2019-11-20"), n = 1, type ="continuous", na.pad = FALSE)
#' res = getForecastAR(pxs, lags = 2, trainDataLen = 5, forecastStep = 2, rollStep = 2, showGraph = TRUE)
#'
#' @seealso
#'* [https://www.alphien.com/mynotebooks/PRIMLOGIT/Library/Dongrui/AR_model_performance.ipynb Notebook that illustrates the use of this function, references are also provided]
#'* [https://www.alphien.com/mynotebooks/PRIMLOGIT/Library/Dongrui/AR_Model_Validation.ipynb Notebook that proposed some validation test cases related to this function]
getForecastAR = function(pxs, lags = 2, trainDataLen = 5, forecastStep = 1, rollStep = 1, showGraph = FALSE){
  ## Rolling AR(lags) forecaster: fits on trainDataLen points, predicts the
  ## next forecastStep points, advancing rollStep points per refit. Returns
  ## an xts of yPred/yTrue/MSE/MAE. NOTE(review): getBB/plotAl are
  ## Alphien-platform functions; rollapply/index/xts come from zoo/xts.
  if(length(pxs)<(trainDataLen+forecastStep)){
    stop("Not enough data")
  }
  ## rollStep < forecastStep would make consecutive prediction windows
  ## overlap, producing two predictions for the same day
  if(rollStep < forecastStep){
    stop("Too small rollStep or too big forecastStep, cannot have 2 predictions for a same day")
  }
  ## Flatten the xts series into a two-column (date, return) data frame
  data=cbind(as.character(index(pxs)),data.frame(pxs))
  names(data)=c("date","y")
  # apply AR model and do forecastStep-ahead forecast on a rolling window:
  # each window = trainDataLen training points + forecastStep targets
  perfs=rollapply(data.frame(data), by=rollStep, width=(trainDataLen+forecastStep), by.column=FALSE,
                  FUN=function(df){
                    ## Fit AR(lags) by Yule-Walker on the training slice
                    ARmodel=ar(head(as.numeric(df[,2]), n=trainDataLen), aic = FALSE, order.max = lags, method = "yw", na.action = na.omit, demean = FALSE)
                    ## Last `lags` training points seed the forecast
                    Xpred = tail(head(as.numeric(df[,2]), n=trainDataLen), n=lags)
                    yPred = predict(ARmodel, Xpred, n.ahead = forecastStep, se.fit = FALSE)
                    ## Return dates, predictions, truths, and cumulative MSE/MAE
                    return(list(df[(nrow(df)-forecastStep+1):nrow(df),"date"],
                                as.numeric(yPred),
                                as.numeric(df[(nrow(df)-forecastStep+1):nrow(df),"y"]),
                                as.numeric(cumsum((yPred - as.numeric(tail(df[,"y"], n = forecastStep)))^2)/c(1:forecastStep)),
                                as.numeric(cumsum(abs(yPred - as.numeric(tail(df[,"y"], n = forecastStep))))/c(1:forecastStep))))
                  })
  ## Reassemble the per-window lists into one xts object indexed by date
  res = xts(cbind("yPred"=unlist(perfs[,2]),
                  "yTrue"=unlist(perfs[,3]),
                  "MSE"=unlist(perfs[,4]),
                  "MAE"=unlist(perfs[,5])),
            order.by = as.POSIXct(unlist(perfs[,1])))
  # visualisation: predicted vs true returns on one chart
  if(showGraph){
    par(mfrow = c(1, 1))
    plotAl(cbind(res[,"yPred"], res[,"yTrue"]),
           color = c("#8DD3C7", "#BEBADA"),
           title = "AR(2) model",
           legendPlace = "bottom")
  }
  return(res)
}
|
## Read focal-observation files (project helper; data.table-based) and split
## every observation's event stream at a fixed time cutoff, yielding two
## pseudo-observations (".1" before, ".2" after) per original observation.
obsdat <- readfocfiles(fpath,c("F","F","HH","R"))
obs <- obsdat[,unique(Observation)]
cutoff <- 310
splitobsdat <- list()
for (i in 1:length(obs)) splitobsdat[[i]] <- splitstate(obsdat[Observation==obs[i]],cutoff) #function at bottom
splitobsdat <- do.call(rbind,splitobsdat)
## Relabel the two halves and rebase the post-cutoff timestamps to zero
splitobsdat[RelativeEventTime>=cutoff,Observation:=paste0(Observation,".2")]
splitobsdat[RelativeEventTime<cutoff,Observation:=paste0(Observation,".1")]
splitobsdat[RelativeEventTime>=cutoff,RelativeEventTime:=RelativeEventTime-cutoff]
## Ethograms: point and state behaviors, with some behaviors excluded
ptetho <- defaultpoint2()[type!="misc" & !(behavior%in%c("Vigilnce","PsCnTerm","GrmTerm","GrmPrsnt"))]
stetho <- defaultstate2()[type!="misc" & state!="Corral"]
Y <- collectfocal(splitobsdat,ptetho,stetho,state.maxsec = 320)
#resume R2julia here
## Drop rare point behaviors (present in <0.5% of rows), baseline states and
## the two scan-proximity columns.
## NOTE(review): `.SD=` relies on R's partial matching of data.table's
## `.SDcols` argument; and filt[-c(1,5)] re-keeps two columns by position,
## which is fragile -- confirm both.
filt <- Y[,lapply(.SD,function(x) mean(x>0)),.SD=eventslices(names(Y),ptetho)] > 0.005
filt <- colnames(filt)[!filt] %>% c(stetho[baseline==T,behavior]) %>% unique()
filt <- c(filt,"ScanProx","ScanProxInd")
filt <- filt[-c(1,5)]
Y <- Y[,-filt,with=F]
## Fit a K=10 latent-class model by MAP optimization (rstan) from 8 random
## starts in parallel.
## NOTE(review): foreach() iteration arguments are normally named
## (e.g. i = 1:8); confirm this unnamed form runs as intended.
dat <- list(n=nrow(Y),K=10,B=ncol(Y)-ncovcols,Bs=sapply(Y[,-c(1:ncovcols),with=F],max),Y=as.matrix(Y[,-c(1:ncovcols),with=F]),alpha_p=1,alpha_t=1)
foo3 <- foreach(1:8) %dopar% { library(gtools); library(rstan)
  ## Random start: Dirichlet class weights + empirical category frequencies,
  ## jittered multiplicatively and floored at 0.01
  init <- list(pi=gtools::rdirichlet(1,alpha = rep(1,dat$K)) %>% as.vector(),
               theta_raw=sapply(Y[,-(1:ncovcols),with=F],function(x) table(x) %>% prop.table) %>% unlist() %>% matrix(nrow=dat$K,ncol=sum(dat$Bs),byrow = T))
  init$theta_raw <- init$theta_raw * pmax(1-rnorm(length(init$theta_raw),sd=0.5),0.01)
  moo <- optimizing(topetho,dat,verbose=T,init=init,as_vector=F,iter=500)
  return(moo)
}
splitstate <- function(obsdat,cutoff=330) {
  ## Split state (duration) events that straddle `cutoff` into two records:
  ## the part before the cutoff and the remainder starting at the cutoff.
  ## `obsdat` is a data.table; `:=` modifies it by reference.
  ## Rows whose interval [RelativeEventTime, RelativeEventTime + Duration)
  ## contains the cutoff
  target <- obsdat[,RelativeEventTime < cutoff & (RelativeEventTime+Duration)>cutoff & Duration>0]
  ## Copy of the straddling rows, trimmed to the post-cutoff remainder
  repacts <- obsdat[target==T]
  repacts[,Duration:=Duration-cutoff+RelativeEventTime]
  repacts[,RelativeEventTime:=cutoff]
  ## NOTE(review): this := also mutates the caller's data.table in place
  obsdat[target,Duration:=cutoff-RelativeEventTime]
  newdat <- rbind(obsdat,repacts)
  setkey(newdat,"RelativeEventTime")
  return(newdat)
}
| /splitobstest.R | no_license | thewart/LatentSocialPheno | R | false | false | 2,049 | r | obsdat <- readfocfiles(fpath,c("F","F","HH","R"))
obs <- obsdat[,unique(Observation)]
cutoff <- 310
splitobsdat <- list()
for (i in 1:length(obs)) splitobsdat[[i]] <- splitstate(obsdat[Observation==obs[i]],cutoff) #function at bottom
splitobsdat <- do.call(rbind,splitobsdat)
## Relabel the two halves and rebase the post-cutoff timestamps to zero
splitobsdat[RelativeEventTime>=cutoff,Observation:=paste0(Observation,".2")]
splitobsdat[RelativeEventTime<cutoff,Observation:=paste0(Observation,".1")]
splitobsdat[RelativeEventTime>=cutoff,RelativeEventTime:=RelativeEventTime-cutoff]
## Ethograms: point and state behaviors, with some behaviors excluded
ptetho <- defaultpoint2()[type!="misc" & !(behavior%in%c("Vigilnce","PsCnTerm","GrmTerm","GrmPrsnt"))]
stetho <- defaultstate2()[type!="misc" & state!="Corral"]
Y <- collectfocal(splitobsdat,ptetho,stetho,state.maxsec = 320)
#resume R2julia here
## Drop rare point behaviors (present in <0.5% of rows), baseline states and
## the two scan-proximity columns.
## NOTE(review): `.SD=` relies on partial matching of data.table's `.SDcols`
## argument; filt[-c(1,5)] re-keeps two columns by position -- confirm both.
filt <- Y[,lapply(.SD,function(x) mean(x>0)),.SD=eventslices(names(Y),ptetho)] > 0.005
filt <- colnames(filt)[!filt] %>% c(stetho[baseline==T,behavior]) %>% unique()
filt <- c(filt,"ScanProx","ScanProxInd")
filt <- filt[-c(1,5)]
Y <- Y[,-filt,with=F]
## Fit a K=10 latent-class model by MAP optimization (rstan) from 8 random
## starts in parallel. NOTE(review): foreach() iteration arguments are
## normally named (e.g. i = 1:8); confirm this unnamed form runs.
dat <- list(n=nrow(Y),K=10,B=ncol(Y)-ncovcols,Bs=sapply(Y[,-c(1:ncovcols),with=F],max),Y=as.matrix(Y[,-c(1:ncovcols),with=F]),alpha_p=1,alpha_t=1)
foo3 <- foreach(1:8) %dopar% { library(gtools); library(rstan)
  ## Random start: Dirichlet class weights + jittered empirical frequencies
  init <- list(pi=gtools::rdirichlet(1,alpha = rep(1,dat$K)) %>% as.vector(),
               theta_raw=sapply(Y[,-(1:ncovcols),with=F],function(x) table(x) %>% prop.table) %>% unlist() %>% matrix(nrow=dat$K,ncol=sum(dat$Bs),byrow = T))
  init$theta_raw <- init$theta_raw * pmax(1-rnorm(length(init$theta_raw),sd=0.5),0.01)
  moo <- optimizing(topetho,dat,verbose=T,init=init,as_vector=F,iter=500)
  return(moo)
}
splitstate <- function(obsdat,cutoff=330) {
  ## Split state (duration) events that straddle `cutoff` into two records:
  ## the part before the cutoff and the remainder starting at the cutoff.
  ## `obsdat` is a data.table; `:=` modifies it by reference.
  target <- obsdat[,RelativeEventTime < cutoff & (RelativeEventTime+Duration)>cutoff & Duration>0]
  ## Copy of the straddling rows, trimmed to the post-cutoff remainder
  repacts <- obsdat[target==T]
  repacts[,Duration:=Duration-cutoff+RelativeEventTime]
  repacts[,RelativeEventTime:=cutoff]
  ## NOTE(review): this := also mutates the caller's data.table in place
  obsdat[target,Duration:=cutoff-RelativeEventTime]
  newdat <- rbind(obsdat,repacts)
  setkey(newdat,"RelativeEventTime")
  return(newdat)
}
|
make_species_biomass_relationship_aerial_insects <- function(){
    ## Estimate aerial-insect biomass per ring and sampling run: per-group
    ## mean individual weights from pitfall + suction samples are applied to
    ## aerial abundance counts. Returns a data frame (Date, Ring, weight[g]).
    ## NOTE(review): summaryBy() comes from the doBy package.
    download_insect_data()
    ### pitfall samples to collect ground-dwelling arthropods
    myDF1 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-2_L1_20131101-20150114.csv")
    ## suction sampling to collect understorey arthropods
    myDF2 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-3_L1_20131101-20150114.csv")
    ## aerial samples
    ## NOTE(review): the doubled ".csv.csv" extension looks suspicious --
    ## confirm it matches the actual downloaded file name.
    myDF3 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-5_L1_20130930-20141121.csv.csv")
    ## Harmonize the two ground datasets to a common column set and naming
    myDF1 <- myDF1[,c("RUN", "RING", "PLOT", "GROUP", "ABUNDANCE", "WEIGHT.MG.")]
    myDF2 <- myDF2[,c("Run", "Ring", "Plot", "Group", "Abundance", "Weight.mg.")]
    colnames(myDF1) <- colnames(myDF2) <- c("Run", "Ring", "Plot", "Group", "Abundance", "Weight.mg.")
    ## add collection method and stack the two ground datasets
    myDF1$Method <- "pitfall"
    myDF2$Method <- "suction"
    myDF <- rbind(myDF1, myDF2)
    ### calculate individual mass (mg per individual) for each record
    myDF$weight_individual <- myDF$Weight.mg. / myDF$Abundance
    # average individual mass across taxonomic groups
    myDF.mass <- summaryBy(weight_individual~Group, FUN=mean, data=myDF, keep.names=T, na.rm=T)
    # add individual mass information onto the aerial dataset by group
    myDF.merge <- merge(myDF3, myDF.mass, by.x = c("GROUP"), by.y = c("Group"), all.x=T)
    # fill NA individual weights with the overall mean across all groups
    m.value <- mean(myDF.mass$weight_individual, na.rm=T)
    myDF.merge$weight_individual <- ifelse(is.na(myDF.merge$weight_individual), m.value, myDF.merge$weight_individual)
    # convert into total mass per collection, and convert into g from mg
    myDF.merge$weight <- myDF.merge$weight_individual * myDF.merge$ABUNDANCE / 1000
    # sum all insects at each height and direction within a ring together
    myDF.sum <- summaryBy(weight~RUN+RING, FUN=sum, data=myDF.merge, na.rm=T, keep.names=T)
    ## RUN is a month-year code (e.g. "Sep-13"); prepend day 01 and parse
    myDF.sum$Date <- paste0("01-", as.character(myDF.sum$RUN))
    myDF.sum$Date <- gsub("-", "/", myDF.sum$Date)
    myDF.sum$Date <- as.Date(myDF.sum$Date, format="%d/%b/%y")
    myDF.sum$Method <- "aerial"
    out <- myDF.sum[,c("Date", "RING", "weight")]
    colnames(out) <- c("Date", "Ring", "weight")
    ### return
    return(out)
}
| /modules/insect_pool/make_species_biomass_relationship_aerial_insect.R | no_license | mingkaijiang/EucFACE_Carbon_Budget | R | false | false | 2,185 | r | make_species_biomass_relationship_aerial_insects <- function(){
    ## (Duplicate copy) Aerial-insect biomass per ring/run: per-group mean
    ## individual weights from pitfall + suction samples applied to aerial
    ## abundance counts. NOTE(review): summaryBy() is from the doBy package.
    download_insect_data()
    ### pitfall samples to collect ground-dwelling arthropods
    myDF1 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-2_L1_20131101-20150114.csv")
    ## suction sampling to collect understorey arthropods
    myDF2 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-3_L1_20131101-20150114.csv")
    ## aerial samples; NOTE(review): confirm the doubled ".csv.csv" extension
    myDF3 <- read.csv("download/FACE_P0051_RA_ARTHROPODS-5_L1_20130930-20141121.csv.csv")
    ## Harmonize the two ground datasets to a common column set and naming
    myDF1 <- myDF1[,c("RUN", "RING", "PLOT", "GROUP", "ABUNDANCE", "WEIGHT.MG.")]
    myDF2 <- myDF2[,c("Run", "Ring", "Plot", "Group", "Abundance", "Weight.mg.")]
    colnames(myDF1) <- colnames(myDF2) <- c("Run", "Ring", "Plot", "Group", "Abundance", "Weight.mg.")
    ## add collection method and stack the two ground datasets
    myDF1$Method <- "pitfall"
    myDF2$Method <- "suction"
    myDF <- rbind(myDF1, myDF2)
    ### calculate individual mass (mg per individual) for each record
    myDF$weight_individual <- myDF$Weight.mg. / myDF$Abundance
    # average individual mass across taxonomic groups
    myDF.mass <- summaryBy(weight_individual~Group, FUN=mean, data=myDF, keep.names=T, na.rm=T)
    # add individual mass information onto the aerial dataset by group
    myDF.merge <- merge(myDF3, myDF.mass, by.x = c("GROUP"), by.y = c("Group"), all.x=T)
    # fill NA individual weights with the overall mean across all groups
    m.value <- mean(myDF.mass$weight_individual, na.rm=T)
    myDF.merge$weight_individual <- ifelse(is.na(myDF.merge$weight_individual), m.value, myDF.merge$weight_individual)
    # convert into total mass per collection, and convert into g from mg
    myDF.merge$weight <- myDF.merge$weight_individual * myDF.merge$ABUNDANCE / 1000
    # sum all insects at each height and direction within a ring together
    myDF.sum <- summaryBy(weight~RUN+RING, FUN=sum, data=myDF.merge, na.rm=T, keep.names=T)
    ## RUN is a month-year code (e.g. "Sep-13"); prepend day 01 and parse
    myDF.sum$Date <- paste0("01-", as.character(myDF.sum$RUN))
    myDF.sum$Date <- gsub("-", "/", myDF.sum$Date)
    myDF.sum$Date <- as.Date(myDF.sum$Date, format="%d/%b/%y")
    myDF.sum$Method <- "aerial"
    out <- myDF.sum[,c("Date", "RING", "weight")]
    colnames(out) <- c("Date", "Ring", "weight")
    ### return
    return(out)
}
|
setwd('/Users/rita-gaofei/Desktop/2020_Genotype_survival/shiny_geno')
library(shiny)
source('ui.R')
source('server.R')
# Create Shiny app ----
shinyApp(ui = ui, server = server) | /shiny_geno/app.R | no_license | SQ206/BiCens_Fam_Genorisk | R | false | false | 178 | r | setwd('/Users/rita-gaofei/Desktop/2020_Genotype_survival/shiny_geno')
library(shiny)
source('ui.R')
source('server.R')
# Create Shiny app ----
shinyApp(ui = ui, server = server) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_tbl.R
\name{as.data.frame.psd_lst}
\alias{as.data.frame.psd_lst}
\title{Convert a psd_lst to a (base) data frame.}
\usage{
\method{as.data.frame}{psd_lst}(x, row.names = NULL, optional = FALSE, ...)
}
\arguments{
\item{x}{A \code{psd_lst} object.}
\item{row.names}{\code{NULL} or a character vector giving the row
names for the data frame. Missing values are not allowed.}
\item{optional}{logical. If \code{TRUE}, setting row names and
converting column names (to syntactic names: see
\code{\link[base]{make.names}}) is optional. Note that all of \R's
\pkg{base} package \code{as.data.frame()} methods use
\code{optional} only for column names treatment, basically with the
meaning of \code{\link[base]{data.frame}(*, check.names = !optional)}.
See also the \code{make.names} argument of the \code{matrix} method.}
\item{...}{Additional arguments to be passed to or from other methods.}
}
\value{
A data.frame.
}
\description{
Convert a psd_lst to a (base) data frame.
}
\seealso{
Other tibble:
\code{\link{as_tibble.eeg_lst}()},
\code{\link{as_tibble.psd_lst}()}
}
\concept{tibble}
| /man/as.data.frame.psd_lst.Rd | permissive | bnicenboim/eeguana | R | false | true | 1,198 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_tbl.R
\name{as.data.frame.psd_lst}
\alias{as.data.frame.psd_lst}
\title{Convert a psd_lst to a (base) data frame.}
\usage{
\method{as.data.frame}{psd_lst}(x, row.names = NULL, optional = FALSE, ...)
}
\arguments{
\item{x}{A \code{psd_lst} object.}
\item{row.names}{\code{NULL} or a character vector giving the row
names for the data frame. Missing values are not allowed.}
\item{optional}{logical. If \code{TRUE}, setting row names and
converting column names (to syntactic names: see
\code{\link[base]{make.names}}) is optional. Note that all of \R's
\pkg{base} package \code{as.data.frame()} methods use
\code{optional} only for column names treatment, basically with the
meaning of \code{\link[base]{data.frame}(*, check.names = !optional)}.
See also the \code{make.names} argument of the \code{matrix} method.}
\item{...}{Additional arguments to be passed to or from other methods.}
}
\value{
A data.frame.
}
\description{
Convert a psd_lst to a (base) data frame.
}
\seealso{
Other tibble:
\code{\link{as_tibble.eeg_lst}()},
\code{\link{as_tibble.psd_lst}()}
}
\concept{tibble}
|
## Initialisation ======================================================================
#--- Load required libraries
library(shiny)
library(MASS)
library(lazyeval)
library(tidyr)
library(dplyr)
library(purrr)
library(broom)
library(ggplot2)
library(RColorBrewer)
library(reshape2)
#--- Set the initial values
source("Helper UI Functions.R")
source("Helper Standard Atmosphere.R")
source("Helper Main Functions.R")
#--- Set initial display options
theme_set(theme_linedraw())
options(scipen = 10)
## Server ======================================================================
shinyServer(function(session, input, output) {
## Non-Reactive ======================================================================
output$SpecificationsTable <- renderDataTable({
specifications[,2:3]
})
## Input Values ======================================================================
#--- Allow a user to upload their inputs
inputdata <- reactive({
infile <- input$uploadData
if (is.null(infile)) return(NULL)
read.csv(infile$datapath)
})
#--- Change ONLY when a new file is uploaded
observe({
#--- Update the input values
inputdatavars <- inputdata()
if (is.null(inputdatavars)) return(NULL)
updateNumericInput(session, "S", value = inputdatavars$S)
updateNumericInput(session, "b", value = inputdatavars$b)
updateNumericInput(session, "AR", value = inputdatavars$AR)
updateNumericInput(session, "e", value = inputdatavars$e)
updateNumericInput(session, "K", value = inputdatavars$K)
updateNumericInput(session, "Cd0", value = inputdatavars$Cd0)
updateNumericInput(session, "Clclean", value = inputdatavars$Clclean)
updateNumericInput(session, "Clflaps", value = inputdatavars$Clflaps)
updateNumericInput(session, "Clhls", value = inputdatavars$Clhls)
updateNumericInput(session, "m", value = inputdatavars$m)
updateNumericInput(session, "W", value = inputdatavars$W)
updateNumericInput(session, "WS", value = inputdatavars$WS)
updateNumericInput(session, "P0eng", value = inputdatavars$P0eng)
updateNumericInput(session, "P0", value = inputdatavars$P0)
updateNumericInput(session, "Etatotal", value = inputdatavars$Etatotal)
updateNumericInput(session, "alt_s", value = inputdatavars$alt_s)
updateNumericInput(session, "ClG", value = inputdatavars$ClG)
updateNumericInput(session, "Cd0G", value = inputdatavars$Cd0G)
updateNumericInput(session, "hground", value = inputdatavars$hground)
# End Observe
})
#--- Change whenever ANY input is changed
observe({
#--- Make calculations in the input boxes
if (!is.na(input$S) & !is.na(input$AR) & input$S*input$AR != 0)
updateNumericInput(session, "b", value = sqrt(input$AR * input$S))
if (!is.na(input$e) & !is.na(input$AR) & input$e*input$AR != 0)
updateNumericInput(session, "K", value = 1/(pi * input$AR * input$e))
if (!is.na(input$m) & input$m != 0)
updateNumericInput(session, "W", value = input$m * 9.8065)
if (!is.na(input$W) & !is.na(input$S) & input$W * input$S != 0)
updateNumericInput(session, "WS", value = input$W/input$S)
if (!is.na(input$P0eng) & input$P0eng != 0)
updateNumericInput(session, "P0", value = input$P0eng * 2)
#--- Store the inputs as a dataframe
inputvals <-
data.frame(S = input$S, b = input$b, AR = input$AR, e = input$e, K = input$K,
Cd0 = input$Cd0, Clclean = input$Clclean, Clflaps = input$Clflaps, Clhls = input$Clhls,
m = input$m, W = input$W, WS = input$WS,
P0eng = input$P0eng, P0 = input$P0, Etatotal = input$Etatotal, alt_s = input$alt_s,
ClG = input$ClG, Cd0G = input$Cd0G, hground = input$hground
)
#--- Allow a user to download their inputs
output$downloadData <- downloadHandler(
filename = function() {
paste(date(), ".csv", sep = "")
},
content = function(file) {
write.csv(inputvals, file)
}
)
# End Observe
})
## Calculations ======================================================================
#--- Change whenever ANY input is changed
observe({
#--- Store the inputs as a dataframe
inputvals <-
data.frame(S = input$S, b = input$b, AR = input$AR, e = input$e, K = input$K,
Cd0 = input$Cd0, Clclean = input$Clclean, Clflaps = input$Clflaps, Clhls = input$Clhls,
m = input$m, W = input$W, WS = input$WS,
P0eng = input$P0eng, P0 = input$P0, Etatotal = input$Etatotal, alt_s = input$alt_s,
ClG = input$ClG, Cd0G = input$Cd0G, hground = input$hground
)
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Computing:", value = 0)
# Close the progress when this reactive exits (even if there's an error)
on.exit(progress$close())
# Create a callback function to update progress.
updateProgress <- function(value = NULL, detail = NULL) {
if (is.null(value)) {
value <- progress$getValue()
value <- value + (20 - value) / 5
}
progress$set(value = value, detail = detail)
}
MainIterationOut <- suppressWarnings(MainIterationFunction(inputvals, specifications, out = "All", updateProgress = updateProgress))
## Summary ======================================================================
output$SummaryTable <- renderDataTable({
MainIterationOut$summary
})
## AeroParams ======================================================================
AeroParams <- suppressWarnings(AeroParamsFunction(inputvals, specifications))
output$AeroParamsTable <- renderDataTable({
MainIterationOut$AeroParamsTable
})
output$AeroParamsPlot <- renderPlot({
slope = AeroParams$AeroParamsPlotPoints$Cl[3]/AeroParams$AeroParamsPlotPoints$Cd[3]
ggplot(AeroParams$AeroParamsPlotPoints,
aes(x = Cd, y = Cl, colour = type)) +
geom_abline(intercept = 0, slope = slope, colour = "green4") +
geom_line(data = AeroParams$AeroParamsPlot,
aes(x = Cd, y = Cl, colour = "Drag Polar")) +
geom_point() +
geom_text(aes(label = paste0(type, " Vinf = ", round(Vinf, 4))), hjust = 1, vjust = -0.5, show.legend = FALSE) +
scale_color_manual(values = c("Drag Polar" = "grey4", "Cruise" = "blue",
"(L/D)*" = "green3", "L^(3/2)/D" = "purple",
"Stall" = "red")) +
expand_limits(x = 0, y = 0) +
labs(list(title = "Drag Polar", x = "Coefficient of Drag", y = "Coefficient of Lift", colour = ""))
})
output$APP_info <- renderText({
paste0("click: ", xy_str(input$APP_click), "hover: ", xy_str(input$APP_hover)
)
})
## Operating Window ======================================================================
# Get required plotting parameters
nh <- input$OW_nh
nv <- input$OW_nv
maxh <- input$OW_maxh
maxv <- input$OW_maxv
# Create the plotting window
operatingwindow <-
ThrustPowerCurves(inputvals, specifications, 0, maxh, nh, 0, maxv, nv, 1, 250)
# Find plotting limits
OW_xlow <-
operatingwindow %>% arrange(Pexc) %>% select(Vinf)
OW_xupp <- head(OW_xlow, 1)[[1]] * 1.1
OW_xlow <- tail(OW_xlow, 1)[[1]] * 0.9
# Ouput a plot of the Excess Power
output$OperatingWindowPowerPlot <- renderPlot({
ggplot(operatingwindow) +
geom_point(data = filter(operatingwindow, Pexc >= 0),
aes(x = Vinf, y = h, colour = Pexc)) +
geom_path(aes(x = Vmin, y = h), colour = "red") +
geom_path(aes(x = Vmin * 1.2, y = h), colour = "orange") +
geom_path(aes(x = VmaxP, y = h), colour = "purple") +
geom_path(aes(x = Vstar, y = h), colour = "green") +
geom_path(aes(x = Vcruise, y = h), colour = "blue") +
scale_colour_gradientn(colours = brewer.pal(3, "RdYlGn"),
guide = "colourbar",
name = "Excess Power") +
xlim(0, OW_xupp)+
labs(list(title = "Excess Power and Height", x = "Vinf (m/s)", y = "Altitude (m)", colour = "Excess Power"))
})
# Ouput a plot of the Velocities
output$OperatingWindowPlot <- renderPlot({
ggplot(operatingwindow) +
geom_path(aes(x = Vmin, y = h, colour = "Stall Speed")) +
geom_path(aes(x = Vmin * 1.2, y = h, colour = "Safety Factor 1.2")) +
geom_path(aes(x = VmaxP, y = h, colour = "Maximum Speed")) +
geom_path(aes(x = Vstar, y = h, colour = "(L/D)*")) +
geom_path(aes(x = Vcruise, y = h, colour = "Cruise Specification")) +
scale_color_manual(values = c("Stall Speed" = "red", "Safety Factor 1.2" = "orange",
"Maximum Speed" = "purple", "Cruise Specification" = "blue",
"(L/D)*" = "green")) +
xlim(OW_xlow, OW_xupp) +
labs(list(title = "Velocities and Height", x = "Vinf (m/s)", y = "Altitude (m)", colour = "Velocity"))
})
## Climb ======================================================================
heights <- data.frame(type = c("Sea Level", "2nd Seg", "2nd Seg OEI", "Cruise", "Ceiling"),
h = c(0, 35*0.3048, 35*0.3048, 10000*0.3048, 12000*0.3048),
Ne = c(2, 2, 1, 2, 2))
Climb <- ClimbFunction(inputvals, specifications, heights)
Climb$type <- factor(Climb$type, levels = heights$type, ordered = TRUE)
# Graph of Percentage Gradients
output$PerGradPlot <- renderPlot({
ggplot(Climb, aes(x=Vinf, y=PerGrad, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
geom_hline(aes(yintercept = 1.5, colour = "2nd Seg OEI")) +
geom_text(aes(x = min(Vinf), y = 1.5, colour = "2nd Seg OEI"),
label = "Minimum 2nd Seg Climb OEI", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Percentage Graidents", x = "Vinf (m/s)", y = "Percentage Gradient (%)",
colour = "Mission Segment", shape = "Velocity"))
})
output$ClimbRatePlot <- renderPlot({
ggplot(Climb, aes(x=Vinf, ClimbRate / 0.3 * 60, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
geom_hline(aes(yintercept = 100, colour = "Ceiling")) +
geom_text(aes(x = min(Vinf), y = 100, colour = "Ceiling"),
label = "Minimum Ceiling Rate of Climb", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
geom_hline(aes(yintercept = 300, colour = "Cruise")) +
geom_text(aes(x = min(Vinf), y = 300, colour = "Cruise"),
label = "Minimum Cruise Rate of Climb", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Rates (Vv)", x = "Vinf (m/s)", y = "Climb Rate (ft/min)",
colour = "Mission Segment", shape = "Velocity"))
})
output$ClimbAnglePlot<- renderPlot({
ggplot(Climb, aes(x=Vinf, Theta, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Angle (Theta)", x = "Vinf (m/s)", y = "Theta (degrees)",
colour = "Mission Segment", shape = "Velocity"))
})
heightsall <- data.frame(type = seq(0, 4000, 250),
h = seq(0, 4000, 250),
Ne = 2)
Climball <- ClimbFunction(inputvals, specifications, heightsall)
output$ClimbRateAllPlot <- renderPlot({
ggplot(Climball, aes(x=Vinf, ClimbRate / 0.3 * 60, group = type, colour = type)) +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Rates At Various Altitudes (Vv)", x = "Vinf (m/s)", y = "Climb Rate (ft/min)",
colour = "Mission Segment", shape = "Velocity"))
})
## Takeoff ======================================================================
Takeoff <- MainIterationOut[c("AccelerateStop","AccelerateContinue", "AccelerateLiftoff", "BFL")]
BFL <- Takeoff$BFL
Takeoff <- data.frame(
Vinf = Takeoff$AccelerateStop$Vinf,
`AccelerateStop` = Takeoff$AccelerateStop$AccelerateStop,
`AccelerateContinue` = Takeoff$AccelerateContinue$AccelerateContinue,
`AccelerateContinueGround` = Takeoff$AccelerateContinue$AccelerateContinue - Takeoff$AccelerateContinue$`Air Distance`,
`AccelerateLiftoff` = Takeoff$AccelerateLiftoff$AccelerateLiftoff)
Takeoff <- Takeoff %>% gather(key, value, - Vinf)
Takeoff <- Takeoff %>% mutate(dist = ifelse(key == "AccelerateContinueGround", "Ground", "Ground & Air"))
# Takeoff <- filter(Takeoff, key!= "AccelerateLiftoff" & Vinf <= BFL$Vlof)
output$TakeoffFieldLengthPlot <- renderPlot({
ggplot(Takeoff, aes(x = Vinf, y = value, colour = key)) +
geom_line(aes(linetype = dist)) +
geom_text(aes(x = 0, y = as.double(tail(filter(Takeoff, key == "AccelerateLiftoff"),1)$value),
colour = "AccelerateLiftoff"),
label = "1.15 x Runway Distance for \nNormal Takeoff with 2 Engines", hjust = 0, vjust = 1.1,
show.legend = FALSE) +
geom_hline(aes(yintercept = 1200, colour = "Maximum")) +
geom_text(aes(x = 0, y = 1200, colour = "Maximum"),
label = "Maximum Runway Length", hjust = 0, vjust = -0.5,
show.legend = FALSE) +
geom_hline(aes(yintercept = BFL$BFL, colour = "BFL"), linetype = 3, show.legend = FALSE) +
geom_vline(aes(xintercept = BFL$Vinf, colour = "BFL"), linetype = 3, show.legend = FALSE) +
geom_point(data = BFL, aes(x = Vinf, y = BFL, colour = "BFL")) +
geom_text(aes(x = BFL$Vinf, y = 0, colour = "BFL"),
label = "V1", hjust = 0.5, vjust = 0.5,
show.legend = FALSE) +
geom_vline(aes(xintercept = BFL$Vlof, colour = "AccelerateLiftoff"), linetype = 6, show.legend = FALSE) +
geom_text(aes(x = BFL$Vlof, y = 0, colour = "AccelerateLiftoff"),
label = "V2", hjust = 0.5, vjust = -1.5,
show.legend = FALSE) +
labs(list(title = "Takeoff Runway Balanced Field Length",
x = "Velocity at Engine Loss (m/s)", y = "Runway Distance Required (m)",
colour = "Scenario:", linetype = "Distance:")) +
scale_linetype_manual(values = c("Ground" = 5, "Ground & Air" = 1)) +
ylim(0, NA) +
theme(legend.position = "bottom")
})
## Mission Analysis ======================================================================
output$PowerSummary <- renderPlot({
ggplot(mutate(MainIterationOut$BatteryFracs, type = factor(type, levels = type)),
aes(colour = type)) +
geom_bar(aes(type, weight = `%Wi/Wb`, colour = type)) +
theme(axis.text.x = element_text(angle = -30, hjust = 0)) +
labs(list(title = "Energy Usesage", x = "Mission Segment", y = "Percentage", colour = "Mission Segment"))
})
PlotPower <- MainIterationOut$Power %>% mutate(`D x V` = Drag * Vinf) %>% gather(key, value, -type, -R_total)
PlotPower$type <- factor(PlotPower$type, levels = unique(PlotPower$type))
PlotPower$key <- factor(PlotPower$key, levels = unique(PlotPower$key))
output$PowerFacet <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Clmax", "Cl", "Cd", "ClCd", "theta", "Power")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Mission Analysis", x = "Range", y = "", colour = "Mission Segment"))
})
## To do up better later ####
output$MissionInput <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Vinf", "h")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Input Values", x = "Range", y = "", colour = "Mission Segment"))
})
output$MissionParams <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Cl", "Cd", "ClCd", "theta","Drag", "D x V")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y", ncol = 2) +
labs(list(title = "Calculated Parameters", x = "Range", y = "", colour = "Mission Segment"))
})
output$MissionOutput <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Power", "Wb_total")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Power Usage and Total Battery Weight", x = "Range", y = "", colour = "Mission Segment"))
})
output$WeightFracs <- renderPlot({
WeightFracs <- MainIterationOut$WeightFracs
WeightFracs$Description <- factor(WeightFracs$Description, levels = WeightFracs$Description)
ggplot(WeightFracs[1:3,1:2]) + geom_bar(aes(Description, weight = Value, colour = Description)) +
geom_text(aes(x = Description, y = Value/2, label = round(Value, 4)), colour="white") +
labs(list(title = "Weight Fractions", x = "Weight", y = "Fraction", colour = "Mission Segment"))
})
#--- Allow a user to download power calcs
output$downloadPower <- downloadHandler(
filename = function() {
paste(date()," Power Calcs", ".csv", sep = "")
},
content = function(file) {
write.csv(MainIterationOut$Power, file)
}
)
output$PowerTable <- renderDataTable({
MainIterationOut$PowerSummary
})
})
# End shinyServer
})
| /Aircraft Performance Iteration App/server.R | no_license | KiranKumar-A/Aircraft-Performance | R | false | false | 19,013 | r | ## Initialisation ======================================================================
#--- Load required libraries
library(shiny)
library(MASS)
library(lazyeval)
library(tidyr)
library(dplyr)
library(purrr)
library(broom)
library(ggplot2)
library(RColorBrewer)
library(reshape2)
#--- Set the initial values
source("Helper UI Functions.R")
source("Helper Standard Atmosphere.R")
source("Helper Main Functions.R")
#--- Set initial display options
theme_set(theme_linedraw())
options(scipen = 10)
## Server ======================================================================
shinyServer(function(session, input, output) {
## Non-Reactive ======================================================================
output$SpecificationsTable <- renderDataTable({
specifications[,2:3]
})
## Input Values ======================================================================
#--- Allow a user to upload their inputs
inputdata <- reactive({
infile <- input$uploadData
if (is.null(infile)) return(NULL)
read.csv(infile$datapath)
})
#--- Change ONLY when a new file is uploaded
observe({
#--- Update the input values
inputdatavars <- inputdata()
if (is.null(inputdatavars)) return(NULL)
updateNumericInput(session, "S", value = inputdatavars$S)
updateNumericInput(session, "b", value = inputdatavars$b)
updateNumericInput(session, "AR", value = inputdatavars$AR)
updateNumericInput(session, "e", value = inputdatavars$e)
updateNumericInput(session, "K", value = inputdatavars$K)
updateNumericInput(session, "Cd0", value = inputdatavars$Cd0)
updateNumericInput(session, "Clclean", value = inputdatavars$Clclean)
updateNumericInput(session, "Clflaps", value = inputdatavars$Clflaps)
updateNumericInput(session, "Clhls", value = inputdatavars$Clhls)
updateNumericInput(session, "m", value = inputdatavars$m)
updateNumericInput(session, "W", value = inputdatavars$W)
updateNumericInput(session, "WS", value = inputdatavars$WS)
updateNumericInput(session, "P0eng", value = inputdatavars$P0eng)
updateNumericInput(session, "P0", value = inputdatavars$P0)
updateNumericInput(session, "Etatotal", value = inputdatavars$Etatotal)
updateNumericInput(session, "alt_s", value = inputdatavars$alt_s)
updateNumericInput(session, "ClG", value = inputdatavars$ClG)
updateNumericInput(session, "Cd0G", value = inputdatavars$Cd0G)
updateNumericInput(session, "hground", value = inputdatavars$hground)
# End Observe
})
#--- Change whenever ANY input is changed
observe({
#--- Make calculations in the input boxes
if (!is.na(input$S) & !is.na(input$AR) & input$S*input$AR != 0)
updateNumericInput(session, "b", value = sqrt(input$AR * input$S))
if (!is.na(input$e) & !is.na(input$AR) & input$e*input$AR != 0)
updateNumericInput(session, "K", value = 1/(pi * input$AR * input$e))
if (!is.na(input$m) & input$m != 0)
updateNumericInput(session, "W", value = input$m * 9.8065)
if (!is.na(input$W) & !is.na(input$S) & input$W * input$S != 0)
updateNumericInput(session, "WS", value = input$W/input$S)
if (!is.na(input$P0eng) & input$P0eng != 0)
updateNumericInput(session, "P0", value = input$P0eng * 2)
#--- Store the inputs as a dataframe
inputvals <-
data.frame(S = input$S, b = input$b, AR = input$AR, e = input$e, K = input$K,
Cd0 = input$Cd0, Clclean = input$Clclean, Clflaps = input$Clflaps, Clhls = input$Clhls,
m = input$m, W = input$W, WS = input$WS,
P0eng = input$P0eng, P0 = input$P0, Etatotal = input$Etatotal, alt_s = input$alt_s,
ClG = input$ClG, Cd0G = input$Cd0G, hground = input$hground
)
#--- Allow a user to download their inputs
output$downloadData <- downloadHandler(
filename = function() {
paste(date(), ".csv", sep = "")
},
content = function(file) {
write.csv(inputvals, file)
}
)
# End Observe
})
## Calculations ======================================================================
#--- Change whenever ANY input is changed
observe({
#--- Store the inputs as a dataframe
inputvals <-
data.frame(S = input$S, b = input$b, AR = input$AR, e = input$e, K = input$K,
Cd0 = input$Cd0, Clclean = input$Clclean, Clflaps = input$Clflaps, Clhls = input$Clhls,
m = input$m, W = input$W, WS = input$WS,
P0eng = input$P0eng, P0 = input$P0, Etatotal = input$Etatotal, alt_s = input$alt_s,
ClG = input$ClG, Cd0G = input$Cd0G, hground = input$hground
)
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Computing:", value = 0)
# Close the progress when this reactive exits (even if there's an error)
on.exit(progress$close())
# Create a callback function to update progress.
updateProgress <- function(value = NULL, detail = NULL) {
if (is.null(value)) {
value <- progress$getValue()
value <- value + (20 - value) / 5
}
progress$set(value = value, detail = detail)
}
MainIterationOut <- suppressWarnings(MainIterationFunction(inputvals, specifications, out = "All", updateProgress = updateProgress))
## Summary ======================================================================
output$SummaryTable <- renderDataTable({
MainIterationOut$summary
})
## AeroParams ======================================================================
AeroParams <- suppressWarnings(AeroParamsFunction(inputvals, specifications))
output$AeroParamsTable <- renderDataTable({
MainIterationOut$AeroParamsTable
})
output$AeroParamsPlot <- renderPlot({
slope = AeroParams$AeroParamsPlotPoints$Cl[3]/AeroParams$AeroParamsPlotPoints$Cd[3]
ggplot(AeroParams$AeroParamsPlotPoints,
aes(x = Cd, y = Cl, colour = type)) +
geom_abline(intercept = 0, slope = slope, colour = "green4") +
geom_line(data = AeroParams$AeroParamsPlot,
aes(x = Cd, y = Cl, colour = "Drag Polar")) +
geom_point() +
geom_text(aes(label = paste0(type, " Vinf = ", round(Vinf, 4))), hjust = 1, vjust = -0.5, show.legend = FALSE) +
scale_color_manual(values = c("Drag Polar" = "grey4", "Cruise" = "blue",
"(L/D)*" = "green3", "L^(3/2)/D" = "purple",
"Stall" = "red")) +
expand_limits(x = 0, y = 0) +
labs(list(title = "Drag Polar", x = "Coefficient of Drag", y = "Coefficient of Lift", colour = ""))
})
output$APP_info <- renderText({
paste0("click: ", xy_str(input$APP_click), "hover: ", xy_str(input$APP_hover)
)
})
## Operating Window ======================================================================
# Get required plotting parameters
nh <- input$OW_nh
nv <- input$OW_nv
maxh <- input$OW_maxh
maxv <- input$OW_maxv
# Create the plotting window
operatingwindow <-
ThrustPowerCurves(inputvals, specifications, 0, maxh, nh, 0, maxv, nv, 1, 250)
# Find plotting limits
OW_xlow <-
operatingwindow %>% arrange(Pexc) %>% select(Vinf)
OW_xupp <- head(OW_xlow, 1)[[1]] * 1.1
OW_xlow <- tail(OW_xlow, 1)[[1]] * 0.9
# Ouput a plot of the Excess Power
output$OperatingWindowPowerPlot <- renderPlot({
ggplot(operatingwindow) +
geom_point(data = filter(operatingwindow, Pexc >= 0),
aes(x = Vinf, y = h, colour = Pexc)) +
geom_path(aes(x = Vmin, y = h), colour = "red") +
geom_path(aes(x = Vmin * 1.2, y = h), colour = "orange") +
geom_path(aes(x = VmaxP, y = h), colour = "purple") +
geom_path(aes(x = Vstar, y = h), colour = "green") +
geom_path(aes(x = Vcruise, y = h), colour = "blue") +
scale_colour_gradientn(colours = brewer.pal(3, "RdYlGn"),
guide = "colourbar",
name = "Excess Power") +
xlim(0, OW_xupp)+
labs(list(title = "Excess Power and Height", x = "Vinf (m/s)", y = "Altitude (m)", colour = "Excess Power"))
})
# Ouput a plot of the Velocities
output$OperatingWindowPlot <- renderPlot({
ggplot(operatingwindow) +
geom_path(aes(x = Vmin, y = h, colour = "Stall Speed")) +
geom_path(aes(x = Vmin * 1.2, y = h, colour = "Safety Factor 1.2")) +
geom_path(aes(x = VmaxP, y = h, colour = "Maximum Speed")) +
geom_path(aes(x = Vstar, y = h, colour = "(L/D)*")) +
geom_path(aes(x = Vcruise, y = h, colour = "Cruise Specification")) +
scale_color_manual(values = c("Stall Speed" = "red", "Safety Factor 1.2" = "orange",
"Maximum Speed" = "purple", "Cruise Specification" = "blue",
"(L/D)*" = "green")) +
xlim(OW_xlow, OW_xupp) +
labs(list(title = "Velocities and Height", x = "Vinf (m/s)", y = "Altitude (m)", colour = "Velocity"))
})
## Climb ======================================================================
heights <- data.frame(type = c("Sea Level", "2nd Seg", "2nd Seg OEI", "Cruise", "Ceiling"),
h = c(0, 35*0.3048, 35*0.3048, 10000*0.3048, 12000*0.3048),
Ne = c(2, 2, 1, 2, 2))
Climb <- ClimbFunction(inputvals, specifications, heights)
Climb$type <- factor(Climb$type, levels = heights$type, ordered = TRUE)
# Graph of Percentage Gradients
output$PerGradPlot <- renderPlot({
ggplot(Climb, aes(x=Vinf, y=PerGrad, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
geom_hline(aes(yintercept = 1.5, colour = "2nd Seg OEI")) +
geom_text(aes(x = min(Vinf), y = 1.5, colour = "2nd Seg OEI"),
label = "Minimum 2nd Seg Climb OEI", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Percentage Graidents", x = "Vinf (m/s)", y = "Percentage Gradient (%)",
colour = "Mission Segment", shape = "Velocity"))
})
output$ClimbRatePlot <- renderPlot({
ggplot(Climb, aes(x=Vinf, ClimbRate / 0.3 * 60, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
geom_hline(aes(yintercept = 100, colour = "Ceiling")) +
geom_text(aes(x = min(Vinf), y = 100, colour = "Ceiling"),
label = "Minimum Ceiling Rate of Climb", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
geom_hline(aes(yintercept = 300, colour = "Cruise")) +
geom_text(aes(x = min(Vinf), y = 300, colour = "Cruise"),
label = "Minimum Cruise Rate of Climb", hjust = 0, vjust = 1.5,
show.legend = FALSE) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Rates (Vv)", x = "Vinf (m/s)", y = "Climb Rate (ft/min)",
colour = "Mission Segment", shape = "Velocity"))
})
output$ClimbAnglePlot<- renderPlot({
ggplot(Climb, aes(x=Vinf, Theta, group = type, colour = type)) +
geom_path() +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Angle (Theta)", x = "Vinf (m/s)", y = "Theta (degrees)",
colour = "Mission Segment", shape = "Velocity"))
})
heightsall <- data.frame(type = seq(0, 4000, 250),
h = seq(0, 4000, 250),
Ne = 2)
Climball <- ClimbFunction(inputvals, specifications, heightsall)
output$ClimbRateAllPlot <- renderPlot({
ggplot(Climball, aes(x=Vinf, ClimbRate / 0.3 * 60, group = type, colour = type)) +
geom_point(aes(shape = Vname, size = ifelse(Vname == "Vinf", 0, 1))) +
scale_size(range = c(0,3)) +
scale_shape_manual(values = c("Vcruise" = 1, "Vflaps" = 3, "Vinf" = 1, "Vsafe" = 0, "Vstall" = 2)) +
guides(size = FALSE) +
labs(list(title = "Climb Rates At Various Altitudes (Vv)", x = "Vinf (m/s)", y = "Climb Rate (ft/min)",
colour = "Mission Segment", shape = "Velocity"))
})
## Takeoff ======================================================================
Takeoff <- MainIterationOut[c("AccelerateStop","AccelerateContinue", "AccelerateLiftoff", "BFL")]
BFL <- Takeoff$BFL
Takeoff <- data.frame(
Vinf = Takeoff$AccelerateStop$Vinf,
`AccelerateStop` = Takeoff$AccelerateStop$AccelerateStop,
`AccelerateContinue` = Takeoff$AccelerateContinue$AccelerateContinue,
`AccelerateContinueGround` = Takeoff$AccelerateContinue$AccelerateContinue - Takeoff$AccelerateContinue$`Air Distance`,
`AccelerateLiftoff` = Takeoff$AccelerateLiftoff$AccelerateLiftoff)
Takeoff <- Takeoff %>% gather(key, value, - Vinf)
Takeoff <- Takeoff %>% mutate(dist = ifelse(key == "AccelerateContinueGround", "Ground", "Ground & Air"))
# Takeoff <- filter(Takeoff, key!= "AccelerateLiftoff" & Vinf <= BFL$Vlof)
output$TakeoffFieldLengthPlot <- renderPlot({
ggplot(Takeoff, aes(x = Vinf, y = value, colour = key)) +
geom_line(aes(linetype = dist)) +
geom_text(aes(x = 0, y = as.double(tail(filter(Takeoff, key == "AccelerateLiftoff"),1)$value),
colour = "AccelerateLiftoff"),
label = "1.15 x Runway Distance for \nNormal Takeoff with 2 Engines", hjust = 0, vjust = 1.1,
show.legend = FALSE) +
geom_hline(aes(yintercept = 1200, colour = "Maximum")) +
geom_text(aes(x = 0, y = 1200, colour = "Maximum"),
label = "Maximum Runway Length", hjust = 0, vjust = -0.5,
show.legend = FALSE) +
geom_hline(aes(yintercept = BFL$BFL, colour = "BFL"), linetype = 3, show.legend = FALSE) +
geom_vline(aes(xintercept = BFL$Vinf, colour = "BFL"), linetype = 3, show.legend = FALSE) +
geom_point(data = BFL, aes(x = Vinf, y = BFL, colour = "BFL")) +
geom_text(aes(x = BFL$Vinf, y = 0, colour = "BFL"),
label = "V1", hjust = 0.5, vjust = 0.5,
show.legend = FALSE) +
geom_vline(aes(xintercept = BFL$Vlof, colour = "AccelerateLiftoff"), linetype = 6, show.legend = FALSE) +
geom_text(aes(x = BFL$Vlof, y = 0, colour = "AccelerateLiftoff"),
label = "V2", hjust = 0.5, vjust = -1.5,
show.legend = FALSE) +
labs(list(title = "Takeoff Runway Balanced Field Length",
x = "Velocity at Engine Loss (m/s)", y = "Runway Distance Required (m)",
colour = "Scenario:", linetype = "Distance:")) +
scale_linetype_manual(values = c("Ground" = 5, "Ground & Air" = 1)) +
ylim(0, NA) +
theme(legend.position = "bottom")
})
## Mission Analysis ======================================================================
output$PowerSummary <- renderPlot({
ggplot(mutate(MainIterationOut$BatteryFracs, type = factor(type, levels = type)),
aes(colour = type)) +
geom_bar(aes(type, weight = `%Wi/Wb`, colour = type)) +
theme(axis.text.x = element_text(angle = -30, hjust = 0)) +
labs(list(title = "Energy Usesage", x = "Mission Segment", y = "Percentage", colour = "Mission Segment"))
})
PlotPower <- MainIterationOut$Power %>% mutate(`D x V` = Drag * Vinf) %>% gather(key, value, -type, -R_total)
PlotPower$type <- factor(PlotPower$type, levels = unique(PlotPower$type))
PlotPower$key <- factor(PlotPower$key, levels = unique(PlotPower$key))
output$PowerFacet <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Clmax", "Cl", "Cd", "ClCd", "theta", "Power")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Mission Analysis", x = "Range", y = "", colour = "Mission Segment"))
})
## To do up better later ####
output$MissionInput <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Vinf", "h")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Input Values", x = "Range", y = "", colour = "Mission Segment"))
})
output$MissionParams <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Cl", "Cd", "ClCd", "theta","Drag", "D x V")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y", ncol = 2) +
labs(list(title = "Calculated Parameters", x = "Range", y = "", colour = "Mission Segment"))
})
output$MissionOutput <- renderPlot({
ggplot(filter(PlotPower, key %in% c("Power", "Wb_total")),
aes(x=R_total, colour = type, width = 2)) +
geom_line(aes(y = value)) +
facet_wrap(~key, scales = "free_y") +
labs(list(title = "Power Usage and Total Battery Weight", x = "Range", y = "", colour = "Mission Segment"))
})
output$WeightFracs <- renderPlot({
WeightFracs <- MainIterationOut$WeightFracs
WeightFracs$Description <- factor(WeightFracs$Description, levels = WeightFracs$Description)
ggplot(WeightFracs[1:3,1:2]) + geom_bar(aes(Description, weight = Value, colour = Description)) +
geom_text(aes(x = Description, y = Value/2, label = round(Value, 4)), colour="white") +
labs(list(title = "Weight Fractions", x = "Weight", y = "Fraction", colour = "Mission Segment"))
})
#--- Allow a user to download power calcs
output$downloadPower <- downloadHandler(
filename = function() {
paste(date()," Power Calcs", ".csv", sep = "")
},
content = function(file) {
write.csv(MainIterationOut$Power, file)
}
)
output$PowerTable <- renderDataTable({
MainIterationOut$PowerSummary
})
})
# End shinyServer
})
|
library(svydiags)
### Name: svystdres
### Title: Standardized residuals for models fitted with complex survey
### data
### Aliases: svystdres
### Keywords: methods survey
### ** Examples
require(survey)
data(api)
# unstratified design single stage design
d0 <- svydesign(id=~1,strata=NULL, weights=~pw, data=apistrat)
m0 <- svyglm(api00 ~ ell + meals + mobility, design=d0)
svystdres(mobj=m0, stvar=NULL, clvar=NULL)
# stratified cluster design
require(NHANES)
data(NHANESraw)
dnhanes <- svydesign(id=~SDMVPSU, strata=~SDMVSTRA, weights=~WTINT2YR, nest=TRUE, data=NHANESraw)
m1 <- svyglm(BPDiaAve ~ as.factor(Race1) + BMI + AlcoholYear, design = dnhanes)
svystdres(mobj=m1, stvar= "SDMVSTRA", clvar="SDMVPSU")
| /data/genthat_extracted_code/svydiags/examples/svystdres.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 728 | r | library(svydiags)
### Name: svystdres
### Title: Standardized residuals for models fitted with complex survey
### data
### Aliases: svystdres
### Keywords: methods survey
### ** Examples
require(survey)
data(api)
# unstratified design single stage design
d0 <- svydesign(id=~1,strata=NULL, weights=~pw, data=apistrat)
m0 <- svyglm(api00 ~ ell + meals + mobility, design=d0)
svystdres(mobj=m0, stvar=NULL, clvar=NULL)
# stratified cluster design
require(NHANES)
data(NHANESraw)
dnhanes <- svydesign(id=~SDMVPSU, strata=~SDMVSTRA, weights=~WTINT2YR, nest=TRUE, data=NHANESraw)
m1 <- svyglm(BPDiaAve ~ as.factor(Race1) + BMI + AlcoholYear, design = dnhanes)
svystdres(mobj=m1, stvar= "SDMVSTRA", clvar="SDMVPSU")
|
## Plot 3: energy sub-metering (three meters) for 1-2 Feb 2007, drawn from
## the UCI "household power consumption" data set and written to plot3.png.
dataFile <- "./EdaProject1/household_power_consumption.txt"
# Full data set: ';'-separated text with '.' as the decimal separator.
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two days of interest (dates are stored as d/m/Y strings).
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Combine Date and Time into POSIXlt timestamps for the x-axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Sub-metering readings are read as character; coerce to numeric for
# plotting. (The unused Global_active_power conversion was removed: plot 3
# only shows the three sub-metering series.)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# Render to a 480x480 PNG device; closed by dev.off() after the legend.
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() | /plot3.r | no_license | SeenivasanRamamoorthy/ExData_Plotting1 | R | false | false | 894 | r | dataFile <- "./EdaProject1/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repositories.R
\name{get.repository.hooks}
\alias{get.repository.hooks}
\title{list hooks of repository}
\usage{
get.repository.hooks(owner, repo, ctx = get.github.context())
}
\arguments{
\item{owner}{the repo owner (user, org, etc)}
\item{repo}{the name of the repo}
\item{ctx}{the github context object}
}
\value{
list of hooks
}
\description{
list hooks of repository
}
| /man/get.repository.hooks.Rd | permissive | cscheid/rgithub | R | false | true | 455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repositories.R
\name{get.repository.hooks}
\alias{get.repository.hooks}
\title{list hooks of repository}
\usage{
get.repository.hooks(owner, repo, ctx = get.github.context())
}
\arguments{
\item{owner}{the repo owner (user, org, etc)}
\item{repo}{the name of the repo}
\item{ctx}{the github context object}
}
\value{
list of hooks
}
\description{
list hooks of repository
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getConvexHullRatio.R
\name{makeCHULL_plot}
\alias{makeCHULL_plot}
\title{Title}
\usage{
makeCHULL_plot(solver_traj)
}
\arguments{
\item{solver_traj}{}
}
\value{
}
\description{
Title
}
| /man/makeCHULL_plot.Rd | no_license | gero90000/MonitoringFeatures | R | false | true | 264 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getConvexHullRatio.R
\name{makeCHULL_plot}
\alias{makeCHULL_plot}
\title{Title}
\usage{
makeCHULL_plot(solver_traj)
}
\arguments{
\item{solver_traj}{}
}
\value{
}
\description{
Title
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/term_selection.R
\name{make_ngram_graph}
\alias{make_ngram_graph}
\title{Create keyword co-occurrence network with only ngrams}
\usage{
make_ngram_graph(graph, min_ngrams = 2, unigrams = FALSE)
}
\arguments{
\item{graph}{an igraph object}
\item{min_ngrams}{a number; the minimum number of words to consider an ngram}
\item{unigrams}{if TRUE, returns a subset of the network where each node is a unigram}
}
\value{
an igraph object
}
\description{
Reduces the full keyword co-occurrence network to only include nodes with 2+ words or only unigrams. This is useful for separating commonly used words from distinct phrases.
}
\examples{
make_ngram_graph(graph=litsearchr::BBWO_graph, min_ngrams=2, unigrams=FALSE)
}
| /man/make_ngram_graph.Rd | no_license | benjaminschwetz/litsearchr | R | false | true | 793 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/term_selection.R
\name{make_ngram_graph}
\alias{make_ngram_graph}
\title{Create keyword co-occurrence network with only ngrams}
\usage{
make_ngram_graph(graph, min_ngrams = 2, unigrams = FALSE)
}
\arguments{
\item{graph}{an igraph object}
\item{min_ngrams}{a number; the minimum number of words to consider an ngram}
\item{unigrams}{if TRUE, returns a subset of the network where each node is a unigram}
}
\value{
an igraph object
}
\description{
Reduces the full keyword co-occurrence network to only include nodes with 2+ words or only unigrams. This is useful for separating commonly used words from distinct phrases.
}
\examples{
make_ngram_graph(graph=litsearchr::BBWO_graph, min_ngrams=2, unigrams=FALSE)
}
|
#
# Adaptation of script for intercatch export to StoX 3
#
# Exports landings to intercatch and runs Reca for the segments where SD lines are requested.
# Needs a stox project to be set up with necessary filtering and Reca-parameterization
#
# In order to get correct metier/fleet annotations, that stox project will need landings data that is pre-processed,
# and metiers must be annotated in one of the columns in the landings format.
# This would most sensibly be annotated in the gear column, but if native gear codes are needed for Reca parameterisation another column may be abused for the purpose.
# The default option is therefore landingssite, which is not otherwise required for intercatch.
#
# In addition, the columns Usage and species must be converted to intercatch codes. This can be done in Stox, or on the StoxLandingData prior to calling exportInterCatch.
#
library(RstoxFDA)
library(RstoxData)
library(data.table)
#' Checks that `value` is one of the allowed `options`; raises an
#' informative error naming `paramname` otherwise. Returns invisibly.
checkParam <- function(paramname, value, options){
  if (value %in% options) {
    return(invisible(NULL))
  }
  stop(paste("Parameter", paramname, "must be one of", paste(options, collapse=","), ". Got:", value))
}
#' Checks that `values` contains exactly one distinct value; raises an
#' informative error naming `paramname` otherwise.
#'
#' Fixes two bugs in the original message construction: the literal string
#' "paramname" was printed instead of the `paramname` argument, and
#' `collapse` was passed to the outer `paste()` (which repeated the whole
#' message once per distinct value) instead of the inner one.
checkUnique <- function(paramname, values){
  if (length(unique(values)) > 1){
    # Join the offending values inside the inner paste() so the message
    # reads "<paramname> must be unique. Got: a,b".
    stop(paste(paramname, "must be unique. Got:", paste(unique(values), collapse=",")))
  }
}
#' Writes a single InterCatch HI (header information) line to `stream`
#' as a comma-separated record.
#' @noRd
writeHI <- function(stream,
                    Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea,
                    DepthRange="NA", UnitEffort="NA", Effort="-9", AreaQualifier="NA"){
  # Field order follows the InterCatch exchange format HI record.
  fields <- c("HI", Country, Year, SeasonType, Season, Fleet, AreaType,
              FishingArea, DepthRange, UnitEffort, Effort, AreaQualifier)
  writeLines(paste(fields, collapse=","), con=stream)
}
#' Writes a single InterCatch SI (species information) line to `stream`
#' as a comma-separated record.
#' @noRd
writeSI <- function(stream,
                    Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, Species, CatchCategory, ReportingCategory, DataToFrom, Usage, SamplesOrigin, UnitCATON, CATON,
                    OffLandings=NA, varCATON="-9", DepthRange="NA", Stock="NA", QualityFlag="NA", InfoFleet="", InfoStockCoordinator="", InfoGeneral=""){
  # Missing official landings are encoded as -9 in the exchange format.
  offl <- if (is.na(OffLandings)) "-9" else format(OffLandings, digits=2)
  # Field order follows the InterCatch exchange format SI record.
  fields <- c("SI", Country, Year, SeasonType, Season, Fleet, AreaType,
              FishingArea, DepthRange, Species, Stock, CatchCategory,
              ReportingCategory, DataToFrom, Usage, SamplesOrigin, QualityFlag,
              UnitCATON, format(CATON, digits=2), offl, varCATON, InfoFleet,
              InfoStockCoordinator, InfoGeneral)
  writeLines(paste(fields, collapse=","), con=stream)
}
#' Writes a single InterCatch SD (species distribution) line to `stream`
#' as a comma-separated record.
#' @noRd
writeSD <- function(stream,
                    Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, Species, CatchCategory, ReportingCategory, Sex, CANUMtype, AgeLength, PlusGroup, unitMeanWeight, unitCANUM, UnitAgeOrLength, UnitMeanLength, Maturity, NumberCaught, MeanWeight, MeanLength,
                    DepthRange="NA", Stock="NA", SampledCatch="-9", NumSamplesLngt="-9", NumLngtMeas="-9", NumSamplesAge="-9", NumAgeMeas="-9", varNumLanded="-9", varWgtLanded="-9", varLgtLanded="-9"){
  # Field order follows the InterCatch exchange format SD record; the
  # numeric results keep the same precision as the original export.
  fields <- c("SD", Country, Year, SeasonType, Season, Fleet, AreaType,
              FishingArea, DepthRange, Species, Stock, CatchCategory,
              ReportingCategory, Sex, CANUMtype, AgeLength, PlusGroup,
              SampledCatch, NumSamplesLngt, NumLngtMeas, NumSamplesAge,
              NumAgeMeas, unitMeanWeight, unitCANUM, UnitAgeOrLength,
              UnitMeanLength, Maturity, format(NumberCaught, digits=4),
              format(MeanWeight, digits=2), format(MeanLength, digits=2),
              varNumLanded, varWgtLanded, varLgtLanded)
  writeLines(paste(fields, collapse=","), con=stream)
}
#' Compare StoX and intercatch
#' @description
#' Reads landings data from a StoX project run and compares it with the data
#' exported for InterCatch: species codes, total weights, and the sum of
#' products (numbers-at-age times mean weight-at-age) against reported totals.
#' Results are printed to the console.
#' @param StoxLandingData StoxLandingData that the InterCatch export was based on
#' @param intercatchfile path to file with data in intercatch exchange format
checks <- function(StoxLandingData, intercatchfile){
  intercatchdata <- RstoxData::parseInterCatch(intercatchfile)

  #compare species
  cat(paste("Species StoX-Reca:", paste(unique(StoxLandingData$Landing$Species), collapse=","), "\n"))
  cat(paste("Species intercatch (IC):", paste(unique(intercatchdata$SI$Species), collapse=","), "\n"))

  #compare total weights (everything converted to tonnes)
  sis <- intercatchdata$SI
  sis$CATON[sis$UnitCATON=="kg"] <- sis$CATON[sis$UnitCATON=="kg"]/1000
  totstox <- sum(StoxLandingData$Landing$RoundWeight)/1000
  totIC <- sum(sis$CATON)
  cat("\n")
  cat(paste("Totalvekt StoX-Reca (t):", totstox, "\n"))
  cat(paste("Totalvekt IC (t):", totIC, "\n"))
  diff <- totstox - totIC
  reldiff <- diff / totstox
  cat(paste("Difference: ", format(diff, digits=2), " t (", format(reldiff*100, digits=1), "%)\n", sep=""))

  #compare sum of products with the reported SI totals
  SISD <- merge(intercatchdata$SI, intercatchdata$SD)
  # identify each SI line so CATON is only counted once per SI when summing
  SISD$SIid <- paste(SISD$Country, SISD$Year, SISD$SeasonType, SISD$Season, SISD$Fleet, SISD$AreaType, SISD$FishingArea, SISD$DepthRange, SISD$Species, SISD$Stock, SISD$CatchCategory, SISD$ReportingCategory, SISD$DataToFrom, sep="-")
  # normalise units: numbers to individuals, CATON to tonnes, weights to kg
  SISD$NumberCaught[SISD$unitCANUM=="k"] <- SISD$NumberCaught[SISD$unitCANUM=="k"]*1000
  SISD$NumberCaught[SISD$unitCANUM=="m"] <- SISD$NumberCaught[SISD$unitCANUM=="m"]*1000*1000
  SISD$CATON[SISD$UnitCATON=="kg"] <- SISD$CATON[SISD$UnitCATON=="kg"]/1000
  SISD$MeanWeight[SISD$unitMeanWeight=="g"] <- SISD$MeanWeight[SISD$unitMeanWeight=="g"]/1000
  SOP <- sum(SISD$NumberCaught*SISD$MeanWeight)
  SOPt <- SOP/1000
  total <- sum(SISD$CATON[!duplicated(SISD$SIid)])
  diffSOP <- total - SOPt
  # BUGFIX: the relative SOP difference previously reused 'diff' (the total
  # weight difference computed above) instead of 'diffSOP'.
  reldiffSOP <- diffSOP / total
  cat("\n")
  cat(paste("Total weight IC (t):", format(total, digits=2),"\n"))
  cat(paste("Total SOP IC (t):", format(SOPt, digits=2),"\n"))
  cat(paste("Difference: ", format(diffSOP, digits=2), " t (", format(reldiffSOP*100, digits=1), "%)\n", sep=""))
}
#' Export InterCatch data from a StoX project
#' @description
#' Exports landings (and, for selected fleets, Reca catch-at-age predictions)
#' from a StoX project in the InterCatch exchange format.
#' Metier annotations must be hacked into StoxLandingData somehow; provide the
#' column containing metiers in 'metierColumn'.
#' @details
#' Consult the InterCatch exchange format definitions when necessary: https://www.ices.dk/data/Documents/Intercatch/IC-ExchangeFormat1-0.pdf
#' @param StoxLandingData StoxLandingData
#' @param RecaParameterData Reca parameterization data.
#' @param exportfile file to write intercatch data to
#' @param seasonType the temporal resolution for the intercatch export, may be 'Month', 'Quarter' or 'Year'
#' @param country ISO 3166 2-alpha code for country submitting data
#' @param unitCATON unit for landings, may be kg or t.
#' @param unitCANUM unit for catch at age in numbers, may be k, m or n for thousands, millions or unit (ones) respectively
#' @param samplesOrigin information of origin of samples for SI line. See intercatch exchange format SampleOrigin.
#' @param plusGroup plus group for the SD lines (NULL means no plus group)
#' @param metierColumn the column in StoxLandingData containing metier (fleet) category for landings
#' @param icesAreaColumn column where ICES areas are annotated to the desired resolution. Area type will be inferred.
#' @param SDfleets fleets / metiers that SD lines should be exported for. NULL means all fleets, NA no fleets.
exportIntercatch <- function(StoxLandingData, RecaParameterData, exportfile, seasonType="Quarter", country="NO", unitCATON="kg", unitCANUM="n", samplesOrigin="U", plusGroup=NULL, metierColumn="LandingSite", icesAreaColumn="IcesArea", SDfleets=NULL){

  #
  # input validation
  #
  if (!all(nchar(StoxLandingData$Landing$Species)==3)){
    stop("species must be provided as FAO three letter species-code")
  }
  if (!all(StoxLandingData$Landing$Usage %in% c("I","H", NA))){
    stop("usage must be encoded as I (industrial) or H (human consumption)")
  }

  StoxLandingData$Landing$Area <- StoxLandingData$Landing[[icesAreaColumn]]

  # Infer the InterCatch AreaType from the number of dot-separated parts of
  # the ICES area code ("27"->AreaTop, "27.3"->SubArea, "27.3.a"->Div, ...).
  # The part count is computed once instead of once per area type.
  nAreaParts <- sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})
  StoxLandingData$Landing$AreaType <- as.character(NA)
  StoxLandingData$Landing$AreaType[nAreaParts==1] <- "AreaTop"
  StoxLandingData$Landing$AreaType[nAreaParts==2] <- "SubArea"
  StoxLandingData$Landing$AreaType[nAreaParts==3] <- "Div"
  StoxLandingData$Landing$AreaType[nAreaParts==4] <- "SubDiv"
  StoxLandingData$Landing$AreaType[nAreaParts==5] <- "Unit"

  if (any(is.na(StoxLandingData$Landing$AreaType))){
    stop("AreaType could not be deduced for all Areas.")
  }

  StoxLandingData$Landing$Fleet <- StoxLandingData$Landing[[metierColumn]]
  StoxLandingData$Landing$Country <- country

  checkParam("seasonType", seasonType, c("Quarter", "Month", "Year"))
  StoxLandingData$Landing$SeasonType <- seasonType
  checkParam("unitCATON", unitCATON, c("kg", "t"))

  #
  # annotate season
  #
  if (seasonType == "Quarter"){
    StoxLandingData$Landing$Season <- substr(quarters(StoxLandingData$Landing$CatchDate, T),2,2)
  }
  else if (seasonType == "Month"){
    StoxLandingData$Landing$Season <- substr(StoxLandingData$Landing$CatchDate, 6,7)
  }
  else if (seasonType == "Year"){
    # BUGFIX: the column is 'Year'; '$year' (case-sensitive) matched nothing,
    # so the Season column was never annotated for annual exports.
    StoxLandingData$Landing$Season <- StoxLandingData$Landing$Year
  }
  else{
    # unreachable: seasonType already validated by checkParam above
    stop("Error (seasonType)")
  }

  #
  # extract species code from data
  #
  if (length(unique(StoxLandingData$Landing$Species)) != 1){
    stop("Landings does not contain unique species code")
  }

  # constants for all SI lines: landings (L), reported (R), no import/export
  StoxLandingData$Landing$CatchCategory <- "L"
  StoxLandingData$Landing$ReportingCategory <- "R"
  StoxLandingData$Landing$DataToFrom <- "NA"

  StoxLandingData$Landing$UnitCATON <- unitCATON
  if (unitCATON == "kg"){
    StoxLandingData$Landing$CATON <- StoxLandingData$Landing$RoundWeight
  }
  else if (unitCATON == "t"){
    StoxLandingData$Landing$CATON <- StoxLandingData$Landing$RoundWeight / 1000
  }
  else{
    # unreachable: unitCATON already validated by checkParam above
    stop("Error UnitCATON")
  }

  # official landings are not available; writeSI encodes NA as -9
  StoxLandingData$Landing$OffLandings <- NA

  neededColumns <- c("Year", "Season", "Fleet", "Area","Country", "Species", "SeasonType", "AreaType", "CatchCategory",
                     "ReportingCategory", "DataToFrom", "Usage", "UnitCATON", "CATON", "OffLandings")
  missingColumns <- neededColumns[!(neededColumns %in% names(StoxLandingData$Landing))]
  if (length(missingColumns) > 0){
    stop(paste("Some columns that are needed for intercatch export are not annotated on landings. Missing: "), paste(missingColumns, collapse=","))
  }

  # default: export SD lines (Reca predictions) for all fleets
  if (is.null(SDfleets)){
    SDfleets <- unique(StoxLandingData$Landing[[metierColumn]])
  }
  missingFleets <- SDfleets[!is.na(SDfleets) & !(SDfleets %in% StoxLandingData$Landing[[metierColumn]])]
  if (length(missingFleets) > 0){
    stop(paste("Not all specified fleets / metiers found in landings. Missing:", paste(missingFleets, collapse=",")))
  }

  checkParam("unitCANUM", unitCANUM, c("k", "m", "n"))

  stream <- file(exportfile, open="w")
  # One HI line per year x season x fleet x area cell with landings,
  # followed by SI (and, for SDfleets, SD) lines for that cell.
  for (year in unique(StoxLandingData$Landing$Year)){ #exp 1 cat
    for (season in unique(StoxLandingData$Landing$Season)){ #exp 4 cat
      for (fleet in unique(StoxLandingData$Landing$Fleet)){ #exp many cat
        for (area in unique(StoxLandingData$Landing$Area)){ #exp many cat

          data <- StoxLandingData$Landing[ StoxLandingData$Landing$Year == year &
                                             StoxLandingData$Landing$Season == season &
                                             StoxLandingData$Landing$Fleet == fleet &
                                             StoxLandingData$Landing$Area == area,]

          # dont write lines for cells with no catch
          if (nrow(data) > 0){
            checkUnique("Country", data$Country)
            checkUnique("SeasonType", data$SeasonType)
            checkUnique("AreaType", data$AreaType)
            writeHI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area)

            for (catchCategory in unique(StoxLandingData$Landing$CatchCategory)){ #exp 1 cat
              for (reportingCategory in unique(StoxLandingData$Landing$ReportingCategory)){ #exp 1 cat
                for (dataToFrom in unique(StoxLandingData$Landing$DataToFrom)){ #exp 1 cat
                  for (species in unique(data$Species)){
                    data <- StoxLandingData$Landing[StoxLandingData$Landing$CatchCategory == catchCategory &
                                                      StoxLandingData$Landing$ReportingCategory == reportingCategory &
                                                      StoxLandingData$Landing$DataToFrom == dataToFrom &
                                                      StoxLandingData$Landing$Year == year &
                                                      StoxLandingData$Landing$Season == season &
                                                      StoxLandingData$Landing$Fleet == fleet &
                                                      StoxLandingData$Landing$Area == area &
                                                      StoxLandingData$Landing$Species == species,]
                    checkUnique("UnitCATON", data$UnitCATON)

                    #intercatch does not allow multiple usages within the variables filtered for above.
                    #extract the most common usage (by landed weight)
                    tab <- aggregate(list(w=data$RoundWeight), by=list(usage=data$Usage), FUN=function(x){sum(x, na.rm=T)})
                    tab <- tab[order(tab$w, decreasing = T),]
                    usage <- tab[1,"usage"]

                    if (!(fleet %in% SDfleets) & nrow(data)>0){
                      # no SD lines requested for this fleet: SI line only
                      writeSI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory, DataToFrom = dataToFrom, Usage = usage, SamplesOrigin = "NA", UnitCATON = data$UnitCATON[1], CATON = sum(data$CATON, na.rm=T), OffLandings = sum(data$OffLandings))
                    }
                    else if ((fleet %in% SDfleets) & nrow(data)>0){
                      message(paste("Predicting catch at age for", paste(data$Year[1], data$Season[1], data$Fleet[1], data$Area[1], collapse=",")))

                      #
                      # run Reca prediction for this cell
                      #
                      SL <- list()
                      SL$Landing <- data
                      result <- RstoxFDA::RunRecaModels(RecaParameterData, SL)

                      if (unitCANUM == "k"){
                        unit <- "10^3 individuals"
                      }
                      else if (unitCANUM == "m"){
                        unit <- "10^6 individuals"
                      }
                      else if (unitCANUM == "n"){
                        unit <- "individuals"
                      }
                      else{
                        # unreachable: unitCANUM already validated by checkParam above
                        stop("Error: unitCANUM")
                      }

                      ageMat <- RstoxFDA::ReportRecaCatchAtAge(result, PlusGroup = plusGroup, Unit = unit)
                      meanWtab <- RstoxFDA::ReportRecaWeightAtAge(result, PlusGroup = plusGroup, Unit = "g")
                      meanLtab <- RstoxFDA::ReportRecaLengthAtAge(result, PlusGroup = plusGroup, Unit = "cm")

                      #format plusgroup for report (-9 means no plus group)
                      plg <- "-9"
                      if (!is.null(plusGroup)){
                        plg <- plusGroup
                      }

                      writeSI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory, DataToFrom = dataToFrom, Usage = usage, SamplesOrigin = samplesOrigin, UnitCATON = data$UnitCATON[1], CATON = sum(data$CATON), OffLandings = sum(data$OffLandings))

                      # one SD line per age group with predicted numbers,
                      # mean weight (g) and mean length (cm)
                      for (age in ageMat$NbyAge$Age){
                        caa <- ageMat$NbyAge$CatchAtAge[ageMat$NbyAge$Age==age]
                        meanW <- meanWtab$MeanWeightByAge$MeanIndividualWeight[meanWtab$MeanWeightByAge$Age==age]
                        meanL <- meanLtab$MeanLengthByAge$MeanIndividualLength[meanLtab$MeanLengthByAge$Age==age]

                        #Sex is mandatory in the sense that the field must be filled (but accepts N=indetermined). Intercatch doc says its not mandatory
                        writeSD(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory,
                                Sex = "N", CANUMtype="Age", AgeLength = age, PlusGroup=plg, unitMeanWeight="g", unitCANUM=unitCANUM, UnitAgeOrLength="year", UnitMeanLength="cm", Maturity="NA", NumberCaught=caa, MeanWeight=meanW, MeanLength=meanL)
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  close(stream)
}
# --- Script entry point -------------------------------------------------
# Run the StoX project to obtain the filtered landings and the Reca
# parameterization, recode species/usage to InterCatch codes, export the
# InterCatch exchange file and cross-check it against the StoX landings.
stoxCalculations <- RstoxFramework::runProject("~/stoxprosjekter/testing/reca_neasaithe_2021/")
landings <- stoxCalculations$landings_FilterFishery #readRDS("landings_example.rds")
parameterization <- stoxCalculations$ParameterizeReca #readRDS("parameterization.rds")
# Code tables mapping the native landings codes to InterCatch codes
# (species -> FAO 3-alpha, usage -> I/H). strict = F tolerates unmapped usage.
speciesConversion <- readRDS("speciesConversion.rds")
usageConversion <- readRDS("usageConversion.rds")
landings$Landing$Species <- RstoxFDA::convertCodes(landings$Landing$Species, speciesConversion)
landings$Landing$Usage <- RstoxFDA::convertCodes(landings$Landing$Usage, usageConversion, strict = F)
# Export with a plus group at age 12, then compare totals/SOP afterwards.
exportIntercatch(landings, parameterization, "test.csv", plusGroup = 12)
checks(landings, "test.csv")
| /stoxReca/reports/intercatchExport/interCatchExportStox3.R | no_license | Sea2Data/FDAtools | R | false | false | 18,262 | r | #
# Adaptation of script for intercatch export to StoX 3
#
# Exports landings to intercatch and runs Reca for the segments where SD lines are requested.
# Needs a stox project to be set up with necessary filtering and Reca-parameterization
#
# In order to get correct metier/fleet annotations, that stox project will need landings data that is pre-processed,
# and metiers must be annotated in one of the columns in the landings format.
# This would most sensibly be annotated in the gear column, but if native gear codes are needed for Reca parameterisation another column may be abused for the purpose.
# The default option is therefore landingssite, which is not otherwise required for intercatch.
#
# In addition, the columns Usage and species must be converted to intercatch codes. This can be done in Stox, or on the StoxLandingData prior to calling exportInterCatch.
#
library(RstoxFDA)
library(RstoxData)
library(data.table)
#' checks if an value is among a set of options.
checkParam <- function(paramname, value, options){
if (!(value %in% options)){
stop(paste("Parameter", paramname, "must be one of", paste(options, collapse=","), ". Got:", value))
}
}
#' checks if a value is unique
checkUnique <- function(paramname, values){
if (length(unique(values))>1){
stop(paste("paramname must be unique. Got:", paste(unique(values)), collapse=","))
}
}
#' write HI line
#' @noRd
writeHI <- function(stream,
Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea,
DepthRange="NA", UnitEffort="NA", Effort="-9", AreaQualifier="NA"){
writeLines(con=stream, paste("HI", Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, DepthRange, UnitEffort, Effort, AreaQualifier, sep=","))
}
#' write SI line
#' @noRd
writeSI <- function(stream,
Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, Species, CatchCategory, ReportingCategory, DataToFrom, Usage, SamplesOrigin, UnitCATON, CATON,
OffLandings=NA, varCATON="-9", DepthRange="NA", Stock="NA", QualityFlag="NA", InfoFleet="", InfoStockCoordinator="", InfoGeneral=""){
if (is.na(OffLandings)){
OffLandings <- "-9"
}
else{
OffLandings <- format(OffLandings, digits=2)
}
writeLines(con=stream, paste("SI", Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, DepthRange, Species, Stock, CatchCategory, ReportingCategory, DataToFrom, Usage, SamplesOrigin, QualityFlag, UnitCATON, format(CATON, digits=2), OffLandings, varCATON, InfoFleet, InfoStockCoordinator, InfoGeneral, sep=","))
}
#' write SD line
#' @noRd
writeSD <- function(stream,
Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, Species, CatchCategory, ReportingCategory, Sex, CANUMtype, AgeLength, PlusGroup, unitMeanWeight, unitCANUM, UnitAgeOrLength, UnitMeanLength, Maturity, NumberCaught, MeanWeight, MeanLength,
DepthRange="NA", Stock="NA",SampledCatch="-9", NumSamplesLngt="-9", NumLngtMeas="-9", NumSamplesAge="-9", NumAgeMeas="-9", varNumLanded="-9", varWgtLanded="-9", varLgtLanded="-9"){
writeLines(con=stream, paste("SD", Country, Year, SeasonType, Season, Fleet, AreaType, FishingArea, DepthRange, Species, Stock, CatchCategory, ReportingCategory, Sex, CANUMtype, AgeLength, PlusGroup, SampledCatch, NumSamplesLngt, NumLngtMeas, NumSamplesAge, NumAgeMeas, unitMeanWeight, unitCANUM, UnitAgeOrLength, UnitMeanLength, Maturity, format(NumberCaught, digits=4), format(MeanWeight,digits=2), format(MeanLength, digits=2), varNumLanded, varWgtLanded, varLgtLanded, sep=","))
}
#' Compare StoX and intercatch
#' @description
#' Reads data from stox project and compare it with data exported for intercatch
#' @param StoxLandingData
#' @param intercatchfile path to file with data in intercatch exchange format
checks <- function(StoxLandingData, intercatchfile){
intercatchdata <- RstoxData::parseInterCatch(intercatchfile)
#compare species
cat(paste("Species StoX-Reca:", paste(unique(StoxLandingData$Landing$Species), collapse=","), "\n"))
cat(paste("Species intercatch (IC):", paste(unique(intercatchdata$SI$Species), collapse=","), "\n"))
#compare total weights
sis <- intercatchdata$SI
sis$CATON[sis$UnitCATON=="kg"] <- sis$CATON[sis$UnitCATON=="kg"]/1000
totstox <- sum(StoxLandingData$Landing$RoundWeight)/1000
totIC <- sum(sis$CATON)
cat("\n")
cat(paste("Totalvekt StoX-Reca (t):", totstox, "\n"))
cat(paste("Totalvekt IC (t):", totIC, "\n"))
diff <- totstox - totIC
reldiff <- diff / totstox
cat(paste("Difference: ", format(diff, digits=2), " t (", format(reldiff*100, digits=1), "%)\n", sep=""))
#compare sum of products
SISD <- merge(intercatchdata$SI, intercatchdata$SD)
SISD$SIid <- paste(SISD$Country, SISD$Year, SISD$SeasonType, SISD$Season, SISD$Fleet, SISD$AreaType, SISD$FishingArea, SISD$DepthRange, SISD$Species, SISD$Stock, SISD$CatchCategory, SISD$ReportingCategory, SISD$DataToFrom, sep="-")
SISD$NumberCaught[SISD$unitCANUM=="k"] <- SISD$NumberCaught[SISD$unitCANUM=="k"]*1000
SISD$NumberCaught[SISD$unitCANUM=="m"] <- SISD$NumberCaught[SISD$unitCANUM=="m"]*1000*1000
SISD$CATON[SISD$UnitCATON=="kg"] <- SISD$CATON[SISD$UnitCATON=="kg"]/1000
SISD$MeanWeight[SISD$unitMeanWeight=="g"] <- SISD$MeanWeight[SISD$unitMeanWeight=="g"]/1000
SOP <- sum(SISD$NumberCaught*SISD$MeanWeight)
SOPt <- SOP/1000
total <- sum(SISD$CATON[!duplicated(SISD$SIid)])
diffSOP <- total - SOPt
reldiffSOP <- diff / total
cat("\n")
cat(paste("Total weight IC (t):", format(total, digits=2),"\n"))
cat(paste("Total SOP IC (t):", format(SOPt, digits=2),"\n"))
cat(paste("Difference: ", format(diffSOP, digits=2), " t (", format(reldiffSOP*100, digits=1), "%)\n", sep=""))
}
#' export intercatch data from StoX project
#' @description
#' export intercatch data from StoX project
#' Need metier annotations hacked into StoxLandingData somehow. Provide the column containint metiers in 'metierColumn'.
#' @details
#' Consult the InterCatch exchange format definitions when necessary: https://www.ices.dk/data/Documents/Intercatch/IC-ExchangeFormat1-0.pdf
#' @param StoxLandingData StoxLandingData
#' @param RecaParameterData Reca parameterizattion data.
#' @param exportfile file to write intercatc data to
#' @param seasonType the temporal resolution for the intercatc export, may be 'Month', 'Quarter' or 'Year'
#' @param country ISO 3166 2-alpha code for country submitting data
#' @param unitCATON unit for landings, may be kg or t.
#' @param unitCANUM unit for catch at age in numbers, may be k,m or n for thosuands, millions or unit (ones) respectively
#' @param samplesOrigin information of origin of samples for SI line. See intercatch exchange format SampleOrigin.
#' @param plusGroup plus group for the SD lines (NULL means no plus group)
#' @param metierColumn the column in StoxLandingData containing metier (fleet) category for landings
#' @param icesAreaColumn column where ices areas are annotated to the desired resolution. area type will be inferred
#' @param SDfleets fleets / metier that SD lines should be exported for. NULL means all fleets, NA no fleets.
exportIntercatch <- function(StoxLandingData, RecaParameterData, exportfile, seasonType="Quarter", country="NO", unitCATON="kg", unitCANUM="n", samplesOrigin="U", plusGroup=NULL, metierColumn="LandingSite", icesAreaColumn="IcesArea", SDfleets=NULL){
if (!all(nchar(StoxLandingData$Landing$Species)==3)){
stop("species must be provided as FAO three letter species-code")
}
if (!all(StoxLandingData$Landing$Usage %in% c("I","H", NA))){
stop("usage must be encoded as I (industrial) or H (human consumption")
}
StoxLandingData$Landing$Area <- StoxLandingData$Landing[[icesAreaColumn]]
StoxLandingData$Landing$AreaType <- as.character(NA)
StoxLandingData$Landing$AreaType[sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})==1] <- "AreaTop"
StoxLandingData$Landing$AreaType[sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})==2] <- "SubArea"
StoxLandingData$Landing$AreaType[sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})==3] <- "Div"
StoxLandingData$Landing$AreaType[sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})==4] <- "SubDiv"
StoxLandingData$Landing$AreaType[sapply(strsplit(StoxLandingData$Landing$Area, "\\."), FUN=function(x){length(x)})==5] <- "Unit"
if (any(is.na(StoxLandingData$Landing$AreaType))){
stop("AreaType could not be deduced for all Areas.")
}
StoxLandingData$Landing$Fleet <- StoxLandingData$Landing[[metierColumn]]
StoxLandingData$Landing$Country <- country
checkParam("seasonType", seasonType, c("Quarter", "Month", "Year"))
StoxLandingData$Landing$SeasonType <- seasonType
checkParam("unitCATON", unitCATON, c("kg", "t"))
#
# annotate season
#
if (seasonType == "Quarter"){
StoxLandingData$Landing$Season <- substr(quarters(StoxLandingData$Landing$CatchDate, T),2,2)
}
else if (seasonType == "Month"){
StoxLandingData$Landing$Season <- substr(StoxLandingData$Landing$CatchDate, 6,7)
}
else if (seasonType == "Year"){
StoxLandingData$Landing$Season <- StoxLandingData$Landing$year
}
else{
#assert false
stop("Error (seasonType)")
}
#
# extract species code from data
#
if (length(unique(StoxLandingData$Landing$Species)) != 1){
stop("Landings does not contain unique species code")
}
StoxLandingData$Landing$CatchCategory <- "L"
StoxLandingData$Landing$ReportingCategory <- "R"
StoxLandingData$Landing$DataToFrom <- "NA"
StoxLandingData$Landing$UnitCATON <- unitCATON
if (unitCATON == "kg"){
StoxLandingData$Landing$CATON <- StoxLandingData$Landing$RoundWeight
}
else if (unitCATON == "t"){
StoxLandingData$Landing$CATON <- StoxLandingData$Landing$RoundWeight / 1000
}
else{
stop("Error UnitCATON")
}
StoxLandingData$Landing$OffLandings <- NA
neededColumns <- c("Year", "Season", "Fleet", "Area","Country", "Species", "SeasonType", "AreaType", "CatchCategory",
"ReportingCategory", "DataToFrom", "Usage", "UnitCATON", "CATON", "OffLandings")
missingColumns <- neededColumns[!(neededColumns %in% names(StoxLandingData$Landing))]
if (length(missingColumns) > 0){
stop(paste("Some columns that are needed for intercatch export are not annotated on landings. Missing: "), paste(missingColumns, collapse=","))
}
if (is.null(SDfleets)){
SDfleets <- unique(StoxLandingData$Landing[[metierColumn]])
}
missingFleets <- SDfleets[!is.na(SDfleets) & !(SDfleets %in% StoxLandingData$Landing[[metierColumn]])]
if (length(missingFleets) > 0){
stop(paste("Not all specified fleets / metiers found in landings. Missing:", paste(missingFleets, collapse=",")))
}
checkParam("unitCANUM", unitCANUM, c("k", "m", "n"))
stream <- file(exportfile, open="w")
for (year in unique(StoxLandingData$Landing$Year)){ #exp 1 cat
for (season in unique(StoxLandingData$Landing$Season)){ #exp 4 cat
for (fleet in unique(StoxLandingData$Landing$Fleet)){ #exp many cat
for (area in unique(StoxLandingData$Landing$Area)){ #exp many cat
data <- StoxLandingData$Landing[ StoxLandingData$Landing$Year == year &
StoxLandingData$Landing$Season == season &
StoxLandingData$Landing$Fleet == fleet &
StoxLandingData$Landing$Area == area,]
# dont write lines for cells with no catch
if (nrow(data) > 0){
checkUnique("Country", data$Country)
checkUnique("SeasonType", data$SeasonType)
checkUnique("AreaType", data$AreaType)
writeHI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area)
for (catchCategory in unique(StoxLandingData$Landing$CatchCategory)){ #exp 1 cat
for (reportingCategory in unique(StoxLandingData$Landing$ReportingCategory)){ #exp 1 cat
for (dataToFrom in unique(StoxLandingData$Landing$DataToFrom)){ #exp 1 cat
for (species in unique(data$Species)){
data <- StoxLandingData$Landing[StoxLandingData$Landing$CatchCategory == catchCategory &
StoxLandingData$Landing$ReportingCategory == reportingCategory &
StoxLandingData$Landing$DataToFrom == dataToFrom &
StoxLandingData$Landing$Year == year &
StoxLandingData$Landing$Season == season &
StoxLandingData$Landing$Fleet == fleet &
StoxLandingData$Landing$Area == area &
StoxLandingData$Landing$Species == species,]
checkUnique("UnitCATON", data$UnitCATON)
#intercatch does not allow multiple usages within the variables filtered for above.
#extract most common
tab <- aggregate(list(w=data$RoundWeight), by=list(usage=data$Usage), FUN=function(x){sum(x, na.rm=T)})
tab <- tab[order(tab$w, decreasing = T),]
usage <- tab[1,"usage"]
if (!(fleet %in% SDfleets) & nrow(data)>0){
writeSI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory, DataToFrom = dataToFrom, Usage = usage, SamplesOrigin = "NA", UnitCATON = data$UnitCATON[1], CATON = sum(data$CATON, na.rm=T), OffLandings = sum(data$OffLandings))
}
else if ((fleet %in% SDfleets) & nrow(data)>0){
message(paste("Predicting catch at age for", paste(data$Year[1], data$Season[1], data$Fleet[1], data$Area[1], collapse=",")))
#
# run prediction for cell
#
SL <- list()
SL$Landing <- data
result <- RstoxFDA::RunRecaModels(RecaParameterData, SL)
if (unitCANUM == "k"){
unit <- "10^3 individuals"
}
else if (unitCANUM == "m"){
unit <- "10^6 individuals"
}
else if (unitCANUM == "n"){
unit <- "individuals"
}
else{
stop("Error: unitCANUM")
}
ageMat <- RstoxFDA::ReportRecaCatchAtAge(result, PlusGroup = plusGroup, Unit = unit)
meanWtab <- RstoxFDA::ReportRecaWeightAtAge(result, PlusGroup = plusGroup, Unit = "g")
meanLtab <- RstoxFDA::ReportRecaLengthAtAge(result, PlusGroup = plusGroup, Unit = "cm")
#format plusgroup for report
plg <- "-9"
if (!is.null(plusGroup)){
plg <- plusGroup
}
writeSI(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory, DataToFrom = dataToFrom, Usage = usage, SamplesOrigin = samplesOrigin, UnitCATON = data$UnitCATON[1], CATON = sum(data$CATON), OffLandings = sum(data$OffLandings))
for (age in ageMat$NbyAge$Age){
lowerage <- gsub("\\+", "", age) #remove plus sign from plus group
caa <- ageMat$NbyAge$CatchAtAge[ageMat$NbyAge$Age==age]
meanW <- meanWtab$MeanWeightByAge$MeanIndividualWeight[meanWtab$MeanWeightByAge$Age==age]
meanL <- meanLtab$MeanLengthByAge$MeanIndividualLength[meanLtab$MeanLengthByAge$Age==age]
#Sex is mandatory in the sense that the field must be filled (but accepts N=indetermined). Intercatch doc says its not mandatory
writeSD(stream, Country = data$Country[1], Year = year, SeasonType = data$SeasonType[1], Season = season, Fleet = fleet, AreaType = data$AreaType[1], FishingArea = area, Species = species, CatchCategory = catchCategory, ReportingCategory = reportingCategory,
Sex = "N", CANUMtype="Age", AgeLength = age, PlusGroup=plg, unitMeanWeight="g", unitCANUM=unitCANUM, UnitAgeOrLength="year", UnitMeanLength="cm", Maturity="NA", NumberCaught=caa, MeanWeight=meanW, MeanLength=meanL)
}
}
}
}
}
}
}
}
}
}
}
close(stream)
}
# End-to-end exercise of the IC export: run the StoX project to obtain landings
# and the Reca parameterization, recode landings to IC codes, export, validate.
stoxCalculations <- RstoxFramework::runProject("~/stoxprosjekter/testing/reca_neasaithe_2021/")
landings <- stoxCalculations$landings_FilterFishery #readRDS("landings_example.rds")
parameterization <- stoxCalculations$ParameterizeReca #readRDS("parameterization.rds")
# Code-conversion tables mapping internal species / usage codes to export codes.
speciesConversion <- readRDS("speciesConversion.rds")
usageConversion <- readRDS("usageConversion.rds")
landings$Landing$Species <- RstoxFDA::convertCodes(landings$Landing$Species, speciesConversion)
# strict = F: presumably tolerates usage codes missing from the table -- TODO confirm
# against RstoxFDA::convertCodes documentation.
landings$Landing$Usage <- RstoxFDA::convertCodes(landings$Landing$Usage, usageConversion, strict = F)
# Write the InterCatch exchange file with a plus group at age 12, then check it.
exportIntercatch(landings, parameterization, "test.csv", plusGroup = 12)
checks(landings, "test.csv")
|
# Professional Skills R Session: Model selection, 12 Nov
# From the second worksheet: Model Fit
# Introductory things ----
library(tidyverse)
# Exercise 1: Comparing AIC values of diff linear models ----
# Peru soil chemistry data; columns used below: Soil_pH, Habitat,
# Total_Base_Saturation.
soils <- read_csv("02-multiple-predictors/peru_soil_data.csv")
View(soils)  # opens the data viewer (interactive sessions only)
## Creating linear models
# Four candidate models for soil pH, from single predictors up to an
# interaction model (Habitat * Total_Base_Saturation expands to both main
# effects plus their interaction).
lm_pH_habitat <- lm(Soil_pH ~ Habitat, data = soils)
lm_pH_tbs <- lm(Soil_pH ~ Total_Base_Saturation, data = soils)
lm_pH_habitat_tbs <- lm(Soil_pH ~ Habitat + Total_Base_Saturation, data = soils)
lm_pH_habitat_tbs_interaction <- lm(Soil_pH ~ Habitat * Total_Base_Saturation, data = soils)
## Compare AIC values
# Lower AIC = better fit after penalizing for model complexity.
AIC(lm_pH_habitat, lm_pH_tbs, lm_pH_habitat_tbs, lm_pH_habitat_tbs_interaction)
# AIC of lm_pH_habitat_tbs is the lowest, meaning that it is the best model fit!
| /02-multiple-predictors/multiple-pred-script2.R | no_license | beverlytan/uni-profskills | R | false | false | 795 | r | # Professional Skills R Session: Model selection, 12 Nov
# From the second worksheet: Model Fit
# Introductory things ----
library(tidyverse)
# Exercise 1: Comparing AIC values of diff linear models ----
soils <- read_csv("02-multiple-predictors/peru_soil_data.csv")
View(soils)
## Creating linear models
lm_pH_habitat <- lm(Soil_pH ~ Habitat, data = soils)
lm_pH_tbs <- lm(Soil_pH ~ Total_Base_Saturation, data = soils)
lm_pH_habitat_tbs <- lm(Soil_pH ~ Habitat + Total_Base_Saturation, data = soils)
lm_pH_habitat_tbs_interaction <- lm(Soil_pH ~ Habitat * Total_Base_Saturation, data = soils)
## Compare AIC values
AIC(lm_pH_habitat, lm_pH_tbs, lm_pH_habitat_tbs, lm_pH_habitat_tbs_interaction)
# AIC of lm_pH_habitat_tbs is the lowest, meaning that it is the best model fit!
|
# Exploratory Data Analysis course, plot 1: histogram of Global Active Power
# over 2007-02-01 and 2007-02-02, written to plot1.png (480x480).
library(dplyr)
library(lubridate)

# Load the full power dataset; fields are ";"-separated and "?" marks NA.
power_consumption <- read.table("household_power_consumption.txt",
                                sep = ";",
                                header = TRUE,
                                na.strings = "?")

# Convert the day/month/year date strings to Date (parsed as UTC).
power_consumption$Date <- as.Date(strptime(power_consumption$Date, "%d/%m/%Y",
                                           tz = "UTC"))

# Keep only the two target days (2007-02-01 and 2007-02-02); rows with a
# missing Date are dropped, same as base subset().
two_days <- filter(power_consumption,
                   Date > as.Date("2007-01-31"),
                   Date < as.Date("2007-02-03"))

# Combine date and time into a single POSIXct timestamp column.
two_days$Datetime <- ymd_hms(paste(two_days$Date, two_days$Time))

# Render the histogram of global active power straight to a PNG device.
png(file = "plot1.png",
    width = 480,
    height = 480)
hist(two_days$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "red")
dev.off()
| /plot1.R | no_license | jdumagay/ExData_Plotting1 | R | false | false | 698 | r | library(dplyr)
library(lubridate)
power_consumption <- read.table("household_power_consumption.txt",
header = TRUE,
sep = ";",
na.strings = "?")
power_consumption$Date <- as.Date(strptime(power_consumption$Date, "%d/%m/%Y",
tz = "UTC"))
power_plot <- subset(power_consumption,
Date < as.Date("2007-02-03") & Date > as.Date("2007-01-31"))
power_plot <- mutate(power_plot,
Datetime = ymd_hms(paste(power_plot$Date, power_plot$Time)))
# To create the first plot
png(file = "plot1.png",
width = 480,
height = 480)
hist(power_plot$Global_active_power,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)",
ylab = "Frequency")
dev.off()
|
# App-wide setup for the Shiny dashboard: packages, data, and search globals.
library(shinydashboard)
library(dplyr)
library(plotly)
library(tidytext)
library(SnowballC)
library(tm)
library(wordcloud)
library(wordcloud2)
library(memoise)
##### load datasets #####
# Each .Rda restores pre-built objects into the global environment
# (e.g. products_influenster used below, plus the rev_* review tables).
load("products_influenster.Rda")
load("top_products.Rda")
load("products_review.Rda")
load("reviews_cloud.Rda")
load("rev_shampoo.Rda")
load("rev_conditioner.Rda")
load("rev_oil.Rda")
load("corpus.Rda")
##### Search engine #####
# Searchable corpus: one "document" per product name; QrySearch appends the
# user's query as an extra document after these.
docList <- as.list(products_influenster$product_name)
N.docs <- length(docList)  # number of catalog documents
# QrySearch: rank catalog products against a free-text query.
#
# Builds a corpus from the global docList (product names) plus the query,
# converts it to a term-document matrix, L2-normalizes each document's term
# vector, and scores every product against the query by cosine similarity.
#
# Args:
#   queryTerm: a single character string (the user's search text).
# Returns:
#   data.frame of the top 5 matches with columns Result, cosine_similarity,
#   overall_rating, reviews_count.
# Depends on globals: docList, N.docs, products_influenster.
QrySearch <- function(queryTerm) {
  # Record starting time to measure search engine performance.
  start.time <- Sys.time()
  # Store docs in a Corpus, the fundamental tm data structure; the query is
  # appended as the last "document" so it gets identical preprocessing.
  my.docs <- VectorSource(c(docList, queryTerm))
  # Standardize text: stem, lowercase, drop English stopwords, squeeze spaces.
  my.corpus <- VCorpus(my.docs) %>%
    tm_map(stemDocument) %>%
    tm_map(content_transformer(tolower)) %>%
    tm_map(removeWords,stopwords("english")) %>%
    tm_map(stripWhitespace)
  # Term-document matrix: rows = terms, cols = docs.
  # NOTE(review): spec="nnn" is raw term counts, not TF-IDF, despite the old
  # "#ltc" remark below -- confirm which weighting was intended.
  term.doc.matrix.stm <- TermDocumentMatrix(my.corpus,
                                            control=list(
                                              weighting=function(x) weightSMART(x,spec="nnn"), #ltc
                                              wordLengths=c(1,Inf)))
  # Tidy the matrix and L2-normalize each document's term-count vector so the
  # dot products below equal cosine similarities.
  term.doc.matrix <- tidy(term.doc.matrix.stm) %>%
    group_by(document) %>%
    mutate(vtrLen=sqrt(sum(count^2))) %>%
    mutate(count=count/vtrLen) %>%
    ungroup() %>%
    select(term:count)
  # Split back into catalog documents (1..N.docs) and the query document.
  docMatrix <- term.doc.matrix %>%
    mutate(document=as.numeric(document)) %>%
    filter(document<N.docs+1)
  qryMatrix <- term.doc.matrix %>%
    mutate(document=as.numeric(document)) %>%
    filter(document>=N.docs+1)
  # Calculate top 5 results by cosine similarity (sum of per-term score
  # products over shared terms), then join product metadata on row id V1.
  searchRes <- docMatrix %>%
    inner_join(qryMatrix,by=c("term"="term"),
               suffix=c(".doc",".query")) %>%
    mutate(termScore=round(count.doc*count.query,4)) %>%
    group_by(document.query,document.doc) %>%
    summarise(cosine_similarity=sum(termScore)) %>%
    filter(row_number(desc(cosine_similarity))<=5) %>%
    arrange(desc(cosine_similarity)) %>%
    left_join(products_influenster,by=c("document.doc"="V1")) %>%
    ungroup() %>%
    rename(Result=product_name) %>%
    select(Result,cosine_similarity,overall_rating,reviews_count) %>%
    data.frame()
  # Report elapsed wall-clock time, then return the result table.
  end.time <- Sys.time()
  time.taken <- round(end.time - start.time,4)
  print(paste("Used",time.taken,"seconds"))
  return(searchRes)
}
##### Wordcloud #####
# reviewCorpus <- Corpus(VectorSource(reviews_cloud$content)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english')) %>%
# tm_map(removeWords, c('hair','product','shampoo','shampoos','conditioner','conditioners',
# 'oil','love','like','smells','make','makes','ends','use','used','put',
# 'great','good','really','just','one','let','goes'))
#
# shampooCorpus <- Corpus(VectorSource(rev_shampoo$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
#
# conditionerCorpus <- Corpus(VectorSource(rev_conditioner$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
#
# oilCorpus <- Corpus(VectorSource(rev_oil$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
| /shinyapp/global.R | no_license | yuyuhan0306/Influenster_haircare | R | false | false | 3,976 | r | library(shinydashboard)
library(dplyr)
library(plotly)
library(tidytext)
library(SnowballC)
library(tm)
library(wordcloud)
library(wordcloud2)
library(memoise)
##### load datasets #####
load("products_influenster.Rda")
load("top_products.Rda")
load("products_review.Rda")
load("reviews_cloud.Rda")
load("rev_shampoo.Rda")
load("rev_conditioner.Rda")
load("rev_oil.Rda")
load("corpus.Rda")
##### Search engine #####
docList <- as.list(products_influenster$product_name)
N.docs <- length(docList)
QrySearch <- function(queryTerm) {
# Record starting time to measure your search engine performance
start.time <- Sys.time()
# store docs in Corpus class which is a fundamental data structure in text mining
my.docs <- VectorSource(c(docList, queryTerm))
# Transform/standaridze docs to get ready for analysis
my.corpus <- VCorpus(my.docs) %>%
tm_map(stemDocument) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords,stopwords("english")) %>%
tm_map(stripWhitespace)
# Store docs into a term document matrix where rows=terms and cols=docs
# Normalize term counts by applying TF-IDF weightings
term.doc.matrix.stm <- TermDocumentMatrix(my.corpus,
control=list(
weighting=function(x) weightSMART(x,spec="nnn"), #ltc
wordLengths=c(1,Inf)))
# Transform term document matrix into a dataframe
term.doc.matrix <- tidy(term.doc.matrix.stm) %>%
group_by(document) %>%
mutate(vtrLen=sqrt(sum(count^2))) %>%
mutate(count=count/vtrLen) %>%
ungroup() %>%
select(term:count)
docMatrix <- term.doc.matrix %>%
mutate(document=as.numeric(document)) %>%
filter(document<N.docs+1)
qryMatrix <- term.doc.matrix %>%
mutate(document=as.numeric(document)) %>%
filter(document>=N.docs+1)
# Calcualte top 5 results by cosine similarity
searchRes <- docMatrix %>%
inner_join(qryMatrix,by=c("term"="term"),
suffix=c(".doc",".query")) %>%
mutate(termScore=round(count.doc*count.query,4)) %>%
group_by(document.query,document.doc) %>%
summarise(cosine_similarity=sum(termScore)) %>%
filter(row_number(desc(cosine_similarity))<=5) %>%
arrange(desc(cosine_similarity)) %>%
left_join(products_influenster,by=c("document.doc"="V1")) %>%
ungroup() %>%
rename(Result=product_name) %>%
select(Result,cosine_similarity,overall_rating,reviews_count) %>%
data.frame()
# Record when it stops and take the difference
end.time <- Sys.time()
time.taken <- round(end.time - start.time,4)
print(paste("Used",time.taken,"seconds"))
return(searchRes)
}
##### Wordcloud #####
# reviewCorpus <- Corpus(VectorSource(reviews_cloud$content)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english')) %>%
# tm_map(removeWords, c('hair','product','shampoo','shampoos','conditioner','conditioners',
# 'oil','love','like','smells','make','makes','ends','use','used','put',
# 'great','good','really','just','one','let','goes'))
#
# shampooCorpus <- Corpus(VectorSource(rev_shampoo$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
#
# conditionerCorpus <- Corpus(VectorSource(rev_conditioner$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
#
# oilCorpus <- Corpus(VectorSource(rev_oil$V2)) %>%
# tm_map(removePunctuation) %>%
# tm_map(stripWhitespace) %>%
# tm_map(tolower) %>%
# tm_map(removeNumbers) %>%
# tm_map(removeWords, stopwords('english'))
|
### initialize globals
# Project root on the author's machine; sourced files supply helper functions
# (make_plot_size_by_success, gov_* aggregators, plotting themes).
pathLocal <- '/Users/sfrey/projecto/research_projects/minecraft/redditcommunity/'
source(paste0(pathLocal,"local_settings.R"))
source(paste0(pathLocal,"lib_step6_analysis.r"))
source(paste0(pathLocal,"lib_plotting.r"))
library(boot)
library(ggthemes)
library(scales)
# pathData is presumably defined by local_settings.R -- not set in this file.
mw <- readRDS(paste0(pathData, "step6_servers_wide_govanalysis.rds"))
mw_train <- mw  # analysis alias; no holdout split is performed here
### mere data density
(plot_srv_density <- make_plot_size_by_success(mw_train, "weeks_up_total", function(x,i) nrow(x[i]), ggmore=scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)), breaks=c(0,4,16,64,256,1024)), ggguide=guide_legend("Server\ncount", reverse=TRUE), reps=10))
plot_srv_density <- plot_srv_density + guides(fill="none")
ggsave(plot_srv_density, file=paste0(pathImages, "plot_srv_density.png"), units='cm', width=3.25, height=2.5, scale=3)
# plot hazard log and linear
(plot_srv_hazard_bar1 <- ggplot(mw_train[,.(longevity_count=.N),by=.(weeks_up_total)], aes(x=weeks_up_total, y=longevity_count)) + geom_bar(stat="identity") + theme_bw() + scale_y_log10("Count") + xlab("Longevity (weeks)") )
median( mw_train$weeks_up_total)
(plot_srv_hazard_bar2 <- ggplot(mw_train[,.(longevity_count=.N),by=.(weeks_up_total, pop_size_factor)], aes(x=weeks_up_total, y=longevity_count, fill=pop_size_factor )) + geom_bar(stat="identity", position="dodge") + theme_bw() + scale_y_continuous("Count") + xlab("Longevity (weeks)") )
(plot_srv_hazard <- make_plot_size_by_success(mw_train, "weeks_up_total", gov_median, ggmore=scale_fill_gradient(high="#3182bd", low="#cccccc"), ggguide="none", reps=1000 ) )
ggsave(plot_srv_hazard, file=paste0(pathImages, "plot_srv_hazard.png"), units='cm', width=3.25, height=2.5, scale=3)
ggsave(plot_srv_hazard_bar1, file=paste0(pathImages, "plot_srv_hazard_bar1.png"), units='cm', width=5, height=1.5, scale=3)
#plot increase in grief:
### gov going up or down
### governance against size against community
ggel <- scale_fill_gradient(high="#3182bd", low="#cccccc")
ggel_gov <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=2.5, breaks=seq(from=0,to=12,by=2))
ggel_gov_prop <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=0.5, breaks=seq(from=0,to=1,by=0.2))
ggel_gov_rat <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=0.10)
ggel_gov_rat_within <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
(plot_gov_scaling <- make_plot_size_by_success(mw_train, "gov", gov_mean , ggmore=ggel_gov, ggguide="none", reps=1000))
(plot_gov_specialization <- make_plot_size_by_success(mw_train, "plugin_specialization", gov_mean_narm , ggmore=ggel_gov, ggguide="none", reps=1000))
(plot_gov_scaling_ratio <- make_plot_size_by_success(mw_train, c("gov","plugin_count"), gov_median_proportion_1, ggmore=ggel_gov_rat, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100))
(plot_gov_scaling_ratio_antigrief <- make_plot_size_by_success(mw_train, c("res_grief","sum_resource"), gov_median_proportion_1_narm, ggmore=ggel_gov_rat, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100))
ggsave(plot_gov_scaling, file=paste0(pathImages, "plot_gov_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
ggsave(plot_gov_specialization, file=paste0(pathImages, "plot_gov_specialization.png"), units='cm', width=3.25, height=2.5, scale=3)
### resource managemanet style by size:
(plot_gov_scaling_by_plugin_category <- make_plot_size_by_success(melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c(grep("^cat_", names(mw_train), value=TRUE)), variable.name = 'resource', value.name='resource_count'), c("resource_count"), gov_mean , ggmore=ggel_gov, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=10, facetting=c("resource")) + facet_wrap( ~ resource, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
#(plot_gov_scaling_by_resource_type_across_proportion <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c("gov", "res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count', variable.factor=FALSE), c("resource_count", "sum_resource"), gov_median_proportion_1 , ggmore=ggel_gov_rat, ggguide=guide_legend("% governance\nplugins", reverse=TRUE), reps=100, facetting=c("resource")) + facet_wrap( ~ resource, ncol=2)+ theme(strip.background=element_rect(color="white", fill="white")))
ggel_gov_by_type <- scale_fill_gradientn(colors=(seq_gradient_pal(low=muted("#91cf60", l=100, c=100), high=muted("#fc8d59", l=100, c=100)))(rescale(seq(from=0,to=10,by=2))), values=rescale(seq(from=0,to=10,by=2)^2))
#scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)), breaks=c(0,4,16,64,256,1024))
{
gg <- melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c("res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count', variable.factor=FALSE)
gg[,resource:=factor(resource, levels=c("res_grief", "res_ingame", "res_realworld", "res_players", "res_attention"), labels=c("Grief", "In-game", "Real-world", "Player community", "Mod cognitive"))]
(plot_gov_scaling_by_resource_type <- make_plot_size_by_success(gg, c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000, facetting=c("resource")) + facet_wrap( ~ resource, ncol=1)+ theme(strip.background=element_rect(color="white", fill="white"), axis.text=element_text(size=6)))
ggsave(plot_gov_scaling_by_resource_type, file=paste0(pathImages, "plot_gov_scaling_by_resource_type.png"), units='cm', width=2.25, height=5, scale=3)
(plot_antigrief_scaling <- make_plot_size_by_success(gg[resource=="Grief"], c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
(plot_antigrief_ratio_scaling <- make_plot_size_by_success(gg, c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
(plot_gov_scaling_by_aud_type2 <- make_plot_size_by_success(mw_train[,.(perf_factor, pop_size_factor, pop_size_factor, ratio_aud)], c("ratio_aud"), gov_mean, ggmore=ggel_govaud2, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100, ggtext=FALSE))
ggsave(plot_antigrief_scaling, file=paste0(pathImages, "plot_antigrief_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
}
### institution by size:
{
gg <- melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count', variable.factor=FALSE)
gginclude <- c("inst_action_space_down", "inst_chat", "inst_privateproperty", "inst_shop")
gg <- gg[institution %in% gginclude]
gg[,institution:=factor(institution, levels=gginclude, labels=c("Proscriptions", "Chat", "Property", "Exchange"))]
(plot_gov_scaling_by_inst_type <- make_plot_size_by_success(gg, c("institution_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000, facetting=c("institution")) + facet_wrap( ~ institution, ncol=1)+ theme(strip.background=element_rect(color="white", fill="white"), axis.text=element_text(size=6)))
(plot_actiondown_scaling <- make_plot_size_by_success(gg[institution == "Proscriptions"], c("institution_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
ggsave(plot_actiondown_scaling, file=paste0(pathImages, "plot_actiondown_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
}
### institution by size as a fraction of total institutions
#(plot_gov_scaling_by_inst_type_proportion <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count'), c("institution_count","sum_institution"), gov_median_proportion_1 , ggmore=ggel_gov_rat, ggguide=guide_legend("% governance\nplugins", reverse=TRUE), reps=0, facetting=c("institution")) + facet_wrap( ~ institution, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### institution by size as a fraction of within that type of institution
### but this ultimately gives less info that the original clacuclation, and less valuable, so back to original/
#dataa <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count'), c("institution_count","sum_institution"), gov_mean_proportion_1, reps=0, facetting=c("institution"), return_plot=FALSE)[,pop_var:=pop_var/sum(pop_var),by=institution]
#(plot_gov_scaling_by_inst_type_proportion <- ggplot(dataa[,.(xvar=pop_size_factor_coarse, yvar=perf_factor,institution, pop_var)], aes(x=xvar, y=yvar)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + geom_bin2d(aes(fill=pop_var)) + theme_bw() + theme(panel.grid.major=element_line(0)) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend("% governance\nplugins", reverse=TRUE)) + ggel_gov_rat_within + facet_wrap( ~ institution, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### governance audience
ggel_govaud <- scale_fill_gradient2(low="#91cf60", mid="#f0f0f0", high="#fc8d59", midpoint=3 )
(plot_gov_scaling_by_aud_type <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio"), measure.vars = c(grep("^aud_[^n]", names(mw_train), value=TRUE)), variable.name = 'audience', value.name='audience_count'), "audience_count", gov_mean , ggmore=ggel_govaud, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=0, facetting=c("audience")) + facet_wrap( ~ audience, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
ggel_govaud2 <- scale_fill_gradient(low="#f0f0f0", high=muted("#fc8d59", l=80,c=100))
(plot_gov_scaling_by_aud_type2 <- make_plot_size_by_success(mw_train[,.(perf_factor, pop_size_factor, pop_size_factor, ratio_aud)], c("ratio_aud"), gov_mean, ggmore=ggel_govaud2, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100, ggtext=FALSE))
#ggsave(plot_gov_scaling_by_aud_type2, file=paste0(pathImages, "plot_gov_scaling_by_aud_type2.png"), units='cm', width=4, height=2.5, scale=3)
ggsave(plot_gov_scaling_by_aud_type, file=paste0(pathImages, "plot_gov_scaling_by_aud_type.png"), units='cm', width=4, height=2.5, scale=3)
## uniques vs core members
plot_population_distribution_rect <- plot_visitortype(mw, plot_type='vertical')
#ggsave(plot_population_distribution, file=paste0(pathImages, "plot_population_distribution.png"), units='cm', width=5, height=2.5, scale=5)
ggsave(plot_population_distribution_rect, file=paste0(pathImages, "plot_population_distribution_rect.png"), units='cm', width=2, height=3, scale=5)
ggsave(plot_population_distribution_rect, file=paste0(pathImages, "plot_population_distribution_rect2.png"), units='cm', width=3, height=2, scale=5)
### now plot uniques against size and success
(make_plot_size_by_success(mw_train, "nuvisits12", function(x,i) log2(gov_median(x, i)), ggmore=scale_fill_gradient(low="#d9d9d9", high="#525252"), ggguide=guide_legend("Unique visits", reverse=TRUE), reps=10))
(plot_srv_density_uvisits <- make_plot_size_by_success(mw_train, "nuvisits12", function(x,i) gov_median(x, i), ggmore=scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)^2), breaks=c(0,4,16,64,256,1024)), ggguide=guide_legend("Unique visits", reverse=TRUE), reps=10))
mw_train[,.(unsuccessful=sum(table(perf_factor)[1:2]),all=sum(table(perf_factor)),ratio=sum(table(perf_factor)[1:2])/sum(table(perf_factor))), by=pop_size_factor]
ggsave(plot_srv_density_uvisits, file=paste0(pathImages, "plot_srv_density_uvisits.png"), units='cm', width=3.25, height=2.5, scale=3)
### server diversity
### bootstrapping fucntion for entropy
ggel_lowbad <- scale_fill_gradient(high="#41ab5d", low="#cccccc")
#(make_plot_size_by_success(mw_train, grep("^inst_[^n]", names(mw_train), value=TRUE), gov_entropy_diversity, ggguide=guide_legend("Entropy"), ggmore=ggel_lowbad, reps=10))
(plot_srv_institutional_diversity <- (make_plot_size_by_success(mw_train, grep("^inst_[^n]", names(mw_train), value=TRUE), gov_dist, ggguide=guide_legend("Variability"), ggmore=ggel_lowbad, reps=10, ggtext=FALSE)))
ggsave(plot_srv_institutional_diversity, file=paste0(pathImages, "plot_srv_institutional_diversity.png"), units='cm', width=4, height=2.5, scale=3)
#ggsave(plot_srv_institutional_diversity, file=paste0(pathImages, "plot_srv_institutional_diversity_entropy.png"), units='cm', width=4, height=2.5, scale=3)
### within-server diversity
(make_plot_size_by_success(mw_train, "srv_entropy", gov_median, ggmore=ggel_lowbad, ggguide=guide_legend("Pluralism", reverse=TRUE), reps=10))
### uptime %?
### number of weeks up (longevity)
ggel_longevity <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=15)
(make_plot_size_by_success(mw_train, "weeks_up_total", gov_median, ggmore=ggel_longevity, ggguide=guide_legend("Longevity", reverse=TRUE), reps=1000))
### number of signs (informal governance)
ggel_signs <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
(make_plot_size_by_success(mw_train, "sign_count", function(x,i) median(as.double(asdf(x[i][!is.na(sign_count)])[,1])), ggmore=ggel_signs, ggguide=guide_legend("Norms", reverse=TRUE), reps=0))
### maintenance style
ggel_maint <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
gov_features <- grep("^use_[^n]", names(mw_train), value=TRUE)
for (i in 1:length(gov_features)){
print(make_plot_size_by_success(mw_train, gov_features, gov_median_proportion_2, ggguide=guide_legend(paste0(gov_features[i])), ggmore=ggel_maint, reps=5, focal=i))
}
ggel_govmaint <- scale_fill_gradient2(low="#91cf60", mid="#f0f0f0", high="#fc8d59", midpoint=3 )
(plot_gov_scaling_by_maint_type <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio"), measure.vars = c(grep("^use_[^n]", names(mw_train), value=TRUE)), variable.name = 'maintain', value.name='maintain_count'), "maintain_count", gov_median , ggmore=ggel_govmaint, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=0, facetting=c("maintain")) + facet_wrap( ~ maintain, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### jubilees
ggel_v <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1)
(make_plot_size_by_success(mw_train, "jubilees", gov_median, ggmore=ggel_v, ggguide=guide_legend("Updates", reverse=TRUE), reps=1000) )
ggplot(mw_train, aes(x=weeks_up_total, y=jubilees)) + geom_jitter(height=0.5, width=0) ### the patern above occurs even though longevity and jubilees are psoitively correlated without controlling for size
### audience
### cowplot merge
#plot increase in grief:
### fix pred_hist plotting of histograms with fake data
### pred_hist: per-plugin table (one row per plugin observation) used for the
### histogram figures below; mc is assumed to be the plugin-level table built
### earlier in this script -- TODO confirm against the upstream section.
pred_hist <- mc
#pred_hist_fake1 <- pred_hist[srv_max>200 & srv_max<400 & resource=="players", ]
#pred_hist_fake1[,':='(resource='performance')]
#pred_hist_fake2 <- pred_hist[srv_max>200 & srv_max<400 & resource=="players", ]
#pred_hist_fake2[,':='(resource='realmoney')]
#pred_hist <- rbind(pred_hist, pred_hist_fake1, pred_hist_fake2)
### Add human-readable factor labels for plotting. Each chain starts from a
### default label and then each ifelse() overrides the running value (the
### magrittr dot ".") for one institution/resource code.
pred_hist[ ,':='(
### Governance plugins default to "Other gov", non-governance to "Misc";
### specific institution codes then get their display names.
institution_name={ifelse( gov==1 , "Other gov", "Misc") %>%
#ifelse( gov==1 & institution %in% c("noinstitution", "monitor", "action_space"), "Misc", '') %>%
ifelse( gov==1 & institution == "boundary", "Entry restrictions", .) %>%
ifelse( gov==1 & institution == "action_space_up", "More player actions", .) %>%
ifelse( gov==1 & institution == "action_space_down", "Fewer player actions", .) %>%
ifelse( gov==1 & institution == "shop", "Economy", .) %>%
ifelse( gov==1 & institution == "chat", "Communication", .) %>%
ifelse( gov==1 & institution == "privateproperty", "Private property", .) %>%
ifelse( gov==1 & institution == "broadcast", "Admin broadcast", .) %>%
ifelse( gov==1 & institution == "monitor_by_peer", "Peer monitoring", .) %>%
ifelse( gov==1 & institution == "monitor_by_admin", "Admin monitoring", .) %>%
ifelse( gov==1 & institution == "position_v", "More groups, vertical", .) %>%
ifelse( gov==1 & institution == "position_h", "More groups, horizontal", .) %>%
ifelse( gov==1 & institution == "payoff", "Incentives", .) %>%
factor(levels=c( "Communication", "Private property", "Economy", "More player actions", "Entry restrictions", "Fewer player actions", "Admin broadcast", "Peer monitoring", "Admin monitoring", "More groups, vertical", "More groups, horizontal", "Other gov", "Misc"))
},
### Both branches of the first ifelse() are "Not resource-related": it serves
### only to initialize every row before the specific-resource overrides.
resource_name={
ifelse( gov==1 & resource == "noresource", "Not resource-related", "Not resource-related") %>%
ifelse( gov==1 & resource == "grief", "Anti-grief", .) %>%
ifelse( gov==1 & resource == "ingame", "Game-related\nresources", .) %>%
ifelse( gov==1 & resource == "performance", "Server performance", .) %>%
ifelse( gov==1 & resource == "players", "Player community", .) %>%
ifelse( gov==1 & resource == "realmoney", "Server provisioning", .) %>%
factor(levels=c( "Anti-grief", "Game-related\nresources", "Server performance", "Server provisioning", "Player community", "Not resource-related"))
},
### Two-level factor for governance vs game-related plugins.
gov_factor=factor(gov, levels=c(1,0), labels=c("Governance-related", "Game-related"))
) ]
### Shared discrete x axis with size-bucket labels, reused by later plots.
xaxis_size_factor <- scale_x_discrete("Server size", labels=c("(0,5]", "(5,10]", "(10, 50]", "(50,100]", "(100, 500]", "(500, 1000]"))
### Each online community can be seen as a bundle of collective action problems. Larger servers are more likely to have to install governance modules that mitigate such problems. among 4000 plugins on 1300 active servers, large servers are more likely to face problems with server performance (CPU/RAM/lag), server provisioning (paying server fees), and maintaining the player community (aiding and coordinating community members).
plot_color1 <- scale_fill_brewer("Resource type", type="qual",palette=1)
plot_color2 <- scale_fill_manual("Resource type", values=c("#666666", "#bf5b17", "#ffff99")) ### for consistency. see http://colorbrewer2.org/#type=qualitative&scheme=Accent&n=6
### Stacked (position="fill") histograms of plugin-type proportions over
### log10(server size); the hard-coded breaks bin the log-scaled axis.
(plot_resource_types_1 <- ggplot(pred_hist[resource %ni% c("grief", "ingame"),], aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,5,10,50,100,500,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_resource_types_2 <- ggplot(pred_hist[gov== 0 | resource %in% c("grief", "ingame"),], aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color2 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
### Exploratory variants (all resources; absolute counts) -- not saved to disk.
(plot_resource_types_x <- ggplot(pred_hist, aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue() + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_resource_types_abs_x <- ggplot(pred_hist, aes(x=srv_max, fill=resource_name)) + geom_histogram(position="dodge", bins=6, binwidth=0.5)+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue() + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,3.1,10,31,100,310,1000), alpha=0.3))
ggsave(plot_resource_types_1, file=paste0(pathImages, "plot_resource_types_1.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_resource_types_2, file=paste0(pathImages, "plot_resource_types_2.png"), units='cm', width=2.25, height=1, scale=6)
### Three rainbow-slice palettes, one per filter group below, each with a
### grey slot for the residual "Other gov" category.
plot_color1 <- scale_fill_manual("Institution type", values=c(rainbow(4, start=15/540, end=105/540, s=0.8, v=0.9 ), 'grey50'))
plot_color2 <- scale_fill_manual("Institution type", values=c(rainbow(4, start=200/540, end=360/540, s=0.8, v=0.9 ), 'grey50'))
plot_color3 <- scale_fill_manual("Institution type", values=c(rainbow(3, start=240/360, end=360/360, s=0.8, v=0.9 ), 'grey50'))
### Institution codes split into three display groups so each stacked
### histogram stays legible.
filter1 <- c("monitor_by_admin", "position_v", "action_space_down", "broadcast")
filter2 <- c("monitor_by_peer","position_h", "privateproperty","action_space_up")
filter3 <- c("boundary","chat","shop" )
(plot_institution_types_1 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter1) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_institution_types_2 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter2) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color2 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
plot_institution_types_3 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter3) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color3 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3); plot_institution_types_3
### Exploratory all-institutions variant -- not saved to disk.
(plot_institution_types_x <- ggplot(pred_hist[gov == 1], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue("Institution type") + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
ggsave(plot_institution_types_1, file=paste0(pathImages, "plot_institution_types_1.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_institution_types_2, file=paste0(pathImages, "plot_institution_types_2.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_institution_types_3, file=paste0(pathImages, "plot_institution_types_3.png"), units='cm', width=2.25, height=1, scale=6)
### gov going up or down
### Governance plugin counts vs server size: raw jittered scatter with a
### robust-regression (rlm) trend, then the relative share of governance vs
### game-related plugins across size bins.
(plot_gov_count <- ggplot(mw_train, aes(x=srv_max, y=(gov+1))) + geom_jitter(height=0.4, width=0.05, color="dark grey", size=0.5) + scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_log10("Governance plugins") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3) + geom_smooth(method="rlm", color="black"))
(plot_gov_relative <- ggplot(pred_hist, aes(x=srv_max, fill=gov_factor)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Increase in governance intensity") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
ggsave(plot_gov_count, file=paste0(pathImages, "plot_gov_count.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_gov_relative, file=paste0(pathImages, "plot_gov_relative.png"), units='cm', width=2.25, height=1, scale=6)
### governance against size against community
### 2D heatmaps of median/mean governance intensity over (server size x core
### members), optionally faceted by resource or institution type.
(plot_gov_scaling <- ggplot(mw_train[,.(gov=median(gov)),by=.(perf_factor, pop_size_factor_coarse)], aes(x=pop_size_factor_coarse, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=2.5, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)))
(plot_gov_scaling_by_resource_type <- ggplot(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "perf_factor"), measure.vars = c("gov", "res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count')[,.(gov=mean(resource_count)),by=.(resource, perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0), strip.background=element_rect(color="white", fill="white")) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)) + facet_wrap( ~ resource, ncol=1))
(plot_gov_scaling_by_inst_type <- ggplot(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "perf_factor"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count')[,.(gov=mean(institution_count)),by=.(institution, perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0), strip.background=element_rect(color="white", fill="white")) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)) + facet_wrap( ~ institution, ncol=4))
### resource management style by size:
ggplot(data=melt(training_full_lasso, id.vars = c("srv_addr", "srv_max", "y"), measure.vars = c("res_grief", "res_ingame", "res_realworld", "res_players", "res_attention"), variable.name = 'resource', value.name='resource_count'),aes(x=srv_max, y=resource_count)) + geom_jitter(size=0.1, height=0.1, width=0.1) + scale_x_log10() + geom_smooth(method='rlm') + facet_wrap(~resource, ncol=2)
### institution by size:
ggplot(data=melt(mw_train, id.vars = c("srv_addr", "srv_max", "y"), measure.vars = grep("^inst_", names(mw_train)), variable.name = 'institution', value.name='institution_count'),aes(x=srv_max, y=institution_count)) + geom_jitter(size=0.1, height=0.1, width=0.1) + scale_x_log10() + geom_smooth(method='rlm') + facet_wrap(~institution, ncol=2)
ggsave(plot_gov_scaling, file=paste0(pathImages, "plot_gov_scaling.png"), units='cm', width=2.25, height=1, scale=6)
### server diversity
### Per-server inputs for the diversity plots: size, size factor, and the
### server-level entropy of its own institution mix.
plot_diversity_data <- mw_train[,.(srv_max, srv_max_log,pop_size_factor, srv_entropy), by=srv_addr]
### Population-level entropy per size bin: sum institution counts over all
### servers in the bin, smooth by a tiny constant (so zero counts don't give
### -Inf), normalize, and compute Shannon entropy.
plot_diversity_data2 <- mw_train[,.( pop_entropy={inst_dist<-colSums(.SD[,grep("^inst_", names(mw_train)),with=FALSE]); inst_dist<-(inst_dist+0.000001)/(sum(inst_dist)+0.000001); sum(sapply(inst_dist, function(x) {-x*log(x)})) }), by=pop_size_factor]
plot_diversity_data <- merge(plot_diversity_data, plot_diversity_data2[,.(pop_size_factor, pop_entropy)], all.x=T, all.y=F, by="pop_size_factor")
### Three per-bin aggregates of server-level entropy: mean, mean excluding
### zero-entropy servers, and median.
plot_diversity_data[,srv_entropy_agg1:=mean(srv_entropy), by=pop_size_factor]
plot_diversity_data[srv_entropy!=0,srv_entropy_agg2:=mean(srv_entropy), by=pop_size_factor]
plot_diversity_data[,srv_entropy_agg3:=median(srv_entropy), by=pop_size_factor]
### each server draws on a greater variety of governance styles as it gets larger, but they also become less different from each other .
ggplot(plot_diversity_data, aes(x=srv_max, y=srv_entropy)) + geom_point() + scale_x_log10() + geom_line(data=plot_diversity_data[srv_entropy!=0,],aes(x=srv_max, y=srv_entropy_agg2), color='red') + geom_line(aes(x=srv_max, y=srv_entropy_agg1), color='blue') + geom_line(aes(x=srv_max, y=srv_entropy_agg3), color='orange') + geom_line(aes(x=srv_max, y=pop_entropy), color='green')
### focus on decrease in difference over time
(plot_diversity <- ggplot(plot_diversity_data2, aes(x=pop_size_factor, y=pop_entropy)) + geom_bar(stat='identity') + geom_smooth() + xaxis_size_factor + scale_y_continuous("Population-level diversity in governance style") + theme_bw() )
# now bootstrap the stat
### Shannon entropy (in nats) of the governance-institution mix in a bootstrap
### sample. Written as a boot() statistic:
###   data   -- data.frame/data.table of per-server institution counts
###             (one column per inst_* feature)
###   i_samp -- row indices for this bootstrap replicate, supplied by boot()
### Zero counts are handled by additive smoothing. Note: the previous version
### divided by sum(inst_dist) + eps rather than sum(inst_dist + eps), so the
### smoothed "probabilities" summed to slightly more than 1; this version
### normalizes properly (the numerical difference is on the order of eps).
gov_diversity <- function(data, i_samp) {
  eps <- 0.000001
  inst_dist <- colSums(data[i_samp, ])
  inst_dist <- (inst_dist + eps) / sum(inst_dist + eps)
  ### entropy of the smoothed distribution; vectorized form of -x*log(x)
  -sum(inst_dist * log(inst_dist))
}
### Bootstrap the population-level entropy per fine size bin: 1000 resamples
### per bin, keeping the 1st/50th/99th percentiles of the bootstrap
### distribution as error bounds.
plot_diversity_data4 <- mw_train[,{ttt <- boot(.SD[,c(grep("^inst_", names(.SD))), with=F], gov_diversity, R=1000, parallel = "multicore", ncpus = 8);
tttq <- unlist(quantile(ttt$t, c(0.99, 0.50, 0.01)))
list(pop_entropy=tttq[2], pop_entropy_low=tttq[3], pop_entropy_high=tttq[1])
},by=pop_size_factor_fine]
### Bar plot of bootstrapped entropy with percentile error bars.
### BUG FIX: a misplaced ")" after scale_x_discrete() previously closed the
### plot_diversity assignment early, so scale_y_continuous/theme_bw/
### coord_cartesian/geom_errorbar were printed but never stored in the object.
(plot_diversity <- ggplot(plot_diversity_data4, aes(x=pop_size_factor_fine, y=pop_entropy)) + geom_bar(stat='identity') + geom_smooth() + scale_x_discrete("Server size", labels=c("(0,5]", "(5,10]", "(10, 50]", "(50,100]", "(100, 500]", "(500, 1000]")) + scale_y_continuous("Population-level diversity in governance style") + theme_bw() + coord_cartesian(ylim=c(1.5, 2.5)) + geom_errorbar(aes(ymin = pop_entropy_low, ymax = pop_entropy_high)))
### Entropy heatmap over (server size x core members), point estimate only.
(plot_diversity_scaling <- ggplot(mw_train[,.(pop_entropy={inst_dist<-colSums(.SD[,grep("^inst_", names(mw_train)),with=FALSE]); inst_dist<-(inst_dist+0.000001)/(sum(inst_dist)+0.000001); sum(sapply(inst_dist, function(x) {-x*log(x)})) }),by=.(perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=pop_entropy)) + scale_fill_gradient2(high="#91cf60", mid="#ffffbf", low="#fc8d59", midpoint=1.2) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Entropy", reverse=TRUE)))
### Same heatmap with the entropy bootstrapped per (perf, size) cell;
### only the bootstrap median is kept.
plot_diversity_scaling_boot_data <- mw_train[,.(pop_entropy={
ttt <- boot(.SD[,c(grep("^inst_", names(.SD))), with=F], gov_diversity, R=1000, parallel = "multicore", ncpus = 8);
tttq <- unlist(quantile(ttt$t, c(0.99, 0.50, 0.01), names=FALSE));
#list(pop_entropy=tttq[2], pop_entropy_low=tttq[3], pop_entropy_high=tttq[1])
tttq[2]
}),by=.(perf_factor, pop_size_factor)]
(plot_diversity_scaling_bootstrapped <- ggplot(plot_diversity_scaling_boot_data, aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=pop_entropy)) + scale_fill_gradient2(high="#91cf60", mid="#ffffbf", low="#fc8d59", midpoint=1.2) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Entropy", reverse=TRUE)))
### community model
### Robust regression (MASS::rlm) of success (y) on size, longevity, dataset
### indicators, plugin categories, and size-interacted governance features.
(lm_comm <- rlm(y ~ srv_max_log + srv_max_log*weeks_up_todate + date_ping_int + jubilees + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + gov*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
### Show only terms with |t| >= 2.
asdt(tidy(lm_comm))[abs(statistic)>=2]
#### size model (or not)
(lm_size <- rlm(srv_max_log ~ weeks_up_todate + date_ping_int + jubilees + log_plugin_count + dataset_reddit + dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + gov + inst_broadcast + inst_chat + inst_privateproperty + inst_shop + inst_action_space_up + inst_action_space_down + inst_boundary + inst_monitor_by_peer + inst_monitor_by_admin + inst_position_h + inst_position_v + aud_users*actions_audience + aud_admin*actions_audience + res_grief + res_ingame + res_players + res_realworld, data=mw_train))
### Reduced size model overwrites the full one above.
(lm_size <- rlm(srv_max_log ~ weeks_up_todate + date_ping_int + dataset_reddit + dataset_mcs_org + plugin_count + gov + res_grief + res_ingame + res_players + res_realworld, data=mw_train))
asdt(tidy(lm_size))[abs(statistic)>=2]
### resource models
(lm_grief <- rlm(res_grief ~ srv_max_log + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + gov*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
### BUG FIX: this previously re-tidied lm_comm (a copy-paste of the line
### above after the community model); the grief model is what was just fit.
asdt(tidy(lm_grief))[abs(statistic)>=2]
summary(lm_comm <- rlm(y ~ srv_max_log + srv_max_log*weeks_up_todate + date_ping_int + jubilees + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + res_grief*srv_max_log + res_ingame*srv_max_log + res_players*srv_max_log + res_realworld*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + actions_user*srv_max_log + use_coarseauto*srv_max_log + use_coarsemanual*srv_max_log + use_fineauto*srv_max_log + use_finemanual*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
| /step8_results.R | no_license | enfascination/mc_scale_analysis | R | false | false | 37,125 | r |
### initialize globals
### Paths and shared helpers for this analysis script; local_settings.R is
### expected to define pathData and pathImages.
pathLocal <- '/Users/sfrey/projecto/research_projects/minecraft/redditcommunity/'
source(paste0(pathLocal,"local_settings.R"))
source(paste0(pathLocal,"lib_step6_analysis.r"))
source(paste0(pathLocal,"lib_plotting.r"))
library(boot)
library(ggthemes)
library(scales)
### Wide per-server governance table produced by step 6; mw_train is an alias
### (no train/test split is applied here).
mw <- readRDS(paste0(pathData, "step6_servers_wide_govanalysis.rds"))
mw_train <- mw
### mere data density
### Heatmap of raw server counts per (size x success) cell, on a grey
### log-ish scale (rescaled breaks at powers of 4).
(plot_srv_density <- make_plot_size_by_success(mw_train, "weeks_up_total", function(x,i) nrow(x[i]), ggmore=scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)), breaks=c(0,4,16,64,256,1024)), ggguide=guide_legend("Server\ncount", reverse=TRUE), reps=10))
plot_srv_density <- plot_srv_density + guides(fill="none")
ggsave(plot_srv_density, file=paste0(pathImages, "plot_srv_density.png"), units='cm', width=3.25, height=2.5, scale=3)
# plot hazard log and linear
### Longevity (weeks up) distributions: log-count bar chart, median, dodged
### bars by size bin, then the size-by-success hazard heatmap.
(plot_srv_hazard_bar1 <- ggplot(mw_train[,.(longevity_count=.N),by=.(weeks_up_total)], aes(x=weeks_up_total, y=longevity_count)) + geom_bar(stat="identity") + theme_bw() + scale_y_log10("Count") + xlab("Longevity (weeks)") )
median( mw_train$weeks_up_total)
(plot_srv_hazard_bar2 <- ggplot(mw_train[,.(longevity_count=.N),by=.(weeks_up_total, pop_size_factor)], aes(x=weeks_up_total, y=longevity_count, fill=pop_size_factor )) + geom_bar(stat="identity", position="dodge") + theme_bw() + scale_y_continuous("Count") + xlab("Longevity (weeks)") )
(plot_srv_hazard <- make_plot_size_by_success(mw_train, "weeks_up_total", gov_median, ggmore=scale_fill_gradient(high="#3182bd", low="#cccccc"), ggguide="none", reps=1000 ) )
ggsave(plot_srv_hazard, file=paste0(pathImages, "plot_srv_hazard.png"), units='cm', width=3.25, height=2.5, scale=3)
ggsave(plot_srv_hazard_bar1, file=paste0(pathImages, "plot_srv_hazard_bar1.png"), units='cm', width=5, height=1.5, scale=3)
#plot increase in grief:
### gov going up or down
### governance against size against community
### Fill scales shared by the governance-scaling heatmaps: absolute counts,
### proportions in [0,1], ratios, and within-type ratios.
ggel <- scale_fill_gradient(high="#3182bd", low="#cccccc")
ggel_gov <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=2.5, breaks=seq(from=0,to=12,by=2))
ggel_gov_prop <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=0.5, breaks=seq(from=0,to=1,by=0.2))
ggel_gov_rat <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=0.10)
ggel_gov_rat_within <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
### Governance intensity, specialization, and governance/plugin ratios over
### (size x success), bootstrapped where reps > 0.
(plot_gov_scaling <- make_plot_size_by_success(mw_train, "gov", gov_mean , ggmore=ggel_gov, ggguide="none", reps=1000))
(plot_gov_specialization <- make_plot_size_by_success(mw_train, "plugin_specialization", gov_mean_narm , ggmore=ggel_gov, ggguide="none", reps=1000))
(plot_gov_scaling_ratio <- make_plot_size_by_success(mw_train, c("gov","plugin_count"), gov_median_proportion_1, ggmore=ggel_gov_rat, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100))
(plot_gov_scaling_ratio_antigrief <- make_plot_size_by_success(mw_train, c("res_grief","sum_resource"), gov_median_proportion_1_narm, ggmore=ggel_gov_rat, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100))
ggsave(plot_gov_scaling, file=paste0(pathImages, "plot_gov_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
ggsave(plot_gov_specialization, file=paste0(pathImages, "plot_gov_specialization.png"), units='cm', width=3.25, height=2.5, scale=3)
### resource management style by size:
### Faceted heatmap: governance intensity by plugin category among servers
### with any governance plugins.
(plot_gov_scaling_by_plugin_category <- make_plot_size_by_success(melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c(grep("^cat_", names(mw_train), value=TRUE)), variable.name = 'resource', value.name='resource_count'), c("resource_count"), gov_mean , ggmore=ggel_gov, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=10, facetting=c("resource")) + facet_wrap( ~ resource, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
#(plot_gov_scaling_by_resource_type_across_proportion <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c("gov", "res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count', variable.factor=FALSE), c("resource_count", "sum_resource"), gov_median_proportion_1 , ggmore=ggel_gov_rat, ggguide=guide_legend("% governance\nplugins", reverse=TRUE), reps=100, facetting=c("resource")) + facet_wrap( ~ resource, ncol=2)+ theme(strip.background=element_rect(color="white", fill="white")))
### Nonlinear gradient (values squared) to spread low counts apart.
ggel_gov_by_type <- scale_fill_gradientn(colors=(seq_gradient_pal(low=muted("#91cf60", l=100, c=100), high=muted("#fc8d59", l=100, c=100)))(rescale(seq(from=0,to=10,by=2))), values=rescale(seq(from=0,to=10,by=2)^2))
#scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)), breaks=c(0,4,16,64,256,1024))
### Braced block: melt resource counts long, relabel, plot per-resource
### heatmaps, and save the anti-grief panel.
{
gg <- melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_resource"), measure.vars = c("res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count', variable.factor=FALSE)
gg[,resource:=factor(resource, levels=c("res_grief", "res_ingame", "res_realworld", "res_players", "res_attention"), labels=c("Grief", "In-game", "Real-world", "Player community", "Mod cognitive"))]
(plot_gov_scaling_by_resource_type <- make_plot_size_by_success(gg, c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000, facetting=c("resource")) + facet_wrap( ~ resource, ncol=1)+ theme(strip.background=element_rect(color="white", fill="white"), axis.text=element_text(size=6)))
ggsave(plot_gov_scaling_by_resource_type, file=paste0(pathImages, "plot_gov_scaling_by_resource_type.png"), units='cm', width=2.25, height=5, scale=3)
(plot_antigrief_scaling <- make_plot_size_by_success(gg[resource=="Grief"], c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
(plot_antigrief_ratio_scaling <- make_plot_size_by_success(gg, c("resource_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
### NOTE(review): the next line forward-references ggel_govaud2, which is not
### defined until the "governance audience" section below, and duplicates
### pop_size_factor in the .() selector; it also reappears verbatim in that
### later section. Looks like a stray paste -- confirm it runs as intended.
(plot_gov_scaling_by_aud_type2 <- make_plot_size_by_success(mw_train[,.(perf_factor, pop_size_factor, pop_size_factor, ratio_aud)], c("ratio_aud"), gov_mean, ggmore=ggel_govaud2, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100, ggtext=FALSE))
ggsave(plot_antigrief_scaling, file=paste0(pathImages, "plot_antigrief_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
}
### institution by size:
### Braced block: melt institution counts long for servers with governance
### plugins, keep four focal institutions, relabel, plot, and save.
{
gg <- melt(mw_train[gov>0], id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count', variable.factor=FALSE)
gginclude <- c("inst_action_space_down", "inst_chat", "inst_privateproperty", "inst_shop")
gg <- gg[institution %in% gginclude]
gg[,institution:=factor(institution, levels=gginclude, labels=c("Proscriptions", "Chat", "Property", "Exchange"))]
(plot_gov_scaling_by_inst_type <- make_plot_size_by_success(gg, c("institution_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000, facetting=c("institution")) + facet_wrap( ~ institution, ncol=1)+ theme(strip.background=element_rect(color="white", fill="white"), axis.text=element_text(size=6)))
(plot_actiondown_scaling <- make_plot_size_by_success(gg[institution == "Proscriptions"], c("institution_count"), gov_median , ggmore=ggel_gov_by_type, ggguide="none", reps=1000) )
ggsave(plot_actiondown_scaling, file=paste0(pathImages, "plot_actiondown_scaling.png"), units='cm', width=3.25, height=2.5, scale=3)
}
### institution by size as a fraction of total institutions
#(plot_gov_scaling_by_inst_type_proportion <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count'), c("institution_count","sum_institution"), gov_median_proportion_1 , ggmore=ggel_gov_rat, ggguide=guide_legend("% governance\nplugins", reverse=TRUE), reps=0, facetting=c("institution")) + facet_wrap( ~ institution, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### institution by size as a fraction of within that type of institution
### but this ultimately gives less info that the original calculation, and less valuable, so back to original/
#dataa <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio", "sum_institution"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count'), c("institution_count","sum_institution"), gov_mean_proportion_1, reps=0, facetting=c("institution"), return_plot=FALSE)[,pop_var:=pop_var/sum(pop_var),by=institution]
#(plot_gov_scaling_by_inst_type_proportion <- ggplot(dataa[,.(xvar=pop_size_factor_coarse, yvar=perf_factor,institution, pop_var)], aes(x=xvar, y=yvar)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + geom_bin2d(aes(fill=pop_var)) + theme_bw() + theme(panel.grid.major=element_line(0)) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend("% governance\nplugins", reverse=TRUE)) + ggel_gov_rat_within + facet_wrap( ~ institution, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### governance audience
### Governance intensity by plugin audience (aud_* columns), faceted, and a
### governance ratio heatmap by audience ratio.
ggel_govaud <- scale_fill_gradient2(low="#91cf60", mid="#f0f0f0", high="#fc8d59", midpoint=3 )
(plot_gov_scaling_by_aud_type <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio"), measure.vars = c(grep("^aud_[^n]", names(mw_train), value=TRUE)), variable.name = 'audience', value.name='audience_count'), "audience_count", gov_mean , ggmore=ggel_govaud, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=0, facetting=c("audience")) + facet_wrap( ~ audience, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
ggel_govaud2 <- scale_fill_gradient(low="#f0f0f0", high=muted("#fc8d59", l=80,c=100))
(plot_gov_scaling_by_aud_type2 <- make_plot_size_by_success(mw_train[,.(perf_factor, pop_size_factor, pop_size_factor, ratio_aud)], c("ratio_aud"), gov_mean, ggmore=ggel_govaud2, ggguide=guide_legend("Ratio\ngovernance", reverse=TRUE), reps=100, ggtext=FALSE))
#ggsave(plot_gov_scaling_by_aud_type2, file=paste0(pathImages, "plot_gov_scaling_by_aud_type2.png"), units='cm', width=4, height=2.5, scale=3)
ggsave(plot_gov_scaling_by_aud_type, file=paste0(pathImages, "plot_gov_scaling_by_aud_type.png"), units='cm', width=4, height=2.5, scale=3)
## uniques vs core members
### Visitor-type composition plot (helper from lib_plotting.r), saved twice
### at different aspect ratios.
plot_population_distribution_rect <- plot_visitortype(mw, plot_type='vertical')
#ggsave(plot_population_distribution, file=paste0(pathImages, "plot_population_distribution.png"), units='cm', width=5, height=2.5, scale=5)
ggsave(plot_population_distribution_rect, file=paste0(pathImages, "plot_population_distribution_rect.png"), units='cm', width=2, height=3, scale=5)
ggsave(plot_population_distribution_rect, file=paste0(pathImages, "plot_population_distribution_rect2.png"), units='cm', width=3, height=2, scale=5)
### now plot uniques against size and success
### Median unique visits per (size x success) cell, log2 variant then the
### saved grey-scale variant; plus a table of unsuccessful-server ratios.
(make_plot_size_by_success(mw_train, "nuvisits12", function(x,i) log2(gov_median(x, i)), ggmore=scale_fill_gradient(low="#d9d9d9", high="#525252"), ggguide=guide_legend("Unique visits", reverse=TRUE), reps=10))
(plot_srv_density_uvisits <- make_plot_size_by_success(mw_train, "nuvisits12", function(x,i) gov_median(x, i), ggmore=scale_fill_gradientn(colors=grey(seq(from=0.6,to=0.3,length.out=6)), values=rescale(c(0,4,16,64,256,1024)^2), breaks=c(0,4,16,64,256,1024)), ggguide=guide_legend("Unique visits", reverse=TRUE), reps=10))
mw_train[,.(unsuccessful=sum(table(perf_factor)[1:2]),all=sum(table(perf_factor)),ratio=sum(table(perf_factor)[1:2])/sum(table(perf_factor))), by=pop_size_factor]
ggsave(plot_srv_density_uvisits, file=paste0(pathImages, "plot_srv_density_uvisits.png"), units='cm', width=3.25, height=2.5, scale=3)
### server diversity
### bootstrapping function for entropy
ggel_lowbad <- scale_fill_gradient(high="#41ab5d", low="#cccccc")
#(make_plot_size_by_success(mw_train, grep("^inst_[^n]", names(mw_train), value=TRUE), gov_entropy_diversity, ggguide=guide_legend("Entropy"), ggmore=ggel_lowbad, reps=10))
(plot_srv_institutional_diversity <- (make_plot_size_by_success(mw_train, grep("^inst_[^n]", names(mw_train), value=TRUE), gov_dist, ggguide=guide_legend("Variability"), ggmore=ggel_lowbad, reps=10, ggtext=FALSE)))
ggsave(plot_srv_institutional_diversity, file=paste0(pathImages, "plot_srv_institutional_diversity.png"), units='cm', width=4, height=2.5, scale=3)
#ggsave(plot_srv_institutional_diversity, file=paste0(pathImages, "plot_srv_institutional_diversity_entropy.png"), units='cm', width=4, height=2.5, scale=3)
### within-server diversity
(make_plot_size_by_success(mw_train, "srv_entropy", gov_median, ggmore=ggel_lowbad, ggguide=guide_legend("Pluralism", reverse=TRUE), reps=10))
### uptime %?
### number of weeks up (longevity)
ggel_longevity <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=15)
(make_plot_size_by_success(mw_train, "weeks_up_total", gov_median, ggmore=ggel_longevity, ggguide=guide_legend("Longevity", reverse=TRUE), reps=1000))
### number of signs (informal governance)
ggel_signs <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
(make_plot_size_by_success(mw_train, "sign_count", function(x,i) median(as.double(asdf(x[i][!is.na(sign_count)])[,1])), ggmore=ggel_signs, ggguide=guide_legend("Norms", reverse=TRUE), reps=0))
### maintenance style
ggel_maint <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59")
gov_features <- grep("^use_[^n]", names(mw_train), value=TRUE)
for (i in 1:length(gov_features)){
print(make_plot_size_by_success(mw_train, gov_features, gov_median_proportion_2, ggguide=guide_legend(paste0(gov_features[i])), ggmore=ggel_maint, reps=5, focal=i))
}
ggel_govmaint <- scale_fill_gradient2(low="#91cf60", mid="#f0f0f0", high="#fc8d59", midpoint=3 )
(plot_gov_scaling_by_maint_type <- make_plot_size_by_success(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "pop_size_factor_coarse", "perf_factor", "perf_factor_ratio"), measure.vars = c(grep("^use_[^n]", names(mw_train), value=TRUE)), variable.name = 'maintain', value.name='maintain_count'), "maintain_count", gov_median , ggmore=ggel_govmaint, ggguide=guide_legend("Governance\nplugins", reverse=TRUE), reps=0, facetting=c("maintain")) + facet_wrap( ~ maintain, ncol=4)+ theme(strip.background=element_rect(color="white", fill="white")))
### jubilees
ggel_v <- scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1)
(make_plot_size_by_success(mw_train, "jubilees", gov_median, ggmore=ggel_v, ggguide=guide_legend("Updates", reverse=TRUE), reps=1000) )
ggplot(mw_train, aes(x=weeks_up_total, y=jubilees)) + geom_jitter(height=0.5, width=0) ### the patern above occurs even though longevity and jubilees are psoitively correlated without controlling for size
### audience
### cowplot merge
#plot increase in grief:
### fix pred_hist plotting of histograms with fake data
pred_hist <- mc
#pred_hist_fake1 <- pred_hist[srv_max>200 & srv_max<400 & resource=="players", ]
#pred_hist_fake1[,':='(resource='performance')]
#pred_hist_fake2 <- pred_hist[srv_max>200 & srv_max<400 & resource=="players", ]
#pred_hist_fake2[,':='(resource='realmoney')]
#pred_hist <- rbind(pred_hist, pred_hist_fake1, pred_hist_fake2)
pred_hist[ ,':='(
institution_name={ifelse( gov==1 , "Other gov", "Misc") %>%
#ifelse( gov==1 & institution %in% c("noinstitution", "monitor", "action_space"), "Misc", '') %>%
ifelse( gov==1 & institution == "boundary", "Entry restrictions", .) %>%
ifelse( gov==1 & institution == "action_space_up", "More player actions", .) %>%
ifelse( gov==1 & institution == "action_space_down", "Fewer player actions", .) %>%
ifelse( gov==1 & institution == "shop", "Economy", .) %>%
ifelse( gov==1 & institution == "chat", "Communication", .) %>%
ifelse( gov==1 & institution == "privateproperty", "Private property", .) %>%
ifelse( gov==1 & institution == "broadcast", "Admin broadcast", .) %>%
ifelse( gov==1 & institution == "monitor_by_peer", "Peer monitoring", .) %>%
ifelse( gov==1 & institution == "monitor_by_admin", "Admin monitoring", .) %>%
ifelse( gov==1 & institution == "position_v", "More groups, vertical", .) %>%
ifelse( gov==1 & institution == "position_h", "More groups, horizontal", .) %>%
ifelse( gov==1 & institution == "payoff", "Incentives", .) %>%
factor(levels=c( "Communication", "Private property", "Economy", "More player actions", "Entry restrictions", "Fewer player actions", "Admin broadcast", "Peer monitoring", "Admin monitoring", "More groups, vertical", "More groups, horizontal", "Other gov", "Misc"))
},
resource_name={
ifelse( gov==1 & resource == "noresource", "Not resource-related", "Not resource-related") %>%
ifelse( gov==1 & resource == "grief", "Anti-grief", .) %>%
ifelse( gov==1 & resource == "ingame", "Game-related\nresources", .) %>%
ifelse( gov==1 & resource == "performance", "Server performance", .) %>%
ifelse( gov==1 & resource == "players", "Player community", .) %>%
ifelse( gov==1 & resource == "realmoney", "Server provisioning", .) %>%
factor(levels=c( "Anti-grief", "Game-related\nresources", "Server performance", "Server provisioning", "Player community", "Not resource-related"))
},
gov_factor=factor(gov, levels=c(1,0), labels=c("Governance-related", "Game-related"))
) ]
xaxis_size_factor <- scale_x_discrete("Server size", labels=c("(0,5]", "(5,10]", "(10, 50]", "(50,100]", "(100, 500]", "(500, 1000]"))
### Each online community can be seen as a bundle of collective action problems. Larger servers are more likely to have to install governance modules that mitigate such problems. among 4000 plugins on 1300 active servers, large servers are more likely to face problems with server performance (CPU/RAM/lag), server provisioning (paying server fees), and maintaining the player community (aiding and coordinating community members).
plot_color1 <- scale_fill_brewer("Resource type", type="qual",palette=1)
plot_color2 <- scale_fill_manual("Resource type", values=c("#666666", "#bf5b17", "#ffff99")) ### for consistentcy. see http://colorbrewer2.org/#type=qualitative&scheme=Accent&n=6
(plot_resource_types_1 <- ggplot(pred_hist[resource %ni% c("grief", "ingame"),], aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,5,10,50,100,500,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_resource_types_2 <- ggplot(pred_hist[gov== 0 | resource %in% c("grief", "ingame"),], aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color2 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_resource_types_x <- ggplot(pred_hist, aes(x=srv_max, fill=resource_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue() + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_resource_types_abs_x <- ggplot(pred_hist, aes(x=srv_max, fill=resource_name)) + geom_histogram(position="dodge", bins=6, binwidth=0.5)+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue() + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,3.1,10,31,100,310,1000), alpha=0.3))
ggsave(plot_resource_types_1, file=paste0(pathImages, "plot_resource_types_1.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_resource_types_2, file=paste0(pathImages, "plot_resource_types_2.png"), units='cm', width=2.25, height=1, scale=6)
# Custom qualitative palettes for the three institution-type histogram panels:
# n rainbow hues per panel plus grey50 for the residual "Other gov" category.
# NOTE(review): plot_color1/plot_color2 are REASSIGNED here, shadowing the
# resource-type palettes defined earlier (scale_fill_brewer / scale_fill_manual
# above) — execution order matters if the resource plots are re-run later.
plot_color1 <- scale_fill_manual("Institution type", values=c(rainbow(4, start=15/540, end=105/540, s=0.8, v=0.9 ), 'grey50'))
plot_color2 <- scale_fill_manual("Institution type", values=c(rainbow(4, start=200/540, end=360/540, s=0.8, v=0.9 ), 'grey50'))
plot_color3 <- scale_fill_manual("Institution type", values=c(rainbow(3, start=240/360, end=360/360, s=0.8, v=0.9 ), 'grey50'))
# Institution subsets shown in each of the three panels (filter3's trio plus
# "Other gov" makes up panel 3, etc.).
filter1 <- c("monitor_by_admin", "position_v", "action_space_down", "broadcast")
filter2 <- c("monitor_by_peer","position_h", "privateproperty","action_space_up")
filter3 <- c("boundary","chat","shop" )
(plot_institution_types_1 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter1) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
(plot_institution_types_2 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter2) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color2 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
plot_institution_types_3 <- ggplot(pred_hist[gov == 1 & (institution_name == "Other gov" | institution %in% filter3) ], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + plot_color3 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3); plot_institution_types_3
(plot_institution_types_x <- ggplot(pred_hist[gov == 1], aes(x=srv_max, fill=institution_name)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Plugin proportions by type") + scale_fill_hue("Institution type") + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
ggsave(plot_institution_types_1, file=paste0(pathImages, "plot_institution_types_1.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_institution_types_2, file=paste0(pathImages, "plot_institution_types_2.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_institution_types_3, file=paste0(pathImages, "plot_institution_types_3.png"), units='cm', width=2.25, height=1, scale=6)
### gov going up or down
(plot_gov_count <- ggplot(mw_train, aes(x=srv_max, y=(gov+1))) + geom_jitter(height=0.4, width=0.05, color="dark grey", size=0.5) + scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_log10("Governance plugins") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3) + geom_smooth(method="rlm", color="black"))
(plot_gov_relative <- ggplot(pred_hist, aes(x=srv_max, fill=gov_factor)) + geom_histogram(position="fill", breaks=c(0,0.7,1,1.7,2,2.7,3), closed='right')+ scale_x_log10("Server size", breaks=c(0,1,10,100,1000),limits=c(1,1000))+ scale_y_continuous("Increase in governance intensity") + plot_color1 + theme_bw() + theme(aspect.ratio=0.6, plot.margin = unit(c(0,0,0,0), "cm")) + geom_vline(xintercept=c(1,10,100,1000), alpha=0.3))
ggsave(plot_gov_count, file=paste0(pathImages, "plot_gov_count.png"), units='cm', width=2.25, height=1, scale=6)
ggsave(plot_gov_relative, file=paste0(pathImages, "plot_gov_relative.png"), units='cm', width=2.25, height=1, scale=6)
### governance against size against community
(plot_gov_scaling <- ggplot(mw_train[,.(gov=median(gov)),by=.(perf_factor, pop_size_factor_coarse)], aes(x=pop_size_factor_coarse, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=2.5, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)))
(plot_gov_scaling_by_resource_type <- ggplot(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "perf_factor"), measure.vars = c("gov", "res_grief", "res_ingame", "res_realworld", "res_players"), variable.name = 'resource', value.name='resource_count')[,.(gov=mean(resource_count)),by=.(resource, perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0), strip.background=element_rect(color="white", fill="white")) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)) + facet_wrap( ~ resource, ncol=1))
(plot_gov_scaling_by_inst_type <- ggplot(melt(mw_train, id.vars = c("srv_addr", "y", "srv_max", "pop_size_factor", "perf_factor"), measure.vars = c("gov", grep("^inst_", names(mw_train), value=TRUE)), variable.name = 'institution', value.name='institution_count')[,.(gov=mean(institution_count)),by=.(institution, perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=gov)) + scale_fill_gradient2(low="#91cf60", mid="#ffffbf", high="#fc8d59", midpoint=1, breaks=seq(from=0,to=12,by=2)) + theme_bw() + theme(panel.grid.major=element_line(0), strip.background=element_rect(color="white", fill="white")) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Governance\nplugins", reverse=TRUE)) + facet_wrap( ~ institution, ncol=4))
### resource management style by size:
ggplot(data=melt(training_full_lasso, id.vars = c("srv_addr", "srv_max", "y"), measure.vars = c("res_grief", "res_ingame", "res_realworld", "res_players", "res_attention"), variable.name = 'resource', value.name='resource_count'),aes(x=srv_max, y=resource_count)) + geom_jitter(size=0.1, height=0.1, width=0.1) + scale_x_log10() + geom_smooth(method='rlm') + facet_wrap(~resource, ncol=2)
### institution by size:
ggplot(data=melt(mw_train, id.vars = c("srv_addr", "srv_max", "y"), measure.vars = grep("^inst_", names(mw_train)), variable.name = 'institution', value.name='institution_count'),aes(x=srv_max, y=institution_count)) + geom_jitter(size=0.1, height=0.1, width=0.1) + scale_x_log10() + geom_smooth(method='rlm') + facet_wrap(~institution, ncol=2)
ggsave(plot_gov_scaling, file=paste0(pathImages, "plot_gov_scaling.png"), units='cm', width=2.25, height=1, scale=6)
### server diversity
plot_diversity_data <- mw_train[,.(srv_max, srv_max_log,pop_size_factor, srv_entropy), by=srv_addr]
plot_diversity_data2 <- mw_train[,.( pop_entropy={inst_dist<-colSums(.SD[,grep("^inst_", names(mw_train)),with=FALSE]); inst_dist<-(inst_dist+0.000001)/(sum(inst_dist)+0.000001); sum(sapply(inst_dist, function(x) {-x*log(x)})) }), by=pop_size_factor]
plot_diversity_data <- merge(plot_diversity_data, plot_diversity_data2[,.(pop_size_factor, pop_entropy)], all.x=T, all.y=F, by="pop_size_factor")
plot_diversity_data[,srv_entropy_agg1:=mean(srv_entropy), by=pop_size_factor]
plot_diversity_data[srv_entropy!=0,srv_entropy_agg2:=mean(srv_entropy), by=pop_size_factor]
plot_diversity_data[,srv_entropy_agg3:=median(srv_entropy), by=pop_size_factor]
### each server draws on a greater variety of governance styles as it gets larger, but they also become less different from each other.
ggplot(plot_diversity_data, aes(x=srv_max, y=srv_entropy)) + geom_point() + scale_x_log10() + geom_line(data=plot_diversity_data[srv_entropy!=0,],aes(x=srv_max, y=srv_entropy_agg2), color='red') + geom_line(aes(x=srv_max, y=srv_entropy_agg1), color='blue') + geom_line(aes(x=srv_max, y=srv_entropy_agg3), color='orange') + geom_line(aes(x=srv_max, y=pop_entropy), color='green')
### focus on decrease in difference over time
(plot_diversity <- ggplot(plot_diversity_data2, aes(x=pop_size_factor, y=pop_entropy)) + geom_bar(stat='identity') + geom_smooth() + xaxis_size_factor + scale_y_continuous("Population-level diversity in governance style") + theme_bw() )
# now bootstrap the stat
# Statistic function for boot(): Shannon entropy (in nats) of the
# institution-count distribution over a bootstrap resample of servers.
#
# data   - data frame / data.table of per-server institution counts
# i_samp - row indices of the bootstrap resample (supplied by boot())
#
# Column sums give the population-level count per institution type; a tiny
# epsilon (1e-06) keeps all-zero columns from producing log(0) = -Inf.
gov_diversity <- function(data, i_samp) {
  counts <- colSums(data[i_samp, ])
  probs <- (counts + 1e-06) / (sum(counts) + 1e-06)
  -sum(probs * log(probs))
}
plot_diversity_data4 <- mw_train[,{ttt <- boot(.SD[,c(grep("^inst_", names(.SD))), with=F], gov_diversity, R=1000, parallel = "multicore", ncpus = 8);
tttq <- unlist(quantile(ttt$t, c(0.99, 0.50, 0.01)))
list(pop_entropy=tttq[2], pop_entropy_low=tttq[3], pop_entropy_high=tttq[1])
},by=pop_size_factor_fine]
### Bootstrapped governance-diversity-by-size bar chart with error bars.
### BUG FIX: the original closed the `(plot_diversity <- ...)` group right
### after scale_x_discrete() (note the triple paren there), so the y-scale,
### theme_bw(), coord_cartesian() and geom_errorbar() layers were only
### auto-printed and never stored in plot_diversity. The closing paren now
### wraps the whole chain, matching the sibling plot at the line above
### that uses xaxis_size_factor.
(plot_diversity <- ggplot(plot_diversity_data4, aes(x=pop_size_factor_fine, y=pop_entropy)) + geom_bar(stat='identity') + geom_smooth() + scale_x_discrete("Server size", labels=c("(0,5]", "(5,10]", "(10, 50]", "(50,100]", "(100, 500]", "(500, 1000]")) + scale_y_continuous("Population-level diversity in governance style") + theme_bw() + coord_cartesian(ylim=c(1.5, 2.5)) + geom_errorbar(aes(ymin = pop_entropy_low, ymax = pop_entropy_high)))
(plot_diversity_scaling <- ggplot(mw_train[,.(pop_entropy={inst_dist<-colSums(.SD[,grep("^inst_", names(mw_train)),with=FALSE]); inst_dist<-(inst_dist+0.000001)/(sum(inst_dist)+0.000001); sum(sapply(inst_dist, function(x) {-x*log(x)})) }),by=.(perf_factor, pop_size_factor)], aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=pop_entropy)) + scale_fill_gradient2(high="#91cf60", mid="#ffffbf", low="#fc8d59", midpoint=1.2) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Entropy", reverse=TRUE)))
plot_diversity_scaling_boot_data <- mw_train[,.(pop_entropy={
ttt <- boot(.SD[,c(grep("^inst_", names(.SD))), with=F], gov_diversity, R=1000, parallel = "multicore", ncpus = 8);
tttq <- unlist(quantile(ttt$t, c(0.99, 0.50, 0.01), names=FALSE));
#list(pop_entropy=tttq[2], pop_entropy_low=tttq[3], pop_entropy_high=tttq[1])
tttq[2]
}),by=.(perf_factor, pop_size_factor)]
(plot_diversity_scaling_bootstrapped <- ggplot(plot_diversity_scaling_boot_data, aes(x=pop_size_factor, y=perf_factor)) + geom_bin2d(aes(fill=pop_entropy)) + scale_fill_gradient2(high="#91cf60", mid="#ffffbf", low="#fc8d59", midpoint=1.2) + theme_bw() + theme(panel.grid.major=element_line(0)) + scale_y_discrete("Core members", labels=c("0", "", "", "10", "", "", "100")) + coord_fixed(ratio=6/7) + scale_x_discrete("Server size", labels=c(5,10,50,100,500,1000)) + guides(fill=guide_legend(title="Entropy", reverse=TRUE)))
### community model
(lm_comm <- rlm(y ~ srv_max_log + srv_max_log*weeks_up_todate + date_ping_int + jubilees + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + gov*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
asdt(tidy(lm_comm))[abs(statistic)>=2]
#### size model (or not)
(lm_size <- rlm(srv_max_log ~ weeks_up_todate + date_ping_int + jubilees + log_plugin_count + dataset_reddit + dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + gov + inst_broadcast + inst_chat + inst_privateproperty + inst_shop + inst_action_space_up + inst_action_space_down + inst_boundary + inst_monitor_by_peer + inst_monitor_by_admin + inst_position_h + inst_position_v + aud_users*actions_audience + aud_admin*actions_audience + res_grief + res_ingame + res_players + res_realworld, data=mw_train))
(lm_size <- rlm(srv_max_log ~ weeks_up_todate + date_ping_int + dataset_reddit + dataset_mcs_org + plugin_count + gov + res_grief + res_ingame + res_players + res_realworld, data=mw_train))
asdt(tidy(lm_size))[abs(statistic)>=2]
### resource models
(lm_grief <- rlm(res_grief ~ srv_max_log + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + gov*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
### Show coefficients with |t| >= 2 for the grief model fitted just above.
### BUG FIX: was asdt(tidy(lm_comm)) — a copy-paste slip from the community
### model; every other model here is followed by a tidy() of itself
### (lm_comm -> tidy(lm_comm), lm_size -> tidy(lm_size)).
asdt(tidy(lm_grief))[abs(statistic)>=2]
summary(lm_comm <- rlm(y ~ srv_max_log + srv_max_log*weeks_up_todate + date_ping_int + jubilees + srv_max_log*log_plugin_count + srv_max_log*dataset_reddit + srv_max_log*dataset_mcs_org + cat_fun + cat_general + cat_mechanics + cat_misc + cat_roleplay + cat_teleportation + cat_world + cat_fixes + cat_worldgen + res_grief*srv_max_log + res_ingame*srv_max_log + res_players*srv_max_log + res_realworld*srv_max_log + aud_users*srv_max_log + aud_admin*srv_max_log + actions_user*srv_max_log + use_coarseauto*srv_max_log + use_coarsemanual*srv_max_log + use_fineauto*srv_max_log + use_finemanual*srv_max_log + inst_broadcast*srv_max_log + inst_chat*srv_max_log + inst_privateproperty*srv_max_log + inst_shop*srv_max_log + inst_action_space_up*srv_max_log + inst_action_space_down*srv_max_log + inst_boundary*srv_max_log + inst_monitor_by_peer*srv_max_log + inst_monitor_by_admin*srv_max_log + inst_position_h*srv_max_log + inst_position_v*srv_max_log + aud_users:actions_audience:srv_max_log + aud_admin:actions_audience:srv_max_log, data=mw_train))
|
#' Photo classifications: fashion or not
#'
#' This is a simulated data set for photo classifications based on a machine
#' learning algorithm versus what the true classification is for those photos.
#' While the data are not real, they resemble performance that would be
#' reasonable to expect in a well-built classifier.
#'
#' The hypothetical ML algorithm has a precision of 90\%, meaning of those
#' photos it claims are fashion, about 90\% of them are actually about fashion.
#' The recall of the ML algorithm is about 64\%, meaning of the photos that are
#' about fashion, it correctly predicts that they are about fashion about 64\%
#' of the time.
#'
#' @name photo_classify
#' @docType data
#' @format A data frame with 1822 observations on the following 2 variables.
#' \describe{
#' \item{mach_learn}{The prediction by the machine learning system as to whether the photo is about fashion or not.}
#' \item{truth}{The actual classification of the photo by a team of humans.}
#' }
#' @source The data are simulated / hypothetical.
#' @keywords datasets
#' @examples
#'
#' data(photo_classify)
#' table(photo_classify)
#'
"photo_classify"
| /R/data-photo_classify.R | permissive | tessington/qsci381 | R | false | false | 1,152 | r | #' Photo classifications: fashion or not
#'
#' This is a simulated data set for photo classifications based on a machine
#' learning algorithm versus what the true classification is for those photos.
#' While the data are not real, they resemble performance that would be
#' reasonable to expect in a well-built classifier.
#'
#' The hypothetical ML algorithm has a precision of 90\%, meaning of those
#' photos it claims are fashion, about 90\% of them are actually about fashion.
#' The recall of the ML algorithm is about 64\%, meaning of the photos that are
#' about fashion, it correctly predicts that they are about fashion about 64\%
#' of the time.
#'
#' @name photo_classify
#' @docType data
#' @format A data frame with 1822 observations on the following 2 variables.
#' \describe{
#' \item{mach_learn}{The prediction by the machine learning system as to whether the photo is about fashion or not.}
#' \item{truth}{The actual classification of the photo by a team of humans.}
#' }
#' @source The data are simulated / hypothetical.
#' @keywords datasets
#' @examples
#'
#' data(photo_classify)
#' table(photo_classify)
#'
"photo_classify"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotNTheor.R
\name{plotNTheor}
\alias{plotNTheor}
\title{Plot the number of theoretical random fragments}
\usage{
plotNTheor(
x,
tit = "Number of term and intern fragm",
xlab = "Number of aa",
ylab = "",
col = 2:3,
log = "",
mark = NULL,
cexMark = 0.75
)
}
\arguments{
\item{x}{(integer) length (in amino-acids) of input peptides/proteins to be considered}
\item{tit}{(character) custom title}
\item{xlab}{(character) custom x-axis label}
\item{ylab}{(character) custom y-axis label}
\item{col}{(character or integer) custom colors}
\item{log}{(character) define which axis should be log (use "xy" for drawing both x- and y-axis as log-scale)}
\item{mark}{(matrix) first column for text and second column for where it should be stated along the top border of the figure (x-coordinate)}
\item{cexMark}{(numeric) cex expansion-factor for text from argument \code{mark}}
}
\value{
figure only
}
\description{
This simple function allows plotting the expected number of theoretical fragments from random fragmentation of peptides/proteins (in mass spectrometry).
Here, only the pure fragmentation without any variable fragmentation is considered, all fragment-sizes are included (ie, no gating).
For simplicity, possible (variable) modifications like loss of neutrals, etc, are not considered.
}
\examples{
marks <- data.frame(name=c("Ubiquitin\n76aa", "Glutamate dehydrogenase 1\n501aa"),
length=c(76,501))
plotNTheor(x=20:750, log="", mark=marks)
}
\seealso{
\code{\link{AAfragSettings}}
}
| /man/plotNTheor.Rd | no_license | cran/wrTopDownFrag | R | false | true | 1,592 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotNTheor.R
\name{plotNTheor}
\alias{plotNTheor}
\title{Plot the number of theoretical random fragments}
\usage{
plotNTheor(
x,
tit = "Number of term and intern fragm",
xlab = "Number of aa",
ylab = "",
col = 2:3,
log = "",
mark = NULL,
cexMark = 0.75
)
}
\arguments{
\item{x}{(integer) length (in amino-acids) of input peptides/proteins to be considered}
\item{tit}{(character) custom title}
\item{xlab}{(character) custom x-axis label}
\item{ylab}{(character) custom y-axis label}
\item{col}{(character or integer) custom colors}
\item{log}{(character) define which axis should be log (use "xy" for drawing both x- and y-axis as log-scale)}
\item{mark}{(matrix) first column for text and second column for where it should be stated along the top border of the figure (x-coordinate)}
\item{cexMark}{(numeric) cex expansion-factor for text from argument \code{mark}}
}
\value{
figure only
}
\description{
This simple function allows plotting the expected number of theoretical fragments from random fragmentation of peptides/proteins (in mass spectrometry).
Here, only the pure fragmentation without any variable fragmentation is considered, all fragment-sizes are included (ie, no gating).
For simplicity, possible (variable) modifications like loss of neutrals, etc, are not considered.
}
\examples{
marks <- data.frame(name=c("Ubiquitin\n76aa", "Glutamate dehydrogenase 1\n501aa"),
length=c(76,501))
plotNTheor(x=20:750, log="", mark=marks)
}
\seealso{
\code{\link{AAfragSettings}}
}
|
# Assignment template: multiple linear regression of ETF geometric mean
# returns on volatility and max time under water, with a 4-ETF hold-out set.
# NOTE(review): hard-coded absolute setwd() path — only runs on the author's
# machine; prefer project-relative paths.
setwd("/home/arm/Projects/statistics/r_scripts/ass2")
# Read the dataset 'finans2_data.csv' into R
D <- read.table("finans2_data.csv", header = TRUE, sep = ";")
# Subset containing only AGG, VAW, IWN and SPY (for validation)
D_test <- subset(D, ETF %in% c("AGG","VAW","IWN","SPY"))
# Subset containing only the 91 remaining ETFs (for model estimation)
D_model <- subset(D, !(ETF %in% c("AGG","VAW","IWN","SPY")))
# Estimate multiple linear regression model
fit <- lm(Geo.mean ~ Volatility + maxTuW, data = D_model)
# Show parameter estimates etc.
summary(fit)
# Plots for model validation
# Observations against fitted values
plot(fit$fitted.values, D_model$Geo.mean, xlab = "Fitted values",
ylab = "Geom. average rate of return")
# Residuals against each of the explanatory variables
# NOTE(review): 'EXPLANATORY_VARIABLE' and "INSERT TEXT" are template
# placeholders — substitute each explanatory variable in turn (Volatility,
# then maxTuW) and a matching axis label before running; as written this
# line errors (column does not exist).
plot(D_model$EXPLANATORY_VARIABLE, fit$residuals,
xlab = "INSERT TEXT", ylab = "Residuals")
# Residuals against fitted values
plot(fit$fitted.values, fit$residuals, xlab = "Fitted values",
ylab = "Residuals")
# Normal QQ-plot of the residuals
qqnorm(fit$residuals, ylab = "Residuals", xlab = "Z-scores",
main = "")
qqline(fit$residuals)
# Confidence intervals for the model coefficients
confint(fit, level = 0.95)
# Predictions and 95% prediction intervals
# NOTE(review): 'FINAL_MODEL' is a template placeholder — replace with the
# chosen fitted model object (e.g. fit) before running.
pred <- predict(FINAL_MODEL, newdata = D_test,
interval = "prediction", level = 0.95)
# Observed values and predictions
cbind(id = D_test$ETF, Geo.mean = D_test$Geo.mean, pred)
| /ass2/finans2_english.R | no_license | ArmandasRokas/statistics | R | false | false | 1,484 | r | setwd("/home/arm/Projects/statistics/r_scripts/ass2")
# Read the dataset 'finans2_data.csv' into R
D <- read.table("finans2_data.csv", header = TRUE, sep = ";")
# Subset containing only AGG, VAW, IWN and SPY (for validation)
D_test <- subset(D, ETF %in% c("AGG","VAW","IWN","SPY"))
# Subset containing only the remaining ETFs (for model estimation)
D_model <- subset(D, !(ETF %in% c("AGG","VAW","IWN","SPY")))
# Estimate multiple linear regression model
fit <- lm(Geo.mean ~ Volatility + maxTuW, data = D_model)
# Show parameter estimates etc.
summary(fit)
# Plots for model validation
# Observations against fitted values
plot(fit$fitted.values, D_model$Geo.mean, xlab = "Fitted values",
     ylab = "Geom. average rate of return")
# Residuals against each of the explanatory variables
# (fixed: the template placeholder D_model$EXPLANATORY_VARIABLE was NULL and
# made plot() fail; one plot per explanatory variable in the model)
plot(D_model$Volatility, fit$residuals,
     xlab = "Volatility", ylab = "Residuals")
plot(D_model$maxTuW, fit$residuals,
     xlab = "maxTuW", ylab = "Residuals")
# Residuals against fitted values
plot(fit$fitted.values, fit$residuals, xlab = "Fitted values",
     ylab = "Residuals")
# Normal QQ-plot of the residuals
qqnorm(fit$residuals, ylab = "Residuals", xlab = "Z-scores",
       main = "")
qqline(fit$residuals)
# Confidence intervals for the model coefficients
confint(fit, level = 0.95)
# Predictions and 95% prediction intervals
# (fixed: FINAL_MODEL was an undefined placeholder; the estimated model is `fit`)
pred <- predict(fit, newdata = D_test,
                interval = "prediction", level = 0.95)
# Observed values and predictions
cbind(id = D_test$ETF, Geo.mean = D_test$Geo.mean, pred)
|
# init.R
# Pas d'argent 0.91
#
# This is a personal project to manage my home economy using Google Spreadsheets and R scripts.
#
# Initial loading script.
# Modules.
source ("properties.R", encoding = "UTF-8")
source ("packages.R", encoding = "UTF-8")
# Initialization.
loadPackage ("methods")
loadPackage ("devtools")
loadPackage ("googlesheets")
loadPackage ("dplyr")
loadPackage ("readr")
properties <- getPropertiesFromFile ("sheet.properties")
# Properties setup.
SHEET_NAME <- getCharacterProperty (properties, "sheet.name")
WORKSHEET_NAME <- getCharacterProperty (properties, "worksheet.name")
VALUE_YES <- getCharacterProperty (properties, "value.yes")
VALUE_NO <- getCharacterProperty (properties, "value.no")
# Registers the base data sheet with incomes and expenses, looked up on
# Google Drive by its configured title (`SHEET_NAME`).
# The sheet is registered as is, with no further modifications.
#
# @returns Reference to the base data sheet with incomes and expenses.
loadExpensesSheet <- function ( ) {
    gs_title (SHEET_NAME, verbose = FALSE)
}
# Converts a character vector with `VALUE_YES`/`VALUE_NO` flags into a
# logical vector.
#
# The "yes" value maps to `TRUE`; the "no" value, `NA` and any other string
# all map to `FALSE`, so the result never contains missing values.
#
# @param character_vector Character vector with yes/no flags.
# @param yes_value String representing `TRUE` (defaults to the configured
#   `VALUE_YES`); exposed as a parameter for easier reuse and testing.
#
# @returns Logical vector of the same length as `character_vector`.
convertIntoLogicalValues <- function (character_vector, yes_value = VALUE_YES) {
    # Vectorized replacement for the original element-wise `lapply` loop:
    # `x == yes_value` yields NA for missing entries, and `!is.na (...) &`
    # collapses those to FALSE, matching the previous behaviour.
    return (!is.na (character_vector) & (character_vector == yes_value))
}
# Defines if a given date is in the past.
# A date is considered to be in the past if it belongs to the previous month
# (or any earlier month).
#
# @param date Date to check.
#
# @returns `TRUE` if the date belongs to a month before the current one; `FALSE` otherwise.
dateBelongsToThePast <- function (date) {
    currentPeriod <- format (Sys.Date ( ), "%Y%m")
    datePeriod <- format (date, "%Y%m")
    # "YYYYMM" strings compare chronologically under plain string ordering.
    return (datePeriod < currentPeriod)
}
# Determines if an estimated expense (budget) has been closed yet.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param isClosed Logical value of the `Is.Closed` column, stating if the expense has been manually closed.
#
# @returns `TRUE` if the expense is an estimation and has been closed; `FALSE` otherwise.
budgetHasBeenClosedYet <- function (isBudget, isClosed) {
    # Elementwise `&`, so the check vectorizes over whole columns.
    isBudget & isClosed
}
# Defines if an estimated expense (budget) belongs to the past.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param date Date of the expense.
#
# @returns `TRUE` if the expense is an estimation and belongs to the past; `FALSE` otherwise.
budgetBelongsToThePast <- function (isBudget, date) {
    # `&` evaluates both sides elementwise; operand order does not matter.
    isBudget & dateBelongsToThePast (date)
}
# Defines if an estimated expense (budget) in the current month has been consumed.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param date Date of the budget.
# @param amount Budget amount.
# @param budgetConsumed Budget consumed.
#
# @returns `TRUE` if the expense is a budget in the current month, and the budget consumed reaches or exceeds the estimation; `FALSE` otherwise.
budgetIsCurrentAndConsumed <- function (isBudget, date, amount, budgetConsumed) {
    isCurrent <- !dateBelongsToThePast (date)
    # Amounts are compared in absolute value (expenses may be negative);
    # an NA consumption (no expenses reference the budget) counts as "not consumed".
    isConsumed <- !is.na (budgetConsumed) & (abs (amount) <= abs (budgetConsumed))
    isCurrent & isBudget & isConsumed
}
# Defines if an estimated expense (budget) should be automatically closed.
# A budget should be closed in one of these cases:
#
# * The budget has been manually closed yet.
# * The budget belongs to a month in the past.
# * The budget belongs to the current month, but has been consumed (the real expenses exceeds the budget amount).
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param isClosed Logical value of the `Is.Closed` column, stating if the budget has been manually closed.
# @param date Date of the budget.
# @param amount Budget amount.
# @param budgetConsumed Budget consumed.
#
# @returns `TRUE` if the expense is a budget which meets any of the conditions above; `FALSE` otherwise.
budgetShouldBeClosed <- function (isBudget, isClosed, date, amount, budgetConsumed) {
    manuallyClosed <- budgetHasBeenClosedYet (isBudget, isClosed)
    pastBudget <- budgetBelongsToThePast (isBudget, date)
    consumedBudget <- budgetIsCurrentAndConsumed (isBudget, date, amount, budgetConsumed)
    manuallyClosed | pastBudget | consumedBudget
}
# Gets the month from a given date.
#
# @param date Date (or "dd/mm/yyyy" string) to get the month from.
#
# @returns Factor from the numeric representation of the month in the date.
getMonthFromDate <- function (date) {
    parsedDate <- as.Date (date, "%d/%m/%Y")
    # `as.numeric` drops the leading zero ("03" -> 3) before factoring.
    as.factor (as.numeric (format (parsedDate, "%m")))
}
# Gets the year from a given date.
#
# @param date Date (or "dd/mm/yyyy" string) to get the year from.
#
# @returns Factor from the numeric representation of the year in the date.
getYearFromDate <- function (date) {
    parsedDate <- as.Date (date, "%d/%m/%Y")
    as.factor (as.numeric (format (parsedDate, "%Y")))
}
# Creates the data frame with the incomes/expenses data.
# The data frame goes through several transformations:
#
# * The column names are translated into English (the original sheet is in Spanish).
# * A new `Month` column is added, to help in filtering the data frame.
# * A new `Year` column is added, to help in filtering the data frame.
# * Every estimated expense in the past is automatically closed.
# * Every estimated expense in the current month which has been consumed is automatically closed.
#
# @param expensesReference Reference to the base data sheet with incomes and expenses.
#
# @returns A `tbl_df` data frame with the incomes/expenses data.
getExpensesData <- function (expensesReference) {
    # Specifies the decimal and grouping mark for currency values
    # ("1.234,56" style), used below by `parse_number`.
    spanishLocale <- locale (grouping_mark = ".", decimal_mark = ",")
    # Loads the sheet from Google Spreadsheets. `skip = 1` drops the original
    # (Spanish) header row and `col_names` renames the columns to English.
    # NOTE(review): the `Type` levels are the Spanish category names used in
    # the sheet; a category missing from this list is read as NA.
    expensesData <- gs_read (
        expensesReference,
        ws = WORKSHEET_NAME,
        verbose = FALSE,
        skip = 1,
        col_types = cols (
            Id = col_integer ( ),
            Date = col_date ("%d/%m/%Y"),
            Is.Budget = col_character ( ),
            Is.Closed = col_character ( ),
            Type = col_factor (c ("Niños", "Agua", "Coche", "Salud", "Gatuno", "Gasoil", "Gasto extra", "Gasto fijo", "Hogar", "Luz", "Nómina", "Ocio", "Restaurante", "Ropa", "Supermercado", "Teléfono", "Ingreso extra")),
            Amount = col_character ( ),
            Reference = col_integer ( ),
            Comments = col_character ( )
        ),
        col_names = c (
            "Id",
            "Date",
            "Is.Budget",
            "Is.Closed",
            "Type",
            "Amount",
            "Reference",
            "Comments"
        )
    )
    # Adds the `Month` and `Year` factor columns, to help in filtering.
    expensesData <- mutate (
        expensesData,
        Month = getMonthFromDate (Date),
        Year = getYearFromDate (Date)
    )
    # Transforms the following columns:
    #
    # * `Is.Budget` should be converted to a logical value.
    # * `Is.Closed` should be converted to a logical value.
    # * `Amount` should be converted to a numeric value, taking into account it's actually a currency value.
    expensesData$Is.Budget <- convertIntoLogicalValues (expensesData$Is.Budget)
    expensesData$Is.Closed <- convertIntoLogicalValues (expensesData$Is.Closed)
    expensesData$Amount <- parse_number(expensesData$Amount, locale = spanishLocale)
    # Closes automatically the budgets if they fall into one of these cases:
    #
    # * The budget belongs to a month in the past.
    # * The budget belongs to the current month, but has been consumed (the real expenses exceeds the budget amount).
    #
    # `Budget.Consumed` sums the amounts of all rows whose `Reference` points
    # at each budget's `Id`; the left join attaches that sum to the budget row.
    realExpensesPerBudget <- expensesData %>% group_by (Reference) %>% summarise (Budget.Consumed = sum (Amount))
    expensesData <- left_join (expensesData, realExpensesPerBudget, by = c ("Id" = "Reference"))
    # `budgetShouldBeClosed` also keeps manually-closed budgets closed.
    expensesData <- mutate (
        expensesData,
        Is.Closed = ifelse (
            budgetShouldBeClosed (Is.Budget, Is.Closed, Date, Amount, Budget.Consumed),
            TRUE,
            FALSE
        )
    ) %>%
        select (Id, Date, Month, Year, Is.Budget, Is.Closed, Type, Amount, Reference, Comments)
    return (tbl_df (expensesData))
}
expensesData <- getExpensesData (loadExpensesSheet ( )) | /init.R | no_license | pcesarperez/pas-d-argent | R | false | false | 8,193 | r | # init.R
# Pas d'argent 0.91
#
# This is a personal project to manage my home economy using Google Spreadsheets and R scripts.
#
# Initial loading script.
# Modules.
source ("properties.R", encoding = "UTF-8")
source ("packages.R", encoding = "UTF-8")
# Initialization.
loadPackage ("methods")
loadPackage ("devtools")
loadPackage ("googlesheets")
loadPackage ("dplyr")
loadPackage ("readr")
properties <- getPropertiesFromFile ("sheet.properties")
# Properties setup.
SHEET_NAME <- getCharacterProperty (properties, "sheet.name")
WORKSHEET_NAME <- getCharacterProperty (properties, "worksheet.name")
VALUE_YES <- getCharacterProperty (properties, "value.yes")
VALUE_NO <- getCharacterProperty (properties, "value.no")
# Registers the specified base data sheet with incomes and expenses.
# The sheet is registered as is, with no further modifications.
#
# @returns Reference to the specified base data sheet with incomes and expenses, loaded from Google Drive.
loadExpensesSheet <- function ( ) {
return (gs_title (SHEET_NAME, verbose = FALSE))
}
# Converts a vector of items of type `character` with `VALUE.YES`/`VALUE.NO` values to logical values.
convertIntoLogicalValues <- function (character_vector) {
transformation <- lapply (character_vector, function (x) {
if (is.na (x)) {
return (FALSE)
} else if (x == VALUE_YES) {
return (TRUE)
} else if (x == VALUE_NO) {
return (FALSE)
} else {
return (FALSE)
}
})
return (unlist (transformation))
}
# Defines if a given date is in the past.
# A date is considered to be in the past if it belongs to the previous month.
#
# @param date Date to check.
#
# @returns `TRUE` if the date belongs to the previous month or before; `FALSE` otherwise.
dateBelongsToThePast <- function (date) {
return (format (date, "%Y%m") < format (Sys.Date ( ), "%Y%m"))
}
# Determines if an estimated expense (budget) is closed yet.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param isClosed Logical value of the `Is.Closed` column, stating if the expense has been manually closed.
#
# @returns `TRUE` if the expense is an estimation and has been closed; `FALSE` otherwise.
budgetHasBeenClosedYet <- function (isBudget, isClosed) {
return (isBudget & isClosed)
}
# Defines if an estimated expense (budget) belongs to the past.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param date Date of the expense.
#
# @returns `TRUE` if the expense is an estimation and belongs to the past; `FALSE` otherwise.
budgetBelongsToThePast <- function (isBudget, date) {
return (dateBelongsToThePast (date) & isBudget)
}
# Defines if an estimated expense (budget) in the current month has been consumed.
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param date Date of the budget.
# @param amount Budget amount.
# @param budgetConsumed Budget consumed.
#
# @returns `TRUE` if the expense is a budget in the current month, and the budget consumed exceeds the estimation; `FALSE` otherwise.
budgetIsCurrentAndConsumed <- function (isBudget, date, amount, budgetConsumed) {
return (!dateBelongsToThePast (date) & isBudget & !is.na (budgetConsumed) & (abs (amount) <= abs (budgetConsumed)))
}
# Defines if an estimated expense (budget) should be automatically closed.
# A budget should be closed in one of these cases:
#
# * The budget has been manually closed yet.
# * The budget belongs to a month in the past.
# * The budget belongs to the current month, but has been consumed (the real expenses exceeds the budget amount).
#
# @param isBudget Logical value of the `Is.Budget` column, stating if the expense is an estimation (budget).
# @param isClosed Logical value of the `Is.Closed` column, stating if the budget has been manually closed.
# @param date Date of the budget.
# @param amount Budget amount.
# @param budgetConsumed Budget consumed.
#
# @returns `TRUE` if the expense is a budget which meets the conditions to be closed; `FALSE` otherwise.
budgetShouldBeClosed <- function (isBudget, isClosed, date, amount, budgetConsumed) {
return (
budgetHasBeenClosedYet (isBudget, isClosed) |
budgetBelongsToThePast (isBudget, date) |
budgetIsCurrentAndConsumed (isBudget, date, amount, budgetConsumed)
)
}
# Gets the month from a given date.
#
# @param date Date to get the month from.
#
# @returns Factor from the numeric representation of the month in the date.
getMonthFromDate <- function (date) {
return (as.factor (as.numeric (format (as.Date (date, "%d/%m/%Y"), "%m"))))
}
# Gets the year from a given date.
#
# @param date Date to get the year from.
#
# @returns Factor from the numeric representation of the year in the date.
getYearFromDate <- function (date) {
return (as.factor (as.numeric (format (as.Date (date, "%d/%m/%Y"), "%Y"))))
}
# Creates the data frame with the incomes/expenses data.
# The data frame goes through several transformations:
#
# * The column names are translated into English (the original sheet is in Spanish).
# * A new `Month` column is added, to help in filtering the data frame.
# * A new `Year` column is added, to help in filtering the data frame.
# * Every estimated expense in the past is automatically closed.
# * Every estimated expense in the current month which has been consumed is automatically closed.
#
# @param expensesReference Reference to the base data sheet with incomes and expenses.
#
# @returns A `tbl_df` data frame with the incomes/expenses data.
getExpensesData <- function (expensesReference) {
# Specifies the decimal and grouping mark for currency values.
spanishLocale <- locale (grouping_mark = ".", decimal_mark = ",")
# Loads the sheet from Google Spreadsheets.
expensesData <- gs_read (
expensesReference,
ws = WORKSHEET_NAME,
verbose = FALSE,
skip = 1,
col_types = cols (
Id = col_integer ( ),
Date = col_date ("%d/%m/%Y"),
Is.Budget = col_character ( ),
Is.Closed = col_character ( ),
Type = col_factor (c ("Niños", "Agua", "Coche", "Salud", "Gatuno", "Gasoil", "Gasto extra", "Gasto fijo", "Hogar", "Luz", "Nómina", "Ocio", "Restaurante", "Ropa", "Supermercado", "Teléfono", "Ingreso extra")),
Amount = col_character ( ),
Reference = col_integer ( ),
Comments = col_character ( )
),
col_names = c (
"Id",
"Date",
"Is.Budget",
"Is.Closed",
"Type",
"Amount",
"Reference",
"Comments"
)
)
# Adds the `Month` and `Year` columns.
expensesData <- mutate (
expensesData,
Month = getMonthFromDate (Date),
Year = getYearFromDate (Date)
)
# Transforms the following columns:
#
# * `Is.Budget` should be converted to a logical value.
# * `Is.Closed` should be converted to a logical value.
# * `Amount` should be converted to a numeric value, taking into account it's actually a currency value.
expensesData$Is.Budget <- convertIntoLogicalValues (expensesData$Is.Budget)
expensesData$Is.Closed <- convertIntoLogicalValues (expensesData$Is.Closed)
expensesData$Amount <- parse_number(expensesData$Amount, locale = spanishLocale)
# Closes automatically the budgets if they fall into one of these cases:
#
# * The budget belongs to a month in the past.
# * The budget belongs to the current month, but has been consumed (the real expenses exceeds the budget amount).
realExpensesPerBudget <- expensesData %>% group_by (Reference) %>% summarise (Budget.Consumed = sum (Amount))
expensesData <- left_join (expensesData, realExpensesPerBudget, by = c ("Id" = "Reference"))
expensesData <- mutate (
expensesData,
Is.Closed = ifelse (
budgetShouldBeClosed (Is.Budget, Is.Closed, Date, Amount, Budget.Consumed),
TRUE,
FALSE
)
) %>%
select (Id, Date, Month, Year, Is.Budget, Is.Closed, Type, Amount, Reference, Comments)
return (tbl_df (expensesData))
}
expensesData <- getExpensesData (loadExpensesSheet ( )) |
# cachematrix.R assignment
## `makeCacheMatrix` builds a "cache-aware matrix": a list of four closures
## sharing one environment that holds the matrix `x` and its cached inverse.
## `cacheSolve` uses these accessors to compute the inverse at most once.
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  set <- function(y) {
    # Storing a new matrix invalidates any previously cached inverse.
    x <<- y
    cachedInverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cachedInverse <<- inverse
  getinverse <- function() cachedInverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## `cacheSolve` returns the inverse of the special "matrix" made by
## `makeCacheMatrix`, computing it on the first call and reusing the cached
## value afterwards (announced via a message on cache hits).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cachedInverse <- x$getinverse()
  if (is.null(cachedInverse)) {
    # Cache miss: compute the inverse and store it for later calls.
    cachedInverse <- solve(x$get(), ...)
    x$setinverse(cachedInverse)
  } else {
    message("getting cached matrix")
  }
  cachedInverse
}
| /cachematrix.R | no_license | sbushman/ProgrammingAssignment2 | R | false | false | 638 | r | #cachematrix.r assignment
## `makeCacheMatrix` builds a "cache-aware matrix": a list of four closures
## sharing one environment that holds the matrix `x` and its cached inverse.
## `cacheSolve` uses these accessors to compute the inverse at most once.
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  set <- function(y) {
    # Storing a new matrix invalidates any previously cached inverse.
    x <<- y
    cachedInverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cachedInverse <<- inverse
  getinverse <- function() cachedInverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## `cacheSolve` returns the inverse of the special "matrix" made by
## `makeCacheMatrix`, computing it on the first call and reusing the cached
## value afterwards (announced via a message on cache hits).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cachedInverse <- x$getinverse()
  if (is.null(cachedInverse)) {
    # Cache miss: compute the inverse and store it for later calls.
    cachedInverse <- solve(x$get(), ...)
    x$setinverse(cachedInverse)
  } else {
    message("getting cached matrix")
  }
  cachedInverse
}
|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
#
#
# Usage
#
# the standard operating procedure is to use solve(matrix) to calculate inverse of matrix,
#
# > matrix_o <- makeCacheMatrix(matrix)
# > cacheSolve (matrix_o)
#
#
# Function 1: makeCacheMatrix(x=matrix())
#
# makeCacheMatrix accepts a matrix as a formal argument and pack it into an object w/ the following methods
#
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
#
# This function utilizes the side effects operations of R, specifically the '<<-' operator, which assigns
# a value to a different environment, notably the parent environment
# The end result, to me, is akin to having a global variable
# So you can calculate the inverse of a matrix, store it into an object's internal variable in an upper
# environment, and it will work as a global variable that you can reference as needed
#
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse of `x`; NULL means "not computed yet".
    inv <- NULL
    # The four accessors share this function's environment, so `<<-` inside
    # them updates `x` and `inv` where they live (the cache).
    list(
        set = function(y) {
            # A new matrix invalidates the cached inverse.
            x <<- y
            inv <<- NULL
        },
        get = function() x,
        setinverse = function(value) inv <<- value,
        getinverse = function() inv
    )
}
#
# Function 2: cacheSolve(x, ...)
# A replacement function that instead of straight up computation of the inverse of a matrix
#
# x is an output of the makeCacheMatrix, not a straight up matrix
# cacheSolve still returns the inverse of x, by calculating it if needed or retrieve a cached version
# of inv(x) from parent environment if it exits
#
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    # Ask the cache-aware matrix for a previously stored inverse.
    inv <- x$getinverse()
    if (!is.null(inv)) {
        message("using cached inverse value")
        return(inv)
    }
    # Cache miss: solve the wrapped matrix and remember the result
    # so later calls can skip the computation.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
}
| /cachematrix.R | no_license | studiocardo/ProgrammingAssignment2 | R | false | false | 2,996 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
#
#
# Usage
#
# the standard operating procedure is to use solve(matrix) to calculate inverse of matrix,
#
# > matrix_o <- makeCacheMatrix(matrix)
# > cacheSolve (matrix_o)
#
#
# Function 1: makeCacheMatrix(x=matrix())
#
# makeCacheMatrix accepts a matrix as a formal argument and pack it into an object w/ the following methods
#
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
#
# This function utilizes the side effects operations of R, specifically the '<<-' operator, which assigns
# a value to a different environment, notably the parent environment
# The end result, to me, is akin to having a global variable
# So you can calculate the inverse of a matrix, store it into an object's internal variable in an upper
# environment, and it will work as a global variable that you can reference as needed
#
makeCacheMatrix <- function(x = matrix()) {
# reset the inverse value
#
inv <- NULL
# `<<-` assigns a value to an object in an environment
# different from the current one.
#
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(solve) inv <<- solve
getinverse <- function() inv
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
#
# Function 2: cacheSolve(x, ...)
# A replacement function that instead of straight up computation of the inverse of a matrix
#
# x is an output of the makeCacheMatrix, not a straight up matrix
# cacheSolve still returns the inverse of x, by calculating it if needed or retrieve a cached version
# of inv(x) from parent environment if it exits
#
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# retrieve the stored inverse value by invoking the object's setinverse() method
#
inv<-x$getinverse()
if (!is.null(inv)){
# if there is a non-null inverse available, then use it and get out
#
message("using cached inverse value")
return(inv)
}
# Or we'll have to calculate the inverse if no cached version avails
# get the matrix by invoking the object's get() method
matrix_x <- x$get()
# Use standard solve() function to calculate the inverse of a matrix
#
inv <- solve(matrix_x, ...)
# Store the inverse of the matrix into the object by invoking the setinverse() method
#
x$setinverse(inv)
# return the inverse of matrix_x
#
return (inv)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SWORD_dataset.R
\name{dataset_atom}
\alias{dataset_atom}
\alias{dataset_statement}
\title{View dataset (SWORD)}
\usage{
dataset_atom(dataset, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
dataset_statement(dataset, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataset}{A dataset DOI (or other persistent identifier), an object of class \dQuote{dataset_atom} or \dQuote{dataset_statement}, or an appropriate and complete SWORD URL.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the default is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list. For \code{dataset_atom}, an object of class \dQuote{dataset_atom}.
}
\description{
View a SWORD (possibly unpublished) dataset \dQuote{statement}
}
\details{
These functions are used to view a dataset by its persistent identifier. \code{dataset_statement} will contain information about the contents of the dataset, whereas \code{dataset_atom} contains \dQuote{metadata} relevant to the SWORD API.
}
\examples{
\dontrun{
# retrieve your service document
d <- service_document()
# retrieve dataset statement (list contents)
dataset_statement(d[[2]])
# retrieve dataset atom
dataset_atom(d[[2]])
}
}
\seealso{
Managing a Dataverse: \code{\link{publish_dataverse}}; Managing a dataset: \code{\link{dataset_atom}}, \code{\link{list_datasets}}, \code{\link{create_dataset}}, \code{\link{delete_sword_dataset}}, \code{\link{publish_dataset}}; Managing files within a dataset: \code{\link{add_file}}, \code{\link{delete_file}}
}
| /man/dataset_atom.Rd | permissive | wibeasley/dataverse-client-r | R | false | true | 2,238 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SWORD_dataset.R
\name{dataset_atom}
\alias{dataset_atom}
\alias{dataset_statement}
\title{View dataset (SWORD)}
\usage{
dataset_atom(dataset, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
dataset_statement(dataset, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataset}{A dataset DOI (or other persistent identifier), an object of class \dQuote{dataset_atom} or \dQuote{dataset_statement}, or an appropriate and complete SWORD URL.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the default is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list. For \code{dataset_atom}, an object of class \dQuote{dataset_atom}.
}
\description{
View a SWORD (possibly unpublished) dataset \dQuote{statement}
}
\details{
These functions are used to view a dataset by its persistent identifier. \code{dataset_statement} will contain information about the contents of the dataset, whereas \code{dataset_atom} contains \dQuote{metadata} relevant to the SWORD API.
}
\examples{
\dontrun{
# retrieve your service document
d <- service_document()
# retrieve dataset statement (list contents)
dataset_statement(d[[2]])
# retrieve dataset atom
dataset_atom(d[[2]])
}
}
\seealso{
Managing a Dataverse: \code{\link{publish_dataverse}}; Managing a dataset: \code{\link{dataset_atom}}, \code{\link{list_datasets}}, \code{\link{create_dataset}}, \code{\link{delete_sword_dataset}}, \code{\link{publish_dataset}}; Managing files within a dataset: \code{\link{add_file}}, \code{\link{delete_file}}
}
|
# Plot 2
source("load_the_data.R")

# Global active power over time, rendered to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
# type = "l" draws the connected line directly, equivalent to the original
# empty plot (type = "n") followed by lines().
plot(
  df$Timestamp,
  df$Global_active_power,
  xlab = "",
  ylab = "Global Active Power (kilowatts)",
  type = "l"
)
dev.off()
| /plot2.R | no_license | giacecco/ExData_Plotting1 | R | false | false | 266 | r | # Plot 2
source("load_the_data.R")

# Global active power over time, rendered to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
# type = "l" draws the connected line directly, equivalent to the original
# empty plot (type = "n") followed by lines().
plot(
  df$Timestamp,
  df$Global_active_power,
  xlab = "",
  ylab = "Global Active Power (kilowatts)",
  type = "l"
)
dev.off()
|
# dplyr only approach
# Read the GHG category graph (nodes + edges) and materialize
# ancestor -> descendant paths up to two levels deep via one self-join.
library(tidyverse)
ghg_nodes_df <- read_csv("data-raw/ghg_cats_nodes.csv") %>%
  rename(id = name)
ghg_edges_df <- read_csv("data-raw/ghg_cats_edges.csv")
# Edge list as an adjacency list: `to` is the ancestor, `from` the descendant.
adj_list <- ghg_edges_df %>%
  rename(ancestor = to, descendant = from) %>%
  select(-type)
# Self-join (descendant of one edge matched to ancestor of another) to reach
# grandchildren, union with the direct edges, then drop duplicates and NA
# descendants introduced by edges with no onward match.
materialized_paths <- adj_list %>%
  left_join(adj_list, by = c("descendant" = "ancestor")) %>%
  select(ancestor, descendant = descendant.y) %>%
  bind_rows(adj_list) %>%
  distinct(ancestor, descendant) %>%
  filter(!is.na(descendant)) %>%
  arrange(ancestor, descendant)
#' One step of transitive-closure expansion on an adjacency list.
#'
#' Renames the `parent`/`child` columns to `ancestor`/`descendant`,
#' self-joins the edge list so every ancestor also reaches its
#' grandchildren, keeps the direct edges, and removes duplicate and
#' NA (dead-end) pairs.
#'
#' @param .adj_list A data frame with columns `parent` and `child`.
#' @return A data frame with columns `ancestor` and `descendant`, sorted.
self_join_and_prune <- function(.adj_list) {
  adj_list <- .adj_list %>%
    select(ancestor = parent, descendant = child)
  # FIX: the pipeline's value used to be captured in an unused local
  # (`join_path`), which made the function return its result invisibly;
  # return the pipeline directly instead.
  left_join(adj_list, adj_list,
            by = c("descendant" = "ancestor")) %>%
    select(ancestor, descendant = descendant.y) %>%
    bind_rows(adj_list) %>%
    distinct(ancestor, descendant) %>%
    filter(!is.na(descendant)) %>%
    arrange(ancestor, descendant)
}
| /scripts/dplyr-sql-path-approach.R | no_license | jameelalsalam/nestedcats | R | false | false | 1,030 | r | # dplyr only approach
library(tidyverse)
ghg_nodes_df <- read_csv("data-raw/ghg_cats_nodes.csv") %>%
rename(id = name)
ghg_edges_df <- read_csv("data-raw/ghg_cats_edges.csv")
adj_list <- ghg_edges_df %>%
rename(ancestor = to, descendant = from) %>%
select(-type)
materialized_paths <- adj_list %>%
left_join(adj_list, by = c("descendant" = "ancestor")) %>%
select(ancestor, descendant = descendant.y) %>%
bind_rows(adj_list) %>%
distinct(ancestor, descendant) %>%
filter(!is.na(descendant)) %>%
arrange(ancestor, descendant)
self_join_and_prune <- function(.adj_list) {
# assumes .adj_list has columns `parent` and `child`
adj_list <- .adj_list %>%
select(ancestor = parent, descendant = child)
join_path <- left_join(adj_list, adj_list,
by = c("descendant" = "ancestor")) %>%
select(ancestor, descendant = descendant.y) %>%
bind_rows(adj_list) %>%
distinct(ancestor, descendant) %>%
filter(!is.na(descendant)) %>%
arrange(ancestor, descendant)
}
|
#
# shopifyr: An R Interface to the Shopify API
#
# Copyright (C) 2015 Charlie Friedemann cfriedem @ gmail.com
# Shopify API (c) 2006-2015 Shopify Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########### Transaction functions ###########
#' @param orderId an Order id number
#' @templateVar name Transaction
#' @template api
NULL
## GET /admin/orders/#{id}/transactions.json
## Receive a list of all Transactions
#' @rdname Transaction
getTransactions <- function(orderId, ...) {
    # Build the per-order transactions endpoint, then unwrap the listing
    # from the response envelope.
    endpoint <- private$.url("orders", orderId, "transactions")
    private$.request(endpoint, ...)$transactions
}
## GET /admin/orders/#{id}/transactions/count.json
## Receive a count of all Transactions
#' @rdname Transaction
getTransactionsCount <- function(orderId, ...) {
    # Hit the count endpoint for this order and return the bare count.
    endpoint <- private$.url("orders", orderId, "transactions", "count")
    private$.request(endpoint, ...)$count
}
## GET /admin/orders/#{id}/transactions/#{id}.json
## Receive a single Transaction
#' @rdname Transaction
getTransaction <- function(orderId, transactionId, ...) {
    # Fetch a single transaction belonging to an order and unwrap it.
    endpoint <- private$.url("orders", orderId, "transactions", transactionId)
    private$.request(endpoint, ...)$transaction
}
## POST /admin/orders/#{id}/transactions.json
## Create a new Transaction
#' @rdname Transaction
createTransaction <- function(orderId, transaction, ...) {
transaction <- private$.wrap(transaction, "transaction", "kind")
private$.request(private$.url("orders",orderId,"transactions"), reqType="POST", data=transaction, ...)$transaction
} | /R/Transaction.R | no_license | Schumzy/shopifyr | R | false | false | 2,112 | r | #
# shopifyr: An R Interface to the Shopify API
#
# Copyright (C) 2015 Charlie Friedemann cfriedem @ gmail.com
# Shopify API (c) 2006-2015 Shopify Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########### Transaction functions ###########
#' @param orderId an Order id number
#' @templateVar name Transaction
#' @template api
NULL
## GET /admin/orders/#{id}/transactions.json
## Receive a list of all Transactions
#' @rdname Transaction
getTransactions <- function(orderId, ...) {
private$.request(private$.url("orders",orderId,"transactions"), ...)$transactions
}
## GET /admin/orders/#{id}/transactions/count.json
## Receive a count of all Transactions
#' @rdname Transaction
getTransactionsCount <- function(orderId, ...) {
private$.request(private$.url("orders",orderId,"transactions","count"), ...)$count
}
## GET /admin/orders/#{id}/transactions/#{id}.json
## Receive a single Transaction
#' @rdname Transaction
getTransaction <- function(orderId, transactionId, ...) {
private$.request(private$.url("orders",orderId,"transactions",transactionId), ...)$transaction
}
## POST /admin/orders/#{id}/transactions.json
## Create a new Transaction
#' @rdname Transaction
createTransaction <- function(orderId, transaction, ...) {
    # Normalize the payload: .wrap nests `transaction` under a "transaction"
    # key -- presumably "kind" names a required/default field; confirm in
    # the definition of private$.wrap.
    transaction <- private$.wrap(transaction, "transaction", "kind")
    # POST to /admin/orders/{orderId}/transactions.json and unwrap the
    # created transaction from the response envelope.
    private$.request(private$.url("orders",orderId,"transactions"), reqType="POST", data=transaction, ...)$transaction
}
# specify parameters
# k <- 1 # odd number
# p <- 2 # Manhattan (1), Euclidean (2) or Chebyshev (Inf)
kNN <- function(features, labels, memory = NULL,
k = 1, p = 2, type="train") {
# test the inputs
library(assertthat)
library(dplyr)
not_empty(features); not_empty(labels);
if (type == "train") {
assert_that(nrow(features) == length(labels))
}
is.string(type); assert_that(type %in% c("train", "predict"))
is.count(k);
assert_that(p %in% c(1, 2, Inf))
if (type == "predict") {
assert_that(not_empty(memory) &
ncol(memory) == ncol(features) &
nrow(memory) == length(labels))
}
# Compute the distance between each point and all others
noObs <- nrow(features)
labels <- as.factor(labels)
noLabels <- length(levels(labels))
# if we are making predictions on the test set based on the memory,
# we compute distances between each test observation and observations
# in our memory
if (type == "train") {
distMatrix <- matrix(NA, noObs, noObs)
for (obs in 1:noObs) {
# getting the probe for the current observation
probe <- as.numeric(features[obs,])
probeExpanded <- matrix(probe, nrow = noObs, ncol = 2,
byrow = TRUE)
# computing distances between the probe and exemplars in the
# training X
if (p %in% c(1,2)) {
distMatrix[obs, ] <- (rowSums((abs(features -
probeExpanded))^p) )^(1/p)
} else if (p==Inf) {
distMatrix[obs, ] <- apply(abs(features - probeExpanded), 1, max)
}
}
} else if (type == "predict") {
noMemory <- nrow(memory)
distMatrix <- matrix(NA, noObs, noMemory)
for (obs in 1:noObs) {
# getting the probe for the current observation
probe <- as.numeric(features[obs,])
probeExpanded <- matrix(probe, nrow = noMemory, ncol = 2,
byrow = TRUE)
# computing distances between the probe and exemplars in the memory
if (p %in% c(1,2)) {
distMatrix[obs, ] <- (rowSums((abs(memory -
probeExpanded))^p) )^(1/p)
} else if (p==Inf) {
distMatrix[obs, ] <- apply(abs(memory - probeExpanded), 1, max)
}
}
}
# Sort the distances in increasing numerical order and pick the first
# k elements
neighbors <- apply(distMatrix, 1, order) %>% t()
# the most frequent class in the k nearest neighbors and predicted label
predLabels <- rep(NA, noObs)
prob <- matrix(NA, noObs, noLabels)
for (obs in 1:noObs) {
for(label in 1:noLabels){
prob[obs, label] <- sum(labels[neighbors[obs, 1:k]]==levels(labels)[label])/k
}
predLabels[obs] <- levels(labels)[ which.max( prob[obs,] ) ]
}
return(list(prob=prob, predLabels=predLabels))
} | /PS4/kNN.R | no_license | vanbalint/Advanced_comp_methods | R | false | false | 3,234 | r | # specify parameters
# k <- 1 # odd number
# p <- 2 # Manhattan (1), Euclidean (2) or Chebyshev (Inf)
kNN <- function(features, labels, memory = NULL,
                k = 1, p = 2, type="train") {
    # k-nearest-neighbour classifier.
    #
    # features : matrix/data frame of predictors (one row per observation)
    # labels   : class labels; length nrow(features) for type == "train",
    #            length nrow(memory) for type == "predict"
    # memory   : exemplar (training) features used when type == "predict"
    # k        : number of neighbours that vote
    # p        : Minkowski order -- 1 (Manhattan), 2 (Euclidean), Inf (Chebyshev)
    # type     : "train" (distances within `features`; note each point is its
    #            own nearest neighbour at distance 0) or "predict"
    #
    # Returns list(prob = noObs x noLabels class-probability matrix,
    #              predLabels = predicted label per observation).

    # validate the inputs
    library(assertthat)
    library(dplyr)
    not_empty(features); not_empty(labels);
    if (type == "train") {
        assert_that(nrow(features) == length(labels))
    }
    is.string(type); assert_that(type %in% c("train", "predict"))
    is.count(k);
    assert_that(p %in% c(1, 2, Inf))
    if (type == "predict") {
        assert_that(not_empty(memory) &
                    ncol(memory) == ncol(features) &
                    nrow(memory) == length(labels))
    }

    noObs <- nrow(features)
    labels <- as.factor(labels)
    noLabels <- length(levels(labels))
    noFeats <- ncol(features)

    # Pairwise distance matrix: rows = probe observations, columns =
    # exemplars (the training set itself for "train", `memory` for "predict").
    if (type == "train") {
        distMatrix <- matrix(NA, noObs, noObs)
        for (obs in seq_len(noObs)) {
            probe <- as.numeric(features[obs, ])
            # Replicate the probe row-wise for vectorized comparison.
            # BUG FIX: ncol was hard-coded to 2, which silently broke any
            # dataset whose feature count differs from 2.
            probeExpanded <- matrix(probe, nrow = noObs, ncol = noFeats,
                                    byrow = TRUE)
            if (p %in% c(1, 2)) {
                distMatrix[obs, ] <- (rowSums((abs(features -
                                     probeExpanded))^p) )^(1/p)
            } else if (p == Inf) {
                distMatrix[obs, ] <- apply(abs(features - probeExpanded), 1, max)
            }
        }
    } else if (type == "predict") {
        noMemory <- nrow(memory)
        distMatrix <- matrix(NA, noObs, noMemory)
        for (obs in seq_len(noObs)) {
            probe <- as.numeric(features[obs, ])
            # Same fix as above: expand to the actual number of features.
            probeExpanded <- matrix(probe, nrow = noMemory, ncol = noFeats,
                                    byrow = TRUE)
            if (p %in% c(1, 2)) {
                distMatrix[obs, ] <- (rowSums((abs(memory -
                                     probeExpanded))^p) )^(1/p)
            } else if (p == Inf) {
                distMatrix[obs, ] <- apply(abs(memory - probeExpanded), 1, max)
            }
        }
    }

    # Rank exemplars by distance for each probe (row i = neighbour order).
    neighbors <- apply(distMatrix, 1, order) %>% t()

    # Majority vote among the k nearest neighbours; ties resolved by
    # which.max (first label level wins).
    predLabels <- rep(NA, noObs)
    prob <- matrix(NA, noObs, noLabels)
    for (obs in seq_len(noObs)) {
        for (label in seq_len(noLabels)) {
            prob[obs, label] <- sum(labels[neighbors[obs, 1:k]]==levels(labels)[label])/k
        }
        predLabels[obs] <- levels(labels)[ which.max( prob[obs,] ) ]
    }
    return(list(prob=prob, predLabels=predLabels))
}
context("sample")
## Generate test data without littering the environment with temporary
## variables
## `x` (regressors) and `y` (outcomes) are filled in by the local() block
## below and shared by every test in this file.
x <- NULL
y <- NULL
local({
    ## Fixed seed so generate_data() is reproducible; the hard-coded
    ## expected values in the tests below depend on this exact seed.
    set.seed(123)
    N <- 3
    T <- 2
    dd <- generate_data(N=N, T=T)
    ## `<<-` escapes the local() frame and assigns to the file-level
    ## variables declared above.
    x <<- dd$x
    y <<- dd$y
})
## Sanity check
test_that('data', {
expect_equal(x, array(c(-1.31047564655221, -0.679491608575424, -0.289083794010798,
-0.23017748948328, 0.129287735160946, -1.26506123460653,
2.30870831414912, 2.46506498688328, 0.0631471481064739),
dim = c(3, 1, 3)))
expect_equal(y, matrix(c(-2.10089979337606, 1.10899305269782, 2.51416798413193,
-1.98942425038169, 0.729823109874504, 2.93377535075353,
-0.352340885393167, 0.230231415863224, 0.531844092800363),
nrow = 3, ncol = 3, byrow=TRUE))
})
test_that('rho', {
set.seed(123)
expect_equal(sample_rho(10, x, y, rho = c(0, .5, 1)),
c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0))
})
test_that('sig', {
set.seed(123)
expect_equal(sample_sig(x, y, rho = c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0)),
c(3.47057973968515, 3.76533958631778, 5.05616904090221, 0.319511980843401,
2.97344640702733, 1.25384839880482, 0.888201360379622, 2.84216255450146,
1.35200519668826, 0.0434125237806975))
})
test_that('beta', {
set.seed(123)
expect_equal(sample_beta(x, y,
rho = c(1, 0.5, 1, 0),
v = c(0.237091661226817, 2.60818150317784, 2.10900711686825, 4.29265963681323)),
as.matrix(c(2.57282485588875, 0.859094481853702, 0.983136793340975, 0.766863519235555)))
})
test_that('all', {
set.seed(123)
expect_equal(sample_all(x, y, n = 10, pts = c(0, .5, 1)),
list(rho = c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0),
sig2 = 1 / c(3.18533847052255, 4.10251890097103, 1.25384839880482,
0.0585040460504101, 0.442255004862561, 0.035105570425539,
0.434297339273836, 2.30355584438599, 1.70033255494056,
0.594859533302164),
beta = as.matrix(c(1.08887764629632, 1.56039942659895, 1.06592668114779,
1.94115360819053, 0.64932639228806, -0.723483839828413,
1.64356407596858, 1.1762269273108, 1.14516329636659,
1.14599625391133))))
})
| /data/genthat_extracted_code/OrthoPanels/tests/test-sampling.R | no_license | surayaaramli/typeRrh | R | false | false | 2,655 | r | context("sample")
## Generate test data without littering the environment with temporary
## variables
x <- NULL
y <- NULL
local({
set.seed(123)
N <- 3
T <- 2
dd <- generate_data(N=N, T=T)
x <<- dd$x
y <<- dd$y
})
## Sanity check
test_that('data', {
expect_equal(x, array(c(-1.31047564655221, -0.679491608575424, -0.289083794010798,
-0.23017748948328, 0.129287735160946, -1.26506123460653,
2.30870831414912, 2.46506498688328, 0.0631471481064739),
dim = c(3, 1, 3)))
expect_equal(y, matrix(c(-2.10089979337606, 1.10899305269782, 2.51416798413193,
-1.98942425038169, 0.729823109874504, 2.93377535075353,
-0.352340885393167, 0.230231415863224, 0.531844092800363),
nrow = 3, ncol = 3, byrow=TRUE))
})
test_that('rho', {
set.seed(123)
expect_equal(sample_rho(10, x, y, rho = c(0, .5, 1)),
c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0))
})
test_that('sig', {
set.seed(123)
expect_equal(sample_sig(x, y, rho = c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0)),
c(3.47057973968515, 3.76533958631778, 5.05616904090221, 0.319511980843401,
2.97344640702733, 1.25384839880482, 0.888201360379622, 2.84216255450146,
1.35200519668826, 0.0434125237806975))
})
test_that('beta', {
set.seed(123)
expect_equal(sample_beta(x, y,
rho = c(1, 0.5, 1, 0),
v = c(0.237091661226817, 2.60818150317784, 2.10900711686825, 4.29265963681323)),
as.matrix(c(2.57282485588875, 0.859094481853702, 0.983136793340975, 0.766863519235555)))
})
test_that('all', {
set.seed(123)
expect_equal(sample_all(x, y, n = 10, pts = c(0, .5, 1)),
list(rho = c(1.0, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0),
sig2 = 1 / c(3.18533847052255, 4.10251890097103, 1.25384839880482,
0.0585040460504101, 0.442255004862561, 0.035105570425539,
0.434297339273836, 2.30355584438599, 1.70033255494056,
0.594859533302164),
beta = as.matrix(c(1.08887764629632, 1.56039942659895, 1.06592668114779,
1.94115360819053, 0.64932639228806, -0.723483839828413,
1.64356407596858, 1.1762269273108, 1.14516329636659,
1.14599625391133))))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/DataFrame.R
\name{intersectAll}
\alias{intersectAll}
\alias{intersectAll,SparkDataFrame,SparkDataFrame-method}
\title{intersectAll}
\usage{
intersectAll(x, y)
\S4method{intersectAll}{SparkDataFrame,SparkDataFrame}(x, y)
}
\arguments{
\item{x}{a SparkDataFrame.}
\item{y}{a SparkDataFrame.}
}
\value{
A SparkDataFrame containing the result of the intersect all operation.
}
\description{
Return a new SparkDataFrame containing rows in both this SparkDataFrame
and another SparkDataFrame while preserving the duplicates.
This is equivalent to \code{INTERSECT ALL} in SQL. Also as standard in
SQL, this function resolves columns by position (not by name).
}
\note{
intersectAll since 2.4.0
}
\examples{
\dontrun{
sparkR.session()
df1 <- read.json(path)
df2 <- read.json(path2)
intersectAllDF <- intersectAll(df1, df2)
}
}
\seealso{
Other SparkDataFrame functions:
\code{\link{SparkDataFrame-class}},
\code{\link{agg}()},
\code{\link{alias}()},
\code{\link{arrange}()},
\code{\link{as.data.frame}()},
\code{\link{attach,SparkDataFrame-method}},
\code{\link{broadcast}()},
\code{\link{cache}()},
\code{\link{checkpoint}()},
\code{\link{coalesce}()},
\code{\link{collect}()},
\code{\link{colnames}()},
\code{\link{coltypes}()},
\code{\link{createOrReplaceTempView}()},
\code{\link{crossJoin}()},
\code{\link{cube}()},
\code{\link{dapplyCollect}()},
\code{\link{dapply}()},
\code{\link{describe}()},
\code{\link{dim}()},
\code{\link{distinct}()},
\code{\link{dropDuplicates}()},
\code{\link{dropna}()},
\code{\link{drop}()},
\code{\link{dtypes}()},
\code{\link{exceptAll}()},
\code{\link{except}()},
\code{\link{explain}()},
\code{\link{filter}()},
\code{\link{first}()},
\code{\link{gapplyCollect}()},
\code{\link{gapply}()},
\code{\link{getNumPartitions}()},
\code{\link{group_by}()},
\code{\link{head}()},
\code{\link{hint}()},
\code{\link{histogram}()},
\code{\link{insertInto}()},
\code{\link{intersect}()},
\code{\link{isLocal}()},
\code{\link{isStreaming}()},
\code{\link{join}()},
\code{\link{limit}()},
\code{\link{localCheckpoint}()},
\code{\link{merge}()},
\code{\link{mutate}()},
\code{\link{ncol}()},
\code{\link{nrow}()},
\code{\link{persist}()},
\code{\link{printSchema}()},
\code{\link{randomSplit}()},
\code{\link{rbind}()},
\code{\link{rename}()},
\code{\link{repartitionByRange}()},
\code{\link{repartition}()},
\code{\link{rollup}()},
\code{\link{sample}()},
\code{\link{saveAsTable}()},
\code{\link{schema}()},
\code{\link{selectExpr}()},
\code{\link{select}()},
\code{\link{showDF}()},
\code{\link{show}()},
\code{\link{storageLevel}()},
\code{\link{str}()},
\code{\link{subset}()},
\code{\link{summary}()},
\code{\link{take}()},
\code{\link{toJSON}()},
\code{\link{unionAll}()},
\code{\link{unionByName}()},
\code{\link{union}()},
\code{\link{unpersist}()},
\code{\link{withColumn}()},
\code{\link{withWatermark}()},
\code{\link{with}()},
\code{\link{write.df}()},
\code{\link{write.jdbc}()},
\code{\link{write.json}()},
\code{\link{write.orc}()},
\code{\link{write.parquet}()},
\code{\link{write.stream}()},
\code{\link{write.text}()}
}
\concept{SparkDataFrame functions}
| /man/intersectAll.Rd | no_license | cran/SparkR | R | false | true | 3,184 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/DataFrame.R
\name{intersectAll}
\alias{intersectAll}
\alias{intersectAll,SparkDataFrame,SparkDataFrame-method}
\title{intersectAll}
\usage{
intersectAll(x, y)
\S4method{intersectAll}{SparkDataFrame,SparkDataFrame}(x, y)
}
\arguments{
\item{x}{a SparkDataFrame.}
\item{y}{a SparkDataFrame.}
}
\value{
A SparkDataFrame containing the result of the intersect all operation.
}
\description{
Return a new SparkDataFrame containing rows in both this SparkDataFrame
and another SparkDataFrame while preserving the duplicates.
This is equivalent to \code{INTERSECT ALL} in SQL. Also as standard in
SQL, this function resolves columns by position (not by name).
}
\note{
intersectAll since 2.4.0
}
\examples{
\dontrun{
sparkR.session()
df1 <- read.json(path)
df2 <- read.json(path2)
intersectAllDF <- intersectAll(df1, df2)
}
}
\seealso{
Other SparkDataFrame functions:
\code{\link{SparkDataFrame-class}},
\code{\link{agg}()},
\code{\link{alias}()},
\code{\link{arrange}()},
\code{\link{as.data.frame}()},
\code{\link{attach,SparkDataFrame-method}},
\code{\link{broadcast}()},
\code{\link{cache}()},
\code{\link{checkpoint}()},
\code{\link{coalesce}()},
\code{\link{collect}()},
\code{\link{colnames}()},
\code{\link{coltypes}()},
\code{\link{createOrReplaceTempView}()},
\code{\link{crossJoin}()},
\code{\link{cube}()},
\code{\link{dapplyCollect}()},
\code{\link{dapply}()},
\code{\link{describe}()},
\code{\link{dim}()},
\code{\link{distinct}()},
\code{\link{dropDuplicates}()},
\code{\link{dropna}()},
\code{\link{drop}()},
\code{\link{dtypes}()},
\code{\link{exceptAll}()},
\code{\link{except}()},
\code{\link{explain}()},
\code{\link{filter}()},
\code{\link{first}()},
\code{\link{gapplyCollect}()},
\code{\link{gapply}()},
\code{\link{getNumPartitions}()},
\code{\link{group_by}()},
\code{\link{head}()},
\code{\link{hint}()},
\code{\link{histogram}()},
\code{\link{insertInto}()},
\code{\link{intersect}()},
\code{\link{isLocal}()},
\code{\link{isStreaming}()},
\code{\link{join}()},
\code{\link{limit}()},
\code{\link{localCheckpoint}()},
\code{\link{merge}()},
\code{\link{mutate}()},
\code{\link{ncol}()},
\code{\link{nrow}()},
\code{\link{persist}()},
\code{\link{printSchema}()},
\code{\link{randomSplit}()},
\code{\link{rbind}()},
\code{\link{rename}()},
\code{\link{repartitionByRange}()},
\code{\link{repartition}()},
\code{\link{rollup}()},
\code{\link{sample}()},
\code{\link{saveAsTable}()},
\code{\link{schema}()},
\code{\link{selectExpr}()},
\code{\link{select}()},
\code{\link{showDF}()},
\code{\link{show}()},
\code{\link{storageLevel}()},
\code{\link{str}()},
\code{\link{subset}()},
\code{\link{summary}()},
\code{\link{take}()},
\code{\link{toJSON}()},
\code{\link{unionAll}()},
\code{\link{unionByName}()},
\code{\link{union}()},
\code{\link{unpersist}()},
\code{\link{withColumn}()},
\code{\link{withWatermark}()},
\code{\link{with}()},
\code{\link{write.df}()},
\code{\link{write.jdbc}()},
\code{\link{write.json}()},
\code{\link{write.orc}()},
\code{\link{write.parquet}()},
\code{\link{write.stream}()},
\code{\link{write.text}()}
}
\concept{SparkDataFrame functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gr-domesticplotters.R
\name{plot_devsegments}
\alias{plot_devsegments}
\title{Draws colored segments from a matrix of coordinates.}
\usage{
plot_devsegments(coo, cols, lwd = 1)
}
\arguments{
\item{coo}{A matrix of coordinates.}
\item{cols}{A vector of color of \code{length = nrow(coo)}.}
\item{lwd}{The \code{lwd} to use for drawing segments.}
}
\description{
Given a matrix of (x; y) coordinates, draws segments between successive
points defined by the rows of the matrix, using a vector of colors to
convey additional information.
}
\examples{
# we load some data
data(bot)
guinness <- coo_sample(bot[9], 100)
# we calculate the diff between 48 harm and one with 6 harm.
out.6 <- efourier_i(efourier(guinness, nb.h=6), nb.pts=120)
# we calculate deviations, you can also try 'edm'
dev <- edm_nearest(out.6, guinness) / coo_centsize(out.6)
# we prepare the color scale
d.cut <- cut(dev, breaks=20, labels=FALSE, include.lowest=TRUE)
cols <- paste0(col_summer(20)[d.cut], 'CC')
# we draw the results
coo_plot(guinness, main='Guiness fitted with 6 harm.', points=FALSE)
par(xpd=NA)
plot_devsegments(out.6, cols=cols, lwd=4)
coo_draw(out.6, lty=2, points=FALSE, col=NA)
par(xpd=FALSE)
}
\seealso{
Other ldk functions: \code{\link{ldk_chull}},
\code{\link{ldk_confell}}, \code{\link{ldk_contour}},
\code{\link{ldk_links}}
}
| /man/plot_devsegments.Rd | no_license | yuting27/Momocs | R | false | true | 1,394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gr-domesticplotters.R
\name{plot_devsegments}
\alias{plot_devsegments}
\title{Draws colored segments from a matrix of coordinates.}
\usage{
plot_devsegments(coo, cols, lwd = 1)
}
\arguments{
\item{coo}{A matrix of coordinates.}
\item{cols}{A vector of color of \code{length = nrow(coo)}.}
\item{lwd}{The \code{lwd} to use for drawing segments.}
}
\description{
Given a matrix of (x; y) coordinates, draws segments between successive
points defined by the rows of the matrix, using a vector of colors to
convey additional information.
}
\examples{
# we load some data
data(bot)
guinness <- coo_sample(bot[9], 100)
# we calculate the diff between 48 harm and one with 6 harm.
out.6 <- efourier_i(efourier(guinness, nb.h=6), nb.pts=120)
# we calculate deviations, you can also try 'edm'
dev <- edm_nearest(out.6, guinness) / coo_centsize(out.6)
# we prepare the color scale
d.cut <- cut(dev, breaks=20, labels=FALSE, include.lowest=TRUE)
cols <- paste0(col_summer(20)[d.cut], 'CC')
# we draw the results
coo_plot(guinness, main='Guiness fitted with 6 harm.', points=FALSE)
par(xpd=NA)
plot_devsegments(out.6, cols=cols, lwd=4)
coo_draw(out.6, lty=2, points=FALSE, col=NA)
par(xpd=FALSE)
}
\seealso{
Other ldk functions: \code{\link{ldk_chull}},
\code{\link{ldk_confell}}, \code{\link{ldk_contour}},
\code{\link{ldk_links}}
}
|
# Skip the current testthat test unless RStudio (optionally at least
# `version`) is available; returns TRUE when the test may proceed.
skip_if_not_rstudio <- function(version = NULL) {
  if (!rstudioapi::isAvailable(version)) {
    msg <- if (is.null(version)) {
      "RStudio not available"
    } else {
      paste0("RStudio version '", version, "' not available")
    }
    skip(msg)
  }
  TRUE
}
# Open a fresh temporary .R file in the RStudio source editor and return
# its editor context; does nothing (returns NULL) when RStudio is not
# running.
scratch_file <- function() {
  if (rstudioapi::isAvailable()) {
    path <- tempfile(pattern = 'test', fileext = '.R')
    file.create(path)
    rstudioapi::navigateToFile(path)
    # Give the IDE a moment to actually open the document before asking
    # for the active editor context.
    Sys.sleep(1)
    rstudioapi::getSourceEditorContext()
  }
}
# Build a document_range covering the whole active document:
# position (1, 1) through (Inf, Inf), i.e. "everything".
entire_document <- function() {
  first <- rstudioapi::document_position(1, 1)
  last <- rstudioapi::document_position(Inf, Inf)
  rstudioapi::document_range(start = first, end = last)
}
# Return a list of document_ranges, one per line of the active document,
# each spanning that whole line (column 1 through nchar(line) + 1).
individual_lines <- function() {
  lines <- rstudioapi::getSourceEditorContext()$contents
  # FIX: `1:length(lines)` yields c(1, 0) for an empty document, producing
  # bogus ranges; seq_along() is safe. nchar() is vectorized, so the
  # unlist(lapply(...)) detour is unnecessary.
  idx <- seq_along(lines)
  Map(rstudioapi::document_range,
      Map(rstudioapi::document_position, idx, 1),
      Map(rstudioapi::document_position, idx, nchar(lines) + 1)
  )
}
# Replace the entire content of document `sec` with `txt`, save it, and --
# when a `mark` function (e.g. individual_lines or entire_document) is
# supplied -- select the ranges it produces.
set_text <- function(txt = '', sec, mark) {
  rstudioapi::modifyRange(location = entire_document(), text = txt, id = sec$id)
  rstudioapi::documentSave(sec$id)
  if (!missing(mark))
    rstudioapi::setSelectionRanges(mark())
}
# Produce n '#' characters followed by a single trailing space.
this_strrep <- function(n) paste0(strrep("#", n), " ")
| /tests/testthat/helper-functions.R | no_license | aoles/remedy | R | false | false | 1,295 | r | skip_if_not_rstudio <- function(version = NULL) {
available <- rstudioapi::isAvailable(version)
message <- if (is.null(version))
"RStudio not available"
else
paste("RStudio version '", version, "' not available", sep = "")
if (!available)
skip(message)
TRUE
}
scratch_file <- function() {
if (rstudioapi::isAvailable()) {
path <- tempfile(pattern = 'test', fileext = '.R')
file.create(path)
rstudioapi::navigateToFile(path)
Sys.sleep(1)
rstudioapi::getSourceEditorContext()
}
}
entire_document <- function() {
rstudioapi::document_range(start = rstudioapi::document_position(1,1),
end = rstudioapi::document_position(Inf,Inf))
}
individual_lines <- function() {
lines <- rstudioapi::getSourceEditorContext()$contents
n <- length(lines)
Map(rstudioapi::document_range,
Map(rstudioapi::document_position, 1:n, 1),
Map(rstudioapi::document_position, 1:n, unlist(lapply(lines, nchar)) + 1)
)
}
set_text <- function(txt = '', sec, mark) {
rstudioapi::modifyRange(location = entire_document(), text = txt, id = sec$id)
rstudioapi::documentSave(sec$id)
if (!missing(mark))
rstudioapi::setSelectionRanges(mark())
}
this_strrep <- function(n) sprintf('%s ',strrep('#',times = n))
|
# Flag observations whose `error` is more than two standard deviations from
# the mean, then visualize them on the visitors-by-period series.
# NOTE(review): `stdev` is assigned but never used below -- sd() is
# recomputed inline in each threshold.
stdev<-sd(raw$error)
high_outliers<-which(raw$error>(mean(raw$error)+2*sd(raw$error)))
low_outliers<-which(raw$error<(mean(raw$error)-2*sd(raw$error)))
# Print the flagged rows for inspection.
raw[high_outliers,]
raw[low_outliers,]
# Base series with overplotted points: red = high outliers, blue = low.
plot(raw$period,raw$visitors,type='o')
points(raw$period[high_outliers],raw$visitors[high_outliers],pch=19,col='red')
points(raw$period[low_outliers],raw$visitors[low_outliers],pch=19,col='blue')
| /Lesson06/Exercise44/Exercise44.R | permissive | Lithene/Applied-Unsupervised-Learning-with-R | R | false | false | 387 | r | stdev<-sd(raw$error)
high_outliers<-which(raw$error>(mean(raw$error)+2*sd(raw$error)))
low_outliers<-which(raw$error<(mean(raw$error)-2*sd(raw$error)))
raw[high_outliers,]
raw[low_outliers,]
plot(raw$period,raw$visitors,type='o')
points(raw$period[high_outliers],raw$visitors[high_outliers],pch=19,col='red')
points(raw$period[low_outliers],raw$visitors[low_outliers],pch=19,col='blue')
|
# Sentiment classification of the text2vec `movie_review` corpus:
# clean the reviews with tm, build a document-term matrix, then compare
# Naive Bayes and Random Forest on a train/test split.
#For dataset movie_review
library(text2vec)
# For Text cleaning and corpus
library(tm)
#For Naive bayes
library(e1071)
#For confusion matrix
library(caret)
#For RandomForest
library(randomForest)
#obtain movie review dataset
data('movie_review')
dataset <- movie_review
rm(movie_review)
#cleaning
#Create corpus
review_corpus <- Corpus(VectorSource(dataset$review))
#case-folding
review_corpus <- tm_map(review_corpus, tolower)
#remove stop-words (English stop list plus a few extra high-frequency words)
review_corpus <- tm_map(review_corpus, removeWords, c('i','its','it','us','use','used','using','will','yes','say','can','take','one', stopwords('english')))
#remove punctuation marks
review_corpus <- tm_map(review_corpus, removePunctuation)
#remove numbers
review_corpus <- tm_map(review_corpus, removeNumbers)
#Stem document
review_corpus <-tm_map(review_corpus, stemDocument)
#remove extra whitespaces
review_corpus <- tm_map(review_corpus, stripWhitespace)
#Create document term matrix
dtm <- DocumentTermMatrix(review_corpus)
#Remove sparse terms (keep terms present in at least ~0.1% of documents)
dtm <- removeSparseTerms(dtm, 0.999)
#form a dataframe
data <- data.frame(as.matrix(dtm))
#Add the sentiment column to data
data$c <- as.factor(dataset$sentiment)
#Split data into train and test
# NOTE(review): 4800 rows go to training; the remainder form the test set.
train <- data[sample(nrow(data),4800,replace = F),]
test <- data[!(1:nrow(data) %in% row.names(train)),]
#Fit Naivebayes to the train data
model_nb <- naiveBayes(c ~ ., data = train)
#Predict for the test data
# NOTE(review): 7348 is the hard-coded position of the label column `c`
# (presumably ncol(data)); this breaks if the vocabulary size changes --
# confirm and prefer a name-based index.
prediction_nb <- predict(model_nb, test[,-7348])
#Confusion Matrix
cm_nb = table(test[, 7348], prediction_nb)
confusionMatrix(cm_nb)
#Fit Random Forest to the train data
model_rf <- randomForest(c ~ ., train, ntree = 10)
#Predict for the test data
prediction_rf <- predict(model_rf, test[,-7348])
#Confusion Matrix
cm_rf = table(test[, 7348], prediction_rf)
confusionMatrix(cm_rf) | /movie_review_classification.R | no_license | sakinapitalwala/MovieReviewClassification | R | false | false | 1,802 | r | #For dataset movie_review
library(text2vec)
# For Text cleaning and corpus
library(tm)
#For Naive bayes
library(e1071)
#For confusion matrix
library(caret)
#For RandomForest
library(randomForest)
#obtain movie review dataset
data('movie_review')
dataset <- movie_review
rm(movie_review)
#cleaning
#Create corpus
review_corpus <- Corpus(VectorSource(dataset$review))
#case-folding
review_corpus <- tm_map(review_corpus, tolower)
#remove stop-words
review_corpus <- tm_map(review_corpus, removeWords, c('i','its','it','us','use','used','using','will','yes','say','can','take','one', stopwords('english')))
#remove punctuation marks
review_corpus <- tm_map(review_corpus, removePunctuation)
#remove numbers
review_corpus <- tm_map(review_corpus, removeNumbers)
#Stem document
review_corpus <-tm_map(review_corpus, stemDocument)
#remove extra whitespaces
review_corpus <- tm_map(review_corpus, stripWhitespace)
#Create document term matrix
dtm <- DocumentTermMatrix(review_corpus)
#Remove sparse terms
dtm <- removeSparseTerms(dtm, 0.999)
#form a dataframe
data <- data.frame(as.matrix(dtm))
#Add the sentiment column to data
data$c <- as.factor(dataset$sentiment)
#Split data into train and test
train <- data[sample(nrow(data),4800,replace = F),]
test <- data[!(1:nrow(data) %in% row.names(train)),]
#Fit Naivebayes to the train data
model_nb <- naiveBayes(c ~ ., data = train)
#Predict for the test data
prediction_nb <- predict(model_nb, test[,-7348])
#Confusion Matrix
cm_nb = table(test[, 7348], prediction_nb)
confusionMatrix(cm_nb)
#Fit Random Forest to the train data
model_rf <- randomForest(c ~ ., train, ntree = 10)
#Predict for the test data
prediction_rf <- predict(model_rf, test[,-7348])
#Confusion Matrix
cm_rf = table(test[, 7348], prediction_rf)
confusionMatrix(cm_rf) |
#' @title CountChemicalElements.
#'
#' @description \code{CountChemicalElements} will split a character (chemical formula)
#'     into its elements and count their occurrence.
#'
#' @details No testing for any chemical alphabet is performed. Elements may occur
#'     several times and will be summed up in this case without a warning.
#'     Isotope annotations in square brackets (e.g. \code{[13]C6}) are stripped
#'     before counting. A formula without any upper-case letter yields an empty
#'     (or, with \code{ele} set, all-zero) result instead of a bogus count.
#'
#' @param x Chemical formula.
#' @param ele Character vector of elements to count particularly or counting all contained if NULL.
#'
#' @return A named numeric with counts for all contained or specified elements.
#'
#' @examples
#' CountChemicalElements("C6H12O6")
#' CountChemicalElements("CH3COOH", ele = c("C", "H", "O", "N"))
#'
#' @export
#'
CountChemicalElements <- function(x = NULL, ele = NULL) {
  # remove square bracket constructs (e.g. [13]C6 --> C6) upfront
  x <- gsub("[[].+[]]", "", x)
  # every element symbol starts with an upper-case letter
  p <- gregexpr("[[:upper:]]", x)[[1]]
  # guard: no upper-case letter at all (e.g. "" or "123") -- previously this
  # produced a nonsense count from substr() with a negative start position
  if (p[1] == -1L) {
    count <- setNames(numeric(0), character(0))
    if (!is.null(ele)) count <- setNames(rep(0, length(ele)), ele)
    return(count)
  }
  # split the formula at the element start positions, one piece per element
  # (seq_along instead of 1:length, vapply for a type-stable result)
  out <- vapply(seq_along(p), function(i) {
    substr(x, p[i], if (i == length(p)) nchar(x) else p[i + 1] - 1)
  }, character(1))
  # remove everything that is neither letter nor digit (brackets, charges, ...)
  out <- gsub("[^[:alnum:]]", "", out)
  # the numeric suffix is the count; a bare symbol counts once
  count <- as.numeric(gsub("[^[:digit:]]", "", out))
  count[is.na(count)] <- 1
  names(count) <- gsub("[^[:alpha:]]", "", out)
  # sum up elements that occur repeatedly (e.g. "CH3COOH"), keeping the
  # order of first appearance
  if (any(duplicated(names(count)))) {
    for (i in rev(which(duplicated(names(count))))) {
      first <- which(names(count) == names(count)[i])[1]
      count[first] <- count[first] + count[i]
      count <- count[-i]
    }
  }
  # restrict/reorder the output according to 'ele'; absent elements count 0
  if (!is.null(ele)) {
    count <- vapply(ele, function(e) {
      if (e %in% names(count)) count[[which(names(count) == e)]] else 0
    }, numeric(1))
  }
  return(count)
}
#'
#' @description \code{CountChemicalElements} will split a character (chemical formula)
#' into its elements and count their occurrence.
#'
#' @details No testing for any chemical alphabet is performed. Elements may occur
#' several times and will be summed up in this case without a warning.
#'
#' @param x Chemical formula.
#' @param ele Character vector of elements to count particularly or counting all contained if NULL.
#'
#' @return A named numeric with counts for all contained or specified elements.
#'
#' @export
#'
CountChemicalElements <- function(x = NULL, ele = NULL) {
  # Count all chemical elements present within the formula string 'x';
  # returns a named numeric (names = element symbols, values = counts).
  # remove square bracket constructs (e.g. [13]C6 --> C6) upfront
  x <- gsub("[[].+[]]","",x)
  # all elements start with a LETTER... (upper-case); p holds their positions.
  # NOTE(review): if 'x' contains no upper-case letter, gregexpr() returns -1
  # and the substr() below yields a meaningless piece -- confirm callers
  # always pass a proper formula.
  p <- gregexpr("[[:upper:]]", x)[[1]]
  # split initial string at the large letter positions, so each piece is one
  # element symbol plus its optional numeric multiplier, e.g. "H12"
  out <- sapply(1:length(p), function(i) {
    substr(x, p[i], ifelse(i == length(p), nchar(x), p[i + 1] - 1))
  })
  # remove all non letter/digit (e.g. further brackets, charges...)
  out <- gsub("[^[:alnum:]]", "", out)
  # the digit part is the multiplier; a bare symbol (NA after gsub) counts once
  count <- as.numeric(gsub("[^[:digit:]]", "", out))
  count[is.na(count)] <- 1
  names(count) <- gsub("[^[:alpha:]]", "", out)
  # sum up in case that elements were found repeatedly; iterating duplicate
  # positions in reverse keeps earlier indices valid while elements are removed
  if (any(duplicated(names(count)))) {
    for (i in rev(which(duplicated(names(count))))) {
      count[which((names(count) == names(count)[i]))[1]] <- count[which((names(count) == names(count)[i]))[1]] + count[i]
      count <- count[-i]
    }
  }
  # reorder or limit output vector according to 'ele' and 'order_ele'
  # (elements requested via 'ele' but absent from 'x' are reported as 0)
  if (!is.null(ele)) count <- sapply(ele, function(e) { ifelse(e %in% names(count), count[names(count)==e], 0) })
  return(count)
} |
# Extracted example code for vecsets::vsetdiff -- per its 'multiple'
# argument, a variant of setdiff() that can keep duplicate elements.
library(vecsets)
### Name: vsetdiff
### Title: Find all elements in first argument which are not in second
###   argument.
### Aliases: vsetdiff
### ** Examples
# x deliberately contains repeated 3s and NAs to contrast vsetdiff()
# with base::setdiff(), which de-duplicates its result.
x <- c(1:5,3,3,3,2,NA,NA)
y<- c(2:5,4,3,NA)
vsetdiff(x,y)
# multiple=FALSE drops duplicate survivors, matching base::setdiff()
vsetdiff(x,y,multiple=FALSE)
setdiff(x,y) # same as previous line
vsetdiff(y,x) #note the asymmetry
| /data/genthat_extracted_code/vecsets/examples/vsetdiff.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 326 | r | library(vecsets)
### Name: vsetdiff
### Title: Find all elements in first argument which are not in second
###   argument.
### Aliases: vsetdiff
### ** Examples
# Example inputs contain repeated 3s and NAs to contrast vsetdiff()
# with base::setdiff(), which de-duplicates its result.
x <- c(1:5,3,3,3,2,NA,NA)
y<- c(2:5,4,3,NA)
vsetdiff(x,y)
# multiple=FALSE drops duplicate survivors, matching base::setdiff()
vsetdiff(x,y,multiple=FALSE)
setdiff(x,y) # same as previous line
vsetdiff(y,x) #note the asymmetry
|
################################################################################
###############Plot the Network################################################
################################################################################
# Reads an edge list from network.csv, builds a network object, and writes
# two PDF renderings of it (one with vertex labels, one without).
setwd("C:/Users/admin-ccook/Desktop/3/Spring/Networks/Project/")
# NOTE(review): graph_from_edgelist() below comes from the igraph package,
# which is never loaded here -- presumably igraph is attached elsewhere;
# confirm, or add library(igraph). library(tm) appears unused in this script.
library(tm)
library(network)
# NOTE(review): graph_from_edgelist() expects a two-column matrix; passing
# the data.frame from read.csv() directly may need as.matrix() -- verify.
data=read.csv("network.csv")
graph2=graph_from_edgelist(data,directed=F)
#############################################################################
#adj is now the adjacentcy matrix for the first ten documents...
adj_net=network(graph2,directed=FALSE)
pdf("simple_plot_small.pdf",height=10,width=10, pointsize=8)
# set some graphical parameters (see ?par)
par(las=1,mar=c(3.25,4,1,1))
# Simple plot
## Set random number seed so the plot is replicable
set.seed(5)
## Plot the network with labels
plot(adj_net,displaylabels=T,vertex.cex=1,label.cex=1,
     edge.col=rgb(150,150,150,100,maxColorValue=255),
     label.pos=5,vertex.col="lightblue")
# check out all the options with ?plot.network
dev.off()
pdf("simple_plot2_small.pdf",height=10,width=10, pointsize=8)
# set some graphical parameters (see ?par)
par(las=1,mar=c(3.25,4,1,1))
# Simple plot
## Set random number seed so the plot is replicable (same seed as above)
set.seed(5)
## Plot the network without labels this time
plot(adj_net,vertex.cex=1,
     edge.col=rgb(150,150,150,100,maxColorValue=255),
     label.pos=5,vertex.col="lightblue")
# check out all the options with ?plot.network
dev.off()
| /scripts/simple_plot.R | no_license | cmcook22/Cook_Networks_Project | R | false | false | 1,474 | r | ################################################################################
###############Plot the Network################################################
################################################################################
# Builds a network object from an edge list CSV and renders it twice to PDF:
# once with vertex labels, once without.
setwd("C:/Users/admin-ccook/Desktop/3/Spring/Networks/Project/")
library(tm)
library(network)
# Load the edge list and turn it into a graph
edge_list = read.csv("network.csv")
net_graph = graph_from_edgelist(edge_list, directed=F)
#############################################################################
# Wrap the graph as a 'network' object so plot.network() can draw it
net_adj = network(net_graph, directed=FALSE)
pdf("simple_plot_small.pdf", height=10, width=10, pointsize=8)
# Graphical parameters (see ?par)
par(las=1, mar=c(3.25,4,1,1))
# Labelled rendering; fixed seed makes the layout reproducible
set.seed(5)
plot(net_adj, displaylabels=T, vertex.cex=1, label.cex=1,
     edge.col=rgb(150,150,150,100,maxColorValue=255),
     label.pos=5, vertex.col="lightblue")
# see ?plot.network for all available options
dev.off()
pdf("simple_plot2_small.pdf", height=10, width=10, pointsize=8)
# Same parameters and seed as above, but no vertex labels
par(las=1, mar=c(3.25,4,1,1))
set.seed(5)
plot(net_adj, vertex.cex=1,
     edge.col=rgb(150,150,150,100,maxColorValue=255),
     label.pos=5, vertex.col="lightblue")
dev.off()
dev.off()
|
/atigrafia_tese.R | no_license | paulohpmoraes/Doutorado | R | false | false | 9,630 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treatmentResponseDiallel.R
\name{batch.plotter.wrapper}
\alias{batch.plotter.wrapper}
\title{batch.plotter.wrapper: Make dot plots by batch}
\usage{
batch.plotter.wrapper(data, trt.string, ctrl.string, ...)
}
\arguments{
\item{data}{the data frame being used, in this case, from FluDiData}
\item{trt.string}{a string indicating the treatment group}
\item{ctrl.string}{a string indicating the control group}
\item{...}{additional arguments}
}
\value{
A wrapper for generating PDF dot plots of the treated and control groups, separated by batch.
}
\description{
Generate dot plots, separated by batch.
}
\examples{
## not run
}
| /man/batch.plotter.wrapper.Rd | no_license | mauriziopaul/treatmentResponseDiallel | R | false | true | 677 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treatmentResponseDiallel.R
\name{batch.plotter.wrapper}
\alias{batch.plotter.wrapper}
\title{batch.plotter.wrapper: Make dot plots by batch}
\usage{
batch.plotter.wrapper(data, trt.string, ctrl.string, ...)
}
\arguments{
\item{data}{the data frame being used, in this case, from FluDiData}
\item{trt.string}{a string indicating the treatment group}
\item{ctrl.string}{a string indicating the control group}
\item{...}{additional arguments}
}
\value{
A wrapper for generating PDF dot plots of the treated and control groups, separated by batch.
}
\description{
Generate dot plots, separated by batch.
}
\examples{
## not run
}
|
# plot2.R -- Global Active Power over time for 2007-02-01/02.
#
# Reads the UCI "household power consumption" data set (cached in the
# global `power` so repeated runs skip the expensive read), extracts the
# two target days and writes a line chart to plot2.png.

# library() instead of require(): fail loudly if lubridate is missing
library(lubridate)

# Read data into R once; "?" marks missing values in this data set
if (!exists("power")) {
  power <- read.table("household_power_consumption.txt", sep = ";",
                      header = TRUE, quote = "", strip.white = TRUE,
                      stringsAsFactors = FALSE, na.strings = "?")
}

# Parse the Date column (renamed so the variable no longer shadows base::date)
obs_dates <- dmy(power$Date)

# Rows for the two target days (Feb 1 & Feb 2, 2007);
# renamed so the variable no longer shadows utils::data
target_rows <- which(obs_dates %in% c(ymd(20070201), ymd(20070202)))

# Extract only the data we need
power2 <- power[target_rows, ]

# Create a full timestamp column from the separate Date and Time columns
power2$DateTime <- dmy_hms(paste(power2$Date, power2$Time))

# Generating Plot2:
png("plot2.png", width = 480, height = 480)
plot(power2$DateTime, power2$Global_active_power, type = "l", lwd = 1,
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
| /plot2.R | no_license | xhoong/ExData_Plotting1 | R | false | false | 750 | r | require(lubridate)
# Load the raw household power data once; "?" marks missing values.
# The result is cached in the global `power` across runs.
if (!exists("power")) {
  power <- read.table("household_power_consumption.txt", sep = ";",
                      header = TRUE, quote = "", strip.white = TRUE,
                      stringsAsFactors = FALSE, na.strings = "?")
}
# Parse observation dates and locate the rows for 2007-02-01 and 2007-02-02
obs_date <- dmy(power$Date)
feb_rows <- which(obs_date %in% c(ymd(20070201), ymd(20070202)))
# Keep only the two target days
feb_power <- power[feb_rows, ]
# Combine Date and Time into a single timestamp column
feb_power$DateTime <- dmy_hms(paste(feb_power$Date, feb_power$Time))
# Render the line chart of global active power to plot2.png
png("plot2.png", width = 480, height = 480)
plot(feb_power$DateTime, feb_power$Global_active_power, type = "l", lwd = 1,
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
#' Compute key rectangle coordinates for a piano keyboard
#'
#' Internal helper: walks the keys left to right until the requested number
#' of white keys has been produced, emitting one row of rectangle
#' coordinates per key. Black keys are drawn shorter (ymin = 0.45) on a
#' higher layer, offset to straddle the preceding white key's right edge.
#'
#' @param number_white_keys Number of white keys the keyboard should have.
#' @return A tibble with columns key, key_color, xmin, ymin, xmax, ymax, layer.
#'
#' @importFrom tibble tibble
#' @importFrom dplyr bind_rows
#' @noRd
generate_keys <- function(number_white_keys = 14) {
  stopifnot(number_white_keys > 0)
  number_white_keys <- as.integer(number_white_keys)

  chords <- NULL
  white_keys <- 0L
  i <- 0L

  while (white_keys < number_white_keys) {
    i <- i + 1L
    # get_key_color() is defined elsewhere in the package
    i_color <- get_key_color(i)
    if (i_color == "white") {
      white_keys <- white_keys + 1L
      start_x <- (white_keys - 1) / number_white_keys
      end_x <- white_keys / number_white_keys
      chords <- dplyr::bind_rows(chords,
                                 tibble::tibble(
                                   key = i,
                                   key_color = "white",
                                   xmin = start_x,
                                   ymin = 0,
                                   xmax = end_x,
                                   ymax = 1,
                                   layer = 1
                                 ))
    } else {
      # Black key: shifted a quarter key-width past the midpoint of the
      # white key just produced, so it straddles that key's right edge.
      # (Calls below are namespaced consistently; the original mixed bare
      # bind_rows/tibble with dplyr::/tibble:: between the two branches.)
      start_x <- (white_keys - 1) / number_white_keys
      end_x <- white_keys / number_white_keys
      start_x2 <- ((start_x + end_x) / 2) + 1 / (4 * number_white_keys)
      end_x2 <- end_x + 1 / (4 * number_white_keys)
      chords <- dplyr::bind_rows(chords,
                                 tibble::tibble(
                                   key = i,
                                   key_color = "black",
                                   xmin = start_x2,
                                   ymin = 0.45,
                                   xmax = end_x2,
                                   ymax = 1,
                                   layer = 2
                                 ))
    }
  }
  return(chords)
}
generate_tone_properties <- function() {
  # Twelve semitones per octave; enharmonic pairs (C#/Db, ...) share a key.
  tone_names <- c("C", "C#", "Db", "D", "D#", "Eb", "E", "F",
                  "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B")
  key_numbers <- c(1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12)
  # Naturals have one-character names and sit on white keys.
  tibble(tone = tone_names, key = key_numbers) %>%
    dplyr::mutate(
      key_color = ifelse(nchar(tone) == 1, "white", "black")
    )
}
#' Generate a custom-sized piano
#'
#' Generate a custom-sized piano, useful e.g. for synthesizers.
#'
#' @param number_white_keys Number of white keys the keyboard should have.
#'
#' @return A tibble of key coordinates (class \code{pichor_key_koords}) with
#'   one row per key, including tone names, a stacked display label, and
#'   label placement/colour columns.
#'
#' @examples
#' d <- generate_keys_chords(10)
#' ggpiano(d, labels = TRUE)
#' d <- d %>%
#'   dplyr::rowwise() %>%
#'   dplyr::mutate(label = tones[[1]])
#' ggpiano(d, labels = TRUE)
#'
#' @importFrom tibble tribble
#' @importFrom dplyr rowwise mutate
#'
#' @export
generate_keys_chords <- function(number_white_keys = 14) {
  d_keys <- generate_keys(number_white_keys)

  # One row per key number (1..12) with all enharmonic tone names and a
  # newline-separated label for display.
  d_tones <- generate_tone_properties() %>%
    dplyr::select(key, tone) %>%
    dplyr::group_by(key) %>%
    dplyr::summarise(tones = list(tone),
                     label = paste0(tone, collapse = "\n"))

  # Attach tone info to each key; keys repeat every octave (12 keys),
  # hence the modulo join key. (Dead `if (FALSE)` development blocks that
  # previously lived in this function have been removed.)
  keys_chords <- d_keys %>%
    dplyr::mutate(join_key = ((key - 1) %% 12) + 1) %>%
    dplyr::left_join(d_tones, by = c("join_key" = "key")) %>%
    dplyr::select(-join_key) %>%
    dplyr::mutate(label_x = (xmin + xmax) / 2,
                  label_y = ymin + 0.1) %>%
    dplyr::mutate(label_color = dplyr::case_when(
      key_color == "black" ~ "white",
      TRUE ~ "black"))

  # Put here instead of where it's generated to ease development process
  class(keys_chords) <- c("pichor_key_koords", class(keys_chords))

  keys_chords
}
| /R/generate_data.R | no_license | mikldk/pichor | R | false | false | 4,346 | r | #' @importFrom tibble tibble
#' @importFrom dplyr bind_rows
generate_keys <- function(number_white_keys = 14) {
  # Walk keys from the left until enough white keys have been emitted,
  # collecting one coordinate row per key; black keys are shorter
  # (ymin = 0.45), on a higher layer, and straddle the previous white key.
  stopifnot(number_white_keys > 0)
  number_white_keys <- as.integer(number_white_keys)
  key_rows <- list()
  n_white <- 0L
  n_black <- 0L
  key_no <- 0L
  while (n_white < number_white_keys) {
    key_no <- key_no + 1L
    if (get_key_color(key_no) == "white") {
      n_white <- n_white + 1L
      left <- (n_white - 1) / number_white_keys
      right <- n_white / number_white_keys
      key_rows[[length(key_rows) + 1L]] <- tibble::tibble(
        key = key_no,
        key_color = "white",
        xmin = left,
        ymin = 0,
        xmax = right,
        ymax = 1,
        layer = 1
      )
    } else {
      n_black <- n_black + 1L
      left <- (n_white - 1) / number_white_keys
      right <- n_white / number_white_keys
      offset <- 1 / (4 * number_white_keys)
      key_rows[[length(key_rows) + 1L]] <- tibble::tibble(
        key = key_no,
        key_color = "black",
        xmin = ((left + right) / 2) + offset,
        ymin = 0.45,
        xmax = right + offset,
        ymax = 1,
        layer = 2
      )
    }
  }
  return(dplyr::bind_rows(key_rows))
}
generate_tone_properties <- function() {
  # Semitone table: each tone name mapped to its key number within an
  # octave; enharmonic pairs (C#/Db, ...) share a key number.
  semitones <- tribble(
    ~tone, ~key,
    "C", 1,
    "C#", 2,
    "Db", 2,
    "D", 3,
    "D#", 4,
    "Eb", 4,
    "E", 5,
    "F", 6,
    "F#", 7,
    "Gb", 7,
    "G", 8,
    "G#", 9,
    "Ab", 9,
    "A", 10,
    "A#", 11,
    "Bb", 11,
    "B", 12
  )
  # One-character names are the naturals, i.e. the white keys.
  dplyr::mutate(semitones,
                key_color = ifelse(nchar(tone) == 1, "white", "black"))
}
#' Generate a custom-sized piano
#'
#' Generate a custom-sized piano, useful e.g. for synthesizers.
#'
#' @examples
#' d <- generate_keys_chords(10)
#' ggpiano(d, labels = TRUE)
#' d <- d %>%
#'   dplyr::rowwise() %>%
#'   dplyr::mutate(label = tones[[1]])
#' ggpiano(d, labels = TRUE)
#'
#' @importFrom tibble tribble
#' @importFrom dplyr rowwise mutate
#'
#' @export
generate_keys_chords <- function(number_white_keys = 14) {
  key_coords <- generate_keys(number_white_keys)
  # Collapse the tone table to one row per key number with the list of
  # enharmonic names plus a stacked newline-separated display label.
  tone_info <- generate_tone_properties() %>%
    dplyr::select(key, tone) %>%
    dplyr::group_by(key) %>%
    dplyr::summarise(tones = list(tone),
                     label = paste0(tone, collapse = "\n"))
  # Attach tone info to every key (keys repeat every 12 -> modulo join),
  # then compute where and in which colour to draw each label.
  result <- key_coords %>%
    dplyr::mutate(join_key = ((key - 1) %% 12) + 1) %>%
    dplyr::left_join(tone_info, by = c("join_key" = "key")) %>%
    dplyr::select(-join_key) %>%
    dplyr::mutate(label_x = (xmin + xmax) / 2,
                  label_y = ymin + 0.1) %>%
    dplyr::mutate(label_color = case_when(
      key_color == "black" ~ "white",
      TRUE ~ "black"))
  # Class tag set here (rather than at generation) to ease development
  class(result) <- c("pichor_key_koords", class(result))
  result
}
|
# Auto-generated fuzzing harness (AFL/genthat): calls the internal CNull
# sampler with a fixed, extreme-valued 8x3 input matrix.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536015178e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# CNull::: reaches an unexported function -- intended only for this test
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615781643-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536015178e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Invoke the unexported CNull sampler on the fuzz-generated argument list
# and print the structure of whatever it returns.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ReturnNucs.R
\name{ReturnNucs}
\alias{ReturnNucs}
\title{Return Ambiguity Codes}
\usage{
ReturnNucs(NucCode, forSNAPP = FALSE)
}
\arguments{
\item{NucCode}{An ambiguity code}
\item{forSNAPP}{Logical. If FALSE (default), missing data characters will be returned as all possible bases. If TRUE, missing data will be returned as "-".}
}
\value{
Returns a character vector with base possibilities.
}
\description{
This function will take an IUPAC ambiguity code and return a set of bases
}
\examples{
ReturnNucs("N", forSNAPP=FALSE)
ReturnNucs("N", forSNAPP=TRUE)
ReturnNucs("K")
}
\seealso{
\link{ReadSNP} \link{WriteSNP} \link{ReturnAmbyCode}
}
| /man/ReturnNucs.Rd | no_license | QinHantao/phrynomics | R | false | false | 747 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ReturnNucs.R
\name{ReturnNucs}
\alias{ReturnNucs}
\title{Return Ambiguity Codes}
\usage{
ReturnNucs(NucCode, forSNAPP = FALSE)
}
\arguments{
\item{NucCode}{An ambiguity code}
\item{forSNAPP}{Logical. If FALSE (default), missing data characters will be returned as all possible bases. If TRUE, missing data will be returned as "-".}
}
\value{
Returns a character vector with base possibilities.
}
\description{
This function will take an IUPAC ambiguity code and return a set of bases
}
\examples{
ReturnNucs("N", forSNAPP=FALSE)
ReturnNucs("N", forSNAPP=TRUE)
ReturnNucs("K")
}
\seealso{
\link{ReadSNP} \link{WriteSNP} \link{ReturnAmbyCode}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.