content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
readJDX <- function (file = "", debug = FALSE){
    # ChemoSpec, Bryan Hanson, November 2012
    # Reads a JCAMP-DX file containing a single spectrum.
    # The data block must be of the type XYDATA=(X++(Y..Y)); AFFN format
    # only, with '+', '-' or ' ' as the separator. Not extensively tested.
    #
    # Args:
    #   file  : path to the JCAMP-DX file (required)
    #   debug : if TRUE, print progress/diagnostic information
    #
    # Returns: a data.frame with columns x (computed axis) and y (scaled data).
    #
    # Fixes relative to the original:
    #   * grep() returns integer(0), never 0, when there is no match, so the
    #     old '== 0' tests could never fire and instead crashed the following
    #     if(); all such guards now test length()
    #   * the compound-file test now counts ##TITLE= lines instead of
    #     comparing the (single) title's line number to 1
    #   * operator-precedence bug in the delimiter sanity check repaired
    #     ('!x == 1L & y == 1L' parsed as '(!(x == 1L)) & (y == 1L)')
    #   * a one-line data block (spcstart == spcend) is now accepted
    #   * the gsubfn dependency is gone -- base sub() does the same job

    if (file == "") stop("No file specified")
    jdx <- readLines(file)

    # Compound JCAMP files contain more than one ##TITLE= line.
    # The standard requires a title in the first line; its absence suggests
    # this is not a JCAMP-DX file at all.
    ttl <- grep("^##TITLE=", jdx)
    if (length(ttl) > 1) stop("Compound data sets not supported")
    if (length(ttl) == 0 || ttl[1] != 1) warning("This may not be a JCAMP-DX file")

    # NMR data with real & imaginary parts is not supported.
    if (any(grepl("^##NTUPLES", jdx)))
        stop("This looks like NMR data with real & imaginary parts, which is not supported")

    if (debug) cat("\nFile = ", file, "\n")

    # The data block starts right after ##XYDATA=(X++(Y..Y)) ...
    spcstart <- grep("^##XYDATA=\\s*\\(X\\+\\+\\(Y\\.\\.Y\\)\\)$", jdx) + 1
    if (length(spcstart) == 0)
        stop("Couldn't find the data block start (see ?readJDX for supported formats)")
    # ... and ends right before ##END= (some files have trailing text there)
    spcend <- grep("^##END=", jdx) - 1
    if (length(spcend) == 0) stop("Couldn't find the data block end")
    if (length(spcstart) != 1L || length(spcend) != 1L)
        stop("Problem with delimiting data block")
    if (spcstart > spcend) stop("End of data block in the wrong place")

    # Each line of the data block begins with an x (frequency) marker that
    # must be removed: optional sign, digits, optional '.' or ',' decimal
    # point (EU files), optional surrounding whitespace. sub() removes only
    # the first match per line and is vectorized, so no loop is needed.
    yValues <- sub("\\s*(\\+|-)*[[:digit:]]+(\\.|,)?[[:digit:]]*\\s*", "",
        jdx[spcstart:spcend])

    yValues <- paste(yValues, collapse = " ") # concatenate into one string
    yValues <- gsub("\\+", " ", yValues)  # '+' separators -> space
    yValues <- gsub("-", " -", yValues)   # keep '-' attached to negative values
    yValues <- sub("\\s*", "", yValues)   # drop any leading whitespace
    yValues <- gsub(",", ".", yValues)    # EU-style decimal commas -> '.'
    yValues <- as.numeric(unlist(strsplit(yValues, split = "\\s+")))

    # Helper: extract the numeric value of a '##KEY=' header line.
    # Stops with an informative message when the key is absent.
    getNumeric <- function(key) {
        idx <- grep(paste0("^##", key, "="), jdx)
        if (length(idx) == 0) stop("Couldn't find ", key)
        val <- sub(paste0("##", key, "="), "", jdx[idx[1]], fixed = TRUE)
        as.numeric(sub(",", ".", val)) # EU-style decimal comma
    }

    firstX <- getNumeric("FIRSTX")
    lastX <- getNumeric("LASTX")
    npoints <- as.integer(getNumeric("NPOINTS"))
    yFac <- getNumeric("YFACTOR")

    if (debug) cat("\tNPOINTS = ", npoints, "\n")
    if (debug) cat("\tActual no. data points found = ", length(yValues), "\n")
    if (npoints != length(yValues))
        stop("NPOINTS and length of data block don't match")
    if (debug) cat("\tfirstX = ", firstX, "\n")
    if (debug) cat("\tlastX = ", lastX, "\n")
    if (debug) cat("\tyFac = ", yFac, "\n")

    # Scale the raw counts and construct the x axis.
    # length.out (not by) guarantees exactly npoints values and avoids
    # floating-point drop-off of the final point.
    yValues <- yValues * yFac
    xValues <- seq(firstX, lastX, length.out = npoints)
    data.frame(x = xValues, y = yValues)
}
|
/ChemoSpec/R/readJDX.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,607
|
r
|
readJDX <- function (file = "", debug = FALSE){
    # ChemoSpec, Bryan Hanson, November 2012
    # Reads a JCAMP-DX file containing a single spectrum.
    # The data block must be of the type XYDATA=(X++(Y..Y)); AFFN format
    # only, with '+', '-' or ' ' as the separator. Not extensively tested.
    #
    # Args:
    #   file  : path to the JCAMP-DX file (required)
    #   debug : if TRUE, print progress/diagnostic information
    #
    # Returns: a data.frame with columns x (computed axis) and y (scaled data).
    #
    # Fixes relative to the original:
    #   * grep() returns integer(0), never 0, when there is no match, so the
    #     old '== 0' tests could never fire and instead crashed the following
    #     if(); all such guards now test length()
    #   * the compound-file test now counts ##TITLE= lines instead of
    #     comparing the (single) title's line number to 1
    #   * operator-precedence bug in the delimiter sanity check repaired
    #     ('!x == 1L & y == 1L' parsed as '(!(x == 1L)) & (y == 1L)')
    #   * a one-line data block (spcstart == spcend) is now accepted
    #   * the gsubfn dependency is gone -- base sub() does the same job

    if (file == "") stop("No file specified")
    jdx <- readLines(file)

    # Compound JCAMP files contain more than one ##TITLE= line.
    # The standard requires a title in the first line; its absence suggests
    # this is not a JCAMP-DX file at all.
    ttl <- grep("^##TITLE=", jdx)
    if (length(ttl) > 1) stop("Compound data sets not supported")
    if (length(ttl) == 0 || ttl[1] != 1) warning("This may not be a JCAMP-DX file")

    # NMR data with real & imaginary parts is not supported.
    if (any(grepl("^##NTUPLES", jdx)))
        stop("This looks like NMR data with real & imaginary parts, which is not supported")

    if (debug) cat("\nFile = ", file, "\n")

    # The data block starts right after ##XYDATA=(X++(Y..Y)) ...
    spcstart <- grep("^##XYDATA=\\s*\\(X\\+\\+\\(Y\\.\\.Y\\)\\)$", jdx) + 1
    if (length(spcstart) == 0)
        stop("Couldn't find the data block start (see ?readJDX for supported formats)")
    # ... and ends right before ##END= (some files have trailing text there)
    spcend <- grep("^##END=", jdx) - 1
    if (length(spcend) == 0) stop("Couldn't find the data block end")
    if (length(spcstart) != 1L || length(spcend) != 1L)
        stop("Problem with delimiting data block")
    if (spcstart > spcend) stop("End of data block in the wrong place")

    # Each line of the data block begins with an x (frequency) marker that
    # must be removed: optional sign, digits, optional '.' or ',' decimal
    # point (EU files), optional surrounding whitespace. sub() removes only
    # the first match per line and is vectorized, so no loop is needed.
    yValues <- sub("\\s*(\\+|-)*[[:digit:]]+(\\.|,)?[[:digit:]]*\\s*", "",
        jdx[spcstart:spcend])

    yValues <- paste(yValues, collapse = " ") # concatenate into one string
    yValues <- gsub("\\+", " ", yValues)  # '+' separators -> space
    yValues <- gsub("-", " -", yValues)   # keep '-' attached to negative values
    yValues <- sub("\\s*", "", yValues)   # drop any leading whitespace
    yValues <- gsub(",", ".", yValues)    # EU-style decimal commas -> '.'
    yValues <- as.numeric(unlist(strsplit(yValues, split = "\\s+")))

    # Helper: extract the numeric value of a '##KEY=' header line.
    # Stops with an informative message when the key is absent.
    getNumeric <- function(key) {
        idx <- grep(paste0("^##", key, "="), jdx)
        if (length(idx) == 0) stop("Couldn't find ", key)
        val <- sub(paste0("##", key, "="), "", jdx[idx[1]], fixed = TRUE)
        as.numeric(sub(",", ".", val)) # EU-style decimal comma
    }

    firstX <- getNumeric("FIRSTX")
    lastX <- getNumeric("LASTX")
    npoints <- as.integer(getNumeric("NPOINTS"))
    yFac <- getNumeric("YFACTOR")

    if (debug) cat("\tNPOINTS = ", npoints, "\n")
    if (debug) cat("\tActual no. data points found = ", length(yValues), "\n")
    if (npoints != length(yValues))
        stop("NPOINTS and length of data block don't match")
    if (debug) cat("\tfirstX = ", firstX, "\n")
    if (debug) cat("\tlastX = ", lastX, "\n")
    if (debug) cat("\tyFac = ", yFac, "\n")

    # Scale the raw counts and construct the x axis.
    # length.out (not by) guarantees exactly npoints values and avoids
    # floating-point drop-off of the final point.
    yValues <- yValues * yFac
    xValues <- seq(firstX, lastX, length.out = npoints)
    data.frame(x = xValues, y = yValues)
}
|
#' get_mz_by_monoisotopicmass
#'
#' Generate list of expected m/z for a specific monoisotopic mass
#'
#'
#' @param monoisotopicmass Monoisotopic mass. e.g.: 149.051
#' @param dbid Database or user-defined ID. e.g.: "M001"
#' @param name Metabolite name. e.g.: "Methionine"
#' @param formula Chemical formula. e.g.: "C5H11NO2S"
#' @param queryadductlist List of adducts to be used for searching. eg:
#' c("M+H","M+Na","M+K"), c("positive") for positive adducts, c("negative") for
#' negative adducts c("all") for all adducts
#' @param syssleep Wait time between queries to prevent overloading the KEGG
#' REST interface. e.g.: 0.1
#' @param adduct_table Adduct definition table; when NA (the default) the
#' packaged \code{adduct_table} dataset is loaded via \code{data()}.
#' @return Returns an R object with a list of expected m/z for the input
#' monoisotopic mass.
#' @author Karan Uppal
get_mz_by_monoisotopicmass <- function(monoisotopicmass,
dbid = NA, name = NA, formula = NA, queryadductlist = c("M+H"),
syssleep = 0.01, adduct_table = NA) {
# Column names for the result data frame assembled at the end.
cnames <- c("mz", "ID", "Name", "Formula", "MonoisotopicMass",
"Adduct", "AdductMass")
# When no adduct table is supplied, remove the NA binding so the packaged
# dataset loaded just below takes its place.
# NOTE(review): is.na() on a data.frame argument returns a matrix, so this
# test only behaves as intended for the scalar NA default -- confirm.
if (is.na(adduct_table) == TRUE) {
try(rm(adduct_table), silent = TRUE)
}
# if(is.na(adduct_table)==TRUE)
# NOTE(review): this block runs unconditionally, so data() appears to load
# the packaged table even when a caller supplied one -- verify intent.
{
data(adduct_table)
adduct_table <- as.data.frame(adduct_table)
}
# adduct_table<-read.table('/Users/karanuppal/Documents/Emory/JonesLab/Projects/xMSannotator/adduct_table.txt',sep='\t',header=TRUE)
# adduct_table<-adduct_table[c(which(adduct_table[,6]=='S'),which(adduct_table[,6]=='Acetonitrile')),]
# Adduct table column layout (as used below): col 1 = adduct name,
# col 2 = number of molecules, col 3 = charge multiplier, col 4 = adduct
# mass, col 5 = ion mode ("positive"/"negative").
adduct_names <- as.character(adduct_table[, 1])
adductlist <- adduct_table[, 4]
mult_charge <- adduct_table[, 3]
num_mol <- adduct_table[, 2]
# Name the lookup vectors by adduct so values can be fetched by name.
names(adductlist) <- as.character(adduct_names)
names(mult_charge) <- as.character(adduct_names)
names(num_mol) <- as.character(adduct_names)
alladducts <- adduct_names
# Expand the shorthand selectors "positive"/"negative"/"all" into explicit
# adduct name lists; otherwise validate the user-supplied names.
if (queryadductlist[1] == "positive") {
queryadductlist <- adduct_names[which(adduct_table[,
5] == "positive")]
} else {
if (queryadductlist[1] == "negative") {
queryadductlist <- adduct_names[which(adduct_table[,
5] == "negative")]
} else {
if (queryadductlist[1] == "all") {
queryadductlist <- alladducts
} else {
if (length(which(queryadductlist %in% alladducts ==
FALSE)) > 0) {
# Unknown adduct requested: build an error message listing
# every valid adduct name, then stop with usage examples.
errormsg <- paste("Adduct should be one of:",
sep = "")
for (i in alladducts) {
errormsg <- paste(errormsg, i, sep = " ; ")
}
stop(errormsg, "\n\nUsage: feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"M+H\", \"M+Na\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"positive\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"negative\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"all\"), xMSannotator.outloc, numnodes=1)")
}
}
}
}
# Accumulator for the result rows; '{ }' evaluates to NULL here.
map_res <- {
}
# Replace missing metadata with "-" placeholders for the output table.
if (is.na(dbid) == TRUE) {
dbid = "-"
}
if (is.na(name) == TRUE) {
name = "-"
}
if (is.na(formula) == TRUE) {
formula = "-"
}
exact_mass <- monoisotopicmass
# For each requested adduct compute
#   mz = (nmol * monoisotopic mass + adduct mass) / charge
for (adnum in 1:length(queryadductlist)) {
adductname = queryadductlist[adnum]
adductmass = adductlist[as.character(adductname)]
adductcharge = mult_charge[as.character(adductname)]
adductnmol = num_mol[as.character(adductname)]
# mz=((adductnmol*exact_mass)+(adductmass))/(adductcharge))
mz = ((exact_mass * as.numeric(adductnmol)) + (as.numeric(adductmass)))/as.numeric(adductcharge)
# delta_ppm=(max.mz.diff)*(mz/1000000)
# min_mz=round((mz-delta_ppm),5)
# max_mz=round((mz+delta_ppm),5)
# 'res' is assigned NULL and never used afterwards (historical leftover).
res = {
}
mzorig = round(exact_mass, 5)
# delta_ppm=round(delta_ppm,5)
# Brief pause; historical throttling for KEGG REST queries.
syssleep1 <- (syssleep/5)
Sys.sleep(syssleep1)
cur_map_res <- cbind(mz, dbid, name, formula, adductname,
adductmass, mzorig)
# Overwritten immediately: rebuilt with columns ordered to match 'cnames'.
cur_map_res <- as.data.frame(cbind(mz, as.character(dbid),
as.character(name), as.character(formula), mzorig,
adductname, adductmass))
# print(cur_map_res)
cur_map_res <- as.data.frame(cur_map_res)
map_res <- rbind(map_res, cur_map_res)
}
colnames(map_res) <- cnames
# Drop duplicate rows (e.g. adducts that yield identical m/z entries).
map_res <- unique(map_res)
map_res <- as.data.frame(map_res)
return(map_res)
}
|
/R/get_mz_by_monoisotopicmass.R
|
no_license
|
stolltho/xMSannotator
|
R
| false
| false
| 5,011
|
r
|
#' get_mz_by_monoisotopicmass
#'
#' Generate list of expected m/z for a specific monoisotopic mass
#'
#'
#' @param monoisotopicmass Monoisotopic mass. e.g.: 149.051
#' @param dbid Database or user-defined ID. e.g.: "M001"
#' @param name Metabolite name. e.g.: "Methionine"
#' @param formula Chemical formula. e.g.: "C5H11NO2S"
#' @param queryadductlist List of adducts to be used for searching. eg:
#' c("M+H","M+Na","M+K"), c("positive") for positive adducts, c("negative") for
#' negative adducts c("all") for all adducts
#' @param syssleep Wait time between queries to prevent overloading the KEGG
#' REST interface. e.g.: 0.1
#' @param adduct_table Adduct definition table; when NA (the default) the
#' packaged \code{adduct_table} dataset is loaded via \code{data()}.
#' @return Returns an R object with a list of expected m/z for the input
#' monoisotopic mass.
#' @author Karan Uppal
get_mz_by_monoisotopicmass <- function(monoisotopicmass,
dbid = NA, name = NA, formula = NA, queryadductlist = c("M+H"),
syssleep = 0.01, adduct_table = NA) {
# Column names for the result data frame assembled at the end.
cnames <- c("mz", "ID", "Name", "Formula", "MonoisotopicMass",
"Adduct", "AdductMass")
# When no adduct table is supplied, remove the NA binding so the packaged
# dataset loaded just below takes its place.
# NOTE(review): is.na() on a data.frame argument returns a matrix, so this
# test only behaves as intended for the scalar NA default -- confirm.
if (is.na(adduct_table) == TRUE) {
try(rm(adduct_table), silent = TRUE)
}
# if(is.na(adduct_table)==TRUE)
# NOTE(review): this block runs unconditionally, so data() appears to load
# the packaged table even when a caller supplied one -- verify intent.
{
data(adduct_table)
adduct_table <- as.data.frame(adduct_table)
}
# adduct_table<-read.table('/Users/karanuppal/Documents/Emory/JonesLab/Projects/xMSannotator/adduct_table.txt',sep='\t',header=TRUE)
# adduct_table<-adduct_table[c(which(adduct_table[,6]=='S'),which(adduct_table[,6]=='Acetonitrile')),]
# Adduct table column layout (as used below): col 1 = adduct name,
# col 2 = number of molecules, col 3 = charge multiplier, col 4 = adduct
# mass, col 5 = ion mode ("positive"/"negative").
adduct_names <- as.character(adduct_table[, 1])
adductlist <- adduct_table[, 4]
mult_charge <- adduct_table[, 3]
num_mol <- adduct_table[, 2]
# Name the lookup vectors by adduct so values can be fetched by name.
names(adductlist) <- as.character(adduct_names)
names(mult_charge) <- as.character(adduct_names)
names(num_mol) <- as.character(adduct_names)
alladducts <- adduct_names
# Expand the shorthand selectors "positive"/"negative"/"all" into explicit
# adduct name lists; otherwise validate the user-supplied names.
if (queryadductlist[1] == "positive") {
queryadductlist <- adduct_names[which(adduct_table[,
5] == "positive")]
} else {
if (queryadductlist[1] == "negative") {
queryadductlist <- adduct_names[which(adduct_table[,
5] == "negative")]
} else {
if (queryadductlist[1] == "all") {
queryadductlist <- alladducts
} else {
if (length(which(queryadductlist %in% alladducts ==
FALSE)) > 0) {
# Unknown adduct requested: build an error message listing
# every valid adduct name, then stop with usage examples.
errormsg <- paste("Adduct should be one of:",
sep = "")
for (i in alladducts) {
errormsg <- paste(errormsg, i, sep = " ; ")
}
stop(errormsg, "\n\nUsage: feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"M+H\", \"M+Na\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"positive\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"negative\"), xMSannotator.outloc, numnodes=1)",
"\n\n OR feat.batch.annotation.KEGG(dataA,max.mz.diff=10, queryadductlist=c(\"all\"), xMSannotator.outloc, numnodes=1)")
}
}
}
}
# Accumulator for the result rows; '{ }' evaluates to NULL here.
map_res <- {
}
# Replace missing metadata with "-" placeholders for the output table.
if (is.na(dbid) == TRUE) {
dbid = "-"
}
if (is.na(name) == TRUE) {
name = "-"
}
if (is.na(formula) == TRUE) {
formula = "-"
}
exact_mass <- monoisotopicmass
# For each requested adduct compute
#   mz = (nmol * monoisotopic mass + adduct mass) / charge
for (adnum in 1:length(queryadductlist)) {
adductname = queryadductlist[adnum]
adductmass = adductlist[as.character(adductname)]
adductcharge = mult_charge[as.character(adductname)]
adductnmol = num_mol[as.character(adductname)]
# mz=((adductnmol*exact_mass)+(adductmass))/(adductcharge))
mz = ((exact_mass * as.numeric(adductnmol)) + (as.numeric(adductmass)))/as.numeric(adductcharge)
# delta_ppm=(max.mz.diff)*(mz/1000000)
# min_mz=round((mz-delta_ppm),5)
# max_mz=round((mz+delta_ppm),5)
# 'res' is assigned NULL and never used afterwards (historical leftover).
res = {
}
mzorig = round(exact_mass, 5)
# delta_ppm=round(delta_ppm,5)
# Brief pause; historical throttling for KEGG REST queries.
syssleep1 <- (syssleep/5)
Sys.sleep(syssleep1)
cur_map_res <- cbind(mz, dbid, name, formula, adductname,
adductmass, mzorig)
# Overwritten immediately: rebuilt with columns ordered to match 'cnames'.
cur_map_res <- as.data.frame(cbind(mz, as.character(dbid),
as.character(name), as.character(formula), mzorig,
adductname, adductmass))
# print(cur_map_res)
cur_map_res <- as.data.frame(cur_map_res)
map_res <- rbind(map_res, cur_map_res)
}
colnames(map_res) <- cnames
# Drop duplicate rows (e.g. adducts that yield identical m/z entries).
map_res <- unique(map_res)
map_res <- as.data.frame(map_res)
return(map_res)
}
|
# Kati Puukko
# Creating data for analysis 6
# 3.12.2020

## Load required packages
library(dplyr)
library(tidyr)

## 1. Load the data sets (BPRS and RATS) into R
BPRS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/BPRS.txt", sep = " ", header = TRUE)
RATS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/rats.txt", sep = "\t", header = TRUE)

# Check their variable names
names(BPRS)
names(RATS)

# View the data contents and structures
str(BPRS)
dim(BPRS) # 40 observations, 11 variables
str(RATS)
dim(RATS) # 16 observations, 13 variables

# Create some brief summaries of the variables
summary(BPRS)
# In total 40 participants in the data, who have participated in the study
# during 8 weeks. The participants are divided into two treatment groups
# with 20 participants in both groups.
summary(RATS)
# 3 groups of rats that had different diets;
# other variables describe different nutrition indicators (e.g. body weight)

## 2. Convert the categorical variables of both data sets to factors
BPRS$treatment <- factor(BPRS$treatment)
BPRS$subject <- factor(BPRS$subject)
RATS$ID <- factor(RATS$ID)
RATS$Group <- factor(RATS$Group)

## 3. Convert the data sets to long form
# Convert to long form and add a week variable to BPRS
BPRS_long <- BPRS %>% gather(key = weeks, value = bprs, -treatment, -subject)
# BUG FIX: the original piped from 'BPRSL', an object that is never defined
# in this script; the long-form data created above is called BPRS_long.
BPRS_long <- BPRS_long %>% mutate(week = as.integer(substr(weeks, 5, 5)))

# Convert to long form and add a Time variable to RATS
RATS_long <- RATS %>%
  gather(key = WD, value = Weight, -ID, -Group) %>%
  mutate(Time = as.integer(substr(WD, 3, 4)))

## 4. Take a serious look at the data sets
glimpse(BPRS_long) # 360 rows, 5 columns
glimpse(RATS_long) # 176 rows, 5 columns
summary(BPRS_long)
summary(RATS_long)
str(BPRS_long) # treatment and subject are now factors
str(RATS_long) # ID and Group are factors
names(BPRS_long) # week and Time variables exist
names(RATS_long)
# In long format, observations are listed in separate rows one below another;
# each row is a repeated measure of the same subject.

## Save the datasets
write.table(BPRS_long, file = "data/BPRSL.txt", sep = "\t")
write.table(RATS_long, file = "data/RATSL.txt", sep = "\t")
|
/data/meet_and_repeat.R
|
no_license
|
kpuukko/IODS-project
|
R
| false
| false
| 2,224
|
r
|
# Kati Puukko
# Creating data for analysis 6
# 3.12.2020

## Load required packages
library(dplyr)
library(tidyr)

## 1. Load the data sets (BPRS and RATS) into R
BPRS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/BPRS.txt", sep = " ", header = TRUE)
RATS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/rats.txt", sep = "\t", header = TRUE)

# Check their variable names
names(BPRS)
names(RATS)

# View the data contents and structures
str(BPRS)
dim(BPRS) # 40 observations, 11 variables
str(RATS)
dim(RATS) # 16 observations, 13 variables

# Create some brief summaries of the variables
summary(BPRS)
# In total 40 participants in the data, who have participated in the study
# during 8 weeks. The participants are divided into two treatment groups
# with 20 participants in both groups.
summary(RATS)
# 3 groups of rats that had different diets;
# other variables describe different nutrition indicators (e.g. body weight)

## 2. Convert the categorical variables of both data sets to factors
BPRS$treatment <- factor(BPRS$treatment)
BPRS$subject <- factor(BPRS$subject)
RATS$ID <- factor(RATS$ID)
RATS$Group <- factor(RATS$Group)

## 3. Convert the data sets to long form
# Convert to long form and add a week variable to BPRS
BPRS_long <- BPRS %>% gather(key = weeks, value = bprs, -treatment, -subject)
# BUG FIX: the original piped from 'BPRSL', an object that is never defined
# in this script; the long-form data created above is called BPRS_long.
BPRS_long <- BPRS_long %>% mutate(week = as.integer(substr(weeks, 5, 5)))

# Convert to long form and add a Time variable to RATS
RATS_long <- RATS %>%
  gather(key = WD, value = Weight, -ID, -Group) %>%
  mutate(Time = as.integer(substr(WD, 3, 4)))

## 4. Take a serious look at the data sets
glimpse(BPRS_long) # 360 rows, 5 columns
glimpse(RATS_long) # 176 rows, 5 columns
summary(BPRS_long)
summary(RATS_long)
str(BPRS_long) # treatment and subject are now factors
str(RATS_long) # ID and Group are factors
names(BPRS_long) # week and Time variables exist
names(RATS_long)
# In long format, observations are listed in separate rows one below another;
# each row is a repeated measure of the same subject.

## Save the datasets
write.table(BPRS_long, file = "data/BPRSL.txt", sep = "\t")
write.table(RATS_long, file = "data/RATSL.txt", sep = "\t")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orfRelativePos.R
\name{orfRelativePos}
\alias{orfRelativePos}
\title{Relative position of the start and stop codon along the transcript}
\usage{
orfRelativePos(cdsTransc, exonGRanges)
}
\arguments{
\item{cdsTransc}{a GRangesList.
It contains the CDS coordinates grouped by transcript.}
\item{exonGRanges}{a GRangesList.
It contains the exon coordinates grouped by transcript.}
}
\value{
a list.
A list of relative positions of the start and end of ORFs.
}
\description{
Relative position of the start and stop codon along the transcript
}
\examples{
#make a txdb object containing the annotations for the specified species.
#In this case hg19.
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene::TxDb.Hsapiens.UCSC.hg19.knownGene
#get all CDSs by transcript
cds <- GenomicFeatures::cdsBy(txdb, by="tx", use.names=TRUE)
#get all exons by transcript
exonGRanges <- GenomicFeatures::exonsBy(txdb, by="tx", use.names=TRUE)
#retrieve the positions of start and end codons relative to the transcript
cdsPosTransc <- orfRelativePos(cds, exonGRanges)
}
|
/man/orfRelativePos.Rd
|
no_license
|
alenzhao/RiboProfiling
|
R
| false
| true
| 1,119
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orfRelativePos.R
\name{orfRelativePos}
\alias{orfRelativePos}
\title{Relative position of the start and stop codon along the transcript}
\usage{
orfRelativePos(cdsTransc, exonGRanges)
}
\arguments{
\item{cdsTransc}{a GRangesList.
It contains the CDS coordinates grouped by transcript.}
\item{exonGRanges}{a GRangesList.
It contains the exon coordinates grouped by transcript.}
}
\value{
a list.
A list of relative positions of the start and end of ORFs.
}
\description{
Relative position of the start and stop codon along the transcript
}
\examples{
#make a txdb object containing the annotations for the specified species.
#In this case hg19.
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene::TxDb.Hsapiens.UCSC.hg19.knownGene
#get all CDSs by transcript
cds <- GenomicFeatures::cdsBy(txdb, by="tx", use.names=TRUE)
#get all exons by transcript
exonGRanges <- GenomicFeatures::exonsBy(txdb, by="tx", use.names=TRUE)
#retrieve the positions of start and end codons relative to the transcript
cdsPosTransc <- orfRelativePos(cds, exonGRanges)
}
|
#! /usr/bin/env Rscript
# Detect "bumps" (jumps of >= 1.5 between consecutive acceleration samples)
# in a sensor log and append them to bumpdata.csv.
# Usage: zprocess.R <input.csv>  (no header; columns: time, accel, lat, lon)
args <- commandArgs(TRUE)
z <- read.csv(args[1], header = FALSE)
summary(z)
i <- 2
count <- 0
acc <- z[, 2]
# BUG FIX: the loop condition was 'i < length(acc)', which never examined
# the final sample; use '<=' so every consecutive pair is checked.
while (i <= length(acc)) {
  delta <- acc[i] - acc[i - 1]
  if (delta >= 1.5) {
    count <- count + 1
    # BUG FIX: record the bump at the row where it was detected. The
    # original advanced i by 2 *before* reading time/lat/lon, so those
    # values came from two rows later and could index past the data.
    bump <- list(time = z[, 1][i], weight = delta, lat = z[, 3][i], lon = z[, 4][i])
    write.table(bump, "bumpdata.csv", append = TRUE, sep = ",",
                row.names = FALSE, col.names = FALSE)
    i <- i + 2  # skip the sample that is part of this bump
  } else {
    i <- i + 1
  }
}
count  # autoprinted under Rscript: number of bumps found
|
/web/data/zprocess.R
|
no_license
|
angineering/TranSeNS
|
R
| false
| false
| 441
|
r
|
#! /usr/bin/env Rscript
# Detect "bumps" (jumps of >= 1.5 between consecutive acceleration samples)
# in a sensor log and append them to bumpdata.csv.
# Usage: zprocess.R <input.csv>  (no header; columns: time, accel, lat, lon)
args <- commandArgs(TRUE)
z <- read.csv(args[1], header = FALSE)
summary(z)
i <- 2
count <- 0
acc <- z[, 2]
# BUG FIX: the loop condition was 'i < length(acc)', which never examined
# the final sample; use '<=' so every consecutive pair is checked.
while (i <= length(acc)) {
  delta <- acc[i] - acc[i - 1]
  if (delta >= 1.5) {
    count <- count + 1
    # BUG FIX: record the bump at the row where it was detected. The
    # original advanced i by 2 *before* reading time/lat/lon, so those
    # values came from two rows later and could index past the data.
    bump <- list(time = z[, 1][i], weight = delta, lat = z[, 3][i], lon = z[, 4][i])
    write.table(bump, "bumpdata.csv", append = TRUE, sep = ",",
                row.names = FALSE, col.names = FALSE)
    i <- i + 2  # skip the sample that is part of this bump
  } else {
    i <- i + 1
  }
}
count  # autoprinted under Rscript: number of bumps found
|
# Auto-generated regression input (valgrind test harness): exercises
# DLMtool::LBSPRgen with extreme / denormal numeric values to probe for
# crashes and memory errors; the scientific output itself is not meaningful.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78056859782025e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Invoke the compiled routine with the fuzzed argument list.
result <- do.call(DLMtool::LBSPRgen,testlist)
# Show the structure of whatever came back; the implicit assertion is only
# that the call completed without crashing.
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615836498-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,048
|
r
|
# Auto-generated regression input (valgrind test harness): exercises
# DLMtool::LBSPRgen with extreme / denormal numeric values to probe for
# crashes and memory errors; the scientific output itself is not meaningful.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78056859782025e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Invoke the compiled routine with the fuzzed argument list.
result <- do.call(DLMtool::LBSPRgen,testlist)
# Show the structure of whatever came back; the implicit assertion is only
# that the call completed without crashing.
str(result)
|
#' describe missing
#'
#' get some simple stats on missingness for a set of variables
#' @param data the data.frame or similar
#' @param v if \code{NULL} all variables are included, else a character vector of the
#'     names of wanted variables or a formula (if data is null it will look for
#'     the variables in the global workspace, but they need to be of the same length)
#' @param guide a guide (by \code{dtable_guide}), either to select variables OR
#'     to provide unit information (the type-info will not be used)
#' @param glist an index list or name of grouping variable
#' @param info type of display
#' @param only.with only show those variables with some missing
#' @export
#' @return a data.frame with
#' \itemize{
#' \item{variable} name of variable
#' \item{count} number of \code{NA} in that variable
#' \item{percent} percent \code{NA} in that variable
#' }
dtable_missing <- function(data = NULL, v = NULL, guide = NULL, glist = NULL,
                           info = "latex", only.with = TRUE){
    df <- get_variables(x = v, data = data)
    if (nrow(df) == 0) stop("empty data set")
    if (is.null(guide)) guide <- dtable_guide(df)
    # Treat every non-id variable as 'real' so missingness is described
    # uniformly regardless of the declared type.
    guide$type[!guide$type %in% c("unit id.", "row id.")] <- "real"
    no_miss <- guide$label[!guide$has_missing]
    if (only.with){
        guide <- subset(guide, guide$has_missing)
        if (nrow(guide) == 0) {
            message("there are no missing")
            ## BUG FIX: the result must actually be returned here; previously
            ## invisible(NULL) was evaluated but not returned, so execution
            ## fell through to dtable() with an empty guide.
            return(invisible(NULL))
        }
    }
    a_flist <- flist(c("Count" = "d_missing", "Percent" = "d_missing.perc"))
    dt <- dtable(data = data, type = "real", desc = TRUE, guide = guide,
                 desc.flist = a_flist, comp = FALSE, glist = glist)
    # Record, in the table's info attribute, the variables that were
    # examined but had no missing values (latex-escaped if requested).
    if (only.with && length(no_miss) > 0){  # '&&': scalar condition
        a <- if (info == "latex"){
            paste0(
                paste0("\\texttt{",
                       gsub("_", "\\_", no_miss, fixed = TRUE),
                       "}"),
                collapse = ", ")
        } else {
            paste0(no_miss, collapse = ", ")
        }
        attr(dt, "info") <- c(attr(dt, "info"),
            paste0("Variables examined but found to be complete: ", a, "."))
    }
    dt
}
# - # create data frame from formula or names of variables
# x: NULL (use 'data' as is), a formula (use its variables), or a character
#    vector of variable names.
# data: the data.frame to take variables from; if NULL, variables are looked
#    up in the global environment (they must then have equal lengths).
# Returns a data.frame containing the requested variables.
get_variables <- function(x = NULL, data = NULL){
    if(is.null(x)){
        if(is.null(data)) stop("need 'x' or 'data'")
        return(data)
    } else if(inherits(x, "formula")){
        ## FIX: inherits() instead of 'class(x) == "formula"' -- the latter
        ## warns/fails for objects whose class attribute has length > 1
        vars <- all.vars(x)
    } else if(is.character(x)){
        vars <- x
    } else {
        stop("what weird beast is 'x'?")
    }
    if(is.null(data)){
        # Assemble the variables from the global environment, column by
        # column; mismatched lengths make data.frame assignment fail.
        for(k in seq_along(vars)){
            tmp <- get(vars[k], envir = .GlobalEnv)
            if(k == 1){
                R <- data.frame(wot = tmp)
                names(R) <- vars[k]
            } else {
                tryCatch({R[[vars[k]]] <- tmp},
                         error = function(e) stop("computer says no"))
            }
        }
        R
    } else {
        subset(data, TRUE, select = vars)
    }
}
|
/R/dtable-missing.R
|
no_license
|
renlund/descripteur
|
R
| false
| false
| 3,057
|
r
|
#' describe missing
#'
#' get some simple stats on missingness for a set of variables
#' @param data the data.frame or similar
#' @param v if \code{NULL} all variables are included, else a character vector of the
#'     names of wanted variables or a formula (if data is null it will look for
#'     the variables in the global workspace, but they need to be of the same length)
#' @param guide a guide (by \code{dtable_guide}), either to select variables OR
#'     to provide unit information (the type-info will not be used)
#' @param glist an index list or name of grouping variable
#' @param info type of display
#' @param only.with only show those variables with some missing
#' @export
#' @return a data.frame with
#' \itemize{
#' \item{variable} name of variable
#' \item{count} number of \code{NA} in that variable
#' \item{percent} percent \code{NA} in that variable
#' }
dtable_missing <- function(data = NULL, v = NULL, guide = NULL, glist = NULL,
                           info = "latex", only.with = TRUE){
    df <- get_variables(x = v, data = data)
    if (nrow(df) == 0) stop("empty data set")
    if (is.null(guide)) guide <- dtable_guide(df)
    # Treat every non-id variable as 'real' so missingness is described
    # uniformly regardless of the declared type.
    guide$type[!guide$type %in% c("unit id.", "row id.")] <- "real"
    no_miss <- guide$label[!guide$has_missing]
    if (only.with){
        guide <- subset(guide, guide$has_missing)
        if (nrow(guide) == 0) {
            message("there are no missing")
            ## BUG FIX: the result must actually be returned here; previously
            ## invisible(NULL) was evaluated but not returned, so execution
            ## fell through to dtable() with an empty guide.
            return(invisible(NULL))
        }
    }
    a_flist <- flist(c("Count" = "d_missing", "Percent" = "d_missing.perc"))
    dt <- dtable(data = data, type = "real", desc = TRUE, guide = guide,
                 desc.flist = a_flist, comp = FALSE, glist = glist)
    # Record, in the table's info attribute, the variables that were
    # examined but had no missing values (latex-escaped if requested).
    if (only.with && length(no_miss) > 0){  # '&&': scalar condition
        a <- if (info == "latex"){
            paste0(
                paste0("\\texttt{",
                       gsub("_", "\\_", no_miss, fixed = TRUE),
                       "}"),
                collapse = ", ")
        } else {
            paste0(no_miss, collapse = ", ")
        }
        attr(dt, "info") <- c(attr(dt, "info"),
            paste0("Variables examined but found to be complete: ", a, "."))
    }
    dt
}
# - # create data frame from formula or names of variables
# x: NULL (use 'data' as is), a formula (use its variables), or a character
#    vector of variable names.
# data: the data.frame to take variables from; if NULL, variables are looked
#    up in the global environment (they must then have equal lengths).
# Returns a data.frame containing the requested variables.
get_variables <- function(x = NULL, data = NULL){
    if(is.null(x)){
        if(is.null(data)) stop("need 'x' or 'data'")
        return(data)
    } else if(inherits(x, "formula")){
        ## FIX: inherits() instead of 'class(x) == "formula"' -- the latter
        ## warns/fails for objects whose class attribute has length > 1
        vars <- all.vars(x)
    } else if(is.character(x)){
        vars <- x
    } else {
        stop("what weird beast is 'x'?")
    }
    if(is.null(data)){
        # Assemble the variables from the global environment, column by
        # column; mismatched lengths make data.frame assignment fail.
        for(k in seq_along(vars)){
            tmp <- get(vars[k], envir = .GlobalEnv)
            if(k == 1){
                R <- data.frame(wot = tmp)
                names(R) <- vars[k]
            } else {
                tryCatch({R[[vars[k]]] <- tmp},
                         error = function(e) stop("computer says no"))
            }
        }
        R
    } else {
        subset(data, TRUE, select = vars)
    }
}
|
## Download and unzip the dataset if it is not already in the working directory
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "eda_project1files.zip"
if (!file.exists(filename)) {
  download.file(url, filename, method = "curl")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}

# Read in the data
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   dec = ".", stringsAsFactors = FALSE)

# Create a POSIXct datetime variable from the Date and Time columns
data$datetime <- paste(data$Date, data$Time)
data$datetime <- as.POSIXct(strptime(data$datetime, format = "%d/%m/%Y %H:%M:%S"))

# Subset to 2007-02-01 and 2007-02-02 (inclusive).
# BUG FIX: the upper bound was "2007-02-02 24:00:00"; hour 24 is not a valid
# %H value, so the coercion inside the POSIXct comparison can yield NA and
# silently drop rows. Midnight of the following day expresses the same bound
# unambiguously.
data <- subset(data, datetime >= "2007-02-01 00:00:00" & datetime < "2007-02-03 00:00:00")

# Convert the measurement columns from character to numeric
data[, 3:9] <- lapply(data[, 3:9], as.numeric)

# Plot 3: energy sub-metering over time
png('plot3.png')
with(data, plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy Sub Metering"))
with(data, lines(datetime, Sub_metering_2, type = "l", col = "red"))
with(data, lines(datetime, Sub_metering_3, type = "l", col = "blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1, cex = 1.15)
dev.off()
|
/plot3.R
|
no_license
|
sethltaylor/ExData_Plotting1
|
R
| false
| false
| 1,310
|
r
|
## Download and unzip the dataset if it is not already in the working directory
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "eda_project1files.zip"
if (!file.exists(filename)) {
  download.file(url, filename, method = "curl")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}
## Read the data; the raw file encodes missing values as "?"
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   dec = ".", na.strings = "?", stringsAsFactors = FALSE)
## Build a POSIXct datetime from the separate Date and Time columns
data$datetime <- paste(data$Date, data$Time)
data$datetime <- as.POSIXct(strptime(data$datetime, format = "%d/%m/%Y %H:%M:%S"))
## Keep only 2007-02-01 and 2007-02-02.
## FIX: the previous upper bound "2007-02-02 24:00:00" is not a valid
## time-of-day (strptime %H is 00-23), so the implicit as.POSIXct() coercion
## yielded NA and the comparison dropped rows.
data <- subset(data,
               datetime >= as.POSIXct("2007-02-01") &
               datetime <  as.POSIXct("2007-02-03"))
## Convert the measurement columns from character to numeric
data[, 3:9] <- lapply(data[, 3:9], as.numeric)
## Plot 3: energy sub-metering over time
png("plot3.png")
with(data, plot(datetime, Sub_metering_1, type = "l",
                xlab = "", ylab = "Energy Sub Metering"))
with(data, lines(datetime, Sub_metering_2, type = "l", col = "red"))
with(data, lines(datetime, Sub_metering_3, type = "l", col = "blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1, cex = 1.15)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger_Ori_MLog.R
\name{readLogger_Ori_BasicEx1}
\alias{readLogger_Ori_BasicEx1}
\title{Read Logger File from Ori BasicEx1}
\usage{
readLogger_Ori_BasicEx1(
filepath,
infotype = validInfoTypes(),
blockbegin = "ORI BasicEx1",
warn = TRUE,
sep = ";",
dec = ",",
colnameDate = "Datum",
colnameTime = "Uhrzeit",
dateformat = .defaultTimeFormat("v5"),
timeformat = .defaultTimeFormat("v1")
)
}
\arguments{
\item{filepath}{full path to file generated by automatic sampler}
\item{infotype}{one or more of the values returned by \code{validInfoTypes}}
\item{blockbegin}{identification of "block begins"; Default: "ORI BasicEx1"}
\item{warn}{if TRUE, warnings are generated if a block does not contain
"Probe"}
\item{sep}{column separator}
\item{dec}{decimal character}
\item{colnameDate}{name of date column}
\item{colnameTime}{name of time column}
\item{dateformat}{date format string}
\item{timeformat}{time format string}
}
\description{
Read Logger File from Ori BasicEx1
}
\examples{
\dontrun{
# set path to example file (contained in this package)
(file <- extdataFile("Ori/example_Ori_BasicEx1.csv"))
# read the "actions" from the file
readLogger_Ori_BasicEx1(file, infotype = "actions")
# read the sample times from the file
readLogger_Ori_BasicEx1(
file, infotype = "times", blockbegin = "ORI BasicEx1 TU Berlin"
)
# read both at the same time
x <- readLogger_Ori_BasicEx1(
file, blockbegin = "ORI BasicEx1 TU Berlin"
)
# examine the list structure of the result
str(x)
}
}
\references{
\url{http://www.origmbh.de/fileadmin/user_upload/pdf/basic_ex_1_mobil/ORI_Basic_Ex1_mobil_de.pdf}
}
|
/man/readLogger_Ori_BasicEx1.Rd
|
permissive
|
KWB-R/kwb.logger
|
R
| false
| true
| 1,712
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger_Ori_MLog.R
\name{readLogger_Ori_BasicEx1}
\alias{readLogger_Ori_BasicEx1}
\title{Read Logger File from Ori BasicEx1}
\usage{
readLogger_Ori_BasicEx1(
filepath,
infotype = validInfoTypes(),
blockbegin = "ORI BasicEx1",
warn = TRUE,
sep = ";",
dec = ",",
colnameDate = "Datum",
colnameTime = "Uhrzeit",
dateformat = .defaultTimeFormat("v5"),
timeformat = .defaultTimeFormat("v1")
)
}
\arguments{
\item{filepath}{full path to file generated by automatic sampler}
\item{infotype}{one or more of the values returned by \code{validInfoTypes}}
\item{blockbegin}{identification of "block begins"; Default: "ORI BasicEx1"}
\item{warn}{if TRUE, warnings are generated if a block does not contain
"Probe"}
\item{sep}{column separator}
\item{dec}{decimal character}
\item{colnameDate}{name of date column}
\item{colnameTime}{name of time column}
\item{dateformat}{date format string}
\item{timeformat}{time format string}
}
\description{
Read Logger File from Ori BasicEx1
}
\examples{
\dontrun{
# set path to example file (contained in this package)
(file <- extdataFile("Ori/example_Ori_BasicEx1.csv"))
# read the "actions" from the file
readLogger_Ori_BasicEx1(file, infotype = "actions")
# read the sample times from the file
readLogger_Ori_BasicEx1(
file, infotype = "times", blockbegin = "ORI BasicEx1 TU Berlin"
)
# read both at the same time
x <- readLogger_Ori_BasicEx1(
file, blockbegin = "ORI BasicEx1 TU Berlin"
)
# examine the list structure of the result
str(x)
}
}
\references{
\url{http://www.origmbh.de/fileadmin/user_upload/pdf/basic_ex_1_mobil/ORI_Basic_Ex1_mobil_de.pdf}
}
|
# Build an 8 x 13 character matrix of index labels of the form "x<row>,<col>".
jMat <- outer(1:8, 1:13, function(r, c) sprintf("x%d,%d", r, c))
jMat
|
/R/mekametwik_widaindek.R
|
no_license
|
d8aninja/code
|
R
| false
| false
| 104
|
r
|
# Build an 8 x 13 character matrix of index labels of the form "x<row>,<col>".
jMat <- outer(1:8, 1:13, function(r, c) sprintf("x%d,%d", r, c))
jMat
|
## Put comments here that give an overall description of what your
## functions do
## A matrix object that stores its value and is able to cache it's inverse
## Wrap a matrix in a closure-based object that can cache its inverse.
## The returned list exposes: set (replace matrix, clear cache), get,
## setinv (store inverse), getinv (retrieve cached inverse or NULL).
makeCacheMatrix <- function(mat = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse
    set = function(y) {
      mat <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix
    get = function() mat,
    # Store a computed inverse in the cache
    setinv = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse (NULL when not yet computed)
    getinv = function() cached_inverse
  )
}
## Returns the inverse of a makeCacheMatrix object
## Return the inverse of a makeCacheMatrix object, using the cached value
## when available and computing (and caching) it otherwise.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Cache hit: announce and return the stored inverse
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, store it, and return it
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
matthewphillips/ProgrammingAssignment2
|
R
| false
| false
| 1,123
|
r
|
## Put comments here that give an overall description of what your
## functions do
## A matrix object that stores its value and is able to cache it's inverse
## Wrap a matrix in a closure-based object that can cache its inverse.
## The returned list exposes: set (replace matrix, clear cache), get,
## setinv (store inverse), getinv (retrieve cached inverse or NULL).
makeCacheMatrix <- function(mat = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse
    set = function(y) {
      mat <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix
    get = function() mat,
    # Store a computed inverse in the cache
    setinv = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse (NULL when not yet computed)
    getinv = function() cached_inverse
  )
}
## Returns the inverse of a makeCacheMatrix object
## Return the inverse of a makeCacheMatrix object, using the cached value
## when available and computing (and caching) it otherwise.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Cache hit: announce and return the stored inverse
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, store it, and return it
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
|
# Processing of near_analysis table
#
# Input: reburns_x2_near_analysis_all_dissolved.csv
# firePerimeters_1940_2016_gt1000ac_notPrescribed_table.csv
# Output:
# 1 Dec 2017 JJWalker
#
library(dplyr)
# fires
fires <- read.csv("data/x_original.csv", header = T)
# input
x <- read.csv("data/reburns_x2_near_analysis_all_dissolved.csv", header = T)
# > head(x)
# FID pointid NEAR_DIST NEAR_X NEAR_Y NEAR_ANGLE parentid1 parentid2
# 1 360950 1 0.1954738 89856.51 1573780 96.84267 2 877
# 2 360951 2 0.1929139 90106.57 1573780 79.21534 2 877
# 3 360952 3 92.7670997 89062.11 1573611 118.61040 2 877
# 4 360953 4 188.8429913 89334.03 1573718 96.84267 2 877
# 5 360954 5 218.6288633 89580.48 1573747 96.84267 2 877
# 6 360955 6 248.4147354 89826.94 1573777 96.84267 2 877
# Treat the fire IDs as categorical for grouping/merging
x$parentid1 <- as.factor(x$parentid1)
x$parentid2 <- as.factor(x$parentid2)
# Get the maximum distance of penetration for each fire ID pair
x.max <- x %>%
group_by(parentid1, parentid2) %>%
summarize(max_dist = max(NEAR_DIST))
# > head(x.max)
# Source: local data frame [6 x 3]
# Groups: parentid1 [3]
#
# parentid1 parentid2 max_dist
# <fctr> <fctr> <dbl>
# 1 2 877 3328.921
# 2 2 1817 2582.504
# 3 3 249 2074.284
# 4 6 1909 3398.801
# 5 6 2030 1130.461
# 6 6 2068 2274.626
#
# NOTE(review): `fires.sub` is used below but never defined in this script --
# presumably a subset of `fires` (read above) keeping parentid plus the year,
# name, doy, acreage and ecoregion columns implied by the colnames() calls;
# confirm the missing subsetting step before running.
merge1 <- merge(x.max, fires.sub, by.x = "parentid1", by.y = "parentid")
head(merge1)
colnames(merge1) <- c("parentid1", "parentid2", "max_dist", "year1", "name1", "doy1", "ac1", "ecoreg1")
merge2 <- merge(merge1, fires.sub, by.x = "parentid2", by.y = "parentid")
colnames(merge2) <- c("parentid2", "parentid1", "max_dist", "year1", "name1", "doy1", "ac1",
"ecoreg1", "year2", "name2", "doy2","ac2", "ecoreg2")
# Take out the smaller ones--could be due to poor border delineation
merge2.gt100m <- subset(merge2, max_dist > 100)
## max_dist and ac1 are correlated
# NOTE(review): `int_yr` is not created anywhere in this script -- presumably
# the interval in years between the paired fires; confirm where it is added
# to merge2.gt100m before fitting.
test <- lm(max_dist ~ int_yr + ac1, data = subset(merge2.gt100m, ecoreg1 == "boreal"))
summary(test)
#
# Call:
# lm(formula = max_dist ~ int_yr + ac1, data = subset(merge2.gt100m,
# ecoreg1 == "boreal"))
#
# Residuals:
# Min 1Q Median 3Q Max
# -9864.7 -1218.0 -540.1 628.8 21123.8
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.136e+03 1.128e+02 10.075 < 2e-16 ***
# int_yr 2.530e+01 3.858e+00 6.557 7.61e-11 ***
# ac1 5.788e-03 3.358e-04 17.237 < 2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 2462 on 1442 degrees of freedom
# Multiple R-squared: 0.2205, Adjusted R-squared: 0.2194
# F-statistic: 203.9 on 2 and 1442 DF, p-value: < 2.2e-16
|
/R/distance_of_burn_intrusion.R
|
no_license
|
jesswalker/ak_fire
|
R
| false
| false
| 2,990
|
r
|
# Processing of near_analysis table
#
# Input: reburns_x2_near_analysis_all_dissolved.csv
# firePerimeters_1940_2016_gt1000ac_notPrescribed_table.csv
# Output:
# 1 Dec 2017 JJWalker
#
library(dplyr)
# fires
fires <- read.csv("data/x_original.csv", header = T)
# input
x <- read.csv("data/reburns_x2_near_analysis_all_dissolved.csv", header = T)
# > head(x)
# FID pointid NEAR_DIST NEAR_X NEAR_Y NEAR_ANGLE parentid1 parentid2
# 1 360950 1 0.1954738 89856.51 1573780 96.84267 2 877
# 2 360951 2 0.1929139 90106.57 1573780 79.21534 2 877
# 3 360952 3 92.7670997 89062.11 1573611 118.61040 2 877
# 4 360953 4 188.8429913 89334.03 1573718 96.84267 2 877
# 5 360954 5 218.6288633 89580.48 1573747 96.84267 2 877
# 6 360955 6 248.4147354 89826.94 1573777 96.84267 2 877
# Treat the fire IDs as categorical for grouping/merging
x$parentid1 <- as.factor(x$parentid1)
x$parentid2 <- as.factor(x$parentid2)
# Get the maximum distance of penetration for each fire ID pair
x.max <- x %>%
group_by(parentid1, parentid2) %>%
summarize(max_dist = max(NEAR_DIST))
# > head(x.max)
# Source: local data frame [6 x 3]
# Groups: parentid1 [3]
#
# parentid1 parentid2 max_dist
# <fctr> <fctr> <dbl>
# 1 2 877 3328.921
# 2 2 1817 2582.504
# 3 3 249 2074.284
# 4 6 1909 3398.801
# 5 6 2030 1130.461
# 6 6 2068 2274.626
#
# NOTE(review): `fires.sub` is used below but never defined in this script --
# presumably a subset of `fires` (read above) keeping parentid plus the year,
# name, doy, acreage and ecoregion columns implied by the colnames() calls;
# confirm the missing subsetting step before running.
merge1 <- merge(x.max, fires.sub, by.x = "parentid1", by.y = "parentid")
head(merge1)
colnames(merge1) <- c("parentid1", "parentid2", "max_dist", "year1", "name1", "doy1", "ac1", "ecoreg1")
merge2 <- merge(merge1, fires.sub, by.x = "parentid2", by.y = "parentid")
colnames(merge2) <- c("parentid2", "parentid1", "max_dist", "year1", "name1", "doy1", "ac1",
"ecoreg1", "year2", "name2", "doy2","ac2", "ecoreg2")
# Take out the smaller ones--could be due to poor border delineation
merge2.gt100m <- subset(merge2, max_dist > 100)
## max_dist and ac1 are correlated
# NOTE(review): `int_yr` is not created anywhere in this script -- presumably
# the interval in years between the paired fires; confirm where it is added
# to merge2.gt100m before fitting.
test <- lm(max_dist ~ int_yr + ac1, data = subset(merge2.gt100m, ecoreg1 == "boreal"))
summary(test)
#
# Call:
# lm(formula = max_dist ~ int_yr + ac1, data = subset(merge2.gt100m,
# ecoreg1 == "boreal"))
#
# Residuals:
# Min 1Q Median 3Q Max
# -9864.7 -1218.0 -540.1 628.8 21123.8
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 1.136e+03 1.128e+02 10.075 < 2e-16 ***
# int_yr 2.530e+01 3.858e+00 6.557 7.61e-11 ***
# ac1 5.788e-03 3.358e-04 17.237 < 2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 2462 on 1442 degrees of freedom
# Multiple R-squared: 0.2205, Adjusted R-squared: 0.2194
# F-statistic: 203.9 on 2 and 1442 DF, p-value: < 2.2e-16
|
# m-out-of-n bootstrap simulation for dWOLS dynamic treatment regime
# estimation (scenario 1 of 9), following Chakraborty et al (2013).
# NOTE(review): install.packages() on every run is a side effect; consider
# guarding with if (!requireNamespace("DTRreg", quietly = TRUE)).
install.packages("DTRreg")
library(DTRreg)
source("choose_alpha.R")
# inverse-logit
expit <- function(x) exp(x)/(1+exp(x))
set.seed(543) # each scenario used a different seed. See README file.
# gamma parameters following Chakraborty et al (2013) to control for irregularity in the generated data
g <- matrix(NA, nrow = 9, ncol = 7)
g[1,] <- c(0,0,0,0,0,0,0)
g[2,] <- c(0,0,0,0,0.01,0,0)
g[3,] <- c(0,0,-0.5,0,0.5,0,-0.5)
g[4,] <- c(0,0,-0.5,0,0.99,0,-0.98)
g[5,] <- c(0,0,-0.5,0,1,0.5,-0.5)
g[6,] <- c(0,0,-0.5,0,0.25,0.5,0.5)
g[7,] <- c(0,0,-0.25,0,0.75,0.5,0.5)
g[8,] <- c(0,0,0,0,1,0,-1)
g[9,] <- c(0,0,0,0,0.25,0,-0.24)
# delta parameters following Chakraborty et al (2013) to control for irregularity in the generated data
d <- matrix(NA, nrow = 9, ncol = 2)
d[1,] <- c(0.5,0.5)
d[2,] <- c(0.5,0.5)
d[3,] <- c(0.5,0.5)
d[4,] <- c(0.5,0.5)
d[5,] <- c(1,0)
d[6,] <- c(0.1,0.1)
d[7,] <- c(0.1,0.1)
d[8,] <- c(0,0)
d[9,] <- c(0,0)
######################### m-out-of-n bootstrap : adpative alpha #############################
sc <- seq(1,9)
# number of simulated dataset
Nsimul <- 500
# number of boostrap samples
Nboot <- 1000
# sample size
n <- 300
# model specification
blip.model <- list(~ O1, ~ O2 + A1)
treat.model <- list(A1~1, A2~1)
tf.model <- list(~ O1, ~ O1 + A1 + O1*A1)
# allocate space and predefined quantities
estm <- vector(mode = "list", length = 1)
# specify scenario with i between 1 and 9. Here scenario 1
i <- 1
# reset estimates to NA for new scenario
estm[[1]] <- matrix(NA, nrow = Nsimul , ncol = Nboot + 4)
for(s in 1:Nsimul) # loop over number of simulations
{
# treatment A1, A2: P(Aj = 1) = P(Aj = 0) = 0.5
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# covariates O1, O2: coded as -1, 1, where O2 depends on A1, O1 and (delta_1,delta_2)
O1 <- 2*rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2*rbinom(n, size = 1, prob = expit(d[sc[i],1]*O1 + d[sc[i],2]*(2*A1-1))) - 1
# generated outcome Y2 (Y1 set to 0), using parameters (gamma_1,...,gamma_7)
Y2 <- g[sc[i],1] + g[sc[i],2]*O1 + g[sc[i],3]*A1 + g[sc[i],4]*O1*A1 + g[sc[i],5]*A2 + g[sc[i],6]*O2*A2 + g[sc[i],7]*A1*A2 + rnorm(n)
# generated dataset
complete <- cbind(A1, A2, O1, O2, Y2)
# fit dWOLS to the generated dataset, using all n=300 observations
proba <- list(as.vector(rep(0.5,n)))
res.n <- try(DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols", var.estim = "bootstrap", data = as.data.frame(complete)))
# NOTE(review): the try() result is stored without checking inherits(es,
# "try-error"); a failed fit would put an error object into the matrix below.
es <- try(extract(res.n))
# save estimates using all observations in the first column
estm[[1]][s,1] <- es
# estimate of nonregularity
phat <- res.n$nonreg[2]
estm[[1]][s,2] <- phat
# choice of alpha
alpha <- dbalpha(data = complete, psin = es, blip.model = blip.model, treat.model = treat.model, tf.model = tf.model)
estm[[1]][s,3] <- alpha
print(c(s,alpha))
# resampling size
m <- n^((1 + alpha*(1-phat))/(1 + alpha))
estm[[1]][s,4] <- m
# probability treatment with m
proba <- list(as.vector(rep(0.5,floor(m))))
# bootstrap resampling + estimate
for(b in 1:Nboot) # loop over number of bootstrap samples
{
# resample with replacement
index <- sample(1:n, floor(m), replace = TRUE)
boot <- complete[index,]
# fit the model to bootstrap sample
res <- try(DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols", data = as.data.frame(boot)))
# NOTE(review): same unchecked try() pattern as above -- confirm failures
# are acceptable as NA-like entries or add an inherits() guard.
esb <- try(extract(res))
# save bootstrap estimates i in the (i+1) column
estm[[1]][s, b + 4] <- esb
}
}
# save results of the simulations in a CSV file
colnames(estm[[1]])[1:4] <- c("psi10","phat","alpha","m")
name1 <- paste("mnad_psi10_scenario", paste(sc[i]),"_2.csv",sep ="")
write.csv(estm[[1]], file = name1, row.names = FALSE)
|
/Simulations/R code/mnad_scenarioi_2.R
|
no_license
|
gabriellesimoneau/Rcode-Biostatistics
|
R
| false
| false
| 4,033
|
r
|
# m-out-of-n bootstrap simulation for dWOLS dynamic treatment regime
# estimation (scenario 1 of 9), following Chakraborty et al (2013).
# NOTE(review): install.packages() on every run is a side effect; consider
# guarding with if (!requireNamespace("DTRreg", quietly = TRUE)).
install.packages("DTRreg")
library(DTRreg)
source("choose_alpha.R")
# inverse-logit
expit <- function(x) exp(x)/(1+exp(x))
set.seed(543) # each scenario used a different seed. See README file.
# gamma parameters following Chakraborty et al (2013) to control for irregularity in the generated data
g <- matrix(NA, nrow = 9, ncol = 7)
g[1,] <- c(0,0,0,0,0,0,0)
g[2,] <- c(0,0,0,0,0.01,0,0)
g[3,] <- c(0,0,-0.5,0,0.5,0,-0.5)
g[4,] <- c(0,0,-0.5,0,0.99,0,-0.98)
g[5,] <- c(0,0,-0.5,0,1,0.5,-0.5)
g[6,] <- c(0,0,-0.5,0,0.25,0.5,0.5)
g[7,] <- c(0,0,-0.25,0,0.75,0.5,0.5)
g[8,] <- c(0,0,0,0,1,0,-1)
g[9,] <- c(0,0,0,0,0.25,0,-0.24)
# delta parameters following Chakraborty et al (2013) to control for irregularity in the generated data
d <- matrix(NA, nrow = 9, ncol = 2)
d[1,] <- c(0.5,0.5)
d[2,] <- c(0.5,0.5)
d[3,] <- c(0.5,0.5)
d[4,] <- c(0.5,0.5)
d[5,] <- c(1,0)
d[6,] <- c(0.1,0.1)
d[7,] <- c(0.1,0.1)
d[8,] <- c(0,0)
d[9,] <- c(0,0)
######################### m-out-of-n bootstrap : adpative alpha #############################
sc <- seq(1,9)
# number of simulated dataset
Nsimul <- 500
# number of boostrap samples
Nboot <- 1000
# sample size
n <- 300
# model specification
blip.model <- list(~ O1, ~ O2 + A1)
treat.model <- list(A1~1, A2~1)
tf.model <- list(~ O1, ~ O1 + A1 + O1*A1)
# allocate space and predefined quantities
estm <- vector(mode = "list", length = 1)
# specify scenario with i between 1 and 9. Here scenario 1
i <- 1
# reset estimates to NA for new scenario
estm[[1]] <- matrix(NA, nrow = Nsimul , ncol = Nboot + 4)
for(s in 1:Nsimul) # loop over number of simulations
{
# treatment A1, A2: P(Aj = 1) = P(Aj = 0) = 0.5
A1 <- rbinom(n, size = 1, prob = 0.5)
A2 <- rbinom(n, size = 1, prob = 0.5)
# covariates O1, O2: coded as -1, 1, where O2 depends on A1, O1 and (delta_1,delta_2)
O1 <- 2*rbinom(n, size = 1, prob = 0.5) - 1
O2 <- 2*rbinom(n, size = 1, prob = expit(d[sc[i],1]*O1 + d[sc[i],2]*(2*A1-1))) - 1
# generated outcome Y2 (Y1 set to 0), using parameters (gamma_1,...,gamma_7)
Y2 <- g[sc[i],1] + g[sc[i],2]*O1 + g[sc[i],3]*A1 + g[sc[i],4]*O1*A1 + g[sc[i],5]*A2 + g[sc[i],6]*O2*A2 + g[sc[i],7]*A1*A2 + rnorm(n)
# generated dataset
complete <- cbind(A1, A2, O1, O2, Y2)
# fit dWOLS to the generated dataset, using all n=300 observations
proba <- list(as.vector(rep(0.5,n)))
res.n <- try(DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols", var.estim = "bootstrap", data = as.data.frame(complete)))
# NOTE(review): the try() result is stored without checking inherits(es,
# "try-error"); a failed fit would put an error object into the matrix below.
es <- try(extract(res.n))
# save estimates using all observations in the first column
estm[[1]][s,1] <- es
# estimate of nonregularity
phat <- res.n$nonreg[2]
estm[[1]][s,2] <- phat
# choice of alpha
alpha <- dbalpha(data = complete, psin = es, blip.model = blip.model, treat.model = treat.model, tf.model = tf.model)
estm[[1]][s,3] <- alpha
print(c(s,alpha))
# resampling size
m <- n^((1 + alpha*(1-phat))/(1 + alpha))
estm[[1]][s,4] <- m
# probability treatment with m
proba <- list(as.vector(rep(0.5,floor(m))))
# bootstrap resampling + estimate
for(b in 1:Nboot) # loop over number of bootstrap samples
{
# resample with replacement
index <- sample(1:n, floor(m), replace = TRUE)
boot <- complete[index,]
# fit the model to bootstrap sample
res <- try(DTRreg(outcome = Y2, blip.mod = blip.model, treat.mod = treat.model, tf.mod = tf.model, treat.mod.man = rep(proba,2), method = "dwols", data = as.data.frame(boot)))
# NOTE(review): same unchecked try() pattern as above -- confirm failures
# are acceptable as NA-like entries or add an inherits() guard.
esb <- try(extract(res))
# save bootstrap estimates i in the (i+1) column
estm[[1]][s, b + 4] <- esb
}
}
# save results of the simulations in a CSV file
colnames(estm[[1]])[1:4] <- c("psi10","phat","alpha","m")
name1 <- paste("mnad_psi10_scenario", paste(sc[i]),"_2.csv",sep ="")
write.csv(estm[[1]], file = name1, row.names = FALSE)
|
## Return the name of the hospital in `state` with the lowest 30-day
## mortality rate for `outcome` ("heart attack", "heart failure" or
## "pneumonia"). Ties are broken alphabetically by hospital name.
## Stops with "invalid state"/"invalid outcome" on bad input.
best <- function(state, outcome) {
        ## Read outcome data
        outfile <- read.csv("outcome-of-care-measures.csv",
                            colClasses = "character")
        ## Check that outcome is valid
        valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
        if (!any(valid_outcomes == outcome)){
                stop("invalid outcome")
        }
        ## Check that state is valid
        if (!any(outfile$State == state)){
                stop("invalid state")
        }
        ## Map each outcome to its 30-day mortality-rate column
        col_map <- c(
                "heart attack" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                "pneumonia" = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
        subcols <- c("Hospital.Name", col_map[[outcome]])
        ## Subset to the requested state and the two columns of interest
        stateout <- subset(outfile, State == state, select = subcols)
        ## Convert the rate column to numeric; "Not Available" becomes NA
        stateout[, 2] <- suppressWarnings(as.numeric(stateout[, 2]))
        ## FIX: the original ended with `res <- rank(...)`, returning the rank
        ## vector invisibly instead of the hospital name. Drop hospitals with
        ## no data, order by rate then alphabetically, return the first name.
        stateout <- stateout[!is.na(stateout[, 2]), ]
        stateout[order(stateout[, 2], stateout[, 1]), 1][1]
}
|
/assignments/assign3/rprog_data_ProgAssignment3-data/best.R
|
no_license
|
rshrinivasan/rprog
|
R
| false
| false
| 1,690
|
r
|
## Return the name of the hospital in `state` with the lowest 30-day
## mortality rate for `outcome` ("heart attack", "heart failure" or
## "pneumonia"). Ties are broken alphabetically by hospital name.
## Stops with "invalid state"/"invalid outcome" on bad input.
best <- function(state, outcome) {
        ## Read outcome data
        outfile <- read.csv("outcome-of-care-measures.csv",
                            colClasses = "character")
        ## Check that outcome is valid
        valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
        if (!any(valid_outcomes == outcome)){
                stop("invalid outcome")
        }
        ## Check that state is valid
        if (!any(outfile$State == state)){
                stop("invalid state")
        }
        ## Map each outcome to its 30-day mortality-rate column
        col_map <- c(
                "heart attack" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                "pneumonia" = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
        subcols <- c("Hospital.Name", col_map[[outcome]])
        ## Subset to the requested state and the two columns of interest
        stateout <- subset(outfile, State == state, select = subcols)
        ## Convert the rate column to numeric; "Not Available" becomes NA
        stateout[, 2] <- suppressWarnings(as.numeric(stateout[, 2]))
        ## FIX: the original ended with `res <- rank(...)`, returning the rank
        ## vector invisibly instead of the hospital name. Drop hospitals with
        ## no data, order by rate then alphabetically, return the first name.
        stateout <- stateout[!is.na(stateout[, 2]), ]
        stateout[order(stateout[, 2], stateout[, 1]), 1][1]
}
|
## Download file (one-time; kept commented out)
## download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "GCD_project_data.zip", mode = "wb")
## Unzip the data
unzip("GCD_project_data.zip", overwrite = TRUE)
## Read test data (whitespace-separated, no header)
x_test <- read.delim("./UCI HAR Dataset/test/X_test.txt", sep = "", header = FALSE)
y_test <- read.delim("./UCI HAR Dataset/test/y_test.txt", sep = "", header = FALSE)
subject_test <- read.delim("./UCI HAR Dataset/test/subject_test.txt", sep = "", header = FALSE)
## Merge test data into one data frame: subject, activity id, measurements
test_data <- cbind(subject_test, y_test, x_test)
## Read train data
x_train <- read.delim("./UCI HAR Dataset/train/X_train.txt", sep = "", header = FALSE)
y_train <- read.delim("./UCI HAR Dataset/train/y_train.txt", sep = "", header = FALSE)
subject_train <- read.delim("./UCI HAR Dataset/train/subject_train.txt", sep = "", header = FALSE)
## Merge train data into one data frame
train_data <- cbind(subject_train, y_train, x_train)
## Create labels for the dataset from the features file
features <- read.delim("./UCI HAR Dataset/features.txt", sep = "", header = FALSE)
labels <- c("subject", "activity_id", as.character(features[[2]]))
## Assign the labels to the data sets
names(test_data) <- labels
names(train_data) <- labels
## Merge train and test data sets
combined_data <- rbind(test_data, train_data)
## Keep only columns mentioning mean or std (case-insensitive)
mean_std_columns <- grep("mean|std", labels, ignore.case = TRUE)
## Subset using those columns, keeping subject and activity columns too
mean_std_data <- combined_data[, c(1, 2, mean_std_columns)]
## Read activity labels; rename columns to ease the merge below
activity_labels <- read.delim("./UCI HAR Dataset/activity_labels.txt", sep = "", header = FALSE)
names(activity_labels) <- c("activity_id", "activity")
## Attach descriptive activity names via activity_id
labelled_data <- merge(activity_labels, mean_std_data, by = "activity_id", sort = FALSE)
## Remove the now-duplicate numeric activity_id column
final_dataset <- labelled_data[-1]
## Reorder: subject first, activity second, measurements unchanged
num_of_columns <- ncol(final_dataset)
final_dataset <- final_dataset[c(2, 1, 3:num_of_columns)]
## Order rows by subject
final_dataset <- final_dataset[order(final_dataset$subject), ]
## Write the tidy data set to a txt file
write.table(final_dataset, file = "UCI HAR Dataset/final_dataset.txt", row.names = FALSE)
## Mean of each measurement, aggregated by activity and subject.
## FIX: renamed `range` -> `measure_cols` (the original shadowed base::range)
## and replaced T/F with TRUE/FALSE throughout.
measure_cols <- 3:num_of_columns
aggregated_dataset <- aggregate(final_dataset[, measure_cols],
                                list(final_dataset$activity, final_dataset$subject), mean)
## Rename the grouping columns appropriately
names(aggregated_dataset)[names(aggregated_dataset) == 'Group.1'] <- 'activity'
names(aggregated_dataset)[names(aggregated_dataset) == 'Group.2'] <- 'subject'
## Write aggregated dataset into a txt file
write.table(aggregated_dataset, file = "UCI HAR Dataset/aggregated_dataset.txt",
            row.names = FALSE)
|
/run_analysis.R
|
no_license
|
ruaram/GCD-Project
|
R
| false
| false
| 3,249
|
r
|
## Download file (one-time; kept commented out)
## download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "GCD_project_data.zip", mode = "wb")
## Unzip the data
unzip("GCD_project_data.zip", overwrite = TRUE)
## Read test data (whitespace-separated, no header)
x_test <- read.delim("./UCI HAR Dataset/test/X_test.txt", sep = "", header = FALSE)
y_test <- read.delim("./UCI HAR Dataset/test/y_test.txt", sep = "", header = FALSE)
subject_test <- read.delim("./UCI HAR Dataset/test/subject_test.txt", sep = "", header = FALSE)
## Merge test data into one data frame: subject, activity id, measurements
test_data <- cbind(subject_test, y_test, x_test)
## Read train data
x_train <- read.delim("./UCI HAR Dataset/train/X_train.txt", sep = "", header = FALSE)
y_train <- read.delim("./UCI HAR Dataset/train/y_train.txt", sep = "", header = FALSE)
subject_train <- read.delim("./UCI HAR Dataset/train/subject_train.txt", sep = "", header = FALSE)
## Merge train data into one data frame
train_data <- cbind(subject_train, y_train, x_train)
## Create labels for the dataset from the features file
features <- read.delim("./UCI HAR Dataset/features.txt", sep = "", header = FALSE)
labels <- c("subject", "activity_id", as.character(features[[2]]))
## Assign the labels to the data sets
names(test_data) <- labels
names(train_data) <- labels
## Merge train and test data sets
combined_data <- rbind(test_data, train_data)
## Keep only columns mentioning mean or std (case-insensitive)
mean_std_columns <- grep("mean|std", labels, ignore.case = TRUE)
## Subset using those columns, keeping subject and activity columns too
mean_std_data <- combined_data[, c(1, 2, mean_std_columns)]
## Read activity labels; rename columns to ease the merge below
activity_labels <- read.delim("./UCI HAR Dataset/activity_labels.txt", sep = "", header = FALSE)
names(activity_labels) <- c("activity_id", "activity")
## Attach descriptive activity names via activity_id
labelled_data <- merge(activity_labels, mean_std_data, by = "activity_id", sort = FALSE)
## Remove the now-duplicate numeric activity_id column
final_dataset <- labelled_data[-1]
## Reorder: subject first, activity second, measurements unchanged
num_of_columns <- ncol(final_dataset)
final_dataset <- final_dataset[c(2, 1, 3:num_of_columns)]
## Order rows by subject
final_dataset <- final_dataset[order(final_dataset$subject), ]
## Write the tidy data set to a txt file
write.table(final_dataset, file = "UCI HAR Dataset/final_dataset.txt", row.names = FALSE)
## Mean of each measurement, aggregated by activity and subject.
## FIX: renamed `range` -> `measure_cols` (the original shadowed base::range)
## and replaced T/F with TRUE/FALSE throughout.
measure_cols <- 3:num_of_columns
aggregated_dataset <- aggregate(final_dataset[, measure_cols],
                                list(final_dataset$activity, final_dataset$subject), mean)
## Rename the grouping columns appropriately
names(aggregated_dataset)[names(aggregated_dataset) == 'Group.1'] <- 'activity'
names(aggregated_dataset)[names(aggregated_dataset) == 'Group.2'] <- 'subject'
## Write aggregated dataset into a txt file
write.table(aggregated_dataset, file = "UCI HAR Dataset/aggregated_dataset.txt",
            row.names = FALSE)
|
library(shiny)
library(googleVis)
library(dplyr)
# Read the cleaned CDC mortality data (1999-2010)
mortality <- read.csv('https://raw.githubusercontent.com/maxwagner/608/master/lecture3/q1/cleaned-cdc-mortality-1999-2010.csv')
# Keep chapter, state, year and rate columns; rename for readability
mortality <- mortality[, c(3, 5, 7, 11)]
colnames(mortality) <- c("Chapter", "State", "Year", "Rate")
# Only keep 2010 rates
mortality2010 <- filter(mortality, Year == 2010)
# Shiny server: bar chart of 2010 mortality rate by state for the
# user-selected ICD chapter
function(input, output) {
  # Reactive subset: states ordered by descending rate for the chosen chapter.
  # FIX: dropped the redundant `Year == 2010` condition -- mortality2010 is
  # already restricted to 2010 above.
  mortalityReactive <- reactive({
    mortality2010 %>%
      filter(Chapter == input$chapter) %>%
      select(State, Rate) %>%
      arrange(desc(Rate))
  })
  output$gvisplot <- renderGvis({
    gvisBarChart(mortalityReactive(),
                 options = list(
                   title = "2010 Mortality Rate",
                   backgroundColor = "#CFD8DC",
                   backgroundColor.stroke = "black",
                   backgroundColor.strokeWidth = 10,
                   height = 1000,
                   width = 500,
                   chartArea = "{width: '60%', height: '95%'}"
                 ))
  })
}
|
/lecture3/q1/server.R
|
no_license
|
maxwagner/608
|
R
| false
| false
| 1,132
|
r
|
library(shiny)
library(googleVis)
library(dplyr)
# Read the cleaned CDC mortality data (1999-2010)
mortality <- read.csv('https://raw.githubusercontent.com/maxwagner/608/master/lecture3/q1/cleaned-cdc-mortality-1999-2010.csv')
# Keep chapter, state, year and rate columns; rename for readability
mortality <- mortality[, c(3, 5, 7, 11)]
colnames(mortality) <- c("Chapter", "State", "Year", "Rate")
# Only keep 2010 rates
mortality2010 <- filter(mortality, Year == 2010)
# Shiny server: bar chart of 2010 mortality rate by state for the
# user-selected ICD chapter
function(input, output) {
  # Reactive subset: states ordered by descending rate for the chosen chapter.
  # FIX: dropped the redundant `Year == 2010` condition -- mortality2010 is
  # already restricted to 2010 above.
  mortalityReactive <- reactive({
    mortality2010 %>%
      filter(Chapter == input$chapter) %>%
      select(State, Rate) %>%
      arrange(desc(Rate))
  })
  output$gvisplot <- renderGvis({
    gvisBarChart(mortalityReactive(),
                 options = list(
                   title = "2010 Mortality Rate",
                   backgroundColor = "#CFD8DC",
                   backgroundColor.stroke = "black",
                   backgroundColor.strokeWidth = 10,
                   height = 1000,
                   width = 500,
                   chartArea = "{width: '60%', height: '95%'}"
                 ))
  })
}
|
#' Send an email message through the Mailgun API
#'
#' Send an email message via the Mailgun API.
#' This requires an account with Mailgun.
#' @param message the email message object,
#' as created by the \code{compose_email()}
#' function. The object's class is
#' \code{email_message}
#' @param subject the subject of the
#' email. If \code{NULL}, the placeholder
#' \code{"<no subject>"} is used.
#' @param from the email address of the
#' sender. This does not have to be
#' the same email that is associated with
#' the account actually sending the message.
#' @param recipients a vector of email
#' addresses.
#' @param url the URL for the sending domain.
#' @param api_key the API key registered to
#' the Mailgun service.
#' @examples
#' \dontrun{
#' # Create a simple email message using
#' # Markdown formatting
#' email <-
#'   compose_email(
#'   body = "
#'   Hello!
#'
#'   ## This a section heading
#'
#'   We can use Markdown formatting \\
#'   to **embolden** text or to add \\
#'   *emphasis*. This is exciting, \\
#'   right?
#'
#'   Cheers")
#'
#' # Generate a vector of recipients
#' recipient_list <-
#'   c("person_1@site.net",
#'     "person_2@site.net")
#'
#' # Send it to multiple people through
#' # the Mailgun API
#' email %>%
#'   send_by_mailgun(
#'     subject = "Sent through Mailgun",
#'     from = "The Sender <sender@send.org>",
#'     recipients = recipient_list,
#'     url = "<..mailgun_sending_domain..>",
#'     api_key = "<..mailgun_api_key..>")
#' }
#' @import httr
#' @importFrom glue glue
#' @export
send_by_mailgun <- function(message,
                            subject = NULL,
                            from,
                            recipients,
                            url,
                            api_key) {

  # Verify that the `message` object
  # is of the class `email_message`
  if (!inherits(x = message, what = "email_message")) {
    stop("The object provided in `message` must be created by the `compose_email()` function.")
  }

  # Use a placeholder when no subject is given; otherwise allow
  # glue interpolation of the subject string
  if (is.null(subject)) {
    subject_text <- "<no subject>"
  } else {
    subject_text <- glue::glue(subject)
  }

  # Collapse vector of recipients to a single string
  recipients <- paste(recipients, collapse = ", ")

  # Post the message to Mailgun.
  # FIX: send `subject_text` (previously the raw `subject` was sent, so the
  # "<no subject>" fallback and glue interpolation were silently ignored,
  # and a NULL subject dropped the field from the request body).
  httr::POST(
    url = url,
    authenticate("api", api_key),
    encode = "form",
    body = list(
      from = from,
      to = recipients,
      subject = subject_text,
      html = message$html_html))
}
|
/R/send_by_mailgun.R
|
permissive
|
fxcebx/blastula
|
R
| false
| false
| 2,390
|
r
|
#' Send an email message through the Mailgun API
#'
#' Send an email message via the Mailgun API.
#' This requires an account with Mailgun.
#' @param message the email message object,
#' as created by the \code{compose_email()}
#' function. The object's class is
#' \code{email_message}
#' @param subject the subject of the
#' email. If \code{NULL}, the placeholder
#' \code{"<no subject>"} is used; otherwise any
#' \code{{...}} expressions are interpolated
#' with \code{glue::glue()}.
#' @param from the email address of the
#' sender. This does not have to be
#' the same email that is associated with
#' the account actually sending the message.
#' @param recipients a vector of email
#' addresses.
#' @param url the URL for the sending domain.
#' @param api_key the API key registered to
#' the Mailgun service.
#' @examples
#' \dontrun{
#' # Create a simple email message using
#' # Markdown formatting
#' email <-
#'   compose_email(
#'   body = "
#'   Hello!
#'
#'   ## This a section heading
#'
#'   We can use Markdown formatting \\
#'   to **embolden** text or to add \\
#'   *emphasis*. This is exciting, \\
#'   right?
#'
#'   Cheers")
#'
#' # Generate a vector of recipients
#' recipient_list <-
#'   c("person_1@site.net",
#'     "person_2@site.net")
#'
#' # Send it to multiple people through
#' # the Mailgun API
#' email %>%
#'   send_by_mailgun(
#'     subject = "Sent through Mailgun",
#'     from = "The Sender <sender@send.org>",
#'     recipients = recipient_list,
#'     url = "<..mailgun_sending_domain..>",
#'     api_key = "<..mailgun_api_key..>")
#' }
#' @import httr
#' @importFrom glue glue
#' @export
send_by_mailgun <- function(message,
                            subject = NULL,
                            from,
                            recipients,
                            url,
                            api_key) {
  # Verify that the `message` object is of the class `email_message`
  if (!inherits(x = message, what = "email_message")) {
    stop("The object provided in `message` must be created by the `compose_email()` function.")
  }
  # Use a placeholder subject when none is given; otherwise interpolate
  # any `{...}` expressions in `subject` via glue
  if (is.null(subject)) {
    subject_text <- "<no subject>"
  } else {
    subject_text <- glue::glue(subject)
  }
  # Mailgun expects multiple recipients as one comma-separated string
  recipients <- paste(recipients, collapse = ", ")
  # Post the message to the Mailgun messages endpoint; HTTP basic auth
  # uses the literal user name "api" together with the account's API key
  httr::POST(
    url = url,
    authenticate("api", api_key),
    encode = "form",
    body = list(
      from = from,
      to = recipients,
      # Fix: the raw `subject` was sent here before, so the computed
      # `subject_text` (placeholder fallback + glue interpolation) was
      # silently ignored
      subject = subject_text,
      html = message$html_html))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lotR_example_data.R
\docType{data}
\name{lotR_example_edges}
\alias{lotR_example_edges}
\title{Edgelist for constructing an example tree.}
\format{
The edge list is a matrix with 15 rows and 2 columns:
the left column
represents parent nodes or categories, and the right column
represents children or subcategories of the parents.
There is one row for every parent-child pair.
}
\usage{
lotR_example_edges
}
\description{
A matrix representing edges
from the outcome tree formed by cardiovascular diseases
in category 7.4
(diseases of arteries, arterioles, and capillaries)
of the multilevel Clinical Classifications Software (CCS)
hierarchical disease classification system.
See vignette("moretrees") for details of how to construct
a tree from this edgelist.
}
\keyword{datasets}
|
/man/lotR_example_edges.Rd
|
no_license
|
limengbinggz/lotR
|
R
| false
| true
| 861
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lotR_example_data.R
\docType{data}
\name{lotR_example_edges}
\alias{lotR_example_edges}
\title{Edgelist for constructing an example tree.}
\format{
The edge list is a matrix with 15 rows and 2 columns:
the left column
represents parent nodes or categories, and the right column
represents children or subcategories of the parents.
There is one row for every parent-child pair.
}
\usage{
lotR_example_edges
}
\description{
A matrix representing edges
from the outcome tree formed by cardiovascular diseases
in category 7.4
(diseases of arteries, arterioles, and capillaries)
of the multilevel Clinical Classifications Software (CCS)
hierarchical disease classification system.
See vignette("moretrees") for details of how to construct
a tree from this edgelist.
}
\keyword{datasets}
|
####################################################################
# 8. Plot Multiple Shapefile
# NOTE(review): `aoi_boundary_HARV`, `lines_HARV`, `point_HARV`, and
# `road_colors` are not defined in this file -- they are presumably
# created in an earlier chapter of the tutorial; confirm they exist in
# the session before running.
# Layer three vector datasets on one map: polygon first, then lines,
# then the point (later geoms draw on top of earlier ones).
ggplot() +
#area
geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
#line
geom_sf(data = lines_HARV, aes(color = TYPE), size = 1) +
#point
geom_sf(data = point_HARV) +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
## customize legend
# show.legend = "line" forces line-style legend keys for the road layer;
# the tower point is drawn filled black with a black outline.
ggplot() +
geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line", size = 1) +
geom_sf(data = point_HARV, aes(fill = Sub_Type), color = "black") +
scale_color_brewer(palette = "Dark2", name = "Line Type") +
scale_fill_manual(values = "black", name = "Tower Location") +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
## challenge 1
# NOTE(review): hard-coded absolute path; portable only on the author's
# machine -- consider a project-relative path or here::here().
plotloc_HARV <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/HARV/PlotLocations_HARV.shp")
# override.aes keeps line keys solid (no point symbol) in the colour
# legend, and filled-circle keys (shape 21) in the fill legend.
ggplot() +
geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line") +
geom_sf(data = plotloc_HARV, aes(fill = soilTypeOr),
shape = 21, show.legend = 'point') +
scale_color_manual(name = "Line Type", values = road_colors,
guide = guide_legend(override.aes = list(linetype = "solid", shape = NA))) +
scale_fill_manual(name = "Soil Type", values = c("lightblue", "darkgreen"),
guide = guide_legend(override.aes = list(linetype = "blank", shape = 21, colour = c("lightblue", "darkgreen")))) +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
###############################################################################
# 9. Handling Spatial Projection & CRS
# read US state boundary file
state_boundary_US <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/US-Boundary-Layers/US-State-Boundaries-Census-2014.shp")
ggplot() +
geom_sf(data = state_boundary_US) +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
### looks good
# US boundary layer (dissolved states -> a single national outline)
country_boundary_US <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/US-Boundary-Layers/US-Boundary-Dissolved-States.shp")
ggplot() +
geom_sf(data = country_boundary_US, color = "gray18", size = 2) +
geom_sf(data = state_boundary_US, color = "gray40") +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
# add flux tower
# Inspect the coordinate reference system of each layer before combining
st_crs(point_HARV)
st_crs(state_boundary_US)
st_crs(country_boundary_US)
# view object extent (bounding box, in each layer's own CRS units)
st_bbox(point_HARV)
st_bbox(state_boundary_US)
st_bbox(country_boundary_US)
# reproject vector data
## ggplot automatically converts all objects to the same CRS before plotting
ggplot() +
geom_sf(data = country_boundary_US, size = 2, color = "gray18") +
geom_sf(data = state_boundary_US, color = "gray40") +
geom_sf(data = point_HARV, shape = 19, color = "purple") +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
|
/GeospatialR_Ch8&9.R
|
no_license
|
yanyidi/geospatial_data_tutorials
|
R
| false
| false
| 2,973
|
r
|
####################################################################
# 8. Plot Multiple Shapefile
# NOTE(review): `aoi_boundary_HARV`, `lines_HARV`, `point_HARV`, and
# `road_colors` are not defined in this file -- they are presumably
# created in an earlier chapter of the tutorial; confirm they exist in
# the session before running.
# Layer three vector datasets on one map: polygon first, then lines,
# then the point (later geoms draw on top of earlier ones).
ggplot() +
#area
geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
#line
geom_sf(data = lines_HARV, aes(color = TYPE), size = 1) +
#point
geom_sf(data = point_HARV) +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
## customize legend
# show.legend = "line" forces line-style legend keys for the road layer;
# the tower point is drawn filled black with a black outline.
ggplot() +
geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line", size = 1) +
geom_sf(data = point_HARV, aes(fill = Sub_Type), color = "black") +
scale_color_brewer(palette = "Dark2", name = "Line Type") +
scale_fill_manual(values = "black", name = "Tower Location") +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
## challenge 1
# NOTE(review): hard-coded absolute path; portable only on the author's
# machine -- consider a project-relative path or here::here().
plotloc_HARV <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/HARV/PlotLocations_HARV.shp")
# override.aes keeps line keys solid (no point symbol) in the colour
# legend, and filled-circle keys (shape 21) in the fill legend.
ggplot() +
geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line") +
geom_sf(data = plotloc_HARV, aes(fill = soilTypeOr),
shape = 21, show.legend = 'point') +
scale_color_manual(name = "Line Type", values = road_colors,
guide = guide_legend(override.aes = list(linetype = "solid", shape = NA))) +
scale_fill_manual(name = "Soil Type", values = c("lightblue", "darkgreen"),
guide = guide_legend(override.aes = list(linetype = "blank", shape = 21, colour = c("lightblue", "darkgreen")))) +
ggtitle("NEON Harvard Forest Field Site") +
coord_sf()
###############################################################################
# 9. Handling Spatial Projection & CRS
# read US state boundary file
state_boundary_US <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/US-Boundary-Layers/US-State-Boundaries-Census-2014.shp")
ggplot() +
geom_sf(data = state_boundary_US) +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
### looks good
# US boundary layer (dissolved states -> a single national outline)
country_boundary_US <- st_read("/Users/kate/Desktop/BENV7503/tut/tut2_2009586/NEON-DS-Site-Layout-Files/US-Boundary-Layers/US-Boundary-Dissolved-States.shp")
ggplot() +
geom_sf(data = country_boundary_US, color = "gray18", size = 2) +
geom_sf(data = state_boundary_US, color = "gray40") +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
# add flux tower
# Inspect the coordinate reference system of each layer before combining
st_crs(point_HARV)
st_crs(state_boundary_US)
st_crs(country_boundary_US)
# view object extent (bounding box, in each layer's own CRS units)
st_bbox(point_HARV)
st_bbox(state_boundary_US)
st_bbox(country_boundary_US)
# reproject vector data
## ggplot automatically converts all objects to the same CRS before plotting
ggplot() +
geom_sf(data = country_boundary_US, size = 2, color = "gray18") +
geom_sf(data = state_boundary_US, color = "gray40") +
geom_sf(data = point_HARV, shape = 19, color = "purple") +
ggtitle("Map of Contiguous US State Boundaries") +
coord_sf()
|
# Exercise 2: advanced ggplot2 practice
# Install and load the `ggplot2` package
#install.packages('ggplot2')
library("ggplot2")
# NOTE(review): the plots below use `diamonds_sample`, which is never
# created in this file -- presumably a sample of `diamonds` taken in the
# previous exercise; define it (e.g. diamonds[sample(nrow(diamonds), 1000), ])
# before running.
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Position Adjustments
# Draw a column (bar) chart of diamonds cuts by price, with each bar filled by
# clarity. You should see a _stacked_ bar chart.
# Draw the same chart again, but with each element positioned to "fill" the y axis
# Draw the same chart again, but with each element positioned to "dodge" each other
# Draw a plot with point geometry with the x-position mapped to `cut` and the
# y-position mapped to `clarity`
# This creates a "grid" grouping the points
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the
# previous exercise).
## Scales
# Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x)
ggplot(data = diamonds_sample) + geom_boxplot(mapping = aes(x = color, y = price))
# This has a lot of outliers, making it harder to read. To fix this, draw the
# same plot but with a _logarithmic_ scale for the y axis.
ggplot(data = diamonds_sample) + geom_boxplot(mapping = aes(x = color, y = price)) + scale_y_log10()
# For another version, draw the same plot but with `violin` geometry instead of
# `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
ggplot(data = diamonds_sample) + geom_violin(mapping = aes(x = color, y = price)) + scale_y_log10()
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x),
# using a heatmap of 2d bins (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
# (log-log axes make the roughly power-law price~carat relationship look linear)
ggplot(data = diamonds_sample) + geom_bin2d(mapping = aes(x = carat, y = price)) + scale_y_log10() + scale_x_log10()
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point
# by the clarity (Remember, this will take a while. Use a sample of the diamonds
# for faster results)
ggplot(data = diamonds_sample) + geom_point(mapping = aes(x = carat, y = price, color = clarity))
# Change the color of the previous plot using a ColorBrewer scale of your choice.
# What looks nice?
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space
# between)
# TIP: You can save the plot to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart!
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add
# _facets_ based on the diamond's `color`
## Saving Plots
# Use the `ggsave()` function to save the current (recent) plot to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
|
/chapter-16-exercises/exercise-2/exercise.R
|
permissive
|
kaamnarishi/book-exercises
|
R
| false
| false
| 3,042
|
r
|
# Exercise 2: advanced ggplot2 practice
# Install and load the `ggplot2` package
#install.packages('ggplot2')
library("ggplot2")
# NOTE(review): the plots below use `diamonds_sample`, which is never
# created in this file -- presumably a sample of `diamonds` taken in the
# previous exercise; define it (e.g. diamonds[sample(nrow(diamonds), 1000), ])
# before running.
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Position Adjustments
# Draw a column (bar) chart of diamonds cuts by price, with each bar filled by
# clarity. You should see a _stacked_ bar chart.
# Draw the same chart again, but with each element positioned to "fill" the y axis
# Draw the same chart again, but with each element positioned to "dodge" each other
# Draw a plot with point geometry with the x-position mapped to `cut` and the
# y-position mapped to `clarity`
# This creates a "grid" grouping the points
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the
# previous exercise).
## Scales
# Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x)
ggplot(data = diamonds_sample) + geom_boxplot(mapping = aes(x = color, y = price))
# This has a lot of outliers, making it harder to read. To fix this, draw the
# same plot but with a _logarithmic_ scale for the y axis.
ggplot(data = diamonds_sample) + geom_boxplot(mapping = aes(x = color, y = price)) + scale_y_log10()
# For another version, draw the same plot but with `violin` geometry instead of
# `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
ggplot(data = diamonds_sample) + geom_violin(mapping = aes(x = color, y = price)) + scale_y_log10()
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x),
# using a heatmap of 2d bins (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
# (log-log axes make the roughly power-law price~carat relationship look linear)
ggplot(data = diamonds_sample) + geom_bin2d(mapping = aes(x = carat, y = price)) + scale_y_log10() + scale_x_log10()
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point
# by the clarity (Remember, this will take a while. Use a sample of the diamonds
# for faster results)
ggplot(data = diamonds_sample) + geom_point(mapping = aes(x = carat, y = price, color = clarity))
# Change the color of the previous plot using a ColorBrewer scale of your choice.
# What looks nice?
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space
# between)
# TIP: You can save the plot to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart!
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add
# _facets_ based on the diamond's `color`
## Saving Plots
# Use the `ggsave()` function to save the current (recent) plot to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
|
##'
#' @title Samples Counting
#' @description Retrieves the values of a study variable, either directly by
#'   name or through a conditional formula.
#' @details If \code{x} is supplied, it is parsed and evaluated as an R
#'   expression and coerced to numeric. Otherwise, when \code{formula},
#'   \code{cVar}, \code{relation} and \code{threshold} are all supplied, the
#'   values are obtained via \code{getVarByCondFormula()} using the
#'   right-hand side of the formula (the response term is stripped).
#' @param x character, an expression naming a study variable; evaluated with
#'   \code{eval(parse(text = x))}.
#' @param formula a model formula; only its right-hand side is used.
#' @param xREF a numeric reference value; currently unused -- the percentile
#'   counting logic that consumed it is commented out below.
#' @param cVar conditioning variable forwarded to
#'   \code{getVarByCondFormula()}.
#' @param relation,threshold,relation2,threshold2 relational operators and
#'   thresholds forwarded to \code{getVarByCondFormula()}.
#' @return A numeric vector of the (possibly filtered) variable values, or
#'   \code{NULL} when neither retrieval path applies.
#' @author Rui Camacho, Paula Raissa
#' @section Dependencies:
#' \code{\link{getVarByName}}
#' @export
##'
getSamplesCounting <- function(x=NULL, formula=NULL, xREF=NULL, cVar=NULL, relation=NULL, threshold=NULL, relation2=NULL, threshold2=NULL) {
  # NOTE(review): as.formula(NULL) errors, so calling this with only `x`
  # (and no `formula`) will fail here -- confirm callers always pass a
  # formula, or guard this conversion.
  formula.temp <- as.formula(formula)
  # Drop the response term, keeping only the RHS of the formula
  formula2use <- formula.temp[-2]
  originalValues <- NULL
  if (!is.null(x)) {
    # NOTE(review): eval(parse(text = x)) executes arbitrary code taken
    # from `x`; confirm `x` is trusted input in the calling context.
    originalValues <- as.numeric(eval(parse(text=x)))
  }
  if (!is.null(formula) && !is.null(cVar) && !is.null(relation) && !is.null(threshold)) {
    originalValues <- getVarByCondFormula(formula = formula2use, cVar = cVar, relation = relation, relation2 = relation2, threshold = threshold, threshold2 = threshold2)
  }
  # Earlier percentile-counting behavior, retained for reference:
  # totalSamples <- nrow(originalValues)
  return(originalValues)
  # vect.values <- as.vector(originalValues)
  # contVal <- length(which(as.numeric(vect.values) <= xREF))
  #
  # return(list(contVal=contVal, totalSamples=totalSamples))
}
|
/R/getSamplesCounting.R
|
no_license
|
paularaissa/distStatsServer
|
R
| false
| false
| 1,318
|
r
|
##'
#' @title Samples Counting
#' @description Retrieves the values of a study variable, either directly by
#'   name or through a conditional formula.
#' @details If \code{x} is supplied, it is parsed and evaluated as an R
#'   expression and coerced to numeric. Otherwise, when \code{formula},
#'   \code{cVar}, \code{relation} and \code{threshold} are all supplied, the
#'   values are obtained via \code{getVarByCondFormula()} using the
#'   right-hand side of the formula (the response term is stripped).
#' @param x character, an expression naming a study variable; evaluated with
#'   \code{eval(parse(text = x))}.
#' @param formula a model formula; only its right-hand side is used.
#' @param xREF a numeric reference value; currently unused -- the percentile
#'   counting logic that consumed it is commented out below.
#' @param cVar conditioning variable forwarded to
#'   \code{getVarByCondFormula()}.
#' @param relation,threshold,relation2,threshold2 relational operators and
#'   thresholds forwarded to \code{getVarByCondFormula()}.
#' @return A numeric vector of the (possibly filtered) variable values, or
#'   \code{NULL} when neither retrieval path applies.
#' @author Rui Camacho, Paula Raissa
#' @section Dependencies:
#' \code{\link{getVarByName}}
#' @export
##'
getSamplesCounting <- function(x=NULL, formula=NULL, xREF=NULL, cVar=NULL, relation=NULL, threshold=NULL, relation2=NULL, threshold2=NULL) {
  # NOTE(review): as.formula(NULL) errors, so calling this with only `x`
  # (and no `formula`) will fail here -- confirm callers always pass a
  # formula, or guard this conversion.
  formula.temp <- as.formula(formula)
  # Drop the response term, keeping only the RHS of the formula
  formula2use <- formula.temp[-2]
  originalValues <- NULL
  if (!is.null(x)) {
    # NOTE(review): eval(parse(text = x)) executes arbitrary code taken
    # from `x`; confirm `x` is trusted input in the calling context.
    originalValues <- as.numeric(eval(parse(text=x)))
  }
  if (!is.null(formula) && !is.null(cVar) && !is.null(relation) && !is.null(threshold)) {
    originalValues <- getVarByCondFormula(formula = formula2use, cVar = cVar, relation = relation, relation2 = relation2, threshold = threshold, threshold2 = threshold2)
  }
  # Earlier percentile-counting behavior, retained for reference:
  # totalSamples <- nrow(originalValues)
  return(originalValues)
  # vect.values <- as.vector(originalValues)
  # contVal <- length(which(as.numeric(vect.values) <= xREF))
  #
  # return(list(contVal=contVal, totalSamples=totalSamples))
}
|
# Internal functions -----------------------------------------------------------
read.manifest.Mammal <- function(file, type = 2) {
  # Parse an Illumina Mammal methylation array manifest CSV into the
  # probe-type tables used to build an array-manifest object.
  #
  # Args:
  #   file: path to the manifest CSV file.
  #   type: manifest layout variant. Type 1 has an [Assay] section header
  #     and a logical `drop` column; type 2 (default) starts the assay
  #     table at the top of the file and carries two extra name columns.
  #
  # Returns:
  #   A list with `manifestList` (TypeI, TypeII, TypeControl, TypeSnpI,
  #   TypeSnpII DataFrames), the raw `manifest` data.frame, and the raw
  #   `controls` table.
  #
  # NOTE: requires the external `grep` utility to locate the [Controls]
  # (and, for type 1, [Assay]) section offsets within the CSV.
  control.line <- system(
    sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
  control.line <- as.integer(sub(":.*", "", control.line))
  stopifnot(length(control.line) == 1 &&
            is.integer(control.line) &&
            !is.na(control.line))
  if(type == 1) {
    assay.line <- system(
      sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    stopifnot(length(assay.line) == 1 &&
              is.integer(assay.line) &&
              !is.na(assay.line))
  } else {
    # Type 2 manifests have no [Assay] header; the table starts at line 1
    assay.line <- 0
  }
  # The column-header row immediately follows the [Assay] marker
  colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
  colNames <- strsplit(colNames, ",")[[1]]
  colClasses <- rep("character", length(colNames))
  names(colClasses) <- colNames
  names(colClasses) <- make.names(names(colClasses))
  colClasses[c("MAPINFO")] <- "integer"
  if(type == 1) {
    colClasses <- c(colClasses, drop = "logical")
  }
  manifest <- read.table(
    file = file,
    header = FALSE,
    col.names = names(colClasses),
    sep = ",",
    comment.char = "",
    quote = "",
    skip = assay.line + 1L,
    colClasses = colClasses,
    nrows = control.line - assay.line - 2L)
  manifest$drop <- NULL
  if(type == 2) {
    names(manifest)[c(1,2,30)] <- c("Name", "Internal_Name", "Internal_Name2")
  }
  # Strip leading zeros from address IDs so they match IDAT addresses
  manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
  manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
  TypeI <- manifest[
    manifest$Infinium_Design_Type == "I",
    c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
      "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
  names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
    "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
  TypeI <- as(TypeI, "DataFrame")
  TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
  TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
  TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
  # CpGs covered by a Type I probe: CG dinucleotides in the B-allele
  # sequence, minus one for the assayed site itself
  TypeI$nCpG <- as.integer(
    oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
  TypeI$nCpG[TypeI$nCpG < 0] <- 0L
  # Split off SNP ("rs") probes. Fix: use invert = TRUE instead of negative
  # indexing -- when no "rs" probes exist, -grep(...) is integer(0), and
  # x[integer(0), ] would silently drop EVERY row. This also matches the
  # style used in read.manifest.Allergy.
  TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
  TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
  TypeII <- manifest[
    manifest$Infinium_Design_Type == "II",
    c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
  names(TypeII)[c(2,3)] <- c("AddressA", "ProbeSeqA")
  TypeII <- as(TypeII, "DataFrame")
  TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
  # For Type II probes, "R" (A/G degenerate base) marks CpG positions
  TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
  TypeII$nCpG[TypeII$nCpG < 0] <- 0L
  # Same empty-match-safe split for Type II probes (see note above)
  TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
  TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
  if(type == 1) {
    controls <- read.table(
      file = file,
      skip = control.line,
      sep = ",",
      comment.char = "",
      quote = "",
      colClasses = c(rep("character", 5)))[, 1:5]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
  } else {
    # Type 2 control sections have only four columns and lower-case types
    controls <- read.table(
      file = file,
      skip = control.line,
      sep = ",",
      comment.char = "",
      quote = "",
      colClasses = c(rep("character", 4)))[, 1:4]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl$Type <- toupper(TypeControl$Type)
    TypeControl <- as(TypeControl, "DataFrame")
  }
  list(
    manifestList = list(
      TypeI = TypeI,
      TypeII = TypeII,
      TypeControl = TypeControl,
      TypeSnpI = TypeSnpI,
      TypeSnpII = TypeSnpII),
    manifest = manifest,
    controls = controls)
}
read.manifest.Allergy <- function(file) {
  # Parse an Illumina Allergy array manifest CSV into the probe-type
  # tables used to build an array-manifest object.
  #
  # Args:
  #   file: path to the manifest CSV file (Illumina layout, with [Assay]
  #     and [Controls] section headers).
  #
  # Returns:
  #   A list with `manifestList` (TypeI, TypeII, TypeControl, TypeSnpI,
  #   TypeSnpII DataFrames), the raw `manifest` data.frame, and the raw
  #   `controls` table.
  #
  # NOTE: As is, requires grep (the external utility) to find the byte
  # offsets of the [Controls] and [Assay] section markers.
  control.line <- system(
    sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
  control.line <- as.integer(sub(":.*", "", control.line))
  stopifnot(length(control.line) == 1 &&
            is.integer(control.line) &&
            !is.na(control.line))
  assay.line <- system(
    sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
  assay.line <- as.integer(sub(":.*", "", assay.line))
  stopifnot(length(assay.line) == 1 &&
            is.integer(assay.line) &&
            !is.na(assay.line))
  # The column-header row immediately follows the [Assay] marker
  colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
  colNames <- strsplit(colNames, ",")[[1]]
  colClasses <- rep("character", length(colNames))
  names(colClasses) <- colNames
  names(colClasses) <- make.names(names(colClasses))
  colClasses[c("MAPINFO")] <- "integer"
  manifest <- read.table(
    file = file,
    header = FALSE,
    col.names = names(colClasses),
    sep = ",",
    comment.char = "",
    quote = "",
    skip = assay.line + 1L,
    colClasses = colClasses,
    nrows = control.line - assay.line - 2L)
  # Strip leading zeros from address IDs so they match IDAT addresses
  manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
  manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
  # Type I probes use two addresses (A = unmethylated, B = methylated)
  TypeI <- manifest[
    manifest$Infinium_Design_Type == "I",
    c("IlmnID", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
      "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
  names(TypeI)[c(1, 2, 3, 4, 5, 6, 7)] <- c("Name",
    "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
  TypeI <- as(TypeI, "DataFrame")
  TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
  TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
  TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
  # CpGs covered by a Type I probe: CG dinucleotides in the B-allele
  # sequence, minus one for the assayed site itself
  TypeI$nCpG <- as.integer(
    oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
  TypeI$nCpG[TypeI$nCpG < 0] <- 0L
  # Split SNP ("rs") probes out of the CpG tables; invert = TRUE keeps all
  # rows even when no "rs" probes are present
  TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
  TypeI <- TypeI[grep("^rs", TypeI$Name, invert=TRUE), ]
  TypeII <- manifest[
    manifest$Infinium_Design_Type == "II",
    c("IlmnID", "AddressA_ID", "AlleleA_ProbeSeq")]
  names(TypeII)[c(1,2,3)] <- c("Name", "AddressA", "ProbeSeqA")
  TypeII <- as(TypeII, "DataFrame")
  TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
  # For Type II probes, "R" (A/G degenerate base) marks CpG positions
  TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
  TypeII$nCpG[TypeII$nCpG < 0] <- 0L
  TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
  TypeII <- TypeII[grep("^rs", TypeII$Name, invert=TRUE), ]
  # Everything after the [Controls] marker describes control probes
  controls <- read.table(
    file = file,
    skip = control.line,
    sep = ",",
    comment.char = "",
    quote = "",
    colClasses = c(rep("character", 5)))[, 1:5]
  TypeControl <- controls[, 1:4]
  names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
  TypeControl <- as(TypeControl, "DataFrame")
  list(
    manifestList = list(
      TypeI = TypeI,
      TypeII = TypeII,
      TypeControl = TypeControl,
      TypeSnpI = TypeSnpI,
      TypeSnpII = TypeSnpII),
    manifest = manifest,
    controls = controls)
}
read.manifest.sesame.Allergy <- function(file) {
  # Parse a sesame-style Allergy manifest CSV (single header row, no
  # [Assay]/[Controls] section markers; controls are rows whose IlmnID
  # begins with "ctl_") and normalize it to the same table layout produced
  # by read.manifest.Allergy.
  #
  # Args:
  #   file: path to the sesame-format manifest CSV.
  #
  # Returns:
  #   A list with `manifestList` (TypeI, TypeII, TypeControl, TypeSnpI,
  #   TypeSnpII DataFrames), the raw `manifest` data.frame, and the raw
  #   `controls` table.
  ID_column <- "IlmnID"
  colNames <- readLines(file, n = 1)
  colNames <- strsplit(colNames, ",")[[1]]
  colClasses <- rep("character", length(colNames))
  names(colClasses) <- colNames
  names(colClasses) <- make.names(names(colClasses))
  colClasses[c("MAPINFO")] <- "integer"
  manifest <- read.table(
    file = file,
    header = TRUE,
    sep = ",",
    comment.char = "",
    colClasses = colClasses,
    quote = "")
  # Map sesame column names / codes onto the Illumina-style names used
  # elsewhere in this file
  names(manifest)[names(manifest) == "Probe_ID"] <- "IlmnID"
  names(manifest)[names(manifest) == "U"] <- "AddressA_ID"
  names(manifest)[names(manifest) == "M"] <- "AddressB_ID"
  manifest$Infinium_Design_Type <- sub("1", "I", manifest$Infinium_Design_Type)
  manifest$Infinium_Design_Type <- sub("2", "II", manifest$Infinium_Design_Type)
  manifest$Color_Channel <- sub("Both", "", manifest$Color_Channel)
  # Controls are flagged by a "ctl_" prefix rather than a separate section
  controls.idx <- grep("^ctl_", manifest$IlmnID)
  nocontrols.idx <- grep("^ctl_", manifest$IlmnID, invert = TRUE)
  controls <- manifest[controls.idx,]
  manifest <- manifest[nocontrols.idx,]
  # Drop ALL copies of any duplicated probe name (not just later ones)
  dupNames <- unique(manifest$Name[duplicated(manifest$Name)])
  manifest <- manifest[! manifest$Name %in% dupNames,]
  TypeI <- manifest[
    manifest$Infinium_Design_Type == "I",
    c(ID_column, "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
      "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
  names(TypeI) <- c("Name", "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
  # DNAStringSet cannot hold NA; use empty strings instead
  TypeI$NextBase[is.na(TypeI$NextBase)] <- ""
  TypeI$Color[is.na(TypeI$Color)] <- ""
  TypeI <- as(TypeI, "DataFrame")
  TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
  TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
  TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
  # CpGs covered by a Type I probe: CG dinucleotides in the B-allele
  # sequence, minus one for the assayed site itself
  TypeI$nCpG <- as.integer(
    oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
  TypeI$nCpG[TypeI$nCpG < 0] <- 0L
  TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
  TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
  TypeII <- manifest[
    manifest$Infinium_Design_Type == "II",
    c(ID_column, "AddressA_ID", "AlleleA_ProbeSeq")]
  names(TypeII) <- c("Name", "AddressA", "ProbeSeqA")
  TypeII <- as(TypeII, "DataFrame")
  TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
  # For Type II probes, "R" (A/G degenerate base) marks CpG positions
  TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
  TypeII$nCpG[TypeII$nCpG < 0] <- 0L
  TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
  TypeII <- TypeII[grep("^rs", TypeII$Name, invert=TRUE), ]
  TypeControl <- as(controls[, c("AddressA_ID", "Probe_Type", "IlmnID")], "DataFrame")
  names(TypeControl) <- c("Address", "Type", "ExtendedType")
  # Rewrite sesame control names into minfi's conventional spellings.
  # NOTE: this sub() chain is ORDER-SENSITIVE -- e.g. "NP_G_1".."NP_G_5"
  # must be handled before the bare "NP_G" pattern, and each sub() replaces
  # only the first match per element; do not reorder these lines.
  TypeControl$Type <- sub("BISULFITE_CONVERSION_", "BISULFITE CONVERSION ", TypeControl$Type)
  TypeControl$Type <- sub("SPECIFICITY_", "SPECIFICITY ", TypeControl$Type)
  TypeControl$Type <- sub("TARGET_REMOVAL", "TARGET REMOVAL", TypeControl$Type)
  TypeControl$ExtendedType <- sub("^ctl_", "", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Biotin_5K", "Biotin(5K)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Biotin_Bkg", "Biotin (Bkg)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Biotin_High", "Biotin (High)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("BS_Conversion_", "BS Conversion ", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("I_", "I-", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("DNP_20K", "DNP(20K)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("DNP_Bkg", "DNP (Bkg)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("DNP_High", "DNP (High)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Extension_A", "Extension (A)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Extension_C", "Extension (C)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Extension_G", "Extension (G)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Extension_T", "Extension (T)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("GT_Mismatch_", "GT Mismatch ", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("_MM", " (MM)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("_PM", " (PM)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Hyb_High", "Hyb (High)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Hyb_Low", "Hyb (Low)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Hyb_Medium", "Hyb (Medium)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_A", "NP (A)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_C", "NP (C)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G_1", "NP (G) 1", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G_2", "NP (G) 2", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G_3", "NP (G) 3", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G_4", "NP (G) 4", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G_5", "NP (G) 5", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_G", "NP (G)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("NP_T", "NP (T)", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Negative_", "Negative ", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Specificity_", "Specificity ", TypeControl$ExtendedType)
  TypeControl$ExtendedType <- sub("Target_Removal_", "Target Removal ", TypeControl$ExtendedType)
  return(list(manifestList = list(
    TypeI = TypeI,
    TypeII = TypeII,
    TypeControl = TypeControl,
    TypeSnpI = TypeSnpI,
    TypeSnpII = TypeSnpII),
    manifest = manifest, controls = controls))
}
read.manifest.EPIC <- function(file) {
  # Parse an Illumina EPIC methylation array manifest CSV into the
  # probe-type tables used to build an array-manifest object.
  #
  # Args:
  #   file: path to the manifest CSV file (Illumina layout, with [Assay]
  #     and [Controls] section headers).
  #
  # Returns:
  #   A list with `manifestList` (TypeI, TypeII, TypeControl, TypeSnpI,
  #   TypeSnpII DataFrames), the raw `manifest` data.frame, and the raw
  #   `controls` table.
  #
  # NOTE: requires the external `grep` utility to locate the [Controls]
  # and [Assay] section offsets within the CSV.
  control.line <- system(
    sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
  control.line <- as.integer(sub(":.*", "", control.line))
  stopifnot(length(control.line) == 1 &&
            is.integer(control.line) &&
            !is.na(control.line))
  assay.line <- system(
    sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
  assay.line <- as.integer(sub(":.*", "", assay.line))
  stopifnot(length(assay.line) == 1 &&
            is.integer(assay.line) &&
            !is.na(assay.line))
  # The column-header row immediately follows the [Assay] marker
  colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
  colNames <- strsplit(colNames, ",")[[1]]
  colClasses <- rep("character", length(colNames))
  names(colClasses) <- colNames
  names(colClasses) <- make.names(names(colClasses))
  colClasses[c("MAPINFO")] <- "integer"
  manifest <- read.table(
    file = file,
    header = FALSE,
    col.names = names(colClasses),
    sep = ",",
    comment.char = "",
    quote = "",
    skip = assay.line + 1L,
    colClasses = colClasses,
    nrows = control.line - assay.line - 2L)
  # Strip leading zeros from address IDs so they match IDAT addresses
  manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
  manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
  TypeI <- manifest[
    manifest$Infinium_Design_Type == "I",
    c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
      "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
  names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
    "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
  TypeI <- as(TypeI, "DataFrame")
  TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
  TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
  TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
  # CpGs covered by a Type I probe: CG dinucleotides in the B-allele
  # sequence, minus one for the assayed site itself
  TypeI$nCpG <- as.integer(
    oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
  TypeI$nCpG[TypeI$nCpG < 0] <- 0L
  # Split off SNP ("rs") probes. Fix: use invert = TRUE instead of negative
  # indexing -- when no "rs" probes exist, -grep(...) is integer(0), and
  # x[integer(0), ] would silently drop EVERY row. This also matches the
  # style used in read.manifest.Allergy.
  TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
  TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
  TypeII <- manifest[
    manifest$Infinium_Design_Type == "II",
    c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
  names(TypeII)[c(2,3)] <- c("AddressA", "ProbeSeqA")
  TypeII <- as(TypeII, "DataFrame")
  TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
  # For Type II probes, "R" (A/G degenerate base) marks CpG positions
  TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
  TypeII$nCpG[TypeII$nCpG < 0] <- 0L
  # Same empty-match-safe split for Type II probes (see note above)
  TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
  TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
  # Everything after the [Controls] marker describes control probes
  controls <- read.table(
    file = file,
    skip = control.line,
    sep = ",",
    comment.char = "",
    quote = "",
    colClasses = c(rep("character", 5)))[, 1:5]
  TypeControl <- controls[, 1:4]
  names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
  TypeControl <- as(TypeControl, "DataFrame")
  list(
    manifestList = list(
      TypeI = TypeI,
      TypeII = TypeII,
      TypeControl = TypeControl,
      TypeSnpI = TypeSnpI,
      TypeSnpII = TypeSnpII),
    manifest = manifest,
    controls = controls)
}
# Parse an Illumina HumanMethylation450 manifest CSV into probe-type
# DataFrames.
#
# Same sectioned-CSV layout as the EPIC manifest: probe rows follow an
# "[Assay]" marker and control rows follow "[Controls]".  Section positions
# are found with the external `grep` utility, so `grep` must be on the PATH.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.450k <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    stopifnot(length(control.line) == 1 &&
                  is.integer(control.line) &&
                  !is.na(control.line))
    assay.line <- system(
        sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    stopifnot(length(assay.line) == 1 &&
                  is.integer(assay.line) &&
                  !is.na(assay.line))
    # NOTE: Column headers is in line 8, hardcoded
    colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    colClasses[c("MAPINFO")] <- "integer"
    # NOTE(review): skip = 7 / nrows = control.line - 9 assume the [Assay]
    # marker sits on line 7; assay.line is computed above but not used here.
    manifest <- read.table(
        file = file,
        header = TRUE,
        sep = ",",
        comment.char = "",
        quote = "",
        skip = 7,
        colClasses = colClasses,
        nrows = control.line - 9)
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <-
        c("AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes.  BUGFIX: use invert = TRUE rather than
    # -grep(): if no "rs" probe matched, x[-integer(0)] would drop every row.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))[, 1:5]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    list(
        manifestList = list(
            TypeI = TypeI,
            TypeII = TypeII,
            TypeControl = TypeControl,
            TypeSnpI = TypeSnpI,
            TypeSnpII = TypeSnpII),
        manifest = manifest,
        controls = controls)
}
# Parse an Illumina HumanMethylation27 manifest CSV into probe-type
# DataFrames.
#
# Sectioned CSV like the 450k/EPIC manifests; section positions are found
# with the external `grep` utility ("-a" forces text mode in case the file
# contains stray non-text bytes).  The 27k design is all Type I, so TypeII
# is expected to be empty and SNP probes come from the "Genotyping" control
# rows rather than "rs"-named assay probes.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.27k <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -a -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    assay.line <- system(
        sprintf("grep -a -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    # Column names are on the line immediately after the [Assay] marker.
    colNames <- tail(readLines(file, n = assay.line + 1), n = 1)
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    colClasses[c("MAPINFO")] <- "integer"
    manifest <- read.table(
        file = file,
        header = TRUE,
        sep = ",",
        comment.char = "",
        quote = "",
        skip = assay.line,
        colClasses = colClasses,
        nrows = control.line - (assay.line + 1),
        fill = TRUE)
    # All assay rows are Type I on this platform; no design-type filter.
    TypeI <- manifest[
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    TypeI <- TypeI[TypeI$Name != "", ]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
        "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Kept for interface compatibility; expected empty on 27k.
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- BStringSet(TypeII$ProbeSeqA)
    # BUGFIX: was letterFrequency(TypeII$ProbeSeq, ...); the column was
    # renamed to ProbeSeqA above, so the old code passed NULL.
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    # SNP probes live among the controls as "Genotyping" rows, one row per
    # allele ("<rs>_A" / "<rs>_B"); pair them up into TypeSnpI.
    snps <- TypeControl[TypeControl$Type == "Genotyping", ]
    TypeControl <- TypeControl[TypeControl$Type != "Genotyping", ]
    rsname <- sub("_[AB]", "", snps$ExtendedType)
    snps.sp <- split(snps, rsname)
    snps.sp <- lapply(names(snps.sp), function(rs) {
        snp <- snps.sp[[rs]]
        DataFrame(
            Name = rs,
            AddressA = snp[grep("_A", snp$ExtendedType), "Address"],
            AddressB = snp[grep("_B", snp$ExtendedType), "Address"],
            Color = "Unknown")
    })
    TypeSnpI <- do.call(rbind, snps.sp)
    # No Type II SNP probes on 27k; empty DataFrame with the same columns.
    TypeSnpII <- TypeSnpI[0, ]
    list(manifestList =
             list(TypeI = TypeI,
                  TypeII = TypeII,
                  TypeControl = TypeControl,
                  TypeSnpI = TypeSnpI,
                  TypeSnpII = TypeSnpII),
         manifest = manifest,
         controls = controls)
}
# TODOs ------------------------------------------------------------------------
# TODO: Lots of duplicated code; DRY
|
/R/read.manifest.R
|
no_license
|
mwsill/minfi
|
R
| false
| false
| 22,976
|
r
|
# Internal functions -----------------------------------------------------------
# Parse an Illumina Mammalian array manifest CSV into probe-type DataFrames.
#
# Two manifest layouts are supported via `type`:
#   type = 1: sectioned CSV with an [Assay] marker before the probe table
#             and a trailing logical "drop" column; 5-column control table.
#   type = 2: header on line 1 (no [Assay] marker); columns 1, 2 and 30 are
#             renamed to Name / Internal_Name / Internal_Name2; 4-column
#             control table whose Type values are upper-cased.
# The [Controls] (and, for type 1, [Assay]) line numbers are located with
# the external `grep` utility, so `grep` must be on the PATH.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.Mammal <- function(file, type = 2) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    stopifnot(length(control.line) == 1 &&
                  is.integer(control.line) &&
                  !is.na(control.line))
    if (type == 1) {
        assay.line <- system(
            sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
        assay.line <- as.integer(sub(":.*", "", assay.line))
        stopifnot(length(assay.line) == 1 &&
                      is.integer(assay.line) &&
                      !is.na(assay.line))
    } else {
        # type 2 files have no [Assay] marker; the header is line 1.
        assay.line <- 0
    }
    colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    names(colClasses) <- make.names(names(colClasses))
    colClasses[c("MAPINFO")] <- "integer"
    if (type == 1) {
        colClasses <- c(colClasses, drop = "logical")
    }
    manifest <- read.table(
        file = file,
        header = FALSE,
        col.names = names(colClasses),
        sep = ",",
        comment.char = "",
        quote = "",
        skip = assay.line + 1L,
        colClasses = colClasses,
        nrows = control.line - assay.line - 2L)
    manifest$drop <- NULL
    if (type == 2) {
        names(manifest)[c(1, 2, 30)] <- c("Name", "Internal_Name", "Internal_Name2")
    }
    # Strip leading zero-padding so addresses match the IDAT address IDs.
    manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
    manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
        "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes.  BUGFIX: use invert = TRUE rather than
    # -grep(): if no "rs" probe matched, x[-integer(0)] would drop every row.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
    if (type == 1) {
        controls <- read.table(
            file = file,
            skip = control.line,
            sep = ",",
            comment.char = "",
            quote = "",
            colClasses = c(rep("character", 5)))[, 1:5]
        TypeControl <- controls[, 1:4]
        names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
        TypeControl <- as(TypeControl, "DataFrame")
    } else {
        controls <- read.table(
            file = file,
            skip = control.line,
            sep = ",",
            comment.char = "",
            quote = "",
            colClasses = c(rep("character", 4)))[, 1:4]
        TypeControl <- controls[, 1:4]
        names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
        TypeControl$Type <- toupper(TypeControl$Type)
        TypeControl <- as(TypeControl, "DataFrame")
    }
    list(
        manifestList = list(
            TypeI = TypeI,
            TypeII = TypeII,
            TypeControl = TypeControl,
            TypeSnpI = TypeSnpI,
            TypeSnpII = TypeSnpII),
        manifest = manifest,
        controls = controls)
}
# Parse an Illumina "Allergy" array manifest CSV into probe-type DataFrames.
#
# Same sectioned-CSV layout as the EPIC manifest ([Assay] marker before the
# probe table, [Controls] before the control table); section positions are
# found with the external `grep` utility, so `grep` must be on the PATH.
# Unlike the EPIC parser, probes are keyed by the "IlmnID" column, renamed
# to "Name" in the output.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.Allergy <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    stopifnot(length(control.line) == 1 &&
                  is.integer(control.line) &&
                  !is.na(control.line))
    assay.line <- system(
        sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    stopifnot(length(assay.line) == 1 &&
                  is.integer(assay.line) &&
                  !is.na(assay.line))
    # Column names are on the line immediately after the [Assay] marker.
    colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    names(colClasses) <- make.names(names(colClasses))
    colClasses[c("MAPINFO")] <- "integer"
    manifest <- read.table(
        file = file,
        header = FALSE,
        col.names = names(colClasses),
        sep = ",",
        comment.char = "",
        quote = "",
        skip = assay.line + 1L,
        colClasses = colClasses,
        nrows = control.line - assay.line - 2L)
    # Strip leading zero-padding so addresses match the IDAT address IDs.
    manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
    manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c("IlmnID", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI)[c(1, 2, 3, 4, 5, 6, 7)] <- c("Name",
        "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes; invert = TRUE keeps all rows when no
    # "rs" probe is present.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert=TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("IlmnID", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(1,2,3)] <- c("Name", "AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert=TRUE), ]
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))[, 1:5]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    list(
        manifestList = list(
            TypeI = TypeI,
            TypeII = TypeII,
            TypeControl = TypeControl,
            TypeSnpI = TypeSnpI,
            TypeSnpII = TypeSnpII),
        manifest = manifest,
        controls = controls)
}
# Parse a SeSAMe-style "Allergy" array manifest CSV.
#
# Unlike the sectioned Illumina manifests, this is a single flat CSV with a
# header row; control probes are identified by a "ctl_" prefix on the probe
# ID rather than a [Controls] section.  Column and value names are
# normalised to the layout produced by the other read.manifest.* parsers
# (Probe_ID -> IlmnID, U/M -> AddressA_ID/AddressB_ID, design type 1/2 ->
# I/II), and control Type/ExtendedType strings are rewritten to the
# Illumina-style spellings expected downstream.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <probe table>, controls = <control table>).
read.manifest.sesame.Allergy <- function(file) {
    ID_column <- "IlmnID"
    # Header row (line 1) carries the column names.
    colNames <- readLines(file, n = 1)
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    names(colClasses) <- make.names(names(colClasses))
    colClasses[c("MAPINFO")] <- "integer"
    manifest <- read.table(
        file = file,
        header = TRUE,
        sep = ",",
        comment.char = "",
        colClasses = colClasses,
        quote = "")
    # Rename sesame columns to the Illumina manifest layout.
    names(manifest)[names(manifest) == "Probe_ID"] <- "IlmnID"
    names(manifest)[names(manifest) == "U"] <- "AddressA_ID"
    names(manifest)[names(manifest) == "M"] <- "AddressB_ID"
    manifest$Infinium_Design_Type <- sub("1", "I", manifest$Infinium_Design_Type)
    manifest$Infinium_Design_Type <- sub("2", "II", manifest$Infinium_Design_Type)
    manifest$Color_Channel <- sub("Both", "", manifest$Color_Channel)
    # Controls are flagged by a "ctl_" ID prefix instead of a section.
    controls.idx <- grep("^ctl_", manifest$IlmnID)
    nocontrols.idx <- grep("^ctl_", manifest$IlmnID, invert = TRUE)
    controls <- manifest[controls.idx,]
    manifest <- manifest[nocontrols.idx,]
    # Drop every copy of any duplicated probe name.
    # NOTE(review): uses manifest$Name, not IlmnID — confirm the input has
    # a "Name" column.
    dupNames <- unique(manifest$Name[duplicated(manifest$Name)])
    manifest <- manifest[! manifest$Name %in% dupNames,]
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c(ID_column, "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI) <- c("Name", "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    # DNAStringSet() cannot encode NA; use the empty string instead.
    TypeI$NextBase[is.na(TypeI$NextBase)] <- ""
    TypeI$Color[is.na(TypeI$Color)] <- ""
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c(ID_column, "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII) <- c("Name", "AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert=TRUE), ]
    TypeControl <- as(controls[, c("AddressA_ID", "Probe_Type", "IlmnID")], "DataFrame")
    names(TypeControl) <- c("Address", "Type", "ExtendedType")
    # Rewrite sesame control names into the Illumina-style spellings
    # (spaces/parentheses instead of underscores) expected downstream.
    TypeControl$Type <- sub("BISULFITE_CONVERSION_", "BISULFITE CONVERSION ", TypeControl$Type)
    TypeControl$Type <- sub("SPECIFICITY_", "SPECIFICITY ", TypeControl$Type)
    TypeControl$Type <- sub("TARGET_REMOVAL", "TARGET REMOVAL", TypeControl$Type)
    TypeControl$ExtendedType <- sub("^ctl_", "", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Biotin_5K", "Biotin(5K)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Biotin_Bkg", "Biotin (Bkg)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Biotin_High", "Biotin (High)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("BS_Conversion_", "BS Conversion ", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("I_", "I-", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("DNP_20K", "DNP(20K)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("DNP_Bkg", "DNP (Bkg)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("DNP_High", "DNP (High)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Extension_A", "Extension (A)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Extension_C", "Extension (C)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Extension_G", "Extension (G)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Extension_T", "Extension (T)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("GT_Mismatch_", "GT Mismatch ", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("_MM", " (MM)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("_PM", " (PM)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Hyb_High", "Hyb (High)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Hyb_Low", "Hyb (Low)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Hyb_Medium", "Hyb (Medium)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_A", "NP (A)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_C", "NP (C)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G_1", "NP (G) 1", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G_2", "NP (G) 2", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G_3", "NP (G) 3", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G_4", "NP (G) 4", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G_5", "NP (G) 5", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_G", "NP (G)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("NP_T", "NP (T)", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Negative_", "Negative ", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Specificity_", "Specificity ", TypeControl$ExtendedType)
    TypeControl$ExtendedType <- sub("Target_Removal_", "Target Removal ", TypeControl$ExtendedType)
    return(list(manifestList = list(
        TypeI = TypeI,
        TypeII = TypeII,
        TypeControl = TypeControl,
        TypeSnpI = TypeSnpI,
        TypeSnpII = TypeSnpII),
        manifest = manifest, controls = controls))
}
# Parse an Illumina MethylationEPIC manifest CSV into probe-type DataFrames.
#
# The manifest is a sectioned CSV: probe rows follow an "[Assay]" marker
# line and control-probe rows follow a "[Controls]" marker line.  The
# marker line numbers are located with the external `grep` utility, so this
# function requires `grep` on the PATH.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.EPIC <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    stopifnot(length(control.line) == 1 &&
                  is.integer(control.line) &&
                  !is.na(control.line))
    assay.line <- system(
        sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    stopifnot(length(assay.line) == 1 &&
                  is.integer(assay.line) &&
                  !is.na(assay.line))
    # Column names are on the line immediately after the [Assay] marker.
    colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    names(colClasses) <- make.names(names(colClasses))
    colClasses[c("MAPINFO")] <- "integer"
    manifest <- read.table(
        file = file,
        header = FALSE,
        col.names = names(colClasses),
        sep = ",",
        comment.char = "",
        quote = "",
        skip = assay.line + 1L,
        colClasses = colClasses,
        nrows = control.line - assay.line - 2L)
    # Strip leading zero-padding so addresses match the IDAT address IDs.
    manifest$AddressA_ID <- gsub("^0*", "", manifest$AddressA_ID)
    manifest$AddressB_ID <- gsub("^0*", "", manifest$AddressB_ID)
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
        "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes.  BUGFIX: use invert = TRUE rather than
    # -grep(): if no "rs" probe matched, x[-integer(0)] would drop every row.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))[, 1:5]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    list(
        manifestList = list(
            TypeI = TypeI,
            TypeII = TypeII,
            TypeControl = TypeControl,
            TypeSnpI = TypeSnpI,
            TypeSnpII = TypeSnpII),
        manifest = manifest,
        controls = controls)
}
# Parse an Illumina HumanMethylation450 manifest CSV into probe-type
# DataFrames.
#
# Same sectioned-CSV layout as the EPIC manifest: probe rows follow an
# "[Assay]" marker and control rows follow "[Controls]".  Section positions
# are found with the external `grep` utility, so `grep` must be on the PATH.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.450k <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    stopifnot(length(control.line) == 1 &&
                  is.integer(control.line) &&
                  !is.na(control.line))
    assay.line <- system(
        sprintf("grep -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    stopifnot(length(assay.line) == 1 &&
                  is.integer(assay.line) &&
                  !is.na(assay.line))
    # NOTE: Column headers is in line 8, hardcoded
    colNames <- readLines(file, n = assay.line + 1L)[assay.line + 1L]
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    colClasses[c("MAPINFO")] <- "integer"
    # NOTE(review): skip = 7 / nrows = control.line - 9 assume the [Assay]
    # marker sits on line 7; assay.line is computed above but not used here.
    manifest <- read.table(
        file = file,
        header = TRUE,
        sep = ",",
        comment.char = "",
        quote = "",
        skip = 7,
        colClasses = colClasses,
        nrows = control.line - 9)
    TypeI <- manifest[
        manifest$Infinium_Design_Type == "I",
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <-
        c("AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Split off SNP ("rs") probes.  BUGFIX: use invert = TRUE rather than
    # -grep(): if no "rs" probe matched, x[-integer(0)] would drop every row.
    TypeSnpI <- TypeI[grep("^rs", TypeI$Name), ]
    TypeI <- TypeI[grep("^rs", TypeI$Name, invert = TRUE), ]
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- DNAStringSet(TypeII$ProbeSeqA)
    # Type II probes encode each CpG as the degenerate base "R" (A/G).
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    TypeII$nCpG[TypeII$nCpG < 0] <- 0L
    TypeSnpII <- TypeII[grep("^rs", TypeII$Name), ]
    TypeII <- TypeII[grep("^rs", TypeII$Name, invert = TRUE), ]
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))[, 1:5]
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    list(
        manifestList = list(
            TypeI = TypeI,
            TypeII = TypeII,
            TypeControl = TypeControl,
            TypeSnpI = TypeSnpI,
            TypeSnpII = TypeSnpII),
        manifest = manifest,
        controls = controls)
}
# Parse an Illumina HumanMethylation27 manifest CSV into probe-type
# DataFrames.
#
# Sectioned CSV like the 450k/EPIC manifests; section positions are found
# with the external `grep` utility ("-a" forces text mode in case the file
# contains stray non-text bytes).  The 27k design is all Type I, so TypeII
# is expected to be empty and SNP probes come from the "Genotyping" control
# rows rather than "rs"-named assay probes.
#
# Returns list(manifestList = list(TypeI, TypeII, TypeControl, TypeSnpI,
# TypeSnpII), manifest = <raw probe table>, controls = <raw control table>).
read.manifest.27k <- function(file) {
    # NOTE: As is, requires grep
    control.line <- system(
        sprintf("grep -a -n \\\\[Controls\\\\] %s", file), intern = TRUE)
    control.line <- as.integer(sub(":.*", "", control.line))
    assay.line <- system(
        sprintf("grep -a -n \\\\[Assay\\\\] %s", file), intern = TRUE)
    assay.line <- as.integer(sub(":.*", "", assay.line))
    # Column names are on the line immediately after the [Assay] marker.
    colNames <- tail(readLines(file, n = assay.line + 1), n = 1)
    colNames <- strsplit(colNames, ",")[[1]]
    colClasses <- rep("character", length(colNames))
    names(colClasses) <- colNames
    colClasses[c("MAPINFO")] <- "integer"
    manifest <- read.table(
        file = file,
        header = TRUE,
        sep = ",",
        comment.char = "",
        quote = "",
        skip = assay.line,
        colClasses = colClasses,
        nrows = control.line - (assay.line + 1),
        fill = TRUE)
    # All assay rows are Type I on this platform; no design-type filter.
    TypeI <- manifest[
        c("Name", "AddressA_ID", "AddressB_ID", "Color_Channel", "Next_Base",
          "AlleleA_ProbeSeq", "AlleleB_ProbeSeq")]
    TypeI <- TypeI[TypeI$Name != "", ]
    names(TypeI)[c(2, 3, 4, 5, 6, 7)] <- c(
        "AddressA", "AddressB", "Color", "NextBase", "ProbeSeqA", "ProbeSeqB")
    TypeI <- as(TypeI, "DataFrame")
    TypeI$ProbeSeqA <- DNAStringSet(TypeI$ProbeSeqA)
    TypeI$ProbeSeqB <- DNAStringSet(TypeI$ProbeSeqB)
    TypeI$NextBase <- DNAStringSet(TypeI$NextBase)
    # CpG count: "CG" dinucleotides in the B-allele probe minus 1 (the
    # assayed CpG itself), clamped at zero.
    TypeI$nCpG <- as.integer(
        oligonucleotideFrequency(TypeI$ProbeSeqB, width = 2)[, "CG"] - 1L)
    TypeI$nCpG[TypeI$nCpG < 0] <- 0L
    # Kept for interface compatibility; expected empty on 27k.
    TypeII <- manifest[
        manifest$Infinium_Design_Type == "II",
        c("Name", "AddressA_ID", "AlleleA_ProbeSeq")]
    names(TypeII)[c(2, 3)] <- c("AddressA", "ProbeSeqA")
    TypeII <- as(TypeII, "DataFrame")
    TypeII$ProbeSeqA <- BStringSet(TypeII$ProbeSeqA)
    # BUGFIX: was letterFrequency(TypeII$ProbeSeq, ...); the column was
    # renamed to ProbeSeqA above, so the old code passed NULL.
    TypeII$nCpG <- as.integer(letterFrequency(TypeII$ProbeSeqA, letters = "R"))
    controls <- read.table(
        file = file,
        skip = control.line,
        sep = ",",
        comment.char = "",
        quote = "",
        colClasses = c(rep("character", 5)))
    TypeControl <- controls[, 1:4]
    names(TypeControl) <- c("Address", "Type", "Color", "ExtendedType")
    TypeControl <- as(TypeControl, "DataFrame")
    # SNP probes live among the controls as "Genotyping" rows, one row per
    # allele ("<rs>_A" / "<rs>_B"); pair them up into TypeSnpI.
    snps <- TypeControl[TypeControl$Type == "Genotyping", ]
    TypeControl <- TypeControl[TypeControl$Type != "Genotyping", ]
    rsname <- sub("_[AB]", "", snps$ExtendedType)
    snps.sp <- split(snps, rsname)
    snps.sp <- lapply(names(snps.sp), function(rs) {
        snp <- snps.sp[[rs]]
        DataFrame(
            Name = rs,
            AddressA = snp[grep("_A", snp$ExtendedType), "Address"],
            AddressB = snp[grep("_B", snp$ExtendedType), "Address"],
            Color = "Unknown")
    })
    TypeSnpI <- do.call(rbind, snps.sp)
    # No Type II SNP probes on 27k; empty DataFrame with the same columns.
    TypeSnpII <- TypeSnpI[0, ]
    list(manifestList =
             list(TypeI = TypeI,
                  TypeII = TypeII,
                  TypeControl = TypeControl,
                  TypeSnpI = TypeSnpI,
                  TypeSnpII = TypeSnpII),
         manifest = manifest,
         controls = controls)
}
# TODOs ------------------------------------------------------------------------
# TODO: Lots of duplicated code; DRY
|
library(tidyverse)
library(sf)
library(readxl)
library(ggpubr)
library(ggfortify)
library(here)
library(knitr)
library(kableExtra)
library(DT)
library(mapview)
library(rgdal)
library(SerbianCyrLat)
library(stringr)
library(classInt)
library(viridis)
library(gridExtra)
library(ggsflabel)
library(ggspatial)
# Custom ggplot2 theme: theme_minimal base with a grey major grid, pale
# panel fill, dark-red facet strips with bold white labels, a grey panel
# border and a bottom-centred legend.
my_theme <- function(base_size = 10, base_family = "sans") {
  overrides <- theme(
    axis.text = element_text(size = 10),
    axis.text.x = element_text(angle = 0, vjust = 0.5, hjust = 0.5),
    axis.title = element_text(size = 12),
    panel.grid.major = element_line(color = "grey"),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill = "#fffcfc"),
    strip.background = element_rect(fill = "#820000", color = "#820000", size = 0.5),
    strip.text = element_text(face = "bold", size = 10, color = "white"),
    legend.position = "bottom",
    legend.justification = "center",
    legend.background = element_blank(),
    panel.border = element_rect(color = "grey30", fill = NA, size = 0.5)
  )
  theme_minimal(base_size = base_size, base_family = base_family) + overrides
}
# Register my_theme() as the default theme for all subsequent ggplots.
theme_set(my_theme())
# Accent palette (red / blue / magenta).
mycolors=c("#f32440","#2185ef","#d421ef")
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Maps by category
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Custom ggplot2 theme: theme_minimal base with a grey major grid, pale
# panel fill, dark-red facet strips with bold white labels, a grey panel
# border and a bottom-centred legend.
my_theme <- function(base_size = 10, base_family = "sans") {
  overrides <- theme(
    axis.text = element_text(size = 10),
    axis.text.x = element_text(angle = 0, vjust = 0.5, hjust = 0.5),
    axis.title = element_text(size = 12),
    panel.grid.major = element_line(color = "grey"),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill = "#fffcfc"),
    strip.background = element_rect(fill = "#820000", color = "#820000", size = 0.5),
    strip.text = element_text(face = "bold", size = 10, color = "white"),
    legend.position = "bottom",
    legend.justification = "center",
    legend.background = element_blank(),
    panel.border = element_rect(color = "grey30", fill = NA, size = 0.5)
  )
  theme_minimal(base_size = base_size, base_family = base_family) + overrides
}
# Register my_theme() as the default theme for all subsequent ggplots.
theme_set(my_theme())
# Accent palette (red / blue / magenta).
mycolors=c("#f32440","#2185ef","#d421ef")
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Load the gridded GNFR 1A1 (Energy) emission inventory and bin each
# pollutant into 12 Fisher-Jenks ("fisher") class intervals for mapping.
sf_data_energy <- st_read(dsn = "Products/Sum-up_By_category/1A1 - Energy.gpkg")
classes.NOx <- classIntervals(sf_data_energy$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_energy$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_energy$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_energy$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_energy$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_energy$NH3, n = 12, style = "fisher")
# Add one discrete class column per pollutant, cut at the fisher breaks
# (dig.lab = 7 keeps the break labels out of scientific notation).
sf_data_energy <- sf_data_energy %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = T,dig.lab=7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = T,dig.lab=7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = T,dig.lab=7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = T,dig.lab=7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = T,dig.lab=7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = T,dig.lab=7)
  )
# Colour palettes; the maps below use pal1 (reversed inferno).
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::inferno(3, direction = -1)
pal3 <- viridisLite::viridis(12, direction = -1)
pal4 <- viridisLite::viridis(12, direction = -1)
pal5 <- viridisLite::viridis(12, direction = -1)
pal6 <- viridisLite::viridis(12, direction = -1)
# Municipality (opstina) boundaries: GADM 3.6 level-2 for Serbia, read with
# UTF-8 re-encoding, then converted to sf for plotting.
# NOTE(review): rgdal was retired in 2023; sf::st_read() could replace
# readOGR() here — confirm before changing.
opstine <- readOGR("Data/opstine/gadm36_SRB_2.shp",
                   use_iconv=TRUE,
                   encoding = "UTF-8")
sf_opstine <- st_as_sf(opstine)
#+ include = FALSE
# Choropleth of NOx emissions [t] per 0.05°x0.05° grid cell for GNFR
# sector 1A1 (Energy), overlaid with municipality borders, a scale bar
# and a north arrow; fill classes come from the fisher breaks above.
a1<-ggplot() +
  geom_sf(data = sf_data_energy,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 1A1 - Energy",
       caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
  theme(line = element_blank(),
        #axis.text = element_blank(),
        axis.title = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
  coord_sf(datum = sf::st_crs(4326))+
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
a1
# Choropleth of SO2 emissions [t] per 0.05°x0.05° grid cell for GNFR
# sector 1A1 (Energy); same layout as the NOx map.
b1<-ggplot() +
  geom_sf(data = sf_data_energy,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 1A1 - Energy",
       caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
  theme(line = element_blank(),
        #axis.text = element_blank(),
        axis.title = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
  coord_sf(datum = sf::st_crs(4326))+
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
b1
# Choropleth of PM10 emissions [t] per 0.05°x0.05° grid cell for GNFR
# sector 1A1 (Energy).
# NOTE(review): unlike a1/b1 this theme() call does not blank axis.title —
# confirm whether that difference is intentional.
c1<-ggplot() +
  geom_sf(data = sf_data_energy,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 1A1 - Energy",
       caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
  coord_sf(datum = sf::st_crs(4326))+
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
c1
d1<-ggplot() +
geom_sf(data = sf_data_energy,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 1A1 - Energy",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d1
e1<-ggplot() +
geom_sf(data = sf_data_energy,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 1A1 - Energy",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e1
f1<-ggplot() +
geom_sf(data = sf_data_energy,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal2,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 1A1 - Energy",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f1
# Compose the six 1A1 (Energy) pollutant maps into a 2 x 3 panel, then write
# the NMVOC map (e1) to disk as a 30 x 30 cm JPEG at 600 dpi.
energy_map <- grid.arrange(a1, b1, c1, d1, e1, f1, nrow = 3, ncol = 2)
ggsave(
  filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A1 - Energy_NMVOC.jpg",
  plot = e1,
  device = "jpeg",
  width = 30,
  height = 30,
  units = "cm",
  dpi = 600
)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR 1A2 (Industry): read the gridded sector emissions and bin each
# pollutant into 12 Fisher-Jenks intervals; the resulting factor columns
# drive the discrete fill scales of the maps below.
sf_data_industry <- st_read(dsn = "Products/Sum-up_By_category/1A2 - Industry.gpkg")
classes.NOx <- classIntervals(sf_data_industry$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_industry$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_industry$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_industry$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_industry$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_industry$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps interval labels in plain (non-scientific) notation.
# TRUE spelled out instead of the reassignable shorthand T.
sf_data_industry <- sf_data_industry %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# pal1 (reversed inferno, 12 colours) matches the 12 classes.
# NOTE(review): pal2..pal6 are not referenced by the 1A2 plots below;
# kept in case later code relies on them.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(30)
pal3 <- viridisLite::viridis(30)
pal4 <- viridisLite::viridis(30)
pal5 <- viridisLite::viridis(30)
pal6 <- viridisLite::viridis(30)
#+ include = FALSE
# GNFR 1A2 (Industry) choropleths, one per pollutant. Same layout as the
# other sectors: gridded values filled by Fisher class, sf_opstine outlines
# on top, scale bar and north arrow bottom-left.
# Fix: caption typo "Teritory" -> "Territory".

# a2: NOx map for 1A2 - Industry.
a2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
a2
# b2: SO2 map for 1A2 - Industry.
b2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
b2
# c2: PM10 map for 1A2 - Industry.
c2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
c2
# d2: PM2.5 map for 1A2 - Industry.
d2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_PM2.5)) +
  scale_fill_manual(values = pal1,
                    name = "PM2.5 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM2.5",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
d2
# e2: NMVOC map for 1A2 - Industry.
e2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_NMVOC)) +
  scale_fill_manual(values = pal1,
                    name = "NMVOC [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NMVOC",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
e2
# f2: NH3 map for 1A2 - Industry.
f2 <- ggplot() +
  geom_sf(data = sf_data_industry,
          aes(fill = percent_class_NH3)) +
  scale_fill_manual(values = pal1,
                    name = "NH3 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NH3",
       subtitle = "GNFR sector: 1A2 - Industry",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
f2
# Compose the six 1A2 (Industry) pollutant maps into a 2 x 3 panel and save
# the NH3 map (f2) as a 30 x 30 cm JPEG at 600 dpi.
# Fix: grid.arrange referenced bare objects a..f, which belong to a different
# section of the script (this section's plots are a2..f2).
industry_map <- grid.arrange(a2, b2, c2, d2, e2, f2, ncol = 2, nrow = 3)
ggsave(plot = f2,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A2 - Industry_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR 1A3 (Transport): read the gridded sector emissions and bin each
# pollutant into 12 Fisher-Jenks intervals for the maps below.
sf_data_transport <- st_read(dsn = "Products/Sum-up_By_category/1A3 - Transport.gpkg")
classes.NOx <- classIntervals(sf_data_transport$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_transport$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_transport$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_transport$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_transport$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_transport$NH3, n = 12, style = "fisher")
# TRUE spelled out instead of the reassignable shorthand T.
# dig.lab = 7 added for consistency with the other sectors, so legend
# interval labels are not rendered in scientific notation.
sf_data_transport <- sf_data_transport %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# pal1 (reversed inferno, 12 colours) matches the 12 classes.
# NOTE(review): pal2..pal6 are not referenced by the 1A3 plots below.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# GNFR 1A3 (Transport) choropleths, one per pollutant; same layout as the
# other sectors. (This section does not auto-print each plot, unlike 1A1/1A2.)
# Fix: caption typo "Teritory" -> "Territory".

# a3: NOx map for 1A3 - Transport.
a3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# b3: SO2 map for 1A3 - Transport.
b3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# c3: PM10 map for 1A3 - Transport.
c3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# d3: PM2.5 map for 1A3 - Transport.
d3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_PM2.5)) +
  scale_fill_manual(values = pal1,
                    name = "PM2.5 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM2.5",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# e3: NMVOC map for 1A3 - Transport.
e3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_NMVOC)) +
  scale_fill_manual(values = pal1,
                    name = "NMVOC [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NMVOC",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# f3: NH3 map for 1A3 - Transport.
f3 <- ggplot() +
  geom_sf(data = sf_data_transport,
          aes(fill = percent_class_NH3)) +
  scale_fill_manual(values = pal1,
                    name = "NH3 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NH3",
       subtitle = "GNFR sector: 1A3 - Transport",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
# Compose the six 1A3 (Transport) pollutant maps into a 2 x 3 panel and save
# the NH3 map (f3) as a 30 x 30 cm JPEG at 600 dpi.
# Fix: grid.arrange referenced bare objects a..f from a different section;
# this section's plots are a3..f3.
transport_map <- grid.arrange(a3, b3, c3, d3, e3, f3, ncol = 2, nrow = 3)
ggsave(plot = f3,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A3 - Transport_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR 1A4 (Residential-Tertiary): read the gridded sector emissions and bin
# each pollutant into 12 Fisher-Jenks intervals for the maps below.
sf_data_residential <- st_read(dsn = "Products/Sum-up_By_category/1A4 - Residential-Tertiary.gpkg")
classes.NOx <- classIntervals(sf_data_residential$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_residential$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_residential$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_residential$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_residential$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_residential$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps interval labels in plain (non-scientific) notation.
# TRUE spelled out instead of the reassignable shorthand T.
sf_data_residential <- sf_data_residential %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# pal1 (reversed inferno, 12 colours) matches the 12 classes.
# NOTE(review): pal2..pal6 are not referenced by the 1A4 plots below.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# GNFR 1A4 (Residential-Tertiary) choropleths, one per pollutant; same layout
# as the other sectors.
# Fix: caption typo "Teritory" -> "Territory".

# a4: NOx map for 1A4 - Residential-Tertiary.
a4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
a4
# b4: SO2 map for 1A4 - Residential-Tertiary.
b4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
b4
# c4: PM10 map for 1A4 - Residential-Tertiary.
c4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
c4
# d4: PM2.5 map for 1A4 - Residential-Tertiary.
d4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_PM2.5)) +
  scale_fill_manual(values = pal1,
                    name = "PM2.5 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM2.5",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
d4
# e4: NMVOC map for 1A4 - Residential-Tertiary.
e4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_NMVOC)) +
  scale_fill_manual(values = pal1,
                    name = "NMVOC [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NMVOC",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
e4
# f4: NH3 map for 1A4 - Residential-Tertiary.
f4 <- ggplot() +
  geom_sf(data = sf_data_residential,
          aes(fill = percent_class_NH3)) +
  scale_fill_manual(values = pal1,
                    name = "NH3 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NH3",
       subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
f4
# Compose the six 1A4 (Residential-Tertiary) pollutant maps into a 2 x 3
# panel and save the NH3 map (f4) as a 30 x 30 cm JPEG at 600 dpi.
# Fix: grid.arrange referenced bare objects a..f from a different section;
# this section's plots are a4..f4.
residential_map <- grid.arrange(a4, b4, c4, d4, e4, f4, ncol = 2, nrow = 3)
ggsave(plot = f4,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A4 - Residential-Tertiary_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR 1B (Fugitive emissions): read the gridded sector emissions and bin
# each pollutant into 12 Fisher-Jenks intervals for the maps below.
sf_data_fugitive <- st_read(dsn = "Products/Sum-up_By_category/1B - Fugitive emissions.gpkg")
classes.NOx <- classIntervals(sf_data_fugitive$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_fugitive$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_fugitive$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_fugitive$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_fugitive$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_fugitive$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps interval labels in plain (non-scientific) notation.
# TRUE spelled out instead of the reassignable shorthand T.
sf_data_fugitive <- sf_data_fugitive %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# pal1 (reversed inferno, 12 colours) matches the 12 classes.
# NOTE(review): pal2 is only 2 colours here (unlike every other section) and
# pal2..pal6 are not referenced by the 1B plots below -- confirm intent.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::inferno(2, direction = -1)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# GNFR 1B (Fugitive emissions) choropleths, one per pollutant; same layout
# as the other sectors.
# Fix: caption typo "Teritory" -> "Territory".

# a5: NOx map for 1B - Fugitive emissions.
a5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
a5
# b5: SO2 map for 1B - Fugitive emissions.
b5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
b5
# c5: PM10 map for 1B - Fugitive emissions.
c5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
c5
# d5: PM2.5 map for 1B - Fugitive emissions.
d5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_PM2.5)) +
  scale_fill_manual(values = pal1,
                    name = "PM2.5 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM2.5",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
d5
# e5: NMVOC map for 1B - Fugitive emissions.
e5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_NMVOC)) +
  scale_fill_manual(values = pal1,
                    name = "NMVOC [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NMVOC",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
e5
# f5: NH3 map for 1B - Fugitive emissions.
f5 <- ggplot() +
  geom_sf(data = sf_data_fugitive,
          aes(fill = percent_class_NH3)) +
  scale_fill_manual(values = pal1,
                    name = "NH3 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NH3",
       subtitle = "GNFR sector: 1B - Fugitive emissions",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
f5
# Compose the six 1B (Fugitive emissions) pollutant maps into a 2 x 3 panel
# and save the NH3 map (f5) as a 30 x 30 cm JPEG at 600 dpi.
# Fix: grid.arrange referenced bare objects a..f from a different section;
# this section's plots are a5..f5.
fugitive_map <- grid.arrange(a5, b5, c5, d5, e5, f5, ncol = 2, nrow = 3)
ggsave(plot = f5,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1B - Fugitive emissions_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR 2 (Other processes): read the gridded sector emissions and bin each
# pollutant into 12 Fisher-Jenks intervals for the maps below.
sf_data_other <- st_read(dsn = "Products/Sum-up_By_category/2 - Other processes.gpkg")
classes.NOx <- classIntervals(sf_data_other$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_other$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_other$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_other$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_other$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_other$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps interval labels in plain (non-scientific) notation.
# TRUE spelled out instead of the reassignable shorthand T.
sf_data_other <- sf_data_other %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# pal1 (reversed inferno, 12 colours) matches the 12 classes.
# NOTE(review): pal2..pal6 are not referenced by the sector-2 plots below.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# GNFR 2 (Other processes) choropleths: NOx, SO2, PM10 (the remaining
# pollutants for this sector continue below this block).
# Fix: caption typo "Teritory" -> "Territory".

# a6: NOx map for 2 - Other processes.
a6 <- ggplot() +
  geom_sf(data = sf_data_other,
          aes(fill = percent_class_NOx)) +
  scale_fill_manual(values = pal1,
                    name = "NOx [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - NOx",
       subtitle = "GNFR sector: 2 - Other processes",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
a6
# b6: SO2 map for 2 - Other processes.
b6 <- ggplot() +
  geom_sf(data = sf_data_other,
          aes(fill = percent_class_SO2)) +
  scale_fill_manual(values = pal1,
                    name = "SO2 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - SO2",
       subtitle = "GNFR sector: 2 - Other processes",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
b6
# c6: PM10 map for 2 - Other processes.
c6 <- ggplot() +
  geom_sf(data = sf_data_other,
          aes(fill = percent_class_PM10)) +
  scale_fill_manual(values = pal1,
                    name = "PM10 [t]") +
  labs(x = NULL, y = NULL,
       title = "Pollutant inventory spatialization - PM10",
       subtitle = "GNFR sector: 2 - Other processes",
       caption = "Spatial resolution 0.05°x0.05°, Territory of the Republic of Serbia\n UBFCE (2020)") +
  theme(line = element_blank(),
        #axis.text = element_blank(),
        legend.position = "bottom", ###################### legend
        panel.background = element_blank()) +
  geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
  coord_sf(datum = sf::st_crs(4326)) +
  annotation_scale(location = "bl", width_hint = 0.5) +
  annotation_north_arrow(location = "bl", which_north = "true",
                         pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                         style = north_arrow_fancy_orienteering)
c6
d6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d6
e6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e6
f6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f6
# Combine the six "Other processes" maps into a 2x3 layout and export PM2.5.
# BUG FIX: the original passed a, b, c, d, e, f — names that are either
# undefined or left over from an earlier section — instead of a6..f6.
other_map <- grid.arrange(a6, b6, c6, d6, e6, f6, ncol = 2, nrow = 3)
ggsave(plot = d6,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_2 - Other processes_PM2.5.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR sector 3 - Agriculture: load the gridded inventory and classify each
# pollutant with 12 Fisher-Jenks classes.
sf_data_agriculture <- st_read(dsn = "Products/Sum-up_By_category/3 - Agriculture.gpkg")
classes.NOx <- classIntervals(sf_data_agriculture$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_agriculture$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_agriculture$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_agriculture$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_agriculture$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_agriculture$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps legend labels in fixed notation.
# (Idiom fix: TRUE spelled out instead of the reassignable shorthand T.)
sf_data_agriculture <- sf_data_agriculture %>%
  mutate(percent_class_NOx   = cut(NOx,   classes.NOx$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2   = cut(SO2,   classes.SO2$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10  = cut(PM10,  classes.PM10$brks,  include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3   = cut(NH3,   classes.NH3$brks,   include.lowest = TRUE, dig.lab = 7)
  )
# Colour palettes; only pal1 is used by the agriculture maps.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# GNFR sector 3 - Agriculture: one choropleth per pollutant (a7..f7).
# The six original plots differed only in the fill column and pollutant
# label, so they are built by a single local helper; output is unchanged.
plot_agriculture_map <- function(fill_col, pollutant) {
  ggplot() +
    geom_sf(data = sf_data_agriculture,
            aes(fill = .data[[fill_col]])) +
    scale_fill_manual(values = pal1,
                      name = paste0(pollutant, " [t]")) +
    labs(x = NULL, y = NULL,
         title = paste0("Pollutant inventory spatialization - ", pollutant),
         subtitle = "GNFR sector: 3 - Agriculture",
         caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)") +
    theme(line = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank()) +
    # Municipality outlines drawn on top of the pollutant grid.
    geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
    coord_sf(datum = sf::st_crs(4326)) +
    annotation_scale(location = "bl", width_hint = 0.5) +
    annotation_north_arrow(location = "bl", which_north = "true",
                           pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                           style = north_arrow_fancy_orienteering)
}
a7 <- plot_agriculture_map("percent_class_NOx", "NOx")
print(a7)
b7 <- plot_agriculture_map("percent_class_SO2", "SO2")
print(b7)
c7 <- plot_agriculture_map("percent_class_PM10", "PM10")
print(c7)
d7 <- plot_agriculture_map("percent_class_PM2.5", "PM2.5")
print(d7)
e7 <- plot_agriculture_map("percent_class_NMVOC", "NMVOC")
print(e7)
f7 <- plot_agriculture_map("percent_class_NH3", "NH3")
print(f7)
# Combine the six agriculture maps into a 2x3 layout and export NH3.
# BUG FIX: the original passed a, b, c, d, e, f instead of a7..f7.
agriculture_map <- grid.arrange(a7, b7, c7, d7, e7, f7, ncol = 2, nrow = 3)
ggsave(plot = f7,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_3 - Agriculture_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR sector 5 - Waste: load the gridded inventory and classify each
# pollutant with 12 Fisher-Jenks classes.
sf_data_waste <- st_read(dsn = "Products/Sum-up_By_category/5 - Waste.gpkg")
classes.NOx <- classIntervals(sf_data_waste$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_waste$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_waste$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_waste$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_waste$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_waste$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps legend labels in fixed notation.
# (Idiom fix: TRUE spelled out instead of the reassignable shorthand T.)
sf_data_waste <- sf_data_waste %>%
  mutate(percent_class_NOx   = cut(NOx,   classes.NOx$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2   = cut(SO2,   classes.SO2$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10  = cut(PM10,  classes.PM10$brks,  include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3   = cut(NH3,   classes.NH3$brks,   include.lowest = TRUE, dig.lab = 7)
  )
pal1 <- viridisLite::inferno(12, direction = -1)
# NOTE(review): pal2 supplies only 3 colours although 12 classes are cut
# above; scale_fill_manual() errors if more than 3 classes actually appear
# in the data — verify the waste grids really populate at most 3 classes.
pal2 <- viridisLite::inferno(3, direction = -1)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# GNFR sector 5 - Waste: one choropleth per pollutant (a8..f8).
# The six original plots differed only in the fill column, pollutant label
# and palette (pal2 for NOx/SO2/PM10/PM2.5, pal1 for NMVOC/NH3), so they
# are built by a single local helper; output is unchanged.
plot_waste_map <- function(fill_col, pollutant, pal = pal1) {
  ggplot() +
    geom_sf(data = sf_data_waste,
            aes(fill = .data[[fill_col]])) +
    scale_fill_manual(values = pal,
                      name = paste0(pollutant, " [t]")) +
    labs(x = NULL, y = NULL,
         title = paste0("Pollutant inventory spatialization - ", pollutant),
         subtitle = "GNFR sector: 5 - Waste",
         caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)") +
    theme(line = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank()) +
    # Municipality outlines drawn on top of the pollutant grid.
    geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
    coord_sf(datum = sf::st_crs(4326)) +
    annotation_scale(location = "bl", width_hint = 0.5) +
    annotation_north_arrow(location = "bl", which_north = "true",
                           pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                           style = north_arrow_fancy_orienteering)
}
a8 <- plot_waste_map("percent_class_NOx", "NOx", pal = pal2)
print(a8)
b8 <- plot_waste_map("percent_class_SO2", "SO2", pal = pal2)
print(b8)
c8 <- plot_waste_map("percent_class_PM10", "PM10", pal = pal2)
print(c8)
d8 <- plot_waste_map("percent_class_PM2.5", "PM2.5", pal = pal2)
print(d8)
e8 <- plot_waste_map("percent_class_NMVOC", "NMVOC")
print(e8)
f8 <- plot_waste_map("percent_class_NH3", "NH3")
print(f8)
# Combine the six waste maps into a 2x3 layout and export NH3.
# BUG FIX: the original passed a, b, c, d, e, f instead of a8..f8.
waste_map <- grid.arrange(a8, b8, c8, d8, e8, f8, ncol = 2, nrow = 3)
ggsave(plot = f8,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_5 - Waste_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
|
/2030/Maps_preparing_for_report.R
|
no_license
|
pejovic/Spatialization
|
R
| false
| false
| 63,053
|
r
|
library(tidyverse)
library(sf)
library(readxl)
library(ggpubr)
library(ggfortify)
library(here)
library(knitr)
library(kableExtra)
library(DT)
library(mapview)
library(rgdal)
library(SerbianCyrLat)
library(stringr)
library(classInt)
library(viridis)
library(gridExtra)
library(ggsflabel)
library(ggspatial)
# Project-wide ggplot2 theme: theme_minimal plus grey major grid, light
# panel fill, dark-red facet strips with white bold text, a bottom-centred
# legend, and a thin grey panel border.
my_theme <- function(base_size = 10, base_family = "sans") {
  base <- theme_minimal(base_size = base_size, base_family = base_family)
  overrides <- theme(
    axis.text = element_text(size = 10),
    axis.text.x = element_text(angle = 0, vjust = 0.5, hjust = 0.5),
    axis.title = element_text(size = 12),
    panel.grid.major = element_line(color = "grey"),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill = "#fffcfc"),
    strip.background = element_rect(fill = "#820000", color = "#820000", size = 0.5),
    strip.text = element_text(face = "bold", size = 10, color = "white"),
    legend.position = "bottom",
    legend.justification = "center",
    legend.background = element_blank(),
    panel.border = element_rect(color = "grey30", fill = NA, size = 0.5)
  )
  base + overrides
}
# Apply the theme globally and define the shared accent colours.
theme_set(my_theme())
mycolors <- c("#f32440", "#2185ef", "#d421ef")
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Maps by category
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# NOTE(review): this re-definition of my_theme / theme_set / mycolors is an
# exact duplicate of the block defined earlier in the file; it has no extra
# effect and could be removed once confirmed nothing relies on re-running it.
my_theme <- function(base_size = 10, base_family = "sans"){
  theme_minimal(base_size = base_size, base_family = base_family) +
    theme(
      axis.text = element_text(size = 10),
      axis.text.x = element_text(angle = 0, vjust = 0.5, hjust = 0.5),
      axis.title = element_text(size = 12),
      panel.grid.major = element_line(color = "grey"),
      panel.grid.minor = element_blank(),
      panel.background = element_rect(fill = "#fffcfc"),
      strip.background = element_rect(fill = "#820000", color = "#820000", size =0.5),
      strip.text = element_text(face = "bold", size = 10, color = "white"),
      legend.position = "bottom",
      legend.justification = "center",
      legend.background = element_blank(),
      panel.border = element_rect(color = "grey30", fill = NA, size = 0.5)
    )
}
theme_set(my_theme())
mycolors=c("#f32440","#2185ef","#d421ef")
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR sector 1A1 - Energy: load the gridded inventory and classify each
# pollutant with 12 Fisher-Jenks classes.
sf_data_energy <- st_read(dsn = "Products/Sum-up_By_category/1A1 - Energy.gpkg")
classes.NOx <- classIntervals(sf_data_energy$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_energy$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_energy$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_energy$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_energy$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_energy$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps legend labels in fixed notation.
# (Idiom fix: TRUE spelled out instead of the reassignable shorthand T.)
sf_data_energy <- sf_data_energy %>%
  mutate(percent_class_NOx   = cut(NOx,   classes.NOx$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2   = cut(SO2,   classes.SO2$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10  = cut(PM10,  classes.PM10$brks,  include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3   = cut(NH3,   classes.NH3$brks,   include.lowest = TRUE, dig.lab = 7)
  )
pal1 <- viridisLite::inferno(12, direction = -1)
# NOTE(review): pal2 supplies only 3 colours although 12 classes are cut
# above; scale_fill_manual() errors if more than 3 classes appear in the
# data — verify the NH3 energy grid populates at most 3 classes.
pal2 <- viridisLite::inferno(3, direction = -1)
pal3 <- viridisLite::viridis(12, direction = -1)
pal4 <- viridisLite::viridis(12, direction = -1)
pal5 <- viridisLite::viridis(12, direction = -1)
pal6 <- viridisLite::viridis(12, direction = -1)
# Municipality boundaries used as an overlay on every map.
# NOTE(review): rgdal::readOGR comes from the retired rgdal package;
# consider migrating to sf::st_read once the encoding handling is verified.
opstine <- readOGR("Data/opstine/gadm36_SRB_2.shp",
                   use_iconv=TRUE,
                   encoding = "UTF-8")
sf_opstine <- st_as_sf(opstine)
#+ include = FALSE
# GNFR sector 1A1 - Energy: one choropleth per pollutant (a1..f1).
# The six original plots differed only in the fill column, pollutant label,
# palette (pal2 for NH3) and a blanked axis title on the first two, so they
# are built by a single local helper; output is unchanged.
plot_energy_map <- function(fill_col, pollutant, pal = pal1, blank_axis_title = FALSE) {
  # a1 and b1 additionally blanked axis.title in the original code.
  map_theme <- if (blank_axis_title) {
    theme(line = element_blank(),
          axis.title = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank())
  } else {
    theme(line = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank())
  }
  ggplot() +
    geom_sf(data = sf_data_energy,
            aes(fill = .data[[fill_col]])) +
    scale_fill_manual(values = pal,
                      name = paste0(pollutant, " [t]")) +
    labs(x = NULL, y = NULL,
         title = paste0("Pollutant inventory spatialization - ", pollutant),
         subtitle = "GNFR sector: 1A1 - Energy",
         caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)") +
    map_theme +
    # Municipality outlines drawn on top of the pollutant grid.
    geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
    coord_sf(datum = sf::st_crs(4326)) +
    annotation_scale(location = "bl", width_hint = 0.5) +
    annotation_north_arrow(location = "bl", which_north = "true",
                           pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                           style = north_arrow_fancy_orienteering)
}
a1 <- plot_energy_map("percent_class_NOx", "NOx", blank_axis_title = TRUE)
print(a1)
b1 <- plot_energy_map("percent_class_SO2", "SO2", blank_axis_title = TRUE)
print(b1)
c1 <- plot_energy_map("percent_class_PM10", "PM10")
print(c1)
d1 <- plot_energy_map("percent_class_PM2.5", "PM2.5")
print(d1)
e1 <- plot_energy_map("percent_class_NMVOC", "NMVOC")
print(e1)
f1 <- plot_energy_map("percent_class_NH3", "NH3", pal = pal2)
print(f1)
# Combine the six energy maps into a 2x3 layout and export the NMVOC map.
# (This section already referenced the correct a1..f1 objects.)
energy_map <- grid.arrange(a1, b1, c1, d1, e1, f1, ncol = 2, nrow = 3)
energy_out_file <- "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A1 - Energy_NMVOC.jpg"
ggsave(plot = e1, filename = energy_out_file,
       width = 30, height = 30, units = "cm",
       device = "jpeg", dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# GNFR sector 1A2 - Industry: load the gridded inventory and classify each
# pollutant with 12 Fisher-Jenks classes.
sf_data_industry <- st_read(dsn = "Products/Sum-up_By_category/1A2 - Industry.gpkg")
classes.NOx <- classIntervals(sf_data_industry$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_industry$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_industry$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_industry$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_industry$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_industry$NH3, n = 12, style = "fisher")
# dig.lab = 7 keeps legend labels in fixed notation.
# (Idiom fix: TRUE spelled out instead of the reassignable shorthand T.)
sf_data_industry <- sf_data_industry %>%
  mutate(percent_class_NOx   = cut(NOx,   classes.NOx$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2   = cut(SO2,   classes.SO2$brks,   include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10  = cut(PM10,  classes.PM10$brks,  include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3   = cut(NH3,   classes.NH3$brks,   include.lowest = TRUE, dig.lab = 7)
  )
# Colour palettes; only pal1 is used by the industry maps (pal2..pal6 are
# defined with 30 colours here but never referenced below).
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(30)
pal3 <- viridisLite::viridis(30)
pal4 <- viridisLite::viridis(30)
pal5 <- viridisLite::viridis(30)
pal6 <- viridisLite::viridis(30)
#+ include = FALSE
# GNFR sector 1A2 - Industry: one choropleth per pollutant (a2..f2).
# The six original plots differed only in the fill column and pollutant
# label, so they are built by a single local helper; output is unchanged.
plot_industry_map <- function(fill_col, pollutant) {
  ggplot() +
    geom_sf(data = sf_data_industry,
            aes(fill = .data[[fill_col]])) +
    scale_fill_manual(values = pal1,
                      name = paste0(pollutant, " [t]")) +
    labs(x = NULL, y = NULL,
         title = paste0("Pollutant inventory spatialization - ", pollutant),
         subtitle = "GNFR sector: 1A2 - Industry",
         caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)") +
    theme(line = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank()) +
    # Municipality outlines drawn on top of the pollutant grid.
    geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
    coord_sf(datum = sf::st_crs(4326)) +
    annotation_scale(location = "bl", width_hint = 0.5) +
    annotation_north_arrow(location = "bl", which_north = "true",
                           pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                           style = north_arrow_fancy_orienteering)
}
a2 <- plot_industry_map("percent_class_NOx", "NOx")
print(a2)
b2 <- plot_industry_map("percent_class_SO2", "SO2")
print(b2)
c2 <- plot_industry_map("percent_class_PM10", "PM10")
print(c2)
d2 <- plot_industry_map("percent_class_PM2.5", "PM2.5")
print(d2)
e2 <- plot_industry_map("percent_class_NMVOC", "NMVOC")
print(e2)
f2 <- plot_industry_map("percent_class_NH3", "NH3")
print(f2)
# Arrange the six 1A2 - Industry pollutant maps on one 2 x 3 page.
# Fix: the original passed a, b, c, d, e, f -- the plot objects of an earlier
# sector section -- instead of this section's a2..f2 (a copy-paste slip that
# re-rendered the wrong sector).
industry_map <- grid.arrange(a2, b2, c2, d2, e2, f2, ncol = 2, nrow = 3)
# Export the NH3 map at print quality (30 x 30 cm, 600 dpi JPEG).
ggsave(plot = f2,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A2 - Industry_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR 1A3 - Transport: data prep ----------------------------------------
# Read the gridded sector emissions and derive, for every pollutant, a
# 12-class Fisher-Jenks (natural breaks) binning used as the fill variable
# in the choropleth maps below.
sf_data_transport <- st_read(dsn = "Products/Sum-up_By_category/1A3 - Transport.gpkg")
classes.NOx <- classIntervals(sf_data_transport$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_transport$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_transport$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_transport$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_transport$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_transport$NH3, n = 12, style = "fisher")
# Bin each pollutant into its intervals.  Fix: dig.lab = 7 was omitted in this
# section only (every other sector passes it), which left the Transport legend
# labels at cut()'s default of 3 significant digits, inconsistent with the
# rest of the maps.  TRUE is spelled out instead of the reassignable T.
sf_data_transport <- sf_data_transport %>%
  mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = TRUE, dig.lab = 7),
         percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = TRUE, dig.lab = 7)
  )
# Colour palettes: pal1 (reversed inferno) is the only one used by the plots;
# pal2..pal6 are kept for parity with the other sector sections.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# ---- GNFR 1A3 - Transport: one choropleth per pollutant (a3..f3) ------------
# Each plot fills the 0.05-degree grid cells by the 12-class Fisher bins
# computed above (palette pal1), overlays municipal boundaries (sf_opstine),
# and adds a scale bar and north arrow bottom-left.  Layer order matters:
# boundaries are drawn on top of the filled cells.
# NOTE(review): "Teritory" in the captions is a typo for "Territory" -- it is
# rendered output (a runtime string), so it is left unchanged here.
# NOx map.
a3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_NOx)) +
scale_fill_manual(values = pal1,
name = "NOx [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NOx",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# SO2 map.
b3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_SO2)) +
scale_fill_manual(values = pal1,
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - SO2",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# PM10 map.
c3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_PM10)) +
scale_fill_manual(values = pal1,
name = "PM10 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM10",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# PM2.5 map.
d3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# NMVOC map.
e3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# NH3 map.
f3<-ggplot() +
geom_sf(data = sf_data_transport,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 1A3 - Transport",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
# Arrange the six 1A3 - Transport pollutant maps on one 2 x 3 page.
# Fix: the original passed a, b, c, d, e, f -- an earlier sector's plots --
# instead of this section's a3..f3 (copy-paste slip).
transport_map <- grid.arrange(a3, b3, c3, d3, e3, f3, ncol = 2, nrow = 3)
# Export the NH3 map at print quality (30 x 30 cm, 600 dpi JPEG).
ggsave(plot = f3,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A3 - Transport_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR 1A4 - Residential-Tertiary: data prep -----------------------------
# Read the gridded sector emissions and derive, for every pollutant, a
# 12-class Fisher-Jenks (natural breaks) binning used as the fill variable
# in the choropleth maps below.
sf_data_residential <- st_read(dsn = "Products/Sum-up_By_category/1A4 - Residential-Tertiary.gpkg")

# Helper: 12-bin Fisher natural-breaks intervals for one pollutant vector.
fisher_breaks12 <- function(v) classIntervals(v, n = 12, style = "fisher")
# Helper: cut a pollutant into its class intervals; dig.lab = 7 keeps the
# interval labels in fixed (non-scientific) notation.
cut_to_class <- function(v, cls) cut(v, cls$brks, include.lowest = TRUE, dig.lab = 7)

classes.NOx   <- fisher_breaks12(sf_data_residential$NOx)
classes.SO2   <- fisher_breaks12(sf_data_residential$SO2)
classes.PM10  <- fisher_breaks12(sf_data_residential$PM10)
classes.PM2.5 <- fisher_breaks12(sf_data_residential$PM2.5)
classes.NMVOC <- fisher_breaks12(sf_data_residential$NMVOC)
classes.NH3   <- fisher_breaks12(sf_data_residential$NH3)

sf_data_residential <- sf_data_residential %>%
  mutate(
    percent_class_NOx   = cut_to_class(NOx,   classes.NOx),
    percent_class_SO2   = cut_to_class(SO2,   classes.SO2),
    percent_class_PM10  = cut_to_class(PM10,  classes.PM10),
    percent_class_PM2.5 = cut_to_class(PM2.5, classes.PM2.5),
    percent_class_NMVOC = cut_to_class(NMVOC, classes.NMVOC),
    percent_class_NH3   = cut_to_class(NH3,   classes.NH3)
  )

# Colour palettes: pal1 (reversed inferno) is the only one the plots use;
# pal2..pal6 are kept for parity with the other sector sections.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# ---- GNFR 1A4 - Residential-Tertiary: one choropleth per pollutant (a4..f4) -
# Each plot fills the 0.05-degree grid cells by the 12-class Fisher bins
# computed above (palette pal1), overlays municipal boundaries (sf_opstine),
# and adds a scale bar and north arrow bottom-left; each is then printed.
# NOTE(review): "Teritory" in the captions is a typo for "Territory" -- it is
# rendered output (a runtime string), so it is left unchanged here.
# NOx map.
a4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_NOx)) +
scale_fill_manual(values = pal1,
name = "NOx [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NOx",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
a4
# SO2 map.
b4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_SO2)) +
scale_fill_manual(values = pal1,
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - SO2",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
b4
# PM10 map.
c4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_PM10)) +
scale_fill_manual(values = pal1,
name = "PM10 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM10",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
c4
# PM2.5 map.
d4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d4
# NMVOC map.
e4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e4
# NH3 map.
f4<-ggplot() +
geom_sf(data = sf_data_residential,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 1A4 - Residential-Tertiary",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f4
# Arrange the six 1A4 - Residential-Tertiary pollutant maps on one 2 x 3 page.
# Fix: the original passed a, b, c, d, e, f -- an earlier sector's plots --
# instead of this section's a4..f4 (copy-paste slip).
residential_map <- grid.arrange(a4, b4, c4, d4, e4, f4, ncol = 2, nrow = 3)
# Export the NH3 map at print quality (30 x 30 cm, 600 dpi JPEG).
ggsave(plot = f4,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1A4 - Residential-Tertiary_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR 1B - Fugitive emissions: data prep --------------------------------
# Read the gridded sector emissions and derive, for every pollutant, a
# 12-class Fisher-Jenks (natural breaks) binning used as the fill variable
# in the choropleth maps below.
sf_data_fugitive <- st_read(dsn = "Products/Sum-up_By_category/1B - Fugitive emissions.gpkg")
classes.NOx <- classIntervals(sf_data_fugitive$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_fugitive$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_fugitive$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_fugitive$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_fugitive$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_fugitive$NH3, n = 12, style = "fisher")
# Bin each pollutant; dig.lab = 7 keeps legend labels in fixed notation.
sf_data_fugitive <- sf_data_fugitive %>%
mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = T,dig.lab=7),
percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = T,dig.lab=7),
percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = T,dig.lab=7),
percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = T,dig.lab=7),
percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = T,dig.lab=7),
percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = T,dig.lab=7)
)
# Colour palettes: pal1 (reversed inferno) is the only one the plots use.
pal1 <- viridisLite::inferno(12, direction = -1)
# NOTE(review): pal2 here gets 2 inferno colours, unlike the 12-colour viridis
# pal2 in every other sector section.  pal2 is unused below so this has no
# visible effect, but it looks like a slip -- confirm before relying on it.
pal2 <- viridisLite::inferno(2, direction = -1)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
#+ include = FALSE
# ---- GNFR 1B - Fugitive emissions: one choropleth per pollutant (a5..f5) ----
# Each plot fills the 0.05-degree grid cells by the 12-class Fisher bins
# computed above (palette pal1), overlays municipal boundaries (sf_opstine),
# and adds a scale bar and north arrow bottom-left; each is then printed.
# NOTE(review): "Teritory" in the captions is a typo for "Territory" -- it is
# rendered output (a runtime string), so it is left unchanged here.
# NOx map.
a5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_NOx)) +
scale_fill_manual(values = pal1,
name = "NOx [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NOx",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
a5
# SO2 map.
b5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_SO2)) +
scale_fill_manual(values = pal1,
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - SO2",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
b5
# PM10 map.
c5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_PM10)) +
scale_fill_manual(values = pal1,
name = "PM10 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM10",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
c5
# PM2.5 map.
d5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d5
# NMVOC map.
e5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e5
# NH3 map.
f5<-ggplot() +
geom_sf(data = sf_data_fugitive,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 1B - Fugitive emissions",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f5
# Arrange the six 1B - Fugitive emissions pollutant maps on one 2 x 3 page.
# Fix: the original passed a, b, c, d, e, f -- an earlier sector's plots --
# instead of this section's a5..f5 (copy-paste slip).
fugitive_map <- grid.arrange(a5, b5, c5, d5, e5, f5, ncol = 2, nrow = 3)
# Export the NH3 map at print quality (30 x 30 cm, 600 dpi JPEG).
ggsave(plot = f5,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_1B - Fugitive emissions_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR 2 - Other processes: data prep ------------------------------------
# Read the gridded sector emissions and derive, for every pollutant, a
# 12-class Fisher-Jenks (natural breaks) binning used as the fill variable
# in the choropleth maps below.
sf_data_other <- st_read(dsn = "Products/Sum-up_By_category/2 - Other processes.gpkg")
classes.NOx <- classIntervals(sf_data_other$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_other$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_other$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_other$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_other$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_other$NH3, n = 12, style = "fisher")
# Bin each pollutant; dig.lab = 7 keeps legend labels in fixed notation.
sf_data_other <- sf_data_other %>%
mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = T,dig.lab=7),
percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = T,dig.lab=7),
percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = T,dig.lab=7),
percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = T,dig.lab=7),
percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = T,dig.lab=7),
percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = T,dig.lab=7)
)
# Colour palettes: pal1 (reversed inferno) is the only one the plots use;
# pal2..pal6 are kept for parity with the other sector sections.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# ---- GNFR 2 - Other processes: one choropleth per pollutant (a6..f6) --------
# Each plot fills the 0.05-degree grid cells by the 12-class Fisher bins
# computed above (palette pal1), overlays municipal boundaries (sf_opstine),
# and adds a scale bar and north arrow bottom-left; each is then printed.
# NOTE(review): "Teritory" in the captions is a typo for "Territory" -- it is
# rendered output (a runtime string), so it is left unchanged here.
# NOx map.
a6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_NOx)) +
scale_fill_manual(values = pal1,
name = "NOx [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NOx",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
a6
# SO2 map.
b6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_SO2)) +
scale_fill_manual(values = pal1,
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - SO2",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
b6
# PM10 map.
c6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_PM10)) +
scale_fill_manual(values = pal1,
name = "PM10 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM10",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
c6
# PM2.5 map.
d6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d6
# NMVOC map.
e6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e6
# NH3 map.
f6<-ggplot() +
geom_sf(data = sf_data_other,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 2 - Other processes",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f6
# Arrange the six 2 - Other processes pollutant maps on one 2 x 3 page.
# Fix: the original passed a, b, c, d, e, f -- an earlier sector's plots --
# instead of this section's a6..f6 (copy-paste slip).
other_map <- grid.arrange(a6, b6, c6, d6, e6, f6, ncol = 2, nrow = 3)
# Export the PM2.5 map (d6) at print quality (30 x 30 cm, 600 dpi JPEG).
ggsave(plot = d6,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_2 - Other processes_PM2.5.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR 3 - Agriculture: data prep ----------------------------------------
# Read the gridded sector emissions and derive, for every pollutant, a
# 12-class Fisher-Jenks (natural breaks) binning used as the fill variable
# in the choropleth maps below.
sf_data_agriculture <- st_read(dsn = "Products/Sum-up_By_category/3 - Agriculture.gpkg")
classes.NOx <- classIntervals(sf_data_agriculture$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_agriculture$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_agriculture$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_agriculture$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_agriculture$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_agriculture$NH3, n = 12, style = "fisher")
# Bin each pollutant; dig.lab = 7 keeps legend labels in fixed notation.
sf_data_agriculture <- sf_data_agriculture %>%
mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = T,dig.lab=7),
percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = T,dig.lab=7),
percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = T,dig.lab=7),
percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = T,dig.lab=7),
percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = T,dig.lab=7),
percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = T,dig.lab=7)
)
# Colour palettes: pal1 (reversed inferno) is the only one the plots use;
# pal2..pal6 are kept for parity with the other sector sections.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::viridis(12)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# ---- GNFR 3 - Agriculture: one choropleth per pollutant (a7..f7) ------------
# Each plot fills the 0.05-degree grid cells by the 12-class Fisher bins
# computed above (palette pal1), overlays municipal boundaries (sf_opstine),
# and adds a scale bar and north arrow bottom-left; each is then printed.
# NOTE(review): "Teritory" in the captions is a typo for "Territory" -- it is
# rendered output (a runtime string), so it is left unchanged here.
# NOx map.
a7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_NOx)) +
scale_fill_manual(values = pal1,
name = "NOx [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NOx",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
a7
# SO2 map.
b7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_SO2)) +
scale_fill_manual(values = pal1,
name = "SO2 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - SO2",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
b7
# PM10 map.
c7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_PM10)) +
scale_fill_manual(values = pal1,
name = "PM10 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM10",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
c7
# PM2.5 map.
d7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_PM2.5)) +
scale_fill_manual(values = pal1,
name = "PM2.5 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - PM2.5",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
d7
# NMVOC map.
e7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_NMVOC)) +
scale_fill_manual(values = pal1,
name = "NMVOC [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NMVOC",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
e7
# NH3 map.
f7<-ggplot() +
geom_sf(data = sf_data_agriculture,
aes(fill = percent_class_NH3)) +
scale_fill_manual(values = pal1,
name = "NH3 [t]") +
labs(x = NULL, y = NULL,
title = "Pollutant inventory spatialization - NH3",
subtitle = "GNFR sector: 3 - Agriculture",
caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)")+
theme(line = element_blank(),
#axis.text = element_blank(),
legend.position = "bottom", ###################### legend
panel.background = element_blank()) +
geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6)+
coord_sf(datum = sf::st_crs(4326))+
annotation_scale(location = "bl", width_hint = 0.5) +
annotation_north_arrow(location = "bl", which_north = "true",
pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
style = north_arrow_fancy_orienteering)
f7
# Assemble the six agriculture pollutant maps into one 2x3 panel.
# Fix: the original called grid.arrange(a, b, c, d, e, f) — stale names from an
# earlier section (or undefined); the agriculture plots are named a7..f7.
# TODO confirm a7 and b7 are the NOx/SO2 agriculture maps built earlier in the file.
agriculture_map <- grid.arrange(a7, b7, c7, d7, e7, f7, ncol = 2, nrow = 3)
# Only the NH3 map is written to disk here.
ggsave(plot = f7,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_3 - Agriculture_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# ---- GNFR sector 5 (Waste): read the spatialized layer and build class columns ----
sf_data_waste <- st_read(dsn = "Products/Sum-up_By_category/5 - Waste.gpkg")
# Fisher (natural-breaks) class intervals, 12 classes per pollutant.
classes.NOx <- classIntervals(sf_data_waste$NOx, n = 12, style = "fisher")
classes.SO2 <- classIntervals(sf_data_waste$SO2, n = 12, style = "fisher")
classes.PM10 <- classIntervals(sf_data_waste$PM10, n = 12, style = "fisher")
classes.PM2.5 <- classIntervals(sf_data_waste$PM2.5, n = 12, style = "fisher")
classes.NMVOC <- classIntervals(sf_data_waste$NMVOC, n = 12, style = "fisher")
classes.NH3 <- classIntervals(sf_data_waste$NH3, n = 12, style = "fisher")
# Bin each pollutant into its class intervals; the resulting factor columns are
# used as the discrete map fill. (Style note: prefer TRUE over T for clarity.)
sf_data_waste <- sf_data_waste %>%
mutate(percent_class_NOx = cut(NOx, classes.NOx$brks, include.lowest = T,dig.lab=7),
percent_class_SO2 = cut(SO2, classes.SO2$brks, include.lowest = T,dig.lab=7),
percent_class_PM10 = cut(PM10, classes.PM10$brks, include.lowest = T,dig.lab=7),
percent_class_PM2.5 = cut(PM2.5, classes.PM2.5$brks, include.lowest = T,dig.lab=7),
percent_class_NMVOC = cut(NMVOC, classes.NMVOC$brks, include.lowest = T,dig.lab=7),
percent_class_NH3 = cut(NH3, classes.NH3$brks, include.lowest = T,dig.lab=7)
)
# Colour palettes. NOTE(review): pal2 has only 3 colours while the class columns
# above were cut into 12 intervals — confirm where pal2 is meant to be used.
# pal3..pal6 are identical viridis(12) palettes and appear unused in this section.
pal1 <- viridisLite::inferno(12, direction = -1)
pal2 <- viridisLite::inferno(3, direction = -1)
pal3 <- viridisLite::viridis(12)
pal4 <- viridisLite::viridis(12)
pal5 <- viridisLite::viridis(12)
pal6 <- viridisLite::viridis(12)
# Choropleth maps of the spatialized emission inventory for GNFR sector 5 (Waste).
# The six per-pollutant maps differ only in fill column, palette and labels, so
# the shared layout is factored into one helper (redefined here so this section
# is self-contained; the definition is identical to the one used elsewhere).
#
# sf_data   : sf polygon layer carrying a percent_class_<pollutant> factor column
# fill_col  : name of that factor column (string)
# pal       : colour vector for scale_fill_manual (one colour per class)
# pollutant : pollutant label used in the legend name and the title
# sector    : GNFR sector label used in the subtitle
# Returns a ggplot object (caller autoprints it).
plot_sector_map <- function(sf_data, fill_col, pal, pollutant, sector) {
  ggplot() +
    geom_sf(data = sf_data, aes(fill = .data[[fill_col]])) +
    scale_fill_manual(values = pal, name = paste0(pollutant, " [t]")) +
    labs(x = NULL, y = NULL,
         title = paste("Pollutant inventory spatialization -", pollutant),
         subtitle = paste("GNFR sector:", sector),
         caption = "Spatial resolution 0.05°x0.05°, Teritory of the Republic of Serbia\n UBFCE (2020)") +
    theme(line = element_blank(),
          legend.position = "bottom",
          panel.background = element_blank()) +
    # municipality borders drawn on top of the choropleth
    geom_sf(data = sf_opstine, fill = NA, colour = "black", lwd = 0.6) +
    coord_sf(datum = sf::st_crs(4326)) +
    annotation_scale(location = "bl", width_hint = 0.5) +
    annotation_north_arrow(location = "bl", which_north = "true",
                           pad_x = unit(0.75, "in"), pad_y = unit(0.5, "in"),
                           style = north_arrow_fancy_orienteering)
}
# NOTE(review): a8..d8 use pal2, which has only 3 colours, while the
# percent_class_* columns were cut into 12 intervals; scale_fill_manual()
# errors when more classes occur than colours supplied — confirm pal2 is
# intended here (the palette choice of the original is preserved).
a8 <- plot_sector_map(sf_data_waste, "percent_class_NOx", pal2, "NOx", "5 - Waste")
a8
b8 <- plot_sector_map(sf_data_waste, "percent_class_SO2", pal2, "SO2", "5 - Waste")
b8
c8 <- plot_sector_map(sf_data_waste, "percent_class_PM10", pal2, "PM10", "5 - Waste")
c8
d8 <- plot_sector_map(sf_data_waste, "percent_class_PM2.5", pal2, "PM2.5", "5 - Waste")
d8
e8 <- plot_sector_map(sf_data_waste, "percent_class_NMVOC", pal1, "NMVOC", "5 - Waste")
e8
f8 <- plot_sector_map(sf_data_waste, "percent_class_NH3", pal1, "NH3", "5 - Waste")
f8
# Assemble the six waste pollutant maps into one 2x3 panel.
# Fix: the original called grid.arrange(a, b, c, d, e, f) — stale names from an
# earlier section; the waste plots built immediately above are a8..f8.
waste_map <- grid.arrange(a8, b8, c8, d8, e8, f8, ncol = 2, nrow = 3)
# Only the NH3 map is written to disk here.
ggsave(plot = f8,
       filename = "Maps/FINAL2015/Maps_per_each_GNFR_sector/Map_5 - Waste_NH3.jpg",
       width = 30,
       height = 30,
       units = "cm",
       device = "jpeg",
       dpi = 600)
|
# 1) vectorization ####
# We are talking about vectorization, and how scalars do not exist in R.
# Everything is a vector, even if you might not think it is!
is.vector(5)
is.vector(5.002)
# scalars in R are vectors of length 1. Keep this in mind throughout the seminar!
# a vectorized function
examplevect <- c(1:10)
sqrt(examplevect)
# a vectorized operator
examplevect^2
# not only this , but also
(x <- 1:6)
(y<- 1:6)
x^y
# what if length(x) != length(y)? Recycling happens!!! CAREFUL!
(x <- 1:6)
(y<- 1:3)
x^y
# can you see what happened??
# Recycling is "silent" in R... if all goes well*, R doesn't even tell you that it is doing it.
# * in the case of recycling, well means that the longer vector length is a multiple of shorter vector length
# if this isn't the case...
(x <- 1:6)
(y<- 1:4)
x^y
# a warning is issued (6 is not a multiple of 4). The operation is still performed though!
# (recall the difference between a warning and an error)
# this isn't inherently good or bad (although it drives some people crazy). It is just the
# way R behaves, and we need to be aware of that (hence the warnings.)
# if R wasn't vectorized... what would we do??
# if R wasn't vectorized... we would use a for loop.
# 2) "for" loops ####
# For loops are iterations of the same action or operation
# over each element of a vector.
# how do they work?
# for counter over vector {command}
for (i in 1:10){
print(paste("I have", i, "eggs"))
}
# and an example with a character vector
animals <- c("dog","cat","unicorn")
for (i in animals){
print(i)
}
# 2a) basic for loop examples ####
# for our square example
for (i in 1:length(examplevect)) print(examplevect[i]^2)
# compare with the vectorized version
examplevect^2
# you have to agree that examplevect^2 is a lot more concise... it is also faster,
# although you might not notice with an "average" (or very short) length of data.
# 2b) storing values and memory ####
# for loops to STORE values. in this case, memory pre-allocation is very important!
# take home message: DO NOT GROW VECTORS.
# importance of pre-allocating all the memory we need. We will see how much difference
# this can make in terms of speed by using the handy function system.time().
# We measure how long it takes
# to create a vector made of a sequence of numbers (from 1 to 100000)
# 1. we cannot start from nothing, we said. Try the following:
# (this line fails on purpose: 'vec' has not been created yet)
for(i in 1:100000) vec[i] <- i
# we have said that we must create a "container" for our values beforehand.
# in this case we will use a vector.
# if we start with an empty vector
vec <- vector()
# by all means, let's have a look at what we have created!
head(vec)
# it is an "empty" logical vector, i.e. a vector of length zero.
# and now for the performance testing! Let's fill it up.
system.time(for(i in 1:100000) vec[i] <- i)
# it took around 10 seconds on my laptop... not very good! Can you guess what our command did?
# On the other hand, if we start with a vector filled with anything, but of the right size:
vec <- numeric(length=100000)
head(vec)
# in this case, it is filled with zeroes. The important thing is not what is inside
# the vector, but its size!
# Let's see how long it takes this time.
system.time(for(i in 1:100000) vec[i] <- i)
# it took less than one second! and let's check that the command was executed
# and actually changed the content of our vec
head(vec); tail(vec)
# it doesn't matter if you create a numeric or a logical vector, as long as it is not
# an empty one. the following is as quick as above.
vec <- rep(NA,200000)
head(vec)
system.time(for(i in 1:100000) vec[i] <- i)
head(vec); tail(vec)
vec <- vec[!is.na(vec)]
# of course, the R way of doing this all is:
# VECTORIZATION!
system.time(vec <- 1:100000)
# we will use for loops for quick plotting of multiple lines of a dataframe
## NB. for the following exercise, we will be using an example file called "traces".
# This is a binary compressed file. It is simply called "traces" in the Dropbox folder.
# It does not have any extension (i.e. no "traces.zip", "traces.xls" or similar).
# Please do not try to "open with" anything (text editor, excel), because it won't work.
# Simply DOWNLOAD IT TO YOUR (current R session/ R project) WORKING DIRECTORY, and we will
# load it within our R workspace with the command readRDS().
# 2c) for loops for plotting ####
traces <- readRDS("traces")
View(traces)
# i need to create an x axis
xaxis <- 1:33001
# I decide to plot 3 traces
linestoplot <- c("SE1", "SE7", "SE61")
# with certain colors (4 colours for 3 selected traces; the 4th is spare)
cols <- c("red","blue","forestgreen","gray")
# draw an invisible (white) base plot first, then overlay each selected trace
plot(xaxis, traces["SE1",], type = "l", xlab = "sampling interval", ylab = "uV", col='white')
for(i in linestoplot) {
lines(xaxis, traces[i,], col = cols[which(linestoplot == i)])
print(i)
print(which(linestoplot == i))
}
legend("topright", legend = linestoplot, col = cols, lwd = 1, bty = "n")
# 3) apply family ####
# the R "correspondent" of for loops is the apply family of functions.
# using apply improves readability and clarity, and the code will therefore be
# easier to maintain. This is also part of optimization!
# 3a) apply ####
# Example 1. I want to find the maximum intensity for each of my 7 traces.
# "where" are my traces? In the dataframe traces, and
# they are "in the rows". row 1 = trace of SE1, row 4 = trace of SE8, etc
# each column is a value of intensity. Therefore to find the maximum intensity per trace
# I want to APPLY the function MAX to MARGIN=1 (across rows).
# apply() syntax: apply(X, MARGIN, FUN, ...) [have a look at help too.]
# for our example 1 I write:
apply(traces, MARGIN=1, max)
# that's it!
# Example 2. let's create a fake data-set of extremely skewed values... let's pretend we
# collected measures of pollutants from a body of water, which happened to be quite clean.
metals <- as.data.frame(replicate(5, rgamma(200, shape=2)))
colnames(metals) <- c("Al","Cu","Fe","Zn","Pb")
# let's have a look at how each column looks... with a for loop!
for (i in colnames(metals)){
Sys.sleep(0.1);hist(metals[,i], main=i)
}
# skim through the plots we have produced with the blue arrows.
# let's see if the variables, as they are, would pass a normality test...
# let's use apply again!
# (apply() coerces the data.frame to a matrix — fine here, all columns are numeric)
apply(metals, MARGIN=2, shapiro.test)
# and if I wanted to store the result of my call to apply...
shtest <- apply(metals, MARGIN=2, shapiro.test)
# check the structure of the object
str(shtest)
# it is a list! I can access its elements in various ways, for instance...
shtest$Al
# if I wanted to correct the skew, to try and "normalize" the data, I could do
# so with sapply
# 3b) sapply ####
# sapply and lapply apply functions to ELEMENTS of X.
logmetals <- sapply(metals, log10)
colnames(logmetals) <- c("Al","Cu","Fe","Zn","Pb")
# let's have a look at how each column looks NOW = in logmetals rather than metals!
for (i in colnames(logmetals)){
Sys.sleep(0.1);hist(logmetals[,i], main=i)
}
# will the non normality have been corrected??
apply(logmetals, MARGIN=2, shapiro.test)
# the beauty of the apply family is that where we put max() or sqrt(), you can put ANY
# function, including your own!
### exercise 1 ####
# calculate the square root of sepal length from iris dataset,
# using the different techniques we just learned.
# load iris (built-in dataset shipped with R)
data(iris)
View(iris)
# phase zero:
# please subset iris first, to get rid of the last column, i.e. column 5, or Species
# which, being non-numeric, might throw an error at you if you try to apply functions
# to the whole dataframe....
# use any of the techniques Simon introduced last week!
# as long as you get rid of the last column!
# once you have done this, proceed to 1!
# 1. calculate square root exploiting R's vectorization
# --- write your answer-code below ---
# 2. calculate square root using one of the apply functions
# note the difference between apply and sapply (sapply is ~ to lapply)
# apply applies a function to MARGINS of X
# sapply and lapply apply functions to elements of X.
# which one would you use in this case??
# if in doubt ask!
# --- write your answer-code below ---
# 3. calculate square root using a for loop
# --- write your answer-code below ---
# solutions are in the ex1_sessionR_answer.R script!
##########################################################################
##########################################################################
# Example 2: classifying the sepal length into size categories
# I want to create a factor variable in iris that separates 'small' sepal
# lengths and 'large' sepal lengths.
# 1st of all: I decide that the mean will be the threshold
Mean.Sepal.Length <- mean(iris$Sepal.Length)
Mean.Sepal.Length
# I create a storage ("container") vector:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# Tip: if you have a hard time following what's going on, take the elements
# of code one by one and run them in the console.
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size[i] <- "small"
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# now if I want to merge this to the iris dataset:
iris$Sepal.Size <- Sepal.Size
View(iris)
#~~~~~~~~~~~
# Debugging
#~~~~~~~~~~~
# Same loop, but with an error:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size <- "small" #### I forgot the '[i]'
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# oops. R always waits for the brackets to be opened and closed, so if I want
# to run step by step, I have to do it manually and skip the parts with brackets.
# re-set everything to initial values:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# Let's see what happens in the first step, when i = 1
i=1
iris$Sepal.Length[i] <= Mean.Sepal.Length
# TRUE, so I run what is in the 'if' bracket:
Sepal.Size <- "small"
Sepal.Size
# so I re-wrote on top of the whole Sepal.Size vector, erasing all of what happened before
# re-set everything to initial values again:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# fix it:
Sepal.Size[i] <- "small"
Sepal.Size
# Check next step if you want:
i=2
iris$Sepal.Length[i] <= Mean.Sepal.Length # TRUE again
Sepal.Size[i] <- "small"
Sepal.Size
# everything seems normal, so rerun the whole loop
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size[i] <- "small"
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# EXERCISE
#~~~~~~~~~
# Create another classification using petal length : if the length
# is <=2, the class is “Small”, if the length is >2 and <=6, the class
# is “Large” and if the length >6 the class is “Extra Large”.
#############################################################################
# FUNCTIONS
#############################################################################
# Let's see what an unfolded function looks like:
read.table
# plenty of ifs and elses and fors! Just what we were doing...
# What does the help menu say?
?read.table
# Using the function outputs, example with lm()
model<-lm(Sepal.Length~Petal.Length, data=iris)
model
summary(model)
model$residuals
model$fitted
# etc
## Examples section from the lm() help page
require(graphics)
## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
## Page 9: Plant Weight Data.
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
anova(lm.D9)
summary(lm.D90)
opar <- par(mfrow = c(2,2), oma = c(0, 0, 1.1, 0))
plot(lm.D9, las = 1) # Residuals, Fitted, ...
par(opar)
# Writing your own functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Difference in ranks between two vectors: rank(m) - rank(n).
# (Also demonstrates that arguments and locals live in the function's own
# environment, not the global one.)
new.func <- function(m, n) {
  rank(m) - rank(n)
}
# Notice how m and n do not appear in the global environment??
# Calling our own function is no different from calling a built-in one:
new.func(iris$Sepal.Length, iris$Petal.Length) # works just like a regular function
rank.diff<-new.func(iris$Sepal.Length, iris$Petal.Length) # I can store the result somewhere
barplot(rank.diff) # plot it... etc
# Now, what if I transform the iris 'for' loop that creates 'small' and 'large'
# categories into a function?
#
# size.factor: classify each value of x as "small" (<= mean(x)) or
# "large" (> mean(x)).
#
# x : numeric vector
# Returns a character vector the same length as x.
#
# Improvement over the original: the element-by-element for loop with an
# if/else inside is replaced by a single vectorized ifelse() — idiomatic R,
# identical output for complete numeric input (and, unlike the loop, it does
# not error on NA values: they propagate as NA).
size.factor <- function(x)
{
  ifelse(x <= mean(x), "small", "large")
}
# The *only* thing I did was to replace 'iris$Sepal.Length' by 'x', and add a return()
# x can be any of the other variables, since the mean is relative to x
# The other names don't really matter because they are in the function's environment and don't appear
# in the global one
Sepal.size <- size.factor(iris$Sepal.Length)
Petal.size <-size.factor(iris$Petal.Length)
Petal.w.size <-size.factor(iris$Petal.Width)
# Let's try for the next one:
# three.sizes: classify each value of x into three size categories:
#   x <= 2       -> "Small"
#   2 < x <= 6   -> "Large"
#   x > 6        -> "Extra Large"
#
# x : numeric vector
# Returns a character vector the same length as x.
#
# Improvement over the original: the for loop with three chained if blocks is
# replaced by a single vectorized cut(); the (-Inf,2], (2,6], (6,Inf) breaks
# (cut's default right-closed intervals) reproduce the original boundary
# handling exactly, and as.character() restores the original return type.
three.sizes <- function(x)
{
  as.character(cut(x,
                   breaks = c(-Inf, 2, 6, Inf),
                   labels = c("Small", "Large", "Extra Large")))
}
three.sizes(iris$Petal.Length)
three.sizes(iris$Sepal.Length)
three.sizes(iris$Petal.Width) # it'll work but there is no "extra large" category. That's because
# we set fixed numbers as the category boundaries. If you want the functions to be generalisable -
# and thus be able to recycle them as often as possible - you must avoid those. That's why we keep
# the 1:length(iris$Petal.Length), instead of replacing it by the actual length of the dataframe (150).
# In this case it would not have changed anything, but imagine if you decide later on to remove
# some rows from iris? None of the scripts would work!
## Another application, using the apply family again:
apply(iris[,1:4], 2, three.sizes)
iris.new<-cbind(iris, apply(iris[,1:4], 2, three.sizes))
|
/seminar_3_introduction_basic_programming/basic_programming_script.R
|
no_license
|
orb16/seminaRs
|
R
| false
| false
| 14,687
|
r
|
# NOTE(review): everything from here down verbatim-duplicates the script
# section that appears earlier in this file (an extraction/concatenation
# artifact?) — confirm before deduplicating.
# 1) vectorization ####
# We are talking about vectorization, and how scalars do not exist in R.
# Everything is a vector, even if you might not think it is!
is.vector(5)
is.vector(5.002)
# scalars in R are vectors of length 1. Keep this in mind throughout the seminar!
# a vectorized function
examplevect <- c(1:10)
sqrt(examplevect)
# a vectorized operator
examplevect^2
# not only this , but also
(x <- 1:6)
(y<- 1:6)
x^y
# what if length(x) != length(y)? Recycling happens!!! CAREFUL!
(x <- 1:6)
(y<- 1:3)
x^y
# can you see what happened??
# Recycling is "silent" in R... if all goes well*, R doesn't even tell you that it is doing it.
# * in the case of recycling, well means that the longer vector length is a multiple of shorter vector length
# if this isn't the case...
(x <- 1:6)
(y<- 1:4)
x^y
# a warning is issued. The operation is still performed though! (recall the difference
# between a warning and an error)
# this isn't inherently good or bad (although it drives some people crazy). It is just the
# way R behaves, and we need to be aware of that (hence the warnings.)
# if R wasn't vectorized... what would we do??
# if R wasn't vectorized... we would use a for loop.
# 2) "for" loops ####
# For loops are iterations of the same action or operation
# over each element of a vector.
# how do they work?
# for counter over vector {command}
for (i in 1:10){
print(paste("I have", i, "eggs"))
}
# and an example with a character vector
animals <- c("dog","cat","unicorn")
for (i in animals){
print(i)
}
# 2a) basic for loop examples ####
# for our square example
for (i in 1:length(examplevect)) print(examplevect[i]^2)
# compare with the vectorized version
examplevect^2
# you have to agree that examplevect^2 is a lot more concise... it is also faster,
# although you might not notice with an "average" (or very short) length of data.
# 2b) storing values and memory ####
# for loops to STORE values. in this case, memory pre-allocation is very important!
# take home message: DO NOT GROW VECTORS.
# importance of pre-allocating all the memory we need. We will see how much difference
# this can make in terms of speed by using the handy function system.time().
# We measure how long it takes
# to create a vector made of a sequence of numbers (from 1 to 100000)
# 1. we cannot start from nothing, we said. Try the following:
# (this line fails on purpose: 'vec' has not been created yet)
for(i in 1:100000) vec[i] <- i
# we have said that we must create a "container" for our values beforehand.
# in this case we will use a vector.
# if we start with an empty vector
vec <- vector()
# by all means, let's have a look at what we have created!
head(vec)
# it is an "empty" logical vector, i.e. a vector of length zero.
# and now for the performance testing! Let's fill it up.
system.time(for(i in 1:100000) vec[i] <- i)
# it took around 10 seconds on my laptop... not very good! Can you guess what our command did?
# On the other hand, if we start with a vector filled with anything, but of the right size:
vec <- numeric(length=100000)
head(vec)
# in this case, it is filled with zeroes. The important thing is not what is inside
# the vector, but its size!
# Let's see how long it takes this time.
system.time(for(i in 1:100000) vec[i] <- i)
# it took less than one second! and let's check that the command was executed
# and actually changed the content of our vec
head(vec); tail(vec)
# it doesn't matter if you create a numeric or a logical vector, as long as it is not
# an empty one. the following is as quick as above.
vec <- rep(NA,200000)
head(vec)
system.time(for(i in 1:100000) vec[i] <- i)
head(vec); tail(vec)
vec <- vec[!is.na(vec)]
# of course, the R way of doing this all is:
# VECTORIZATION!
system.time(vec <- 1:100000)
# we will use for loops for quick plotting of multiple lines of a dataframe
## NB. for the following exercise, we will be using an example file called "traces".
# This is a binary compressed file. It is simply called "traces" in the Dropbox folder.
# It does not have any extension (i.e. no "traces.zip", "traces.xls" or similar).
# Please do not try to "open with" anything (text editor, excel), because it won't work.
# Simply DOWNLOAD IT TO YOUR (current R session/ R project) WORKING DIRECTORY, and we will
# load it within our R workspace with the command readRDS().
# 2c) for loops for plotting ####
traces <- readRDS("traces")
View(traces)
# i need to create an x axis
xaxis <- 1:33001
# I decide to plot 3 traces
linestoplot <- c("SE1", "SE7", "SE61")
# with certain colors
cols <- c("red","blue","forestgreen","gray")
plot(xaxis, traces["SE1",], type = "l", xlab = "sampling interval", ylab = "uV", col='white')
for(i in linestoplot) {
lines(xaxis, traces[i,], col = cols[which(linestoplot == i)])
print(i)
print(which(linestoplot == i))
}
legend("topright", legend = linestoplot, col = cols, lwd = 1, bty = "n")
# NOTE(review): this region verbatim-duplicates the apply-family section that
# appears earlier in this file — confirm before deduplicating.
# 3) apply family ####
# the R "correspondent" of for loops is the apply family of functions.
# using apply improves readability and clarity, and the code will therefore be
# easier to maintain. This is also part of optimization!
# 3a) apply ####
# Example 1. I want to find the maximum intensity for each of my 7 traces.
# "where" are my traces? In the dataframe traces, and
# they are "in the rows". row 1 = trace of SE1, row 4 = trace of SE8, etc
# each column is a value of intensity. Therefore to find the maximum intensity per trace
# I want to APPLY the function MAX to MARGIN=1 (across rows).
# apply() syntax: apply(X, MARGIN, FUN, ...) [have a look at help too.]
# for our example 1 I write:
apply(traces, MARGIN=1, max)
# that's it!
# Example 2. let's create a fake data-set of extremely skewed values... let's pretend we
# collected measures of pollutants from a body of water, which happened to be quite clean.
metals <- as.data.frame(replicate(5, rgamma(200, shape=2)))
colnames(metals) <- c("Al","Cu","Fe","Zn","Pb")
# let's have a look at how each column looks... with a for loop!
for (i in colnames(metals)){
Sys.sleep(0.1);hist(metals[,i], main=i)
}
# skim through the plots we have produced with the blue arrows.
# let's see if the variables, as they are, would pass a normality test...
# let's use apply again!
apply(metals, MARGIN=2, shapiro.test)
# and if I wanted to store the result of my call to apply...
shtest <- apply(metals, MARGIN=2, shapiro.test)
# check the structure of the object
str(shtest)
# it is a list! I can access its elements in various ways, for instance...
shtest$Al
# if I wanted to correct the skew, to try and "normalize" the data, I could do
# so with sapply
# 3b) sapply ####
# sapply and lapply apply functions to ELEMENTS of X.
logmetals <- sapply(metals, log10)
colnames(logmetals) <- c("Al","Cu","Fe","Zn","Pb")
# let's have a look at how each column looks NOW = in logmetals rather than metals!
for (i in colnames(logmetals)){
Sys.sleep(0.1);hist(logmetals[,i], main=i)
}
# will the non normality have been corrected??
apply(logmetals, MARGIN=2, shapiro.test)
# the beauty of the apply family is that where we put max() or sqrt(), you can put ANY
# function, including your own!
### exercise 1 ####
# calculate the square root of sepal length from iris dataset,
# using the different techniques we just learned.
# load iris
data(iris)
View(iris)
# phase zero:
# please subset iris first, to get rid of the last column, i.e. column 5, or Species
# which, being non-numeric, might throw an error at you if you try to apply functions
# to the whole dataframe....
# use any of the techniques Simon introduced last week!
# as long as you get rid of the last column!
# once you have done this, proceed to 1!
# 1. calculate square root exploiting R's vectorization
# --- write your answer-code below ---
# 2. calculate square root using one of the apply functions
# note the difference between apply and sapply (sapply is ~ to lapply)
# apply applies a function to MARGINS of X
# sapply and lapply apply functions to elements of X.
# which one would you use in this case??
# if in doubt ask!
# --- write your answer-code below ---
# 3. calculate square root using a for loop
# --- write your answer-code below ---
# solutions are in the ex1_sessionR_answer.R script!
##########################################################################
##########################################################################
# Example 2: classifying the sepal length into size categories
# I want to create a factor variable in iris that separates 'small' sepal
# lengths and 'large' sepla lengths.
# 1st of all: I decide that the mean will be the threshold
Mean.Sepal.Length <- mean(iris$Sepal.Length)
Mean.Sepal.Length
# I create a storage ("container") vector:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# Tip: if you have a ard time following what's going on, take the elements
# of code one by one and run them in the console.
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size[i] <- "small"
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# now if I want to merge this to the iris dataset:
iris$Sepal.Size <- Sepal.Size
View(iris)
#~~~~~~~~~~~
# Debugging
#~~~~~~~~~~~
# Same loop, but with an error (the bug is left in ON PURPOSE as a debugging exercise):
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size <- "small" #### I forgot the '[i]' -- this replaces the WHOLE vector (intentional bug)
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# oops. R always waits for the brackets to be opened and closed, so if I want
# to run step by step, I have to do it manually and skip the parts with brackets.
# re-set everything to initial values:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# Let's see what happens in the first step, when i = 1
i=1
iris$Sepal.Length[i] <= Mean.Sepal.Length
# TRUE, so I run what is in the 'if' bracket:
Sepal.Size <- "small"
Sepal.Size
# so I re-wrote on top of the whole Sepal.Size vector, erasing all of what happened before
# re-set everything to initial values again:
Sepal.Size <- c(1:length(iris$Sepal.Length))
Sepal.Size
# fix it (assign only to element i, not the whole vector):
Sepal.Size[i] <- "small"
Sepal.Size
# Check next step if you want:
i=2
iris$Sepal.Length[i] <= Mean.Sepal.Length # TRUE again
Sepal.Size[i] <- "small"
Sepal.Size
# everything seems normal, so rerun the whole corrected loop
for ( i in 1:length(iris$Sepal.Length)) # What does 1:length(iris$Sepal.Length) correspond to?
{
if (iris$Sepal.Length[i] <= Mean.Sepal.Length)
{
Sepal.Size[i] <- "small"
} else
{
Sepal.Size[i] <- "large"
}
}
Sepal.Size
# EXERCISE
#~~~~~~~~~
# Create another classification using petal length : if the length
# is <=2, the class is “Small”, if the length is >2 and <=6, the class
# is “Large” and if the length >6 the class is “Extra Large”.
#############################################################################
# FUNCTIONS
#############################################################################
# Let's see what an unfolded function looks like:
read.table
# plenty of ifs and elses and fors! Just what we were doing...
# What does the help menu say?
?read.table
# Using the function outputs, example with lm()
model<-lm(Sepal.Length~Petal.Length, data=iris)
model
summary(model)
model$residuals
model$fitted
# etc
## Examples section from the lm() help page
require(graphics)
## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
## Page 9: Plant Weight Data.
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
anova(lm.D9)
summary(lm.D90)
opar <- par(mfrow = c(2,2), oma = c(0, 0, 1.1, 0))
plot(lm.D9, las = 1) # Residuals, Fitted, ...
par(opar)
# Writing your own functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Difference in within-vector ranks: for each position, the rank of the
# value within `m` minus the rank of the corresponding value within `n`.
# Returns a numeric vector the same length as `m` (and `n`).
new.func <- function(m, n) {
  rank(m) - rank(n)
}
# Notice how m and n do not appear in the global environment?
new.func(iris$Sepal.Length, iris$Petal.Length) # works just like a regular function
rank.diff<-new.func(iris$Sepal.Length, iris$Petal.Length) # I can store the result somewhere
barplot(rank.diff) # plot it... etc
# Now, what if I transform the iris 'for' loop that creates 'small' and 'large' categories into
# a function?
# Classify each value of a numeric vector as "small" or "large" relative
# to the vector's own mean.
#
# This generalises the iris 'for' loop above: the hard-coded column is
# replaced by the argument `x`, so any numeric vector can be classified.
#
# Args:
#   x: a numeric vector.
# Returns:
#   A character vector the same length as `x`: "small" where x <= mean(x),
#   "large" otherwise. NA values in `x` yield NA (the original loop
#   version stopped with an error on NA input).
size.factor <- function (x)
{
  threshold <- mean(x)
  # ifelse() is vectorised, so no explicit element-by-element loop is
  # needed; it also avoids the 1:length(x) pitfall, where the old loop
  # iterated over c(1, 0) when x was empty.
  ifelse(x <= threshold, "small", "large")
}
# The *only* thing I did was to replace 'iris$Sepal.Length' by 'x', and add a return()
# x can be any of the other variables, since the mean is computed relative to x
# The other names don't really matter because they live in the function's environment and don't appear
# in the global one
Sepal.size <- size.factor(iris$Sepal.Length)
Petal.size <-size.factor(iris$Petal.Length)
Petal.w.size <-size.factor(iris$Petal.Width)
# Let's try for the next one:
# Classify values of a numeric vector into three size categories:
#   x <= 2      -> "Small"
#   2 < x <= 6  -> "Large"
#   x > 6       -> "Extra Large"
#
# Args:
#   x: a numeric vector.
# Returns:
#   A character vector the same length as `x`, one label per value.
#   NA values in `x` yield NA (the original loop errored on NA input).
three.sizes <- function(x)
{
  # cut() performs the three mutually exclusive comparisons of the
  # original loop in one vectorised call. Intervals are right-closed
  # ((-Inf,2], (2,6], (6,Inf)), matching the original '<=' boundaries,
  # and the empty-vector case is handled cleanly (the old 1:length(x)
  # loop iterated over c(1, 0) when x was empty).
  as.character(cut(x,
                   breaks = c(-Inf, 2, 6, Inf),
                   labels = c("Small", "Large", "Extra Large")))
}
three.sizes(iris$Petal.Length)
three.sizes(iris$Sepal.Length)
three.sizes(iris$Petal.Width) # it'll work but there is no "extra large" category. That's because
# we set fixed numbers as the category boundaries. If you want the functions to be generalisable -
# and thus be able to recycle them as often as possible - you must avoid those. That's why we keep
# the 1:length(iris$Petal.Length), instead of replacing it by the actual length of the dataframe (150).
# In this case it would not have changed anything, but imagine if you decide later on to remove
# some rows from iris? None of the scripts would work!
## Another application, using the apply family again:
apply(iris[,1:4], 2, three.sizes)
iris.new<-cbind(iris, apply(iris[,1:4], 2, three.sizes))
|
## Work toward a flexible method of defining arbitrary scoring regions for multi-line texts.
## Also see yaml-test.txt system.file("extdata/yaml-test.txt")
##' @title Convert region file to region definition file.
##'
##' @description Convert a file containing full region description
##' into a region definition file. The latter is suitable for hand
##' editing and can be used to generate alternative region
##' specifications (e.g., multi word regions) for text stimuli.
##'
##' @details We aspire to handle input files (region files) for multi
##' line texts, but at present only region files for single line
##' texts are handled.
##'
##' File parameter values are used to fill in parameters written
##' to the yaml block of the region definition file. Note that few
##' of these parameters are important when the region definition
##' file is used to create a new region file. Three parameters are
##' critical to proper region definitions.
##'
##' \describe{
##'
##' \item{chrW}{This parameter is used to translate region
##' boundaries in x dimension from letter positions (as specified
##' in the region definition file) to pixel positions (as required
##' for the region or ias file). If not specified in the function
##' call, chrW is estimated from contents of region.csv file, and
##' will probably be correct most of the time. Regardless, it
##' should be checked and corrected if necessary (i.e., specified
##' in the function call).}
##'
##' \item{baseline}{Accurate baseline positions are also critical
##' to determining the y positions of regions. Baselines are
##' read directly from the region.csv file and should be
##' accurate. Note that baseline positions, in pixels, are
##' measured from the TOP of the screen.}
##'
##' \item{mrgn.left}{The left margin is an x offset that will be
##' applied to all regions. There is no easy way to read this from
##' a region file, so it will need to be specified in the function
##' call. In most cases, it will be the same for all stimulus
##' items.}
##'
##' }
##'
##' Four optional parameters (rgn.minH, rgn.maxH, rgn.padL,
##' rgn.padR) can be used to control various aspects of region
##' extent.
##'
##' \strong{To Do:}
##'
##' ToDo: This function presently does not work for regioning
##' multi-line text stimuli. Fix that.
##'
##' ToDo: Make a similar function to read/parse SRR IAS files and
##' build region defs based on them.
##'
##' @param reg A data.frame containing region specifications, as read
##' from a region file ("*.region.csv").
##' @param scrnW Screen width in pixels (integer).
##' @param scrnH Screen height in pixels (integer).
##' @param fnt.name Font name used for stimulus text.
##' @param fnt.size Nominal font size in points for text display.
##' @param chrW Letter width in pixels.
##' @param chrH Nominal letter height in pixels.
##' @param ln.space Line spacing in pixels for multi line texts. Multi
##' line texts are not currently supported.
##' @param baseline Baseline positions for each line of text. Measured
##' in pixels from the top of the screen. Multi line texts are not
##' currently supported.
##' @param mrgn.top Top margin in pixels.
##' @param mrgn.left Left margin in pixels.
##' @param mrgn.bottom Bottom margin in pixels.
##' @param mrgn.right Right margin in pixels.
##' @param rgn.maxH Extent of regions of interest above baseline in
##' pixels.
##' @param rgn.minH Extent of regions of interest below baseline in
##' pixels.
##' @param rgn.padL Expand leftmost region on each line leftward by
##' this amount in pixels.
##' @param rgn.padR Expand rightmost region on each line rightward by
##' this amount in pixels.
##' @return A vector of strings containing the region definition. The
##' vector includes a yaml block with values for each of the
##' function parameters except for "reg". In addition to the yaml
##' block, the vector will include a pair of lines for each line
##' of text in the stimulus. The first element of each pair is the
##' text displayed on that line. The second element is a regioning
##' string made up of dots ("."), and pipe ("|")
##' characters. Pipes indicate the beginnings of regions. By
##' default, the region definition file will specify that each
##' text line be exhaustively divided into space delimited regions
##' (i.e. there will be a pipe character corresponding to each space
##' character in the paired text line).
##'
##' This vector can be written to file and hand edited to add or
##' correct information in the yaml block, or to re-specify region
##' placements.
##' @seealso \code{\link{regdef2ias}}
##' @author Dave Braze \email{davebraze@@gmail.com}
##' @export
reg2regdef <- function(reg, scrnW=NA, scrnH=NA,
                       fnt.name=NA, fnt.size=NA,
                       chrW=NA, chrH=NA,
                       ln.space=NA, baseline=NA,
                       mrgn.top=NA, mrgn.left=NA, mrgn.bottom=NA, mrgn.right=NA,
                       rgn.maxH=NA, rgn.minH=NA, rgn.padL=NA, rgn.padR=NA) {
    ## ---- fill unspecified display parameters from the region data.frame ----
    if (is.na(baseline)) baseline <- unique(reg$baseline)
    if (is.na(chrH)) chrH <- unique(reg$height)
    if (is.na(chrW)) {
        ## estimate character width (pixels) as the gcd of successive x1 offsets
        chrW <- FDButils::gcd(diff(reg$x1_pos))
    }
    ## ---- assemble the yaml header block ----
    ## Each element becomes one yaml mapping; order here fixes the order of
    ## sections in the emitted header.
    param.sets <- list(
        list(screen = list(width = as.integer(scrnW), height = as.integer(scrnH))),  # likely defaults
        list(font = list(name = fnt.name, size = fnt.size)),  # no default; not important to region definitions
        list(character = list(width = as.integer(chrW), height = as.integer(chrH))),
        list(lines = list(spacing = ln.space, baseline = as.integer(baseline))),
        list(margins = list(top = as.integer(mrgn.top), left = as.integer(mrgn.left),
                            bottom = as.integer(mrgn.bottom), right = as.integer(mrgn.right))),
        list(regions = list(maxH = as.integer(rgn.maxH), minH = as.integer(rgn.minH),
                            padL = as.integer(rgn.padL), padR = as.integer(rgn.padR))))
    header <- c("---\n",
                sapply(param.sets, yaml::as.yaml),
                "---\n")
    ## ---- build the regdef block ----
    ## One text line followed by a mark line: "." everywhere except "|" at
    ## each space, i.e. exhaustive space-delimited regions by default.
    stim.text <- stringr::str_c(reg$Word, collapse = "")
    space.pos <- stringr::str_locate_all(stim.text, " ")[[1]][, 1]
    marks <- rep(".", stringr::str_length(stim.text))
    marks[space.pos] <- "|"
    marks <- paste(marks, collapse = "")
    ## Return the header plus the text/mark pair as a character vector.
    c(header, paste0("\n", stim.text, "\n", marks, "\n"))
}
##' @title Convert region definition file to SRR Interest Area file.
##'
##' @description Convert a region definition file to an SRR Interest
##' Area file (*.ias). The latter can be hand edited to specify
##' alternative region specifications (e.g., multi word regions)
##' for text stimuli.
##'
##' @details We aspire to handle region definitions for multi line
##' texts, but at present only region files for single line texts
##' are handled.
##'
##' Parameter values are read from the yaml block of the region
##' definition file. Note that a few of these parameters are
##' important when translating region definitions to interest
##' areas.
##'
##' A region definition file contains 2 parts. The first is a yaml
##' block, which is followed by a region block.
##'
##' \strong{Yaml Block:}
##'
##' Three parameters in the yaml block are critical to proper
##' region definitions. Four others are also useful, but optional.
##'
##' \describe{
##'
##' \item{character$width}{This parameter is used to translate
##' region boundaries in x dimension from letter positions (as
##' specified in the region definition file) to pixel positions
##' (as required for the region or ias file). Character width (in
##' pixels) is not explicitly encoded in a region file, but is
##' typically estimated from contents of region.csv file, and will
##' probably be correct most of the time. Regardless, it should be
##' checked and, if necessary, manually edited in the resulting
##' region definition file.}
##'
##' \item{lines$baseline}{Accurate baseline positions are also
##' critical to determining the y positions of regions. Baselines
##' are read directly from the region.csv file and should be
##' accurate. Note that baseline positions, in pixels, are
##' measured from the TOP of the screen.}
##'
##' \item{margins$left}{The left margin is an x offset that will be
##' applied to all regions. There is no easy way to read this from
##' a region file, so it will need to be specified in the function
##' call. In most cases, it will be the same for all stimulus
##' items.}
##'
##' }
##'
##' Four additional parameters in the yaml block of the region
##' definition file will be used to modify regions. They are:
##' regions$maxH, regions$minH, regions$padL, and regions$padR.
##'
##' Before running \code{regdef2ias} on a file, its yaml block can
##' be hand edited to add or correct information. However, the
##' easiest way to fill information in the yaml block will
##' probably be to specify it in the form of parameters to
##' \code{reg2regdef} or similar function used to create the
##' region definition file in the first place.
##'
##' \strong{Region Block:}
##'
##' The second part of a region definition file is the region
##' block. This block contains a pair of lines for each line of
##' text in the stimulus. The first element of each pair is the
##' text displayed on that line. The second element is a regioning
##' string made up of dots ("."), and pipe ("|") characters. Pipes
##' indicate the beginnings of regions. By default, the region
##' definition file will specify that each text line be
##' exhaustively divided into space delimited regions (i.e. there
##' will be a pipe character corresponding to each space character
##' in the paired text line).
##'
##' Before running \code{regdef2ias} on a file, its region block
##' can be hand edited to add or correct information to specify
##' region placements.
##'
##' \strong{To Do:}
##'
##' This function presently does not handle regioning for
##' multi-line stimulus texts. Fix that.
##'
##' @param fname A string containing the name of a "region definition"
##' file, such as might be created by reg2regdef(). See Details.
##'
##' @return A data.frame specifying the content of an SRR interest
##' area file (*.ias). Use \code{readr::write_delim(...,
##' delim="\n", col_names=FALSE)} to save the interest area
##' specification to file.
##' @seealso \code{\link{reg2regdef}}
##' @author Dave Braze \email{davebraze@@gmail.com}
##' @export
regdef2ias <- function(fname) {
    ## Read the region definition file; it consists of a yaml parameter
    ## block (delimited by "---" lines) followed by text/region-mark pairs.
    l <- readLines(fname)
    ## get parameters from yaml block
    yidx <- which(stringr::str_detect(l, "^---$"))
    yml <- (min(yidx)+1):(max(yidx)-1)
    yml <- paste(l[yml], collapse="\n")
    parms <- yaml::yaml.load(yml)
    ## Character width (pixels) used to translate letter positions to pixel
    ## positions. Earlier versions hard-coded 12 here; prefer the value from
    ## the yaml block (character$width) and fall back to 12 only if absent,
    ## preserving the old behavior for files without that field.
    chrW <- parms$character$width
    if (is.null(chrW) || is.na(chrW)) chrW <- 12L
    ## get regdef block
    tstart <- max(yidx)+1
    tend <- length(l)
    tblock <- l[tstart:tend]
    tidx <- stringr::str_detect(tblock, "^ *$") ## find blank lines
    ## drop leading blank lines from regdef block; the any() guard avoids a
    ## min(integer(0)) warning when there are no blank lines at all
    if (any(tidx) && min(which(tidx)) == 1) {
        tmp <- FDButils::series(which(tidx), minseries=1)
        tstart <- tmp[1,1] + tmp[1,3]
        tend <- length(tblock)
        tblock <- tblock[tstart:tend]
    }
    ##### from this point on needs to be iterated over text/regdef lines
    ## find the (horizontal) beginnings and ends of regions, in character units
    txt <- tblock[1]
    mrk <- tblock[2]
    ## stringr:: prefix added; unqualified str_length() failed unless
    ## stringr happened to be attached, unlike every other call here
    if(stringr::str_length(txt) != stringr::str_length(mrk)) {
        tt <- paste0("\n [", txt, "]")
        mm <- paste0("\n [", mrk, "]")
        ww <- paste("Warning! region mark line is not same length as text line in" ,
                    fname, tt, mm)
        warning(ww)
    }
    midx <- stringr::str_locate_all(mrk, "[|]")[[1]][,1]
    x1_char <- c(1, midx)-1
    x2_char <- c(midx-1, stringr::str_length(txt))
    ## find the (horizontal) beginnings and ends of regions, in pixel units
    x1 <- (x1_char * chrW) + (parms$margins$left - 1) ## translate char to pix
    ## is.null guards handle regdef files whose yaml omits the pad fields
    if (!is.null(parms$regions$padL) && !is.na(parms$regions$padL)) x1[1] <- x1[1] - parms$regions$padL
    x1 <- as.integer(x1)
    x2 <- (x2_char * chrW) + (parms$margins$left - 1) ## translate char to pix
    if (!is.null(parms$regions$padR) && !is.na(parms$regions$padR)) x2[length(x2)] <- x2[length(x2)] + parms$regions$padR
    x2 <- as.integer(x2)
    ## get the upper and lower y coordinates (pixels) of regions; baselines
    ## are measured from the top of the screen
    y1 <- as.integer(parms$lines$baseline[1] - parms$regions$maxH)
    y2 <- as.integer(parms$lines$baseline[1] + parms$regions$minH)
    ## other columns for current text/mark line
    type <- "RECTANGLE" ## region type
    regnum <- as.integer(seq_along(x1)) ## region numbers
    labs <- txt ## region labels
    labs <- stringr::str_replace_all(labs, " ", "_")
    labs <- stringr::str_replace_all(labs, '"', "'")
    labs <- stringr::str_sub(labs, x1_char+1, x2_char)
    ias <- data.frame(type, regnum, x1, y1, x2, y2, labs)
    ias
}
|
/R/textRegions.R
|
permissive
|
nagaflokhu/FDBeye
|
R
| false
| false
| 13,252
|
r
|
## Work toward a flexible method of defining arbitrary scoring regions for multi-line texts.
## Also see yaml-test.txt system.file("extdata/yaml-test.txt")
##' @title Convert region file to region definition file.
##'
##' @description Convert a file containing full region description
##' into a region definition file. The latter is suitable for hand
##' editing and can be used to generate alternative region
##' specifications (e.g., multi word regions) for text stimuli.
##'
##' @details We aspire to handle input files (region files) for multi
##' line texts, but at present only region files for single line
##' texts are handled.
##'
##' File parameter values are used to fill in parameters written
##' to the yaml block of the region definition file. Note that few
##' of these parameters are important when the region definition
##' file is used to create a new region file. Three parameters are
##' critical to proper region definitions.
##'
##' \describe{
##'
##' \item{chrW}{This parameter is used to translate region
##' boundaries in x dimension from letter positions (as specified
##' in the region definition file) to pixel positions (as required
##' for the region or ias file). If not specified in the function
##' call, chrW is estimated from contents of region.csv file, and
##' will probably be correct most of the time. Regardless, it
##' should be checked and corrected if necessary (i.e., specified
##' in the function call).}
##'
##' \item{baseline}{Accurate baseline positions are also critical
##' to determining the y positions of regions. Baselines are
##' read directly from the region.csv file and should be
##' accurate. Note that baseline positions, in pixels, are
##' measured from the TOP of the screen.}
##'
##' \item{mrgn.left}{The left margin is an x offset that will be
##' applied to all regions. There is no easy way to read this from
##' a region file, so it will need to be specified in the function
##' call. In most cases, it will be the same for all stimulus
##' items.}
##'
##' }
##'
##' Four optional parameters (rgn.minH, rgn.maxH, rgn.padL,
##' rgn.padR) can be used to control various aspects of region
##' extent.
##'
##' \strong{To Do:}
##'
##' ToDo: This function presently does not work for regioning
##' multi-line text stimuli. Fix that.
##'
##' ToDo: Make a similar function to read/parse SRR IAS files and
##' build region defs based on them.
##'
##' @param reg A data.frame containing region specifications, as read
##' from a region file ("*.region.csv").
##' @param scrnW Screen width in pixels (integer).
##' @param scrnH Screen height in pixels (integer).
##' @param fnt.name Font name used for stimulus text.
##' @param fnt.size Nominal font size in points for text display.
##' @param chrW Letter width in pixels.
##' @param chrH Nominal letter height in pixels.
##' @param ln.space Line spacing in pixels for multi line texts. Multi
##' line texts are not currently supported.
##' @param baseline Baseline positions for each line of text. Measured
##' in pixels from the top of the screen. Multi line texts are not
##' currently supported.
##' @param mrgn.top Top margin in pixels.
##' @param mrgn.left Left margin in pixels.
##' @param mrgn.bottom Bottom margin in pixels.
##' @param mrgn.right Right margin in pixels.
##' @param rgn.maxH Extent of regions of interest above baseline in
##' pixels.
##' @param rgn.minH Extent of regions of interest below baseline in
##' pixels.
##' @param rgn.padL Expand leftmost region on each line leftward by
##' this amount in pixels.
##' @param rgn.padR Expand rightmost region on each line rightward by
##' this amount in pixels.
##' @return A vector of strings containing the region definition. The
##' vector includes a yaml block with values for each of the
##' function parameters except for "reg". In addition to the yaml
##' block, the vector will include a pair of lines for each line
##' of text in the stimulus. The first element of each pair is the
##' text displayed on that line. The second element is a regioning
##' string made up of dots ("."), and pipe ("|")
##' characters. Pipes indicate the beginnings of regions. By
##' default, the region definition file will specify that each
##' text line be exhaustively divided into space delimited regions
##' (i.e. there will be a pipe character corresponding to each space
##' character in the paired text line).
##'
##' This vector can be written to file and hand edited to add or
##' correct information in the yaml block, or to re-specify region
##' placements.
##' @seealso \code{\link{regdef2ias}}
##' @author Dave Braze \email{davebraze@@gmail.com}
##' @export
reg2regdef <- function(reg, scrnW=NA, scrnH=NA,
                       fnt.name=NA, fnt.size=NA,
                       chrW=NA, chrH=NA,
                       ln.space=NA, baseline=NA,
                       mrgn.top=NA, mrgn.left=NA, mrgn.bottom=NA, mrgn.right=NA,
                       rgn.maxH=NA, rgn.minH=NA, rgn.padL=NA, rgn.padR=NA) {
    ## ---- fill unspecified display parameters from the region data.frame ----
    if (is.na(baseline)) baseline <- unique(reg$baseline)
    if (is.na(chrH)) chrH <- unique(reg$height)
    if (is.na(chrW)) {
        ## estimate character width (pixels) as the gcd of successive x1 offsets
        chrW <- FDButils::gcd(diff(reg$x1_pos))
    }
    ## ---- assemble the yaml header block ----
    ## Each element becomes one yaml mapping; order here fixes the order of
    ## sections in the emitted header.
    param.sets <- list(
        list(screen = list(width = as.integer(scrnW), height = as.integer(scrnH))),  # likely defaults
        list(font = list(name = fnt.name, size = fnt.size)),  # no default; not important to region definitions
        list(character = list(width = as.integer(chrW), height = as.integer(chrH))),
        list(lines = list(spacing = ln.space, baseline = as.integer(baseline))),
        list(margins = list(top = as.integer(mrgn.top), left = as.integer(mrgn.left),
                            bottom = as.integer(mrgn.bottom), right = as.integer(mrgn.right))),
        list(regions = list(maxH = as.integer(rgn.maxH), minH = as.integer(rgn.minH),
                            padL = as.integer(rgn.padL), padR = as.integer(rgn.padR))))
    header <- c("---\n",
                sapply(param.sets, yaml::as.yaml),
                "---\n")
    ## ---- build the regdef block ----
    ## One text line followed by a mark line: "." everywhere except "|" at
    ## each space, i.e. exhaustive space-delimited regions by default.
    stim.text <- stringr::str_c(reg$Word, collapse = "")
    space.pos <- stringr::str_locate_all(stim.text, " ")[[1]][, 1]
    marks <- rep(".", stringr::str_length(stim.text))
    marks[space.pos] <- "|"
    marks <- paste(marks, collapse = "")
    ## Return the header plus the text/mark pair as a character vector.
    c(header, paste0("\n", stim.text, "\n", marks, "\n"))
}
##' @title Convert region definition file to SRR Interest Area file.
##'
##' @description Convert a region definition file to an SRR Interest
##' Area file (*.ias). The latter can be hand edited to specify
##' alternative region specifications (e.g., multi word regions)
##' for text stimuli.
##'
##' @details We aspire to handle region definitions for multi line
##' texts, but at present only region files for single line texts
##' are handled.
##'
##' Parameter values are read from the yaml block of the region
##' definition file. Note that a few of these parameters are
##' important when translating region definitions to interest
##' areas.
##'
##' A region definition file contains 2 parts. The first is a yaml
##' block, which is followed by a region block.
##'
##' \strong{Yaml Block:}
##'
##' Three parameters in the yaml block are critical to proper
##' region definitions. Four others are also useful, but optional.
##'
##' \describe{
##'
##' \item{character$width}{This parameter is used to translate
##' region boundaries in x dimension from letter positions (as
##' specified in the region definition file) to pixel positions
##' (as required for the region or ias file). Character width (in
##' pixels) is not explicitly encoded in a region file, but is
##' typically estimated from contents of region.csv file, and will
##' probably be correct most of the time. Regardless, it should be
##' checked and, if necessary, manually edited in the resulting
##' region definition file.}
##'
##' \item{lines$baseline}{Accurate baseline positions are also
##' critical to determining the y positions of regions. Baselines
##' are read directly from the region.csv file and should be
##' accurate. Note that baseline positions, in pixels, are
##' measured from the TOP of the screen.}
##'
##' \item{margins$left}{The left margin is an x offset that will be
##' applied to all regions. There is no easy way to read this from
##' a region file, so it will need to be specified in the function
##' call. In most cases, it will be the same for all stimulus
##' items.}
##'
##' }
##'
##' Four additional parameters in the yaml block of the region
##' definition file will be used to modify regions. They are:
##' regions$maxH, regions$minH, regions$padL, and regions$padR.
##'
##' Before running \code{regdef2ias} on a file, its yaml block can
##' be hand edited to add or correct information. However, the
##' easiest way to fill information in the yaml block will
##' probably be to specify it in the form of parameters to
##' \code{reg2regdef} or similar function used to create the
##' region definition file in the first place.
##'
##' \strong{Region Block:}
##'
##' The second part of a region definition file is the region
##' block. This block contains a pair of lines for each line of
##' text in the stimulus. The first element of each pair is the
##' text displayed on that line. The second element is a regioning
##' string made up of dots ("."), and pipe ("|") characters. Pipes
##' indicate the beginnings of regions. By default, the region
##' definition file will specify that each text line be
##' exhaustively divided into space delimited regions (i.e. there
##' will be a pipe character corresponding to each space character
##' in the paired text line).
##'
##' Before running \code{regdef2ias} on a file, its region block
##' can be hand edited to add or correct information to specify
##' region placements.
##'
##' \strong{To Do:}
##'
##' This function presently does not handle regioning for
##' multi-line stimulus texts. Fix that.
##'
##' @param fname A string containing the name of a "region definition"
##' file, such as might be created by reg2regdef(). See Details.
##'
##' @return A data.frame specifying the content of an SRR interest
##' area file (*.ias). Use \code{readr::write_delim(...,
##' delim="\n", col_names=FALSE)} to save the interest area
##' specification to file.
##' @seealso \code{\link{reg2regdef}}
##' @author Dave Braze \email{davebraze@@gmail.com}
##' @export
regdef2ias <- function(fname) {
    ## Read the region definition file; it consists of a yaml parameter
    ## block (delimited by "---" lines) followed by text/region-mark pairs.
    l <- readLines(fname)
    ## get parameters from yaml block
    yidx <- which(stringr::str_detect(l, "^---$"))
    yml <- (min(yidx)+1):(max(yidx)-1)
    yml <- paste(l[yml], collapse="\n")
    parms <- yaml::yaml.load(yml)
    ## Character width (pixels) used to translate letter positions to pixel
    ## positions. Earlier versions hard-coded 12 here; prefer the value from
    ## the yaml block (character$width) and fall back to 12 only if absent,
    ## preserving the old behavior for files without that field.
    chrW <- parms$character$width
    if (is.null(chrW) || is.na(chrW)) chrW <- 12L
    ## get regdef block
    tstart <- max(yidx)+1
    tend <- length(l)
    tblock <- l[tstart:tend]
    tidx <- stringr::str_detect(tblock, "^ *$") ## find blank lines
    ## drop leading blank lines from regdef block; the any() guard avoids a
    ## min(integer(0)) warning when there are no blank lines at all
    if (any(tidx) && min(which(tidx)) == 1) {
        tmp <- FDButils::series(which(tidx), minseries=1)
        tstart <- tmp[1,1] + tmp[1,3]
        tend <- length(tblock)
        tblock <- tblock[tstart:tend]
    }
    ##### from this point on needs to be iterated over text/regdef lines
    ## find the (horizontal) beginnings and ends of regions, in character units
    txt <- tblock[1]
    mrk <- tblock[2]
    ## stringr:: prefix added; unqualified str_length() failed unless
    ## stringr happened to be attached, unlike every other call here
    if(stringr::str_length(txt) != stringr::str_length(mrk)) {
        tt <- paste0("\n [", txt, "]")
        mm <- paste0("\n [", mrk, "]")
        ww <- paste("Warning! region mark line is not same length as text line in" ,
                    fname, tt, mm)
        warning(ww)
    }
    midx <- stringr::str_locate_all(mrk, "[|]")[[1]][,1]
    x1_char <- c(1, midx)-1
    x2_char <- c(midx-1, stringr::str_length(txt))
    ## find the (horizontal) beginnings and ends of regions, in pixel units
    x1 <- (x1_char * chrW) + (parms$margins$left - 1) ## translate char to pix
    ## is.null guards handle regdef files whose yaml omits the pad fields
    if (!is.null(parms$regions$padL) && !is.na(parms$regions$padL)) x1[1] <- x1[1] - parms$regions$padL
    x1 <- as.integer(x1)
    x2 <- (x2_char * chrW) + (parms$margins$left - 1) ## translate char to pix
    if (!is.null(parms$regions$padR) && !is.na(parms$regions$padR)) x2[length(x2)] <- x2[length(x2)] + parms$regions$padR
    x2 <- as.integer(x2)
    ## get the upper and lower y coordinates (pixels) of regions; baselines
    ## are measured from the top of the screen
    y1 <- as.integer(parms$lines$baseline[1] - parms$regions$maxH)
    y2 <- as.integer(parms$lines$baseline[1] + parms$regions$minH)
    ## other columns for current text/mark line
    type <- "RECTANGLE" ## region type
    regnum <- as.integer(seq_along(x1)) ## region numbers
    labs <- txt ## region labels
    labs <- stringr::str_replace_all(labs, " ", "_")
    labs <- stringr::str_replace_all(labs, '"', "'")
    labs <- stringr::str_sub(labs, x1_char+1, x2_char)
    ias <- data.frame(type, regnum, x1, y1, x2, y2, labs)
    ias
}
|
library(DescTools)
### Name: StrAlign
### Title: String Alignment
### Aliases: StrAlign
### Keywords: character
### ** Examples
# Align the strings on the first occurrence of "B"
strs <- c("ABCDMNB", "CDGHEBK", "BCI")
cbind(StrAlign(strs, sep="B"))
# Align numbers on the decimal point
nums <- c(" 6.0", "6.00 ", " 45.12 ", "784", NA)
cbind(StrAlign(nums, sep="."))
# Right-align; the width is the longest string in the input
cbind(StrAlign(strs, sep="\\r"))
# Left-align
cbind(StrAlign(strs, sep="\\l"))
# Centre the strings
cbind(StrAlign(strs, sep="\\c"))
|
/data/genthat_extracted_code/DescTools/examples/StrAlign.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 520
|
r
|
library(DescTools)
### Name: StrAlign
### Title: String Alignment
### Aliases: StrAlign
### Keywords: character
### ** Examples
# Align the strings on the first occurrence of "B"
strs <- c("ABCDMNB", "CDGHEBK", "BCI")
cbind(StrAlign(strs, sep="B"))
# Align numbers on the decimal point
nums <- c(" 6.0", "6.00 ", " 45.12 ", "784", NA)
cbind(StrAlign(nums, sep="."))
# Right-align; the width is the longest string in the input
cbind(StrAlign(strs, sep="\\r"))
# Left-align
cbind(StrAlign(strs, sep="\\l"))
# Centre the strings
cbind(StrAlign(strs, sep="\\c"))
|
## Purled vignette code (GPoM "Predictability"): build a global model of a
## single Rossler-1976 time series with gPoMo(), forecast it with
## numicano(), and assess forecast-error growth with predictab().
## NOTE(review): depends on the GPoM package (gPoMo, visuEq, numicano,
## predictab) and its Ross76 data set; not runnable stand-alone.
## ---- eval = TRUE, include=FALSE----------------------------------------------
# load data
data("Ross76")
# time vector
tin <- Ross76[seq(1, 3000, by = 8), 1]
# single time series, here y(t) is chosen
data <- Ross76[seq(1, 3000, by = 8), 3]
# global modelling
# results are put in list outputGPoM
outputGPoM <- gPoMo(data[1:300], tin = tin[1:300], dMax = 2, nS=c(3),
show = 0, method = 'rk4',
nPmin = 3, nPmax = 12, IstepMin = 400, IstepMax = 401)
## ---- eval = TRUE-------------------------------------------------------------
# number of models that passed the integrability tests
sum(outputGPoM$okMod)
## ---- eval = TRUE-------------------------------------------------------------
which(outputGPoM$okMod == 1)
## ---- eval = TRUE-------------------------------------------------------------
visuEq(outputGPoM$models$model1)
## ---- eval = TRUE-------------------------------------------------------------
# initial condition taken from the first filtered observation
x0 <- head(outputGPoM$filtdata, 1)[1:3]
## ---- eval = TRUE, fig.align='center'-----------------------------------------
###############
# forecasting #
###############
outNumi <- numicano(nVar = 3, dMax = 2, Istep = 100, onestep = 0.08,
KL = outputGPoM$models$model7, v0 = x0, method = 'rk4')
plot(outputGPoM$tfilt, outputGPoM$filtdata[,1], xlim = c(0,10),
type='l', main = 'Observed and simulated',
xlab = expression(italic(h)), ylab = expression(italic(y(h))))
t0 = outputGPoM$tfilt[1]
lines(outNumi$reconstr[,1] + t0,outNumi$reconstr[,2], type='l', col = 'red')
nbpt <- length(outNumi$reconstr[,2])
lines(c(-5,30), c(0,0), type='l', col = 'gray')
lines(outNumi$reconstr[,1] + t0, outNumi$reconstr[,2] - outputGPoM$filtdata[1:nbpt,1],
type='l', col = 'green')
legend(0,-4, c("simulated", "observed", "difference"), col=c('red', 'black', 'green'),
lty=1, cex = 0.6)
## ---- eval = TRUE-------------------------------------------------------------
#######################
# test predictability #
#######################
outpred <- predictab(outputGPoM, hp = 15, Nech = 30, selecmod = 9, show = 0)
## ---- eval = TRUE, fig.show='hold'--------------------------------------------
# manual visualisation of the outputs (e.g. for model 9):
plot(c(outpred$hpE[1], max(outpred$hpE)), c(0,0),
type='l', main = 'Error growth',
xlab = expression(h), ylab = expression(italic(e(h))),
ylim = c(min(outpred$Errmod9), max(outpred$Errmod9)))
for (i in 1:dim(outpred$Errmod9)[2]) {
lines(outpred$hpE, outpred$Errmod9[,i], col = 'green')
}
lines(c(outpred$hpE[1], max(outpred$hpE)), c(0,0), type='l')
# in terms of variance
# manual visualisation of the outputs (e.g. for model 9):
plot(c(outpred$hpE[1], max(outpred$hpE)), c(0,0),
type='l', main = 'Square error growth',
xlab = expression(italic(h)), ylab = expression(italic(e^2) (italic(h))),
ylim = c(0, 0.25*max(outpred$Errmod9)^2))
for (i in 1:dim(outpred$Errmod9)[2]) {
lines(outpred$hpE, outpred$Errmod9[,i]^2, col = 'green')
}
lines(c(outpred$hpE[1], max(outpred$hpE)), c(0,0), type='l')
## ---- eval = TRUE-------------------------------------------------------------
#######################
# test predictability #
#######################
outpred <- predictab(outputGPoM, hp = 15, Nech = 30, selecmod = c(1,9), show = 0)
## ---- eval = TRUE, fig.show='hold'--------------------------------------------
# manual visualisation of the outputs (e.g. for model 1):
image(outpred$tE, outpred$hpE, t(outpred$Errmod1),
xlab = expression(italic(t)), ylab = expression(italic(h)),
main = expression(italic(e[model1](t,h))))
# (e.g. for model 9):
image(outpred$tE, outpred$hpE, t(outpred$Errmod9),
xlab = expression(italic(t)), ylab = expression(italic(h)),
main = expression(italic(e[model9])(italic(t),italic(h))))
## ---- eval = FALSE------------------------------------------------------------
# #######################
# # test predictability #
# #######################
# outpred <- predictab(outputGPoM, hp = 15, Nech = 30)
|
/inst/doc/b5_Predictability.R
|
no_license
|
cran/GPoM
|
R
| false
| false
| 3,988
|
r
|
## Purled vignette code (GPoM "Predictability"): build a global model of a
## single Rossler-1976 time series with gPoMo(), forecast it with
## numicano(), and assess forecast-error growth with predictab().
## NOTE(review): depends on the GPoM package (gPoMo, visuEq, numicano,
## predictab) and its Ross76 data set; not runnable stand-alone.
## ---- eval = TRUE, include=FALSE----------------------------------------------
# load data
data("Ross76")
# time vector
tin <- Ross76[seq(1, 3000, by = 8), 1]
# single time series, here y(t) is chosen
data <- Ross76[seq(1, 3000, by = 8), 3]
# global modelling
# results are put in list outputGPoM
outputGPoM <- gPoMo(data[1:300], tin = tin[1:300], dMax = 2, nS=c(3),
show = 0, method = 'rk4',
nPmin = 3, nPmax = 12, IstepMin = 400, IstepMax = 401)
## ---- eval = TRUE-------------------------------------------------------------
# number of models that passed the integrability tests
sum(outputGPoM$okMod)
## ---- eval = TRUE-------------------------------------------------------------
which(outputGPoM$okMod == 1)
## ---- eval = TRUE-------------------------------------------------------------
visuEq(outputGPoM$models$model1)
## ---- eval = TRUE-------------------------------------------------------------
# initial condition taken from the first filtered observation
x0 <- head(outputGPoM$filtdata, 1)[1:3]
## ---- eval = TRUE, fig.align='center'-----------------------------------------
###############
# forecasting #
###############
outNumi <- numicano(nVar = 3, dMax = 2, Istep = 100, onestep = 0.08,
KL = outputGPoM$models$model7, v0 = x0, method = 'rk4')
plot(outputGPoM$tfilt, outputGPoM$filtdata[,1], xlim = c(0,10),
type='l', main = 'Observed and simulated',
xlab = expression(italic(h)), ylab = expression(italic(y(h))))
t0 = outputGPoM$tfilt[1]
lines(outNumi$reconstr[,1] + t0,outNumi$reconstr[,2], type='l', col = 'red')
nbpt <- length(outNumi$reconstr[,2])
lines(c(-5,30), c(0,0), type='l', col = 'gray')
lines(outNumi$reconstr[,1] + t0, outNumi$reconstr[,2] - outputGPoM$filtdata[1:nbpt,1],
type='l', col = 'green')
legend(0,-4, c("simulated", "observed", "difference"), col=c('red', 'black', 'green'),
lty=1, cex = 0.6)
## ---- eval = TRUE-------------------------------------------------------------
#######################
# test predictability #
#######################
outpred <- predictab(outputGPoM, hp = 15, Nech = 30, selecmod = 9, show = 0)
## ---- eval = TRUE, fig.show='hold'--------------------------------------------
# manual visualisation of the outputs (e.g. for model 9):
plot(c(outpred$hpE[1], max(outpred$hpE)), c(0,0),
type='l', main = 'Error growth',
xlab = expression(h), ylab = expression(italic(e(h))),
ylim = c(min(outpred$Errmod9), max(outpred$Errmod9)))
for (i in 1:dim(outpred$Errmod9)[2]) {
lines(outpred$hpE, outpred$Errmod9[,i], col = 'green')
}
lines(c(outpred$hpE[1], max(outpred$hpE)), c(0,0), type='l')
# in terms of variance
# manual visualisation of the outputs (e.g. for model 9):
plot(c(outpred$hpE[1], max(outpred$hpE)), c(0,0),
type='l', main = 'Square error growth',
xlab = expression(italic(h)), ylab = expression(italic(e^2) (italic(h))),
ylim = c(0, 0.25*max(outpred$Errmod9)^2))
for (i in 1:dim(outpred$Errmod9)[2]) {
lines(outpred$hpE, outpred$Errmod9[,i]^2, col = 'green')
}
lines(c(outpred$hpE[1], max(outpred$hpE)), c(0,0), type='l')
## ---- eval = TRUE-------------------------------------------------------------
#######################
# test predictability #
#######################
outpred <- predictab(outputGPoM, hp = 15, Nech = 30, selecmod = c(1,9), show = 0)
## ---- eval = TRUE, fig.show='hold'--------------------------------------------
# manual visualisation of the outputs (e.g. for model 1):
image(outpred$tE, outpred$hpE, t(outpred$Errmod1),
xlab = expression(italic(t)), ylab = expression(italic(h)),
main = expression(italic(e[model1](t,h))))
# (e.g. for model 9):
image(outpred$tE, outpred$hpE, t(outpred$Errmod9),
xlab = expression(italic(t)), ylab = expression(italic(h)),
main = expression(italic(e[model9])(italic(t),italic(h))))
## ---- eval = FALSE------------------------------------------------------------
# #######################
# # test predictability #
# #######################
# outpred <- predictab(outputGPoM, hp = 15, Nech = 30)
|
#' Pivot a data frame from wide to long or long to wide
#'
#' `pivot()` provides rectangular reshaping like `gather()` and `spread()`.
#' It differs primarily from existing approaches in tidyr because the details
#' of the reshaping are described by a data frame, `spec`. For simple cases, you
#' can construct the spec with `pivot_spec_long()` and `pivot_spec_wide()`.
#' See details in `vignette("pivot")`
#'
#' @param df A data frame to reshape.
#' @param spec A data frame defining the reshaping specification.
#'   Must contain `col_name` and `measure` columns that are character
#'   vectors.
#' @param na.rm If `TRUE`, will convert explicit missing values to implicit
#'   missing values. Used only when pivotting to long.
#' @param ptypes A named list that optionally overrides the types of
#'   measured columns. Used only when pivotting to long.
#' @keywords internal
#' @export
pivot <- function(df, spec, na.rm = FALSE, ptypes = NULL) {
  spec <- check_spec(spec)
  # Infer direction from the spec: if every spec col_name exists in `df`
  # we pivot to long; if the spec's key columns exist in `df` we pivot
  # to wide; otherwise the spec does not match the data -> error.
  df_in_spec <- all(spec$col_name %in% names(df))
  spec_in_df <- all(setdiff(names(spec), c("col_name", "measure")) %in% names(df))
  if (df_in_spec) {
    pivot_to_long(df, spec, na.rm = na.rm, .ptype = ptypes)
  } else if (spec_in_df) {
    pivot_to_wide(df, spec)
  } else {
    stop("Mismatch between spec and df. Need better message")
  }
}
check_spec <- function(spec) {
  # Validate a pivoting spec: a data frame carrying at least the
  # `col_name` and `measure` columns, returned with those two first.
  # Eventually should just be vec_assert() on partial_frame()
  # Waiting for https://github.com/r-lib/vctrs/issues/198
  if (!is.data.frame(spec)) {
    stop("`spec` must be a data frame", call. = FALSE)
  }
  required_ok <- has_name(spec, "col_name") && has_name(spec, "measure")
  if (!required_ok) {
    stop("`spec` must have `col_name` and `measure` columns", call. = FALSE)
  }
  # Reorder so that `col_name` and `measure` always come first
  front <- c("col_name", "measure")
  spec[union(front, names(spec))]
}
# Reshape `df` from wide to long according to `spec`.
#
# Each distinct value of `spec$measure` becomes one output column; the
# remaining spec columns become key columns identifying which original
# column each value came from. `.ptype` optionally fixes the common type
# of each measure column; `na.rm = TRUE` drops rows whose values are all
# missing.
pivot_to_long <- function(df, spec, na.rm = FALSE, .ptype = list()) {
  measures <- split(spec$col_name, spec$measure)
  measure_keys <- split(spec[-(1:2)], spec$measure)
  keys <- vec_unique(spec[-(1:2)])
  vals <- set_names(vec_na(list(), length(measures)), names(measures))
  for (measure in names(measures)) {
    cols <- measures[[measure]]
    col_id <- vec_match(measure_keys[[measure]], keys)
    val_cols <- vec_na(list(), nrow(keys))
    val_cols[col_id] <- unname(as.list(df[cols]))
    # Keys not covered by this measure contribute all-NA columns
    val_cols[-col_id] <- list(rep(NA, nrow(df)))
    val_type <- vec_type_common(!!!val_cols, .ptype = .ptype[[measure]])
    out <- vec_c(!!!val_cols, .ptype = val_type)
    # Interleave into correct order
    idx <- (matrix(seq_len(nrow(df) * length(val_cols)), ncol = nrow(df), byrow = TRUE))
    vals[[measure]] <- vec_slice(out, as.integer(idx))
  }
  vals <- as_tibble(vals)
  # Line up output rows by combining spec and existing data frame
  # https://github.com/tidyverse/tidyr/issues/557
  rows <- expand.grid(
    key_id = vec_along(keys),
    df_id = vec_along(df),
    KEEP.OUT.ATTRS = FALSE
  )
  rows$val_id <- vec_along(rows)
  if (na.rm) {
    # https://github.com/r-lib/vctrs/issues/201
    rows <- vec_slice(rows, !vec_equal_na(vals))
  }
  # Join together df, spec, and val to produce final tibble.
  # BUG FIX: removed a stray trailing comma after the second argument,
  # which passed an empty argument into the dots of vec_cbind() and only
  # worked by relying on rlang's trailing-comma tolerance.
  out <- vec_cbind(
    vec_slice(keys, rows$key_id),
    vec_slice(vals, rows$val_id)
  )
  # Bind original keys back on if there are any
  # Because of https://github.com/r-lib/vctrs/issues/199
  df_out <- df[setdiff(names(df), spec$col_name)]
  if (ncol(df_out) > 0) {
    out <- vec_cbind(vec_slice(df_out, rows$df_id), out)
  }
  out
}
# Sequence along a vector's vctrs size (vec_size(), not length()).
# https://github.com/r-lib/vctrs/issues/189
vec_along <- function(x) {
  n <- vec_size(x)
  seq_len(n)
}
# Reshape `df` from long to wide according to `spec`.
#
# Spec columns other than `col_name`/`measure` are the key columns whose
# values determine the new column names; each distinct `measure` names a
# column of `df` holding the values to spread.
pivot_to_wide <- function(df, spec) {
  measures <- vec_unique(spec$measure)
  spec_cols <- c(names(spec)[-(1:2)], measures)
  # Figure out rows in output: any df column not consumed by the spec
  # acts as a row identifier.
  df_rows <- df[setdiff(names(df), spec_cols)]
  if (ncol(df_rows) == 0) {
    # No id columns left: the output collapses to a single row.
    # NOTE(review): row_id is recycled from nrow(spec) against col_id of
    # length nrow(df) below -- confirm the intended relationship.
    rows <- tibble(.rows = 1)
    row_id <- rep(1L, nrow(spec))
  } else {
    rows <- vec_unique(df_rows)
    row_id <- vec_match(df_rows, rows)
  }
  measure_specs <- unname(split(spec, spec$measure))
  measure_out <- vec_na(list(), length(measure_specs))
  for (i in seq_along(measure_out)) {
    spec <- measure_specs[[i]]
    measure <- spec$measure[[1]]
    val <- df[[measure]]
    cols <- df[names(spec)[-(1:2)]]
    col_id <- vec_match(cols, spec[-(1:2)])
    val_id <- data.frame(row = row_id, col = col_id)
    if (vec_duplicate_any(val_id)) {
      warn("Values are not uniquely identified; output will contain list-columns")
      # https://github.com/r-lib/vctrs/issues/196
      val <- unname(split(val, vec_duplicate_id(val_id)))
      val_id <- vec_unique(val_id)
    }
    # Fill a flat column-major buffer, then cut it into columns
    nrow <- nrow(rows)
    ncol <- nrow(spec)
    out <- vec_na(val, nrow * ncol)
    vec_slice(out, val_id$row + nrow * (val_id$col - 1L)) <- val
    measure_out[[i]] <- wrap_vec(out, spec$col_name)
  }
  vec_cbind(rows, !!!measure_out)
}
# Wrap a "rectangular" vector into a data frame
#
# Splits `vec` (whose length must equal `length(names) * nrow`) into
# consecutive equal-sized chunks, one column per entry of `names`.
wrap_vec <- function(vec, names) {
  ncol <- length(names)
  nrow <- length(vec) / ncol
  out <- set_names(vec_na(list(), ncol), names)
  # BUG FIX: seq_len(ncol) instead of 1:ncol -- the latter iterates over
  # c(1, 0) and errors when `names` is empty.
  for (i in seq_len(ncol)) {
    out[[i]] <- vec_slice(vec, ((i - 1) * nrow + 1):(i * nrow))
  }
  as_tibble(out)
}
|
/R/pivot.R
|
permissive
|
coolbutuseless/tidyr
|
R
| false
| false
| 5,196
|
r
|
#' Pivot a data frame from wide to long or long to wide
#'
#' `pivot()` provides rectangular reshaping like `gather()` and `spread()`.
#' It differs primarily from existing approaches in tidyr because the details
#' of the reshaping are described by a data frame, `spec`. For simple cases, you
#' can construct the spec with `pivot_spec_long()` and `pivot_spec_wide()`.
#' See details in `vignette("pivot")`
#'
#' @param df A data frame to reshape.
#' @param spec A data frame defining the reshaping specification.
#'   Must contain `col_name` and `measure` columns that are character
#'   vectors.
#' @param na.rm If `TRUE`, will convert explicit missing values to implicit
#'   missing values. Used only when pivotting to long.
#' @param ptypes A named list that optionally overrides the types of
#'   measured columns. Used only when pivotting to long.
#' @keywords internal
#' @export
pivot <- function(df, spec, na.rm = FALSE, ptypes = NULL) {
  spec <- check_spec(spec)
  # Infer direction from the spec: if every spec col_name exists in `df`
  # we pivot to long; if the spec's key columns exist in `df` we pivot
  # to wide; otherwise the spec does not match the data -> error.
  df_in_spec <- all(spec$col_name %in% names(df))
  spec_in_df <- all(setdiff(names(spec), c("col_name", "measure")) %in% names(df))
  if (df_in_spec) {
    pivot_to_long(df, spec, na.rm = na.rm, .ptype = ptypes)
  } else if (spec_in_df) {
    pivot_to_wide(df, spec)
  } else {
    stop("Mismatch between spec and df. Need better message")
  }
}
check_spec <- function(spec) {
  # Validate a pivoting spec: a data frame carrying at least the
  # `col_name` and `measure` columns, returned with those two first.
  # Eventually should just be vec_assert() on partial_frame()
  # Waiting for https://github.com/r-lib/vctrs/issues/198
  if (!is.data.frame(spec)) {
    stop("`spec` must be a data frame", call. = FALSE)
  }
  required_ok <- has_name(spec, "col_name") && has_name(spec, "measure")
  if (!required_ok) {
    stop("`spec` must have `col_name` and `measure` columns", call. = FALSE)
  }
  # Reorder so that `col_name` and `measure` always come first
  front <- c("col_name", "measure")
  spec[union(front, names(spec))]
}
# Reshape `df` from wide to long according to `spec`.
#
# Each distinct value of `spec$measure` becomes one output column; the
# remaining spec columns become key columns identifying which original
# column each value came from. `.ptype` optionally fixes the common type
# of each measure column; `na.rm = TRUE` drops rows whose values are all
# missing.
pivot_to_long <- function(df, spec, na.rm = FALSE, .ptype = list()) {
  measures <- split(spec$col_name, spec$measure)
  measure_keys <- split(spec[-(1:2)], spec$measure)
  keys <- vec_unique(spec[-(1:2)])
  vals <- set_names(vec_na(list(), length(measures)), names(measures))
  for (measure in names(measures)) {
    cols <- measures[[measure]]
    col_id <- vec_match(measure_keys[[measure]], keys)
    val_cols <- vec_na(list(), nrow(keys))
    val_cols[col_id] <- unname(as.list(df[cols]))
    # Keys not covered by this measure contribute all-NA columns
    val_cols[-col_id] <- list(rep(NA, nrow(df)))
    val_type <- vec_type_common(!!!val_cols, .ptype = .ptype[[measure]])
    out <- vec_c(!!!val_cols, .ptype = val_type)
    # Interleave into correct order
    idx <- (matrix(seq_len(nrow(df) * length(val_cols)), ncol = nrow(df), byrow = TRUE))
    vals[[measure]] <- vec_slice(out, as.integer(idx))
  }
  vals <- as_tibble(vals)
  # Line up output rows by combining spec and existing data frame
  # https://github.com/tidyverse/tidyr/issues/557
  rows <- expand.grid(
    key_id = vec_along(keys),
    df_id = vec_along(df),
    KEEP.OUT.ATTRS = FALSE
  )
  rows$val_id <- vec_along(rows)
  if (na.rm) {
    # https://github.com/r-lib/vctrs/issues/201
    rows <- vec_slice(rows, !vec_equal_na(vals))
  }
  # Join together df, spec, and val to produce final tibble.
  # BUG FIX: removed a stray trailing comma after the second argument,
  # which passed an empty argument into the dots of vec_cbind() and only
  # worked by relying on rlang's trailing-comma tolerance.
  out <- vec_cbind(
    vec_slice(keys, rows$key_id),
    vec_slice(vals, rows$val_id)
  )
  # Bind original keys back on if there are any
  # Because of https://github.com/r-lib/vctrs/issues/199
  df_out <- df[setdiff(names(df), spec$col_name)]
  if (ncol(df_out) > 0) {
    out <- vec_cbind(vec_slice(df_out, rows$df_id), out)
  }
  out
}
# Sequence along a vector's vctrs size (vec_size(), not length()).
# https://github.com/r-lib/vctrs/issues/189
vec_along <- function(x) {
  n <- vec_size(x)
  seq_len(n)
}
# Reshape `df` from long to wide according to `spec`.
#
# Spec columns other than `col_name`/`measure` are the key columns whose
# values determine the new column names; each distinct `measure` names a
# column of `df` holding the values to spread.
pivot_to_wide <- function(df, spec) {
  measures <- vec_unique(spec$measure)
  spec_cols <- c(names(spec)[-(1:2)], measures)
  # Figure out rows in output: any df column not consumed by the spec
  # acts as a row identifier.
  df_rows <- df[setdiff(names(df), spec_cols)]
  if (ncol(df_rows) == 0) {
    # No id columns left: the output collapses to a single row.
    # NOTE(review): row_id is recycled from nrow(spec) against col_id of
    # length nrow(df) below -- confirm the intended relationship.
    rows <- tibble(.rows = 1)
    row_id <- rep(1L, nrow(spec))
  } else {
    rows <- vec_unique(df_rows)
    row_id <- vec_match(df_rows, rows)
  }
  measure_specs <- unname(split(spec, spec$measure))
  measure_out <- vec_na(list(), length(measure_specs))
  for (i in seq_along(measure_out)) {
    spec <- measure_specs[[i]]
    measure <- spec$measure[[1]]
    val <- df[[measure]]
    cols <- df[names(spec)[-(1:2)]]
    col_id <- vec_match(cols, spec[-(1:2)])
    val_id <- data.frame(row = row_id, col = col_id)
    if (vec_duplicate_any(val_id)) {
      warn("Values are not uniquely identified; output will contain list-columns")
      # https://github.com/r-lib/vctrs/issues/196
      val <- unname(split(val, vec_duplicate_id(val_id)))
      val_id <- vec_unique(val_id)
    }
    # Fill a flat column-major buffer, then cut it into columns
    nrow <- nrow(rows)
    ncol <- nrow(spec)
    out <- vec_na(val, nrow * ncol)
    vec_slice(out, val_id$row + nrow * (val_id$col - 1L)) <- val
    measure_out[[i]] <- wrap_vec(out, spec$col_name)
  }
  vec_cbind(rows, !!!measure_out)
}
# Wrap a "rectangular" vector into a data frame
#
# Splits `vec` (whose length must equal `length(names) * nrow`) into
# consecutive equal-sized chunks, one column per entry of `names`.
wrap_vec <- function(vec, names) {
  ncol <- length(names)
  nrow <- length(vec) / ncol
  out <- set_names(vec_na(list(), ncol), names)
  # BUG FIX: seq_len(ncol) instead of 1:ncol -- the latter iterates over
  # c(1, 0) and errors when `names` is empty.
  for (i in seq_len(ncol)) {
    out[[i]] <- vec_slice(vec, ((i - 1) * nrow + 1):(i * nrow))
  }
  as_tibble(out)
}
|
# Vectors ----
# Logical vector
vtrl <- c(TRUE, FALSE)
class(vtrl)
# Numeric (double) vector
vtrl1 <- c(15, 85.2344, 9999)
class(vtrl1)
# Integer vector (note the L suffix)
vtrl2 <- c(38L, 20L, 12L)
class(vtrl2)
vtrl2
# Matrix ----
# 5 x 5 matrix filled row by row with the values 6..30
mtr <- matrix(6:30, byrow = TRUE, nrow = 5, ncol = 5)
# Arrays ----
# 1:9 is recycled to fill the 3x3x2x2 array
arr <- array(1:9, dim = c(3, 3, 2, 2))
arr1 <- array(1:24, dim = c(4, 3, 2))
# Lists ----
vct1 <- c(3.14, 79, 4, 19)
vct2 <- c("Hi", "How", "are", "you?", "True")
mylist <- list(vct1, vct2, "False")
# Data frames ----
Index <- 1:5
Name <- c("Adam", "Bill", "Carter", "Deny", "Eshan")
Age <- c(13, 7, 39, 41, 40)
data.frame(Index, Name, Age)
data.frame(airquality)
data.frame(mtcars)
# Operators ----
print(5^2)     # exponentiation
print(5 %% 2)  # modulus: remainder of the division
print(5 %/% 2) # integer (floor) division: the quotient
# Conditionals ----
var1 <- 5
var2 <- 35
if ((var1 + var2) > 100) {
  print(">100")
} else if ((var1 + var2) < 50) {
  print("<50")
} else {
  print("else")
}
# Loops ----
# repeat: runs until an explicit break
var1 <- 5
repeat {
  print(var1)
  var1 <- var1 + 2
  if (var1 > 21) {
    break
  }
}
# while
var1 <- 5
while (var1 <= 21) {
  print(var1)
  var1 <- var1 + 2
}
# for
for (i in 1:25) {
  print(i)
  i <- i + 1  # reassigning i here does not affect the loop counter in R
}
# Functions ----
# Print a Fibonacci-style series seeded with 1 and 2: the two seeds,
# followed by `a` further terms, each the sum of the previous two.
fibo <- function(a) {
  var1 <- 1
  var2 <- 2
  print(var1)
  print(var2)
  # BUG FIX: seq_len(a) instead of 1:a, so a = 0 prints only the two
  # seeds (1:0 would iterate over c(1, 0) and print two extra terms).
  for (i in seq_len(a)) {
    var3 <- var1 + var2
    print(var3)
    var1 <- var2
    var2 <- var3
  }
}
fibo(10)
|
/Datatypes.R
|
no_license
|
Lakshhmi/R-Course
|
R
| false
| false
| 1,244
|
r
|
# Vectors ----
# Logical vector
vtrl <- c(TRUE, FALSE)
class(vtrl)
# Numeric (double) vector
vtrl1 <- c(15, 85.2344, 9999)
class(vtrl1)
# Integer vector (note the L suffix)
vtrl2 <- c(38L, 20L, 12L)
class(vtrl2)
vtrl2
# Matrix ----
# 5 x 5 matrix filled row by row with the values 6..30
mtr <- matrix(6:30, byrow = TRUE, nrow = 5, ncol = 5)
# Arrays ----
# 1:9 is recycled to fill the 3x3x2x2 array
arr <- array(1:9, dim = c(3, 3, 2, 2))
arr1 <- array(1:24, dim = c(4, 3, 2))
# Lists ----
vct1 <- c(3.14, 79, 4, 19)
vct2 <- c("Hi", "How", "are", "you?", "True")
mylist <- list(vct1, vct2, "False")
# Data frames ----
Index <- 1:5
Name <- c("Adam", "Bill", "Carter", "Deny", "Eshan")
Age <- c(13, 7, 39, 41, 40)
data.frame(Index, Name, Age)
data.frame(airquality)
data.frame(mtcars)
# Operators ----
print(5^2)     # exponentiation
print(5 %% 2)  # modulus: remainder of the division
print(5 %/% 2) # integer (floor) division: the quotient
# Conditionals ----
var1 <- 5
var2 <- 35
if ((var1 + var2) > 100) {
  print(">100")
} else if ((var1 + var2) < 50) {
  print("<50")
} else {
  print("else")
}
# Loops ----
# repeat: runs until an explicit break
var1 <- 5
repeat {
  print(var1)
  var1 <- var1 + 2
  if (var1 > 21) {
    break
  }
}
# while
var1 <- 5
while (var1 <= 21) {
  print(var1)
  var1 <- var1 + 2
}
# for
for (i in 1:25) {
  print(i)
  i <- i + 1  # reassigning i here does not affect the loop counter in R
}
# Functions ----
# Print a Fibonacci-style series seeded with 1 and 2: the two seeds,
# followed by `a` further terms, each the sum of the previous two.
fibo <- function(a) {
  var1 <- 1
  var2 <- 2
  print(var1)
  print(var2)
  # BUG FIX: seq_len(a) instead of 1:a, so a = 0 prints only the two
  # seeds (1:0 would iterate over c(1, 0) and print two extra terms).
  for (i in seq_len(a)) {
    var3 <- var1 + var2
    print(var3)
    var1 <- var2
    var2 <- var3
  }
}
fibo(10)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 369
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 369
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query07_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 212
c no.of clauses 369
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 369
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query07_1344n.qdimacs 212 369 E1 [] 0 49 163 369 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query07_1344n/exquery_query07_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 704
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 369
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 369
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query07_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 212
c no.of clauses 369
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 369
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query07_1344n.qdimacs 212 369 E1 [] 0 49 163 369 NONE
|
# Build the package-internal lookup tables from the precinct-ingest JSON
# files and store them in R/sysdata.rda.
library(tidyverse)
library(jsonlite)
# Precinct metadata: simplifyVector collapses the JSON into a data frame.
precinct_values <- read_json("../precinct_data_api/precinct_data_api/precinct_ingest/values/pct_dict.json",
simplifyVector = T)
# Candidate metadata: nested JSON. Flatten the innermost lists to
# character vectors; named inner lists get one extra level of mapping.
candidate_values <- read_json("../precinct_data_api/precinct_data_api/precinct_ingest/values/cand_dict.json") %>%
  map(function(c){
    map(c, function(of){
      if (!is.null(names(of))){
        map(of, function(cd){
          purrr::flatten_chr(cd)
        })
      } else {
        purrr::flatten_chr(of)
      }
    })
  })
# NOTE(review): devtools::use_data() was deprecated in favour of
# usethis::use_data() -- confirm the pinned devtools version still
# exports it before re-running this script.
devtools::use_data(precinct_values, candidate_values,
internal = TRUE, overwrite = TRUE)
|
/data-raw/buildtables.R
|
no_license
|
yougov-datascience/ygresults
|
R
| false
| false
| 700
|
r
|
# Build the package-internal lookup tables from the precinct-ingest JSON
# files and store them in R/sysdata.rda.
library(tidyverse)
library(jsonlite)
# Precinct metadata: simplifyVector collapses the JSON into a data frame.
precinct_values <- read_json("../precinct_data_api/precinct_data_api/precinct_ingest/values/pct_dict.json",
simplifyVector = T)
# Candidate metadata: nested JSON. Flatten the innermost lists to
# character vectors; named inner lists get one extra level of mapping.
candidate_values <- read_json("../precinct_data_api/precinct_data_api/precinct_ingest/values/cand_dict.json") %>%
  map(function(c){
    map(c, function(of){
      if (!is.null(names(of))){
        map(of, function(cd){
          purrr::flatten_chr(cd)
        })
      } else {
        purrr::flatten_chr(of)
      }
    })
  })
# NOTE(review): devtools::use_data() was deprecated in favour of
# usethis::use_data() -- confirm the pinned devtools version still
# exports it before re-running this script.
devtools::use_data(precinct_values, candidate_values,
internal = TRUE, overwrite = TRUE)
|
###################################################
### code chunk number 32: Cs704_multivariate
###################################################
# Observation-model settings: A = "zero" fixes the offsets at zero;
# R = "diagonal and equal" shares one observation variance across series.
mod.list.y <- list(A = "zero", R = "diagonal and equal")
|
/inst/userguide/figures/STS--Cs704_multivariate.R
|
permissive
|
nwfsc-timeseries/MARSS
|
R
| false
| false
| 214
|
r
|
###################################################
### code chunk number 32: Cs704_multivariate
###################################################
# Observation-model settings: A = "zero" fixes the offsets at zero;
# R = "diagonal and equal" shares one observation variance across series.
mod.list.y <- list(A = "zero", R = "diagonal and equal")
|
# run_analysis.R: build a tidy per-subject/per-activity summary of the
# UCI HAR smartphone data set.
# Installing and loading necessary packages
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)
if (!require("reshape2")) install.packages("reshape2")
# BUG FIX: melt() used below is provided by reshape2, not tidyr (the
# original loaded tidyr here, leaving melt() undefined at run time).
library(reshape2)
# Read training and test data and combine them into three separate data frames
trainSubjects <- read.table("./train/subject_train.txt")
trainActivities <- read.table("./train/y_train.txt")
trainMeasures <- read.table("./train/X_train.txt")
testSubjects <- read.table("./test/subject_test.txt")
testActivities <- read.table("./test/y_test.txt")
testMeasures <- read.table("./test/X_test.txt")
allSubjects <- rbind(trainSubjects, testSubjects)
allActivities <- rbind(trainActivities, testActivities)
allMeasures <- rbind(trainMeasures, testMeasures)
# Replace activity label IDs with activity label names
labels <- read.table("./activity_labels.txt")
features <- read.table("./features.txt")
names(labels) <- c("label_id", "label_name")
names(features) <- c("feature_id", "feature_name")
names(allSubjects) <- "subject_id"
names(allActivities) <- "activity"
allActivities <- mutate(allActivities, activity = labels[activity, "label_name"])
# Look for feature names containing "mean" or "std" to subset the measurements data frame
colWanted <- grep("mean\\(\\)|std\\(\\)", features$feature_name)
allMeasures <- allMeasures[, colWanted]
# Modify feature names to make them more descriptive
names(allMeasures) <- features$feature_name[colWanted]
names(allMeasures) <- gsub("-", "", names(allMeasures))
names(allMeasures) <- gsub("\\(", "", names(allMeasures))
names(allMeasures) <- gsub("\\)", "", names(allMeasures))
names(allMeasures) <- gsub("mean", "Mean", names(allMeasures))
names(allMeasures) <- gsub("std", "Std", names(allMeasures))
names(allMeasures) <- gsub("BodyBody", "Body", names(allMeasures))
# Finally merge the three data frames into one
mergedData <- cbind(allSubjects, allActivities, allMeasures)
# Melt the "wide" data frame into a "narrow" data frame
meltData <- melt(mergedData, id.vars = c("subject_id", "activity"))
# Calculate the average of each feature for each activity and each subject
avgData <- aggregate(meltData$value, list(meltData$subject_id, meltData$activity, meltData$variable), mean)
names(avgData) <- c("subject", "activity", "feature", "average")
# Write the data frame into a txt file
write.table(avgData, file = "tidy_data.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
tradewind/getting_cleaning_data_project
|
R
| false
| false
| 2,389
|
r
|
# run_analysis.R: build a tidy per-subject/per-activity summary of the
# UCI HAR smartphone data set.
# Installing and loading necessary packages
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)
if (!require("reshape2")) install.packages("reshape2")
# BUG FIX: melt() used below is provided by reshape2, not tidyr (the
# original loaded tidyr here, leaving melt() undefined at run time).
library(reshape2)
# Read training and test data and combine them into three separate data frames
trainSubjects <- read.table("./train/subject_train.txt")
trainActivities <- read.table("./train/y_train.txt")
trainMeasures <- read.table("./train/X_train.txt")
testSubjects <- read.table("./test/subject_test.txt")
testActivities <- read.table("./test/y_test.txt")
testMeasures <- read.table("./test/X_test.txt")
allSubjects <- rbind(trainSubjects, testSubjects)
allActivities <- rbind(trainActivities, testActivities)
allMeasures <- rbind(trainMeasures, testMeasures)
# Replace activity label IDs with activity label names
labels <- read.table("./activity_labels.txt")
features <- read.table("./features.txt")
names(labels) <- c("label_id", "label_name")
names(features) <- c("feature_id", "feature_name")
names(allSubjects) <- "subject_id"
names(allActivities) <- "activity"
allActivities <- mutate(allActivities, activity = labels[activity, "label_name"])
# Look for feature names containing "mean" or "std" to subset the measurements data frame
colWanted <- grep("mean\\(\\)|std\\(\\)", features$feature_name)
allMeasures <- allMeasures[, colWanted]
# Modify feature names to make them more descriptive
names(allMeasures) <- features$feature_name[colWanted]
names(allMeasures) <- gsub("-", "", names(allMeasures))
names(allMeasures) <- gsub("\\(", "", names(allMeasures))
names(allMeasures) <- gsub("\\)", "", names(allMeasures))
names(allMeasures) <- gsub("mean", "Mean", names(allMeasures))
names(allMeasures) <- gsub("std", "Std", names(allMeasures))
names(allMeasures) <- gsub("BodyBody", "Body", names(allMeasures))
# Finally merge the three data frames into one
mergedData <- cbind(allSubjects, allActivities, allMeasures)
# Melt the "wide" data frame into a "narrow" data frame
meltData <- melt(mergedData, id.vars = c("subject_id", "activity"))
# Calculate the average of each feature for each activity and each subject
avgData <- aggregate(meltData$value, list(meltData$subject_id, meltData$activity, meltData$variable), mean)
names(avgData) <- c("subject", "activity", "feature", "average")
# Write the data frame into a txt file
write.table(avgData, file = "tidy_data.txt", row.names = FALSE)
|
## Iterative (stochastic-approximation) Multiple Factor Analysis.
##
## Each column group's covariance block Ck tracks its leading
## eigenvalue/eigenvector, defining the per-variable metric M1; global
## factors are then updated with a Robbins-Monro-style step (step size
## n^-eps) and re-orthonormalised under that metric.
##
## Arguments:
##   data      numeric matrix/data.frame (rows = individuals)
##   groups    integer vector of group sizes partitioning the columns
##   stream    if TRUE, run a fixed 300 iterations; otherwise iterate
##             until exec_time elapses (at most nrow(data) - 1 iterations)
##   nb_fact   number of factors to extract
##   principal_factors, principal_axes, eigenvalues, corr
##             switches selecting which outputs are accumulated
##   exec_time time budget compared against proc.time() when stream=FALSE
##
## Returns a list: X (factors), Xk (per-group vectors), Ck (covariance
## blocks), Mk (group metrics), L1 (eigenvalue estimates), A (principal
## axes), Corr (variable/factor correlations), zbar (column means).
##
## NOTE(review): depends on orth_Gram_Schmidt_metrique_diag(), defined
## elsewhere in this package.
MFA_iter<-function(data,groups,stream=TRUE,nb_fact,principal_factors=TRUE,principal_axes=FALSE,eigenvalues=FALSE,corr=FALSE,exec_time)
{
  if (stream==TRUE) {iter_init <- 300}
  eps <- 0.9
  p <- ncol(data)
  nb_ind <- nrow(data)
  q<-length(groups)
  ## first column index of each group within data
  indices_debut_groupes<-c(1,cumsum(groups)+1)
  A<-X <- Y<-Corr <- vector("list",nb_fact)
  Xk<-Mk<-Ck<-vector("list",q)
  L1 <- vector(length=nb_fact)
  Lk_max<-vector(length=q)
  ## biased (1/n) sample covariance and column means
  C <- ((nb_ind-1)/nb_ind)*cov(data)
  zbar <- apply(data,2,mean)
  ## random initialisation of the factor estimates
  for(i in 1:nb_fact)
  { X[[i]] <- Y[[i]] <- runif(p,min=-1,max=1)
  }
  for (k in 1:q){
    Ck[[k]] <- C[indices_debut_groupes[k]:(indices_debut_groupes[k+1]-1),indices_debut_groupes[k]:(indices_debut_groupes[k+1]-1)]
    Mk[[k]] <- 1/diag(Ck[[k]])
    Xk[[k]] <- runif(groups[k],min=-1,max=1)
  }
  if (principal_axes==TRUE) {A<-X}
  if (eigenvalues==TRUE)
  { L1 <- nb_fact:1
  }
  if(corr==TRUE)
  { Corr<-X
  }
  n <- 1
  if (stream==TRUE)
  { while (n<=iter_init)
    { an <- n^(-eps)  ## decreasing step size of the approximation
      M1<-c()
      ## update each group's leading eigenpair and build the MFA metric
      for (k in 1:q){
        temp<-Ck[[k]]%*%Xk[[k]]
        Lk_max[k] <- sum(Xk[[k]]*temp) / sum(Xk[[k]]^2 /Mk[[k]] )
        Xk[[k]] <- Xk[[k]] + an * (Mk[[k]]*temp -Lk_max[k]*Xk[[k]] )
        M1<-c(M1,Mk[[k]]/Lk_max[k])
      }
      ## re-orthonormalise the factors under the diagonal metric 1/M1
      if (n>=2)
      { X <- orth_Gram_Schmidt_metrique_diag(1/M1,Y)
      }
      for (i in 1:nb_fact)
      { temp <- C%*%X[[i]]
        FX <- sum(X[[i]] * temp) / sum(X[[i]]^2 / M1)
        Y[[i]] <- X[[i]] + an*(M1*temp - FX*X[[i]])
        if ( principal_axes==TRUE) { A[[i]]=(1/M1)*Y[[i]]
        }
        if (eigenvalues==TRUE) { L1[i] <- L1[i] - an*(L1[i] - FX)
        }
        if (corr==TRUE) { Corr[[i]] <- sqrt(L1[i])*(A[[i]]/sqrt(sum(A[[i]]^2*M1)))*sqrt(M1)
        }
      }
      n <- n+1
    }
  } else {
    ## time-budgeted variant: same update loop, stopped by exec_time
    debut_chrono<-proc.time()
    while ((n<nrow(data)) && ( (proc.time()-debut_chrono)<exec_time ))
    { an <- n^(-eps)
      M1<-c()
      for (k in 1:q){
        temp<-Ck[[k]]%*%Xk[[k]]
        Lk_max[k] <- sum(Xk[[k]]*temp) / sum(Xk[[k]]^2 /Mk[[k]] )
        Xk[[k]] <- Xk[[k]] + an * (Mk[[k]]*temp -Lk_max[k]*Xk[[k]] )
        M1<-c(M1,Mk[[k]]/Lk_max[k])
      }
      if (n>=2)
      { X <- orth_Gram_Schmidt_metrique_diag(1/M1,Y)
      }
      for (i in 1:nb_fact)
      { temp <- C%*%X[[i]]
        FX <- sum(X[[i]] * temp) / sum(X[[i]]^2 / M1)
        Y[[i]] <- X[[i]] + an*(M1*temp - FX*X[[i]])
        if ( principal_axes==TRUE) { A[[i]]=(1/M1)*Y[[i]]
        }
        if (eigenvalues==TRUE) { L1[i] <- L1[i] - an*(L1[i] - FX)
        }
        if (corr==TRUE) { Corr[[i]] <- sqrt(L1[i])*(A[[i]]/sqrt(sum(A[[i]]^2*M1)))*sqrt(M1)
        }
      }
      n <- n+1
    }
  }
  return(list(X=X,Xk=Xk,Ck=Ck,Mk=Mk,L1=L1,A=A,Corr=Corr,zbar=zbar))
}
|
/R/MFA_iter_function.R
|
no_license
|
cran/factas
|
R
| false
| false
| 3,102
|
r
|
## Iterative (stochastic-approximation) Multiple Factor Analysis.
##
## Each column group's covariance block Ck tracks its leading
## eigenvalue/eigenvector, defining the per-variable metric M1; global
## factors are then updated with a Robbins-Monro-style step (step size
## n^-eps) and re-orthonormalised under that metric.
##
## Arguments:
##   data      numeric matrix/data.frame (rows = individuals)
##   groups    integer vector of group sizes partitioning the columns
##   stream    if TRUE, run a fixed 300 iterations; otherwise iterate
##             until exec_time elapses (at most nrow(data) - 1 iterations)
##   nb_fact   number of factors to extract
##   principal_factors, principal_axes, eigenvalues, corr
##             switches selecting which outputs are accumulated
##   exec_time time budget compared against proc.time() when stream=FALSE
##
## Returns a list: X (factors), Xk (per-group vectors), Ck (covariance
## blocks), Mk (group metrics), L1 (eigenvalue estimates), A (principal
## axes), Corr (variable/factor correlations), zbar (column means).
##
## NOTE(review): depends on orth_Gram_Schmidt_metrique_diag(), defined
## elsewhere in this package.
MFA_iter<-function(data,groups,stream=TRUE,nb_fact,principal_factors=TRUE,principal_axes=FALSE,eigenvalues=FALSE,corr=FALSE,exec_time)
{
  if (stream==TRUE) {iter_init <- 300}
  eps <- 0.9
  p <- ncol(data)
  nb_ind <- nrow(data)
  q<-length(groups)
  ## first column index of each group within data
  indices_debut_groupes<-c(1,cumsum(groups)+1)
  A<-X <- Y<-Corr <- vector("list",nb_fact)
  Xk<-Mk<-Ck<-vector("list",q)
  L1 <- vector(length=nb_fact)
  Lk_max<-vector(length=q)
  ## biased (1/n) sample covariance and column means
  C <- ((nb_ind-1)/nb_ind)*cov(data)
  zbar <- apply(data,2,mean)
  ## random initialisation of the factor estimates
  for(i in 1:nb_fact)
  { X[[i]] <- Y[[i]] <- runif(p,min=-1,max=1)
  }
  for (k in 1:q){
    Ck[[k]] <- C[indices_debut_groupes[k]:(indices_debut_groupes[k+1]-1),indices_debut_groupes[k]:(indices_debut_groupes[k+1]-1)]
    Mk[[k]] <- 1/diag(Ck[[k]])
    Xk[[k]] <- runif(groups[k],min=-1,max=1)
  }
  if (principal_axes==TRUE) {A<-X}
  if (eigenvalues==TRUE)
  { L1 <- nb_fact:1
  }
  if(corr==TRUE)
  { Corr<-X
  }
  n <- 1
  if (stream==TRUE)
  { while (n<=iter_init)
    { an <- n^(-eps)  ## decreasing step size of the approximation
      M1<-c()
      ## update each group's leading eigenpair and build the MFA metric
      for (k in 1:q){
        temp<-Ck[[k]]%*%Xk[[k]]
        Lk_max[k] <- sum(Xk[[k]]*temp) / sum(Xk[[k]]^2 /Mk[[k]] )
        Xk[[k]] <- Xk[[k]] + an * (Mk[[k]]*temp -Lk_max[k]*Xk[[k]] )
        M1<-c(M1,Mk[[k]]/Lk_max[k])
      }
      ## re-orthonormalise the factors under the diagonal metric 1/M1
      if (n>=2)
      { X <- orth_Gram_Schmidt_metrique_diag(1/M1,Y)
      }
      for (i in 1:nb_fact)
      { temp <- C%*%X[[i]]
        FX <- sum(X[[i]] * temp) / sum(X[[i]]^2 / M1)
        Y[[i]] <- X[[i]] + an*(M1*temp - FX*X[[i]])
        if ( principal_axes==TRUE) { A[[i]]=(1/M1)*Y[[i]]
        }
        if (eigenvalues==TRUE) { L1[i] <- L1[i] - an*(L1[i] - FX)
        }
        if (corr==TRUE) { Corr[[i]] <- sqrt(L1[i])*(A[[i]]/sqrt(sum(A[[i]]^2*M1)))*sqrt(M1)
        }
      }
      n <- n+1
    }
  } else {
    ## time-budgeted variant: same update loop, stopped by exec_time
    debut_chrono<-proc.time()
    while ((n<nrow(data)) && ( (proc.time()-debut_chrono)<exec_time ))
    { an <- n^(-eps)
      M1<-c()
      for (k in 1:q){
        temp<-Ck[[k]]%*%Xk[[k]]
        Lk_max[k] <- sum(Xk[[k]]*temp) / sum(Xk[[k]]^2 /Mk[[k]] )
        Xk[[k]] <- Xk[[k]] + an * (Mk[[k]]*temp -Lk_max[k]*Xk[[k]] )
        M1<-c(M1,Mk[[k]]/Lk_max[k])
      }
      if (n>=2)
      { X <- orth_Gram_Schmidt_metrique_diag(1/M1,Y)
      }
      for (i in 1:nb_fact)
      { temp <- C%*%X[[i]]
        FX <- sum(X[[i]] * temp) / sum(X[[i]]^2 / M1)
        Y[[i]] <- X[[i]] + an*(M1*temp - FX*X[[i]])
        if ( principal_axes==TRUE) { A[[i]]=(1/M1)*Y[[i]]
        }
        if (eigenvalues==TRUE) { L1[i] <- L1[i] - an*(L1[i] - FX)
        }
        if (corr==TRUE) { Corr[[i]] <- sqrt(L1[i])*(A[[i]]/sqrt(sum(A[[i]]^2*M1)))*sqrt(M1)
        }
      }
      n <- n+1
    }
  }
  return(list(X=X,Xk=Xk,Ck=Ck,Mk=Mk,L1=L1,A=A,Corr=Corr,zbar=zbar))
}
|
#' Conversion of a single genus and species name to a single MFG. Uses species.mfg.library
#'
#' @param genus Character string: genus name
#' @param species Character string: species name
#' @param flag Resolve ambiguous mfg: 1 = return(NA),2= manual selection
#' @param mfgDbase data.frame of species MFG classifications. Defaults to the supplied species.mfg.library data object
#'
#' @export species_to_mfg
#'
#' @return a data frame with MFG classification and diagnostic information.
#' ambiguous.mfg=1 if multiple possible mfg matches
#' genus.classification=1 if no exact match was found with genus + species name
#' partial.match=1 if mfg was based on fuzzy matching of taxonomic name.
#'
#' @examples
#' species_to_mfg('Scenedesmus','bijuga')
#' #returns "11a-NakeChlor"
species_to_mfg<-function(genus,species="",flag=1,mfgDbase=NA)#set flag to two if you want to
  #manually resolve ambiguous mfg class.
  #default behavior is to set ambiguous classes to NA (flag=1)
{
  # Fall back to the packaged classification library when no database is given.
  if(!is.data.frame(mfgDbase))
  {
    mfgDbase<-algaeClassify::species_mfg_library
  }
  # Remove duplicated rows so identical entries do not inflate match counts.
  mfgDbase<-mfgDbase[!duplicated(mfgDbase),]
  # Indicator: 1 if multiple possible MFG matches were found.
  ambiguous.mfg<-0
  # Indicator: 1 if classification was based on a genus-level match only.
  genus.classification<-0
  # Indicator: 1 if fuzzy (bestmatch) name matching was used.
  partial.match<-0
  genus<-gsub('Unknown ','',genus)
  # Species epithets absent from the database (spp., sp., etc.) are blanked
  # so the lookup falls through to a genus-level match.
  if(!(species %in% mfgDbase$species)){species<-''}
  # Hardcoded fix for a common misspelling.
  if(genus=='Hyloraphidium'){genus<-'Hyaloraphidium'}
  if(species==''){genus.classification<-1}
  # Check for an exact genus + species match first.
  mfg<-mfgDbase$MFG[mfgDbase$genus==genus &
                      mfgDbase$species==species]
  # BUG FIX: the original condition was length(unique(mfg)==1), i.e. the
  # length of a logical vector, which is nonzero (truthy) whenever ANY match
  # exists. The intent is to accept the species-level result only when it is
  # a single unique MFG, and otherwise fall back to a genus-level lookup.
  if(length(unique(mfg))==1)
  {
    mfg<-unique(mfg)
  }else{
    species<-''
    mfg<-mfgDbase$MFG[mfgDbase$genus==genus & mfgDbase$species==species]
    genus.classification<-1
  }
  # If there is no genus-only match, accept any species under the same genus.
  if(length(unique(mfg))==0)
  {
    mfg<-mfgDbase$MFG[mfgDbase$genus==genus]
    genus.classification<-1
  }
  if(length(unique(mfg))==0)
  {
    # Last resort: fuzzy genus matching.
    genus.bestmatch<-bestmatch(enteredName=genus,possibleNames=unique(mfgDbase$genus))
    if(genus.bestmatch != 'multiplePartialMatches' & !is.na(genus.bestmatch))
    {
      mfg<-mfgDbase$MFG[mfgDbase$genus==genus.bestmatch]
      genus.classification<-1
      partial.match<-1
    }
  }
  # Now mfg should hold 0, 1, or 2 unique non-NA values. flag==2 lets the
  # user interactively choose between two candidate MFGs; flag==1 (default)
  # resolves the ambiguity to NA.
  if(length(unique(mfg[!is.na(mfg)]))==2)
  {
    ambiguous.mfg<-1
    if(flag==1)
    {
      mfg<-NA
    }else if (flag==2)
    {
      mfg<-unique(mfg)
      cat(paste('\n two possible mfgs for the species: ',genus,species))
      cat(paste('\n1:',mfg[1]))
      cat(paste('\n2:',mfg[2]))
      cat(paste('\n3:', NA))
      choice<-as.numeric(readline(prompt='\nenter your choice: (1,2,3): \n'))
      mfg<-mfg[choice]
    }
  }
  mfg<-unique(mfg[!is.na(mfg)])
  if(length(mfg)==0 )
  {
    mfg<-NA
  }else
  {
    mfg<-mfg[1]
  }
  # Return the classification along with the diagnostic indicators.
  mfg.df<-data.frame(MFG=mfg,
                     ambiguous.mfg=ambiguous.mfg,
                     genus.classification=genus.classification,
                     partial.match=partial.match,
                     flag=flag)
  return(mfg.df)
}
|
/R/species_to_mfg.r
|
no_license
|
cran/algaeClassify
|
R
| false
| false
| 3,548
|
r
|
#' Conversion of a single genus and species name to a single MFG. Uses species.mfg.library
#'
#' @param genus Character string: genus name
#' @param species Character string: species name
#' @param flag Resolve ambiguous mfg: 1 = return(NA),2= manual selection
#' @param mfgDbase data.frame of species MFG classifications. Defaults to the supplied species.mfg.library data object
#'
#' @export species_to_mfg
#'
#' @return a data frame with MFG classification and diagnostic information.
#' ambiguous.mfg=1 if multiple possible mfg matches
#' genus.classification=1 if no exact match was found with genus + species name
#' partial.match=1 if mfg was based on fuzzy matching of taxonomic name.
#'
#' @examples
#' species_to_mfg('Scenedesmus','bijuga')
#' #returns "11a-NakeChlor"
species_to_mfg<-function(genus,species="",flag=1,mfgDbase=NA)#set flag to two if you want to
  #manually resolve ambiguous mfg class.
  #default behavior is to set ambiguous classes to NA (flag=1)
{
  # Fall back to the packaged classification library when no database is given.
  if(!is.data.frame(mfgDbase))
  {
    mfgDbase<-algaeClassify::species_mfg_library
  }
  # Remove duplicated rows so identical entries do not inflate match counts.
  mfgDbase<-mfgDbase[!duplicated(mfgDbase),]
  # Indicator: 1 if multiple possible MFG matches were found.
  ambiguous.mfg<-0
  # Indicator: 1 if classification was based on a genus-level match only.
  genus.classification<-0
  # Indicator: 1 if fuzzy (bestmatch) name matching was used.
  partial.match<-0
  genus<-gsub('Unknown ','',genus)
  # Species epithets absent from the database (spp., sp., etc.) are blanked
  # so the lookup falls through to a genus-level match.
  if(!(species %in% mfgDbase$species)){species<-''}
  # Hardcoded fix for a common misspelling.
  if(genus=='Hyloraphidium'){genus<-'Hyaloraphidium'}
  if(species==''){genus.classification<-1}
  # Check for an exact genus + species match first.
  mfg<-mfgDbase$MFG[mfgDbase$genus==genus &
                      mfgDbase$species==species]
  # BUG FIX: the original condition was length(unique(mfg)==1), i.e. the
  # length of a logical vector, which is nonzero (truthy) whenever ANY match
  # exists. The intent is to accept the species-level result only when it is
  # a single unique MFG, and otherwise fall back to a genus-level lookup.
  if(length(unique(mfg))==1)
  {
    mfg<-unique(mfg)
  }else{
    species<-''
    mfg<-mfgDbase$MFG[mfgDbase$genus==genus & mfgDbase$species==species]
    genus.classification<-1
  }
  # If there is no genus-only match, accept any species under the same genus.
  if(length(unique(mfg))==0)
  {
    mfg<-mfgDbase$MFG[mfgDbase$genus==genus]
    genus.classification<-1
  }
  if(length(unique(mfg))==0)
  {
    # Last resort: fuzzy genus matching.
    genus.bestmatch<-bestmatch(enteredName=genus,possibleNames=unique(mfgDbase$genus))
    if(genus.bestmatch != 'multiplePartialMatches' & !is.na(genus.bestmatch))
    {
      mfg<-mfgDbase$MFG[mfgDbase$genus==genus.bestmatch]
      genus.classification<-1
      partial.match<-1
    }
  }
  # Now mfg should hold 0, 1, or 2 unique non-NA values. flag==2 lets the
  # user interactively choose between two candidate MFGs; flag==1 (default)
  # resolves the ambiguity to NA.
  if(length(unique(mfg[!is.na(mfg)]))==2)
  {
    ambiguous.mfg<-1
    if(flag==1)
    {
      mfg<-NA
    }else if (flag==2)
    {
      mfg<-unique(mfg)
      cat(paste('\n two possible mfgs for the species: ',genus,species))
      cat(paste('\n1:',mfg[1]))
      cat(paste('\n2:',mfg[2]))
      cat(paste('\n3:', NA))
      choice<-as.numeric(readline(prompt='\nenter your choice: (1,2,3): \n'))
      mfg<-mfg[choice]
    }
  }
  mfg<-unique(mfg[!is.na(mfg)])
  if(length(mfg)==0 )
  {
    mfg<-NA
  }else
  {
    mfg<-mfg[1]
  }
  # Return the classification along with the diagnostic indicators.
  mfg.df<-data.frame(MFG=mfg,
                     ambiguous.mfg=ambiguous.mfg,
                     genus.classification=genus.classification,
                     partial.match=partial.match,
                     flag=flag)
  return(mfg.df)
}
|
# Shared fixtures for SingleCellAssay tests: load packages, define the
# column-role variables used to build a FluidigmAssay from the vbeta example
# data, and sanity-check the construction.
library(testthat)
library(devtools)
library(Biobase)
library(reshape)

# Column roles (names of columns in vbeta, not values).
geneid <- "Gene"
primerid <- 'Gene'
measurement <- 'et'
idvars <- c('Subject.ID', 'Chip.Number', 'Stim.Condition', 'Population', 'Well')
ncells <- 'Number.of.Cells'
phenovars <- NULL
cellvars <- 'Experiment.Number'
featurevars <- NULL

##Tests depending on vbeta
data(vbeta)
test_that("vbeta can be loaded", {
  expect_that(vbeta, is_a("data.frame"))
})

# Expression threshold et = 40 - Ct; undetected wells (NA Ct) become 0.
vbeta$et <- ifelse(is.na(vbeta$Ct), 0, 40 - vbeta$Ct)
fd <- FluidigmAssay(vbeta, idvars = idvars, primerid = primerid,
                    measurement = measurement, ncells = ncells, geneid = geneid)
test_that('could create FluidigmAssay', {
  expect_that(fd, is_a('SingleCellAssay'))
  expect_that(fd, is_a('FluidigmAssay'))
})
|
/inst/tests/common-fixtures.R
|
no_license
|
chesterni/SingleCellAssay
|
R
| false
| false
| 703
|
r
|
# Shared fixtures for SingleCellAssay tests: loads packages, defines the
# column-role variables used to construct a FluidigmAssay from the vbeta
# example data, and sanity-checks the construction.
library(testthat)
library(devtools)
library(Biobase)
library(reshape)
# Column roles in the vbeta data frame (column names, not values).
geneid="Gene"
primerid='Gene'
measurement='et'
idvars=c('Subject.ID', 'Chip.Number', 'Stim.Condition', 'Population', 'Well')
ncells <- 'Number.of.Cells'
phenovars=NULL
cellvars='Experiment.Number'
featurevars=NULL
##Tests depending on vbeta
data(vbeta)
test_that("vbeta can be loaded",{
expect_that(vbeta,is_a("data.frame"))
})
# Expression threshold et = 40 - Ct; undetected wells (NA Ct) become 0.
vbeta$et <- ifelse(is.na(vbeta$Ct), 0, 40-vbeta$Ct)
fd <- FluidigmAssay(vbeta, idvars=idvars, primerid=primerid, measurement=measurement, ncells=ncells, geneid=geneid)
test_that('could create FluidigmAssay', {
expect_that(fd, is_a('SingleCellAssay'))
expect_that(fd, is_a('FluidigmAssay'))
})
|
## Ford Fishman
# Plots per-simulation population dynamics from CSV output files that share a
# common root name: <root>total.png (summed densities by type) and
# <root>strains.png (every individual strain, showing coevolution).
suppressMessages(library("ggplot2"))
suppressMessages(library("tidyr"))
suppressMessages(library("optparse"))
suppressMessages(library("scales"))
suppressMessages(library("MASS"))
# Command-line interface: -f/--file gives the root path of the output files.
option_list <- list(
  make_option(c("-f", "--file"), type="character", default=NULL,
              help="dataset file root name", metavar="character")
  # make_option(c("-o", "--out"), type="character", default="out.png",
  #               help="output file name [default= %default]", metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
if (is.null(opt$file)){
  print_help(opt_parser)
  # BUG FIX: the original message ended in a literal ".n"; "\n" was intended.
  stop("At least one argument must be supplied (input file).\n", call.=FALSE)
}
# the datafiles: <root>main.csv, <root>richness.csv, <root>full.csv
dfile1 <- paste0(opt$file, "main.csv")
dfile2 <- paste0(opt$file, "richness.csv")
dfile3 <- paste0(opt$file, "full.csv")
df1 <- read.csv(file=dfile1, header = T, row.names = 1)
df2 <- read.csv(file=dfile2, header = T, row.names = 1)  # read but not plotted below
df3 <- read.csv(file=dfile3, header = T, row.names = 1)
# Long format: one row per (timestep, type), density on a log10 scale.
df1.1 <- gather(df1, key = type, value = density, N:I)
df1.1$density <- log10(df1.1$density)
## plots
## plot of the sums across groups
p1 <- ggplot(df1.1, aes(x=t,y=density, group = type)) +
  geom_line(aes(color=type)) +
  scale_y_continuous("Density",labels = math_format(10^.x), expand = c(0,0)) +
  scale_x_continuous("Timestep")+
  scale_color_manual("",
                     breaks = c("N","S","I","P"),
                     values = c("black","blue","green","red"),
                     labels = c("Total Host","Initial Host","Spacer Variant","Phage")) +
  annotation_logticks(sides = "l") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        axis.text = element_text(size = 11),
        axis.ticks.length = unit(5,"pt"))
output1 <- paste0(opt$file, "total.png")
ggsave(plot = p1, filename = output1, width = 7, height = 5)
# all strains: shows coevolution
p2 <- ggplot(df3, aes(x=timestep,y=log10(pop), group = name))+
  geom_line(aes(color=type)) +
  scale_y_continuous("Density", limits = c(0, 8),breaks = seq(0,10,by = 2),labels = math_format(10^.x), expand = c(0,0)) +
  scale_x_continuous("Timestep")+
  scale_color_manual("", labels = c("Initial Strain","Spacer Variant","Phage"),values = c("blue","green","red")) +
  annotation_logticks(sides = "l") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        axis.ticks.length = unit(5,"pt"))
output2 <- paste0(opt$file,"strains.png")
ggsave(plot = p2, filename = output2, width = 7, height = 5)
|
/r/plotGraphs1Sim.R
|
no_license
|
fordfishman/CommunityImmunity
|
R
| false
| false
| 2,898
|
r
|
## Ford Fishman
# Plots per-simulation population dynamics from CSV output files that share a
# common root name: <root>total.png (summed densities by type) and
# <root>strains.png (every individual strain, showing coevolution).
suppressMessages(library("ggplot2"))
suppressMessages(library("tidyr"))
suppressMessages(library("optparse"))
suppressMessages(library("scales"))
suppressMessages(library("MASS"))
# Command-line interface: -f/--file gives the root path of the output files.
option_list <- list(
  make_option(c("-f", "--file"), type="character", default=NULL,
              help="dataset file root name", metavar="character")
  # make_option(c("-o", "--out"), type="character", default="out.png",
  #               help="output file name [default= %default]", metavar="character")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
if (is.null(opt$file)){
  print_help(opt_parser)
  # BUG FIX: the original message ended in a literal ".n"; "\n" was intended.
  stop("At least one argument must be supplied (input file).\n", call.=FALSE)
}
# the datafiles: <root>main.csv, <root>richness.csv, <root>full.csv
dfile1 <- paste0(opt$file, "main.csv")
dfile2 <- paste0(opt$file, "richness.csv")
dfile3 <- paste0(opt$file, "full.csv")
df1 <- read.csv(file=dfile1, header = T, row.names = 1)
df2 <- read.csv(file=dfile2, header = T, row.names = 1)  # read but not plotted below
df3 <- read.csv(file=dfile3, header = T, row.names = 1)
# Long format: one row per (timestep, type), density on a log10 scale.
df1.1 <- gather(df1, key = type, value = density, N:I)
df1.1$density <- log10(df1.1$density)
## plots
## plot of the sums across groups
p1 <- ggplot(df1.1, aes(x=t,y=density, group = type)) +
  geom_line(aes(color=type)) +
  scale_y_continuous("Density",labels = math_format(10^.x), expand = c(0,0)) +
  scale_x_continuous("Timestep")+
  scale_color_manual("",
                     breaks = c("N","S","I","P"),
                     values = c("black","blue","green","red"),
                     labels = c("Total Host","Initial Host","Spacer Variant","Phage")) +
  annotation_logticks(sides = "l") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        axis.text = element_text(size = 11),
        axis.ticks.length = unit(5,"pt"))
output1 <- paste0(opt$file, "total.png")
ggsave(plot = p1, filename = output1, width = 7, height = 5)
# all strains: shows coevolution
p2 <- ggplot(df3, aes(x=timestep,y=log10(pop), group = name))+
  geom_line(aes(color=type)) +
  scale_y_continuous("Density", limits = c(0, 8),breaks = seq(0,10,by = 2),labels = math_format(10^.x), expand = c(0,0)) +
  scale_x_continuous("Timestep")+
  scale_color_manual("", labels = c("Initial Strain","Spacer Variant","Phage"),values = c("blue","green","red")) +
  annotation_logticks(sides = "l") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        axis.ticks.length = unit(5,"pt"))
output2 <- paste0(opt$file,"strains.png")
ggsave(plot = p2, filename = output2, width = 7, height = 5)
|
library(data.table)
library(lubridate)
# BUG FIX: the pipeline below uses %>%, mutate(), arrange(), and select(),
# which come from dplyr; the original script never loaded it and would fail
# at the first pipe with 'could not find function "%>%"'.
suppressMessages(library(dplyr))
### Load Historical data -----------
Path='Y:\\Ziwen\\Research\\Precipitation analysis\\Data\\'
#### Miami data (original header comment said "Syracuse"; this reads Miami)
#Get column names
Col_Nm=c('Date','Time','Precip_cm')
# Read the raw precipitation file (space separated, first line skipped),
# build a POSIXct timestamp from the Date and Time columns, and keep a tidy
# two-column series ordered by time.
MIAMI_Precip=fread(paste0(Path,'Miami\\Miami Precip.csv'),
skip=1,
sep=' ',
col.names = Col_Nm,
colClasses = c('character',
'character',
'numeric')) %>%
    mutate(Time=substr(Time,1,8)) %>%   # keep only HH:MM:SS of the time field
    mutate(Time=ymd_hms(paste(Date,Time))) %>%
    arrange(Time) %>%
    select(Time,Precip_cm)
|
/General-Functions/ReadMiamiPrecip.R
|
no_license
|
ZyuAFD/SWRE_Project
|
R
| false
| false
| 640
|
r
|
library(data.table)
library(lubridate)
# BUG FIX: the pipeline below uses %>%, mutate(), arrange(), and select(),
# which come from dplyr; the original script never loaded it and would fail
# at the first pipe with 'could not find function "%>%"'.
suppressMessages(library(dplyr))
### Load Historical data -----------
Path='Y:\\Ziwen\\Research\\Precipitation analysis\\Data\\'
#### Miami data (original header comment said "Syracuse"; this reads Miami)
#Get column names
Col_Nm=c('Date','Time','Precip_cm')
# Read the raw precipitation file (space separated, first line skipped),
# build a POSIXct timestamp from the Date and Time columns, and keep a tidy
# two-column series ordered by time.
MIAMI_Precip=fread(paste0(Path,'Miami\\Miami Precip.csv'),
skip=1,
sep=' ',
col.names = Col_Nm,
colClasses = c('character',
'character',
'numeric')) %>%
    mutate(Time=substr(Time,1,8)) %>%   # keep only HH:MM:SS of the time field
    mutate(Time=ymd_hms(paste(Date,Time))) %>%
    arrange(Time) %>%
    select(Time,Precip_cm)
|
# Unit tests for generate_community().
context("generate_community")

# Smoke test: the generator must run cleanly for a typical size, the
# minimal size, and the default arguments.
test_that("generate_community runs without error", {
  expect_silent(generate_community(100))
  expect_silent(generate_community(1))
  expect_silent(generate_community())
})

# Relative abundances are percentages, so each abundance column must
# total exactly 100.
test_that("relative abundances sum to 100", {
  community <- generate_community()
  expect_equal(sum(community$rDNA_relabund), 100)
  expect_equal(sum(community$rRNA_relabund), 100)
})

# The returned data frame holds one row per OTU, so a randomly chosen
# requested size must match the row count.
test_that("expected number of OTUs are generated", {
  n_otus <- sample(1:99999, 1)
  expect_equal(nrow(generate_community(n_otus)), n_otus)
})
|
/tests/testthat/test_generate_community.R
|
no_license
|
wilkox/rdrsimulate
|
R
| false
| false
| 506
|
r
|
# Unit tests for generate_community().
context("generate_community")
# Smoke test: typical, minimal, and default community sizes run cleanly.
test_that("generate_community runs without error", {
expect_silent(generate_community(100))
expect_silent(generate_community(1))
expect_silent(generate_community())
})
# Relative abundances are percentages, so each column must total 100.
test_that("relative abundances sum to 100", {
comm <- generate_community()
expect_equal(sum(comm$rDNA_relabund), 100)
expect_equal(sum(comm$rRNA_relabund), 100)
})
# One row per OTU: a random requested size must match the row count.
test_that("expected number of OTUs are generated", {
n <- sample(1:99999, 1)
expect_equal(nrow(generate_community(n)), n)
})
|
\name{computeConditionalCS_DeltaSDRMB}
\alias{computeConditionalCS_DeltaSDRMB}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computes conditional and hybridized confidence set for Delta = Delta^{SDRMB}(Mbar).
}
\description{
Computes the conditional confidence set and hybridized confidence set for Delta = Delta^{SDRMB}(Mbar). The set Delta^{SDRMB}(Mbar) adds an additional sign restriction to Delta^{SDRM}(Mbar) that restricts the sign of the bias to be either positive (delta >= 0) or negative (delta <= 0).
}
\usage{
computeConditionalCS_DeltaSDRMB(betahat, sigma, numPrePeriods, numPostPeriods,
l_vec = .basisVector(index = 1, size = numPostPeriods), Mbar = 0,
alpha = 0.05, hybrid_flag = "LF", hybrid_kappa = alpha/10,
returnLength = F, biasDirection = "positive", postPeriodMomentsOnly = T,
gridPoints=10^3, grid.ub = NA, grid.lb = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{betahat}{
Vector of estimated event study coefficients.
}
\item{sigma}{
Covariance matrix of event study coefficients.
}
\item{numPrePeriods}{
Number of pre-periods.
}
\item{numPostPeriods}{
Number of post-periods.
}
\item{l_vec}{
Vector of length numPostPeriods that describes the scalar parameter of interest, theta = l_vec'tau. Default equals to first basis vector, (1, 0, ..., 0)
}
\item{Mbar}{
Tuning parameter Mbar for Delta^{SDRM}(Mbar) that governs how different the maximal pre-period deviation from a linear trend may be from the maximal deviation from a linear trend in the post-treatment period. Default sets Mbar = 0. See Section 2.3.2 of Rambachan & Roth (2021) for more details.
}
\item{alpha}{
Desired level of the confidence set. Default equals 0.05 (corresponding to 95\% confidence interval)
}
\item{hybrid_flag}{
Flag for whether user wishes to compute a hybridized confidence set. "ARP" specifies the conditional confidence set "LF" specifies the conditional least-favorable confidence set. The conditional FLCI hybrid confidence set is not available for Delta^{SDRMB}(Mbar) since the FLCI is infinite length for this choice of Delta. See Section 3.3 and Section 5.3 of Rambachan & Roth (2021) for details. Default equals "LF".
}
\item{hybrid_kappa}{
Desired first-stage size of hybridized confidence set. Only specify this value if the user wishes to compute a hybridized confidence set. Default equals alpha/10. If user specifies hybrid_flag = "ARP", set this value to NULL.
}
\item{returnLength}{
Logical value. If TRUE, function only returns the length of the robust confidence. If FALSE, function returns dataframe that contains a grid of possible parameter values and a vector of zeros and ones associated with each value in the grid (one denotes that the grid value lies in the confidence set and zero denotes that the grid value does not fall within the confidence set.) Default equals FALSE.
}
\item{biasDirection}{
Specifies direction of bias restriction. If "positive", bias is restricted to be positive, delta >= 0. If "negative", bias is restricted to be negative, delta <= 0. Default equals "positive".
}
\item{postPeriodMomentsOnly}{
Logical value. If TRUE, function excludes moments for Delta^{SDRMB}(Mbar) that only include pre-period coefficients. Default equals TRUE.
}
\item{gridPoints}{
Number of grid points used in test inversion step. Default equals 1000.
}
\item{grid.ub}{
Upper bound of grid for test inversion. The user should only specify this if she wishes to manually specify the upper bound of the grid. Default equals NA and sets grid upper bound to equal the upper bound of the identified set under parallel trends plus 20*standard deviation of the point estimate, l_vec'betahat.
}
\item{grid.lb}{
Lower bound of grid for test inversion. The user should only specify this if she wishes to manually specify the lower bound of the grid. Default equals NA and sets grid lower bound to equal the lower bound of the identified set under parallel trends minus 20*standard deviation of the point estimate, l_vec'betahat.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
If returnLength equals TRUE, function returns a scalar that equals the length of the confidence interval. If returnLength equals FALSE, function returns a dataframe with columns
\item{grid}{
Vector of grid values used to construct the confidence interval by test inversion.
}
\item{accept}{
Vector of zeros-ones associated with grid values, where one denotes a grid value that falls within the confidence interval and zero denotes a grid value that falls outside the confidence interval.
}
}
\references{
Rambachan, Ashesh and Jonathan Roth. "An Honest Approach to Parallel Trends." 2021.
}
\author{
Ashesh Rambachan
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
|
/man/computeConditionalCS_DeltaSDRMB.Rd
|
no_license
|
ywang-econ/HonestDiD
|
R
| false
| false
| 5,122
|
rd
|
\name{computeConditionalCS_DeltaSDRMB}
\alias{computeConditionalCS_DeltaSDRMB}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computes conditional and hybridized confidence set for Delta = Delta^{SDRMB}(Mbar).
}
\description{
Computes the conditional confidence set and hybridized confidence set for Delta = Delta^{SDRMB}(Mbar). The set Delta^{SDRMB}(Mbar) adds an additional sign restriction to Delta^{SDRM}(Mbar) that restricts the sign of the bias to be either positive (delta >= 0) or negative (delta <= 0).
}
\usage{
computeConditionalCS_DeltaSDRMB(betahat, sigma, numPrePeriods, numPostPeriods,
l_vec = .basisVector(index = 1, size = numPostPeriods), Mbar = 0,
alpha = 0.05, hybrid_flag = "LF", hybrid_kappa = alpha/10,
returnLength = F, biasDirection = "positive", postPeriodMomentsOnly = T,
gridPoints=10^3, grid.ub = NA, grid.lb = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{betahat}{
Vector of estimated event study coefficients.
}
\item{sigma}{
Covariance matrix of event study coefficients.
}
\item{numPrePeriods}{
Number of pre-periods.
}
\item{numPostPeriods}{
Number of post-periods.
}
\item{l_vec}{
Vector of length numPostPeriods that describes the scalar parameter of interest, theta = l_vec'tau. Default equals to first basis vector, (1, 0, ..., 0)
}
\item{Mbar}{
Tuning parameter Mbar for Delta^{SDRM}(Mbar) that governs how different the maximal pre-period deviation from a linear trend may be from the maximal deviation from a linear trend in the post-treatment period. Default sets Mbar = 0. See Section 2.3.2 of Rambachan & Roth (2021) for more details.
}
\item{alpha}{
Desired level of the confidence set. Default equals 0.05 (corresponding to 95\% confidence interval)
}
\item{hybrid_flag}{
Flag for whether user wishes to compute a hybridized confidence set. "ARP" specifies the conditional confidence set "LF" specifies the conditional least-favorable confidence set. The conditional FLCI hybrid confidence set is not available for Delta^{SDRMB}(Mbar) since the FLCI is infinite length for this choice of Delta. See Section 3.3 and Section 5.3 of Rambachan & Roth (2021) for details. Default equals "LF".
}
\item{hybrid_kappa}{
Desired first-stage size of hybridized confidence set. Only specify this value if the user wishes to compute a hybridized confidence set. Default equals alpha/10. If user specifies hybrid_flag = "ARP", set this value to NULL.
}
\item{returnLength}{
Logical value. If TRUE, function only returns the length of the robust confidence. If FALSE, function returns dataframe that contains a grid of possible parameter values and a vector of zeros and ones associated with each value in the grid (one denotes that the grid value lies in the confidence set and zero denotes that the grid value does not fall within the confidence set.) Default equals FALSE.
}
\item{biasDirection}{
Specifies direction of bias restriction. If "positive", bias is restricted to be positive, delta >= 0. If "negative", bias is restricted to be negative, delta <= 0. Default equals "positive".
}
\item{postPeriodMomentsOnly}{
Logical value. If TRUE, function excludes moments for Delta^{SDRMB}(Mbar) that only include pre-period coefficients. Default equals TRUE.
}
\item{gridPoints}{
Number of grid points used in test inversion step. Default equals 1000.
}
\item{grid.ub}{
Upper bound of grid for test inversion. The user should only specify this if she wishes to manually specify the upper bound of the grid. Default equals NA and sets grid upper bound to equal the upper bound of the identified set under parallel trends plus 20*standard deviation of the point estimate, l_vec'betahat.
}
\item{grid.lb}{
Lower bound of grid for test inversion. The user should only specify this if she wishes to manually specify the lower bound of the grid. Default equals NA and sets grid lower bound to equal the lower bound of the identified set under parallel trends minus 20*standard deviation of the point estimate, l_vec'betahat.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
If returnLength equals TRUE, function returns a scalar that equals the length of the confidence interval. If returnLength equals FALSE, function returns a dataframe with columns
\item{grid}{
Vector of grid values used to construct the confidence interval by test inversion.
}
\item{accept}{
Vector of zeros-ones associated with grid values, where one denotes a grid value that falls within the confidence interval and zero denotes a grid value that falls outside the confidence interval.
}
}
\references{
Rambachan, Ashesh and Jonathan Roth. "An Honest Approach to Parallel Trends." 2021.
}
\author{
Ashesh Rambachan
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
|
library(plotly)
# Gantt-style plot of tasks across work areas.
# Read in data
df <- read.csv("C:/Users/Jorge/Dropbox/jorge_guede/TFG RESULTADOS/159TASKS/159TasksAreasGantt.txt", stringsAsFactors = F)
# Area column prefixes, in the y-axis order used by the original script:
# Area1_1..Area1_5 map to y = 1..5, Area2_1..Area2_3 map to y = 6..8.
# (The original repeated the add_trace() block verbatim eight times, once per
# area; this loop is the same logic driven by the prefix table.)
areas <- c(paste0("Area1_", 1:5), paste0("Area2_", 1:3))
# Initialize empty plot
p <- plot_ly()
# Each (task, area) pair is a separate trace — essentially a thick line plot.
# A trace is added only when the task actually spends time in that area
# (start != end). x-axis ticks are dates and handled automatically.
for (i in seq_len(nrow(df))) {
  for (a in seq_along(areas)) {
    area <- areas[a]
    start_col <- paste0(area, "StartInThisArea")
    end_col <- paste0(area, "EndInThisArea")
    if (df[[start_col]][i] != df[[end_col]][i]) {
      p <- add_trace(p,
                     x = c(df[[start_col]][i], df[[end_col]][i]), # x0, x1
                     y = c(a, a), # y0, y1
                     mode = "lines",
                     line = list(color = df$color[i], width = 10),
                     showlegend = F,
                     hoverinfo = "text",
                     # Create custom hover text
                     text = paste("Task: ", df[[paste0(area, ".Task")]][i], "<br>",
                                  "TaskStart: ", df[[paste0(area, ".TaskBeginning")]][i], "<br>",
                                  "TaskEnd: ", df[[paste0(area, ".TaskEnd")]][i], "<br>",
                                  "TaskStartInArea: ", df[[start_col]][i], "<br>",
                                  "TaskEndInArea: ", df[[end_col]][i], "<br>",
                                  "NumberOfWorkers: ", df[[paste0(area, ".NumberOfWorkers")]][i], "<br>",
                                  "Worker: ", df[[paste0(area, ".Worker")]][i], "<br>"),
                     evaluate = T # needed to avoid lazy loading
      )
    }
  }
}
f <- list(family = "Courier New, monospace", size = 18, color = "#7f7f7f")
x <- list(title = "Time", titlefont = f)
y <- list(title = "Area", titlefont = f)
# BUG FIX: the original called layout(xaxis = x, yaxis = y) without passing the
# plot object, which discards all the traces added above in current plotly;
# layout() must receive p as its first argument.
p <- layout(p, xaxis = x, yaxis = y)
p
|
/159TASKS/159TasksAreasGantt.R
|
no_license
|
Jorge-Guede-Barcenilla/TFG
|
R
| false
| false
| 9,491
|
r
|
# Gantt-style plot of tasks across work areas.
library(plotly)
# Read in data
# NOTE(review): hard-coded absolute Windows path — only runs on the author's machine.
df <- read.csv("C:/Users/Jorge/Dropbox/jorge_guede/TFG RESULTADOS/159TASKS/159TasksAreasGantt.txt", stringsAsFactors = F)
# Initialize empty plot
p <- plot_ly()
# Each task is a separate trace
# Each trace is essentially a thick line plot
# x-axis ticks are dates and handled automatically
for(i in 1:(nrow(df))){
if(df$Area1_1StartInThisArea[i]!=df$Area1_1EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area1_1StartInThisArea[i], df$Area1_1EndInThisArea[i]), # x0, x1
y = c(1, 1), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area1_1.Task[i], "<br>",
"TaskStart: ", df$Area1_1.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area1_1.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area1_1StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area1_1EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area1_1.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area1_1.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area1_2StartInThisArea[i]!=df$Area1_2EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area1_2StartInThisArea[i], df$Area1_2EndInThisArea[i]), # x0, x1
y = c(2, 2), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area1_2.Task[i], "<br>",
"TaskStart: ", df$Area1_2.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area1_2.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area1_2StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area1_2EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area1_2.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area1_2.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area1_3StartInThisArea[i]!=df$Area1_3EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area1_3StartInThisArea[i], df$Area1_3EndInThisArea[i]), # x0, x1
y = c(3, 3), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area1_3.Task[i], "<br>",
"TaskStart: ", df$Area1_3.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area1_3.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area1_3StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area1_3EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area1_3.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area1_3.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area1_4StartInThisArea[i]!=df$Area1_4EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area1_4StartInThisArea[i], df$Area1_4EndInThisArea[i]), # x0, x1
y = c(4, 4), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area1_4.Task[i], "<br>",
"TaskStart: ", df$Area1_4.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area1_4.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area1_4StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area1_4EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area1_4.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area1_4.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area1_5StartInThisArea[i]!=df$Area1_5EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area1_5StartInThisArea[i], df$Area1_5EndInThisArea[i]), # x0, x1
y = c(5, 5), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area1_5.Task[i], "<br>",
"TaskStart: ", df$Area1_5.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area1_5.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area1_5StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area1_5EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area1_5.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area1_5.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area2_1StartInThisArea[i]!=df$Area2_1EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area2_1StartInThisArea[i], df$Area2_1EndInThisArea[i]), # x0, x1
y = c(6, 6), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area2_1.Task[i], "<br>",
"TaskStart: ", df$Area2_1.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area2_1.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area2_1StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area2_1EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area2_1.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area2_1.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area2_2StartInThisArea[i]!=df$Area2_2EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area2_2StartInThisArea[i], df$Area2_2EndInThisArea[i]), # x0, x1
y = c(7, 7), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area2_2.Task[i], "<br>",
"TaskStart: ", df$Area2_2.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area2_2.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area2_2StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area2_2EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area2_2.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area2_2.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
if(df$Area2_3StartInThisArea[i]!=df$Area2_3EndInThisArea[i]) {
p <- add_trace(p,
x = c(df$Area2_3StartInThisArea[i], df$Area2_3EndInThisArea[i]), # x0, x1
y = c(8, 8), # y0, y1
mode = "lines",
line = list(color = df$color[i], width = 10),
showlegend = F,
hoverinfo = "text",
# Create custom hover text
text = paste("Task: ", df$Area2_3.Task[i], "<br>",
"TaskStart: ", df$Area2_3.TaskBeginning[i], "<br>",
"TaskEnd: ", df$Area2_3.TaskEnd[i], "<br>",
"TaskStartInArea: ", df$Area2_3StartInThisArea[i], "<br>",
"TaskEndInArea: ", df$Area2_3EndInThisArea[i], "<br>",
"NumberOfWorkers: ", df$Area2_3.NumberOfWorkers[i], "<br>",
"Worker: ", df$Area2_3.Worker[i], "<br>"),
evaluate = T # needed to avoid lazy loading
)
}
}
f <- list(family = "Courier New, monospace", size = 18, color = "#7f7f7f")
x <- list(title = "Time", titlefont = f)
y <- list(title = "Area", titlefont = f)
p <-layout(xaxis = x, yaxis = y)
p
|
####################
#### TITLE: Give back the corresponding run and step from a sequence of numbers
#### Contents:
####
#### Source Files: \\FreddieFreeloader/Script.git/Sampling/SplitSubjects
#### First Modified: 26/06/2015
#### Notes:
#################
##
###############
### Notes
###############
##
# INCENTIVE dataset
##
###############
### Preparation
###############
##
# Take arguments from master file:
#   args[1] = 1-based index into the flattened (run, step, group) sequence
#   args[2] = scenario identifier (currently only 'A' is defined)
args <- commandArgs(TRUE)
# Which index are we in?
index <- as.numeric(as.character(args[1]))
# Which scenario is this?
scenario <- as.character(args[2])
# Fail fast on a malformed index instead of printing NA downstream.
if (is.na(index)) stop("First argument must be a numeric index.", call. = FALSE)
##
###############
### RUN and STEP
###############
##
# There are in scenario A:
# A) 50 runs
# B) 70 steps in each run
# C) 2 groups in each step within a run
if (scenario == 'A') {
  NRUNS <- 50
  NSTEP <- 70
} else {
  # BUG FIX: previously an unknown scenario left NRUNS/NSTEP undefined and
  # the script crashed later with an unhelpful "object not found" error.
  stop("Unknown scenario: ", scenario, call. = FALSE)
}
# Provide the sequenced numbers: the flattened sequence enumerates
# run-major, then step, then group (group toggles fastest).
RUNsequence <- rep(seq_len(NRUNS), each = (NSTEP * 2))
run <- RUNsequence[index]
STEPsequence <- rep(rep(seq_len(NSTEP), each = 2), NRUNS)
step <- STEPsequence[index]
GROUPsequence <- rep(c(1, 2), (NSTEP * NRUNS))
group <- GROUPsequence[index]
# Emit "run step group" on stdout for the calling master file.
cat(c(run, step, group))
|
/Sampling/Incentive/SplitSubjects/RUNSTEP.R
|
permissive
|
NeuroStat/replicability_fmri
|
R
| false
| false
| 1,078
|
r
|
####################
#### TITLE: Give back the corresponding run and step from a sequence of numbers
#### Contents:
####
#### Source Files: \\FreddieFreeloader/Script.git/Sampling/SplitSubjects
#### First Modified: 26/06/2015
#### Notes:
#################
##
###############
### Notes
###############
##
# INCENTIVE dataset
##
###############
### Preparation
###############
##
# Arguments handed over by the master file:
#   [1] index into the flattened (run, step, group) enumeration
#   [2] scenario identifier
args <- commandArgs(TRUE)
index <- as.numeric(as.character(args[1]))
scenario <- as.character(args[2])
##
###############
### RUN and STEP
###############
##
# Scenario A enumerates 50 runs x 70 steps x 2 groups;
# the sequence is run-major, with the group toggling fastest.
if (scenario == 'A') {
  NRUNS <- 50
  NSTEP <- 70
}
# Build each enumeration and pick out position `index`.
run <- rep(seq_len(NRUNS), each = NSTEP * 2)[index]
step <- rep(rep(seq_len(NSTEP), each = 2), times = NRUNS)[index]
group <- rep(c(1, 2), times = NSTEP * NRUNS)[index]
# Print "run step group" on stdout for the caller.
cat(c(run, step, group))
|
# ideal.R
#' ideal: Interactive Differential Expression Analysis
#'
#' ideal makes differential expression analysis interactive, easy and reproducible.
#' This function launches the main application included in the package.
#'
#' @param dds_obj A \code{\link{DESeqDataSet}} object. If not provided, then a
#' \code{countmatrix} and a \code{expdesign} need to be provided. If none of
#' the above is provided, it is possible to upload the data during the
#' execution of the Shiny App
#' @param res_obj A \code{\link{DESeqResults}} object. If not provided, it can
#' be computed during the execution of the application
#' @param annotation_obj A \code{data.frame} object, with row.names as gene
#' identifiers (e.g. ENSEMBL ids) and a column, \code{gene_name}, containing
#' e.g. HGNC-based gene symbols. If not provided, it can be constructed during
#' the execution via the org.XX.eg.db packages - these need to be installed
#' @param countmatrix A count matrix, with genes as rows and samples as columns.
#' If not provided, it is possible to upload the data during the execution of
#' the Shiny App
#' @param expdesign A \code{data.frame} containing the info on the covariates
#' of each sample. If not provided, it is possible to upload the data during the
#' execution of the Shiny App
#' @param gene_signatures A list of vectors, one for each pathway/signature. This
#' is for example the output of the \code{\link{read_gmt}} function. The provided
#' object can also be replaced during runtime in the dedicated upload widget.
#'
#' @return A Shiny App is launched for interactive data exploration and
#' differential expression analysis
#'
#' @export
#'
#' @examples
#' # with simulated data...
#' library(DESeq2)
#' dds <- DESeq2::makeExampleDESeqDataSet(n = 100, m = 8)
#' cm <- counts(dds)
#' cd <- colData(dds)
#'
#' # with the well known airway package...
#' library(airway)
#' data(airway)
#' airway
#' dds_airway <- DESeq2::DESeqDataSetFromMatrix(assay(airway),
#' colData = colData(airway),
#' design = ~ cell + dex
#' )
#' \dontrun{
#'
#' ideal()
#' ideal(dds)
#' ideal(dds_airway)
#'
#' dds_airway <- DESeq2::DESeq(dds_airway)
#' res_airway <- DESeq2::results(dds_airway)
#' ideal(dds_airway, res_airway)
#' }
#'
ideal <- function(dds_obj = NULL,
res_obj = NULL,
annotation_obj = NULL,
countmatrix = NULL,
expdesign = NULL,
gene_signatures = NULL) {
if (!requireNamespace("shiny", quietly = TRUE)) {
stop("ideal requires 'shiny'. Please install it using
install.packages('shiny')")
}
# create environment for storing inputs and values
## i need the assignment like this to export it up one level - i.e. "globally"
ideal_env <<- new.env(parent = emptyenv())
## upload max 300mb files - can be changed if necessary
options(shiny.maxRequestSize = 300 * 1024^2)
options(shiny.launch.browser = TRUE)
## ------------------------------------------------------------------ ##
## Define UI ##
## ------------------------------------------------------------------ ##
# # components defined in separated .R files
# shinyApp(ui = ideal_ui, server = ideal_server)
# ui definition -----------------------------------------------------------
ideal_ui <- shinydashboard::dashboardPage(
title = "ideal - Interactive Differential Expression AnaLysis",
# header definition -----------------------------------------------------------
shinydashboard::dashboardHeader(
title = tags$span(
img(src = "ideal/ideal_logo_v2.png", height = "50px"),
paste0(
"ideal - Interactive Differential Expression AnaLysis ",
packageVersion("ideal")
)
),
titleWidth = 600,
# TODO:
# http://stackoverflow.com/questions/31440564/adding-a-company-logo-to-shinydashboard-header
# replace text with image
# ideal_header$children[[2]]$children <- tags$a(href='https://github.com/federicomarini/ideal',
# tags$img(src='ideal_logo_v2.png',height='50',width='200'))
# title = tags$a(href='https://github.com/federicomarini/ideal',
# tags$img(src='ideal_logo_v2.png',height='50',width='200')),
# task menu for saving state to environment or binary data
shinydashboard::dropdownMenu(
type = "tasks", icon = icon("cog"),
badgeStatus = NULL,
headerText = "ideal Tasks menu",
notificationItem(
text = actionButton("task_exit_and_save", "Exit ideal & save",
class = "btn_no_border",
onclick = "setTimeout(function(){window.close();}, 100); "
),
icon = icon("sign-out"), status = "primary"
),
menuItem(
text = downloadButton("task_state_save", "Save State as .RData")
)
)
), # end of dashboardHeader
# sidebar definition -----------------------------------------------------------
dashboardSidebar(
width = 280,
menuItem(
text = "App settings",
icon = icon("cogs"),
startExpanded = TRUE,
uiOutput("color_by"),
shinyBS::bsTooltip(
"color_by",
paste0("Select the group(s) of samples to stratify the analysis, and ideally match the contrast of interest. Can also assume multiple values, in this case the interaction of the factors is used."),
"right",
options = list(container = "body")
),
uiOutput("available_genes"),
shinyBS::bsTooltip(
"available_genes",
paste0("Select one or more features (genes) from the list to inspect. Autocompletion is provided, so you can easily find your genes of interest by started typing their names. Defaults to the row names if no annotation object is provided."),
"right",
options = list(container = "body")
),
numericInput("FDR", "False Discovery Rate", value = 0.05, min = 0, max = 1, step = 0.01),
shinyBS::bsTooltip(
"FDR",
paste0("Select the alpha level at which you would like to control the FDR (False Discovery Rate) for the set of multiple tests in your dataset. The sensible choice of 0.05 is provided as default, 0.1 is more liberal, while 0.01 is more stringent - keep in mind this does not tell anything on the effect size for the expression change."),
"right",
options = list(container = "body")
)
),
menuItem("Plot export settings",
icon = icon("paint-brush"),
startExpanded = TRUE,
numericInput("export_width", label = "Width of exported figures (cm)", value = 16, min = 2),
shinyBS::bsTooltip(
"export_width", paste0("Width of the figures to export, expressed in cm"),
"right",
options = list(container = "body")
),
numericInput("export_height", label = "Height of exported figures (cm)", value = 10, min = 2),
shinyBS::bsTooltip(
"export_height", paste0("Height of the figures to export, expressed in cm"),
"right",
options = list(container = "body")
)
),
menuItem("Quick viewer",
icon = icon("flash"),
startExpanded = TRUE,
id = "qvmenu",
fluidRow(
fluidRow(column(6, p("Count matrix")), column(6, uiOutput("ok_cm"))),
fluidRow(column(6, p("Experimental design")), column(6, uiOutput("ok_ed"))),
fluidRow(column(6, p("DESeqDataset")), column(6, uiOutput("ok_dds"))),
fluidRow(column(6, p("Annotation")), column(6, uiOutput("ok_anno"))),
fluidRow(column(6, p("Results")), column(6, uiOutput("ok_resu")))
)
),
menuItem("First steps help",
icon = icon("question-circle"),
startExpanded = TRUE,
actionButton("btn", "Click me for a quick tour", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
)
)
), # end of dashboardSidebar
# body definition -----------------------------------------------------------
dashboardBody(
introjsUI(),
## Define output size and style of error messages, and also the style of the icons e.g. check
## plus, define the myscrollbox div to prevent y overflow when page fills up
tags$head(
tags$style(HTML("
.shiny-output-error-validation {
font-size: 15px;
color: forestgreen;
text-align: center;
}
.icon-done {
color: green;
}
#myScrollBox{
overflow-y: scroll;
.dataTables_wrapper{
overflow-x: scroll;
}
}
#myAnchorBox{}
"))
),
# value boxes to always have an overview on the available data
fluidRow(
valueBoxOutput("box_ddsobj"),
valueBoxOutput("box_annobj"),
valueBoxOutput("box_resobj")
),
## main structure of the body for the dashboard
div(
id = "myScrollBox", # trick to have the y direction scrollable
tabBox(
width = 12,
# ui panel welcome -----------------------------------------------------------
tabPanel(
title = "Welcome!", icon = icon("home"), value = "tab-welcome",
fluidRow(
column(
width = 8,
includeMarkdown(system.file("extdata", "welcome.md", package = "ideal")),
br(), br(),
p("If you see a grey box like this one open below..."),
shinyBS::bsCollapse(
id = "help_welcome", open = "Help",
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_welcome.md", package = "ideal"))
)
),
actionButton("introexample", "If you see a button like this...", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
p("... you can click on that to start a tour based on introJS"),
br(), br(),
uiOutput("ui_instructions")
)
)
), # end of Welcome panel
# ui panel data setup -----------------------------------------------------------
tabPanel(
"Data Setup",
icon = icon("upload"), # value="tab-ds",
value = "tab-datasetup",
headerPanel("Setup your data for the analysis"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_datasetup", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_datasetup.md", package = "ideal"))
)
)
)
),
actionButton("tour_datasetup", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
box(
width = 12,
title = "Step 1", status = "danger", solidHeader = TRUE,
h2("Upload your count matrix and the info on the experimental design"),
fluidRow(
column(
width = 4,
uiOutput("upload_count_matrix"),
uiOutput("upload_metadata"),
br(),
"... or you can also ",
actionButton("btn_loaddemo", "Load the demo airway data",
icon = icon("play-circle"),
class = "btn btn-info"
), br(), p()
),
column(
width = 4,
br(),
actionButton("help_format",
label = "", icon = icon("question-circle"),
style = "color: #0092AC; background-color: #FFFFFF; border-color: #FFFFFF"
),
shinyBS::bsTooltip(
"help_format",
"How to provide your input data to ideal",
"bottom",
options = list(container = "body")
)
)
),
fluidRow(
column(
width = 6,
box(
width = NULL, title = "Count matrix preview", status = "primary",
solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE,
fluidRow(
column(
width = 12,
offset = 0.5,
DT::dataTableOutput("dt_cm")
)
)
)
),
column(
width = 6,
box(
width = NULL, title = "Experimental design preview", status = "primary",
solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE,
fluidRow(
column(
width = 12,
offset = 0.5,
DT::dataTableOutput("dt_ed")
)
)
)
)
)
),
uiOutput("ui_step2"),
fluidRow(
column(
width = 6,
uiOutput("ui_stepanno")
## this ideally populates also the list of genes of interest to choose among
),
column(
width = 6,
uiOutput("ui_stepoutlier")
)
),
uiOutput("ui_step3")
), # end of Data Setup panel
# ui panel counts overview -----------------------------------------------------------
tabPanel(
"Counts Overview",
icon = icon("eye"),
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Get an overview on your data"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_countsoverview", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_overview.md", package = "ideal"))
)
)
)
),
actionButton("tour_countsoverview", "Click me for a quick tour of the section",
icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
selectInput("countstable_unit",
label = "Data scale in the table",
choices = list(
"Counts (raw)" = "raw_counts",
"Counts (normalized)" = "normalized_counts",
"Variance stabilizing transformed values" = "vst_counts",
"Log10 (pseudocount of 1 added)" = "log10_counts"
)
),
DT::dataTableOutput("showcountmat"),
downloadButton("downloadData", "Download", class = "btn btn-success"),
hr(),
fluidRow(
column(
width = 8,
h3("Basic summary for the counts"),
p("Number of uniquely aligned reads assigned to each sample"),
# verbatimTextOutput("reads_summary"),
wellPanel(
fluidRow(
column(
width = 6,
numericInput("threshold_rowsums", "Threshold on the row sums of the counts", value = 0, min = 0)
),
column(
width = 6,
numericInput("threshold_rowmeans", "Threshold on the row means of the normalized counts", value = 0, min = 0)
)
)
),
p("According to the selected filtering criteria, this is an overview on the provided count data"),
verbatimTextOutput("detected_genes"),
selectInput("filter_crit",
label = "Choose the filtering criterium",
choices = c("row means", "row sums"), selected = "row means"
),
actionButton("featfilt_dds", "Filter the DDS object", class = "btn btn-primary")
)
),
h3("Sample to sample scatter plots"),
selectInput("corr_method", "Correlation method", choices = list("pearson", "spearman", "kendall")),
checkboxInput(
inputId = "corr_uselogs",
label = "Use log2 values for plot axes and values",
value = TRUE
),
checkboxInput(
inputId = "corr_usesubset",
label = "Use a subset of max 1000 genes (quicker to plot)",
value = TRUE
),
p("Compute sample to sample correlations on the normalized counts - warning, it can take a while to plot all points (depending mostly on the number of samples you provided)."),
actionButton("compute_pairwisecorr", "Run", class = "btn btn-primary"),
uiOutput("pairwise_plotUI"),
uiOutput("heatcorr_plotUI")
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Counts Overview panel
# ui panel extract results -----------------------------------------------------------
tabPanel(
"Extract Results",
icon = icon("table"),
# see: http://stackoverflow.com/questions/21609436/r-shiny-conditionalpanel-output-value?noredirect=1&lq=1
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Extract and inspect the DE results"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_extractresults", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_results.md", package = "ideal"))
)
)
)
),
actionButton("tour_results", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(
width = 6,
uiOutput("choose_fac")
)
),
fluidRow(
column(
width = 4,
# factor as covariate
wellPanel(
width = 4, id = "factor_opts",
uiOutput("fac1"),
uiOutput("fac2"),
# continuous covariate
uiOutput("facnum")
)
),
column(
width = 4,
# factor with > 2 levels
wellPanel(
width = 4,
uiOutput("lrtavailable"),
uiOutput("lrtfull"),
uiOutput("lrtreduced")
),
uiOutput("runlrt")
)
),
## general options for result function
# alpha is set via FDR on the left side
fluidRow(
column(
width = 4,
wellPanel(
id = "resu_opts",
selectInput("resu_indfil",
label = "Apply independent filtering automatically",
choices = c(TRUE, FALSE), selected = TRUE
),
selectInput("resu_lfcshrink",
label = "Shrink the log fold change for the contrast of interest",
choices = c(TRUE, FALSE), selected = TRUE
),
selectInput("resu_ihw", "Use Independent Hypothesis Weighting (IHW) as a filtering function",
choices = c(TRUE, FALSE), selected = FALSE
)
)
)
),
# , evtl also the *filter* parameter of the function, i.e. baseMean if not specified
fluidRow(
column(
width = 6,
uiOutput("runresults"),
uiOutput("store_result"),
verbatimTextOutput("diyres_summary")
)
),
DT::dataTableOutput("table_res"),
downloadButton("downloadTblResu", "Download", class = "btn btn-success"),
fluidRow(
h3("Diagnostic plots"),
column(
width = 6,
plotOutput("pvals_hist"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_hist", "Download Plot"),
textInput("filename_plot_pvals_hist", label = "Save as...", value = "plot_pvals_hist.pdf")
)
),
column(
width = 6,
plotOutput("pvals_hist_strat"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_hist_strat", "Download Plot"),
textInput("filename_plot_pvals_hist_strat", label = "Save as...", value = "plot_pvals_hist_strat.pdf")
)
),
column(
width = 6,
plotOutput("pvals_ss"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_ss", "Download Plot"),
textInput("filename_plot_pvals_ss", label = "Save as...", value = "plot_pvals_ss.pdf")
)
),
column(
width = 6,
plotOutput("logfc_hist"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_logfc_hist", "Download Plot"),
textInput("filename_plot_logfc_hist", label = "Save as...", value = "plot_logfc_hist.pdf")
)
)
)
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Extract Results panel
# ui panel summary plots -----------------------------------------------------------
tabPanel(
"Summary Plots",
icon = icon("photo"),
conditionalPanel(
condition = "!output.checkresu",
headerPanel("Interactive graphical exploration of the results"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_summaryplots", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_plots.md", package = "ideal"))
)
)
)
),
actionButton("tour_plots", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(
6,
h4("MA plot - Interactive!"),
plotOutput("plotma", brush = "ma_brush"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_ma", "Download Plot"),
textInput("filename_plot_ma", label = "Save as...", value = "plot_ma.pdf")
)
),
column(
6,
h4("Zoomed section"),
plotOutput("mazoom", click = "mazoom_click"),
numericInput("size_genelabels", label = "Labels size: ", value = 4, min = 1, max = 8),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mazoom", "Download Plot"),
textInput("filename_plot_mazoom", label = "Save as...", value = "plot_mazoom.pdf")
)
)
),
fluidRow(
column(
6,
h4("Selected gene"),
checkboxInput("ylimZero_genes", "Set y axis limit to 0", value = TRUE),
plotOutput("genefinder_plot"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_genefinder", "Download Plot"),
textInput("filename_plot_genefinder", label = "Save as...", value = "plot_genefinder.pdf")
)
),
column(
6,
h4("Gene infobox"),
htmlOutput("rentrez_infobox")
)
),
fluidRow(
column(
6,
h4("volcano plot"),
plotOutput("volcanoplot"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_volcanoplot", "Download Plot"),
textInput("filename_plot_volcanoplot", label = "Save as...", value = "plot_volcanoplot.pdf")
)
)
),
fluidRow(radioButtons("heatmap_colv", "Cluster samples", choices = list("Yes" = TRUE, "No" = FALSE), selected = TRUE)),
fluidRow(
column(
4,
checkboxInput("rowscale", label = "Scale by rows", value = TRUE)
),
column(
4,
checkboxInput("pseudocounts", "use log2(1+counts)", value = TRUE)
)
),
fluidRow(
column(
6,
plotOutput("heatbrush"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_heatbrush", "Download Plot"),
textInput("filename_plot_heatbrush", label = "Save as...", value = "plot_heatbrush.pdf")
)
),
column(
6,
plotlyOutput("hpi_brush")
)
),
box(
title = "Brushed table", status = "primary", solidHeader = TRUE,
id = "box_brushedtbl",
collapsible = TRUE, collapsed = TRUE, width = 12,
fluidRow(
DT::dataTableOutput("ma_brush_out"),
downloadButton("downloadTblMabrush", "Download", class = "btn btn-success")
)
)
),
conditionalPanel(
condition = "output.checkresu",
h2("You did not create the result object yet. Please go the dedicated tab and generate it")
)
), # end of Summary Plots panel
# ui panel gene finder -----------------------------------------------------------
tabPanel(
"Gene Finder",
icon = icon("crosshairs"),
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Find your gene(s) of interest"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_genefinder", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_genefinder.md", package = "ideal"))
)
)
)
),
actionButton("tour_genefinder", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(6, checkboxInput("ylimZero_genefinder", "Set y axis limit to 0", value = TRUE))
),
fluidRow(
column(
6,
plotOutput("bp1"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp1", "Download Plot"),
textInput("filename_plotbp1", label = "Save as...", value = "plotbp1.pdf")
)
),
column(
6,
plotOutput("bp2"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp2", "Download Plot"),
textInput("filename_plotbp2", label = "Save as...", value = "plotbp2.pdf")
)
)
),
fluidRow(
column(
6,
plotOutput("bp3"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp3", "Download Plot"),
textInput("filename_plotbp3", label = "Save as...", value = "plotbp3.pdf")
)
),
column(
6,
plotOutput("bp4"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp4", "Download Plot"),
textInput("filename_plotbp4", label = "Save as...", value = "plotbp4.pdf")
)
)
),
fluidRow(
column(
width = 10, offset = 1,
plotOutput("ma_highlight"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mahighlight", "Download Plot"),
textInput("filename_plot_mahighlight", label = "Save as...", value = "plot_mahighlight.pdf")
),
DT::dataTableOutput("table_combi"),
downloadButton("downloadTblCombi", "Download", class = "btn btn-success"),
fileInput(
inputId = "gl_ma",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
plotOutput("ma_hl_list"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mahllist", "Download Plot"),
textInput("filename_plot_mahllist", label = "Save as...", value = "plot_mahllist.pdf")
),
DT::dataTableOutput("table_combi_list"),
downloadButton("downloadTblCombiList", "Download", class = "btn btn-success")
)
)
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Gene Finder panel
# ui panel functional analysis ----------------------------------------------------------
tabPanel(
"Functional Analysis",
icon = icon("list-alt"),
conditionalPanel(
condition = "!output.checkresu",
headerPanel("Find functions enriched in gene sets"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_functionalanalysis", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_funcanalysis.md", package = "ideal"))
)
)
)
),
actionButton("tour_funcanalysis", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
selectInput("go_cats",
label = "Select the GO category(ies) of interest",
choices = list("GO Biological Process" = "BP", "GO Molecular Function" = "MF", "GO Cellular Component" = "CC"),
selected = "BP", multiple = TRUE
),
div(
id = "myAnchorBox",
tabBox(
width = NULL,
id = "gse_tabbox",
tabPanel("UPregu",
icon = icon("arrow-circle-up"),
fluidRow(column(width = 6, actionButton("button_enrUP", "Perform gene set enrichment analysis on the upregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUP_goseq", "Perform gene set enrichment analysis on the upregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUP_topgo", "Perform gene set enrichment analysis on the upregulated genes - topGO", class = "btn btn-primary"))),
uiOutput("ui_DT_gse_up"),
uiOutput("ui_DT_gse_up_goseq"),
# DT::dataTableOutput("DT_gse_up"),
# DT::dataTableOutput("DT_gse_up_goseq"),
fluidRow(
column(
width = 9,
uiOutput("ui_DT_gse_up_topgo"),
# DT::dataTableOutput("DT_gse_up_topgo"),
downloadButton("downloadGOTbl_up", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_up_topgo"))
)
),
tabPanel("DOWNregu",
icon = icon("arrow-circle-down"),
fluidRow(column(width = 6, actionButton("button_enrDOWN", "Perform gene set enrichment analysis on the downregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrDOWN_goseq", "Perform gene set enrichment analysis on the downregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrDOWN_topgo", "Perform gene set enrichment analysis on the downregulated genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_down"),
# DT::dataTableOutput("DT_gse_down_goseq"),
uiOutput("ui_DT_gse_down"),
uiOutput("ui_DT_gse_down_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_down_topgo"),
uiOutput("ui_DT_gse_down_topgo"),
downloadButton("downloadGOTbl_down", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_down_topgo"))
)
),
tabPanel("UPDOWN",
icon = icon("arrows-v"),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN", "Perform gene set enrichment analysis on the up- and downregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN_goseq", "Perform gene set enrichment analysis on the up- and downregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN_topgo", "Perform gene set enrichment analysis on the up- and downregulated genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_updown"),
# DT::dataTableOutput("DT_gse_updown_goseq"),
uiOutput("ui_DT_gse_updown"),
uiOutput("ui_DT_gse_updown_goseq"),
fluidRow(
column(
width = 9,
uiOutput("ui_DT_gse_updown_topgo"),
downloadButton("downloadGOTbl_updown", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_updown_topgo"))
)
),
tabPanel("List1",
icon = icon("list"),
fileInput(
inputId = "gl1",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
fluidRow(column(width = 6, actionButton("button_enrLIST1", "Perform gene set enrichment analysis on the genes in list1", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST1_goseq", "Perform gene set enrichment analysis on the list1 genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST1_topgo", "Perform gene set enrichment analysis on the list1 genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_list1"),
# DT::dataTableOutput("DT_gse_list1_goseq"),
uiOutput("ui_DT_gse_list1"),
uiOutput("ui_DT_gse_list1_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_list1_topgo"),
uiOutput("ui_DT_gse_list1_topgo"),
downloadButton("downloadGOTbl_l1", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_l1_topgo"))
)
),
tabPanel("List2",
icon = icon("list-alt"),
fileInput(
inputId = "gl2",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
fluidRow(column(width = 6, actionButton("button_enrLIST2", "Perform gene set enrichment analysis on the genes in list2", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST2_goseq", "Perform gene set enrichment analysis on the list2 genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST2_topgo", "Perform gene set enrichment analysis on the list2 genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_list2"),
# DT::dataTableOutput("DT_gse_list2_goseq"),
uiOutput("ui_DT_gse_list2"),
uiOutput("ui_DT_gse_list2_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_list2_topgo"),
uiOutput("ui_DT_gse_list2_topgo"),
downloadButton("downloadGOTbl_l2", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_l2_topgo"))
)
)
)
),
## will put collapsible list elements? or multi tab panel? or something to select on the left, and operate output-wise on the right e.g. venn diagrams or table for gene set enrichment
# h3("custom list 3 - handpicked") # use the select input from the left column?
# ,verbatimTextOutput("debuggls"),
# verbatimTextOutput("printUPgenes"),
# verbatimTextOutput("debuglists"),
h2("Intersection of gene sets"),
fluidRow(
column(
width = 4,
checkboxInput("toggle_updown", "Use up and down regulated genes", TRUE),
checkboxInput("toggle_up", "Use up regulated genes", FALSE),
checkboxInput("toggle_down", "Use down regulated genes", FALSE)
),
column(
width = 4,
checkboxInput("toggle_list1", "Use list1 genes", TRUE),
checkboxInput("toggle_list2", "Use list2 genes", FALSE),
checkboxInput("toggle_list3", "Use list3 genes", FALSE)
)
),
fluidRow(
column(
width = 6, plotOutput("vennlists"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_vennlists", "Download Plot"),
textInput("filename_plot_vennlists", label = "Save as...", value = "plot_vennlists.pdf")
),
offset = 3
)
),
fluidRow(
column(
width = 6, plotOutput("upsetLists"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_upsetlists", "Download Plot"),
textInput("filename_plot_upsetlists", label = "Save as...", value = "plot_upsetlists.pdf")
),
offset = 3
)
)
),
conditionalPanel(
condition = "output.checkresu",
h2("You did not create the result object yet. Please go the dedicated tab and generate it")
)
), # end of Functional Analysis panel
# ui panel signatures explorer ---------------------------------------------------------
# Signatures Explorer tab: score user-provided gene signatures (GMT files) on
# the variance-stabilized expression data and display them as a heatmap.
# Only shown once the dds object exists (checkdds flag, see server).
tabPanel(
  "Signatures Explorer",
  icon = icon("map"),
  conditionalPanel(
    condition = "!output.checkdds",
    fluidRow(
      column(
        width = 8,
        shinyBS::bsCollapse(
          id = "help_signatureexplorer", open = NULL,
          shinyBS::bsCollapsePanel(
            "Help",
            includeMarkdown(system.file("extdata", "help_signatureexplorer.md", package = "ideal"))
          )
        )
      )
    ),
    actionButton("tour_signatureexplorer", "Click me for a quick tour of the section", icon("info"),
      style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
    ), br(),
    # left: load signatures + compute vst; right: id-type conversion controls
    fluidRow(
      column(
        width = 6,
        h4("Setup options"),
        wellPanel(
          uiOutput("sig_ui_gmtin"),
          uiOutput("sig_ui_nrsigs"),
          actionButton("sig_button_computevst",
            label = "Compute the variance stabilized transformed data",
            icon = icon("spinner"), class = "btn btn-success"
          )
        )
      ),
      column(
        width = 6,
        h4("Conversion options"),
        wellPanel(
          uiOutput("sig_ui_id_data"),
          uiOutput("sig_ui_id_sigs"),
          uiOutput("sig_ui_orgdbpkg"),
          actionButton("sig_convert_setup",
            label = "Apply id conversion between data and signatures"
          )
        ),
        verbatimTextOutput("sig_convcheck")
      )
    ),
    # signature selection (left) and heatmap display options (right)
    fluidRow(
      column(
        width = 6,
        wellPanel(
          uiOutput("sig_ui_selectsig"),
          uiOutput("sig_ui_annocoldata"),
          checkboxInput("sig_useDEonly",
            label = "Use only DE genes in the signature", value = FALSE
          )
        )
        # ,
        # verbatimTextOutput("sig_sigmembers")
      ),
      column(
        width = 6,
        wellPanel(
          checkboxInput("sig_clusterrows", label = "Cluster rows", value = TRUE),
          checkboxInput("sig_clustercols", label = "Cluster columns"),
          checkboxInput("sig_centermean", label = "Center mean", value = TRUE),
          checkboxInput("sig_scalerow", label = "Standardize by row")
        )
      )
    ),
    fluidRow(
      column(
        width = 8, offset = 2,
        plotOutput("sig_heat")
      )
    )
  ),
  # fallback shown while the dds object does not exist yet
  conditionalPanel(
    condition = "output.checkdds",
    h2("You did not create the dds object yet. Please go the main tab and generate it")
  )
), # end of Signatures Explorer panel
# ui panel report editor -----------------------------------------------------------
# Report Editor tab: assemble an R Markdown report of the analysis, preview it
# as rendered HTML, edit its source in an Ace editor, and export it (also
# hands off to iSEE/GeneTonic via the dedicated uiOutputs).
tabPanel(
  "Report Editor",
  icon = icon("pencil"),
  headerPanel("Create, view and export a report of your analysis"),
  fluidRow(
    column(
      width = 8,
      shinyBS::bsCollapse(
        id = "help_reporteditor", open = NULL,
        shinyBS::bsCollapsePanel(
          "Help",
          includeMarkdown(system.file("extdata", "help_report.md", package = "ideal"))
        )
      )
    )
  ),
  actionButton("tour_report", "Click me for a quick tour of the section", icon("info"),
    style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
  ), br(),
  # two collapsed option boxes: rmarkdown rendering options (left) and
  # Ace editor behavior (right)
  fluidRow(
    column(
      width = 6,
      box(
        title = "markdown options", status = "primary", solidHeader = TRUE, collapsible = TRUE, width = 9, collapsed = TRUE,
        id = "md_opts",
        radioButtons("rmd_dl_format", label = "Choose Format:", c("HTML" = "html", "R Markdown" = "rmd"), inline = TRUE),
        textInput("report_title", "Title: "),
        textInput("report_author", "Author: "),
        # "true"/"false" strings (not logicals): substituted into the YAML header
        radioButtons("report_toc", "Table of Contents", choices = list("Yes" = "true", "No" = "false")),
        radioButtons("report_ns", "Number sections", choices = list("Yes" = "true", "No" = "false")),
        selectInput("report_theme", "Theme", choices = list(
          "Default" = "default", "Cerulean" = "cerulean",
          "Journal" = "journal", "Flatly" = "flatly",
          "Readable" = "readable", "Spacelab" = "spacelab",
          "United" = "united", "Cosmo" = "cosmo"
        )),
        radioButtons("report_echo", "Echo the commands in the output", choices = list("Yes" = "TRUE", "No" = "FALSE"))
      )
    ),
    column(
      width = 6,
      box(
        title = "editor options", status = "primary", solidHeader = TRUE, collapsible = TRUE, width = 9, collapsed = TRUE,
        id = "editor_opts",
        checkboxInput("enableAutocomplete", "Enable AutoComplete", TRUE),
        conditionalPanel(
          "input.enableAutocomplete",
          wellPanel(
            checkboxInput("enableLiveCompletion", "Live auto completion", TRUE),
            checkboxInput("enableRCompletion", "R code completion", TRUE)
          )
        ),
        selectInput("mode", "Mode: ", choices = shinyAce::getAceModes(), selected = "markdown"),
        selectInput("theme", "Theme: ", choices = shinyAce::getAceThemes(), selected = "solarized_light")
      )
    )
  ),
  # action row: refresh preview, render & download, export to iSEE/GeneTonic
  fluidRow(
    column(
      3,
      actionButton("updatepreview_button", "Update report", class = "btn btn-primary"), p()
    ),
    column(3, downloadButton("saveRmd", "Generate & Save", class = "btn btn-success")),
    column(3,
      uiOutput("ui_iSEEexport"),
      uiOutput("ui_GeneTonicexport"))
  ),
  tabBox(
    width = NULL,
    id = "report_tabbox",
    tabPanel("Report preview",
      icon = icon("file-text"),
      htmlOutput("knitDoc")
    ),
    tabPanel("Edit report",
      icon = icon("pencil-square-o"),
      # the editor content is populated server-side once the template is loaded
      aceEditor("acereport_rmd",
        mode = "markdown", theme = "solarized_light", autoComplete = "live",
        value = "_Initialization of the_ `ideal` _report generation..._",
        placeholder = "You can enter some code and text in R Markdown format",
        height = "800px"
      )
    )
  )
), # end of Report Editor panel
# ui panel about -----------------------------------------------------------
# About tab: static package information plus the live session info printout
tabPanel(
  "About",
  icon = icon("institution"),
  # headerPanel("Information on ideal/session"),
  fluidRow(
    column(
      width = 8,
      includeMarkdown(system.file("extdata", "about.md", package = "ideal")),
      verbatimTextOutput("sessioninfo")
    )
  )
) # end of About panel
) # end of box
) # end of myScrollBox
, footer()
), # end of dashboardBody
skin = "black"
) # end of dashboardPage
# server definition -----------------------------------------------------------
# nocov start
ideal_server <- shinyServer(function(input, output, session) {
# server tours setup -----------------------------------------------------------
# here will go the coded - i.e. not explicitly wrapped in introBox - steps
# Guided-tour step tables (rintrojs): each file ships with the package and
# holds element/intro pairs for one section of the app, ";"-separated.
# A small local helper replaces nine copies of the same read.delim() call.
read_tour <- function(fname) {
  read.delim(system.file("extdata", fname, package = "ideal"),
    sep = ";", stringsAsFactors = FALSE
  )
}
intro_firsttour <- read_tour("intro_firsttour.txt")
intro_datasetup <- read_tour("intro_datasetup.txt")
intro_countsoverview <- read_tour("intro_countsoverview.txt")
intro_results <- read_tour("intro_results.txt")
intro_plots <- read_tour("intro_plots.txt")
intro_genefinder <- read_tour("intro_genefinder.txt")
intro_funcanalysis <- read_tour("intro_funcanalysis.txt")
intro_signatureexplorer <- read_tour("intro_signatureexplorer.txt")
intro_report <- read_tour("intro_report.txt")
# Guided tours: each observer launches the corresponding introjs walkthrough
# when its "Click me for a quick tour" button is pressed.
observeEvent(input$btn, {
  introjs(session,
    options = list(steps = intro_firsttour)
  )
})
# Minimal two-step demo tour, both steps anchored to the trigger button itself
observeEvent(input$introexample, {
  intro_example <- data.frame(
    element = c("#introexample", "#introexample"),
    intro = c(
      "Tour elements can be anchored to elements of the UI that are intended to be highlighted. You can proceed to the next step by using the button, or also pushing the right arrow key.",
      "Well done. This is how a tour can look like. Click outside of this window to close the tour, or on the corresponding button."
    )
  )
  introjs(session,
    options = list(steps = intro_example)
  )
})
# One observer per section tour (steps loaded above from the extdata txt files)
observeEvent(input$tour_datasetup, {
  introjs(session,
    options = list(steps = intro_datasetup)
  )
})
observeEvent(input$tour_countsoverview, {
  introjs(session,
    options = list(steps = intro_countsoverview)
  )
})
observeEvent(input$tour_results, {
  introjs(session,
    options = list(steps = intro_results)
  )
})
observeEvent(input$tour_plots, {
  introjs(session,
    options = list(steps = intro_plots)
  )
})
observeEvent(input$tour_genefinder, {
  introjs(session,
    options = list(steps = intro_genefinder)
  )
})
observeEvent(input$tour_funcanalysis, {
  introjs(session,
    options = list(steps = intro_funcanalysis)
  )
})
observeEvent(input$tour_signatureexplorer, {
  introjs(session,
    options = list(steps = intro_signatureexplorer)
  )
})
observeEvent(input$tour_report, {
  introjs(session,
    options = list(steps = intro_report)
  )
})
## Update directory
# Per-session scratch directory (used e.g. for report generation artifacts)
userdir <- tempfile()
dir.create(userdir, recursive = TRUE)
# sapply(file.path(newuserdir, dir(newuserdir)[grep("code_", dir(newuserdir))]), file.remove)
# file.copy(file.path(userdir, "code_All.R"), newuserdir)
# userdir <- newuserdir
# dir.create(file.path(userdir, "data"))
# server setup reactivevalues -----------------------------------------------------------
## placeholder for the figures to export
exportPlots <- reactiveValues()
# expfig_fig1 <- NULL
# )
# will store all the reactive values relevant to the app
values <- reactiveValues()
# seed the reactive state with whatever the user passed to ideal() at launch
# (any of these may be NULL; the UI steps unlock as they get filled in)
values$countmatrix <- countmatrix
values$expdesign <- expdesign
values$dds_obj <- dds_obj
values$res_obj <- res_obj
values$annotation_obj <- annotation_obj
values$gene_signatures <- gene_signatures
# this part sets the "matching" objects if something is provided that is depending on these
# if a dds was supplied, derive counts and metadata from it so the three
# objects are guaranteed to be mutually consistent
if (!is.null(dds_obj)) {
  values$countmatrix <- counts(dds_obj, normalized = FALSE)
  values$expdesign <- as.data.frame(colData(dds_obj))
}
# server welcome home ---------------------------------------------------------
# Collapsible "Instructions" box on the welcome panel
output$ui_instructions <- renderUI({
  box(
    width = 12,
    title = "Instructions", status = "info", solidHeader = TRUE,
    collapsible = TRUE, collapsed = TRUE,
    includeMarkdown(system.file("extdata", "instructions.md", package = "ideal"))
  )
})
# server info boxes -----------------------------------------------------------
# Dashboard status boxes: green with a short summary once the corresponding
# object exists, red "yet to create" otherwise.
output$box_ddsobj <- renderUI({
  if (!is.null(values$dds_obj)) {
    return(valueBox("dds object",
      paste0(nrow(values$dds_obj), " genes - ", ncol(values$dds_obj), " samples"),
      icon = icon("list"),
      color = "green", width = NULL
    ))
  } else {
    return(valueBox("dds object",
      "yet to create",
      icon = icon("list"),
      color = "red", width = NULL
    ))
  }
})
output$box_annobj <- renderUI({
  if (!is.null(values$annotation_obj)) {
    return(valueBox("Annotation",
      paste0(nrow(values$annotation_obj), " genes - ", ncol(values$annotation_obj), " ID types"),
      icon = icon("book"),
      color = "green", width = NULL
    ))
  } else {
    return(valueBox("Annotation",
      "yet to create",
      icon = icon("book"),
      color = "red", width = NULL
    ))
  }
})
output$box_resobj <- renderUI({
  if (!is.null(values$res_obj)) {
    # DE = padj below the chosen FDR AND non-zero log2FC; the vectorized `&`
    # is correct here (elementwise over all genes), NAs are dropped
    DEregu <- sum(values$res_obj$padj < input$FDR & values$res_obj$log2FoldChange != 0, na.rm = TRUE)
    return(valueBox("DE genes",
      paste0(DEregu, " DE genes - out of ", nrow(values$res_obj), ""),
      icon = icon("list-alt"),
      color = "green", width = NULL
    ))
  } else {
    return(valueBox("DE genes",
      "yet to create",
      icon = icon("list-alt"),
      color = "red", width = NULL
    ))
  }
})
# if i want to focus a little more on the ihw object
values$ihwres <- NULL
# server uploading data -----------------------------------------------------------
## count matrix
# Upload widget for the count matrix: hidden when counts (or a dds) were
# already passed to ideal() at launch - note this checks the launch arguments,
# not values$, on purpose.
output$upload_count_matrix <- renderUI({
  # `||` (scalar, short-circuiting) is the idiomatic operator for a scalar
  # `if` condition; the original used the vectorized `|`
  if (!is.null(dds_obj) || !is.null(countmatrix)) {
    return(fluidRow(column(
      width = 12,
      tags$li("You already provided a count matrix or a DESeqDataSet object as input. You can check your input data in the collapsible box here below."), offset = 2
    )))
  } else {
    return(fileInput(
      inputId = "uploadcmfile",
      label = "Upload a count matrix file",
      accept = c(
        "text/csv", "text/comma-separated-values",
        "text/tab-separated-values", "text/plain",
        ".csv", ".tsv"
      ), multiple = FALSE
    ))
  }
})
# Reads the uploaded count matrix file, guessing the field separator first.
# Returns NULL until a file has been uploaded.
readCountmatrix <- reactive({
  if (is.null(input$uploadcmfile)) {
    return(NULL)
  }
  guessed_sep <- sepguesser(input$uploadcmfile$datapath)
  # quote = "" and check.names = FALSE keep gene ids exactly as provided;
  # the first column is taken as rownames (gene identifiers)
  cm <- utils::read.delim(input$uploadcmfile$datapath,
    header = TRUE,
    as.is = TRUE, sep = guessed_sep, quote = "",
    row.names = 1, # https://github.com/federicomarini/pcaExplorer/issues/1
    ## TODO: tell the user to use tsv, or use heuristics
    ## to check what is most frequently occurring separation character? -> see sepGuesser.R
    check.names = FALSE
  )
  return(cm)
})
## exp design
# Upload widget for the sample metadata: hidden when metadata (or a dds) were
# already passed to ideal() at launch - checks the launch arguments, not values$.
output$upload_metadata <- renderUI({
  # scalar short-circuit `||` replaces the vectorized `|` in this scalar check
  if (!is.null(dds_obj) || !is.null(expdesign)) {
    return(fluidRow(column(
      width = 12,
      tags$li("You already provided a matrix/data.frame with the experimental covariates or a DESeqDataSet object as input. You can check your input data in the collapsible box here below."), offset = 2
    )))
  } else {
    return(fileInput(
      inputId = "uploadmetadatafile",
      label = "Upload a sample metadata matrix file",
      accept = c(
        "text/csv", "text/comma-separated-values",
        "text/tab-separated-values", "text/plain",
        ".csv", ".tsv"
      ), multiple = FALSE
    ))
  }
})
# Reads the uploaded sample metadata table, guessing the field separator.
# Returns NULL until a file has been uploaded. Unlike readCountmatrix(), no
# row.names column is assumed here.
readMetadata <- reactive({
  if (is.null(input$uploadmetadatafile)) {
    return(NULL)
  }
  guessed_sep <- sepguesser(input$uploadmetadatafile$datapath)
  expdesign <- utils::read.delim(input$uploadmetadatafile$datapath,
    header = TRUE,
    as.is = TRUE, sep = guessed_sep, quote = "",
    check.names = FALSE
  )
  return(expdesign)
})
# load the demo data
# Load the airway demo dataset on request: fills counts + metadata and resets
# all downstream objects so the user restarts from Step 2.
observeEvent(input$btn_loaddemo, withProgress(
  message = "Loading demo data",
  detail = "Loading airway count and metadata information",
  value = 0,
  {
    aw <- requireNamespace("airway", quietly = TRUE)
    incProgress(0.2, detail = "`airway` package loaded")
    if (aw) {
      data(airway, package = "airway", envir = environment())
      cm_airway <- assay(airway)
      # increments now sum to 1 (0.2 + 0.5 + 0.3); the previous values
      # (0.2 + 0.7 + 0.3 = 1.2) overshot the progress bar before completion
      incProgress(0.5, detail = "Count matrix loaded")
      ed_airway <- as.data.frame(colData(airway))
      values$countmatrix <- cm_airway
      values$expdesign <- ed_airway
      incProgress(0.3, detail = "Experimental metadata loaded")
      # just to be sure, erase the annotation and the rest
      values$dds_obj <- NULL
      values$annotation_obj <- NULL
      values$res_obj <- NULL
      showNotification("All components for generating the DESeqDataset object have been loaded, proceed to Step 2!",
        type = "message"
      )
    } else {
      showNotification("The 'airway' package is currently not installed. Please do so by executing BiocManager::install('airway') before launching ideal()", type = "warning")
    }
  }
))
# Modal dialog documenting the expected input file formats, with a screenshot
observeEvent(input$help_format, {
  showModal(modalDialog(
    title = "Format specifications for ideal",
    includeMarkdown(system.file("extdata", "datainput.md", package = "ideal")),
    h4("Example:"),
    # NOTE(review): the example image is shipped in the pcaExplorer package,
    # not in ideal - presumably deliberate reuse; confirm pcaExplorer is a
    # guaranteed dependency so system.file() cannot return ""
    tags$img(
      src = base64enc::dataURI(file = system.file("www", "help_dataformats.png", package = "pcaExplorer"), mime = "image/png"),
      width = 750
    ),
    easyClose = TRUE,
    footer = NULL,
    size = "l"
  ))
})
# Multi-select of metadata columns that will form the DE design formula
output$ddsdesign <- renderUI({
  if (is.null(values$expdesign)) {
    return(NULL)
  }
  poss_covars <- colnames(values$expdesign)
  # NOTE(review): c(NULL, poss_covars) is identical to poss_covars
  # (NULL vanishes in c()); kept as-is
  selectInput("dds_design",
    label = "Select the design for your experiment: ",
    choices = c(NULL, poss_covars), selected = NULL, multiple = TRUE
  )
})
# server ui steps -----------------------------------------------------------
# Step 2 box (design selection + dds creation): only rendered once both the
# counts and the metadata are available.
output$ui_step2 <- renderUI({
  # scalar short-circuit `||` replaces the vectorized `|` in this scalar check
  if (is.null(values$expdesign) || is.null(values$countmatrix)) {
    return(NULL)
  }
  box(
    width = 12, title = "Step 2", status = "warning", solidHeader = TRUE,
    tagList(
      # as in https://groups.google.com/forum/#!topic/shiny-discuss/qQ8yICfvDu0
      h2("Select the DE design and create the DESeqDataSet object"),
      fluidRow(
        column(
          width = 6,
          uiOutput("ddsdesign"),
          uiOutput("ui_diydds"),
          hr(),
          # uiOutput("ok_dds"),
          verbatimTextOutput("debugdiy")
        )
      )
    )
  )
})
# Optional step box: build the gene annotation data frame (species + id type).
# Appears only once the dds object exists.
output$ui_stepanno <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  box(
    width = 12, title = "Optional Step", status = "info", solidHeader = TRUE,
    tagList(
      h2("Create the annotation data frame for your dataset"),
      fluidRow(
        column(
          width = 8,
          uiOutput("ui_selectspecies"),
          verbatimTextOutput("speciespkg"),
          uiOutput("ui_idtype"),
          verbatimTextOutput("printDIYanno")
        )
      ),
      uiOutput("ui_getanno")
    )
  )
})
# Optional step box: drop suspected outlier samples and rebuild the dds.
output$ui_stepoutlier <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  box(
    width = 12, title = "Optional Step", status = "info", solidHeader = TRUE,
    tagList(
      h2("Remove sample(s) from the current dataset - suspected outliers!"),
      fluidRow(
        column(
          width = 8,
          uiOutput("ui_selectoutliers"),
          uiOutput("outliersout"),
          verbatimTextOutput("printremoved")
        )
      )
    )
  )
})
# "Generate the dds object" button: needs counts, metadata, AND at least one
# design covariate chosen.
output$ui_diydds <- renderUI({
  # scalar short-circuit `||` replaces the vectorized `|` in this scalar check
  if (is.null(values$expdesign) || is.null(values$countmatrix) || is.null(input$dds_design)) {
    return(NULL)
  }
  actionButton("button_diydds", "Generate the dds object", class = "btn btn-success")
})
# "Retrieve annotation" button: shown once a dds exists and a species has
# been selected (validate() keeps the message in place until then).
output$ui_getanno <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  shiny::validate(
    need(
      input$speciesSelect != "",
      "Select a species first in the panel"
    )
  )
  actionButton("button_getanno", "Retrieve the gene symbol annotation for the uploaded data", class = "btn btn-primary")
})
# Slider for the number of parallel workers used when running DESeq();
# upper bound taken from the machine via BiocParallel
output$ui_nrcores <- renderUI({
  mincores <- 1
  maxcores <- BiocParallel::multicoreWorkers()
  sliderInput("nrcores",
    label = "Choose how many cores to use for computing:",
    min = mincores, max = maxcores, value = 1, step = 1
  )
})
# Step 3 box: run DESeq() - includes an EDA reminder, the core slider, the run
# button, a results summary, and the end-of-pipeline confirmation.
output$ui_step3 <- renderUI({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  box(
    width = 12, title = "Step 3", status = "success", solidHeader = TRUE,
    tagList(
      h2("Run DESeq!"),
      fluidRow(
        column(
          width = 8,
          shinyBS::bsCollapse(
            id = "eda_check",
            shinyBS::bsCollapsePanel(
              title = "Make sure you properly performed Exploratory Data Analysis (EDA) before testing for Differential Expression (DE)",
              style = "info",
              includeMarkdown(system.file("extdata", "help_eda.md", package = "ideal"))
            )
          )
        )
      ),
      fluidRow(
        column(
          width = 4,
          uiOutput("ui_nrcores")
        )
      ),
      uiOutput("rundeseq"),
      verbatimTextOutput("printDIYresults"),
      uiOutput("ui_stepend")
    )
  )
})
# "Good to go" block: rendered only after DESeq() has been run, detected via
# the "results" entry in the dds metadata columns
output$ui_stepend <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  if (!"results" %in% mcols(mcols(values$dds_obj))$type) { #
    return(NULL)
  }
  tagList(
    h2("Good to go!"),
    box(
      width = 6, title = "Diagnostic plot", status = "info", solidHeader = TRUE,
      collapsible = TRUE, collapsed = TRUE,
      plotOutput("diagno_dispests")
    )
  )
})
# Dispersion-estimates diagnostic plot for the fitted dds
output$diagno_dispests <- renderPlot({
  plotDispEsts(values$dds_obj)
})
# server ok objects -----------------------------------------------------------
# Green check-mark indicators: each renderUI shows a large "done" icon once
# the corresponding ingredient is available, and renders nothing otherwise.
# Raw HTML is used because icon() does not expose the fa-3x size class.
done_icon <- function() {
  tags$div(HTML('<i class="fa fa-check fa-3x icon-done"></i>'))
}
output$ok_cm <- renderUI({
  if (!is.null(values$countmatrix)) done_icon()
})
output$ok_ed <- renderUI({
  if (!is.null(values$expdesign)) done_icon()
})
output$ok_dds <- renderUI({
  if (!is.null(values$dds_obj)) done_icon()
})
output$ok_anno <- renderUI({
  if (!is.null(values$annotation_obj)) done_icon()
})
output$ok_resu <- renderUI({
  if (!is.null(values$res_obj)) done_icon()
})
# Flags consumed by the UI's JavaScript conditionalPanels (output.checkdds /
# output.checkresu): TRUE means the object is still missing.
output$checkdds <- reactive({
  is.null(values$dds_obj)
})
output$checkresu <- reactive({
  is.null(values$res_obj)
})
# keep these evaluating even while the panels are hidden, otherwise the
# conditions would never update
outputOptions(output, "checkresu", suspendWhenHidden = FALSE)
outputOptions(output, "checkdds", suspendWhenHidden = FALSE)
# Preview tables for the uploaded/provided counts and metadata
output$dt_cm <- DT::renderDataTable({
  if (is.null(values$countmatrix)) {
    return(NULL)
  }
  datatable(values$countmatrix, options = list(scrollX = TRUE, scrollY = "400px"))
})
output$dt_ed <- DT::renderDataTable({
  if (is.null(values$expdesign)) {
    return(NULL)
  }
  datatable(values$expdesign, options = list(scrollX = TRUE))
})
# http://stackoverflow.com/questions/17024685/how-to-use-a-character-string-in-formula
# http://stats.stackexchange.com/questions/29477/how-to-write-a-linear-model-formula-with-100-variables-in-r
# http://stackoverflow.com/questions/7666807/anova-test-fails-on-lme-fits-created-with-pasted-formula/7668846#7668846
# Assembles the DESeqDataSet from the current counts, metadata, and the
# design covariates picked in the UI; NULL while any ingredient is missing.
diyDDS <- reactive({
  # scalar short-circuit `||` replaces the vectorized `|` in this scalar check
  if (is.null(values$countmatrix) || is.null(values$expdesign) || is.null(input$dds_design)) {
    return(NULL)
  }
  # design built from the selected covariates, e.g. c("cell","dex") -> ~cell + dex
  dds <- DESeqDataSetFromMatrix(
    countData = values$countmatrix,
    colData = values$expdesign,
    design = as.formula(paste0("~", paste(input$dds_design, collapse = " + ")))
  )
  # size factors computed up front so normalized counts are available right away
  dds <- estimateSizeFactors(dds)
  return(dds)
})
# Materialize the dds object into the reactive state when the button is hit
observeEvent(input$button_diydds, {
  # scalar short-circuit `&&` replaces the vectorized `&` in this scalar check
  if (!is.null(values$countmatrix) && !is.null(values$expdesign)) {
    values$dds_obj <- diyDDS()
  }
})
# Echo the freshly built dds object and its design formula below the button
output$debugdiy <- renderPrint({
  if (!is.null(values$dds_obj)) {
    print(values$dds_obj)
    print(design(values$dds_obj))
  }
})
# as in http://stackoverflow.com/questions/29716868/r-shiny-how-to-get-an-reactive-data-frame-updated-each-time-pressing-an-actionb
# Push freshly uploaded files into the reactive state as soon as they arrive
observeEvent(input$uploadcmfile, {
  values$countmatrix <- readCountmatrix()
})
observeEvent(input$uploadmetadatafile, {
  values$expdesign <- readMetadata()
})
# server retrieving anno --------------------------------------------------
# server retrieving anno --------------------------------------------------
# Lookup table mapping a human-readable species name to its Bioconductor
# OrgDb annotation package, Ensembl database name, limma::goana shortcut,
# and goseq genome id.
annoSpecies_df <-
  data.frame(
    species = c(
      "", "Anopheles", "Arabidopsis", "Bovine", "Worm",
      "Canine", "Fly", "Zebrafish", "E coli strain K12",
      "E coli strain Sakai", "Chicken", "Human", "Mouse",
      "Rhesus", "Malaria", "Chimp", "Rat",
      "Yeast", "Streptomyces coelicolor", "Pig", "Toxoplasma gondii",
      "Xenopus"
    ),
    pkg = c(
      "", "org.Ag.eg.db", "org.At.tair.db", "org.Bt.eg.db", "org.Ce.eg.db",
      "org.Cf.eg.db", "org.Dm.eg.db", "org.Dr.eg.db", "org.EcK12.eg.db",
      "org.EcSakai.eg.db", "org.Gg.eg.db", "org.Hs.eg.db", "org.Mm.eg.db",
      "org.Mmu.eg.db", "org.Pf.plasmo.db", "org.Pt.eg.db", "org.Rn.eg.db",
      "org.Sc.sgd.db", "org.Sco.eg.db", "org.Ss.eg.db", "org.Tgondii.eg.db",
      "org.Xl.eg.db"
    ),
    stringsAsFactors = FALSE
  )
annoSpecies_df <- annoSpecies_df[order(annoSpecies_df$species), ]
# this one is relevant for creating links to the genes
# NOTE(review): the two vectors below are aligned POSITIONALLY with the
# alphabetically sorted species column (the sort happens just above) - this
# is fragile; verify the ordering whenever a species is added or renamed
annoSpecies_df$ensembl_db <- c(
  "", "", "", "Bos_taurus", "Canis_familiaris", "Gallus_gallus", "Pan_troglodytes",
  "", "", "Drosophila_melanogaster", "Homo_sapiens", "", "Mus_musculus",
  "Sus_scrofa", "Rattus_norvegicus", "Macaca_mulatta", "", "", "Caenorhabditis_elegans",
  "Xenopus_tropicalis", "Saccharomyces_cerevisiae", "Danio_rerio"
)
# this one is the shortcut for the limma::goana function
# (derived from the OrgDb package name, e.g. org.Hs.eg.db -> Hs; rows whose
# package does not match "eg.db", like org.At.tair.db, get NA here)
annoSpecies_df$species_short[grep(pattern = "eg.db", annoSpecies_df$pkg)] <- gsub(".eg.db", "", gsub("org.", "", annoSpecies_df$pkg))[grep(pattern = "eg.db", annoSpecies_df$pkg)]
# to match to the goseq genome setting
annoSpecies_df$goseq_shortcut <- c(
  "", "anoGam1", "Arabidopsis", "bosTau8", "canFam3", "galGal4", "panTro4", "E. coli K12", "E. coli Sakai",
  "dm6", "hg38", "Malaria", "mm10", "susScr3", "rn6", "rheMac", "", "", "ce11", "xenTro", "sacCer3", "danRer10"
)
rownames(annoSpecies_df) <- annoSpecies_df$species # easier to access afterwards
# annoSpecies_df <- annoSpecies_df[annoSpecies_df$species %in% c("","Human", "Mouse", "Rat", "Fly", "Chimp"),]
# Species dropdown, populated from the lookup table above
output$ui_selectspecies <- renderUI({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  selectInput("speciesSelect",
    label = "Select the species of your samples - it will also be used for enhancing result tables",
    choices = annoSpecies_df$species, selected = ""
  )
})
# Id-type dropdown: a fixed set of common keytypes, extended with whatever the
# selected species' OrgDb package supports
output$ui_idtype <- renderUI({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  std_choices <- c("ENSEMBL", "ENTREZID", "REFSEQ", "SYMBOL")
  if (input$speciesSelect != "") {
    annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
    # require() (not requireNamespace()) on purpose: get(annopkg) below needs
    # the OrgDb object on the search path
    require(annopkg, character.only = TRUE)
    pkg_choices <- keytypes(get(annopkg))
    std_choices <- union(std_choices, pkg_choices)
  }
  selectInput("idtype", "select the id type in your data", choices = std_choices)
})
# Status line reporting whether the annotation package for the selected
# species is installed and loaded; validate() surfaces a helpful install hint
output$speciespkg <- renderText({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  shiny::validate(
    need(
      input$speciesSelect != "",
      "Select a species - requires the corresponding annotation package"
    )
  )
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  shiny::validate(
    need(
      require(annopkg, character.only = TRUE),
      paste0("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install('", annopkg, "')")
    )
  )
  retmsg <- paste0(annopkg, " - package available and loaded")
  # if (!require(annopkg,character.only=TRUE)) {
  #   stop("The package",annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
  # }
  retmsg <- paste0(retmsg, " - ", gsub(".eg.db", "", gsub("org.", "", annopkg)))
  retmsg
})
# server outliers --------------------------------------------------------
# Multi-select of candidate outlier samples to drop from the dataset;
# guard clause replaces the if/else wrapper, behavior unchanged.
output$ui_selectoutliers <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  selectInput("selectoutliers", "Select the samples to remove - candidate outliers",
    choices = colnames(values$dds_obj), selected = NULL, multiple = TRUE
  )
})
# Button triggering the rebuild of the dds without the selected samples
output$outliersout <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  actionButton("button_outliersout", "Recompute the dds without some samples", class = "btn btn-primary")
})
# Rebuild the dds with the selected samples removed, keeping the same design;
# results are reset since they no longer match the new dataset.
observeEvent(input$button_outliersout, {
  withProgress(
    {
      allsamples <- colnames(values$dds_obj)
      outliersamples <- input$selectoutliers
      keptsamples <- setdiff(allsamples, outliersamples)
      dds <- DESeqDataSetFromMatrix(
        countData = values$countmatrix[, keptsamples],
        colData = values$expdesign[keptsamples, ],
        design = design(values$dds_obj)
        # design=as.formula(paste0("~",paste(input$dds_design, collapse=" + ")))
      )
      dds <- estimateSizeFactors(dds)
      # return(dds)
      # re-create the dds and keep track of which samples were removed
      values$removedsamples <- input$selectoutliers
      # replacing values$dds_obj resets the species selector downstream, so
      # remember the current choice and restore it right after the swap
      curr_species <- input$speciesSelect
      values$dds_obj <- dds
      updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
      # accordingly, reset the results
      values$res_obj <- NULL
    },
    message = "Removing selected samples from the current dataset"
  )
})
# Echo which samples have been removed so far
output$printremoved <- renderPrint({
  print(values$removedsamples)
})
# server run deseq --------------------------------------------------------
# "Run DESeq!" button, shown only once the dds object exists;
# guard clause replaces the if/else wrapper, behavior unchanged.
output$rundeseq <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  actionButton("button_rundeseq", "Run DESeq!", icon = icon("spinner"), class = "btn btn-success")
})
# Run the full DESeq() pipeline (optionally in parallel) on the current dds
observeEvent(input$button_rundeseq, {
  withProgress(
    message = "Running DESeq on your data...",
    detail = "This step might take a while",
    value = 0,
    {
      # trick to keep species info while still changing the dds_obj
      curr_species <- input$speciesSelect
      incProgress(0.1)
      if (input$nrcores == 1) {
        values$dds_obj <- DESeq(values$dds_obj)
      } else {
        # leave open option for computing in parallel?
        values$dds_obj <- DESeq(values$dds_obj,
          parallel = TRUE,
          BPPARAM = MulticoreParam(workers = input$nrcores)
        )
      }
      incProgress(0.89)
      updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
    }
  )
})
# Restore the id-type selection when the species changes.
# NOTE(review): values$cur_type is not assigned anywhere in this section -
# presumably set elsewhere in the file; verify, otherwise this restores NULL
observeEvent(input$speciesSelect, {
  curr_idtype <- values$cur_type
  updateSelectInput(session, inputId = "idtype", selected = curr_idtype)
})
# Text summary of the DE results at the chosen FDR; validate() guides the user
# through the prerequisite steps
output$printDIYresults <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$dds_obj),
      "Provide or construct a dds object"
    )
  )
  shiny::validate(
    need(
      "results" %in% mcols(mcols(values$dds_obj))$type,
      "dds object provided, but couldn't find results. you should first run DESeq() with the button up here"
    )
  )
  summary(results(values$dds_obj), alpha = input$FDR)
})
# server counts overview --------------------------------------------------------
# Counts matrix in the unit currently selected by the user:
# raw, normalized, variance-stabilized, or log10(1 + normalized)
current_countmat <- reactive({
  switch(input$countstable_unit,
    raw_counts = counts(values$dds_obj, normalized = FALSE),
    normalized_counts = counts(values$dds_obj, normalized = TRUE),
    vst_counts = {
      # compute (and cache) the vst-transformed data only on first request
      if (is.null(values$vst_obj)) {
        withProgress(
          message = "Computing the variance stabilized transformed data...",
          detail = "This step can take a little while",
          value = 0,
          {
            values$vst_obj <- vst(values$dds_obj)
          }
        )
      }
      assay(values$vst_obj) ## see if it is worth to keep in here or explore possibility with fast vst
    },
    log10_counts = log10(1 + counts(values$dds_obj, normalized = TRUE))
  )
})
# Interactive table of the counts matrix in the selected unit
output$showcountmat <- DT::renderDataTable({
  DT::datatable(current_countmat())
})
# Download the currently displayed counts matrix as a csv file named
# after the selected unit
output$downloadData <- downloadHandler(
  filename = function() {
    paste0(input$countstable_unit, "table.csv")
  },
  content = function(file) {
    write.csv(current_countmat(), file = file)
  }
)
# Pairwise scatter/correlation plot of the samples, only when requested
output$corrplot <- renderPlot({
  if (!input$compute_pairwisecorr) {
    return(NULL)
  }
  withProgress(
    message = "Preparing the plot",
    detail = "this can take a while...",
    pair_corr(current_countmat(),
      method = input$corr_method,
      log = input$corr_uselogs,
      use_subset = input$corr_usesubset
    )
  )
})
# Sample-to-sample correlation heatmap, only when requested
output$heatcorr <- renderPlot({
  if (!input$compute_pairwisecorr) {
    return(NULL)
  }
  pheatmap(cor(current_countmat()))
})
# Placeholder for the pairwise correlation plot; hidden until the user
# enables the computation
output$pairwise_plotUI <- renderUI({
  if (input$compute_pairwisecorr) {
    plotOutput("corrplot", height = "1000px")
  }
})
# Placeholder for the correlation heatmap; hidden until the user enables
# the computation
output$heatcorr_plotUI <- renderUI({
  if (input$compute_pairwisecorr) {
    plotOutput("heatcorr")
  }
})
# overview on number of detected genes on different threshold types
# Text overview of how many genes are "detected" by two criteria:
# - t1: total counts per gene (row sums of raw counts)
# - t2: average normalized counts per gene (row means)
output$detected_genes <- renderPrint({
  t1 <- rowSums(counts(values$dds_obj))
  t2 <- rowMeans(counts(values$dds_obj, normalized = TRUE))
  thresh_rowsums <- input$threshold_rowsums
  thresh_rowmeans <- input$threshold_rowmeans
  abs_t1 <- sum(t1 > thresh_rowsums)
  rel_t1 <- 100 * mean(t1 > thresh_rowsums)
  abs_t2 <- sum(t2 > thresh_rowmeans)
  rel_t2 <- 100 * mean(t2 > thresh_rowmeans)
  cat("Number of detected genes:\n")
  cat(abs_t1, "genes have at least a sample with more than", thresh_rowsums, "counts\n")
  cat(
    paste0(round(rel_t1, 3), "%"), "of the", nrow(values$dds_obj),
    "genes have at least a sample with more than", thresh_rowsums, "counts\n"
  )
  cat(abs_t2, "genes have more than", thresh_rowmeans, "counts (normalized) on average\n")
  cat(
    paste0(round(rel_t2, 3), "%"), "of the", nrow(values$dds_obj),
    # bug fix: this line previously reported thresh_rowsums, but rel_t2 is
    # computed against the row-means threshold
    "genes have more than", thresh_rowmeans, "counts (normalized) on average\n"
  )
  cat("Counts are ranging from", min(counts(values$dds_obj)), "to", max(counts(values$dds_obj)))
})
# Filter out lowly expressed genes, by row sums (raw counts) or row means
# (normalized counts), then refresh size factors on the filtered object
observeEvent(input$featfilt_dds, {
  t1 <- rowSums(counts(values$dds_obj))
  t2 <- rowMeans(counts(values$dds_obj, normalized = TRUE))
  thresh_rowsums <- input$threshold_rowsums
  thresh_rowmeans <- input$threshold_rowmeans
  if (input$filter_crit == "row sums") {
    filt_dds <- values$dds_obj[t1 > thresh_rowsums, ]
  } else {
    filt_dds <- values$dds_obj[t2 > thresh_rowmeans, ]
  }
  # TODO: see if re-estimation of size factors is required
  filt_dds <- estimateSizeFactors(filt_dds)
  # preserve the species selection across the dds_obj replacement
  curr_species <- input$speciesSelect
  values$dds_obj <- filt_dds
  updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
})
# server managing gene lists --------------------------------------------------------
## gene lists upload
# Store uploaded gene list 1 as a one-column data frame of gene symbols
observeEvent(input$gl1, {
  values$genelist1 <- setNames(
    as.data.frame(gl1(), stringsAsFactors = FALSE),
    "Gene Symbol"
  )
})
# Read the uploaded file for gene list 1, one gene per line; returns an
# empty data frame while nothing has been uploaded yet
gl1 <- reactive({
  if (!is.null(input$gl1)) {
    readLines(input$gl1$datapath)
  } else {
    # user has not uploaded a file yet
    data.frame()
  }
})
# Store uploaded gene list 2 as a one-column data frame of gene symbols
observeEvent(input$gl2, {
  values$genelist2 <- setNames(
    as.data.frame(gl2(), stringsAsFactors = FALSE),
    "Gene Symbol"
  )
})
# Read the uploaded file for gene list 2, one gene per line; returns an
# empty data frame while nothing has been uploaded yet
gl2 <- reactive({
  if (!is.null(input$gl2)) {
    readLines(input$gl2$datapath)
  } else {
    # user has not uploaded a file yet
    data.frame()
  }
})
# Store the uploaded MA-plot gene list as a one-column data frame of
# gene symbols
observeEvent(input$gl_ma, {
  values$genelist_ma <- setNames(
    as.data.frame(gl_ma(), stringsAsFactors = FALSE),
    "Gene Symbol"
  )
})
# Read the uploaded file for the MA-plot gene list, one gene per line;
# returns an empty data frame while nothing has been uploaded yet
gl_ma <- reactive({
  if (!is.null(input$gl_ma)) {
    readLines(input$gl_ma$datapath)
  } else {
    # user has not uploaded a file yet
    data.frame()
  }
})
# Debug helper: print the stored gene list 1
output$debuggls <- renderPrint({
  values$genelist1
  # values$genelist2
})
# DE genes lists ----------------------------------------------------------
# Reactive: symbols of the upregulated DE genes (log2FC > 0, padj not NA)
# at the current FDR; NULL when no annotation/symbols are available.
# NOTE(review): when no "symbol" column exists in res_obj, res_tbl_UP$symbol
# is NULL, so both no-annotation paths yield NULL — confirm the condition
# below is not meant to be negated.
values$genelistUP <- reactive({
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  res_tbl_UP <- res_tbl[res_tbl$log2FoldChange > 0 & !is.na(res_tbl$padj), ]
  # res_tbl_DOWN <- res_tbl[res_tbl$log2FoldChange < 0 & !is.na(res_tbl$padj),]
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      # refresh symbols from the annotation object, matched by gene id
      res_tbl_UP$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl_UP$id,
          rownames(values$annotation_obj)
        )
      ]
      listUP <- res_tbl_UP$symbol
    } else {
      listUP <- NULL
    }
  } else {
    listUP <- res_tbl_UP$symbol
  }
  return(listUP)
})
# Reactive: symbols of the downregulated DE genes (log2FC < 0, padj not
# NA) at the current FDR; NULL when no annotation/symbols are available.
# NOTE(review): same possibly-inverted condition as in genelistUP.
values$genelistDOWN <- reactive({
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  # res_tbl_UP <- res_tbl[res_tbl$log2FoldChange > 0 & !is.na(res_tbl$padj),]
  res_tbl_DOWN <- res_tbl[res_tbl$log2FoldChange < 0 & !is.na(res_tbl$padj), ]
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      # refresh symbols from the annotation object, matched by gene id
      res_tbl_DOWN$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl_DOWN$id,
          rownames(values$annotation_obj)
        )
      ]
      listDOWN <- res_tbl_DOWN$symbol
    } else {
      listDOWN <- NULL
    }
  } else {
    listDOWN <- res_tbl_DOWN$symbol
  }
  return(listDOWN)
})
# Reactive: symbols of all DE genes (both directions) at the current FDR;
# NULL when no annotation/symbols are available.
# NOTE(review): same possibly-inverted condition as in genelistUP.
values$genelistUPDOWN <- reactive({
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      # refresh symbols from the annotation object, matched by gene id
      res_tbl$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl$id,
          rownames(values$annotation_obj)
        )
      ]
      listUPDOWN <- res_tbl$symbol
    } else {
      listUPDOWN <- NULL
    }
  } else {
    listUPDOWN <- res_tbl$symbol
  }
  return(listUPDOWN)
})
## list of gene lists
# Assemble the named list of gene lists (DE up/down/both plus uploaded
# lists), drop the NULL ones, and keep only the lists toggled on by the
# user in the UI
gll <- reactive({
  mylist <- list(
    listUP = values$genelistUP(),
    listDOWN = values$genelistDOWN(),
    listUPDOWN = values$genelistUPDOWN(),
    list1 = as.character(values$genelist1$`Gene Symbol`),
    list2 = as.character(values$genelist2$`Gene Symbol`),
    list3 = NULL
  ) # will be changed to be the ones selected by the user
  gll_nonempty <- mylist[!sapply(mylist, is.null)]
  # plus, add toggles to selectively keep only some lists?
  # NOTE(review): the toggled names come from mylist but are matched into
  # the NULL-filtered gll_nonempty; a toggled-on list that was dropped as
  # NULL produces an NA match here — verify this is intended
  lists_tokeep <- names(mylist)[which(c(
    input$toggle_up,
    input$toggle_down,
    input$toggle_updown,
    input$toggle_list1,
    input$toggle_list2,
    input$toggle_list3
  ))]
  gll_final <- gll_nonempty[match(lists_tokeep, names(gll_nonempty))]
})
# Debug helper: print the assembled list of gene lists
output$debuglists <- renderPrint({
  # length(gll_nonempty)
  # length(gll())
  # lapply(gll(),length)
  print(gll())
})
# Venn diagram of the selected gene lists; all lists must be non-NULL
output$vennlists <- renderPlot({
  shiny::validate(
    need(!any(sapply(gll(), is.null)),
      message = "Some lists are empty - make sure you extracted the results using the annotation object"
    )
  )
  gplots::venn(gll())
})
# UpSet plot of the selected gene lists; needs at least two non-empty sets
output$upsetLists <- renderPlot({
  current_lists <- gll()
  shiny::validate(
    need(sum(sapply(current_lists, function(x) length(x) > 0)) > 1,
      message = "Make sure you provide at least two sets"
    )
  )
  UpSetR::upset(fromList(current_lists))
})
# Retrieve the annotation (gene id -> gene name) via the species' orgDb
# package; on failure, show a warning notification instead of crashing
observeEvent(input$button_getanno, {
  withProgress(
    message = "Retrieving the annotation...",
    detail = "Locating package",
    value = 0,
    {
      # orgDb package name for the currently selected species
      annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
      incProgress(0.1, detail = "Matching identifiers")
      tryCatch(
        {
          annotation_obj <- get_annotation_orgdb(values$dds_obj, orgdb_species = annopkg, idtype = input$idtype)
          values$annotation_obj <- annotation_obj
          # and also, set the species in the reactiveValues
          values$cur_species <- input$speciesSelect
          values$cur_type <- input$idtype
        },
        error = function(e) {
          showNotification(
            paste(
              "Warning! The annotation object was not generated,",
              "because of an error in the underlying `mapIds` function:",
              "-----", e
            ),
            type = "warning"
          )
        }
      )
    }
  )
})
# Preview the head of the annotation object
output$printDIYanno <- renderPrint({
  print(head(values$annotation_obj))
})
# Debug/inspection output: maps the upregulated gene symbols (and the
# expressed background genes) to Entrez ids and prints their structure
output$printUPgenes <- renderPrint({
  print(head(values$genelistUP()))
  print(str(values$genelistUP()))
  organism <- annoSpecies_df[values$cur_species, ]$species_short
  # background = genes with at least one count across all samples
  backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
  inputType <- "SYMBOL" # will be replaced by input$...
  # annopkg <- paste0("org.",organism,".eg.db")
  annopkg <- annoSpecies_df[values$cur_species, ]$pkg
  # NOTE(review): eval(parse(text=)) is only used to fetch the orgDb object
  # by name — get(annopkg) would do the same more safely
  listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
    keys = values$genelistUP(),
    column = "ENTREZID", keytype = inputType
  ))
  listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
    keys = backgroundgenes,
    column = "ENTREZID", keytype = input$idtype
  ))
  # print(values$genelistUP())
  print(str(listGenesEntrez))
  print(class(listGenesEntrez))
  print(str(listBackgroundEntrez))
  print(class(listBackgroundEntrez))
  print(head(listGenesEntrez))
  print(head(listBackgroundEntrez))
  # values$gse_up <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
  #                               ontology="BP", # could be ideally replaced by input$
  #                               number=200)
})
### UP
# limma::goana-based GO enrichment on the upregulated genes, with the
# expressed genes as background; annotates each term with its DE genes
observeEvent(input$button_enrUP, {
  withProgress(message = "Performing Gene Set Enrichment on upregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUP())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) || values$cur_species == "") {
      # `||` (not `|`): short-circuits so the `== ""` comparison is skipped
      # when cur_species is NULL (NULL == "" yields logical(0), which would
      # make this `if` error out)
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      organism <- annoSpecies_df[values$cur_species, ]$species_short
      # background = all genes with at least one count
      backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      inputType <- "SYMBOL" # will be replaced by input$...
      # annopkg <- paste0("org.",organism,".eg.db")
      annopkg <- annoSpecies_df[values$cur_species, ]$pkg
      if (!require(annopkg, character.only = TRUE)) {
        # spaces added around annopkg: stop() pastes its arguments with no
        # separator
        stop("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install() ?")
      }
      listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = values$genelistUP(),
        column = "ENTREZID", keytype = inputType
      ))
      listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = backgroundgenes,
        column = "ENTREZID", keytype = input$idtype
      ))
      incProgress(0.1, detail = "IDs mapped")
      values$gse_up <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
        ontology = input$go_cats[1],
        number = 200
      )
      incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
      # for each enriched GO term, list the upregulated DE genes it contains
      go_ids <- rownames(values$gse_up)
      allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
      genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
      degenes <- values$genelistUP()
      DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
      values$gse_up$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
    }
  })
})
# goseq-based GO enrichment on the upregulated genes (accounts for gene
# length bias); the DE symbols are mapped back to ENSEMBL ids for goseq
observeEvent(input$button_enrUP_goseq, {
  withProgress(message = "GOSEQ - Performing Gene Set Enrichment on upregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUP())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de.genes <- values$genelistUP() # assumed to be in symbols
      assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
      # NOTE(review): assayed.genes is computed but never used below
      assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = assayed.genes.ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = de.genes,
        column = "ENSEMBL",
        keytype = "SYMBOL",
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_up_goseq <- goseqTable(de.genes.ids,
        assayed.genes.ids,
        genome = annoSpecies_df[values$cur_species, ]$goseq_short,
        id = "ensGene",
        testCats = paste0("GO:", input$go_cats),
        FDR_GO_cutoff = 1,
        nTop = 200,
        addGeneToTerms = TRUE,
        orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
      )
      incProgress(0.89)
    }
  })
})
# topGO-based GO enrichment on the upregulated genes, via the
# pcaExplorer::topGOtable wrapper, with expressed genes as background
observeEvent(input$button_enrUP_topgo, {
  withProgress(message = "TOPGO - Performing Gene Set Enrichment on upregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUP())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de_symbols <- values$genelistUP() # assumed to be in symbols
      # background: expressed genes, converted from ids to symbols
      bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = bg_ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      # library(topGO)
      # requireNamespace("topGO")
      values$topgo_up <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
        ontology = input$go_cats[1],
        mapping = annoSpecies_df[values$cur_species, ]$pkg,
        geneID = "symbol", addGeneToTerms = TRUE
      )
      incProgress(0.89)
    }
  })
})
### DOWN
# limma::goana-based GO enrichment on the downregulated genes, with the
# expressed genes as background; annotates each term with its DE genes
observeEvent(input$button_enrDOWN, {
  withProgress(message = "Performing Gene Set Enrichment on downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) || values$cur_species == "") {
      # `||` (not `|`): short-circuits so the `== ""` comparison is skipped
      # when cur_species is NULL (NULL == "" yields logical(0), which would
      # make this `if` error out)
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      organism <- annoSpecies_df[values$cur_species, ]$species_short
      # background = all genes with at least one count
      backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      inputType <- "SYMBOL" # will be replaced by input$...
      annopkg <- annoSpecies_df[values$cur_species, ]$pkg
      if (!require(annopkg, character.only = TRUE)) {
        # spaces added around annopkg: stop() pastes its arguments with no
        # separator
        stop("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install() ?")
      }
      listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = values$genelistDOWN(),
        column = "ENTREZID", keytype = inputType
      ))
      listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = backgroundgenes,
        column = "ENTREZID", keytype = input$idtype
      ))
      incProgress(0.1, detail = "IDs mapped")
      values$gse_down <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
        ontology = input$go_cats[1],
        number = 200
      )
      incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
      # for each enriched GO term, list the downregulated DE genes it contains
      go_ids <- rownames(values$gse_down)
      allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
      genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
      degenes <- values$genelistDOWN()
      DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
      values$gse_down$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
    }
  })
})
# goseq-based GO enrichment on the downregulated genes (accounts for gene
# length bias); the DE symbols are mapped back to ENSEMBL ids for goseq
observeEvent(input$button_enrDOWN_goseq, {
  withProgress(message = "GOSEQ - Performing Gene Set Enrichment on downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de.genes <- values$genelistDOWN() # assumed to be in symbols
      assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
      # NOTE(review): assayed.genes is computed but never used below
      assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = assayed.genes.ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = de.genes,
        column = "ENSEMBL",
        keytype = "SYMBOL",
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_down_goseq <- goseqTable(de.genes.ids,
        assayed.genes.ids,
        genome = annoSpecies_df[values$cur_species, ]$goseq_short,
        id = "ensGene",
        testCats = paste0("GO:", input$go_cats),
        FDR_GO_cutoff = 1,
        nTop = 200,
        addGeneToTerms = TRUE,
        orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
      )
      incProgress(0.89)
    }
  })
})
# topGO-based GO enrichment on the downregulated genes, via the
# pcaExplorer::topGOtable wrapper, with expressed genes as background
observeEvent(input$button_enrDOWN_topgo, {
  withProgress(message = "TOPGO - Performing Gene Set Enrichment on downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de_symbols <- values$genelistDOWN() # assumed to be in symbols
      # background: expressed genes, converted from ids to symbols
      bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = bg_ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      # library(topGO)
      # requireNamespace("topGO")
      values$topgo_down <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
        ontology = input$go_cats[1], # will take the first ontology
        mapping = annoSpecies_df[values$cur_species, ]$pkg,
        geneID = "symbol", addGeneToTerms = TRUE
      )
      incProgress(0.89)
    }
  })
})
### UPDOWN
# limma::goana-based GO enrichment on all DE genes (both directions), with
# the expressed genes as background; annotates each term with its DE genes
observeEvent(input$button_enrUPDOWN, {
  withProgress(message = "Performing Gene Set Enrichment on up- and downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUPDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) || values$cur_species == "") {
      # `||` (not `|`): short-circuits so the `== ""` comparison is skipped
      # when cur_species is NULL (NULL == "" yields logical(0), which would
      # make this `if` error out)
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      organism <- annoSpecies_df[values$cur_species, ]$species_short
      # background = all genes with at least one count
      backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      inputType <- "SYMBOL" # will be replaced by input$...
      # annopkg <- paste0("org.",organism,".eg.db")
      annopkg <- annoSpecies_df[values$cur_species, ]$pkg
      if (!require(annopkg, character.only = TRUE)) {
        # spaces added around annopkg: stop() pastes its arguments with no
        # separator
        stop("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install() ?")
      }
      listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = values$genelistUPDOWN(),
        column = "ENTREZID", keytype = inputType
      ))
      listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = backgroundgenes,
        column = "ENTREZID", keytype = input$idtype
      ))
      incProgress(0.1, detail = "IDs mapped")
      values$gse_updown <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
        ontology = input$go_cats[1],
        number = 200
      )
      incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
      # for each enriched GO term, list the DE genes it contains
      go_ids <- rownames(values$gse_updown)
      allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
      genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
      # bug fix: previously intersected with values$genelistDOWN(), so the
      # per-term gene column of the UP+DOWN table showed only the DOWN genes
      degenes <- values$genelistUPDOWN()
      DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
      values$gse_updown$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
    }
  })
})
# goseq-based GO enrichment on all DE genes (both directions); the DE
# symbols are mapped back to ENSEMBL ids for goseq
observeEvent(input$button_enrUPDOWN_goseq, {
  withProgress(message = "GOSEQ - Performing Gene Set Enrichment on up and downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUPDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de.genes <- values$genelistUPDOWN() # assumed to be in symbols
      assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
      # NOTE(review): assayed.genes is computed but never used below
      assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = assayed.genes.ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = de.genes,
        column = "ENSEMBL",
        keytype = "SYMBOL",
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_updown_goseq <- goseqTable(de.genes.ids,
        assayed.genes.ids,
        genome = annoSpecies_df[values$cur_species, ]$goseq_short,
        id = "ensGene",
        testCats = paste0("GO:", input$go_cats),
        FDR_GO_cutoff = 1,
        nTop = 200,
        addGeneToTerms = TRUE,
        orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
      )
      incProgress(0.89)
    }
  })
})
# topGO-based GO enrichment on all DE genes (both directions), via the
# pcaExplorer::topGOtable wrapper, with expressed genes as background
observeEvent(input$button_enrUPDOWN_topgo, {
  withProgress(message = "TOPGO - Performing Gene Set Enrichment on up and downregulated genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$genelistUPDOWN())) {
      showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de_symbols <- values$genelistUPDOWN() # assumed to be in symbols
      # background: expressed genes, converted from ids to symbols
      bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = bg_ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      # library(topGO)
      # requireNamespace("topGO")
      values$topgo_updown <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
        ontology = input$go_cats[1],
        mapping = annoSpecies_df[values$cur_species, ]$pkg,
        geneID = "symbol", addGeneToTerms = TRUE
      )
      incProgress(0.89)
    }
  })
})
### LIST1
# limma::goana-based GO enrichment on the uploaded gene list 1, with the
# expressed genes as background; annotates each term with its list genes
observeEvent(input$button_enrLIST1, {
  # message fixed: this handler works on list 1, not on upregulated genes
  withProgress(message = "Performing Gene Set Enrichment on list 1 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) || values$cur_species == "") {
      # `||` (not `|`): short-circuits so the `== ""` comparison is skipped
      # when cur_species is NULL (NULL == "" yields logical(0), which would
      # make this `if` error out)
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      organism <- annoSpecies_df[values$cur_species, ]$species_short
      # background = all genes with at least one count
      backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      inputType <- "SYMBOL" # will be replaced by input$...
      # annopkg <- paste0("org.",organism,".eg.db")
      annopkg <- annoSpecies_df[values$cur_species, ]$pkg
      if (!require(annopkg, character.only = TRUE)) {
        # spaces added around annopkg: stop() pastes its arguments with no
        # separator
        stop("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install() ?")
      }
      listGenesEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = as.character(values$genelist1$`Gene Symbol`),
        column = "ENTREZID", keytype = inputType
      )
      listBackgroundEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = backgroundgenes,
        column = "ENTREZID", keytype = input$idtype
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_list1 <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
        ontology = input$go_cats[1],
        number = 200
      )
      incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
      # for each enriched GO term, list the genes of list 1 it contains
      go_ids <- rownames(values$gse_list1)
      allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
      genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
      # bug fix: previously intersected with values$genelistDOWN(), i.e. the
      # DE down list, instead of the uploaded list 1 this handler analyzes
      degenes <- as.character(values$genelist1$`Gene Symbol`)
      DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
      values$gse_list1$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
    }
  })
})
# goseq-based GO enrichment on the uploaded gene list 1; the symbols are
# mapped back to ENSEMBL ids for goseq
observeEvent(input$button_enrLIST1_goseq, {
  withProgress(message = "GOSEQ - Performing Gene Set Enrichment on list 1 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de.genes <- values$genelist1$`Gene Symbol` # assumed to be in symbols
      assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
      # NOTE(review): assayed.genes is computed but never used below
      assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = assayed.genes.ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = de.genes,
        column = "ENSEMBL",
        keytype = "SYMBOL",
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_list1_goseq <- goseqTable(de.genes.ids,
        assayed.genes.ids,
        genome = annoSpecies_df[values$cur_species, ]$goseq_short,
        id = "ensGene",
        testCats = paste0("GO:", input$go_cats),
        FDR_GO_cutoff = 1,
        nTop = 200,
        addGeneToTerms = TRUE,
        orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
      )
      incProgress(0.89)
    }
  })
})
# topGO-based GO enrichment on the uploaded gene list 1, via the
# pcaExplorer::topGOtable wrapper, with expressed genes as background
observeEvent(input$button_enrLIST1_topgo, {
  withProgress(message = "TOPGO - Performing Gene Set Enrichment on list1 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de_symbols <- values$genelist1$`Gene Symbol` # assumed to be in symbols
      # background: expressed genes, converted from ids to symbols
      bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = bg_ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      # library(topGO)
      # requireNamespace("topGO")
      values$topgo_list1 <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
        ontology = input$go_cats[1],
        mapping = annoSpecies_df[values$cur_species, ]$pkg,
        geneID = "symbol", addGeneToTerms = TRUE
      )
      incProgress(0.89)
    }
  })
})
### LIST2
# limma::goana-based GO enrichment on the uploaded gene list 2, with the
# expressed genes as background; annotates each term with its list genes
observeEvent(input$button_enrLIST2, {
  # message fixed: this handler works on list 2, not on upregulated genes
  withProgress(message = "Performing Gene Set Enrichment on list 2 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) || values$cur_species == "") {
      # `||` (not `|`): short-circuits so the `== ""` comparison is skipped
      # when cur_species is NULL (NULL == "" yields logical(0), which would
      # make this `if` error out)
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      organism <- annoSpecies_df[values$cur_species, ]$species_short
      # background = all genes with at least one count
      backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      inputType <- "SYMBOL" # will be replaced by input$...
      # annopkg <- paste0("org.",organism,".eg.db")
      annopkg <- annoSpecies_df[values$cur_species, ]$pkg
      if (!require(annopkg, character.only = TRUE)) {
        # spaces added around annopkg: stop() pastes its arguments with no
        # separator
        stop("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install() ?")
      }
      listGenesEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = as.character(values$genelist2$`Gene Symbol`),
        column = "ENTREZID", keytype = inputType
      )
      listBackgroundEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
        keys = backgroundgenes,
        column = "ENTREZID", keytype = input$idtype
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_list2 <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
        ontology = input$go_cats[1],
        number = 200
      )
      incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
      # for each enriched GO term, list the genes of list 2 it contains
      go_ids <- rownames(values$gse_list2)
      allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
      genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
      # bug fix: previously intersected with values$genelistDOWN(), i.e. the
      # DE down list, instead of the uploaded list 2 this handler analyzes
      degenes <- as.character(values$genelist2$`Gene Symbol`)
      DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
      values$gse_list2$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
    }
  })
})
# goseq-based GO enrichment on the uploaded gene list 2; the symbols are
# mapped back to ENSEMBL ids for goseq
observeEvent(input$button_enrLIST2_goseq, {
  withProgress(message = "GOSEQ - Performing Gene Set Enrichment on list 2 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      # NOTE(review): `|` does not short-circuit; if cur_species is NULL this
      # yields logical(0) and the `if` errors — `||` intended?
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de.genes <- values$genelist2$`Gene Symbol` # assumed to be in symbols
      assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
      # NOTE(review): assayed.genes is computed but never used below
      assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = assayed.genes.ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = de.genes,
        column = "ENSEMBL",
        keytype = "SYMBOL",
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      values$gse_list2_goseq <- goseqTable(de.genes.ids,
        assayed.genes.ids,
        genome = annoSpecies_df[values$cur_species, ]$goseq_short,
        id = "ensGene",
        testCats = paste0("GO:", input$go_cats),
        FDR_GO_cutoff = 1,
        nTop = 200,
        addGeneToTerms = TRUE,
        orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
      )
      incProgress(0.89)
    }
  })
})
# topGO enrichment on the genes of custom list 2.
# Background = every gene with at least one count in the dds object,
# mapped to symbols; results are stored in values$topgo_list2.
observeEvent(input$button_enrLIST2_topgo, {
  withProgress(message = "TOPGO - Performing Gene Set Enrichment on list2 genes...", value = 0, {
    if (is.null(input$speciesSelect)) {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else if (is.null(values$cur_species) | values$cur_species == "") {
      showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
      return(NULL)
    } else {
      de_symbols <- values$genelist2$`Gene Symbol` # assumed to be in symbols
      # background: genes with nonzero total counts, as ids, then mapped to symbols
      bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
      bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
        keys = bg_ids,
        column = "SYMBOL",
        keytype = input$idtype,
        multiVals = "first"
      )
      incProgress(0.1, detail = "IDs mapped")
      # library(topGO)
      # requireNamespace("topGO")
      # only the first selected ontology (input$go_cats[1]) is tested
      values$topgo_list2 <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
        ontology = input$go_cats[1],
        mapping = annoSpecies_df[values$cur_species, ]$pkg,
        geneID = "symbol", addGeneToTerms = TRUE
      )
      incProgress(0.89)
    }
  })
})
# server gse datatables --------------------------------------------------------
## ui outputs here
# Containers for the goana result tables; each one is rendered only once the
# corresponding result object has been computed.
output$ui_DT_gse_up <- renderUI({
  if (is.null(values$gse_up)) return(NULL)
  tagList(
    h4("goana table - up"),
    DT::dataTableOutput("DT_gse_up")
  )
})
output$ui_DT_gse_down <- renderUI({
  if (is.null(values$gse_down)) return(NULL)
  tagList(
    h4("goana table - down"),
    DT::dataTableOutput("DT_gse_down")
  )
})
output$ui_DT_gse_updown <- renderUI({
  if (is.null(values$gse_updown)) return(NULL)
  tagList(
    h4("goana table - up&down"),
    DT::dataTableOutput("DT_gse_updown")
  )
})
output$ui_DT_gse_list1 <- renderUI({
  if (is.null(values$gse_list1)) return(NULL)
  tagList(
    h4("goana table - list1"),
    DT::dataTableOutput("DT_gse_list1")
  )
})
# Container for the goana result table of custom list 2.
output$ui_DT_gse_list2 <- renderUI({
  # Bug fix: this guard previously checked values$gse_up, so the list2 table
  # UI appeared/disappeared based on the *up-regulated* results instead of
  # its own result object. Check values$gse_list2 instead.
  if (is.null(values$gse_list2)) {
    return(NULL)
  }
  return(
    tagList(
      h4("goana table - list2"),
      DT::dataTableOutput("DT_gse_list2")
    )
  )
})
# Containers for the topGO result tables; hidden until results exist.
output$ui_DT_gse_up_topgo <- renderUI({
  if (is.null(values$topgo_up)) return(NULL)
  tagList(
    h4("topGO table - up"),
    DT::dataTableOutput("DT_gse_up_topgo")
  )
})
output$ui_DT_gse_down_topgo <- renderUI({
  if (is.null(values$topgo_down)) return(NULL)
  tagList(
    h4("topGO table - down"),
    DT::dataTableOutput("DT_gse_down_topgo")
  )
})
output$ui_DT_gse_updown_topgo <- renderUI({
  if (is.null(values$topgo_updown)) return(NULL)
  tagList(
    h4("topGO table - up&down"),
    DT::dataTableOutput("DT_gse_updown_topgo")
  )
})
output$ui_DT_gse_list1_topgo <- renderUI({
  if (is.null(values$topgo_list1)) return(NULL)
  tagList(
    h4("topGO table - list1"),
    DT::dataTableOutput("DT_gse_list1_topgo")
  )
})
output$ui_DT_gse_list2_topgo <- renderUI({
  if (is.null(values$topgo_list2)) return(NULL)
  tagList(
    h4("topGO table - list2"),
    DT::dataTableOutput("DT_gse_list2_topgo")
  )
})
# Containers for the goseq result tables; hidden until results exist.
output$ui_DT_gse_up_goseq <- renderUI({
  if (is.null(values$gse_up_goseq)) return(NULL)
  tagList(
    h4("goseq table - up"),
    DT::dataTableOutput("DT_gse_up_goseq")
  )
})
output$ui_DT_gse_down_goseq <- renderUI({
  if (is.null(values$gse_down_goseq)) return(NULL)
  tagList(
    h4("goseq table - down"),
    DT::dataTableOutput("DT_gse_down_goseq")
  )
})
output$ui_DT_gse_updown_goseq <- renderUI({
  if (is.null(values$gse_updown_goseq)) return(NULL)
  tagList(
    h4("goseq table - up&down"),
    DT::dataTableOutput("DT_gse_updown_goseq")
  )
})
output$ui_DT_gse_list1_goseq <- renderUI({
  if (is.null(values$gse_list1_goseq)) return(NULL)
  tagList(
    h4("goseq table - list1"),
    DT::dataTableOutput("DT_gse_list1_goseq")
  )
})
# Container for the goseq result table of custom list 2.
output$ui_DT_gse_list2_goseq <- renderUI({
  # Bug fix: this guard previously checked values$gse_up_goseq, so the list2
  # goseq table UI tracked the *up-regulated* results instead of its own.
  # Check values$gse_list2_goseq instead.
  if (is.null(values$gse_list2_goseq)) {
    return(NULL)
  }
  return(
    tagList(
      h4("goseq table - list2"),
      DT::dataTableOutput("DT_gse_list2_goseq")
    )
  )
})
## actual DTs here
# goana result tables: GO ids in the rownames are turned into AmiGO links.
output$DT_gse_up <- DT::renderDataTable({
  tbl <- values$gse_up
  if (is.null(tbl)) return(NULL)
  rownames(tbl) <- createLinkGO(rownames(tbl))
  datatable(tbl, escape = FALSE)
})
output$DT_gse_down <- DT::renderDataTable({
  tbl <- values$gse_down
  if (is.null(tbl)) return(NULL)
  rownames(tbl) <- createLinkGO(rownames(tbl))
  datatable(tbl, escape = FALSE)
})
output$DT_gse_updown <- DT::renderDataTable({
  tbl <- values$gse_updown
  if (is.null(tbl)) return(NULL)
  rownames(tbl) <- createLinkGO(rownames(tbl))
  datatable(tbl, escape = FALSE)
})
output$DT_gse_list1 <- DT::renderDataTable({
  tbl <- values$gse_list1
  if (is.null(tbl)) return(NULL)
  rownames(tbl) <- createLinkGO(rownames(tbl))
  datatable(tbl, escape = FALSE)
})
output$DT_gse_list2 <- DT::renderDataTable({
  tbl <- values$gse_list2
  if (is.null(tbl)) return(NULL)
  rownames(tbl) <- createLinkGO(rownames(tbl))
  datatable(tbl, escape = FALSE)
})
# topGO result tables: GO.ID column linked to AmiGO; single-row selection
# drives the per-term heatmaps rendered further below.
output$DT_gse_up_topgo <- DT::renderDataTable({
  tbl <- values$topgo_up
  if (is.null(tbl)) return(NULL)
  tbl$GO.ID <- createLinkGO(tbl$GO.ID)
  DT::datatable(tbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_down_topgo <- DT::renderDataTable({
  tbl <- values$topgo_down
  if (is.null(tbl)) return(NULL)
  tbl$GO.ID <- createLinkGO(tbl$GO.ID)
  DT::datatable(tbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_updown_topgo <- DT::renderDataTable({
  tbl <- values$topgo_updown
  if (is.null(tbl)) return(NULL)
  tbl$GO.ID <- createLinkGO(tbl$GO.ID)
  DT::datatable(tbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_list1_topgo <- DT::renderDataTable({
  tbl <- values$topgo_list1
  if (is.null(tbl)) return(NULL)
  tbl$GO.ID <- createLinkGO(tbl$GO.ID)
  DT::datatable(tbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_list2_topgo <- DT::renderDataTable({
  tbl <- values$topgo_list2
  if (is.null(tbl)) return(NULL)
  tbl$GO.ID <- createLinkGO(tbl$GO.ID)
  DT::datatable(tbl, escape = FALSE, selection = list(mode = "single"))
})
# goseq result tables: category column linked to AmiGO, rownames hidden.
output$DT_gse_up_goseq <- DT::renderDataTable({
  tbl <- values$gse_up_goseq
  if (is.null(tbl)) return(NULL)
  tbl$category <- createLinkGO(tbl$category)
  datatable(tbl, escape = FALSE, rownames = FALSE)
})
output$DT_gse_down_goseq <- DT::renderDataTable({
  tbl <- values$gse_down_goseq
  if (is.null(tbl)) return(NULL)
  tbl$category <- createLinkGO(tbl$category)
  datatable(tbl, escape = FALSE, rownames = FALSE)
})
output$DT_gse_updown_goseq <- DT::renderDataTable({
  tbl <- values$gse_updown_goseq
  if (is.null(tbl)) return(NULL)
  tbl$category <- createLinkGO(tbl$category)
  datatable(tbl, escape = FALSE, rownames = FALSE)
})
output$DT_gse_list1_goseq <- DT::renderDataTable({
  tbl <- values$gse_list1_goseq
  if (is.null(tbl)) return(NULL)
  tbl$category <- createLinkGO(tbl$category)
  datatable(tbl, escape = FALSE, rownames = FALSE)
})
output$DT_gse_list2_goseq <- DT::renderDataTable({
  tbl <- values$gse_list2_goseq
  if (is.null(tbl)) return(NULL)
  tbl$category <- createLinkGO(tbl$category)
  datatable(tbl, escape = FALSE, rownames = FALSE)
})
# server gse heatmaps --------------------------------------------------------
# Row-scaled expression heatmap of the genes annotated to the GO term
# selected in the topGO table (up-regulated genes).
output$goterm_heatmap_up_topgo <- renderPlot({
  s <- input$DT_gse_up_topgo_rows_selected
  if (length(s) == 0) {
    return(NULL)
  }
  # only the first selected row is considered
  mygenes <- values$topgo_up[s, ]$genes[1]
  myterm <- paste0(
    values$topgo_up[s, ]$`GO.ID`, " - ",
    values$topgo_up[s, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map gene symbols to the id type used as rownames of the dds object
  # (get() replaces the previous eval(parse(...)) anti-pattern)
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  # Bug fix: mapIds(multiVals = "first") returns one entry per key, with NA
  # for unmatched symbols, so the previous length() comparison could never
  # detect failed lookups and NA ids would break the row subsetting below.
  # Keep only successfully mapped genes, labeled by their symbol.
  mapped <- !is.na(genevec_ids)
  if (!any(mapped)) {
    return(NULL)
  }
  log2things <- assay(normTransform(values$dds_obj))
  # drop = FALSE keeps a matrix even when a single gene is selected
  selectedLogvalues <- log2things[genevec_ids[mapped], , drop = FALSE]
  pheatmap(selectedLogvalues, scale = "row", labels_row = genevec[mapped], main = myterm)
})
# Row-scaled expression heatmap of the genes annotated to the GO term
# selected in the topGO table (down-regulated genes).
output$goterm_heatmap_down_topgo <- renderPlot({
  s <- input$DT_gse_down_topgo_rows_selected
  if (length(s) == 0) {
    return(NULL)
  }
  # only the first selected row is considered
  mygenes <- values$topgo_down[s, ]$genes[1]
  myterm <- paste0(
    values$topgo_down[s, ]$`GO.ID`, " - ",
    values$topgo_down[s, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map gene symbols to the id type used as rownames of the dds object
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  # Bug fix: mapIds(multiVals = "first") always returns one entry per key
  # (NA for unmatched), so the previous length() comparison never detected
  # failed lookups and NA ids would break the subsetting. Drop unmapped genes.
  mapped <- !is.na(genevec_ids)
  if (!any(mapped)) {
    return(NULL)
  }
  log2things <- assay(normTransform(values$dds_obj))
  # drop = FALSE keeps a matrix even when a single gene is selected
  selectedLogvalues <- log2things[genevec_ids[mapped], , drop = FALSE]
  pheatmap(selectedLogvalues, scale = "row", labels_row = genevec[mapped], main = myterm)
})
# Row-scaled expression heatmap of the genes annotated to the GO term
# selected in the topGO table (up & down regulated genes).
output$goterm_heatmap_updown_topgo <- renderPlot({
  s <- input$DT_gse_updown_topgo_rows_selected
  if (length(s) == 0) {
    return(NULL)
  }
  # (a stray no-op line that merely evaluated and discarded the genes column
  # has been removed here)
  # only the first selected row is considered
  mygenes <- values$topgo_updown[s, ]$genes[1]
  myterm <- paste0(
    values$topgo_updown[s, ]$`GO.ID`, " - ",
    values$topgo_updown[s, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map gene symbols to the id type used as rownames of the dds object
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  # Bug fix: mapIds(multiVals = "first") always returns one entry per key
  # (NA for unmatched), so the previous length() comparison never detected
  # failed lookups and NA ids would break the subsetting. Drop unmapped genes.
  mapped <- !is.na(genevec_ids)
  if (!any(mapped)) {
    return(NULL)
  }
  log2things <- assay(normTransform(values$dds_obj))
  # drop = FALSE keeps a matrix even when a single gene is selected
  selectedLogvalues <- log2things[genevec_ids[mapped], , drop = FALSE]
  pheatmap(selectedLogvalues, scale = "row", labels_row = genevec[mapped], main = myterm)
})
# Row-scaled expression heatmap of the genes annotated to the GO term
# selected in the topGO table for custom list 1.
output$goterm_heatmap_l1_topgo <- renderPlot({
  s <- input$DT_gse_list1_topgo_rows_selected
  if (length(s) == 0) {
    return(NULL)
  }
  # only the first selected row is considered
  mygenes <- values$topgo_list1[s, ]$genes[1]
  myterm <- paste0(
    values$topgo_list1[s, ]$`GO.ID`, " - ",
    values$topgo_list1[s, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map gene symbols to the id type used as rownames of the dds object
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  # Bug fix: mapIds(multiVals = "first") always returns one entry per key
  # (NA for unmatched), so the previous length() comparison never detected
  # failed lookups and NA ids would break the subsetting. Drop unmapped genes.
  mapped <- !is.na(genevec_ids)
  if (!any(mapped)) {
    return(NULL)
  }
  log2things <- assay(normTransform(values$dds_obj))
  # drop = FALSE keeps a matrix even when a single gene is selected
  selectedLogvalues <- log2things[genevec_ids[mapped], , drop = FALSE]
  pheatmap(selectedLogvalues, scale = "row", labels_row = genevec[mapped], main = myterm)
})
# Row-scaled expression heatmap of the genes annotated to the GO term
# selected in the topGO table for custom list 2.
output$goterm_heatmap_l2_topgo <- renderPlot({
  s <- input$DT_gse_list2_topgo_rows_selected
  if (length(s) == 0) {
    return(NULL)
  }
  # only the first selected row is considered
  mygenes <- values$topgo_list2[s, ]$genes[1]
  myterm <- paste0(
    values$topgo_list2[s, ]$`GO.ID`, " - ",
    values$topgo_list2[s, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map gene symbols to the id type used as rownames of the dds object
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  # Bug fix: mapIds(multiVals = "first") always returns one entry per key
  # (NA for unmatched), so the previous length() comparison never detected
  # failed lookups and NA ids would break the subsetting. Drop unmapped genes.
  mapped <- !is.na(genevec_ids)
  if (!any(mapped)) {
    return(NULL)
  }
  log2things <- assay(normTransform(values$dds_obj))
  # drop = FALSE keeps a matrix even when a single gene is selected
  selectedLogvalues <- log2things[genevec_ids[mapped], , drop = FALSE]
  pheatmap(selectedLogvalues, scale = "row", labels_row = genevec[mapped], main = myterm)
})
# server signature explorer ------------------------------------------------------
output$sig_ui_gmtin <- renderUI({
  fileInput("sig_gmtin", "gmt input file")
})
# Parse the uploaded gmt file into a named list of gene signatures
loaded_gmt <- reactive({
  gmt_upload <- input$sig_gmtin
  if (is.null(gmt_upload)) return(NULL)
  read_gmt(gmt_upload$datapath)
})
# Cache the parsed signatures whenever a new file is uploaded
observeEvent(input$sig_gmtin, {
  values$gene_signatures <- loaded_gmt()
})
# Value box showing how many signatures are loaded (green) or a red
# placeholder while none are available yet.
output$sig_ui_nrsigs <- renderUI({
  if (is.null(values$gene_signatures)) {
    valueBox("Gene signatures",
      "yet to be loaded",
      icon = icon("list"),
      color = "red", width = NULL
    )
  } else {
    valueBox("Gene signatures",
      paste0(length(values$gene_signatures), " gene signatures"),
      icon = icon("list"),
      color = "green", width = NULL
    )
  }
})
# Compute and cache the variance-stabilized transform of the dds object
observeEvent(input$sig_button_computevst, {
  withProgress(
    message = "Computing the variance stabilized transformed data...",
    detail = "This step can take a little while",
    value = 0,
    {
      values$vst_obj <- vst(values$dds_obj)
    }
  )
})
# Signature picker; choices are filled in server-side by the observer below
output$sig_ui_selectsig <- renderUI({
  if (is.null(values$gene_signatures)) return(NULL)
  selectizeInput("sig_selectsig",
    label = "Select the gene signature",
    choices = NULL, selected = NULL, multiple = FALSE
  )
})
observe({
  updateSelectizeInput(session = session, inputId = "sig_selectsig", choices = c(Choose = "", names(values$gene_signatures)), server = TRUE)
})
# Text preview of the members of the currently selected signature
output$sig_sigmembers <- renderPrint({
  values$gene_signatures[[input$sig_selectsig]]
})
# Multi-select over the colData columns used to decorate the heatmap
output$sig_ui_annocoldata <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  selectizeInput("sig_annocoldata",
    label = "Select the colData to decorate",
    choices = names(colData(values$dds_obj)),
    selected = NULL, multiple = TRUE
  )
})
# Id-type pickers: a fixed set of common key types, extended with every
# keytype supported by the selected species' annotation package.
output$sig_ui_id_data <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  validate(
    need(!is.null(input$speciesSelect), message = "Please specify the species in the Data Setup panel")
  )
  id_choices <- c("ENSEMBL", "ENTREZID", "REFSEQ", "SYMBOL")
  if (input$speciesSelect != "") {
    orgdb <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
    id_choices <- union(id_choices, keytypes(get(orgdb)))
  }
  selectInput("sig_id_data", "select the id type in your dds data", choices = id_choices)
})
output$sig_ui_id_sigs <- renderUI({
  if (is.null(values$gene_signatures)) return(NULL)
  validate(
    need(!is.null(input$speciesSelect), message = "Please specify the species in the Data Setup panel")
  )
  id_choices <- c("ENSEMBL", "ENTREZID", "REFSEQ", "SYMBOL")
  if (input$speciesSelect != "") {
    orgdb <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
    id_choices <- union(id_choices, keytypes(get(orgdb)))
  }
  selectInput("sig_id_sigs", "select the id type in your signatures", choices = id_choices)
})
# org.*.db annotation packages currently installed, offered for id matching
available_orgdb <- grep("^org.*db$", rownames(installed.packages()), value = TRUE)
output$sig_ui_orgdbpkg <- renderUI({
  selectInput("sig_orgdbpkg", "Select the organism package for matching",
    choices = c("", available_orgdb), selected = ""
  )
})
# Preselect the orgDb package matching the chosen species, when installed
observeEvent(input$speciesSelect, {
  species_orgdb <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  if (species_orgdb %in% available_orgdb) {
    updateSelectInput(session, inputId = "sig_orgdbpkg", selected = species_orgdb)
  }
})
# Build the conversion vector between the ids used as rownames of the dds
# object and the id type used in the loaded gene signatures; stored in
# values$anno_vec (names = dds ids, values = signature-type ids).
observeEvent(input$sig_convert_setup, {
  withProgress(
    message = "Matching the identifiers",
    detail = "Locating package",
    value = 0,
    {
      # NOTE(review): require() returns FALSE instead of erroring when the
      # package is missing, so a missing package would only surface as a
      # less informative error in get() below -- consider checking the
      # return value and notifying the user.
      require(input$sig_orgdbpkg, character.only = TRUE)
      incProgress(0.1, detail = "Matching identifiers")
      x <- get(input$sig_orgdbpkg)
      values$anno_vec <- mapIds(x, rownames(values$dds_obj),
        column = input$sig_id_sigs,
        keytype = input$sig_id_data
      )
    }
  )
})
# Quick sanity-check preview of the conversion vector
output$sig_convcheck <- renderPrint({
  head(values$anno_vec)
})
# Heatmap of the selected gene signature on the vst-transformed data.
# All prerequisites (signatures, vst object, id conversion vector, and --
# when subsetting to DE genes -- the results object) are validated first.
output$sig_heat <- renderPlot({
  validate(
    need(!is.null(values$gene_signatures), message = "Please provide some gene signatures in gmt format"),
    need(!is.null(values$vst_obj), message = "Compute the vst transformed data"),
    need(!is.null(values$anno_vec), message = "Setup the conversion between data ids and signature ids"),
    need((!is.null(values$res_obj) | !input$sig_useDEonly),
      message = "Please compute the results first if you want to subset to DE genes only"
    ),
    need(input$sig_selectsig != "", message = "Select a signature")
  )
  # print() is required for the plot to render inside renderPlot
  print(
    sig_heatmap(
      values$vst_obj,
      my_signature = values$gene_signatures[[input$sig_selectsig]],
      res_data = values$res_obj,
      FDR = input$FDR,
      de_only = input$sig_useDEonly,
      annovec = values$anno_vec,
      # anno_colData = colData(values$vst_obj)[,input$sig_annocoldata, drop = FALSE],
      title = names(values$gene_signatures)[match(input$sig_selectsig, names(values$gene_signatures))],
      cluster_rows = input$sig_clusterrows,
      cluster_cols = input$sig_clustercols,
      center_mean = input$sig_centermean,
      scale_row = input$sig_scalerow
    )
  )
})
# server ui update/observers --------------------------------------------------------
# Grouping/coloring covariate picker, fed from the colData of the dds object
output$color_by <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  covariates <- names(colData(values$dds_obj))
  selectInput("color_by",
    label = "Group/color by: ",
    choices = c(NULL, covariates), selected = NULL, multiple = TRUE
  )
})
# this trick speeds up the populating of the select(ize) input widgets,
# see http://stackoverflow.com/questions/38438920/shiny-selectinput-very-slow-on-larger-data-15-000-entries-in-browser
observe({
  # server-side choices: all gene ids (rownames) of the dds object
  updateSelectizeInput(
    session = session, inputId = "avail_ids",
    choices = c(Choose = "", rownames(values$dds_obj)), server = TRUE
  )
})
observe({
  # server-side choices: gene symbols matched to the dds rownames
  updateSelectizeInput(
    session = session, inputId = "avail_symbols",
    choices = c(Choose = "", values$annotation_obj$gene_name[match(rownames(values$dds_obj), values$annotation_obj$gene_id)]),
    server = TRUE
  )
})
# Gene selector: symbols when an annotation is loaded, raw ids otherwise
output$available_genes <- renderUI({
  if (is.null(values$annotation_obj)) {
    selectizeInput("avail_ids",
      label = "Select the gene(s) of interest - ids",
      choices = NULL, selected = NULL, multiple = TRUE
    )
  } else {
    selectizeInput("avail_symbols",
      label = "Select the gene(s) of interest",
      choices = NULL, selected = NULL, multiple = TRUE
    )
  }
})
# Factors usable for contrasts, taken from the design formula (last term first)
design_factors <- reactive({
  rev(attributes(terms.formula(design(values$dds_obj)))$term.labels)
})
output$choose_fac <- renderUI({
  selectInput("choose_expfac",
    label = "Choose the experimental factor to build the contrast upon (must be in the design formula)",
    choices = c("", design_factors()), selected = ""
  )
})
# Keep the coloring covariate in sync with the chosen contrast factor
observe({
  updateSelectizeInput(session = session, inputId = "color_by", selected = input$choose_expfac)
})
# server DE results --------------------------------------------------------
# nrl <- reactive
# Hint shown when the chosen factor has more than two levels (LRT possible)
output$lrtavailable <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  shiny::validate(
    need(input$choose_expfac != "", "")
  )
  n_levels <- length(levels(colData(values$dds_obj)[, input$choose_expfac]))
  if (n_levels > 2) {
    p("I can perform a LRT test on the chosen factor, select the full and the reduced model")
  }
})
# Full-model selector for the LRT, shown only for factors with > 2 levels
output$lrtfull <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  shiny::validate(
    need(input$choose_expfac != "", "")
  )
  n_levels <- length(levels(colData(values$dds_obj)[, input$choose_expfac]))
  if (n_levels > 2) {
    selectInput("choose_lrt_full",
      label = "Choose the factors for the full model",
      choices = c("", design_factors()), selected = "", multiple = TRUE
    )
  }
})
# Reduced-model selector for the LRT, shown only for factors with > 2 levels
output$lrtreduced <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  shiny::validate(
    need(input$choose_expfac != "", "")
  )
  n_levels <- length(levels(colData(values$dds_obj)[, input$choose_expfac]))
  if (n_levels > 2) {
    tagList(
      selectInput("choose_lrt_reduced",
        label = "Choose the factor(s) for the reduced model",
        choices = c("", design_factors()), selected = "", multiple = TRUE
      ),
      p("If left blank, the formula for the reduced model will be '~ 1'")
    )
  }
})
# Run-LRT button, shown only for factors with more than two levels
output$runlrt <- renderUI({
  if (is.null(values$dds_obj)) return(NULL)
  shiny::validate(
    need(input$choose_expfac != "", "")
  )
  n_levels <- length(levels(colData(values$dds_obj)[, input$choose_expfac]))
  if (n_levels > 2) {
    actionButton("button_runlrt", label = "(re)Run LRT for the dataset", class = "btn btn-primary")
  }
})
# Run the likelihood ratio test (LRT) with the chosen full/reduced models.
# The full model must match the design of the dds object; results are stored
# in values$res_obj (decorated with symbols when an annotation is loaded).
observeEvent(input$button_runlrt, {
  withProgress(
    message = "Computing the LRT results...",
    detail = "This step can take a little while",
    value = 0,
    {
      # NOTE(review): if input$choose_lrt_full is NULL this builds "~", which
      # as.formula() rejects -- presumably the UI guarantees a selection
      # before this button is usable; confirm.
      lrt_full_model <- as.formula(paste0("~", paste(input$choose_lrt_full, collapse = " + ")))
      if (!is.null(input$choose_lrt_reduced)) {
        lrt_reduced_model <- as.formula(
          paste0("~", paste(input$choose_lrt_reduced, collapse = " + "))
        )
      } else {
        # fall back to the intercept-only model when nothing is selected
        lrt_reduced_model <- as.formula("~1")
      }
      if (is.null(input$choose_lrt_reduced)) {
        showNotification("Using ~1 as reduced model...", type = "message")
      }
      if (lrt_full_model == design(values$dds_obj)) {
        values$ddslrt <- DESeq(values$dds_obj,
          test = "LRT",
          full = lrt_full_model,
          reduced = lrt_reduced_model
        )
        values$res_obj <- results(values$ddslrt)
        # decorate with gene symbols when an annotation is available
        if (!is.null(values$annotation_obj)) {
          values$res_obj$symbol <-
            values$annotation_obj$gene_name[match(
              rownames(values$res_obj),
              rownames(values$annotation_obj)
            )]
        }
      } else {
        # refuse to run when the full model deviates from the stored design
        showNotification(
          ui = paste0(
            "The full model must be equal to the specified design of the object ",
            format(design(values$dds_obj))
          ),
          type = "warning"
        )
      }
    }
  )
})
# copy this in the report for debugging purposes or so
# # section title
#
# ```{r setup, include=FALSE}
# knitr::opts_chunk$set(echo = TRUE)
# ```
#
# ## R Markdown
#
# This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>.
#
# When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this:
#
# ```{r cars}
# values$reslrt
# summary(values$reslrt)
#
# deseqresult2DEgenes(values$reslrt)
# plotCounts(dds_airway_lrt,intgroup="cell",gene="ENSG00000262902")
# plotCounts(dds_airway_lrt,intgroup="cell",gene="ENSG00000123243")
# resultsNames(dds_airway_lrt)
# ```
#
#
# ```{r}
# footertemplate()
# ```
## TODO; think if we want to allow for a continuous factor in the results, if so then do something like building
# the ui elements accordingly
# Numerator-level selector for the fold change (factor covariates only)
output$fac1 <- renderUI({
  shiny::validate(
    need(
      input$choose_expfac != "",
      "Please select an experimental factor to generate the results"
    )
  )
  covariate_vals <- colData(values$dds_obj)[, input$choose_expfac]
  if (is.factor(covariate_vals)) {
    selectInput("fac1_c1", "Select the name of the numerator level for the fold change", choices = c("", levels(covariate_vals)), selected = "")
  }
})
# Denominator-level selector for the fold change (factor covariates only)
output$fac2 <- renderUI({
  shiny::validate(
    need(input$choose_expfac != "", "")
  )
  covariate_vals <- colData(values$dds_obj)[, input$choose_expfac]
  if (is.factor(covariate_vals)) {
    selectInput("fac1_c2", "Select the name of the denominator level for the fold change (must be different from the numerator)", choices = c("", levels(covariate_vals)), selected = "")
  }
})
# Informative note shown when the chosen experimental factor is continuous
# (numeric/integer) rather than a factor with levels.
output$facnum <- renderPrint({
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  # inherits() is the robust class test: class() can return a vector of
  # length > 1, which makes `class(x) %in% ...` yield a multi-element
  # condition. inherits() returns a single TRUE/FALSE.
  if (inherits(fac1_vals, c("integer", "numeric"))) {
    print("numeric/integer factor provided")
  }
})
# "Extract the results" button: shown only once a valid contrast has been
# set up and DESeq() has been run on the dds object.
output$runresults <- renderUI({
  shiny::validate(
    need(
      input$choose_expfac != "",
      "Select a factor for the contrast first"
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  # for factor covariates, two distinct levels are required for the contrast
  if (!(class(colData(values$dds_obj)[, fac1]) %in% c("integer", "numeric"))) {
    shiny::validate(
      need(
        input$fac1_c1 != "" & input$fac1_c2 != "" & input$fac1_c1 != input$fac1_c2,
        "Select two different levels of the factor for the contrast"
      )
    )
  }
  # if((class(colData(values$dds_obj)[,fac1]) %in% c("integer","numeric"))){
  #
  # shiny::validate(
  #   need(input$resu_lfcshrink==FALSE,
  #        "Set the Add the unshrunken MLE to FALSE")
  # )
  # }
  # mcols(mcols(dds)) carries a "results" type entry only after DESeq() ran
  shiny::validate(
    need(
      "results" %in% mcols(mcols(values$dds_obj))$type,
      "I couldn't find results. you should first run DESeq() with the button up here"
    )
  )
  # if(input$choose_expfac=="" | input$fac1_c1 == "" | input$fac1_c2 == "" | input$fac1_c1 == input$fac1_c2)
  # return(NULL)
  # else
  actionButton("button_runresults", "Extract the results!", icon = icon("spinner"), class = "btn btn-success")
})
# Extract the DE results for the configured contrast and store them in
# values$res_obj, optionally applying IHW filtering and logFC shrinkage,
# then decorate with gene symbols when an annotation is available.
observeEvent(input$button_runresults, {
  withProgress(
    message = "Computing the results...",
    detail = "DE table on its way!",
    value = 0,
    {
      expfac <- input$choose_expfac
      expfac_vals <- colData(values$dds_obj)[, expfac]
      # handling the experimental covariate correctly to extract the results...
      if (is.factor(expfac_vals)) {
        # Factor covariate: contrast numerator (fac1_c1) vs denominator
        # (fac1_c2). The two previous copy-pasted branches differed only in
        # the presence of filterFun = ihw, so the call is built once here.
        res_args <- list(
          object = values$dds_obj,
          contrast = c(expfac, input$fac1_c1, input$fac1_c2),
          independentFiltering = input$resu_indfil,
          alpha = input$FDR
        )
        if (input$resu_ihw) {
          res_args$filterFun <- ihw
        }
        values$res_obj <- do.call(results, res_args)
        if (input$resu_lfcshrink) {
          incProgress(amount = 0.15, detail = "Results extracted. Shrinking the logFC now...")
          values$res_obj <- lfcShrink(values$dds_obj,
            contrast = c(expfac, input$fac1_c1, input$fac1_c2),
            res = values$res_obj,
            type = "normal"
          )
          incProgress(amount = 0.8, detail = "logFC shrunken, adding annotation info...")
        } else {
          incProgress(amount = 0.9, detail = "logFC left unshrunken, adding annotation info...")
        }
      }
      if (inherits(expfac_vals, c("integer", "numeric"))) {
        # Continuous covariate: test the single coefficient by name
        values$res_obj <- results(values$dds_obj,
          name = expfac,
          independentFiltering = input$resu_indfil,
          alpha = input$FDR
          # , addMLE = input$resu_lfcshrink
        )
      }
      # adding info from the annotation
      if (!is.null(values$annotation_obj)) {
        values$res_obj$symbol <- values$annotation_obj$gene_name[
          match(
            rownames(values$res_obj),
            rownames(values$annotation_obj)
          )
        ]
      }
    }
  )
})
# Textual summary() of the current DESeqResults at the selected FDR, shown
# only once a valid contrast has been chosen and results have been computed
output$diyres_summary <- renderPrint({
  shiny::validate(
    need(
      input$choose_expfac != "" & input$fac1_c1 != "" & input$fac1_c2 != "" & input$fac1_c1 != input$fac1_c2,
      "Please select the factor to build the contrast upon, and two different levels to build the contrast"
    )
  )
  shiny::validate(
    need(!is.null(values$res_obj), "Parameters selected, please compute the results first")
  )
  # summary(results(values$dds_obj,contrast = c(input$choose_expfac, input$fac1_c1, input$fac1_c2)))
  summary(values$res_obj, alpha = input$FDR)
})
# Print the design formula of the current dds object.
# NOTE(review): inside renderPrint's braces only the last expression is
# auto-printed, so the bare `values$dds_obj` line produces no visible output -
# confirm whether an explicit print(values$dds_obj) was intended here.
output$printdds <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$dds_obj),
      "Please provide a count matrix/dds object"
    )
  )
  values$dds_obj
  design(values$dds_obj)
})
# Print the tested hypothesis (parsed out of the pvalue column metadata)
# plus a summary of the results at the currently selected FDR threshold
output$printres <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$res_obj),
      "Please provide a DESeqResults object"
    )
  )
  print(sub(".*p-value: (.*)", "\\1", mcols(values$res_obj, use.names = TRUE)["pvalue", "description"]))
  summary(values$res_obj, alpha = input$FDR) # use fdr shiny widget
})
# Offer a "store" button only once a results object is available; clicking it
# snapshots the current results so later parameter edits can be compared.
output$store_result <- renderUI({
  if (!is.null(values$res_obj)) {
    actionButton("button_store_result", "Store current results", class = "btn btn-primary")
  } else {
    NULL
  }
})
observeEvent(input$button_store_result, {
  # keep a copy of the current results for later comparison
  values$stored_res <- values$res_obj
})
# Interactive results table, ordered by adjusted p-value, with row names and
# symbols rendered as clickable ENSEMBL / gene-symbol links
output$table_res <- DT::renderDataTable({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  mydf <- as.data.frame(values$res_obj[order(values$res_obj$padj), ]) # [1:500,]
  # turn the row names into links to the ENSEMBL page of the matching species
  rownames(mydf) <- createLinkENS(rownames(mydf), species = annoSpecies_df$ensembl_db[match(input$speciesSelect, annoSpecies_df$species)]) ## TODO: check what are the species from ensembl and
  ## TODO: add a check to see if wanted?
  mydf$symbol <- createLinkGeneSymbol(mydf$symbol)
  # escape = FALSE so the generated <a> tags render as actual links
  datatable(mydf, escape = FALSE)
})
# server resu diagnostics --------------------------------------------------------
# Histogram of the raw p-values; blue line = expected height of the null
# (uniform) component, red line = selected FDR threshold
output$pvals_hist <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  p <- ggplot(res_df, aes_string("pvalue")) +
    geom_histogram(binwidth = 0.01, boundary = 0) +
    theme_bw()
  # for visual estimation of the false discovery proportion in the first bin
  # pi0: estimated fraction of true nulls (2x the p-value mass above 0.5)
  # NOTE(review): binw is set to the FDR value, not to the histogram binwidth
  # (0.01) - confirm this is the intended width for the expected-null count
  alpha <- binw <- input$FDR
  pi0 <- 2 * mean(res_df$pvalue > 0.5)
  p <- p + geom_hline(yintercept = pi0 * binw * nrow(res_df), col = "steelblue") +
    geom_vline(xintercept = alpha, col = "red")
  p <- p + ggtitle(
    label = "p-value histogram",
    subtitle = paste0(
      "Expected nulls = ", pi0 * binw * nrow(res_df),
      " - #elements in the selected bins = ", sum(res_df$pvalue < alpha)
    )
  )
  # cache the ggplot object so the export download handlers can save it
  exportPlots$plot_pvals_hist <- p
  p
})
# p-value histogram stratified by deciles of the mean expression level, to
# spot expression-dependent departures from uniformity
output$pvals_hist_strat <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  # bin genes into ~10 strata delimited by the baseMean quantiles
  res_df <- mutate(
    res_df,
    stratum = cut(baseMean,
      include.lowest = TRUE,
      breaks = signif(quantile(baseMean, probs = seq(0, 1, length.out = 10)), 2)
    )
  )
  p <- ggplot(res_df, aes_string("pvalue")) +
    geom_histogram(binwidth = 0.01, boundary = 0) +
    facet_wrap(~stratum) +
    theme_bw()
  p <- p + ggtitle(
    label = "p-value histogram",
    subtitle = "stratified on the different value classes of mean expression values"
  )
  exportPlots$plot_pvals_hist_strat <- p
  p
})
# Schweder-Spjotvoll plot: sorted p-values vs their rank, with the rejection
# line of slope FDR/m; the intersection point marks the last rejected test
output$pvals_ss <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  phi <- input$FDR
  res_df <- mutate(res_df, rank = rank(pvalue))
  m <- nrow(res_df)
  # NOTE(review): only the first 6000 ranks are drawn (hard-coded cutoff),
  # presumably to keep the plot readable - confirm
  p <- ggplot(
    filter(res_df, rank <= 6000),
    aes_string(x = "rank", y = "pvalue")
  ) +
    geom_line() +
    geom_abline(slope = phi / m, col = "red") +
    theme_bw()
  p <- p + ggtitle(
    label = "Schweder-Spjotvoll plot",
    subtitle = paste0(
      "Intersection point at rank ", with(arrange(res_df, rank), last(which(pvalue <= phi * rank / m)))
    )
  )
  exportPlots$plot_pvals_ss <- p
  p
})
# Histogram of the log2 fold changes of the current results
output$logfc_hist <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  p <- ggplot(res_df, aes_string("log2FoldChange")) +
    geom_histogram(binwidth = 0.1) +
    theme_bw()
  p <- p + ggtitle(
    "Histogram of the log2 fold changes"
  )
  exportPlots$plot_logfc_hist <- p
  p
})
# Show the design formula of the current dds object
output$dds_design <- renderPrint({
  design(values$dds_obj)
})
# Show the coefficient names available for extracting results
output$res_names <- renderPrint({
  resultsNames(values$dds_obj)
})
# List the covariates that make up the design formula
output$explore_res <- renderPrint({
  expfac <- attributes(terms.formula(design(values$dds_obj)))$term.labels
  expfac # plus, support up to four factors that are either there or not according to the length
})
# Main MA plot of the current results; the object is also cached in
# exportPlots so the download handler can save exactly what is displayed.
output$plotma <- renderPlot({
  ma_plot <- plot_ma(values$res_obj, annotation_obj = values$annotation_obj, FDR = input$FDR)
  exportPlots$plot_ma <- ma_plot
  ma_plot
})
# Zoomed-in MA plot driven by the brushed region of the main MA plot.
# The base plot is built once; gene labels are layered on only when an
# annotation object is available (previously the whole plot construction
# was duplicated across the two branches).
output$mazoom <- renderPlot({
  # no brush yet: show a placeholder prompting the user to select a region
  if (is.null(input$ma_brush)) {
    return(ggplot() +
      annotate("text", label = "click and drag to zoom in", 0, 0) +
      theme_bw())
  }
  # crop the MA plot to the brushed x/y ranges
  p <- plot_ma(values$res_obj, annotation_obj = values$annotation_obj, FDR = input$FDR) +
    coord_cartesian(
      xlim = c(input$ma_brush$xmin, input$ma_brush$xmax),
      ylim = c(input$ma_brush$ymin, input$ma_brush$ymax)
    )
  # gene name labels require the annotation object
  if (!is.null(values$annotation_obj)) {
    p <- p + geom_text(aes_string(label = "genename"), size = input$size_genelabels, hjust = 0.25, vjust = -0.75)
  }
  exportPlots$plot_mazoom <- p
  p
})
# MA plot with the user-selected genes highlighted. The selection widget
# differs depending on whether gene symbols were annotated on the results.
output$ma_highlight <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "Please generate the results object to display the plot and show the combined tables")
  )
  # pick symbols when available, otherwise fall back to raw identifiers
  selected_genes <- if ("symbol" %in% names(values$res_obj)) {
    input$avail_symbols
  } else {
    input$avail_ids
  }
  highlighted <- plot_ma(values$res_obj,
    intgenes = selected_genes, annotation_obj = values$annotation_obj, FDR = input$FDR
  )
  exportPlots$plot_mahighlight <- highlighted
  highlighted
})
# MA plot with the genes from an uploaded gene list highlighted; only
# supported when the results carry a symbol column
output$ma_hl_list <- renderPlot({
  if (is.null(values$genelist_ma)) {
    return(NULL)
  }
  if ("symbol" %in% names(values$res_obj)) {
    p <- plot_ma(values$res_obj,
      intgenes = values$genelist_ma$`Gene Symbol`, annotation_obj = values$annotation_obj, FDR = input$FDR
    )
  } else {
    # plot_ma(values$res_obj,
    # intgenes = values$genelist_ma,annotation_obj = values$annotation_obj)
    return(NULL)
  }
  exportPlots$plot_mahllist <- p
  p
})
# Reactive data frame of the genes inside the current MA-plot brush.
# NOTE(review): isDE uses a hard-coded padj < 0.10 cutoff rather than the
# input$FDR widget - confirm whether this is intentional.
curData <- reactive({
  mama <- data.frame(mean = values$res_obj$baseMean, lfc = values$res_obj$log2FoldChange, padj = values$res_obj$padj, isDE = ifelse(is.na(values$res_obj$padj), FALSE, values$res_obj$padj < 0.10), ID = rownames(values$res_obj))
  mama$genename <- values$annotation_obj$gene_name[match(mama$ID, rownames(values$annotation_obj))]
  # mama$yesorno <- ifelse(mama$isDE,"yes","no")
  mama$yesorno <- ifelse(mama$isDE, "red", "black")
  mama$logmean <- log10(mama$mean) # TO ALLOW FOR BRUSHING!!
  res <- brushedPoints(mama, input$ma_brush, xvar = "logmean", yvar = "lfc")
  res
})
# Reactive single-row data frame for the gene nearest to the click in the
# zoomed MA plot (same hard-coded 0.10 note as curData above)
curDataClick <- reactive({
  mama <- data.frame(mean = values$res_obj$baseMean, lfc = values$res_obj$log2FoldChange, padj = values$res_obj$padj, isDE = ifelse(is.na(values$res_obj$padj), FALSE, values$res_obj$padj < 0.10), ID = rownames(values$res_obj))
  mama$genename <- values$annotation_obj$gene_name[match(mama$ID, rownames(values$annotation_obj))]
  # mama$yesorno <- ifelse(mama$isDE,"yes","no")
  mama$yesorno <- ifelse(mama$isDE, "red", "black")
  mama$logmean <- log10(mama$mean) # TO ALLOW FOR BRUSHING!!
  res <- nearPoints(mama, input$mazoom_click,
    threshold = 20, maxpoints = 1,
    addDist = TRUE
  )
  res
})
# Table of the genes currently captured by the MA-plot brush
output$ma_brush_out <- DT::renderDataTable({
  brushed <- curData()
  if (nrow(brushed) == 0) {
    return(NULL)
  }
  datatable(brushed, options = list(pageLength = 100))
})
# Static heatmap (pheatmap) of the counts for the genes in the MA-plot brush,
# with optional log2 pseudocount transform and row-wise z-score scaling
output$heatbrush <- renderPlot({
  if ((is.null(input$ma_brush)) | is.null(values$dds_obj)) {
    return(NULL)
  }
  brushedObject <- curData()
  selectedGenes <- as.character(brushedObject$ID)
  toplot <- assay(values$dds_obj)[selectedGenes, ]
  # relabel rows with gene symbols when the annotation provides them
  rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
  if (input$pseudocounts) toplot <- log2(1 + toplot)
  # row-wise z-score: center each gene on its mean, scale by its sd
  mat_rowscale <- function(x) {
    m <- apply(x, 1, mean, na.rm = TRUE)
    s <- apply(x, 1, sd, na.rm = TRUE)
    return((x - m) / s)
  }
  if (input$rowscale) toplot <- mat_rowscale(toplot)
  pheatmap(toplot, cluster_cols = as.logical(input$heatmap_colv))
})
# Interactive (plotly/heatmaply) version of the brushed-genes heatmap
output$hpi_brush <- renderPlotly({
  if ((is.null(input$ma_brush)) | is.null(values$dds_obj)) {
    return(NULL)
  }
  brushedObject <- curData()
  selectedGenes <- as.character(brushedObject$ID)
  toplot <- assay(values$dds_obj)[selectedGenes, ]
  rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
  mycolss <- c("#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#fee090", "#fdae61", "#f46d43", "#d73027", "#a50026") # to be consistent with red/blue usual coding
  if (input$pseudocounts) toplot <- log2(1 + toplot)
  # row-wise z-score, same helper as in the static heatmap above
  mat_rowscale <- function(x) {
    m <- apply(x, 1, mean, na.rm = TRUE)
    s <- apply(x, 1, sd, na.rm = TRUE)
    return((x - m) / s)
  }
  if (input$rowscale) toplot <- mat_rowscale(toplot)
  heatmaply(toplot, Colv = as.logical(input$heatmap_colv), colors = mycolss)
})
# Debug output: print the ID of the gene clicked in the zoomed MA plot
output$deb <- renderPrint({
  # curDataClick()
  selectedGene <- curDataClick()$ID
  # selectedGeneSymbol <- cm2$fromgtf[match(selectedGene,rownames(cm2))]
  # # plotCounts(dds_cleaner,)
  # genedata <- plotCounts(dds_cleaner,gene=selectedGene,intgroup = "condition",returnData = T)
  # genedata
  # str(as.character(selectedGene))
  selectedGene
})
# Volcano plot of the current results at the selected FDR; also cached
# in exportPlots for the corresponding download handler.
output$volcanoplot <- renderPlot({
  volcano <- plot_volcano(values$res_obj, FDR = input$FDR)
  exportPlots$plot_volcanoplot <- volcano
  volcano
})
# server genefinder --------------------------------------------------------
# Boxplot of the normalized counts for the gene clicked in the zoomed MA
# plot, grouped by the selected experimental factor(s)
output$genefinder_plot <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  if (is.null(input$ma_brush)) {
    return(NULL)
  }
  # brushed but not yet clicked: show an instructional placeholder
  if (is.null(input$mazoom_click)) {
    return(ggplot() +
      annotate("text", label = "click to generate the boxplot\nfor the selected gene", 0, 0) +
      theme_bw())
  }
  selectedGene <- as.character(curDataClick()$ID)
  selectedGeneSymbol <- values$annotation_obj$gene_name[match(selectedGene, values$annotation_obj$gene_id)]
  p <- ggplotCounts(values$dds_obj, selectedGene, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  # optionally anchor the (log-scale) y axis near zero
  if (input$ylimZero_genes) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plot_genefinder <- p
  p
})
# Info box for the clicked gene, with name/description (and summary when one
# exists) retrieved from NCBI/ENTREZ plus a link to the NCBI gene page.
# The HTML assembly is built once from a vector of parts instead of
# duplicating the markup across the summary/no-summary branches.
output$rentrez_infobox <- renderUI({
  shiny::validate(
    need(
      (nrow(curDataClick()) > 0),
      "Select a gene first to display additional info (retrieved from the NCBI/ENTREZ db website)"
    )
  )
  shiny::validate(
    need(
      (!is.null(values$cur_species)),
      "Select a species first in the Data Setup panel"
    )
  )
  selectedGene <- as.character(curDataClick()$ID)
  # map the clicked identifier to its ENTREZ id via the species annotation package
  selgene_entrez <- mapIds(
    get(annoSpecies_df[values$cur_species, ]$pkg),
    selectedGene, "ENTREZID", input$idtype
  )
  fullinfo <- geneinfo(selgene_entrez)
  link_pubmed <- paste0(
    '<a href="http://www.ncbi.nlm.nih.gov/gene/?term=',
    selgene_entrez,
    '" target="_blank" >Click here to see more at NCBI</a>'
  )
  # name, description, optional summary, link - joined by blank lines
  info_parts <- c(
    paste0("<b>", fullinfo$name, "</b>"),
    fullinfo$description,
    if (fullinfo$summary != "") fullinfo$summary,
    link_pubmed
  )
  HTML(paste0(info_parts, collapse = "<br/><br/>"))
})
# Reactive combined table: DE results joined with the normalized counts,
# optionally restricted to the genes selected in the sidebar widgets
cur_combires <- reactive({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  normCounts <- as.data.frame(counts(estimateSizeFactors(values$dds_obj), normalized = TRUE))
  normCounts$id <- rownames(normCounts)
  res_df <- deseqresult2tbl(values$res_obj)
  combi_obj <- dplyr::inner_join(res_df, normCounts, by = "id")
  combi_obj$symbol <- values$annotation_obj$gene_name[match(combi_obj$id, values$annotation_obj$gene_id)]
  # the sidebar exposes symbols when annotated, raw ids otherwise
  if ("symbol" %in% names(values$res_obj)) {
    sel_genes <- input$avail_symbols
    sel_genes_ids <- values$annotation_obj$gene_id[match(sel_genes, values$annotation_obj$gene_name)]
  } else {
    sel_genes_ids <- input$avail_ids
  }
  # subset to the selection when one exists, otherwise return everything
  if (length(sel_genes_ids) > 0) {
    combi_obj[match(sel_genes_ids, combi_obj$id), ]
  } else {
    combi_obj
  }
})
# Render the combined results+counts table with horizontal scrolling
output$table_combi <- DT::renderDataTable({
  combined <- cur_combires()
  datatable(combined, options = list(scrollX = TRUE))
})
# Reactive combined table (results + normalized counts) restricted to the
# genes of the uploaded gene list, when symbols are available.
cur_combires_list <- reactive({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  normCounts <- as.data.frame(counts(estimateSizeFactors(values$dds_obj), normalized = TRUE))
  normCounts$id <- rownames(normCounts)
  res_df <- deseqresult2tbl(values$res_obj)
  combi_obj <- dplyr::inner_join(res_df, normCounts, by = "id")
  combi_obj$symbol <- values$annotation_obj$gene_name[match(combi_obj$id, values$annotation_obj$gene_id)]
  if ("symbol" %in% names(values$res_obj)) {
    sel_genes <- values$genelist_ma$`Gene Symbol`
    sel_genes_ids <- values$annotation_obj$gene_id[match(sel_genes, values$annotation_obj$gene_name)]
  } else {
    # bugfix: sel_genes_ids was previously left undefined in this branch
    # (the assignment was commented out), making length(sel_genes_ids) below
    # error with "object not found" when no symbol column is present.
    # An empty selection means the full combined table is returned.
    sel_genes_ids <- character(0)
  }
  if (length(sel_genes_ids) > 0) {
    combi_obj[match(sel_genes_ids, combi_obj$id), ]
  } else {
    combi_obj
  }
})
# Render the gene-list-restricted combined table, only when a list was uploaded
output$table_combi_list <- DT::renderDataTable({
  if (!is.null(values$genelist_ma)) {
    datatable(cur_combires_list(), options = list(scrollX = TRUE))
  } else {
    NULL
  }
})
# Boxplot for the first selected gene, grouped by the chosen factor(s).
# Cleanup: the original assigned an unused `mysim` symbol in the else branch
# (dead code) - removed; behavior is unchanged.
output$bp1 <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  shiny::validate(
    need(
      (length(input$avail_symbols) > 0 | length(input$avail_ids) > 0),
      "Select at least a gene to plot"
    )
  )
  if (length(input$avail_symbols) > 0) {
    # a symbol was selected: translate it back to its gene identifier
    mysym <- input$avail_symbols[1]
    myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
  } else {
    myid <- input$avail_ids[1]
  }
  p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  # optionally anchor the (log-scale) y axis near zero
  if (input$ylimZero_genefinder) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plotbp1 <- p
  p
})
# Boxplot for the second selected gene (same pattern as bp1).
# NOTE(review): `mysim` is assigned but never used (dead code); also, when
# fewer symbols than ids are selected, input$avail_symbols[2] can be NA -
# confirm whether mixed selections need guarding.
output$bp2 <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  shiny::validate(
    need(
      (length(input$avail_symbols) > 1 | length(input$avail_ids) > 1),
      "Select at least a second gene to plot"
    )
  )
  if (length(input$avail_symbols) > 0) {
    # got the symbol, look for the id
    mysym <- input$avail_symbols[2]
    myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
  } else {
    myid <- input$avail_ids[2]
    # make it optional if annot is available
    if (!is.null(values$annotation_obj)) {
      mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
    } else {
      mysim <- ""
    }
  }
  p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  if (input$ylimZero_genefinder) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plotbp2 <- p
  p
})
# Boxplot for the third selected gene (same pattern and caveats as bp2)
output$bp3 <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  shiny::validate(
    need(
      (length(input$avail_symbols) > 2 | length(input$avail_ids) > 2),
      "Select at least a third gene to plot"
    )
  )
  if (length(input$avail_symbols) > 0) {
    # got the symbol, look for the id
    mysym <- input$avail_symbols[3]
    myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
  } else {
    myid <- input$avail_ids[3]
    # make it optional if annot is available
    if (!is.null(values$annotation_obj)) {
      mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
    } else {
      mysim <- ""
    }
  }
  p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  if (input$ylimZero_genefinder) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plotbp3 <- p
  p
})
# Boxplot for the fourth selected gene (same pattern and caveats as bp2)
output$bp4 <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  shiny::validate(
    need(
      (length(input$avail_symbols) > 3 | length(input$avail_ids) > 3),
      "Select at least a fourth gene to plot"
    )
  )
  if (length(input$avail_symbols) > 0) {
    # got the symbol, look for the id
    mysym <- input$avail_symbols[4]
    myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
  } else {
    myid <- input$avail_ids[4]
    # make it optional if annot is available
    if (!is.null(values$annotation_obj)) {
      mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
    } else {
      mysim <- ""
    }
  }
  p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  if (input$ylimZero_genefinder) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plotbp4 <- p
  p
})
# server report editor --------------------------------------------------------
### yaml generation
# Reactive YAML front matter for the report, built from the report-editor
# widgets (title, author, toc, numbered sections, theme); the date is
# stamped at render time. The exact whitespace matters to the YAML parser.
rmd_yaml <- reactive({
  paste0("---",
    "\ntitle: '", input$report_title,
    "'\nauthor: '", input$report_author,
    "'\ndate: '", Sys.Date(),
    "'\noutput:\n html_document:\n toc: ", input$report_toc, "\n number_sections: ", input$report_ns, "\n theme: ", input$report_theme, "\n---\n\n",
    collapse = "\n"
  )
})
# rmd_full <- reactive({
# paste0(rmd_yaml(),"\n",
# readLines("reportTemplate.Rmd"))
# })
# output$loadedRmd <- renderPrint({
# # rmd_yaml() # or rmd_full()
# paste0(
# # rmd_yaml(),
# paste0(readLines("reportTemplate.Rmd"),collapse = "\n"))
# # head(paste0(rmd_yaml(),
# # readLines("reportTemplate.Rmd")),collapse="\n")
# })
### loading report template
# update aceEditor module
# Load the bundled report template from the package into the ace editor
observe({
  # loading rmd report from disk
  inFile <- system.file("extdata", "irt.Rmd", package = "ideal")
  isolate({
    if (!is.null(inFile) && !is.na(inFile)) {
      rmdfilecontent <- paste0(readLines(inFile), collapse = "\n")
      shinyAce::updateAceEditor(session, "acereport_rmd", value = rmdfilecontent)
    }
  })
})
### ace editor options
# Keep the editor's autocompletion mode, theme and language mode in sync
# with the corresponding UI widgets
observe({
  autoComplete <- if (input$enableAutocomplete) {
    if (input$enableLiveCompletion) "live" else "enabled"
  } else {
    "disabled"
  }
  updateAceEditor(session, "acereport_rmd", autoComplete = autoComplete, theme = input$theme, mode = input$mode)
  # updateAceEditor(session, "plot", autoComplete = autoComplete)
})
# Enable/Disable R code completion
# aceAutocomplete returns an observer handle that can be paused/resumed
rmdOb <- aceAutocomplete("acereport_rmd")
observe({
  if (input$enableRCompletion) {
    rmdOb$resume()
  } else {
    rmdOb$suspend()
  }
})
## currently not working as I want with rmarkdown::render, but can leave it like this - the yaml will be taken in the final version only
## currently not working as I want with rmarkdown::render, but can leave it like this - the yaml will be taken in the final version only
# Render an in-app HTML preview of the report: prepend the YAML front matter
# to the editor content and knit it; re-runs when the preview button is pressed
output$knitDoc <- renderUI({
  # taking a dependency on the button triggers the refresh
  input$updatepreview_button
  ## TODO: this does what it should do but messes up with CSS and so
  #
  # # error_I <- 0
  # withProgress(message = 'Processing', value = 0, {
  # isolate({
  # fileConn<-file("www/tmp.Rmd")
  # tmp_content <-
  # paste0(rmd_yaml(),
  # input$acereport_rmd,collapse = "\n")
  # writeLines(tmp_content, fileConn)
  # close(fileConn)
  # incProgress(0.5, detail = "Synthesizing report...")
  # # tryCatch({
  # rmarkdown::render(input = "www/tmp.Rmd", output_format = "html_document", output_file = "../www/Rmd_preview.html",quiet = TRUE) #},
  # # error = function(e) {
  # # # error_I <<- 1
  # # }
  # # )
  # })
  # setProgress(1)
  # })
  #
  # return(isolate(includeHTML("www/Rmd_preview.html")))
  # # return(isolate(includeHTML("<iframe src='www/Rmd_preview.html', width='100%', height='800'></iframe>")))
  # # return(isolate(HTML("<iframe src='www/Rmd_preview.html', width='100%', height='800'></iframe>")))
  return(
    withProgress(
      {
        # temporarily switch to the temp dir, in case you do not have write
        # permission to the current working directory
        owd <- setwd(tempdir())
        on.exit(setwd(owd))
        tmp_content <- paste0(rmd_yaml(), input$acereport_rmd, collapse = "\n")
        # knit to an HTML fragment embedded directly in the app body
        isolate(HTML(knit2html(text = tmp_content, fragment.only = TRUE, quiet = TRUE)))
      },
      message = "Updating the report in the app body",
      detail = "This can take some time"
    )
  )
})
# Generate and Download module
# Download the report either as the raw .Rmd source or as rendered HTML,
# according to the chosen output format
output$saveRmd <- downloadHandler(
  filename = function() {
    if (input$rmd_dl_format == "rmd") {
      "report.Rmd"
    } else {
      "report.html" # TODO: maybe add Sys.time() to the filename to improve traceability?
    }
  },
  content = function(file) {
    # knit2html(text = input$rmd, fragment.only = TRUE, quiet = TRUE))
    # full report source = YAML front matter + current editor content
    tmp_content <-
      paste0(rmd_yaml(),
        input$acereport_rmd,
        collapse = "\n"
      )
    if (input$rmd_dl_format == "rmd") {
      cat(tmp_content, file = file, sep = "\n")
    } else {
      if (input$rmd_dl_format == "html") {
        # temporarily switch to the temp dir, in case you do not have write
        # permission to the current working directory
        owd <- setwd(tempdir())
        on.exit(setwd(owd))
        cat(tmp_content, file = "ideal_tempreport.Rmd", sep = "\n")
        withProgress(rmarkdown::render(
          input = "ideal_tempreport.Rmd",
          output_file = file,
          # fragment.only = TRUE,
          quiet = TRUE
        ),
        message = "Generating the html report",
        detail = "This can take some time"
        )
      }
    }
  }
)
# iSEE export ------------------------------------------------------------
# UI for exporting the current dataset as a serialized SummarizedExperiment
# for iSEE; shown only once both dds and results objects exist
output$ui_iSEEexport <- renderUI({
  validate(
    need(((!is.null(values$dds_obj)) & (!is.null(values$res_obj))),
      message = "Please build and compute the dds and res object to export as
        SummarizedExperiment for use in iSEE"
    )
  )
  return(
    tagList(
      textInput(
        "se_export_name",
        label = "Choose a filename for the serialized .rds object",
        value = "se_ideal_toiSEE.rds"
      ),
      downloadButton(
        "button_iSEEexport",
        label = "Export as serialized SummarizedExperiment"
      )
    )
  )
})
# Serialize dds + results as a SummarizedExperiment (.rds) for use in iSEE,
# using the filename chosen in the export widget
output$button_iSEEexport <- downloadHandler(
  filename = function() {
    input$se_export_name
  },
  content = function(file) {
    saveRDS(wrapup_for_iSEE(values$dds_obj, values$res_obj), file = file)
  }
)
# GeneTonic export -------------------------------------------------------
# UI for exporting dds/results/enrichment/annotation as a serialized list
# for GeneTonic; requires dds, results and annotation to be available
output$ui_GeneTonicexport <- renderUI({
  validate(
    need(((!is.null(values$dds_obj)) & (!is.null(values$res_obj))),
      message = "Please build and compute the dds and res object to export as
        a list for use in GeneTonic"
    ),
    need(!is.null(values$annotation_obj),
      message = "Please provide or obtain an annotation object")
  )
  # candidate topGO result tables; only the ones actually computed are offered
  go_tbls_available <- c("topgo_updown",
    "topgo_down",
    "topgo_up")
  return(
    tagList(
      textInput(
        "gtl_exportgt_name",
        label = "Choose a filename for the serialized .rds object",
        value = "gtl_ideal_toGeneTonic.rds"
      ),
      selectInput(
        "gotbl_forgt",
        label = "Select which GO table to export (topGO output supported)",
        choices = go_tbls_available[
          unlist(lapply(go_tbls_available, function(arg) {
            !is.null(values[[arg]])
          }))
        ]
      ),
      downloadButton(
        "button_GeneTonicexport",
        label = "Export as serialized list for GeneTonic"
      )
    )
  )
})
# Assemble the GeneTonic input list (dds, DE results, reshaped enrichment
# results, annotation) and serialize it to .rds
output$button_GeneTonicexport <- downloadHandler(
  filename = function() {
    input$gtl_exportgt_name
  }, content = function(file) {
    dds_obj <- values$dds_obj
    res_obj <- values$res_obj
    # GeneTonic expects the symbol column named SYMBOL
    res_obj$SYMBOL <- res_obj$symbol
    res_enrich <- shake_topGOtableResult(values[[input$gotbl_forgt]])
    anno_df <- values$annotation_obj
    gtl <- list(dds = dds_obj,
      res_de = res_obj,
      res_enrich = res_enrich,
      annotation_obj = anno_df)
    saveRDS(gtl, file = file)
  }
)
# server state saving --------------------------------------------------------
### to environment
# Handle the "Exit ideal & save" task: snapshot all inputs and reactive
# values into the ideal_env environment (timestamped), then stop the app
observe({
  if (is.null(input$task_exit_and_save) || input$task_exit_and_save == 0) {
    return()
  }
  # quit R, unless you are running an interactive session
  if (interactive()) {
    # flush input and values to the environment in two distinct objects (to be reused later?)
    isolate({
      # ideal_env <<- new.env(parent = emptyenv())
      cur_inputs <- reactiveValuesToList(input)
      cur_values <- reactiveValuesToList(values)
      # timestamp used to make the stored object names unique
      tstamp <- gsub(" ", "_", gsub("-", "", gsub(":", "-", as.character(Sys.time()))))
      # myvar <- "frfr"
      # assign("test", myvar, ideal_env)
      # better practice rather than assigning to global env - notify users of this
      assign(paste0("ideal_inputs_", tstamp), cur_inputs, envir = ideal_env)
      assign(paste0("ideal_values_", tstamp), cur_values, envir = ideal_env)
      showNotification(
        paste0(
          "ideal closed, state successfully saved to the R environment. ",
          "You can access these values by searching the `ideal_env` object."
        ),
        type = "message"
      )
      message("ideal closed, state successfully saved to the R environment.")
      message(" You can access these values by searching the `ideal_env` object.")
      stopApp("ideal closed, state successfully saved to global R environment.")
      # assign(paste0("ideal_inputs_",
      # gsub(" ","_",gsub("-","",gsub(":","-",as.character(Sys.time()))))),
      # reactiveValuesToList(input), envir = .GlobalEnv)
      # assign(paste0("ideal_values_",
      # gsub(" ","_",gsub("-","",gsub(":","-",as.character(Sys.time()))))),
      # reactiveValuesToList(values), envir = .GlobalEnv)
      # stopApp("ideal closed, state successfully saved to global R environment.")
    })
  } else {
    # non-interactive session: just stop the app and quit R
    stopApp("ideal closed")
    q("no")
  }
})
### to binary data
# Serialize the full app state to an .RData file: all current input widget
# values (as LiveInputs) plus all reactive values (as r_data). isolate()
# prevents this from creating reactive dependencies.
saveState <- function(filename) {
  isolate({
    LiveInputs <- reactiveValuesToList(input)
    r_data <- reactiveValuesToList(values)
    save(LiveInputs, r_data, file = filename)
    message("list of inputs and reactive values correctly saved as binary data")
  })
}
# Download the serialized app state as a timestamped .RData file
output$task_state_save <- downloadHandler(
  filename = function() {
    paste0("idealState_", gsub(" ", "_", gsub("-", "", gsub(":", "-", as.character(Sys.time())))), ".RData")
  },
  content = function(file) {
    saveState(file)
  }
)
# Print the session information for reproducibility
output$sessioninfo <- renderPrint({
  sessionInfo()
})
# server export plots and tables --------------------------------------------------------
## here, all export of plots and tables
# All ggplot-based figures share the exact same export logic: the download
# output id is "download_" + <plot id>, the filename widget is
# "filename_" + <plot id>, and the rendered plot is cached in exportPlots
# under <plot id>. Register one downloadHandler per plot id instead of
# repeating the same twelve-line block twelve times.
ggplot_export_ids <- c(
  "plot_pvals_hist", "plot_logfc_hist", "plot_ma", "plot_mazoom",
  "plot_mahighlight", "plot_mahllist", "plot_volcanoplot", "plot_genefinder",
  "plotbp1", "plotbp2", "plotbp3", "plotbp4"
)
# lapply (not a for loop) so each handler gets its own closure over plot_id
lapply(ggplot_export_ids, function(plot_id) {
  output[[paste0("download_", plot_id)]] <- downloadHandler(
    filename = function() {
      input[[paste0("filename_", plot_id)]]
    },
    content = function(file) {
      # width/height (cm) come from the sidebar export settings
      ggsave(file, exportPlots[[plot_id]],
        width = input$export_width,
        height = input$export_height, units = "cm"
      )
    }
  )
})
# tbls
# CSV export of the full results table, ordered by adjusted p-value
output$downloadTblResu <- downloadHandler(
  filename = function() {
    "table_results.csv"
  },
  content = function(file) {
    mydf <- as.data.frame(values$res_obj[order(values$res_obj$padj), ])
    write.csv(mydf, file)
  }
)
# CSV export of the genes captured by the MA-plot brush
output$downloadTblMabrush <- downloadHandler(
  filename = function() {
    "table_mabrush.csv"
  },
  content = function(file) {
    write.csv(curData(), file)
  }
)
# CSV export of the combined results + normalized counts table
output$downloadTblCombi <- downloadHandler(
  filename = function() {
    "table_combi.csv"
  },
  content = function(file) {
    write.csv(cur_combires(), file)
  }
)
# CSV export of the combined table restricted to the uploaded gene list
output$downloadTblCombiList <- downloadHandler(
  filename = function() {
    "table_combilist.csv"
  },
  content = function(file) {
    write.csv(cur_combires_list(), file)
  }
)
# base graphics plots
# PDF export of the brushed-genes heatmap (base-graphics device).
# Robustness fix: the pdf device is now closed via on.exit(), so it is not
# leaked when an error occurs while building the plot.
output$download_plot_heatbrush <- downloadHandler(filename = function() {
  input$filename_plot_heatbrush
}, content = function(file) {
  pdf(file)
  on.exit(dev.off(), add = TRUE)
  brushedObject <- curData()
  selectedGenes <- as.character(brushedObject$ID)
  toplot <- assay(values$dds_obj)[selectedGenes, ]
  rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
  if (input$pseudocounts) toplot <- log2(1 + toplot)
  # row-wise z-score scaling, mirroring the on-screen heatmap
  mat_rowscale <- function(x) {
    m <- apply(x, 1, mean, na.rm = TRUE)
    s <- apply(x, 1, sd, na.rm = TRUE)
    return((x - m) / s)
  }
  if (input$rowscale) toplot <- mat_rowscale(toplot)
  pheatmap(toplot, cluster_cols = as.logical(input$heatmap_colv))
})
# PDF export of the Venn diagram of the gene lists (device closed via on.exit)
output$download_plot_vennlists <- downloadHandler(filename = function() {
  input$filename_plot_vennlists
}, content = function(file) {
  pdf(file)
  on.exit(dev.off(), add = TRUE)
  gplots::venn(gll())
})
# PDF export of the UpSet plot of the gene lists (device closed via on.exit)
output$download_plot_upsetlists <- downloadHandler(filename = function() {
  input$filename_plot_upsetlists
}, content = function(file) {
  pdf(file)
  on.exit(dev.off(), add = TRUE)
  print(UpSetR::upset(fromList(gll())))
})
## GO tbls topGO
# CSV exports of the topGO enrichment tables computed for the different
# gene sets (up-, down-, both-regulated, and the two uploaded lists)
output$downloadGOTbl_up <- downloadHandler(
  filename = function() {
    "table_GOresults_up.csv"
  },
  content = function(file) {
    write.csv(values$topgo_up, file)
  }
)
output$downloadGOTbl_down <- downloadHandler(
  filename = function() {
    "table_GOresults_down.csv"
  },
  content = function(file) {
    write.csv(values$topgo_down, file)
  }
)
output$downloadGOTbl_updown <- downloadHandler(
  filename = function() {
    "table_GOresults_updown.csv"
  },
  content = function(file) {
    write.csv(values$topgo_updown, file)
  }
)
output$downloadGOTbl_l1 <- downloadHandler(
  filename = function() {
    "table_GOresults_list1.csv"
  },
  content = function(file) {
    write.csv(values$topgo_list1, file)
  }
)
output$downloadGOTbl_l2 <- downloadHandler(
  filename = function() {
    "table_GOresults_list2.csv"
  },
  content = function(file) {
    write.csv(values$topgo_list2, file)
  }
)
}) # end of server function definition
# nocov end
# launch the app!
shinyApp(ui = ideal_ui, server = ideal_server)
}
|
/R/ideal.R
|
permissive
|
BadSeby/ideal
|
R
| false
| false
| 193,549
|
r
|
# ideal.R
#' ideal: Interactive Differential Expression Analysis
#'
#' ideal makes differential expression analysis interactive, easy and reproducible.
#' This function launches the main application included in the package.
#'
#' @param dds_obj A \code{\link{DESeqDataSet}} object. If not provided, then a
#' \code{countmatrix} and a \code{expdesign} need to be provided. If none of
#' the above is provided, it is possible to upload the data during the
#' execution of the Shiny App
#' @param res_obj A \code{\link{DESeqResults}} object. If not provided, it can
#' be computed during the execution of the application
#' @param annotation_obj A \code{data.frame} object, with row.names as gene
#' identifiers (e.g. ENSEMBL ids) and a column, \code{gene_name}, containing
#' e.g. HGNC-based gene symbols. If not provided, it can be constructed during
#' the execution via the org.Xx.eg.db packages - these need to be installed
#' @param countmatrix A count matrix, with genes as rows and samples as columns.
#' If not provided, it is possible to upload the data during the execution of
#' the Shiny App
#' @param expdesign A \code{data.frame} containing the info on the covariates
#' of each sample. If not provided, it is possible to upload the data during the
#' execution of the Shiny App
#' @param gene_signatures A list of vectors, one for each pathway/signature. This
#' is for example the output of the \code{\link{read_gmt}} function. The provided
#' object can also be replaced during runtime in the dedicated upload widget.
#'
#' @return A Shiny App is launched for interactive data exploration and
#' differential expression analysis
#'
#' @export
#'
#' @examples
#' # with simulated data...
#' library(DESeq2)
#' dds <- DESeq2::makeExampleDESeqDataSet(n = 100, m = 8)
#' cm <- counts(dds)
#' cd <- colData(dds)
#'
#' # with the well known airway package...
#' library(airway)
#' data(airway)
#' airway
#' dds_airway <- DESeq2::DESeqDataSetFromMatrix(assay(airway),
#' colData = colData(airway),
#' design = ~ cell + dex
#' )
#' \dontrun{
#'
#' ideal()
#' ideal(dds)
#' ideal(dds_airway)
#'
#' dds_airway <- DESeq2::DESeq(dds_airway)
#' res_airway <- DESeq2::results(dds_airway)
#' ideal(dds_airway, res_airway)
#' }
#'
ideal <- function(dds_obj = NULL,
res_obj = NULL,
annotation_obj = NULL,
countmatrix = NULL,
expdesign = NULL,
gene_signatures = NULL) {
if (!requireNamespace("shiny", quietly = TRUE)) {
stop("ideal requires 'shiny'. Please install it using
install.packages('shiny')")
}
# create environment for storing inputs and values
## i need the assignment like this to export it up one level - i.e. "globally"
ideal_env <<- new.env(parent = emptyenv())
## upload max 300mb files - can be changed if necessary
options(shiny.maxRequestSize = 300 * 1024^2)
options(shiny.launch.browser = TRUE)
## ------------------------------------------------------------------ ##
## Define UI ##
## ------------------------------------------------------------------ ##
# # components defined in separated .R files
# shinyApp(ui = ideal_ui, server = ideal_server)
# ui definition -----------------------------------------------------------
ideal_ui <- shinydashboard::dashboardPage(
title = "ideal - Interactive Differential Expression AnaLysis",
# header definition -----------------------------------------------------------
shinydashboard::dashboardHeader(
title = tags$span(
img(src = "ideal/ideal_logo_v2.png", height = "50px"),
paste0(
"ideal - Interactive Differential Expression AnaLysis ",
packageVersion("ideal")
)
),
titleWidth = 600,
# TODO:
# http://stackoverflow.com/questions/31440564/adding-a-company-logo-to-shinydashboard-header
# replace text with image
# ideal_header$children[[2]]$children <- tags$a(href='https://github.com/federicomarini/ideal',
# tags$img(src='ideal_logo_v2.png',height='50',width='200'))
# title = tags$a(href='https://github.com/federicomarini/ideal',
# tags$img(src='ideal_logo_v2.png',height='50',width='200')),
# task menu for saving state to environment or binary data
shinydashboard::dropdownMenu(
type = "tasks", icon = icon("cog"),
badgeStatus = NULL,
headerText = "ideal Tasks menu",
notificationItem(
text = actionButton("task_exit_and_save", "Exit ideal & save",
class = "btn_no_border",
onclick = "setTimeout(function(){window.close();}, 100); "
),
icon = icon("sign-out"), status = "primary"
),
menuItem(
text = downloadButton("task_state_save", "Save State as .RData")
)
)
), # end of dashboardHeader
# sidebar definition -----------------------------------------------------------
dashboardSidebar(
width = 280,
menuItem(
text = "App settings",
icon = icon("cogs"),
startExpanded = TRUE,
uiOutput("color_by"),
shinyBS::bsTooltip(
"color_by",
paste0("Select the group(s) of samples to stratify the analysis, and ideally match the contrast of interest. Can also assume multiple values, in this case the interaction of the factors is used."),
"right",
options = list(container = "body")
),
uiOutput("available_genes"),
shinyBS::bsTooltip(
"available_genes",
paste0("Select one or more features (genes) from the list to inspect. Autocompletion is provided, so you can easily find your genes of interest by started typing their names. Defaults to the row names if no annotation object is provided."),
"right",
options = list(container = "body")
),
numericInput("FDR", "False Discovery Rate", value = 0.05, min = 0, max = 1, step = 0.01),
shinyBS::bsTooltip(
"FDR",
paste0("Select the alpha level at which you would like to control the FDR (False Discovery Rate) for the set of multiple tests in your dataset. The sensible choice of 0.05 is provided as default, 0.1 is more liberal, while 0.01 is more stringent - keep in mind this does not tell anything on the effect size for the expression change."),
"right",
options = list(container = "body")
)
),
menuItem("Plot export settings",
icon = icon("paint-brush"),
startExpanded = TRUE,
numericInput("export_width", label = "Width of exported figures (cm)", value = 16, min = 2),
shinyBS::bsTooltip(
"export_width", paste0("Width of the figures to export, expressed in cm"),
"right",
options = list(container = "body")
),
numericInput("export_height", label = "Height of exported figures (cm)", value = 10, min = 2),
shinyBS::bsTooltip(
"export_height", paste0("Height of the figures to export, expressed in cm"),
"right",
options = list(container = "body")
)
),
menuItem("Quick viewer",
icon = icon("flash"),
startExpanded = TRUE,
id = "qvmenu",
fluidRow(
fluidRow(column(6, p("Count matrix")), column(6, uiOutput("ok_cm"))),
fluidRow(column(6, p("Experimental design")), column(6, uiOutput("ok_ed"))),
fluidRow(column(6, p("DESeqDataset")), column(6, uiOutput("ok_dds"))),
fluidRow(column(6, p("Annotation")), column(6, uiOutput("ok_anno"))),
fluidRow(column(6, p("Results")), column(6, uiOutput("ok_resu")))
)
),
menuItem("First steps help",
icon = icon("question-circle"),
startExpanded = TRUE,
actionButton("btn", "Click me for a quick tour", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
)
)
), # end of dashboardSidebar
# body definition -----------------------------------------------------------
dashboardBody(
introjsUI(),
## Define output size and style of error messages, and also the style of the icons e.g. check
## plus, define the myscrollbox div to prevent y overflow when page fills up
tags$head(
tags$style(HTML("
.shiny-output-error-validation {
font-size: 15px;
color: forestgreen;
text-align: center;
}
.icon-done {
color: green;
}
#myScrollBox{
overflow-y: scroll;
.dataTables_wrapper{
overflow-x: scroll;
}
}
#myAnchorBox{}
"))
),
# value boxes to always have an overview on the available data
fluidRow(
valueBoxOutput("box_ddsobj"),
valueBoxOutput("box_annobj"),
valueBoxOutput("box_resobj")
),
## main structure of the body for the dashboard
div(
id = "myScrollBox", # trick to have the y direction scrollable
tabBox(
width = 12,
# ui panel welcome -----------------------------------------------------------
tabPanel(
title = "Welcome!", icon = icon("home"), value = "tab-welcome",
fluidRow(
column(
width = 8,
includeMarkdown(system.file("extdata", "welcome.md", package = "ideal")),
br(), br(),
p("If you see a grey box like this one open below..."),
shinyBS::bsCollapse(
id = "help_welcome", open = "Help",
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_welcome.md", package = "ideal"))
)
),
actionButton("introexample", "If you see a button like this...", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
p("... you can click on that to start a tour based on introJS"),
br(), br(),
uiOutput("ui_instructions")
)
)
), # end of Welcome panel
# ui panel data setup -----------------------------------------------------------
tabPanel(
"Data Setup",
icon = icon("upload"), # value="tab-ds",
value = "tab-datasetup",
headerPanel("Setup your data for the analysis"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_datasetup", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_datasetup.md", package = "ideal"))
)
)
)
),
actionButton("tour_datasetup", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
box(
width = 12,
title = "Step 1", status = "danger", solidHeader = TRUE,
h2("Upload your count matrix and the info on the experimental design"),
fluidRow(
column(
width = 4,
uiOutput("upload_count_matrix"),
uiOutput("upload_metadata"),
br(),
"... or you can also ",
actionButton("btn_loaddemo", "Load the demo airway data",
icon = icon("play-circle"),
class = "btn btn-info"
), br(), p()
),
column(
width = 4,
br(),
actionButton("help_format",
label = "", icon = icon("question-circle"),
style = "color: #0092AC; background-color: #FFFFFF; border-color: #FFFFFF"
),
shinyBS::bsTooltip(
"help_format",
"How to provide your input data to ideal",
"bottom",
options = list(container = "body")
)
)
),
fluidRow(
column(
width = 6,
box(
width = NULL, title = "Count matrix preview", status = "primary",
solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE,
fluidRow(
column(
width = 12,
offset = 0.5,
DT::dataTableOutput("dt_cm")
)
)
)
),
column(
width = 6,
box(
width = NULL, title = "Experimental design preview", status = "primary",
solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE,
fluidRow(
column(
width = 12,
offset = 0.5,
DT::dataTableOutput("dt_ed")
)
)
)
)
)
),
uiOutput("ui_step2"),
fluidRow(
column(
width = 6,
uiOutput("ui_stepanno")
## this ideally populates also the list of genes of interest to choose among
),
column(
width = 6,
uiOutput("ui_stepoutlier")
)
),
uiOutput("ui_step3")
), # end of Data Setup panel
# ui panel counts overview -----------------------------------------------------------
tabPanel(
"Counts Overview",
icon = icon("eye"),
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Get an overview on your data"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_countsoverview", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_overview.md", package = "ideal"))
)
)
)
),
actionButton("tour_countsoverview", "Click me for a quick tour of the section",
icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
selectInput("countstable_unit",
label = "Data scale in the table",
choices = list(
"Counts (raw)" = "raw_counts",
"Counts (normalized)" = "normalized_counts",
"Variance stabilizing transformed values" = "vst_counts",
"Log10 (pseudocount of 1 added)" = "log10_counts"
)
),
DT::dataTableOutput("showcountmat"),
downloadButton("downloadData", "Download", class = "btn btn-success"),
hr(),
fluidRow(
column(
width = 8,
h3("Basic summary for the counts"),
p("Number of uniquely aligned reads assigned to each sample"),
# verbatimTextOutput("reads_summary"),
wellPanel(
fluidRow(
column(
width = 6,
numericInput("threshold_rowsums", "Threshold on the row sums of the counts", value = 0, min = 0)
),
column(
width = 6,
numericInput("threshold_rowmeans", "Threshold on the row means of the normalized counts", value = 0, min = 0)
)
)
),
p("According to the selected filtering criteria, this is an overview on the provided count data"),
verbatimTextOutput("detected_genes"),
selectInput("filter_crit",
label = "Choose the filtering criterium",
choices = c("row means", "row sums"), selected = "row means"
),
actionButton("featfilt_dds", "Filter the DDS object", class = "btn btn-primary")
)
),
h3("Sample to sample scatter plots"),
selectInput("corr_method", "Correlation method", choices = list("pearson", "spearman", "kendall")),
checkboxInput(
inputId = "corr_uselogs",
label = "Use log2 values for plot axes and values",
value = TRUE
),
checkboxInput(
inputId = "corr_usesubset",
label = "Use a subset of max 1000 genes (quicker to plot)",
value = TRUE
),
p("Compute sample to sample correlations on the normalized counts - warning, it can take a while to plot all points (depending mostly on the number of samples you provided)."),
actionButton("compute_pairwisecorr", "Run", class = "btn btn-primary"),
uiOutput("pairwise_plotUI"),
uiOutput("heatcorr_plotUI")
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Counts Overview panel
# ui panel extract results -----------------------------------------------------------
tabPanel(
"Extract Results",
icon = icon("table"),
# see: http://stackoverflow.com/questions/21609436/r-shiny-conditionalpanel-output-value?noredirect=1&lq=1
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Extract and inspect the DE results"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_extractresults", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_results.md", package = "ideal"))
)
)
)
),
actionButton("tour_results", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(
width = 6,
uiOutput("choose_fac")
)
),
fluidRow(
column(
width = 4,
# factor as covariate
wellPanel(
width = 4, id = "factor_opts",
uiOutput("fac1"),
uiOutput("fac2"),
# continuous covariate
uiOutput("facnum")
)
),
column(
width = 4,
# factor with > 2 levels
wellPanel(
width = 4,
uiOutput("lrtavailable"),
uiOutput("lrtfull"),
uiOutput("lrtreduced")
),
uiOutput("runlrt")
)
),
## general options for result function
# alpha is set via FDR on the left side
fluidRow(
column(
width = 4,
wellPanel(
id = "resu_opts",
selectInput("resu_indfil",
label = "Apply independent filtering automatically",
choices = c(TRUE, FALSE), selected = TRUE
),
selectInput("resu_lfcshrink",
label = "Shrink the log fold change for the contrast of interest",
choices = c(TRUE, FALSE), selected = TRUE
),
selectInput("resu_ihw", "Use Independent Hypothesis Weighting (IHW) as a filtering function",
choices = c(TRUE, FALSE), selected = FALSE
)
)
)
),
# , evtl also the *filter* parameter of the function, i.e. baseMean if not specified
fluidRow(
column(
width = 6,
uiOutput("runresults"),
uiOutput("store_result"),
verbatimTextOutput("diyres_summary")
)
),
DT::dataTableOutput("table_res"),
downloadButton("downloadTblResu", "Download", class = "btn btn-success"),
fluidRow(
h3("Diagnostic plots"),
column(
width = 6,
plotOutput("pvals_hist"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_hist", "Download Plot"),
textInput("filename_plot_pvals_hist", label = "Save as...", value = "plot_pvals_hist.pdf")
)
),
column(
width = 6,
plotOutput("pvals_hist_strat"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_hist_strat", "Download Plot"),
textInput("filename_plot_pvals_hist_strat", label = "Save as...", value = "plot_pvals_hist_strat.pdf")
)
),
column(
width = 6,
plotOutput("pvals_ss"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_pvals_ss", "Download Plot"),
textInput("filename_plot_pvals_ss", label = "Save as...", value = "plot_pvals_ss.pdf")
)
),
column(
width = 6,
plotOutput("logfc_hist"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_logfc_hist", "Download Plot"),
textInput("filename_plot_logfc_hist", label = "Save as...", value = "plot_logfc_hist.pdf")
)
)
)
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Extract Results panel
# ui panel summary plots -----------------------------------------------------------
tabPanel(
"Summary Plots",
icon = icon("photo"),
conditionalPanel(
condition = "!output.checkresu",
headerPanel("Interactive graphical exploration of the results"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_summaryplots", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_plots.md", package = "ideal"))
)
)
)
),
actionButton("tour_plots", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(
6,
h4("MA plot - Interactive!"),
plotOutput("plotma", brush = "ma_brush"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_ma", "Download Plot"),
textInput("filename_plot_ma", label = "Save as...", value = "plot_ma.pdf")
)
),
column(
6,
h4("Zoomed section"),
plotOutput("mazoom", click = "mazoom_click"),
numericInput("size_genelabels", label = "Labels size: ", value = 4, min = 1, max = 8),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mazoom", "Download Plot"),
textInput("filename_plot_mazoom", label = "Save as...", value = "plot_mazoom.pdf")
)
)
),
fluidRow(
column(
6,
h4("Selected gene"),
checkboxInput("ylimZero_genes", "Set y axis limit to 0", value = TRUE),
plotOutput("genefinder_plot"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_genefinder", "Download Plot"),
textInput("filename_plot_genefinder", label = "Save as...", value = "plot_genefinder.pdf")
)
),
column(
6,
h4("Gene infobox"),
htmlOutput("rentrez_infobox")
)
),
fluidRow(
column(
6,
h4("volcano plot"),
plotOutput("volcanoplot"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_volcanoplot", "Download Plot"),
textInput("filename_plot_volcanoplot", label = "Save as...", value = "plot_volcanoplot.pdf")
)
)
),
fluidRow(radioButtons("heatmap_colv", "Cluster samples", choices = list("Yes" = TRUE, "No" = FALSE), selected = TRUE)),
fluidRow(
column(
4,
checkboxInput("rowscale", label = "Scale by rows", value = TRUE)
),
column(
4,
checkboxInput("pseudocounts", "use log2(1+counts)", value = TRUE)
)
),
fluidRow(
column(
6,
plotOutput("heatbrush"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_heatbrush", "Download Plot"),
textInput("filename_plot_heatbrush", label = "Save as...", value = "plot_heatbrush.pdf")
)
),
column(
6,
plotlyOutput("hpi_brush")
)
),
box(
title = "Brushed table", status = "primary", solidHeader = TRUE,
id = "box_brushedtbl",
collapsible = TRUE, collapsed = TRUE, width = 12,
fluidRow(
DT::dataTableOutput("ma_brush_out"),
downloadButton("downloadTblMabrush", "Download", class = "btn btn-success")
)
)
),
conditionalPanel(
condition = "output.checkresu",
h2("You did not create the result object yet. Please go the dedicated tab and generate it")
)
), # end of Summary Plots panel
# ui panel gene finder -----------------------------------------------------------
tabPanel(
"Gene Finder",
icon = icon("crosshairs"),
conditionalPanel(
condition = "!output.checkdds",
headerPanel("Find your gene(s) of interest"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_genefinder", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_genefinder.md", package = "ideal"))
)
)
)
),
actionButton("tour_genefinder", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
),
br(),
fluidRow(
column(6, checkboxInput("ylimZero_genefinder", "Set y axis limit to 0", value = TRUE))
),
fluidRow(
column(
6,
plotOutput("bp1"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp1", "Download Plot"),
textInput("filename_plotbp1", label = "Save as...", value = "plotbp1.pdf")
)
),
column(
6,
plotOutput("bp2"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp2", "Download Plot"),
textInput("filename_plotbp2", label = "Save as...", value = "plotbp2.pdf")
)
)
),
fluidRow(
column(
6,
plotOutput("bp3"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp3", "Download Plot"),
textInput("filename_plotbp3", label = "Save as...", value = "plotbp3.pdf")
)
),
column(
6,
plotOutput("bp4"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plotbp4", "Download Plot"),
textInput("filename_plotbp4", label = "Save as...", value = "plotbp4.pdf")
)
)
),
fluidRow(
column(
width = 10, offset = 1,
plotOutput("ma_highlight"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mahighlight", "Download Plot"),
textInput("filename_plot_mahighlight", label = "Save as...", value = "plot_mahighlight.pdf")
),
DT::dataTableOutput("table_combi"),
downloadButton("downloadTblCombi", "Download", class = "btn btn-success"),
fileInput(
inputId = "gl_ma",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
plotOutput("ma_hl_list"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_mahllist", "Download Plot"),
textInput("filename_plot_mahllist", label = "Save as...", value = "plot_mahllist.pdf")
),
DT::dataTableOutput("table_combi_list"),
downloadButton("downloadTblCombiList", "Download", class = "btn btn-success")
)
)
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Gene Finder panel
# ui panel functional analysis ----------------------------------------------------------
tabPanel(
"Functional Analysis",
icon = icon("list-alt"),
conditionalPanel(
condition = "!output.checkresu",
headerPanel("Find functions enriched in gene sets"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_functionalanalysis", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_funcanalysis.md", package = "ideal"))
)
)
)
),
actionButton("tour_funcanalysis", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
selectInput("go_cats",
label = "Select the GO category(ies) of interest",
choices = list("GO Biological Process" = "BP", "GO Molecular Function" = "MF", "GO Cellular Component" = "CC"),
selected = "BP", multiple = TRUE
),
div(
id = "myAnchorBox",
tabBox(
width = NULL,
id = "gse_tabbox",
tabPanel("UPregu",
icon = icon("arrow-circle-up"),
fluidRow(column(width = 6, actionButton("button_enrUP", "Perform gene set enrichment analysis on the upregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUP_goseq", "Perform gene set enrichment analysis on the upregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUP_topgo", "Perform gene set enrichment analysis on the upregulated genes - topGO", class = "btn btn-primary"))),
uiOutput("ui_DT_gse_up"),
uiOutput("ui_DT_gse_up_goseq"),
# DT::dataTableOutput("DT_gse_up"),
# DT::dataTableOutput("DT_gse_up_goseq"),
fluidRow(
column(
width = 9,
uiOutput("ui_DT_gse_up_topgo"),
# DT::dataTableOutput("DT_gse_up_topgo"),
downloadButton("downloadGOTbl_up", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_up_topgo"))
)
),
tabPanel("DOWNregu",
icon = icon("arrow-circle-down"),
fluidRow(column(width = 6, actionButton("button_enrDOWN", "Perform gene set enrichment analysis on the downregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrDOWN_goseq", "Perform gene set enrichment analysis on the downregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrDOWN_topgo", "Perform gene set enrichment analysis on the downregulated genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_down"),
# DT::dataTableOutput("DT_gse_down_goseq"),
uiOutput("ui_DT_gse_down"),
uiOutput("ui_DT_gse_down_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_down_topgo"),
uiOutput("ui_DT_gse_down_topgo"),
downloadButton("downloadGOTbl_down", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_down_topgo"))
)
),
tabPanel("UPDOWN",
icon = icon("arrows-v"),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN", "Perform gene set enrichment analysis on the up- and downregulated genes", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN_goseq", "Perform gene set enrichment analysis on the up- and downregulated genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrUPDOWN_topgo", "Perform gene set enrichment analysis on the up- and downregulated genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_updown"),
# DT::dataTableOutput("DT_gse_updown_goseq"),
uiOutput("ui_DT_gse_updown"),
uiOutput("ui_DT_gse_updown_goseq"),
fluidRow(
column(
width = 9,
uiOutput("ui_DT_gse_updown_topgo"),
downloadButton("downloadGOTbl_updown", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_updown_topgo"))
)
),
tabPanel("List1",
icon = icon("list"),
fileInput(
inputId = "gl1",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
fluidRow(column(width = 6, actionButton("button_enrLIST1", "Perform gene set enrichment analysis on the genes in list1", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST1_goseq", "Perform gene set enrichment analysis on the list1 genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST1_topgo", "Perform gene set enrichment analysis on the list1 genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_list1"),
# DT::dataTableOutput("DT_gse_list1_goseq"),
uiOutput("ui_DT_gse_list1"),
uiOutput("ui_DT_gse_list1_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_list1_topgo"),
uiOutput("ui_DT_gse_list1_topgo"),
downloadButton("downloadGOTbl_l1", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_l1_topgo"))
)
),
tabPanel("List2",
icon = icon("list-alt"),
fileInput(
inputId = "gl2",
label = "Upload a gene list file",
accept = c(
"text/csv", "text/comma-separated-values",
"text/tab-separated-values", "text/plain",
".csv", ".tsv"
), multiple = FALSE
),
fluidRow(column(width = 6, actionButton("button_enrLIST2", "Perform gene set enrichment analysis on the genes in list2", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST2_goseq", "Perform gene set enrichment analysis on the list2 genes - goseq", class = "btn btn-primary"))),
fluidRow(column(width = 6, actionButton("button_enrLIST2_topgo", "Perform gene set enrichment analysis on the list2 genes - topGO", class = "btn btn-primary"))),
# DT::dataTableOutput("DT_gse_list2"),
# DT::dataTableOutput("DT_gse_list2_goseq"),
uiOutput("ui_DT_gse_list2"),
uiOutput("ui_DT_gse_list2_goseq"),
fluidRow(
column(
width = 9,
# DT::dataTableOutput("DT_gse_list2_topgo"),
uiOutput("ui_DT_gse_list2_topgo"),
downloadButton("downloadGOTbl_l2", "Download", class = "btn btn-success")
),
column(width = 3, plotOutput("goterm_heatmap_l2_topgo"))
)
)
)
),
## will put collapsible list elements? or multi tab panel? or something to select on the left, and operate output-wise on the right e.g. venn diagrams or table for gene set enrichment
# h3("custom list 3 - handpicked") # use the select input from the left column?
# ,verbatimTextOutput("debuggls"),
# verbatimTextOutput("printUPgenes"),
# verbatimTextOutput("debuglists"),
h2("Intersection of gene sets"),
fluidRow(
column(
width = 4,
checkboxInput("toggle_updown", "Use up and down regulated genes", TRUE),
checkboxInput("toggle_up", "Use up regulated genes", FALSE),
checkboxInput("toggle_down", "Use down regulated genes", FALSE)
),
column(
width = 4,
checkboxInput("toggle_list1", "Use list1 genes", TRUE),
checkboxInput("toggle_list2", "Use list2 genes", FALSE),
checkboxInput("toggle_list3", "Use list3 genes", FALSE)
)
),
fluidRow(
column(
width = 6, plotOutput("vennlists"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_vennlists", "Download Plot"),
textInput("filename_plot_vennlists", label = "Save as...", value = "plot_vennlists.pdf")
),
offset = 3
)
),
fluidRow(
column(
width = 6, plotOutput("upsetLists"),
div(
align = "right", style = "margin-right:15px; margin-bottom:10px",
downloadButton("download_plot_upsetlists", "Download Plot"),
textInput("filename_plot_upsetlists", label = "Save as...", value = "plot_upsetlists.pdf")
),
offset = 3
)
)
),
conditionalPanel(
condition = "output.checkresu",
h2("You did not create the result object yet. Please go the dedicated tab and generate it")
)
), # end of Functional Analysis panel
# ui panel signatures explorer ---------------------------------------------------------
tabPanel(
"Signatures Explorer",
icon = icon("map"),
conditionalPanel(
condition = "!output.checkdds",
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_signatureexplorer", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_signatureexplorer.md", package = "ideal"))
)
)
)
),
actionButton("tour_signatureexplorer", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
fluidRow(
column(
width = 6,
h4("Setup options"),
wellPanel(
uiOutput("sig_ui_gmtin"),
uiOutput("sig_ui_nrsigs"),
actionButton("sig_button_computevst",
label = "Compute the variance stabilized transformed data",
icon = icon("spinner"), class = "btn btn-success"
)
)
),
column(
width = 6,
h4("Conversion options"),
wellPanel(
uiOutput("sig_ui_id_data"),
uiOutput("sig_ui_id_sigs"),
uiOutput("sig_ui_orgdbpkg"),
actionButton("sig_convert_setup",
label = "Apply id conversion between data and signatures"
)
),
verbatimTextOutput("sig_convcheck")
)
),
fluidRow(
column(
width = 6,
wellPanel(
uiOutput("sig_ui_selectsig"),
uiOutput("sig_ui_annocoldata"),
checkboxInput("sig_useDEonly",
label = "Use only DE genes in the signature", value = FALSE
)
)
# ,
# verbatimTextOutput("sig_sigmembers")
),
column(
width = 6,
wellPanel(
checkboxInput("sig_clusterrows", label = "Cluster rows", value = TRUE),
checkboxInput("sig_clustercols", label = "Cluster columns"),
checkboxInput("sig_centermean", label = "Center mean", value = TRUE),
checkboxInput("sig_scalerow", label = "Standardize by row")
)
)
),
fluidRow(
column(
width = 8, offset = 2,
plotOutput("sig_heat")
)
)
),
conditionalPanel(
condition = "output.checkdds",
h2("You did not create the dds object yet. Please go the main tab and generate it")
)
), # end of Signatures Explorer panel
# ui panel report editor -----------------------------------------------------------
tabPanel(
"Report Editor",
icon = icon("pencil"),
headerPanel("Create, view and export a report of your analysis"),
fluidRow(
column(
width = 8,
shinyBS::bsCollapse(
id = "help_reporteditor", open = NULL,
shinyBS::bsCollapsePanel(
"Help",
includeMarkdown(system.file("extdata", "help_report.md", package = "ideal"))
)
)
)
),
actionButton("tour_report", "Click me for a quick tour of the section", icon("info"),
style = "color: #ffffff; background-color: #0092AC; border-color: #2e6da4"
), br(),
fluidRow(
column(
width = 6,
box(
title = "markdown options", status = "primary", solidHeader = TRUE, collapsible = TRUE, width = 9, collapsed = TRUE,
id = "md_opts",
radioButtons("rmd_dl_format", label = "Choose Format:", c("HTML" = "html", "R Markdown" = "rmd"), inline = TRUE),
textInput("report_title", "Title: "),
textInput("report_author", "Author: "),
radioButtons("report_toc", "Table of Contents", choices = list("Yes" = "true", "No" = "false")),
radioButtons("report_ns", "Number sections", choices = list("Yes" = "true", "No" = "false")),
selectInput("report_theme", "Theme", choices = list(
"Default" = "default", "Cerulean" = "cerulean",
"Journal" = "journal", "Flatly" = "flatly",
"Readable" = "readable", "Spacelab" = "spacelab",
"United" = "united", "Cosmo" = "cosmo"
)),
radioButtons("report_echo", "Echo the commands in the output", choices = list("Yes" = "TRUE", "No" = "FALSE"))
)
),
column(
width = 6,
box(
title = "editor options", status = "primary", solidHeader = TRUE, collapsible = TRUE, width = 9, collapsed = TRUE,
id = "editor_opts",
checkboxInput("enableAutocomplete", "Enable AutoComplete", TRUE),
conditionalPanel(
"input.enableAutocomplete",
wellPanel(
checkboxInput("enableLiveCompletion", "Live auto completion", TRUE),
checkboxInput("enableRCompletion", "R code completion", TRUE)
)
),
selectInput("mode", "Mode: ", choices = shinyAce::getAceModes(), selected = "markdown"),
selectInput("theme", "Theme: ", choices = shinyAce::getAceThemes(), selected = "solarized_light")
)
)
),
fluidRow(
column(
3,
actionButton("updatepreview_button", "Update report", class = "btn btn-primary"), p()
),
column(3, downloadButton("saveRmd", "Generate & Save", class = "btn btn-success")),
column(3,
uiOutput("ui_iSEEexport"),
uiOutput("ui_GeneTonicexport"))
),
tabBox(
width = NULL,
id = "report_tabbox",
tabPanel("Report preview",
icon = icon("file-text"),
htmlOutput("knitDoc")
),
tabPanel("Edit report",
icon = icon("pencil-square-o"),
aceEditor("acereport_rmd",
mode = "markdown", theme = "solarized_light", autoComplete = "live",
value = "_Initialization of the_ `ideal` _report generation..._",
placeholder = "You can enter some code and text in R Markdown format",
height = "800px"
)
)
)
), # end of Report Editor panel
# ui panel about -----------------------------------------------------------
tabPanel(
"About",
icon = icon("institution"),
# headerPanel("Information on ideal/session"),
fluidRow(
column(
width = 8,
includeMarkdown(system.file("extdata", "about.md", package = "ideal")),
verbatimTextOutput("sessioninfo")
)
)
) # end of About panel
) # end of box
) # end of myScrollBox
, footer()
), # end of dashboardBody
skin = "black"
) # end of dashboardPage
# server definition -----------------------------------------------------------
# nocov start
ideal_server <- shinyServer(function(input, output, session) {
# server tours setup -----------------------------------------------------------
# The tour steps below are defined in code (read from delimited files shipped
# with the package) rather than being wrapped explicitly in introBox() in the UI.
# Load the introJS tour step tables shipped with the package.
# Each file is a ';'-separated two-column table (element;intro) describing the
# UI anchors and the text shown at each tour step; columns are kept as
# character (stringsAsFactors = FALSE) because introjs() expects strings.
# Extracted into a helper to remove nine copies of the same incantation.
read_tour_steps <- function(step_file) {
  # Locate the bundled tour definition inside the installed package and parse it.
  read.delim(
    system.file("extdata", step_file, package = "ideal"),
    sep = ";", stringsAsFactors = FALSE
  )
}
intro_firsttour <- read_tour_steps("intro_firsttour.txt")
intro_datasetup <- read_tour_steps("intro_datasetup.txt")
intro_countsoverview <- read_tour_steps("intro_countsoverview.txt")
intro_results <- read_tour_steps("intro_results.txt")
intro_plots <- read_tour_steps("intro_plots.txt")
intro_genefinder <- read_tour_steps("intro_genefinder.txt")
intro_funcanalysis <- read_tour_steps("intro_funcanalysis.txt")
intro_signatureexplorer <- read_tour_steps("intro_signatureexplorer.txt")
intro_report <- read_tour_steps("intro_report.txt")
# Launch the main guided tour covering the whole app.
observeEvent(input$btn, {
  introjs(session,
    options = list(steps = intro_firsttour)
  )
})
# Two-step demo tour anchored to its own trigger button, illustrating how
# tours work before the user starts a "real" one.
observeEvent(input$introexample, {
  intro_example <- data.frame(
    element = c("#introexample", "#introexample"),
    intro = c(
      "Tour elements can be anchored to elements of the UI that are intended to be highlighted. You can proceed to the next step by using the button, or also pushing the right arrow key.",
      "Well done. This is how a tour can look like. Click outside of this window to close the tour, or on the corresponding button."
    )
  )
  introjs(session,
    options = list(steps = intro_example)
  )
})
# One observer per panel-specific tour button; each starts the matching
# tour definition pre-loaded above from inst/extdata.
observeEvent(input$tour_datasetup, {
  introjs(session,
    options = list(steps = intro_datasetup)
  )
})
observeEvent(input$tour_countsoverview, {
  introjs(session,
    options = list(steps = intro_countsoverview)
  )
})
observeEvent(input$tour_results, {
  introjs(session,
    options = list(steps = intro_results)
  )
})
observeEvent(input$tour_plots, {
  introjs(session,
    options = list(steps = intro_plots)
  )
})
observeEvent(input$tour_genefinder, {
  introjs(session,
    options = list(steps = intro_genefinder)
  )
})
observeEvent(input$tour_funcanalysis, {
  introjs(session,
    options = list(steps = intro_funcanalysis)
  )
})
observeEvent(input$tour_signatureexplorer, {
  introjs(session,
    options = list(steps = intro_signatureexplorer)
  )
})
observeEvent(input$tour_report, {
  introjs(session,
    options = list(steps = intro_report)
  )
})
## Update directory
# Per-session scratch directory (e.g. for report-generation artifacts).
userdir <- tempfile()
dir.create(userdir, recursive = TRUE)
# sapply(file.path(newuserdir, dir(newuserdir)[grep("code_", dir(newuserdir))]), file.remove)
# file.copy(file.path(userdir, "code_All.R"), newuserdir)
# userdir <- newuserdir
# dir.create(file.path(userdir, "data"))
# server setup reactivevalues -----------------------------------------------------------
## placeholder for the figures to export
exportPlots <- reactiveValues()
# expfig_fig1 <- NULL
# )
# will store all the reactive values relevant to the app
values <- reactiveValues()
# Seed the app state with whatever was passed to the app at launch time
# (any of these may be NULL when starting from scratch).
values$countmatrix <- countmatrix
values$expdesign <- expdesign
values$dds_obj <- dds_obj
values$res_obj <- res_obj
values$annotation_obj <- annotation_obj
values$gene_signatures <- gene_signatures
# this part sets the "matching" objects if something is provided that is depending on these
# A provided dds takes precedence: derive counts/metadata from it so the
# three stay consistent.
if (!is.null(dds_obj)) {
  values$countmatrix <- counts(dds_obj, normalized = FALSE)
  values$expdesign <- as.data.frame(colData(dds_obj))
}
# server welcome home ---------------------------------------------------------
# Collapsible "Instructions" box on the welcome panel, filled from the
# markdown document shipped with the package.
output$ui_instructions <- renderUI({
  box(
    title = "Instructions",
    status = "info",
    width = 12,
    solidHeader = TRUE,
    collapsible = TRUE,
    collapsed = TRUE,
    includeMarkdown(system.file("extdata", "instructions.md", package = "ideal"))
  )
})
# server info boxes -----------------------------------------------------------
# Status box for the DESeqDataSet: green with its dimensions when available,
# red "yet to create" placeholder otherwise.
output$box_ddsobj <- renderUI({
  dds <- values$dds_obj
  if (is.null(dds)) {
    return(valueBox("dds object",
      "yet to create",
      icon = icon("list"),
      color = "red", width = NULL
    ))
  }
  valueBox("dds object",
    paste0(nrow(dds), " genes - ", ncol(dds), " samples"),
    icon = icon("list"),
    color = "green", width = NULL
  )
})
# Status box for the annotation data frame: green with its dimensions when
# available, red placeholder otherwise.
output$box_annobj <- renderUI({
  anno <- values$annotation_obj
  if (is.null(anno)) {
    return(valueBox("Annotation",
      "yet to create",
      icon = icon("book"),
      color = "red", width = NULL
    ))
  }
  valueBox("Annotation",
    paste0(nrow(anno), " genes - ", ncol(anno), " ID types"),
    icon = icon("book"),
    color = "green", width = NULL
  )
})
# Status box for the DE results: green with the number of DE genes at the
# currently selected FDR, red placeholder otherwise.
output$box_resobj <- renderUI({
  res <- values$res_obj
  if (is.null(res)) {
    return(valueBox("DE genes",
      "yet to create",
      icon = icon("list-alt"),
      color = "red", width = NULL
    ))
  }
  # count DE genes at the chosen FDR, ignoring NA adjusted p-values
  n_de <- sum(res$padj < input$FDR & res$log2FoldChange != 0, na.rm = TRUE)
  valueBox("DE genes",
    paste0(n_de, " DE genes - out of ", nrow(res)),
    icon = icon("list-alt"),
    color = "green", width = NULL
  )
})
# if i want to focus a little more on the ihw object
values$ihwres <- NULL
# server uploading data -----------------------------------------------------------
## count matrix
# File-upload widget for the count matrix; replaced by a notice when counts
# were already supplied at launch (directly or via a dds object).
output$upload_count_matrix <- renderUI({
  # `||` (scalar, short-circuiting) is the appropriate operator in an if()
  # condition; the elementwise `|` worked only because both sides are scalar
  if (!is.null(dds_obj) || !is.null(countmatrix)) {
    return(fluidRow(column(
      width = 12,
      tags$li("You already provided a count matrix or a DESeqDataSet object as input. You can check your input data in the collapsible box here below."), offset = 2
    )))
  }
  fileInput(
    inputId = "uploadcmfile",
    label = "Upload a count matrix file",
    accept = c(
      "text/csv", "text/comma-separated-values",
      "text/tab-separated-values", "text/plain",
      ".csv", ".tsv"
    ), multiple = FALSE
  )
})
# Parse the uploaded count matrix file, guessing the field separator first
# (see sepGuesser.R). Returns NULL until a file has been uploaded.
readCountmatrix <- reactive({
  upload <- input$uploadcmfile
  if (is.null(upload)) {
    return(NULL)
  }
  sep_guess <- sepguesser(upload$datapath)
  utils::read.delim(upload$datapath,
    header = TRUE,
    as.is = TRUE,
    sep = sep_guess,
    quote = "",
    row.names = 1, # https://github.com/federicomarini/pcaExplorer/issues/1
    check.names = FALSE
  )
})
## exp design
# File-upload widget for the sample metadata; replaced by a notice when the
# experimental design was already supplied at launch (directly or via a dds).
output$upload_metadata <- renderUI({
  # `||` (scalar, short-circuiting) instead of elementwise `|` in an if()
  if (!is.null(dds_obj) || !is.null(expdesign)) {
    return(fluidRow(column(
      width = 12,
      tags$li("You already provided a matrix/data.frame with the experimental covariates or a DESeqDataSet object as input. You can check your input data in the collapsible box here below."), offset = 2
    )))
  }
  fileInput(
    inputId = "uploadmetadatafile",
    label = "Upload a sample metadata matrix file",
    accept = c(
      "text/csv", "text/comma-separated-values",
      "text/tab-separated-values", "text/plain",
      ".csv", ".tsv"
    ), multiple = FALSE
  )
})
# Parse the uploaded sample metadata file, guessing the field separator.
# Returns NULL until a file has been uploaded.
readMetadata <- reactive({
  upload <- input$uploadmetadatafile
  if (is.null(upload)) {
    return(NULL)
  }
  sep_guess <- sepguesser(upload$datapath)
  utils::read.delim(upload$datapath,
    header = TRUE,
    as.is = TRUE,
    sep = sep_guess,
    quote = "",
    check.names = FALSE
  )
})
# load the demo data
# Load the airway demo dataset (counts + metadata) into the app state and
# reset any previously derived objects so the user restarts from Step 2.
observeEvent(input$btn_loaddemo, withProgress(
  message = "Loading demo data",
  detail = "Loading airway count and metadata information",
  value = 0,
  {
    aw <- requireNamespace("airway", quietly = TRUE)
    incProgress(0.2, detail = "`airway` package loaded")
    if (aw) {
      data(airway, package = "airway", envir = environment())
      cm_airway <- assay(airway)
      # increments fixed so the total progress never exceeds 1
      # (was 0.2 + 0.7 + 0.3 = 1.2)
      incProgress(0.5, detail = "Count matrix loaded")
      ed_airway <- as.data.frame(colData(airway))
      values$countmatrix <- cm_airway
      values$expdesign <- ed_airway
      incProgress(0.2, detail = "Experimental metadata loaded")
      # just to be sure, erase the annotation and the rest
      values$dds_obj <- NULL
      values$annotation_obj <- NULL
      values$res_obj <- NULL
      showNotification("All components for generating the DESeqDataset object have been loaded, proceed to Step 2!",
        type = "message"
      )
    } else {
      showNotification("The 'airway' package is currently not installed. Please do so by executing BiocManager::install('airway') before launching ideal()", type = "warning")
    }
  }
))
# Modal dialog documenting the expected input formats, with an example image.
observeEvent(input$help_format, {
  showModal(modalDialog(
    title = "Format specifications for ideal",
    includeMarkdown(system.file("extdata", "datainput.md", package = "ideal")),
    h4("Example:"),
    tags$img(
      # NOTE(review): the example screenshot is loaded from the pcaExplorer
      # package installation, not from ideal - confirm this is intended and
      # that pcaExplorer is guaranteed to be installed alongside ideal
      src = base64enc::dataURI(file = system.file("www", "help_dataformats.png", package = "pcaExplorer"), mime = "image/png"),
      width = 750
    ),
    easyClose = TRUE,
    footer = NULL,
    size = "l"
  ))
})
# Multi-select for the covariates that will form the DESeq2 design formula,
# offered once the experimental design table is available.
output$ddsdesign <- renderUI({
  if (is.null(values$expdesign)) {
    return(NULL)
  }
  covariates <- colnames(values$expdesign)
  selectInput("dds_design",
    label = "Select the design for your experiment: ",
    choices = c(NULL, covariates),
    selected = NULL,
    multiple = TRUE
  )
})
# server ui steps -----------------------------------------------------------
# "Step 2" box: shown once both counts and metadata are available; lets the
# user pick the design and generate the DESeqDataSet.
output$ui_step2 <- renderUI({
  # `||` (scalar, short-circuiting) instead of elementwise `|` in an if()
  if (is.null(values$expdesign) || is.null(values$countmatrix)) {
    return(NULL)
  }
  box(
    width = 12, title = "Step 2", status = "warning", solidHeader = TRUE,
    tagList(
      # as in https://groups.google.com/forum/#!topic/shiny-discuss/qQ8yICfvDu0
      h2("Select the DE design and create the DESeqDataSet object"),
      fluidRow(
        column(
          width = 6,
          uiOutput("ddsdesign"),
          uiOutput("ui_diydds"),
          hr(),
          # uiOutput("ok_dds"),
          verbatimTextOutput("debugdiy")
        )
      )
    )
  )
})
# Optional step box: build the annotation data frame (species and id type),
# shown only once a dds object exists.
output$ui_stepanno <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  anno_content <- tagList(
    h2("Create the annotation data frame for your dataset"),
    fluidRow(
      column(
        width = 8,
        uiOutput("ui_selectspecies"),
        verbatimTextOutput("speciespkg"),
        uiOutput("ui_idtype"),
        verbatimTextOutput("printDIYanno")
      )
    ),
    uiOutput("ui_getanno")
  )
  box(anno_content,
    width = 12, title = "Optional Step", status = "info", solidHeader = TRUE
  )
})
# Optional step box: remove suspected outlier samples from the dataset,
# shown only once a dds object exists.
output$ui_stepoutlier <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  outlier_content <- tagList(
    h2("Remove sample(s) from the current dataset - suspected outliers!"),
    fluidRow(
      column(
        width = 8,
        uiOutput("ui_selectoutliers"),
        uiOutput("outliersout"),
        verbatimTextOutput("printremoved")
      )
    )
  )
  box(outlier_content,
    width = 12, title = "Optional Step", status = "info", solidHeader = TRUE
  )
})
# Show the "Generate the dds object" button only once counts, metadata,
# and at least one design covariate are all available.
output$ui_diydds <- renderUI({
  # `||` (scalar, short-circuiting) instead of elementwise `|` in an if()
  if (is.null(values$expdesign) || is.null(values$countmatrix) || is.null(input$dds_design)) {
    return(NULL)
  }
  actionButton("button_diydds", "Generate the dds object", class = "btn btn-success")
})
# Button to fetch the annotation; only rendered when a dds exists, and
# validate() blocks it until a species has been chosen.
output$ui_getanno <- renderUI({
  if (is.null(values$dds_obj)) { ### and not provided already with sep annotation?
    return(NULL)
  }
  shiny::validate(
    need(input$speciesSelect != "", "Select a species first in the panel")
  )
  actionButton("button_getanno",
    "Retrieve the gene symbol annotation for the uploaded data",
    class = "btn btn-primary"
  )
})
# Core-count slider, bounded by the number of workers BiocParallel reports
# as available on the current machine.
output$ui_nrcores <- renderUI({
  sliderInput("nrcores",
    label = "Choose how many cores to use for computing:",
    min = 1,
    max = BiocParallel::multicoreWorkers(),
    value = 1,
    step = 1
  )
})
# "Step 3" box: EDA reminder, core-count selector, and the Run DESeq controls.
output$ui_step3 <- renderUI({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  box(
    width = 12, title = "Step 3", status = "success", solidHeader = TRUE,
    tagList(
      h2("Run DESeq!"),
      fluidRow(
        column(
          width = 8,
          shinyBS::bsCollapse(
            id = "eda_check",
            shinyBS::bsCollapsePanel(
              title = "Make sure you properly performed Exploratory Data Analysis (EDA) before testing for Differential Expression (DE)",
              style = "info",
              includeMarkdown(system.file("extdata", "help_eda.md", package = "ideal"))
            )
          )
        )
      ),
      fluidRow(
        column(
          width = 4,
          uiOutput("ui_nrcores")
        )
      ),
      uiOutput("rundeseq"),
      verbatimTextOutput("printDIYresults"),
      uiOutput("ui_stepend")
    )
  )
})
# Closing banner with a dispersion-estimates diagnostic plot, rendered only
# after DESeq() has stored results in the dds metadata columns.
output$ui_stepend <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  if (!"results" %in% mcols(mcols(values$dds_obj))$type) { #
    return(NULL)
  }
  tagList(
    h2("Good to go!"),
    box(
      width = 6, title = "Diagnostic plot", status = "info", solidHeader = TRUE,
      collapsible = TRUE, collapsed = TRUE,
      plotOutput("diagno_dispests")
    )
  )
})
# DESeq2 dispersion-estimates diagnostic plot for the current dds.
output$diagno_dispests <- renderPlot({
  plotDispEsts(values$dds_obj)
})
# server ok objects -----------------------------------------------------------
# Green check-mark shown next to each completed setup step.
# icon("check", class = "icon-done") does not allow setting the size, hence
# the raw HTML (previously duplicated in all five renderers below).
.done_icon <- function() {
  tags$div(HTML('<i class="fa fa-check fa-3x icon-done"></i>'))
}
output$ok_cm <- renderUI({
  if (is.null(values$countmatrix)) {
    return(NULL)
  }
  .done_icon()
})
output$ok_ed <- renderUI({
  if (is.null(values$expdesign)) {
    return(NULL)
  }
  .done_icon()
})
output$ok_dds <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  .done_icon()
})
output$ok_anno <- renderUI({
  if (is.null(values$annotation_obj)) {
    return(NULL)
  }
  .done_icon()
})
output$ok_resu <- renderUI({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  .done_icon()
})
# Logical flags consumed by conditionalPanel()s in the UI; they must keep
# updating even while hidden, hence suspendWhenHidden = FALSE below.
output$checkdds <- reactive({
  is.null(values$dds_obj)
})
output$checkresu <- reactive({
  is.null(values$res_obj)
})
outputOptions(output, "checkresu", suspendWhenHidden = FALSE)
outputOptions(output, "checkdds", suspendWhenHidden = FALSE)
# Interactive preview tables for the current count matrix and metadata.
output$dt_cm <- DT::renderDataTable({
  cm <- values$countmatrix
  if (is.null(cm)) {
    return(NULL)
  }
  datatable(cm, options = list(scrollX = TRUE, scrollY = "400px"))
})
output$dt_ed <- DT::renderDataTable({
  ed <- values$expdesign
  if (is.null(ed)) {
    return(NULL)
  }
  datatable(ed, options = list(scrollX = TRUE))
})
# http://stackoverflow.com/questions/17024685/how-to-use-a-character-string-in-formula
# http://stats.stackexchange.com/questions/29477/how-to-write-a-linear-model-formula-with-100-variables-in-r
# http://stackoverflow.com/questions/7666807/anova-test-fails-on-lme-fits-created-with-pasted-formula/7668846#7668846
# Build a DESeqDataSet from the current counts/metadata, assembling the
# design formula from the covariates selected in the UI, and pre-compute
# the size factors. Returns NULL while any ingredient is missing.
diyDDS <- reactive({
  # `||` (scalar, short-circuiting) instead of elementwise `|` in an if()
  if (is.null(values$countmatrix) || is.null(values$expdesign) || is.null(input$dds_design)) {
    return(NULL)
  }
  dds <- DESeqDataSetFromMatrix(
    countData = values$countmatrix,
    colData = values$expdesign,
    design = as.formula(paste0("~", paste(input$dds_design, collapse = " + ")))
  )
  dds <- estimateSizeFactors(dds)
  return(dds)
})
# Materialize the dds into the app state when the user clicks the button.
observeEvent(input$button_diydds, {
  # `&&` (scalar, short-circuiting) instead of elementwise `&` in an if()
  if (!is.null(values$countmatrix) && !is.null(values$expdesign)) {
    values$dds_obj <- diyDDS()
  }
})
# Debug printout of the current dds object and its design formula.
output$debugdiy <- renderPrint({
  if (!is.null(values$dds_obj)) {
    print(values$dds_obj)
    print(design(values$dds_obj))
  }
})
# as in http://stackoverflow.com/questions/29716868/r-shiny-how-to-get-an-reactive-data-frame-updated-each-time-pressing-an-actionb
# Push freshly uploaded files into the app state as soon as they arrive.
observeEvent(input$uploadcmfile, {
  values$countmatrix <- readCountmatrix()
})
observeEvent(input$uploadmetadatafile, {
  values$expdesign <- readMetadata()
})
# server retrieving anno --------------------------------------------------
# Lookup table mapping a human-readable species name to its Bioconductor
# OrgDb annotation package, plus auxiliary identifiers used elsewhere
# (Ensembl database name, limma::goana shortcut, goseq genome shortcut).
annoSpecies_df <-
  data.frame(
    species = c(
      "", "Anopheles", "Arabidopsis", "Bovine", "Worm",
      "Canine", "Fly", "Zebrafish", "E coli strain K12",
      "E coli strain Sakai", "Chicken", "Human", "Mouse",
      "Rhesus", "Malaria", "Chimp", "Rat",
      "Yeast", "Streptomyces coelicolor", "Pig", "Toxoplasma gondii",
      "Xenopus"
    ),
    pkg = c(
      "", "org.Ag.eg.db", "org.At.tair.db", "org.Bt.eg.db", "org.Ce.eg.db",
      "org.Cf.eg.db", "org.Dm.eg.db", "org.Dr.eg.db", "org.EcK12.eg.db",
      "org.EcSakai.eg.db", "org.Gg.eg.db", "org.Hs.eg.db", "org.Mm.eg.db",
      "org.Mmu.eg.db", "org.Pf.plasmo.db", "org.Pt.eg.db", "org.Rn.eg.db",
      "org.Sc.sgd.db", "org.Sco.eg.db", "org.Ss.eg.db", "org.Tgondii.eg.db",
      "org.Xl.eg.db"
    ),
    stringsAsFactors = FALSE
  )
annoSpecies_df <- annoSpecies_df[order(annoSpecies_df$species), ]
# this one is relevant for creating links to the genes
# Note: the positional vectors below follow the row order AFTER the
# alphabetical sort by species performed just above.
annoSpecies_df$ensembl_db <- c(
  "", "", "", "Bos_taurus", "Canis_familiaris", "Gallus_gallus", "Pan_troglodytes",
  "", "", "Drosophila_melanogaster", "Homo_sapiens", "", "Mus_musculus",
  "Sus_scrofa", "Rattus_norvegicus", "Macaca_mulatta", "", "", "Caenorhabditis_elegans",
  "Xenopus_tropicalis", "Saccharomyces_cerevisiae", "Danio_rerio"
)
# this one is the shortcut for the limma::goana function
# (e.g. "org.Hs.eg.db" -> "Hs"); only defined for the *.eg.db packages
annoSpecies_df$species_short[grep(pattern = "eg.db", annoSpecies_df$pkg)] <- gsub(".eg.db", "", gsub("org.", "", annoSpecies_df$pkg))[grep(pattern = "eg.db", annoSpecies_df$pkg)]
# to match to the goseq genome setting
annoSpecies_df$goseq_shortcut <- c(
  "", "anoGam1", "Arabidopsis", "bosTau8", "canFam3", "galGal4", "panTro4", "E. coli K12", "E. coli Sakai",
  "dm6", "hg38", "Malaria", "mm10", "susScr3", "rn6", "rheMac", "", "", "ce11", "xenTro", "sacCer3", "danRer10"
)
rownames(annoSpecies_df) <- annoSpecies_df$species # easier to access afterwards
# annoSpecies_df <- annoSpecies_df[annoSpecies_df$species %in% c("","Human", "Mouse", "Rat", "Fly", "Chimp"),]
# Species selector, populated from the annotation lookup table; only shown
# once a dds object exists.
output$ui_selectspecies <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  species_choices <- annoSpecies_df$species
  selectInput("speciesSelect",
    label = "Select the species of your samples - it will also be used for enhancing result tables",
    choices = species_choices,
    selected = ""
  )
})
# Id-type selector; once a species is chosen, the standard choices are
# extended with all keytypes supported by its annotation package.
output$ui_idtype <- renderUI({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  std_choices <- c("ENSEMBL", "ENTREZID", "REFSEQ", "SYMBOL")
  if (input$speciesSelect != "") {
    annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
    # NOTE(review): require() attaches the annotation package so keytypes()
    # can be called on it; it returns FALSE rather than erroring when the
    # package is missing, in which case get(annopkg) below would fail -
    # confirm the missing-package path is guarded by the speciespkg output
    require(annopkg, character.only = TRUE)
    pkg_choices <- keytypes(get(annopkg))
    std_choices <- union(std_choices, pkg_choices)
  }
  selectInput("idtype", "select the id type in your data", choices = std_choices)
})
# Confirmation text: validates that a species is selected and that its
# annotation package can be attached, then reports package + short name.
output$speciespkg <- renderText({
  if (is.null(values$dds_obj)) { #
    return(NULL)
  }
  shiny::validate(
    need(
      input$speciesSelect != "",
      "Select a species - requires the corresponding annotation package"
    )
  )
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # require() returns FALSE when the package is unavailable, which makes
  # need() display the installation hint instead of erroring
  shiny::validate(
    need(
      require(annopkg, character.only = TRUE),
      paste0("The package ", annopkg, " is not installed/available. Try installing it with BiocManager::install('", annopkg, "')")
    )
  )
  retmsg <- paste0(annopkg, " - package available and loaded")
  # if (!require(annopkg,character.only=TRUE)) {
  # stop("The package",annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
  # }
  retmsg <- paste0(retmsg, " - ", gsub(".eg.db", "", gsub("org.", "", annopkg)))
  retmsg
})
# server outliers --------------------------------------------------------
# Multi-select of candidate outlier samples plus the button that triggers
# rebuilding the dds without them; both appear only once a dds exists.
output$ui_selectoutliers <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  selectInput("selectoutliers",
    "Select the samples to remove - candidate outliers",
    choices = colnames(values$dds_obj),
    selected = NULL,
    multiple = TRUE
  )
})
output$outliersout <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  actionButton("button_outliersout",
    "Recompute the dds without some samples",
    class = "btn btn-primary"
  )
})
# Rebuild the dds from scratch without the selected samples, keeping the
# original design, remembering which samples were dropped, and resetting
# the (now stale) results object.
observeEvent(input$button_outliersout, {
  withProgress(
    {
      allsamples <- colnames(values$dds_obj)
      outliersamples <- input$selectoutliers
      keptsamples <- setdiff(allsamples, outliersamples)
      dds <- DESeqDataSetFromMatrix(
        countData = values$countmatrix[, keptsamples],
        colData = values$expdesign[keptsamples, ],
        design = design(values$dds_obj)
        # design=as.formula(paste0("~",paste(input$dds_design, collapse=" + ")))
      )
      dds <- estimateSizeFactors(dds)
      # return(dds)
      # re-create the dds and keep track of which samples were removed
      values$removedsamples <- input$selectoutliers
      # replacing values$dds_obj re-renders dependent inputs, so the species
      # selection is saved here and restored right after the swap
      curr_species <- input$speciesSelect
      values$dds_obj <- dds
      updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
      # accordingly, reset the results
      values$res_obj <- NULL
    },
    message = "Removing selected samples from the current dataset"
  )
})
# Shows which samples were removed as outliers (NULL while none were).
output$printremoved <- renderPrint({
  print(values$removedsamples)
})
# server run deseq --------------------------------------------------------
# "Run DESeq!" button, rendered only once a dds object is available.
output$rundeseq <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  actionButton("button_rundeseq", "Run DESeq!",
    icon = icon("spinner"),
    class = "btn btn-success"
  )
})
# Run the DESeq() pipeline on the current dds, optionally in parallel via
# BiocParallel, preserving the species selection across the dds update.
observeEvent(input$button_rundeseq, {
  withProgress(
    message = "Running DESeq on your data...",
    detail = "This step might take a while",
    value = 0,
    {
      # trick to keep species info while still changing the dds_obj
      curr_species <- input$speciesSelect
      incProgress(0.1)
      if (input$nrcores == 1) {
        values$dds_obj <- DESeq(values$dds_obj)
      } else {
        # leave open option for computing in parallel?
        values$dds_obj <- DESeq(values$dds_obj,
          parallel = TRUE,
          BPPARAM = MulticoreParam(workers = input$nrcores)
        )
      }
      incProgress(0.89)
      updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
    }
  )
})
# Keep the id-type selection in sync when the species selector changes.
observeEvent(input$speciesSelect, {
  curr_idtype <- values$cur_type
  updateSelectInput(session, inputId = "idtype", selected = curr_idtype)
})
# Summary of the DE results at the chosen FDR; tells the user to run
# DESeq() first if no results are stored in the dds metadata yet.
output$printDIYresults <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$dds_obj),
      "Provide or construct a dds object"
    )
  )
  shiny::validate(
    need(
      "results" %in% mcols(mcols(values$dds_obj))$type,
      "dds object provided, but couldn't find results. you should first run DESeq() with the button up here"
    )
  )
  summary(results(values$dds_obj), alpha = input$FDR)
})
# server counts overview --------------------------------------------------------
# Count matrix in the unit currently selected in the UI (raw, normalized,
# variance-stabilized, or log10 of normalized + 1). The vst is computed
# lazily on first request and cached in values$vst_obj.
current_countmat <- reactive({
  switch(input$countstable_unit,
    raw_counts = counts(values$dds_obj, normalized = FALSE),
    normalized_counts = counts(values$dds_obj, normalized = TRUE),
    vst_counts = {
      if (is.null(values$vst_obj)) {
        withProgress(
          message = "Computing the variance stabilized transformed data...",
          detail = "This step can take a little while",
          value = 0,
          {
            values$vst_obj <- vst(values$dds_obj)
          }
        )
      }
      assay(values$vst_obj) ## see if it is worth to keep in here or explore possibility with fast vst
    },
    log10_counts = log10(1 + counts(values$dds_obj, normalized = TRUE))
  )
})
# Table view of the current count matrix (in the selected unit).
output$showcountmat <- DT::renderDataTable({
  datatable(current_countmat())
})
# CSV download of the current count matrix, named after the selected unit.
output$downloadData <- downloadHandler(
  filename = function() {
    paste0(input$countstable_unit, "table.csv")
  },
  content = function(file) {
    write.csv(current_countmat(), file)
  }
)
# All-vs-all pairwise sample correlation plots (opt-in, can be slow).
output$corrplot <- renderPlot({
  if (input$compute_pairwisecorr) {
    withProgress(
      pair_corr(current_countmat(),
        method = input$corr_method,
        log = input$corr_uselogs,
        use_subset = input$corr_usesubset
      ),
      message = "Preparing the plot",
      detail = "this can take a while..."
    )
  }
})
# Heatmap of the sample-sample correlation matrix.
output$heatcorr <- renderPlot({
  if (input$compute_pairwisecorr) {
    pheatmap(cor(current_countmat()))
  }
})
# The two plot containers are only rendered while the checkbox is ticked.
output$pairwise_plotUI <- renderUI({
  if (!input$compute_pairwisecorr) {
    return()
  }
  plotOutput("corrplot", height = "1000px")
  # )
})
output$heatcorr_plotUI <- renderUI({
  if (!input$compute_pairwisecorr) {
    return()
  }
  plotOutput("heatcorr")
})
# overview on number of detected genes on different threshold types
# Prints absolute and relative numbers of genes passing the row-sums and
# row-means thresholds currently set in the UI, plus the count range.
output$detected_genes <- renderPrint({
  t1 <- rowSums(counts(values$dds_obj))
  t2 <- rowMeans(counts(values$dds_obj, normalized = TRUE))
  thresh_rowsums <- input$threshold_rowsums
  thresh_rowmeans <- input$threshold_rowmeans
  abs_t1 <- sum(t1 > thresh_rowsums)
  rel_t1 <- 100 * mean(t1 > thresh_rowsums)
  abs_t2 <- sum(t2 > thresh_rowmeans)
  rel_t2 <- 100 * mean(t2 > thresh_rowmeans)
  cat("Number of detected genes:\n")
  # NOTE(review): t1 is the total over all samples, so the "at least a
  # sample with" wording is an approximation - confirm intended phrasing
  cat(abs_t1, "genes have at least a sample with more than", thresh_rowsums, "counts\n")
  cat(
    paste0(round(rel_t1, 3), "%"), "of the", nrow(values$dds_obj),
    "genes have at least a sample with more than", thresh_rowsums, "counts\n"
  )
  cat(abs_t2, "genes have more than", thresh_rowmeans, "counts (normalized) on average\n")
  cat(
    paste0(round(rel_t2, 3), "%"), "of the", nrow(values$dds_obj),
    # bug fix: this percentage refers to the row-means threshold; it
    # previously printed thresh_rowsums while rel_t2 uses thresh_rowmeans
    "genes have more than", thresh_rowmeans, "counts (normalized) on average\n"
  )
  cat("Counts are ranging from", min(counts(values$dds_obj)), "to", max(counts(values$dds_obj)))
})
# Apply gene filtering with the chosen criterion/threshold, re-estimate the
# size factors, and preserve the species selection across the dds update.
observeEvent(input$featfilt_dds, {
  # thresholds are evaluated on the CURRENT (pre-filtering) dds
  t1 <- rowSums(counts(values$dds_obj))
  t2 <- rowMeans(counts(values$dds_obj, normalized = TRUE))
  thresh_rowsums <- input$threshold_rowsums
  thresh_rowmeans <- input$threshold_rowmeans
  if (input$filter_crit == "row sums") {
    filt_dds <- values$dds_obj[t1 > thresh_rowsums, ]
  } else {
    filt_dds <- values$dds_obj[t2 > thresh_rowmeans, ]
  }
  # TODO: see if re-estimation of size factors is required
  filt_dds <- estimateSizeFactors(filt_dds)
  curr_species <- input$speciesSelect
  values$dds_obj <- filt_dds
  updateSelectInput(session, inputId = "speciesSelect", selected = curr_species)
})
# server managing gene lists --------------------------------------------------------
## gene lists upload
# Each uploaded gene list is read as plain text (one symbol per line) and
# stored as a one-column "Gene Symbol" data frame in the app state. The
# observeEvent fires on upload and pulls from the matching reactive reader.
observeEvent(input$gl1, {
  mydf <- as.data.frame(gl1(), stringsAsFactors = FALSE)
  names(mydf) <- "Gene Symbol"
  values$genelist1 <- mydf
})
gl1 <- reactive({
  if (is.null(input$gl1)) {
    # User has not uploaded a file yet
    return(data.frame())
  } else {
    gl1 <- readLines(input$gl1$datapath)
    return(gl1)
  }
})
observeEvent(input$gl2, {
  mydf <- as.data.frame(gl2(), stringsAsFactors = FALSE)
  names(mydf) <- "Gene Symbol"
  values$genelist2 <- mydf
})
gl2 <- reactive({
  if (is.null(input$gl2)) {
    # User has not uploaded a file yet
    return(data.frame())
  } else {
    gl2 <- readLines(input$gl2$datapath)
    return(gl2)
  }
})
# Same pattern for the list used in the MA-plot panel.
observeEvent(input$gl_ma, {
  mydf <- as.data.frame(gl_ma(), stringsAsFactors = FALSE)
  names(mydf) <- "Gene Symbol"
  values$genelist_ma <- mydf
})
gl_ma <- reactive({
  if (is.null(input$gl_ma)) {
    # User has not uploaded a file yet
    return(data.frame())
  } else {
    gl_ma <- readLines(input$gl_ma$datapath)
    return(gl_ma)
  }
})
# Debug printout of the first uploaded list.
output$debuggls <- renderPrint({
  values$genelist1
  # values$genelist2
})
# DE genes lists ----------------------------------------------------------
# Reactive character vectors of gene symbols for the up-, down-, and all
# regulated genes at the current FDR, used for list overlaps (Venn/UpSet).
# NOTE(review): in all three reactives below the branching looks inverted -
# when "symbol" is NOT among the result columns, the else branch reads
# res_tbl_*$symbol, which is then NULL; confirm the intended condition.
values$genelistUP <- reactive({
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  # keep only up-regulated genes with a defined adjusted p-value
  res_tbl_UP <- res_tbl[res_tbl$log2FoldChange > 0 & !is.na(res_tbl$padj), ]
  # res_tbl_DOWN <- res_tbl[res_tbl$log2FoldChange < 0 & !is.na(res_tbl$padj),]
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      # re-derive symbols from the annotation table by matching ids
      res_tbl_UP$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl_UP$id,
          rownames(values$annotation_obj)
        )
      ]
      listUP <- res_tbl_UP$symbol
    } else {
      listUP <- NULL
    }
  } else {
    listUP <- res_tbl_UP$symbol
  }
  return(listUP)
})
values$genelistDOWN <- reactive({
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  # res_tbl_UP <- res_tbl[res_tbl$log2FoldChange > 0 & !is.na(res_tbl$padj),]
  # keep only down-regulated genes with a defined adjusted p-value
  res_tbl_DOWN <- res_tbl[res_tbl$log2FoldChange < 0 & !is.na(res_tbl$padj), ]
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      res_tbl_DOWN$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl_DOWN$id,
          rownames(values$annotation_obj)
        )
      ]
      listDOWN <- res_tbl_DOWN$symbol
    } else {
      listDOWN <- NULL
    }
  } else {
    listDOWN <- res_tbl_DOWN$symbol
  }
  return(listDOWN)
})
values$genelistUPDOWN <- reactive({
  # all DE genes regardless of direction
  res_tbl <- deseqresult2DEgenes(values$res_obj, FDR = input$FDR)
  if ("symbol" %in% colnames(values$res_obj)) {
    if (!is.null(values$annotation_obj)) {
      res_tbl$symbol <- values$annotation_obj$gene_name[
        match(
          res_tbl$id,
          rownames(values$annotation_obj)
        )
      ]
      listUPDOWN <- res_tbl$symbol
    } else {
      listUPDOWN <- NULL
    }
  } else {
    listUPDOWN <- res_tbl$symbol
  }
  return(listUPDOWN)
})
## list of gene lists
# Assemble the named list of gene lists to compare (DE up/down/all plus the
# user-uploaded lists), keeping only the available lists whose UI toggle is on.
gll <- reactive({
  mylist <- list(
    listUP = values$genelistUP(),
    listDOWN = values$genelistDOWN(),
    listUPDOWN = values$genelistUPDOWN(),
    list1 = as.character(values$genelist1$`Gene Symbol`),
    list2 = as.character(values$genelist2$`Gene Symbol`),
    list3 = NULL
  ) # will be changed to be the ones selected by the user
  # drop lists that are not available yet (e.g. no results object)
  gll_nonempty <- mylist[!vapply(mylist, is.null, logical(1))]
  # names of the lists whose toggle is switched on in the UI
  lists_tokeep <- names(mylist)[which(c(
    input$toggle_up,
    input$toggle_down,
    input$toggle_updown,
    input$toggle_list1,
    input$toggle_list2,
    input$toggle_list3
  ))]
  # intersect() instead of match(): a toggled-but-unavailable list would
  # previously produce an NA-named NULL entry in the result
  gll_final <- gll_nonempty[intersect(lists_tokeep, names(gll_nonempty))]
  return(gll_final)
})
# Debug printout of the assembled list of gene lists.
output$debuglists <- renderPrint({
  # length(gll_nonempty)
  # length(gll())
  # lapply(gll(),length)
  print(gll())
})
# Venn diagram of the selected gene lists; requires all lists to be non-NULL.
output$vennlists <- renderPlot({
  shiny::validate(
    need(all(sapply(gll(), function(arg) !is.null(arg))),
      message = "Some lists are empty - make sure you extracted the results using the annotation object"
    )
  )
  gplots::venn(gll())
})
# UpSet plot of the selected gene lists; needs at least two non-empty sets.
output$upsetLists <- renderPlot({
  shiny::validate(
    need(sum(sapply(gll(), function(arg) length(arg) > 0)) > 1,
      message = "Make sure you provide at least two sets"
    )
  )
  UpSetR::upset(fromList(gll()))
})
# Retrieve the gene annotation via the selected OrgDb package, storing the
# chosen species/id-type, with a notification (not an error) on failure.
observeEvent(input$button_getanno, {
  withProgress(
    message = "Retrieving the annotation...",
    detail = "Locating package",
    value = 0,
    {
      annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
      incProgress(0.1, detail = "Matching identifiers")
      tryCatch(
        {
          annotation_obj <- get_annotation_orgdb(values$dds_obj, orgdb_species = annopkg, idtype = input$idtype)
          values$annotation_obj <- annotation_obj
          # and also, set the species in the reactiveValues
          values$cur_species <- input$speciesSelect
          values$cur_type <- input$idtype
        },
        error = function(e) {
          showNotification(
            paste(
              "Warning! The annotation object was not generated,",
              "because of an error in the underlying `mapIds` function:",
              "-----", e
            ),
            type = "warning"
          )
        }
      )
    }
  )
})
# Preview of the generated annotation data frame.
output$printDIYanno <- renderPrint({
  print(head(values$annotation_obj))
})
  output$printUPgenes <- renderPrint({
    # Debug/diagnostic output: previews the upregulated gene list and the
    # result of mapping it (and the expressed background genes) to Entrez ids.
    # The actual enrichment runs in the button_enr* observeEvent handlers.
    print(head(values$genelistUP()))
    print(str(values$genelistUP()))
    organism <- annoSpecies_df[values$cur_species, ]$species_short
    # background: all genes with at least one read across samples
    backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
    inputType <- "SYMBOL" # will be replaced by input$...
    annopkg <- annoSpecies_df[values$cur_species, ]$pkg
    # DE genes arrive as symbols...
    listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
      keys = values$genelistUP(),
      column = "ENTREZID", keytype = inputType
    ))
    # ...while the background genes carry the original id type
    listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
      keys = backgroundgenes,
      column = "ENTREZID", keytype = input$idtype
    ))
    print(str(listGenesEntrez))
    print(class(listGenesEntrez))
    print(str(listBackgroundEntrez))
    print(class(listBackgroundEntrez))
    print(head(listGenesEntrez))
    print(head(listBackgroundEntrez))
  })
### UP
observeEvent(input$button_enrUP, {
withProgress(message = "Performing Gene Set Enrichment on upregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUP())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
organism <- annoSpecies_df[values$cur_species, ]$species_short
backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
inputType <- "SYMBOL" # will be replaced by input$...
# annopkg <- paste0("org.",organism,".eg.db")
annopkg <- annoSpecies_df[values$cur_species, ]$pkg
if (!require(annopkg, character.only = TRUE)) {
stop("The package", annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
}
listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = values$genelistUP(),
column = "ENTREZID", keytype = inputType
))
listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = backgroundgenes,
column = "ENTREZID", keytype = input$idtype
))
incProgress(0.1, detail = "IDs mapped")
values$gse_up <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
ontology = input$go_cats[1],
number = 200
)
incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
go_ids <- rownames(values$gse_up)
allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
degenes <- values$genelistUP()
DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
values$gse_up$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
}
})
})
observeEvent(input$button_enrUP_goseq, {
withProgress(message = "GOSEQ - Performing Gene Set Enrichment on upregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUP())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de.genes <- values$genelistUP() # assumed to be in symbols
assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = assayed.genes.ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = de.genes,
column = "ENSEMBL",
keytype = "SYMBOL",
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
values$gse_up_goseq <- goseqTable(de.genes.ids,
assayed.genes.ids,
genome = annoSpecies_df[values$cur_species, ]$goseq_short,
id = "ensGene",
testCats = paste0("GO:", input$go_cats),
FDR_GO_cutoff = 1,
nTop = 200,
addGeneToTerms = TRUE,
orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
)
incProgress(0.89)
}
})
})
observeEvent(input$button_enrUP_topgo, {
withProgress(message = "TOPGO - Performing Gene Set Enrichment on upregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUP())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de_symbols <- values$genelistUP() # assumed to be in symbols
bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = bg_ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
# library(topGO)
# requireNamespace("topGO")
values$topgo_up <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
ontology = input$go_cats[1],
mapping = annoSpecies_df[values$cur_species, ]$pkg,
geneID = "symbol", addGeneToTerms = TRUE
)
incProgress(0.89)
}
})
})
### DOWN
observeEvent(input$button_enrDOWN, {
withProgress(message = "Performing Gene Set Enrichment on downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
organism <- annoSpecies_df[values$cur_species, ]$species_short
backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
inputType <- "SYMBOL" # will be replaced by input$...
annopkg <- annoSpecies_df[values$cur_species, ]$pkg
if (!require(annopkg, character.only = TRUE)) {
stop("The package", annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
}
listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = values$genelistDOWN(),
column = "ENTREZID", keytype = inputType
))
listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = backgroundgenes,
column = "ENTREZID", keytype = input$idtype
))
incProgress(0.1, detail = "IDs mapped")
values$gse_down <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
ontology = input$go_cats[1],
number = 200
)
incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
go_ids <- rownames(values$gse_down)
allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
degenes <- values$genelistDOWN()
DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
values$gse_down$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
}
})
})
observeEvent(input$button_enrDOWN_goseq, {
withProgress(message = "GOSEQ - Performing Gene Set Enrichment on downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de.genes <- values$genelistDOWN() # assumed to be in symbols
assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = assayed.genes.ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = de.genes,
column = "ENSEMBL",
keytype = "SYMBOL",
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
values$gse_down_goseq <- goseqTable(de.genes.ids,
assayed.genes.ids,
genome = annoSpecies_df[values$cur_species, ]$goseq_short,
id = "ensGene",
testCats = paste0("GO:", input$go_cats),
FDR_GO_cutoff = 1,
nTop = 200,
addGeneToTerms = TRUE,
orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
)
incProgress(0.89)
}
})
})
observeEvent(input$button_enrDOWN_topgo, {
withProgress(message = "TOPGO - Performing Gene Set Enrichment on downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de_symbols <- values$genelistDOWN() # assumed to be in symbols
bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = bg_ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
# library(topGO)
# requireNamespace("topGO")
values$topgo_down <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
ontology = input$go_cats[1], # will take the first ontology
mapping = annoSpecies_df[values$cur_species, ]$pkg,
geneID = "symbol", addGeneToTerms = TRUE
)
incProgress(0.89)
}
})
})
### UPDOWN
observeEvent(input$button_enrUPDOWN, {
withProgress(message = "Performing Gene Set Enrichment on up- and downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUPDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
organism <- annoSpecies_df[values$cur_species, ]$species_short
backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
inputType <- "SYMBOL" # will be replaced by input$...
# annopkg <- paste0("org.",organism,".eg.db")
annopkg <- annoSpecies_df[values$cur_species, ]$pkg
if (!require(annopkg, character.only = TRUE)) {
stop("The package", annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
}
listGenesEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = values$genelistUPDOWN(),
column = "ENTREZID", keytype = inputType
))
listBackgroundEntrez <- as.character(AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = backgroundgenes,
column = "ENTREZID", keytype = input$idtype
))
incProgress(0.1, detail = "IDs mapped")
values$gse_updown <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
ontology = input$go_cats[1],
number = 200
)
incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
go_ids <- rownames(values$gse_updown)
allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
degenes <- values$genelistDOWN()
DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
# values$gse_down$genes[1:20] <- DEgenes_list
# lapply(values$gse_down,class)
values$gse_updown$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
}
})
})
observeEvent(input$button_enrUPDOWN_goseq, {
withProgress(message = "GOSEQ - Performing Gene Set Enrichment on up and downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUPDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de.genes <- values$genelistUPDOWN() # assumed to be in symbols
assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = assayed.genes.ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = de.genes,
column = "ENSEMBL",
keytype = "SYMBOL",
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
values$gse_updown_goseq <- goseqTable(de.genes.ids,
assayed.genes.ids,
genome = annoSpecies_df[values$cur_species, ]$goseq_short,
id = "ensGene",
testCats = paste0("GO:", input$go_cats),
FDR_GO_cutoff = 1,
nTop = 200,
addGeneToTerms = TRUE,
orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
)
incProgress(0.89)
}
})
})
observeEvent(input$button_enrUPDOWN_topgo, {
withProgress(message = "TOPGO - Performing Gene Set Enrichment on up and downregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$genelistUPDOWN())) {
showNotification("You are using ids different than symbols, please convert them by creating/using an annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de_symbols <- values$genelistUPDOWN() # assumed to be in symbols
bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = bg_ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
# library(topGO)
# requireNamespace("topGO")
values$topgo_updown <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
ontology = input$go_cats[1],
mapping = annoSpecies_df[values$cur_species, ]$pkg,
geneID = "symbol", addGeneToTerms = TRUE
)
incProgress(0.89)
}
})
})
### LIST1
observeEvent(input$button_enrLIST1, {
withProgress(message = "Performing Gene Set Enrichment on upregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
organism <- annoSpecies_df[values$cur_species, ]$species_short
backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
inputType <- "SYMBOL" # will be replaced by input$...
# annopkg <- paste0("org.",organism,".eg.db")
annopkg <- annoSpecies_df[values$cur_species, ]$pkg
if (!require(annopkg, character.only = TRUE)) {
stop("The package", annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
}
listGenesEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = as.character(values$genelist1$`Gene Symbol`),
column = "ENTREZID", keytype = inputType
)
listBackgroundEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = backgroundgenes,
column = "ENTREZID", keytype = input$idtype
)
incProgress(0.1, detail = "IDs mapped")
values$gse_list1 <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
ontology = input$go_cats[1],
number = 200
)
incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
go_ids <- rownames(values$gse_list1)
allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
degenes <- values$genelistDOWN()
DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
values$gse_list1$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
}
})
})
observeEvent(input$button_enrLIST1_goseq, {
withProgress(message = "GOSEQ - Performing Gene Set Enrichment on list 1 genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de.genes <- values$genelist1$`Gene Symbol` # assumed to be in symbols
assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = assayed.genes.ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = de.genes,
column = "ENSEMBL",
keytype = "SYMBOL",
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
values$gse_list1_goseq <- goseqTable(de.genes.ids,
assayed.genes.ids,
genome = annoSpecies_df[values$cur_species, ]$goseq_short,
id = "ensGene",
testCats = paste0("GO:", input$go_cats),
FDR_GO_cutoff = 1,
nTop = 200,
addGeneToTerms = TRUE,
orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
)
incProgress(0.89)
}
})
})
observeEvent(input$button_enrLIST1_topgo, {
withProgress(message = "TOPGO - Performing Gene Set Enrichment on list1 genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de_symbols <- values$genelist1$`Gene Symbol` # assumed to be in symbols
bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = bg_ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
# library(topGO)
# requireNamespace("topGO")
values$topgo_list1 <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
ontology = input$go_cats[1],
mapping = annoSpecies_df[values$cur_species, ]$pkg,
geneID = "symbol", addGeneToTerms = TRUE
)
incProgress(0.89)
}
})
})
### LIST2
observeEvent(input$button_enrLIST2, {
withProgress(message = "Performing Gene Set Enrichment on upregulated genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
organism <- annoSpecies_df[values$cur_species, ]$species_short
backgroundgenes <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
inputType <- "SYMBOL" # will be replaced by input$...
# annopkg <- paste0("org.",organism,".eg.db")
annopkg <- annoSpecies_df[values$cur_species, ]$pkg
if (!require(annopkg, character.only = TRUE)) {
stop("The package", annopkg, "is not installed/available. Try installing it with BiocManager::install() ?")
}
listGenesEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = as.character(values$genelist2$`Gene Symbol`),
column = "ENTREZID", keytype = inputType
)
listBackgroundEntrez <- AnnotationDbi::mapIds(eval(parse(text = annopkg)),
keys = backgroundgenes,
column = "ENTREZID", keytype = input$idtype
)
incProgress(0.1, detail = "IDs mapped")
values$gse_list2 <- limma::topGO(limma::goana(listGenesEntrez, listBackgroundEntrez, species = organism),
ontology = input$go_cats[1],
number = 200
)
incProgress(0.7, detail = "adding gene names to GO terms") # good indicator for showing it has progressed
go_ids <- rownames(values$gse_list2)
allegs_list <- lapply(go_ids, function(arg) AnnotationDbi::get(arg, get(paste0("org.", organism, ".egGO2ALLEGS"))))
genes_list <- lapply(allegs_list, function(arg) unlist(AnnotationDbi::mget(arg, get(paste0("org.", organism, ".egSYMBOL")))))
degenes <- values$genelistDOWN()
DEgenes_list <- lapply(genes_list, function(arg) intersect(arg, degenes))
values$gse_list2$genes <- unlist(lapply(DEgenes_list, function(arg) paste(arg, collapse = ",")))
}
})
})
observeEvent(input$button_enrLIST2_goseq, {
withProgress(message = "GOSEQ - Performing Gene Set Enrichment on list 2 genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de.genes <- values$genelist2$`Gene Symbol` # assumed to be in symbols
assayed.genes.ids <- rownames(values$dds_obj) # as IDs, but then to be converted back
assayed.genes <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = assayed.genes.ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
de.genes.ids <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = de.genes,
column = "ENSEMBL",
keytype = "SYMBOL",
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
values$gse_list2_goseq <- goseqTable(de.genes.ids,
assayed.genes.ids,
genome = annoSpecies_df[values$cur_species, ]$goseq_short,
id = "ensGene",
testCats = paste0("GO:", input$go_cats),
FDR_GO_cutoff = 1,
nTop = 200,
addGeneToTerms = TRUE,
orgDbPkg = annoSpecies_df[values$cur_species, ]$pkg # ,
)
incProgress(0.89)
}
})
})
observeEvent(input$button_enrLIST2_topgo, {
withProgress(message = "TOPGO - Performing Gene Set Enrichment on list2 genes...", value = 0, {
if (is.null(input$speciesSelect)) {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else if (is.null(values$cur_species) | values$cur_species == "") {
showNotification("Please specify the species in the Data Setup panel and retrieve the annotation object", type = "warning")
return(NULL)
} else {
de_symbols <- values$genelist2$`Gene Symbol` # assumed to be in symbols
bg_ids <- rownames(values$dds_obj)[rowSums(counts(values$dds_obj)) > 0]
bg_symbols <- mapIds(get(annoSpecies_df[values$cur_species, ]$pkg),
keys = bg_ids,
column = "SYMBOL",
keytype = input$idtype,
multiVals = "first"
)
incProgress(0.1, detail = "IDs mapped")
# library(topGO)
# requireNamespace("topGO")
values$topgo_list2 <- pcaExplorer::topGOtable(de_symbols, bg_symbols,
ontology = input$go_cats[1],
mapping = annoSpecies_df[values$cur_species, ]$pkg,
geneID = "symbol", addGeneToTerms = TRUE
)
incProgress(0.89)
}
})
})
# server gse datatables --------------------------------------------------------
## ui outputs here
output$ui_DT_gse_up <- renderUI({
if (is.null(values$gse_up)) {
return(NULL)
}
return(
tagList(
h4("goana table - up"),
DT::dataTableOutput("DT_gse_up")
)
)
})
output$ui_DT_gse_down <- renderUI({
if (is.null(values$gse_down)) {
return(NULL)
}
return(
tagList(
h4("goana table - down"),
DT::dataTableOutput("DT_gse_down")
)
)
})
output$ui_DT_gse_updown <- renderUI({
if (is.null(values$gse_updown)) {
return(NULL)
}
return(
tagList(
h4("goana table - up&down"),
DT::dataTableOutput("DT_gse_updown")
)
)
})
output$ui_DT_gse_list1 <- renderUI({
if (is.null(values$gse_list1)) {
return(NULL)
}
return(
tagList(
h4("goana table - list1"),
DT::dataTableOutput("DT_gse_list1")
)
)
})
output$ui_DT_gse_list2 <- renderUI({
if (is.null(values$gse_up)) {
return(NULL)
}
return(
tagList(
h4("goana table - list2"),
DT::dataTableOutput("DT_gse_list2")
)
)
})
output$ui_DT_gse_up_topgo <- renderUI({
if (is.null(values$topgo_up)) {
return(NULL)
}
return(
tagList(
h4("topGO table - up"),
DT::dataTableOutput("DT_gse_up_topgo")
)
)
})
output$ui_DT_gse_down_topgo <- renderUI({
if (is.null(values$topgo_down)) {
return(NULL)
}
return(
tagList(
h4("topGO table - down"),
DT::dataTableOutput("DT_gse_down_topgo")
)
)
})
output$ui_DT_gse_updown_topgo <- renderUI({
if (is.null(values$topgo_updown)) {
return(NULL)
}
return(
tagList(
h4("topGO table - up&down"),
DT::dataTableOutput("DT_gse_updown_topgo")
)
)
})
output$ui_DT_gse_list1_topgo <- renderUI({
if (is.null(values$topgo_list1)) {
return(NULL)
}
return(
tagList(
h4("topGO table - list1"),
DT::dataTableOutput("DT_gse_list1_topgo")
)
)
})
output$ui_DT_gse_list2_topgo <- renderUI({
if (is.null(values$topgo_list2)) {
return(NULL)
}
return(
tagList(
h4("topGO table - list2"),
DT::dataTableOutput("DT_gse_list2_topgo")
)
)
})
output$ui_DT_gse_up_goseq <- renderUI({
if (is.null(values$gse_up_goseq)) {
return(NULL)
}
return(
tagList(
h4("goseq table - up"),
DT::dataTableOutput("DT_gse_up_goseq")
)
)
})
output$ui_DT_gse_down_goseq <- renderUI({
if (is.null(values$gse_down_goseq)) {
return(NULL)
}
return(
tagList(
h4("goseq table - down"),
DT::dataTableOutput("DT_gse_down_goseq")
)
)
})
output$ui_DT_gse_updown_goseq <- renderUI({
if (is.null(values$gse_updown_goseq)) {
return(NULL)
}
return(
tagList(
h4("goseq table - up&down"),
DT::dataTableOutput("DT_gse_updown_goseq")
)
)
})
output$ui_DT_gse_list1_goseq <- renderUI({
if (is.null(values$gse_list1_goseq)) {
return(NULL)
}
return(
tagList(
h4("goseq table - list1"),
DT::dataTableOutput("DT_gse_list1_goseq")
)
)
})
output$ui_DT_gse_list2_goseq <- renderUI({
if (is.null(values$gse_up_goseq)) {
return(NULL)
}
return(
tagList(
h4("goseq table - list2"),
DT::dataTableOutput("DT_gse_list2_goseq")
)
)
})
## actual DTs here
output$DT_gse_up <- DT::renderDataTable({
# if not null...
if (is.null(values$gse_up)) {
return(NULL)
}
mytbl <- values$gse_up
rownames(mytbl) <- createLinkGO(rownames(mytbl))
datatable(mytbl, escape = FALSE)
})
output$DT_gse_down <- DT::renderDataTable({
# if not null...
if (is.null(values$gse_down)) {
return(NULL)
}
mytbl <- values$gse_down
rownames(mytbl) <- createLinkGO(rownames(mytbl))
datatable(mytbl, escape = FALSE)
})
output$DT_gse_updown <- DT::renderDataTable({
# if not null...
if (is.null(values$gse_updown)) {
return(NULL)
}
mytbl <- values$gse_updown
rownames(mytbl) <- createLinkGO(rownames(mytbl))
datatable(mytbl, escape = FALSE)
})
output$DT_gse_list1 <- DT::renderDataTable({
# if not null...
if (is.null(values$gse_list1)) {
return(NULL)
}
mytbl <- values$gse_list1
# mytbl$GOid <- rownames(mytbl)
rownames(mytbl) <- createLinkGO(rownames(mytbl))
datatable(mytbl, escape = FALSE)
})
output$DT_gse_list2 <- DT::renderDataTable({
# if not null...
if (is.null(values$gse_list2)) {
return(NULL)
}
mytbl <- values$gse_list2
rownames(mytbl) <- createLinkGO(rownames(mytbl))
datatable(mytbl, escape = FALSE)
})
output$DT_gse_up_topgo <- DT::renderDataTable({
# if not null...
if (is.null(values$topgo_up)) {
return(NULL)
}
mytbl <- values$topgo_up
mytbl$GO.ID <- createLinkGO(mytbl$GO.ID)
DT::datatable(mytbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_down_topgo <- DT::renderDataTable({
# if not null...
if (is.null(values$topgo_down)) {
return(NULL)
}
mytbl <- values$topgo_down
mytbl$GO.ID <- createLinkGO(mytbl$GO.ID)
DT::datatable(mytbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_updown_topgo <- DT::renderDataTable({
# if not null...
if (is.null(values$topgo_updown)) {
return(NULL)
}
mytbl <- values$topgo_updown
mytbl$GO.ID <- createLinkGO(mytbl$GO.ID)
DT::datatable(mytbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_list1_topgo <- DT::renderDataTable({
# if not null...
if (is.null(values$topgo_list1)) {
return(NULL)
}
mytbl <- values$topgo_list1
# mytbl$GOid <- rownames(mytbl)
mytbl$GO.ID <- createLinkGO(mytbl$GO.ID)
DT::datatable(mytbl, escape = FALSE, selection = list(mode = "single"))
})
output$DT_gse_list2_topgo <- DT::renderDataTable({
# if not null...
if (is.null(values$topgo_list2)) {
return(NULL)
}
mytbl <- values$topgo_list2
# mytbl$GOid <- rownames(mytbl)
mytbl$GO.ID <- createLinkGO(mytbl$GO.ID)
DT::datatable(mytbl, escape = FALSE, selection = list(mode = "single"))
})
# Render a goseq enrichment table with clickable links on the category
# (GO id) column. Returns NULL while the results are not yet computed.
# datatable() is now namespaced as DT::datatable for consistency with the
# topGO tables above (and to not depend on DT being attached).
render_goseq_table <- function(goseq_tbl) {
  if (is.null(goseq_tbl)) {
    return(NULL)
  }
  mytbl <- goseq_tbl
  mytbl$category <- createLinkGO(mytbl$category)
  DT::datatable(mytbl, escape = FALSE, rownames = FALSE)
}
# goseq results tables for up/down/updown regulated genes and the two
# custom gene lists - all share the same rendering logic above
output$DT_gse_up_goseq <- DT::renderDataTable({
  render_goseq_table(values$gse_up_goseq)
})
output$DT_gse_down_goseq <- DT::renderDataTable({
  render_goseq_table(values$gse_down_goseq)
})
output$DT_gse_updown_goseq <- DT::renderDataTable({
  render_goseq_table(values$gse_updown_goseq)
})
output$DT_gse_list1_goseq <- DT::renderDataTable({
  render_goseq_table(values$gse_list1_goseq)
})
output$DT_gse_list2_goseq <- DT::renderDataTable({
  render_goseq_table(values$gse_list2_goseq)
})
# server gse heatmaps --------------------------------------------------------
# Heatmap of the (normTransform'ed, log2-scale) expression values of the
# genes annotated to the GO term selected in one of the topGO tables.
# topgo_tbl      - one of the values$topgo_* result data frames
# rows_selected  - the corresponding input$DT_*_rows_selected value
# Returns NULL (blank plot) when no row is selected; only the first
# selected row is used.
# NOTE: get(annopkg) replaces the previous eval(parse(text = annopkg)),
# matching the usage elsewhere in this file; a stray no-op expression in
# the original up/down variant was also dropped.
goterm_heatmap_topgo <- function(topgo_tbl, rows_selected) {
  if (length(rows_selected) == 0) {
    return(NULL)
  }
  # genes annotated to the selected term, stored as a comma-separated string
  mygenes <- topgo_tbl[rows_selected, ]$genes[1]
  myterm <- paste0(
    topgo_tbl[rows_selected, ]$`GO.ID`, " - ",
    topgo_tbl[rows_selected, ]$Term
  )
  genevec <- unlist(strsplit(mygenes, split = ","))
  annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  # map the gene symbols back to the id type used as rownames of the dds
  genevec_ids <- mapIds(get(annopkg), genevec, input$idtype, "SYMBOL", multiVals = "first")
  log2things <- assay(normTransform(values$dds_obj))
  selectedLogvalues <- log2things[genevec_ids, ]
  # label rows with symbols only if every symbol mapped to an id;
  # otherwise fall back to the ids themselves
  if (length(genevec_ids) == length(genevec)) {
    rowlabs <- genevec
  } else {
    rowlabs <- genevec_ids
  }
  pheatmap(selectedLogvalues, scale = "row", labels_row = rowlabs, main = myterm)
}
output$goterm_heatmap_up_topgo <- renderPlot({
  goterm_heatmap_topgo(values$topgo_up, input$DT_gse_up_topgo_rows_selected)
})
output$goterm_heatmap_down_topgo <- renderPlot({
  goterm_heatmap_topgo(values$topgo_down, input$DT_gse_down_topgo_rows_selected)
})
output$goterm_heatmap_updown_topgo <- renderPlot({
  goterm_heatmap_topgo(values$topgo_updown, input$DT_gse_updown_topgo_rows_selected)
})
output$goterm_heatmap_l1_topgo <- renderPlot({
  goterm_heatmap_topgo(values$topgo_list1, input$DT_gse_list1_topgo_rows_selected)
})
output$goterm_heatmap_l2_topgo <- renderPlot({
  goterm_heatmap_topgo(values$topgo_list2, input$DT_gse_list2_topgo_rows_selected)
})
# server signature explorer ------------------------------------------------------
# UI: file selector for the gene signatures collection (gmt format)
output$sig_ui_gmtin <- renderUI({
  fileInput("sig_gmtin", "gmt input file")
})
# Parse the uploaded gmt file into a list of gene signatures;
# NULL until a file has been provided
loaded_gmt <- reactive({
  gmt_upload <- input$sig_gmtin
  if (is.null(gmt_upload)) {
    return(NULL)
  }
  read_gmt(gmt_upload$datapath)
})
# Cache the parsed signatures in the reactive values on every new upload
observeEvent(input$sig_gmtin, {
  values$gene_signatures <- loaded_gmt()
})
# Value box summarising how many gene signatures are currently loaded:
# red "yet to be loaded" placeholder until a gmt file has been parsed
output$sig_ui_nrsigs <- renderUI({
  if (is.null(values$gene_signatures)) {
    return(valueBox("Gene signatures",
      "yet to be loaded",
      icon = icon("list"),
      color = "red", width = NULL
    ))
  }
  valueBox("Gene signatures",
    paste0(length(values$gene_signatures), " gene signatures"),
    icon = icon("list"),
    color = "green", width = NULL
  )
})
# Compute the variance stabilizing transformation of the dds object on
# demand; the result is cached in values$vst_obj for the signature heatmap
observeEvent(input$sig_button_computevst, {
  withProgress(
    message = "Computing the variance stabilized transformed data...",
    detail = "This step can take a little while",
    value = 0,
    {
      values$vst_obj <- vst(values$dds_obj)
    }
  )
})
# UI: dropdown to pick one of the loaded gene signatures
# (rendered only once signatures are available)
output$sig_ui_selectsig <- renderUI({
  if (!is.null(values$gene_signatures)) {
    return(selectizeInput("sig_selectsig",
      label = "Select the gene signature",
      choices = NULL, selected = NULL, multiple = FALSE
    ))
  } else {
    return(NULL)
  }
})
# Populate the signature selector server-side (faster with many choices)
observe({
  updateSelectizeInput(session = session, inputId = "sig_selectsig", choices = c(Choose = "", names(values$gene_signatures)), server = TRUE)
})
# Print the member gene ids of the currently selected signature
output$sig_sigmembers <- renderPrint({
  values$gene_signatures[[input$sig_selectsig]]
})
# UI: choose which colData columns should decorate the signature heatmap
output$sig_ui_annocoldata <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  selectizeInput("sig_annocoldata",
    label = "Select the colData to decorate",
    choices = names(colData(values$dds_obj)),
    selected = NULL, multiple = TRUE
  )
})
# Identifier types offered for matching: a standard set, extended with all
# keytypes of the species' annotation package once a species is chosen.
# Shared by the two selectors below (previously duplicated verbatim).
idtype_choices <- function() {
  std_choices <- c("ENSEMBL", "ENTREZID", "REFSEQ", "SYMBOL")
  if (input$speciesSelect != "") {
    annopkg <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
    pkg_choices <- keytypes(get(annopkg))
    std_choices <- union(std_choices, pkg_choices)
  }
  std_choices
}
# UI: id type used by the rownames of the dds object
output$sig_ui_id_data <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  validate(
    need(!is.null(input$speciesSelect), message = "Please specify the species in the Data Setup panel")
  )
  selectInput("sig_id_data", "select the id type in your dds data", choices = idtype_choices())
})
# UI: id type used by the loaded gene signatures
output$sig_ui_id_sigs <- renderUI({
  if (is.null(values$gene_signatures)) {
    return(NULL)
  }
  validate(
    need(!is.null(input$speciesSelect), message = "Please specify the species in the Data Setup panel")
  )
  selectInput("sig_id_sigs", "select the id type in your signatures", choices = idtype_choices())
})
# All locally installed OrgDb annotation packages (e.g. org.Hs.eg.db).
# installed.packages() can be slow on systems with many libraries, so it
# is called only once (the original scanned the library twice).
available_orgdb <- grep(
  pattern = "^org.*db$",
  rownames(installed.packages()),
  value = TRUE
)
# UI: selector for the OrgDb package used to match identifiers
output$sig_ui_orgdbpkg <- renderUI({
  selectInput("sig_orgdbpkg", "Select the organism package for matching",
    choices = c("", available_orgdb), selected = ""
  )
})
# When the species changes, preselect its annotation package if installed
observeEvent(input$speciesSelect, {
  suggested_orgdb <- annoSpecies_df$pkg[annoSpecies_df$species == input$speciesSelect]
  if (suggested_orgdb %in% available_orgdb) {
    updateSelectInput(session, inputId = "sig_orgdbpkg", selected = suggested_orgdb)
  }
})
# Build the id conversion vector (dds rownames -> signature id type) with
# the selected OrgDb package; stored in values$anno_vec.
observeEvent(input$sig_convert_setup, {
  # require() returns FALSE rather than erroring when the package cannot be
  # loaded; guard explicitly so the user gets a clear notification instead
  # of a cryptic downstream get() failure.
  if (!require(input$sig_orgdbpkg, character.only = TRUE, quietly = TRUE)) {
    showNotification(
      paste0("Could not load the annotation package ", input$sig_orgdbpkg),
      type = "error"
    )
    return(invisible(NULL))
  }
  withProgress(
    message = "Matching the identifiers",
    detail = "Locating package",
    value = 0,
    {
      incProgress(0.1, detail = "Matching identifiers")
      # the OrgDb object is exported under the same name as its package
      x <- get(input$sig_orgdbpkg)
      values$anno_vec <- mapIds(x, rownames(values$dds_obj),
        column = input$sig_id_sigs,
        keytype = input$sig_id_data
      )
    }
  )
})
# Quick sanity check: show the head of the id conversion vector
output$sig_convcheck <- renderPrint({
  head(values$anno_vec)
})
# Heatmap of the selected gene signature on the vst-transformed data;
# all prerequisites (signatures, vst, conversion vector, and - when
# restricting to DE genes - the results object) must be available first
output$sig_heat <- renderPlot({
  validate(
    need(!is.null(values$gene_signatures), message = "Please provide some gene signatures in gmt format"),
    need(!is.null(values$vst_obj), message = "Compute the vst transformed data"),
    need(!is.null(values$anno_vec), message = "Setup the conversion between data ids and signature ids"),
    need((!is.null(values$res_obj) | !input$sig_useDEonly),
      message = "Please compute the results first if you want to subset to DE genes only"
    ),
    need(input$sig_selectsig != "", message = "Select a signature")
  )
  # print() is required for pheatmap-based plots inside renderPlot
  print(
    sig_heatmap(
      values$vst_obj,
      my_signature = values$gene_signatures[[input$sig_selectsig]],
      res_data = values$res_obj,
      FDR = input$FDR,
      de_only = input$sig_useDEonly,
      annovec = values$anno_vec,
      # anno_colData = colData(values$vst_obj)[,input$sig_annocoldata, drop = FALSE],
      title = names(values$gene_signatures)[match(input$sig_selectsig, names(values$gene_signatures))],
      cluster_rows = input$sig_clusterrows,
      cluster_cols = input$sig_clustercols,
      center_mean = input$sig_centermean,
      scale_row = input$sig_scalerow
    )
  )
})
# server ui update/observers --------------------------------------------------------
# UI: covariate(s) of the dds colData used to group/colour the plots
output$color_by <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  poss_covars <- names(colData(values$dds_obj))
  # c(NULL, x) is identical to x, so the NULL placeholder was dropped
  selectInput("color_by",
    label = "Group/color by: ",
    choices = poss_covars, selected = NULL, multiple = TRUE
  )
})
# this trick speeds up the populating of the select(ize) input widgets,
# see http://stackoverflow.com/questions/38438920/shiny-selectinput-very-slow-on-larger-data-15-000-entries-in-browser
# Populate the gene id selector server-side (fast even with many genes)
observe({
  updateSelectizeInput(
    session = session,
    inputId = "avail_ids",
    choices = c(Choose = "", rownames(values$dds_obj)),
    server = TRUE
  )
})
# Populate the gene symbol selector, mapping dds rownames to gene names
# via the annotation object
observe({
  updateSelectizeInput(
    session = session,
    inputId = "avail_symbols",
    choices = c(Choose = "", values$annotation_obj$gene_name[match(rownames(values$dds_obj), values$annotation_obj$gene_id)]),
    server = TRUE
  )
})
# Gene picker: offer gene symbols when an annotation object is available,
# otherwise fall back to the raw row identifiers
output$available_genes <- renderUI({
  if (is.null(values$annotation_obj)) {
    selectizeInput("avail_ids",
      label = "Select the gene(s) of interest - ids",
      choices = NULL, selected = NULL, multiple = TRUE
    )
  } else {
    selectizeInput("avail_symbols",
      label = "Select the gene(s) of interest",
      choices = NULL, selected = NULL, multiple = TRUE
    )
  }
})
# Factors appearing in the design formula, most recently added first
design_factors <- reactive({
  rev(attributes(terms.formula(design(values$dds_obj)))$term.labels)
})
# UI: pick the experimental factor the contrast will be built upon
output$choose_fac <- renderUI({
  selectInput("choose_expfac",
    label = "Choose the experimental factor to build the contrast upon (must be in the design formula)",
    choices = c("", design_factors()), selected = ""
  )
})
# Keep the group/colour selector in sync with the chosen contrast factor
observe({
  updateSelectizeInput(session = session, inputId = "color_by", selected = input$choose_expfac)
})
# server DE results --------------------------------------------------------
# nrl <- reactive
# Informative message shown when the chosen factor has more than two
# levels, i.e. when a likelihood ratio test (LRT) is applicable
output$lrtavailable <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  nrl <- length(levels(colData(values$dds_obj)[, fac1]))
  if (nrl > 2) {
    p("I can perform a LRT test on the chosen factor, select the full and the reduced model")
  }
})
# UI: factor(s) for the full model of the LRT; rendered only when the
# chosen factor has more than two levels
output$lrtfull <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  nrl <- length(levels(colData(values$dds_obj)[, fac1]))
  if (nrl > 2) {
    selectInput("choose_lrt_full",
      label = "Choose the factors for the full model",
      choices = c("", design_factors()), selected = "", multiple = TRUE
    )
  }
})
# UI: factor(s) for the reduced model of the LRT; rendered only when the
# chosen factor has more than two levels
output$lrtreduced <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  chosen_fac <- input$choose_expfac
  n_levels <- length(levels(colData(values$dds_obj)[, chosen_fac]))
  if (n_levels > 2) {
    tagList(
      selectInput("choose_lrt_reduced",
        label = "Choose the factor(s) for the reduced model",
        choices = c("", design_factors()), selected = "", multiple = TRUE
      ),
      p("If left blank, the formula for the reduced model will be '~ 1'")
    )
  }
})
# UI: button to launch the LRT; rendered only when the chosen factor has
# more than two levels
output$runlrt <- renderUI({
  if (is.null(values$dds_obj)) {
    return(NULL)
  }
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  nrl <- length(levels(colData(values$dds_obj)[, fac1]))
  if (nrl > 2) {
    actionButton("button_runlrt", label = "(re)Run LRT for the dataset", class = "btn btn-primary")
  }
})
# Run DESeq with test = "LRT" using the user-specified full and reduced
# models; the DESeqDataSet is stored in values$ddslrt and the results in
# values$res_obj (with gene symbols added when annotation is available)
observeEvent(input$button_runlrt, {
  withProgress(
    message = "Computing the LRT results...",
    detail = "This step can take a little while",
    value = 0,
    {
      # assemble the model formulas from the selected factors
      lrt_full_model <- as.formula(paste0("~", paste(input$choose_lrt_full, collapse = " + ")))
      if (!is.null(input$choose_lrt_reduced)) {
        lrt_reduced_model <- as.formula(
          paste0("~", paste(input$choose_lrt_reduced, collapse = " + "))
        )
      } else {
        # no reduced factors selected: fall back to the intercept-only model
        lrt_reduced_model <- as.formula("~1")
      }
      if (is.null(input$choose_lrt_reduced)) {
        showNotification("Using ~1 as reduced model...", type = "message")
      }
      # NOTE(review): comparing formulas with == relies on deparse-based
      # comparison and is whitespace-sensitive - confirm this matches the
      # intended "full model must equal the design" check
      if (lrt_full_model == design(values$dds_obj)) {
        values$ddslrt <- DESeq(values$dds_obj,
          test = "LRT",
          full = lrt_full_model,
          reduced = lrt_reduced_model
        )
        values$res_obj <- results(values$ddslrt)
        # decorate the results with gene symbols when annotation is loaded
        if (!is.null(values$annotation_obj)) {
          values$res_obj$symbol <-
            values$annotation_obj$gene_name[match(
              rownames(values$res_obj),
              rownames(values$annotation_obj)
            )]
        }
      } else {
        showNotification(
          ui = paste0(
            "The full model must be equal to the specified design of the object ",
            format(design(values$dds_obj))
          ),
          type = "warning"
        )
      }
    }
  )
})
# Template below: copy this into the report for debugging purposes
# # section title
#
# ```{r setup, include=FALSE}
# knitr::opts_chunk$set(echo = TRUE)
# ```
#
# ## R Markdown
#
# This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>.
#
# When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this:
#
# ```{r cars}
# values$reslrt
# summary(values$reslrt)
#
# deseqresult2DEgenes(values$reslrt)
# plotCounts(dds_airway_lrt,intgroup="cell",gene="ENSG00000262902")
# plotCounts(dds_airway_lrt,intgroup="cell",gene="ENSG00000123243")
# resultsNames(dds_airway_lrt)
# ```
#
#
# ```{r}
# footertemplate()
# ```
## TODO: think about whether we want to allow for a continuous factor in the results;
# if so, build the ui elements accordingly
# UI: numerator level of the contrast, offered only when the chosen
# experimental covariate is a factor
output$fac1 <- renderUI({
  shiny::validate(
    need(
      input$choose_expfac != "",
      "Please select an experimental factor to generate the results"
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  fac1_levels <- levels(fac1_vals)
  if (is.factor(colData(values$dds_obj)[, fac1])) {
    selectInput("fac1_c1", "Select the name of the numerator level for the fold change", choices = c("", fac1_levels), selected = "")
  }
  # selectInput("fac1_c2","c2",choices = fac1_levels)
})
# UI: denominator level of the contrast (must differ from the numerator)
output$fac2 <- renderUI({
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  fac1_levels <- levels(fac1_vals)
  if (is.factor(colData(values$dds_obj)[, fac1])) {
    # selectInput("fac1_c1","c1",choices = fac1_levels)
    selectInput("fac1_c2", "Select the name of the denominator level for the fold change (must be different from the numerator)", choices = c("", fac1_levels), selected = "")
  }
})
# Notify the user when the chosen covariate is numeric/integer, in which
# case no level selectors are shown and results use the coefficient name
output$facnum <- renderPrint({
  shiny::validate(
    need(
      input$choose_expfac != "",
      ""
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  # is.numeric() is TRUE for both integer and double vectors - idiomatic
  # replacement for class(x) %in% c("integer", "numeric"); also reuses
  # fac1_vals instead of subsetting colData a second time
  if (is.numeric(fac1_vals)) {
    print("numeric/integer factor provided")
  }
})
# UI: "Extract the results!" button, gated by a chain of validations -
# a contrast factor must be chosen, two distinct levels selected (for
# factor covariates), and DESeq() must already have been run
output$runresults <- renderUI({
  shiny::validate(
    need(
      input$choose_expfac != "",
      "Select a factor for the contrast first"
    )
  )
  fac1 <- input$choose_expfac
  fac1_vals <- colData(values$dds_obj)[, fac1]
  if (!(class(colData(values$dds_obj)[, fac1]) %in% c("integer", "numeric"))) {
    shiny::validate(
      need(
        input$fac1_c1 != "" & input$fac1_c2 != "" & input$fac1_c1 != input$fac1_c2,
        "Select two different levels of the factor for the contrast"
      )
    )
  }
  # if((class(colData(values$dds_obj)[,fac1]) %in% c("integer","numeric"))){
  #
  #   shiny::validate(
  #     need(input$resu_lfcshrink==FALSE,
  #          "Set the Add the unshrunken MLE to FALSE")
  #   )
  # }
  # DESeq() attaches per-coefficient results metadata - its absence means
  # the model has not been fitted yet
  shiny::validate(
    need(
      "results" %in% mcols(mcols(values$dds_obj))$type,
      "I couldn't find results. you should first run DESeq() with the button up here"
    )
  )
  # if(input$choose_expfac=="" | input$fac1_c1 == "" | input$fac1_c2 == "" | input$fac1_c1 == input$fac1_c2)
  # return(NULL)
  # else
  actionButton("button_runresults", "Extract the results!", icon = icon("spinner"), class = "btn btn-success")
})
# Extract the DE results for the chosen contrast into values$res_obj.
# Factor covariates use a two-level contrast (optionally with IHW-based
# filtering and logFC shrinkage); numeric covariates use the coefficient
# name directly. The previously duplicated lfcShrink/progress section is
# now shared by both the IHW and non-IHW branches.
observeEvent(input$button_runresults, {
  withProgress(
    message = "Computing the results...",
    detail = "DE table on its way!",
    value = 0,
    {
      # handling the experimental covariate correctly to extract the results...
      if (is.factor(colData(values$dds_obj)[, input$choose_expfac])) {
        if (input$resu_ihw) {
          values$res_obj <- results(values$dds_obj,
            contrast = c(input$choose_expfac, input$fac1_c1, input$fac1_c2),
            independentFiltering = input$resu_indfil,
            alpha = input$FDR,
            filterFun = ihw
          )
        } else {
          values$res_obj <- results(values$dds_obj,
            contrast = c(input$choose_expfac, input$fac1_c1, input$fac1_c2),
            independentFiltering = input$resu_indfil,
            alpha = input$FDR
          )
        }
        # optional logFC shrinkage, identical for both branches above
        if (input$resu_lfcshrink) {
          incProgress(amount = 0.15, detail = "Results extracted. Shrinking the logFC now...")
          values$res_obj <- lfcShrink(values$dds_obj,
            contrast = c(input$choose_expfac, input$fac1_c1, input$fac1_c2),
            res = values$res_obj,
            type = "normal"
          )
          incProgress(amount = 0.8, detail = "logFC shrunken, adding annotation info...")
        } else {
          incProgress(amount = 0.9, detail = "logFC left unshrunken, adding annotation info...")
        }
      }
      # numeric/integer covariate: extract by coefficient name
      if (is.numeric(colData(values$dds_obj)[, input$choose_expfac])) {
        values$res_obj <- results(values$dds_obj,
          name = input$choose_expfac,
          independentFiltering = input$resu_indfil,
          alpha = input$FDR
          # , addMLE = input$resu_lfcshrink
        )
      }
      # adding info from the annotation
      if (!is.null(values$annotation_obj)) {
        values$res_obj$symbol <- values$annotation_obj$gene_name[
          match(
            rownames(values$res_obj),
            rownames(values$annotation_obj)
          )
        ]
      }
    }
  )
})
# Text summary of the extracted results at the chosen FDR threshold;
# validates the contrast setup before printing
output$diyres_summary <- renderPrint({
  shiny::validate(
    need(
      input$choose_expfac != "" & input$fac1_c1 != "" & input$fac1_c2 != "" & input$fac1_c1 != input$fac1_c2,
      "Please select the factor to build the contrast upon, and two different levels to build the contrast"
    )
  )
  shiny::validate(
    need(!is.null(values$res_obj), "Parameters selected, please compute the results first")
  )
  # summary(results(values$dds_obj,contrast = c(input$choose_expfac, input$fac1_c1, input$fac1_c2)))
  summary(values$res_obj, alpha = input$FDR)
})
# Show the dds object and its design formula.
# Bugfix: inside a braced renderPrint body only the final expression is
# auto-printed, so the bare `values$dds_obj` line never produced output;
# it is now printed explicitly (matching the print-then-value pattern of
# output$printres below).
output$printdds <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$dds_obj),
      "Please provide a count matrix/dds object"
    )
  )
  print(values$dds_obj)
  design(values$dds_obj)
})
# Show which contrast the results describe plus their summary at the
# FDR chosen via the shiny widget
output$printres <- renderPrint({
  shiny::validate(
    need(
      !is.null(values$res_obj),
      "Please provide a DESeqResults object"
    )
  )
  # strip the "p-value: " prefix from the metadata description to display
  # just the contrast itself
  print(sub(".*p-value: (.*)", "\\1", mcols(values$res_obj, use.names = TRUE)["pvalue", "description"]))
  summary(values$res_obj, alpha = input$FDR) # use fdr shiny widget
})
# Button to snapshot the current results (shown only once results exist)
output$store_result <- renderUI({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  actionButton("button_store_result", "Store current results", class = "btn btn-primary")
})
# Keep a copy of the current results so they can be compared later if
# some parameters are edited
observeEvent(input$button_store_result, {
  values$stored_res <- values$res_obj
})
# Full results table sorted by adjusted p-value, with ENSEMBL links on the
# rownames and gene-symbol links in the symbol column
output$table_res <- DT::renderDataTable({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  mydf <- as.data.frame(values$res_obj[order(values$res_obj$padj), ]) # [1:500,]
  rownames(mydf) <- createLinkENS(rownames(mydf), species = annoSpecies_df$ensembl_db[match(input$speciesSelect, annoSpecies_df$species)]) ## TODO: check what are the species from ensembl and
  ## TODO: add a check to see if wanted?
  mydf$symbol <- createLinkGeneSymbol(mydf$symbol)
  # escape = FALSE so the generated links render as html
  datatable(mydf, escape = FALSE)
})
# server resu diagnostics --------------------------------------------------------
# Diagnostic p-value histogram with a visual estimate of the false
# discovery proportion below the chosen FDR threshold
output$pvals_hist <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  p <- ggplot(res_df, aes_string("pvalue")) +
    geom_histogram(binwidth = 0.01, boundary = 0) +
    theme_bw()
  # for visual estimation of the false discovery proportion in the first bin
  # NOTE(review): binw is deliberately set to the FDR threshold (the
  # "selected bins" are [0, alpha]), not to the 0.01 histogram binwidth
  alpha <- binw <- input$FDR
  # pi0: estimated fraction of true nulls, from the flat right half [0.5, 1]
  pi0 <- 2 * mean(res_df$pvalue > 0.5)
  p <- p + geom_hline(yintercept = pi0 * binw * nrow(res_df), col = "steelblue") +
    geom_vline(xintercept = alpha, col = "red")
  p <- p + ggtitle(
    label = "p-value histogram",
    subtitle = paste0(
      "Expected nulls = ", pi0 * binw * nrow(res_df),
      " - #elements in the selected bins = ", sum(res_df$pvalue < alpha)
    )
  )
  # keep a copy for the report/export functionality
  exportPlots$plot_pvals_hist <- p
  p
})
# p-value histograms stratified by deciles of mean expression, to reveal
# expression-dependent behaviour of the test statistics
output$pvals_hist_strat <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  # bin genes into (roughly) deciles of baseMean
  res_df <- mutate(
    res_df,
    stratum = cut(baseMean,
      include.lowest = TRUE,
      breaks = signif(quantile(baseMean, probs = seq(0, 1, length.out = 10)), 2)
    )
  )
  p <- ggplot(res_df, aes_string("pvalue")) +
    geom_histogram(binwidth = 0.01, boundary = 0) +
    facet_wrap(~stratum) +
    theme_bw()
  p <- p + ggtitle(
    label = "p-value histogram",
    subtitle = "stratified on the different value classes of mean expression values"
  )
  exportPlots$plot_pvals_hist_strat <- p
  p
})
# Schweder-Spjotvoll plot: sorted p-values vs their rank, with the
# Benjamini-Hochberg rejection line at slope FDR/m
output$pvals_ss <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  phi <- input$FDR
  res_df <- mutate(res_df, rank = rank(pvalue))
  m <- nrow(res_df)
  # only the 6000 smallest ranks are drawn to keep the plot readable
  p <- ggplot(
    filter(res_df, rank <= 6000),
    aes_string(x = "rank", y = "pvalue")
  ) +
    geom_line() +
    geom_abline(slope = phi / m, col = "red") +
    theme_bw()
  p <- p + ggtitle(
    label = "Schweder-Spjotvoll plot",
    subtitle = paste0(
      "Intersection point at rank ", with(arrange(res_df, rank), last(which(pvalue <= phi * rank / m)))
    )
  )
  exportPlots$plot_pvals_ss <- p
  p
})
# Histogram of the log2 fold change distribution of the current results
output$logfc_hist <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "")
  )
  res_df <- as.data.frame(values$res_obj)
  res_df <- dplyr::filter(res_df, !is.na(pvalue))
  hist_plot <- ggplot(res_df, aes_string("log2FoldChange")) +
    geom_histogram(binwidth = 0.1) +
    theme_bw() +
    ggtitle(
      "Histogram of the log2 fold changes"
    )
  # keep a copy for the report/export functionality
  exportPlots$plot_logfc_hist <- hist_plot
  hist_plot
})
# Show the design formula of the dds object
output$dds_design <- renderPrint({
  design(values$dds_obj)
})
# Show the names of the fitted coefficients
output$res_names <- renderPrint({
  resultsNames(values$dds_obj)
})
# Show the factors present in the design formula
output$explore_res <- renderPrint({
  expfac <- attributes(terms.formula(design(values$dds_obj)))$term.labels
  expfac # plus, support up to four factors that are either there or not according to the length
})
# MA plot of the current results (mean expression vs log2 fold change)
output$plotma <- renderPlot({
  p <- plot_ma(values$res_obj, annotation_obj = values$annotation_obj, FDR = input$FDR)
  # keep a copy for the report/export functionality
  exportPlots$plot_ma <- p
  p
})
# Zoomed view of the MA plot, restricted to the brushed region; gene
# labels are added only when an annotation object provides the genename
# column. The previously duplicated plot_ma + coord_cartesian call is
# now built once and conditionally decorated.
output$mazoom <- renderPlot({
  if (is.null(input$ma_brush)) {
    # placeholder until the user brushes the main MA plot
    return(ggplot() +
      annotate("text", label = "click and drag to zoom in", 0, 0) +
      theme_bw())
  }
  p <- plot_ma(values$res_obj, annotation_obj = values$annotation_obj, FDR = input$FDR) +
    coord_cartesian(
      xlim = c(input$ma_brush$xmin, input$ma_brush$xmax),
      ylim = c(input$ma_brush$ymin, input$ma_brush$ymax)
    )
  if (!is.null(values$annotation_obj)) {
    p <- p + geom_text(aes_string(label = "genename"), size = input$size_genelabels, hjust = 0.25, vjust = -0.75)
  }
  # keep a copy for the report/export functionality
  exportPlots$plot_mazoom <- p
  p
})
# MA plot with the user-selected genes highlighted; uses symbols when the
# results carry a symbol column, raw ids otherwise
output$ma_highlight <- renderPlot({
  shiny::validate(
    need(!is.null(values$res_obj), message = "Please generate the results object to display the plot and show the combined tables")
  )
  if ("symbol" %in% names(values$res_obj)) {
    p <- plot_ma(values$res_obj,
      intgenes = input$avail_symbols, annotation_obj = values$annotation_obj, FDR = input$FDR
    )
  } else {
    p <- plot_ma(values$res_obj,
      intgenes = input$avail_ids, annotation_obj = values$annotation_obj, FDR = input$FDR
    )
  }
  exportPlots$plot_mahighlight <- p
  p
})
# MA plot highlighting the genes of an uploaded gene list; only supported
# when symbol annotation is available
output$ma_hl_list <- renderPlot({
  if (is.null(values$genelist_ma)) {
    return(NULL)
  }
  if ("symbol" %in% names(values$res_obj)) {
    p <- plot_ma(values$res_obj,
      intgenes = values$genelist_ma$`Gene Symbol`, annotation_obj = values$annotation_obj, FDR = input$FDR
    )
  } else {
    # plot_ma(values$res_obj,
    # intgenes = values$genelist_ma,annotation_obj = values$annotation_obj)
    return(NULL)
  }
  exportPlots$plot_mahllist <- p
  p
})
# Data frame backing the MA plot interactions (brush + click), previously
# constructed twice verbatim in the two reactives below.
# NOTE(review): the DE flag is hard-coded at padj < 0.10 here, independent
# of the input$FDR widget - confirm whether this is intentional.
ma_points_df <- function() {
  mama <- data.frame(mean = values$res_obj$baseMean, lfc = values$res_obj$log2FoldChange, padj = values$res_obj$padj, isDE = ifelse(is.na(values$res_obj$padj), FALSE, values$res_obj$padj < 0.10), ID = rownames(values$res_obj))
  mama$genename <- values$annotation_obj$gene_name[match(mama$ID, rownames(values$annotation_obj))]
  # mama$yesorno <- ifelse(mama$isDE,"yes","no")
  mama$yesorno <- ifelse(mama$isDE, "red", "black")
  mama$logmean <- log10(mama$mean) # log scale matches the plot axes for brushing
  mama
}
# Genes currently inside the MA plot brush
curData <- reactive({
  brushedPoints(ma_points_df(), input$ma_brush, xvar = "logmean", yvar = "lfc")
})
# Single gene closest to the click position in the zoomed MA plot
curDataClick <- reactive({
  nearPoints(ma_points_df(), input$mazoom_click,
    threshold = 20, maxpoints = 1,
    addDist = TRUE
  )
})
# Table of the genes currently captured by the MA plot brush
output$ma_brush_out <- DT::renderDataTable({
  brushed <- curData()
  if (nrow(brushed) == 0) {
    return(NULL)
  }
  datatable(brushed, options = list(pageLength = 100))
})
# Row-wise z-score scaling (per-gene mean 0, sd 1), shared by the static
# and interactive brushed heatmaps below (previously defined twice).
mat_rowscale <- function(x) {
  m <- apply(x, 1, mean, na.rm = TRUE)
  s <- apply(x, 1, sd, na.rm = TRUE)
  return((x - m) / s)
}
# Static heatmap of the raw counts of the genes inside the MA plot brush,
# optionally log2(1 + x) transformed and/or row-scaled
output$heatbrush <- renderPlot({
  if ((is.null(input$ma_brush)) | is.null(values$dds_obj)) {
    return(NULL)
  }
  brushedObject <- curData()
  selectedGenes <- as.character(brushedObject$ID)
  toplot <- assay(values$dds_obj)[selectedGenes, ]
  rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
  if (input$pseudocounts) toplot <- log2(1 + toplot)
  if (input$rowscale) toplot <- mat_rowscale(toplot)
  pheatmap(toplot, cluster_cols = as.logical(input$heatmap_colv))
})
# Interactive (plotly) counterpart of the brushed heatmap
output$hpi_brush <- renderPlotly({
  if ((is.null(input$ma_brush)) | is.null(values$dds_obj)) {
    return(NULL)
  }
  brushedObject <- curData()
  selectedGenes <- as.character(brushedObject$ID)
  toplot <- assay(values$dds_obj)[selectedGenes, ]
  rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
  mycolss <- c("#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#fee090", "#fdae61", "#f46d43", "#d73027", "#a50026") # to be consistent with red/blue usual coding
  if (input$pseudocounts) toplot <- log2(1 + toplot)
  if (input$rowscale) toplot <- mat_rowscale(toplot)
  heatmaply(toplot, Colv = as.logical(input$heatmap_colv), colors = mycolss)
})
# Debug output: id of the gene currently selected by click in the MA plot
output$deb <- renderPrint({
  # curDataClick()
  selectedGene <- curDataClick()$ID
  # selectedGeneSymbol <- cm2$fromgtf[match(selectedGene,rownames(cm2))]
  # # plotCounts(dds_cleaner,)
  # genedata <- plotCounts(dds_cleaner,gene=selectedGene,intgroup = "condition",returnData = T)
  # genedata
  # str(as.character(selectedGene))
  selectedGene
})
# Volcano plot of the current results at the chosen FDR
output$volcanoplot <- renderPlot({
  volcano_p <- plot_volcano(values$res_obj, FDR = input$FDR)
  # keep a copy for the report/export functionality
  exportPlots$plot_volcanoplot <- volcano_p
  volcano_p
})
# server genefinder --------------------------------------------------------
# Boxplot of the counts of the gene clicked in the zoomed MA plot,
# grouped by the covariate(s) selected in the sidebar
output$genefinder_plot <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  if (is.null(input$ma_brush)) {
    return(NULL)
  }
  if (is.null(input$mazoom_click)) {
    # placeholder until a gene is clicked in the zoomed view
    return(ggplot() +
      annotate("text", label = "click to generate the boxplot\nfor the selected gene", 0, 0) +
      theme_bw())
  }
  selectedGene <- as.character(curDataClick()$ID)
  selectedGeneSymbol <- values$annotation_obj$gene_name[match(selectedGene, values$annotation_obj$gene_id)]
  p <- ggplotCounts(values$dds_obj, selectedGene, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  # optionally force the y axis to (almost) start at zero
  if (input$ylimZero_genes) {
    p <- p + ylim(0.1, NA)
  }
  exportPlots$plot_genefinder <- p
  p
})
# Info box with NCBI/ENTREZ information (name, description, summary if
# present) for the gene clicked in the MA plot, plus a link to NCBI
output$rentrez_infobox <- renderUI({
  shiny::validate(
    need(
      (nrow(curDataClick()) > 0),
      "Select a gene first to display additional info (retrieved from the NCBI/ENTREZ db website)"
    )
  )
  shiny::validate(
    need(
      (!is.null(values$cur_species)),
      "Select a species first in the Data Setup panel"
    )
  )
  selectedGene <- as.character(curDataClick()$ID)
  # translate the clicked id to an ENTREZ id for the geneinfo lookup
  selgene_entrez <- mapIds(
    get(annoSpecies_df[values$cur_species, ]$pkg),
    selectedGene, "ENTREZID", input$idtype
  )
  fullinfo <- geneinfo(selgene_entrez)
  ## TODO: build up link manually to paste under the info!
  #
  link_pubmed <- paste0(
    '<a href="http://www.ncbi.nlm.nih.gov/gene/?term=',
    selgene_entrez,
    '" target="_blank" >Click here to see more at NCBI</a>'
  )
  # omit the summary paragraph when NCBI provides none
  if (fullinfo$summary == "") {
    return(HTML(paste0(
      "<b>", fullinfo$name, "</b><br/><br/>",
      fullinfo$description, "<br/><br/>",
      link_pubmed
    )))
  } else {
    return(HTML(paste0(
      "<b>", fullinfo$name, "</b><br/><br/>",
      fullinfo$description, "<br/><br/>",
      fullinfo$summary, "<br/><br/>",
      link_pubmed
    )))
  }
})
# Combined table of DE results and normalized counts, optionally subset
# to the genes selected in the sidebar gene picker
cur_combires <- reactive({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  normCounts <- as.data.frame(counts(estimateSizeFactors(values$dds_obj), normalized = TRUE))
  normCounts$id <- rownames(normCounts)
  res_df <- deseqresult2tbl(values$res_obj)
  combi_obj <- dplyr::inner_join(res_df, normCounts, by = "id")
  combi_obj$symbol <- values$annotation_obj$gene_name[match(combi_obj$id, values$annotation_obj$gene_id)]
  # selection comes as symbols when annotation is present, raw ids otherwise
  if ("symbol" %in% names(values$res_obj)) {
    sel_genes <- input$avail_symbols
    sel_genes_ids <- values$annotation_obj$gene_id[match(sel_genes, values$annotation_obj$gene_name)]
  } else {
    sel_genes_ids <- input$avail_ids
  }
  if (length(sel_genes_ids) > 0) {
    combi_obj[match(sel_genes_ids, combi_obj$id), ]
  } else {
    combi_obj
  }
})
output$table_combi <- DT::renderDataTable({
  datatable(cur_combires(), options = list(scrollX = TRUE))
})
# Reactive table joining the DE results with normalized counts, restricted to
# the genes uploaded via the gene list (values$genelist_ma) when one is given.
# Returns NULL until a results object is available.
cur_combires_list <- reactive({
  if (is.null(values$res_obj)) {
    return(NULL)
  }
  # normalized counts, with the gene id as an explicit column for the join
  normCounts <- as.data.frame(counts(estimateSizeFactors(values$dds_obj), normalized = TRUE))
  normCounts$id <- rownames(normCounts)
  res_df <- deseqresult2tbl(values$res_obj)
  combi_obj <- dplyr::inner_join(res_df, normCounts, by = "id")
  combi_obj$symbol <- values$annotation_obj$gene_name[match(combi_obj$id, values$annotation_obj$gene_id)]
  if ("symbol" %in% names(values$res_obj)) {
    # uploaded list holds gene symbols: translate them to ids via the annotation
    sel_genes <- values$genelist_ma$`Gene Symbol`
    sel_genes_ids <- values$annotation_obj$gene_id[match(sel_genes, values$annotation_obj$gene_name)]
  } else {
    # BUGFIX: this branch previously left sel_genes_ids undefined (the
    # assignment was commented out), so the length() check below failed with
    # "object 'sel_genes_ids' not found". Without symbol annotation the
    # uploaded list entries are taken as gene ids directly.
    sel_genes_ids <- values$genelist_ma$`Gene Symbol`
  }
  if (length(sel_genes_ids) > 0) {
    combi_obj[match(sel_genes_ids, combi_obj$id), ]
  } else {
    combi_obj
  }
})
output$table_combi_list <- DT::renderDataTable({
if (is.null(values$genelist_ma)) {
return(NULL)
}
datatable(cur_combires_list(), options = list(scrollX = TRUE))
})
# Boxplot of normalized counts for the FIRST selected gene in the Gene Finder
# tab. Genes can be picked either by symbol (input$avail_symbols) or by id
# (input$avail_ids); symbols take precedence and are mapped to ids via the
# annotation object.
output$bp1 <- renderPlot({
  shiny::validate(
    need(
      length(input$color_by) > 0,
      "Select an experimental factor in the Group/color by element in the sidebar"
    )
  )
  shiny::validate(
    need(
      (length(input$avail_symbols) > 0 | length(input$avail_ids) > 0),
      "Select at least a gene to plot"
    )
  )
  if (length(input$avail_symbols) > 0) {
    # got the symbol, look for the id
    mysym <- input$avail_symbols[1]
    myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
  } else {
    myid <- input$avail_ids[1]
    # look up the symbol only if an annotation is available.
    # BUGFIX: this used to assign to 'mysim' (typo for 'mysym'); the value is
    # currently informational only and not used further down.
    if (!is.null(values$annotation_obj)) {
      mysym <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
    } else {
      mysym <- ""
    }
  }
  p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
  # optional fixed lower y-limit of 0.1
  if (input$ylimZero_genefinder) {
    p <- p + ylim(0.1, NA)
  }
  # stash for the export tab, then return for on-screen rendering
  exportPlots$plotbp1 <- p
  p
})
output$bp2 <- renderPlot({
shiny::validate(
need(
length(input$color_by) > 0,
"Select an experimental factor in the Group/color by element in the sidebar"
)
)
shiny::validate(
need(
(length(input$avail_symbols) > 1 | length(input$avail_ids) > 1),
"Select at least a second gene to plot"
)
)
if (length(input$avail_symbols) > 0) {
# got the symbol, look for the id
mysym <- input$avail_symbols[2]
myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
} else {
myid <- input$avail_ids[2]
# make it optional if annot is available
if (!is.null(values$annotation_obj)) {
mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
} else {
mysim <- ""
}
}
p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
if (input$ylimZero_genefinder) {
p <- p + ylim(0.1, NA)
}
exportPlots$plotbp2 <- p
p
})
output$bp3 <- renderPlot({
shiny::validate(
need(
length(input$color_by) > 0,
"Select an experimental factor in the Group/color by element in the sidebar"
)
)
shiny::validate(
need(
(length(input$avail_symbols) > 2 | length(input$avail_ids) > 2),
"Select at least a third gene to plot"
)
)
if (length(input$avail_symbols) > 0) {
# got the symbol, look for the id
mysym <- input$avail_symbols[3]
myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
} else {
myid <- input$avail_ids[3]
# make it optional if annot is available
if (!is.null(values$annotation_obj)) {
mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
} else {
mysim <- ""
}
}
p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
if (input$ylimZero_genefinder) {
p <- p + ylim(0.1, NA)
}
exportPlots$plotbp3 <- p
p
})
output$bp4 <- renderPlot({
shiny::validate(
need(
length(input$color_by) > 0,
"Select an experimental factor in the Group/color by element in the sidebar"
)
)
shiny::validate(
need(
(length(input$avail_symbols) > 3 | length(input$avail_ids) > 3),
"Select at least a fourth gene to plot"
)
)
if (length(input$avail_symbols) > 0) {
# got the symbol, look for the id
mysym <- input$avail_symbols[4]
myid <- values$annotation_obj$gene_id[match(mysym, values$annotation_obj$gene_name)]
} else {
myid <- input$avail_ids[4]
# make it optional if annot is available
if (!is.null(values$annotation_obj)) {
mysim <- values$annotation_obj$gene_name[match(myid, values$annotation_obj$gene_id)]
} else {
mysim <- ""
}
}
p <- ggplotCounts(values$dds_obj, myid, intgroup = input$color_by, annotation_obj = values$annotation_obj)
if (input$ylimZero_genefinder) {
p <- p + ylim(0.1, NA)
}
exportPlots$plotbp4 <- p
p
})
# server report editor --------------------------------------------------------
### yaml generation
# Assemble the YAML front matter for the editable report from the editor
# inputs (title, author, toc / section numbering / theme); the date field is
# stamped with Sys.Date() when the reactive is evaluated.
# NOTE(review): single quotes typed into the title/author inputs would break
# the quoting used here -- confirm whether inputs need escaping.
rmd_yaml <- reactive({
  paste0("---",
    "\ntitle: '", input$report_title,
    "'\nauthor: '", input$report_author,
    "'\ndate: '", Sys.Date(),
    "'\noutput:\n html_document:\n toc: ", input$report_toc, "\n number_sections: ", input$report_ns, "\n theme: ", input$report_theme, "\n---\n\n",
    collapse = "\n"
  )
})
# rmd_full <- reactive({
# paste0(rmd_yaml(),"\n",
# readLines("reportTemplate.Rmd"))
# })
# output$loadedRmd <- renderPrint({
# # rmd_yaml() # or rmd_full()
# paste0(
# # rmd_yaml(),
# paste0(readLines("reportTemplate.Rmd"),collapse = "\n"))
# # head(paste0(rmd_yaml(),
# # readLines("reportTemplate.Rmd")),collapse="\n")
# })
### loading report template
# update aceEditor module
observe({
# loading rmd report from disk
inFile <- system.file("extdata", "irt.Rmd", package = "ideal")
isolate({
if (!is.null(inFile) && !is.na(inFile)) {
rmdfilecontent <- paste0(readLines(inFile), collapse = "\n")
shinyAce::updateAceEditor(session, "acereport_rmd", value = rmdfilecontent)
}
})
})
### ace editor options
observe({
autoComplete <- if (input$enableAutocomplete) {
if (input$enableLiveCompletion) "live" else "enabled"
} else {
"disabled"
}
updateAceEditor(session, "acereport_rmd", autoComplete = autoComplete, theme = input$theme, mode = input$mode)
# updateAceEditor(session, "plot", autoComplete = autoComplete)
})
# Enable/Disable R code completion
rmdOb <- aceAutocomplete("acereport_rmd")
observe({
if (input$enableRCompletion) {
rmdOb$resume()
} else {
rmdOb$suspend()
}
})
## currently not working as I want with rmarkdown::render, but can leave it like this - the yaml will be taken in the final version only
output$knitDoc <- renderUI({
input$updatepreview_button
## TODO: this does what it should do but messes up with CSS and so
#
# # error_I <- 0
# withProgress(message = 'Processing', value = 0, {
# isolate({
# fileConn<-file("www/tmp.Rmd")
# tmp_content <-
# paste0(rmd_yaml(),
# input$acereport_rmd,collapse = "\n")
# writeLines(tmp_content, fileConn)
# close(fileConn)
# incProgress(0.5, detail = "Synthesizing report...")
# # tryCatch({
# rmarkdown::render(input = "www/tmp.Rmd", output_format = "html_document", output_file = "../www/Rmd_preview.html",quiet = TRUE) #},
# # error = function(e) {
# # # error_I <<- 1
# # }
# # )
# })
# setProgress(1)
# })
#
# return(isolate(includeHTML("www/Rmd_preview.html")))
# # return(isolate(includeHTML("<iframe src='www/Rmd_preview.html', width='100%', height='800'></iframe>")))
# # return(isolate(HTML("<iframe src='www/Rmd_preview.html', width='100%', height='800'></iframe>")))
return(
withProgress(
{
# temporarily switch to the temp dir, in case you do not have write
# permission to the current working directory
owd <- setwd(tempdir())
on.exit(setwd(owd))
tmp_content <- paste0(rmd_yaml(), input$acereport_rmd, collapse = "\n")
isolate(HTML(knit2html(text = tmp_content, fragment.only = TRUE, quiet = TRUE)))
},
message = "Updating the report in the app body",
detail = "This can take some time"
)
)
})
# Generate and Download module
output$saveRmd <- downloadHandler(
filename = function() {
if (input$rmd_dl_format == "rmd") {
"report.Rmd"
} else {
"report.html" # TODO: maybe add Sys.time() to the filename to improve traceability?
}
},
content = function(file) {
# knit2html(text = input$rmd, fragment.only = TRUE, quiet = TRUE))
tmp_content <-
paste0(rmd_yaml(),
input$acereport_rmd,
collapse = "\n"
)
if (input$rmd_dl_format == "rmd") {
cat(tmp_content, file = file, sep = "\n")
} else {
if (input$rmd_dl_format == "html") {
# temporarily switch to the temp dir, in case you do not have write
# permission to the current working directory
owd <- setwd(tempdir())
on.exit(setwd(owd))
cat(tmp_content, file = "ideal_tempreport.Rmd", sep = "\n")
withProgress(rmarkdown::render(
input = "ideal_tempreport.Rmd",
output_file = file,
# fragment.only = TRUE,
quiet = TRUE
),
message = "Generating the html report",
detail = "This can take some time"
)
}
}
}
)
# iSEE export ------------------------------------------------------------
output$ui_iSEEexport <- renderUI({
validate(
need(((!is.null(values$dds_obj)) & (!is.null(values$res_obj))),
message = "Please build and compute the dds and res object to export as
SummarizedExperiment for use in iSEE"
)
)
return(
tagList(
textInput(
"se_export_name",
label = "Choose a filename for the serialized .rds object",
value = "se_ideal_toiSEE.rds"
),
downloadButton(
"button_iSEEexport",
label = "Export as serialized SummarizedExperiment"
)
)
)
})
output$button_iSEEexport <- downloadHandler(
filename = function() {
# paste0("se_ideal_toiSEE_",gsub(" ","_",gsub("-","",gsub(":","-",as.character(Sys.time())))),".rds")
input$se_export_name
}, content = function(file) {
se <- wrapup_for_iSEE(values$dds_obj, values$res_obj)
saveRDS(se, file = file)
}
)
# GeneTonic export -------------------------------------------------------
output$ui_GeneTonicexport <- renderUI({
validate(
need(((!is.null(values$dds_obj)) & (!is.null(values$res_obj))),
message = "Please build and compute the dds and res object to export as
a list for use in GeneTonic"
),
need(!is.null(values$annotation_obj),
message = "Please provide or obtain an annotation object")
)
go_tbls_available <- c("topgo_updown",
"topgo_down",
"topgo_up")
return(
tagList(
textInput(
"gtl_exportgt_name",
label = "Choose a filename for the serialized .rds object",
value = "gtl_ideal_toGeneTonic.rds"
),
selectInput(
"gotbl_forgt",
label = "Select which GO table to export (topGO output supported)",
choices = go_tbls_available[
unlist(lapply(go_tbls_available, function(arg) {
!is.null(values[[arg]])
}))
]
),
downloadButton(
"button_GeneTonicexport",
label = "Export as serialized list for GeneTonic"
)
)
)
})
output$button_GeneTonicexport <- downloadHandler(
filename = function() {
input$gtl_exportgt_name
}, content = function(file) {
dds_obj <- values$dds_obj
res_obj <- values$res_obj
res_obj$SYMBOL <- res_obj$symbol
res_enrich <- shake_topGOtableResult(values[[input$gotbl_forgt]])
anno_df <- values$annotation_obj
gtl <- list(dds = dds_obj,
res_de = res_obj,
res_enrich = res_enrich,
annotation_obj = anno_df)
saveRDS(gtl, file = file)
}
)
# server state saving --------------------------------------------------------
### to environment
observe({
if (is.null(input$task_exit_and_save) || input$task_exit_and_save == 0) {
return()
}
# quit R, unless you are running an interactive session
if (interactive()) {
# flush input and values to the environment in two distinct objects (to be reused later?)
isolate({
# ideal_env <<- new.env(parent = emptyenv())
cur_inputs <- reactiveValuesToList(input)
cur_values <- reactiveValuesToList(values)
tstamp <- gsub(" ", "_", gsub("-", "", gsub(":", "-", as.character(Sys.time()))))
# myvar <- "frfr"
# assign("test", myvar, ideal_env)
# better practice rather than assigning to global env - notify users of this
assign(paste0("ideal_inputs_", tstamp), cur_inputs, envir = ideal_env)
assign(paste0("ideal_values_", tstamp), cur_values, envir = ideal_env)
showNotification(
paste0(
"ideal closed, state successfully saved to the R environment. ",
"You can access these values by searching the `ideal_env` object."
),
type = "message"
)
message("ideal closed, state successfully saved to the R environment.")
message(" You can access these values by searching the `ideal_env` object.")
stopApp("ideal closed, state successfully saved to global R environment.")
# assign(paste0("ideal_inputs_",
# gsub(" ","_",gsub("-","",gsub(":","-",as.character(Sys.time()))))),
# reactiveValuesToList(input), envir = .GlobalEnv)
# assign(paste0("ideal_values_",
# gsub(" ","_",gsub("-","",gsub(":","-",as.character(Sys.time()))))),
# reactiveValuesToList(values), envir = .GlobalEnv)
# stopApp("ideal closed, state successfully saved to global R environment.")
})
} else {
stopApp("ideal closed")
q("no")
}
})
### to binary data
saveState <- function(filename) {
isolate({
LiveInputs <- reactiveValuesToList(input)
# values[names(LiveInputs)] <- LiveInputs
r_data <- reactiveValuesToList(values)
save(LiveInputs, r_data, file = filename)
message("list of inputs and reactive values correctly saved as binary data")
})
}
output$task_state_save <- downloadHandler(
filename = function() {
paste0("idealState_", gsub(" ", "_", gsub("-", "", gsub(":", "-", as.character(Sys.time())))), ".RData")
},
content = function(file) {
saveState(file)
}
)
output$sessioninfo <- renderPrint({
sessionInfo()
})
# server export plots and tables --------------------------------------------------------
## here, all export of plots and tables
output$download_plot_pvals_hist <- downloadHandler(filename = function() {
input$filename_plot_pvals_hist
}, content = function(file) {
ggsave(file, exportPlots$plot_pvals_hist,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_logfc_hist <- downloadHandler(filename = function() {
input$filename_plot_logfc_hist
}, content = function(file) {
ggsave(file, exportPlots$plot_logfc_hist,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_ma <- downloadHandler(filename = function() {
input$filename_plot_ma
}, content = function(file) {
ggsave(file, exportPlots$plot_ma,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_mazoom <- downloadHandler(filename = function() {
input$filename_plot_mazoom
}, content = function(file) {
ggsave(file, exportPlots$plot_mazoom,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_mahighlight <- downloadHandler(filename = function() {
input$filename_plot_mahighlight
}, content = function(file) {
ggsave(file, exportPlots$plot_mahighlight,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_mahllist <- downloadHandler(filename = function() {
input$filename_plot_mahllist
}, content = function(file) {
ggsave(file, exportPlots$plot_mahllist,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_volcanoplot <- downloadHandler(filename = function() {
input$filename_plot_volcanoplot
}, content = function(file) {
ggsave(file, exportPlots$plot_volcanoplot,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plot_genefinder <- downloadHandler(filename = function() {
input$filename_plot_genefinder
}, content = function(file) {
ggsave(file, exportPlots$plot_genefinder,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plotbp1 <- downloadHandler(filename = function() {
input$filename_plotbp1
}, content = function(file) {
ggsave(file, exportPlots$plotbp1,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plotbp2 <- downloadHandler(filename = function() {
input$filename_plotbp2
}, content = function(file) {
ggsave(file, exportPlots$plotbp2,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plotbp3 <- downloadHandler(filename = function() {
input$filename_plotbp3
}, content = function(file) {
ggsave(file, exportPlots$plotbp3,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
output$download_plotbp4 <- downloadHandler(filename = function() {
input$filename_plotbp4
}, content = function(file) {
ggsave(file, exportPlots$plotbp4,
width = input$export_width,
height = input$export_height, units = "cm"
)
})
# tbls
output$downloadTblResu <- downloadHandler(
filename = function() {
"table_results.csv"
},
content = function(file) {
mydf <- as.data.frame(values$res_obj[order(values$res_obj$padj), ])
write.csv(mydf, file)
}
)
output$downloadTblMabrush <- downloadHandler(
filename = function() {
"table_mabrush.csv"
},
content = function(file) {
write.csv(curData(), file)
}
)
output$downloadTblCombi <- downloadHandler(
filename = function() {
"table_combi.csv"
},
content = function(file) {
write.csv(cur_combires(), file)
}
)
output$downloadTblCombiList <- downloadHandler(
filename = function() {
"table_combilist.csv"
},
content = function(file) {
write.csv(cur_combires_list(), file)
}
)
# base graphics plots
output$download_plot_heatbrush <- downloadHandler(filename = function() {
input$filename_plot_heatbrush
}, content = function(file) {
pdf(file)
brushedObject <- curData()
selectedGenes <- as.character(brushedObject$ID)
toplot <- assay(values$dds_obj)[selectedGenes, ]
rownames(toplot) <- values$annotation_obj$gene_name[match(rownames(toplot), rownames(values$annotation_obj))]
if (input$pseudocounts) toplot <- log2(1 + toplot)
mat_rowscale <- function(x) {
m <- apply(x, 1, mean, na.rm = TRUE)
s <- apply(x, 1, sd, na.rm = TRUE)
return((x - m) / s)
}
if (input$rowscale) toplot <- mat_rowscale(toplot)
pheatmap(toplot, cluster_cols = as.logical(input$heatmap_colv))
dev.off()
})
output$download_plot_vennlists <- downloadHandler(filename = function() {
input$filename_plot_vennlists
}, content = function(file) {
pdf(file)
gplots::venn(gll())
dev.off()
})
output$download_plot_upsetlists <- downloadHandler(filename = function() {
input$filename_plot_upsetlists
}, content = function(file) {
pdf(file)
UpSetR::upset(fromList(gll()))
dev.off()
})
## GO tbls topGO
output$downloadGOTbl_up <- downloadHandler(
filename = function() {
"table_GOresults_up.csv"
},
content = function(file) {
write.csv(values$topgo_up, file)
}
)
output$downloadGOTbl_down <- downloadHandler(
filename = function() {
"table_GOresults_down.csv"
},
content = function(file) {
write.csv(values$topgo_down, file)
}
)
output$downloadGOTbl_updown <- downloadHandler(
filename = function() {
"table_GOresults_updown.csv"
},
content = function(file) {
write.csv(values$topgo_updown, file)
}
)
output$downloadGOTbl_l1 <- downloadHandler(
filename = function() {
"table_GOresults_list1.csv"
},
content = function(file) {
write.csv(values$topgo_list1, file)
}
)
output$downloadGOTbl_l2 <- downloadHandler(
filename = function() {
"table_GOresults_list2.csv"
},
content = function(file) {
write.csv(values$topgo_list2, file)
}
)
}) # end of server function definition
# nocov end
# launch the app!
shinyApp(ui = ideal_ui, server = ideal_server)
}
|
## EDA
# Exploratory analysis of the ToothGrowth dataset: tooth length (len) by
# supplement type (supp) and dose, followed by Welch two-sample t-tests.
data("ToothGrowth")
head(ToothGrowth)
summary(ToothGrowth$len)
unique(ToothGrowth$supp)
table(ToothGrowth$supp)
summary(ToothGrowth$dose)
unique(ToothGrowth$dose)
nrow(ToothGrowth)
sapply(ToothGrowth, class)
# dose only takes the values 0.5 / 1 / 2, so treat it as a factor
ToothGrowth$dose <- as.factor(ToothGrowth$dose)
## Plot tooth length vs dose, faceted by delivery method (fig 1)
ggplot(aes(x = dose, y = len), data = ToothGrowth) +
  geom_boxplot(aes(fill = dose)) +
  xlab("Dose Amount") +
  ylab("Tooth Length") +
  facet_grid(~ supp) +
  ggtitle("Tooth Length vs. Dose Amount \nby Delivery Method") +
  theme(plot.title = element_text(lineheight = .8, face = "bold"))
## Plot tooth length vs delivery method, faceted by dose (fig 2)
ggplot(aes(x = supp, y = len), data = ToothGrowth) +
  geom_boxplot(aes(fill = supp)) +
  xlab("Supplement Delivery") +
  ylab("Tooth Length") +
  facet_grid(~ dose) +
  ggtitle("Tooth Length vs. Delivery Method \nby Dose Amount") +
  theme(plot.title = element_text(lineheight = .8, face = "bold"))
## t-tests: supplement effect, then pairwise dose comparisons on two-level
## subsets (the formula interface of t.test needs exactly two groups)
t.test(len ~ supp, data = ToothGrowth)
ToothGrowth_sub1 <- subset(ToothGrowth, ToothGrowth$dose %in% c(1.0, 0.5))
t.test(len ~ dose, data = ToothGrowth_sub1)
ToothGrowth_sub2 <- subset(ToothGrowth, ToothGrowth$dose %in% c(0.5, 2.0))
t.test(len ~ dose, data = ToothGrowth_sub2)
ToothGrowth_sub3 <- subset(ToothGrowth, ToothGrowth$dose %in% c(2.0, 1.0))
# BUGFIX: this line previously reran the test on ToothGrowth_sub2, so the
# dose 1.0 vs 2.0 comparison was never actually performed
t.test(len ~ dose, data = ToothGrowth_sub3)
|
/t - test part 2.R
|
no_license
|
Mo7amed2/Statistical-Inference-Course-Project
|
R
| false
| false
| 1,264
|
r
|
## EDA: exploratory analysis of the ToothGrowth dataset -- tooth length (len)
## by supplement type (supp) and dose, followed by Welch two-sample t-tests.
data("ToothGrowth")
head(ToothGrowth)
summary(ToothGrowth$len)
unique(ToothGrowth$supp)
table(ToothGrowth$supp)
summary(ToothGrowth$dose)
unique(ToothGrowth$dose)
nrow(ToothGrowth)
sapply(ToothGrowth, class)
# dose only takes the values 0.5 / 1 / 2, so treat it as a factor
ToothGrowth$dose <- as.factor(ToothGrowth$dose)
## Plot tooth length vs dose, faceted by delivery method (fig 1)
ggplot(aes(x = dose, y = len), data = ToothGrowth) +
  geom_boxplot(aes(fill = dose)) +
  xlab("Dose Amount") +
  ylab("Tooth Length") +
  facet_grid(~ supp) +
  ggtitle("Tooth Length vs. Dose Amount \nby Delivery Method") +
  theme(plot.title = element_text(lineheight = .8, face = "bold"))
## Plot tooth length vs delivery method, faceted by dose (fig 2)
ggplot(aes(x = supp, y = len), data = ToothGrowth) +
  geom_boxplot(aes(fill = supp)) +
  xlab("Supplement Delivery") +
  ylab("Tooth Length") +
  facet_grid(~ dose) +
  ggtitle("Tooth Length vs. Delivery Method \nby Dose Amount") +
  theme(plot.title = element_text(lineheight = .8, face = "bold"))
## t-tests: supplement effect, then pairwise dose comparisons on two-level
## subsets (the formula interface of t.test needs exactly two groups)
t.test(len ~ supp, data = ToothGrowth)
ToothGrowth_sub1 <- subset(ToothGrowth, ToothGrowth$dose %in% c(1.0, 0.5))
t.test(len ~ dose, data = ToothGrowth_sub1)
ToothGrowth_sub2 <- subset(ToothGrowth, ToothGrowth$dose %in% c(0.5, 2.0))
t.test(len ~ dose, data = ToothGrowth_sub2)
ToothGrowth_sub3 <- subset(ToothGrowth, ToothGrowth$dose %in% c(2.0, 1.0))
# BUGFIX: this line previously reran the test on ToothGrowth_sub2, so the
# dose 1.0 vs 2.0 comparison was never actually performed
t.test(len ~ dose, data = ToothGrowth_sub3)
|
# --------
# Assign every GLRLM image descriptor row to its nearest centroid (euclidean
# distance on min-max-normalized features) and write the distances to disk.
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable for a
# standalone batch script, but drop it if this file is ever source()d.
rm(list = ls())
files <- list.files(
  path = "/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/div-2014/devset/descvis/img",
  pattern = "*GLRLM.csv", full.names = TRUE, recursive = FALSE
)
centroids <- read.csv("/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/MMSearch-Retrieval_2016_MainProject/MMSR_Project_ImagerRetrieval/src/scripts/diverseimages/centroids_GLRLM.csv", header = FALSE)
# Accumulate result rows in a list and bind once at the end; the previous
# result <- rbind(result, ...) inside the loop grew quadratically.
rows <- list()
for (file in files) {
  csvfile <- read.csv(file, header = FALSE)
  # min-max normalize each descriptor row; column 1 holds the image id.
  # NOTE(review): a constant-valued row gives max == min and thus NaN here.
  normalized <- t(apply(csvfile[-1], 1, function(x) (x - min(x)) / (max(x) - min(x))))
  for (l in seq_len(nrow(normalized))) {
    distances <- NULL
    # loop variable renamed from 'c' to 'cc' to stop shadowing base::c
    for (cc in seq_len(nrow(centroids))) {
      distances <- rbind(distances, cbind(centroids[cc, 1], centroids[cc, 2], dist(rbind(normalized[l, ], centroids[cc, -(1:2)]), method = "euclidean")))
    }
    # keep the closest centroid (smallest euclidean distance) for this image
    rows[[length(rows) + 1]] <- c(csvfile[l, 1], distances[which.min(distances[, 3]), ])
  }
}
result <- do.call(rbind, rows)
write.table(result,
  "/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/MMSearch-Retrieval_2016_MainProject/MMSR_Project_ImagerRetrieval/src/scripts/diverseimages/distances_GLRLM.csv",
  sep = ",", quote = FALSE, row.names = FALSE, col.names = FALSE
)
|
/MMSR_Project_ImagerRetrieval/src/scripts/diverseimages/R/clusterTestData_GLRLM.R
|
no_license
|
ntsmwk/MMSearch-Retrieval_2016_MainProject
|
R
| false
| false
| 1,263
|
r
|
# --------
rm(list=ls())
files <- list.files(path="/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/div-2014/devset/descvis/img",
pattern="*GLRLM.csv", full.names=T, recursive=FALSE)
centroids <- read.csv("/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/MMSearch-Retrieval_2016_MainProject/MMSR_Project_ImagerRetrieval/src/scripts/diverseimages/centroids_GLRLM.csv", header=FALSE)
result <- NULL
for (file in files) {
csvfile <- read.csv(file, header=FALSE)
normalized <- t(apply(csvfile[-1], 1, function(x)(x-min(x))/(max(x)-min(x))))
for (l in seq(nrow(normalized))) {
distances <- NULL
for (c in seq(nrow(centroids))) {
distances <- rbind(distances, cbind(centroids[c,1], centroids[c,2], dist(rbind(normalized[l,], centroids[c,-(1:2)]), method="euclidean")))
}
result <- rbind(result, c(csvfile[l, 1], distances[which.min(distances[,3]),]))
}
}
write.table(result,
"/home/alina/Documents/Universität/2016W/Multimedia Search and Retrieval/Project/MMSearch-Retrieval_2016_MainProject/MMSR_Project_ImagerRetrieval/src/scripts/diverseimages/distances_GLRLM.csv",
sep=",", quote=FALSE, row.names=F, col.names=F)
|
## Caching the inverse of a special matrix
## Function "makeCacheMatrix" creates a special "matrix" object that can cache its inverse
# Create a matrix wrapper that can cache its inverse.
#
# Returns a list of four accessors: set/get for the matrix itself and
# setinverse/getinverse for the cached inverse. Storing a new matrix via
# set() invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # new matrix -> the stale cache must be dropped
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Function "cacheSolve" computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the "cachesolve" should retrieve the inverse from the cache.
# Compute (or retrieve from cache) the inverse of a makeCacheMatrix object.
#
# If the inverse was already computed for the current matrix it is returned
# from the cache (with a message); otherwise it is computed with solve(),
# stored via setinverse(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
tangqingguangyue/ProgrammingAssignment2
|
R
| false
| false
| 897
|
r
|
## Caching the inverse of a special matrix
## Function "makeCacheMatrix" creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i<-NULL
set<-function(y){
x<<-y
i<<-NULL
}
get<-function()x
setinverse<-function(inverse) i<<-inverse
getinverse<-function() i
list(set=set, get=get,
setinverse=setinverse,
getinverse=getinverse)
}
## Function "cacheSolve" computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i<-x$getinverse()
if(!is.null(i)){
message("getting cached data")
return(i)
}
m<-x$get()
i<-solve(m, ...)
x$setinverse(i)
i
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/pedvis.R
\name{prop2string}
\alias{prop2string}
\title{given a named list of properties, this turns each one into a string.}
\usage{
prop2string(y)
}
\description{
It returns a vector of such strings.
}
|
/man/prop2string.Rd
|
no_license
|
eriqande/pedvis
|
R
| false
| false
| 290
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/pedvis.R
\name{prop2string}
\alias{prop2string}
\title{given a named list of properties, this turns each one into a string.}
\usage{
prop2string(y)
}
\description{
It returns a vector of such strings.
}
|
# Compare highway fuel economy of small- vs large-displacement cars, and
# city fuel economy of selected manufacturers, using the ggplot2 mpg data.
library(ggplot2)
library(dplyr)
# Question: do cars with engine displacement <= 4 have better highway
# mileage (hwy) than those with displacement >= 5?
small_engine <- filter(mpg, displ <= 4)
large_engine <- filter(mpg, displ >= 5)
mean(small_engine$hwy)
mean(large_engine$hwy)
# City mileage (cty): audi vs toyota
audi <- filter(mpg, manufacturer == "audi")
toyota <- filter(mpg, manufacturer == "toyota")
mean(audi$cty)
mean(toyota$cty)
# Mean highway mileage across chevrolet, ford and honda combined
cfh <- filter(mpg, manufacturer %in% c("chevrolet", "ford", "honda"))
mean(cfh$hwy)
|
/19_10_24practice2_연습.R
|
no_license
|
hongshi97/R_basic_practice
|
R
| false
| false
| 558
|
r
|
library(ggplot2)
library(dplyr)
displ4 <- mpg %>% filter(mpg$displ <=4)
displ5 <- mpg %>% filter(mpg$displ >=5)
mean(displ4$hwy)
mean(displ5$hwy)
audi <- mpg %>% filter(manufacturer == "audi")
toyota <- mpg %>% filter(manufacturer =="toyota")
mean(audi$cty)
mean(toyota$cty)
cfh <- mpg %>% filter(manufacturer %in% c("chevrolet","ford","honda"))
cfh$hwy %>% mean()
# 배기통이 4이하인 자동차의 hwy (고속도로 연비)가 더 좋은지 배기통이 5이상인 자동차의 hwy (고속도로 연비)가 더 좋은지 비교.
|
######################### Verifying output ##################################
# Output is verified for the following conditions using an independent approach
# i.e. by calculating a subset of the expected tidy output matrix through a
# completetely different operation and comparing with corresponding subset of
# tidy data
# The subset considered (this is arbitrary) for testing is
## 2 variables - (a)TimeBodyAccelerometerMean-X (V1) (b) FrequencyBodyGyroscopeJerkMagnitudeMean (V542)
## subjectID = 1, Y = LAYING(6)
# Independently recompute the two chosen cells straight from master_data
# (subject 1, activity code 6 = LAYING, variables V1 and V542).
# NOTE(review): tbl_df() is deprecated in modern dplyr; as_tibble() is the
# drop-in replacement.
verification_data <- tbl_df(master_data) %>%
  select(c(subjectID, V1, V542, Activity)) %>%
  filter(subjectID == "1" & Activity == 6) %>%
  summarize(TimeBodyAccelerometerMeanX = mean(V1),FrequencyBodyGyroscopeJerkMagnitudeMean = mean(V542))
# Pull the corresponding cells out of the already-computed tidy output
tidy_output_subset_for_verification <- tbl_df(tidy_output) %>%
  filter(subjectID == "1" & Activity == "LAYING" ) %>%
  select(TimeBodyAccelerometerMeanX, FrequencyBodyGyroscopeJerkMagnitudeMean)
# identical() compares values AND attributes/classes, so both subsets must be
# built through the same tbl pipeline for this check to succeed
if(identical(verification_data,tidy_output_subset_for_verification)) {
  print("Output verification is successful!")
}
|
/03_Getting_And_Cleaning_Data/07_verify_output.R
|
no_license
|
kartik-avula/Assignments
|
R
| false
| false
| 1,115
|
r
|
######################### Verifying output ##################################
# Output is verified for the following conditions using an independent approach
# i.e. by calculating a subset of the expected tidy output matrix through a
# completetely different operation and comparing with corresponding subset of
# tidy data
# The subset considered (this is arbitrary) for testing is
## 2 variables - (a)TimeBodyAccelerometerMean-X (V1) (b) FrequencyBodyGyroscopeJerkMagnitudeMean (V542)
## subjectID = 1, Y = LAYING(6)
verification_data <- tbl_df(master_data) %>%
select(c(subjectID, V1, V542, Activity)) %>%
filter(subjectID == "1" & Activity == 6) %>%
summarize(TimeBodyAccelerometerMeanX = mean(V1),FrequencyBodyGyroscopeJerkMagnitudeMean = mean(V542))
tidy_output_subset_for_verification <- tbl_df(tidy_output) %>%
filter(subjectID == "1" & Activity == "LAYING" ) %>%
select(TimeBodyAccelerometerMeanX, FrequencyBodyGyroscopeJerkMagnitudeMean)
if(identical(verification_data,tidy_output_subset_for_verification)) {
print("Output verification is successful!")
}
|
# Simulate a Wright-Fisher population started from 'nbfounder' founders.
#
# Args:
#   n: number of individuals per generation
#   nbgeneration: number of generations to simulate
#   nloci: number of loci per genome
#   genomeDefFixedMapWithQTL: hypred genome definition passed to hypredRecombine
#   nbfounder: number of founder individuals
#
# Returns a 3-d array genomesOvertime[g, i, j]: the allele of individual i at
# locus j in generation g.
# NOTE(review): each offspring is a single recombined gamete of two parents
# (haploid representation) -- confirm this matches the intended diploid model.
WrightFisherPopulationMultiFounders<-function(n,nbgeneration,nloci,genomeDefFixedMapWithQTL,nbfounder){
  # founder genomes: independent Bernoulli(0.5) alleles at every locus
  MyFounders<-array(0,dim=c(nbfounder,nloci))
  for (i in 1:nbfounder) {
    MyFounders[i,]=rbern(nloci,0.5)
  }
  genomesOvertime<-array(0,dim=c(nbgeneration,n,nloci))
  # generation 1: each individual is a gamete produced from two DISTINCT
  # founders, recombined via hypredRecombine (no mutation, no block structure)
  for (i in 1:n) {
    indFounders<-sample(seq(1,nbfounder), 2, replace = FALSE)
    gamete <- hypredRecombine(genomeDefFixedMapWithQTL,
                              genomeA = MyFounders[indFounders[1],],
                              genomeB = MyFounders[indFounders[2],],
                              mutate = FALSE,
                              block = FALSE)
    genomesOvertime[1,i,]<-as.matrix(gamete)
  }
  # generations 2..nbgeneration: random mating within the previous generation
  # (two distinct parents sampled uniformly), again with recombination
  for (g in 2:nbgeneration){
    for (i in 1:n) {
      indparents<-sample(seq(1,n), 2, replace = FALSE)
      gamete <- hypredRecombine(genomeDefFixedMapWithQTL,
                                genomeA = genomesOvertime[g-1,indparents[1],],
                                genomeB = genomesOvertime[g-1,indparents[2],],
                                mutate = FALSE,
                                block = FALSE)
      genomesOvertime[g,i,]<-as.matrix(gamete)
    }
  }
  return(genomesOvertime)
}
##################################################
|
/functionsForMultiFounders.R
|
no_license
|
rabier/GSImperfectLD
|
R
| false
| false
| 1,738
|
r
|
WrightFisherPopulationMultiFounders<-function(n,nbgeneration,nloci,genomeDefFixedMapWithQTL,nbfounder){
## Generate individuals according to Wright Fisher
## Be careful at the early beginning we only have two founders
## input : n number of individuals to generate
### nbgeneration : number of generations
#### nloci: number of loci
### nbfounder: number of founders
MyFounders<-array(0,dim=c(nbfounder,nloci))
for (i in 1:nbfounder) {
MyFounders[i,]=rbern(nloci,0.5)
}
genomesOvertime<-array(0,dim=c(nbgeneration,n,nloci))
#generate genomes of n individuals that are descendents of the 2 founders
# i.e. it is the first generation of descendents
# genomesOvertime[1,i,] genome of individual i , for generation 1
#recombination according to Haldane
for (i in 1:n) {
indFounders<-sample(seq(1,nbfounder), 2, replace = FALSE)
gamete <- hypredRecombine(genomeDefFixedMapWithQTL,
genomeA = MyFounders[indFounders[1],],
genomeB = MyFounders[indFounders[2],],
mutate = FALSE,
block = FALSE)
genomesOvertime[1,i,]<-as.matrix(gamete)
}
# generate generations from 2 to nbgeneration by random mating, and recombination according to Haldane
#genomesOvertime[g,i,j] denotes the genome of individu i at locus j at generation g
for (g in 2:nbgeneration){
for (i in 1:n) {
indparents<-sample(seq(1,n), 2, replace = FALSE)
gamete <- hypredRecombine(genomeDefFixedMapWithQTL,
genomeA = genomesOvertime[g-1,indparents[1],],
genomeB = genomesOvertime[g-1,indparents[2],],
mutate = FALSE,
block = FALSE)
genomesOvertime[g,i,]<-as.matrix(gamete)
}
}
return(genomesOvertime)
}
##################################################
|
# Name: Fastq_checks.R
# Auth: u.niazi@imperial.ac.uk
# Date: 4/12/15
# Desc: create a report for the fastq files
source('Header.R')
library(ShortRead)
#source('../CFastqQuality/CFastqQuality.R')
dfAnnotations = read.csv('Data_external/Sample_information.csv', header=T)
i = nrow(dfAnnotations)
r = 1
nc = ncol(dfAnnotations)
while(r < i){
dfAnnotations[r+1, 2:nc] = dfAnnotations[r, 2:nc]
r = r+2
}
dir.create('Results')
summary(dfAnnotations)
f_getqa = function(files, title, nseq=3){
q = qa(files, BPPARAM=SerialParam())
report(q, dest=paste('Results/', title, sep=''))
seq = q[['frequentSequences']]
# get nseq top sequences from each lane
seq = split(seq, seq$lane)
seq.2 = lapply(seq, function(x) as.character(x[1:nseq,'sequence']))
seq.2 = DNAStringSetList(seq.2)
Biostrings::writeXStringSet(unlist(seq.2), filepath = paste('Results/', title, '/frequent_seq.fasta', sep=''))
rm(q)
}
## check samples by factor
fGroup = dfAnnotations$LANE
table(fGroup)
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 1,'Files'], sep='')
f_getqa(csFiles, paste('lane', 1, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 2,'Files'], sep='')
f_getqa(csFiles, paste('lane', 2, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 3,'Files'], sep='')
f_getqa(csFiles, paste('lane', 3, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 4,'Files'], sep='')
f_getqa(csFiles, paste('lane', 4, sep=''))
gc()
# ul = unique(lanes)
#
# mclapply(seq_along(ul), function(i) {
# csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[lanes == ul[i],'Files'], sep='')
# qa = f_getqa(csFiles, paste('lane', ul[i], sep=''))
# f_writeFrequent(qa, paste('lane', ul[i], sep=''))
# })
#
# for(i in seq_along(ul)){
# csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[lanes == ul[i],'Files'], sep='')
# qa = f_getqa(csFiles, paste('lane', ul[i], sep=''))
# f_writeFrequent(qa, paste('lane', ul[i], sep=''))
# }
#
# f_writeFrequent = function(q, title, n = 3){
# seq = q[['frequentSequences']]
# # get n top sequences from each lane
# seq = split(seq, seq$lane)
# seq.2 = lapply(seq, function(x) as.character(x[1:n,'sequence']))
# seq.2 = DNAStringSetList(seq.2)
# Biostrings::writeXStringSet(unlist(seq.2), filepath = paste('Results/', title, '/frequent_seq.fasta', sep=''))
# }
|
/Fastq_checks.R
|
permissive
|
uhkniazi/HPRU_AK_Rna_Seq
|
R
| false
| false
| 2,513
|
r
|
# Name: Fastq_checks.R
# Auth: u.niazi@imperial.ac.uk
# Date: 4/12/15
# Desc: create a report for the fastq files
source('Header.R')
library(ShortRead)
#source('../CFastqQuality/CFastqQuality.R')
dfAnnotations = read.csv('Data_external/Sample_information.csv', header=T)
i = nrow(dfAnnotations)
r = 1
nc = ncol(dfAnnotations)
while(r < i){
dfAnnotations[r+1, 2:nc] = dfAnnotations[r, 2:nc]
r = r+2
}
dir.create('Results')
summary(dfAnnotations)
f_getqa = function(files, title, nseq=3){
q = qa(files, BPPARAM=SerialParam())
report(q, dest=paste('Results/', title, sep=''))
seq = q[['frequentSequences']]
# get nseq top sequences from each lane
seq = split(seq, seq$lane)
seq.2 = lapply(seq, function(x) as.character(x[1:nseq,'sequence']))
seq.2 = DNAStringSetList(seq.2)
Biostrings::writeXStringSet(unlist(seq.2), filepath = paste('Results/', title, '/frequent_seq.fasta', sep=''))
rm(q)
}
## check samples by factor
fGroup = dfAnnotations$LANE
table(fGroup)
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 1,'Files'], sep='')
f_getqa(csFiles, paste('lane', 1, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 2,'Files'], sep='')
f_getqa(csFiles, paste('lane', 2, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 3,'Files'], sep='')
f_getqa(csFiles, paste('lane', 3, sep=''))
gc()
csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[fGroup == 4,'Files'], sep='')
f_getqa(csFiles, paste('lane', 4, sep=''))
gc()
# ul = unique(lanes)
#
# mclapply(seq_along(ul), function(i) {
# csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[lanes == ul[i],'Files'], sep='')
# qa = f_getqa(csFiles, paste('lane', ul[i], sep=''))
# f_writeFrequent(qa, paste('lane', ul[i], sep=''))
# })
#
# for(i in seq_along(ul)){
# csFiles = paste('Data_external/RNASeq/20151110/FASTQ/', dfAnnotations[lanes == ul[i],'Files'], sep='')
# qa = f_getqa(csFiles, paste('lane', ul[i], sep=''))
# f_writeFrequent(qa, paste('lane', ul[i], sep=''))
# }
#
# f_writeFrequent = function(q, title, n = 3){
# seq = q[['frequentSequences']]
# # get n top sequences from each lane
# seq = split(seq, seq$lane)
# seq.2 = lapply(seq, function(x) as.character(x[1:n,'sequence']))
# seq.2 = DNAStringSetList(seq.2)
# Biostrings::writeXStringSet(unlist(seq.2), filepath = paste('Results/', title, '/frequent_seq.fasta', sep=''))
# }
|
#' @export
#' @rdname geom_linerange
geom_pointrange <- function(mapping = NULL, data = NULL, stat = "identity",
position = "identity", show.legend = NA,
inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomPointrange,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(...)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
GeomPointrange <- ggproto("GeomPointrange", Geom,
default_aes = aes(colour = "black", size = 0.5, linetype = 1, shape = 19,
fill = NA, alpha = NA, stroke = 1),
draw_key = draw_key_pointrange,
required_aes = c("x", "y", "ymin", "ymax"),
draw = function(self, data, scales, coordinates, ...) {
if (is.null(data$y))
return(GeomLinerange$draw(data, scales, coordinates, ...))
ggname("geom_pointrange",
gTree(children = gList(
GeomLinerange$draw(data, scales, coordinates, ...),
GeomPoint$draw(transform(data, size = size * 4), scales, coordinates, ...)
))
)
}
)
|
/R/geom-pointrange.r
|
no_license
|
bbolker/ggplot2
|
R
| false
| false
| 1,138
|
r
|
#' @export
#' @rdname geom_linerange
geom_pointrange <- function(mapping = NULL, data = NULL, stat = "identity",
position = "identity", show.legend = NA,
inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomPointrange,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(...)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
GeomPointrange <- ggproto("GeomPointrange", Geom,
default_aes = aes(colour = "black", size = 0.5, linetype = 1, shape = 19,
fill = NA, alpha = NA, stroke = 1),
draw_key = draw_key_pointrange,
required_aes = c("x", "y", "ymin", "ymax"),
draw = function(self, data, scales, coordinates, ...) {
if (is.null(data$y))
return(GeomLinerange$draw(data, scales, coordinates, ...))
ggname("geom_pointrange",
gTree(children = gList(
GeomLinerange$draw(data, scales, coordinates, ...),
GeomPoint$draw(transform(data, size = size * 4), scales, coordinates, ...)
))
)
}
)
|
#### Logical Sparse Triangular Matrices in Compressed column-oriented format
setAs("ntCMatrix", "matrix",
function(from) as(as(from, "denseMatrix"), "matrix"))
setAs("matrix", "ntCMatrix",
function(from) as(as(from, "dtCMatrix"), "ntCMatrix"))
setAs("ntCMatrix", "TsparseMatrix",
function(from) .Call(Csparse_to_Tsparse, from, TRUE))
setAs("ntCMatrix", "ngCMatrix",
function(from) copyClass(diagU2N(from), "ngCMatrix"))
.ntC2d <- function(from)
new("dtCMatrix", i = from@i, p = from@p,
x = rep.int(1, length(from@i)), uplo = from@uplo,
diag = from@diag, Dim = from@Dim, Dimnames = from@Dimnames)
.ntC2l <- function(from)
new("ltCMatrix", i = from@i, p = from@p,
x = rep.int(TRUE, length(from@i)), uplo = from@uplo,
diag = from@diag, Dim = from@Dim, Dimnames = from@Dimnames)
setAs("ntCMatrix", "dMatrix", .ntC2d)# < instead of "dtCMatrix"
setAs("ntCMatrix", "dsparseMatrix", .ntC2d)
setAs("ntCMatrix", "dtCMatrix", .ntC2d)
setAs("ntCMatrix", "lMatrix", .ntC2l)
setAs("ntCMatrix", "lsparseMatrix", .ntC2l)
setAs("ntCMatrix", "ltCMatrix", .ntC2l)
rm(.ntC2d,.ntC2l) # don't even keep "hidden"
setAs("ngCMatrix", "ntCMatrix", # to triangular, needed for triu,..
function(from) as(as(as(from, "TsparseMatrix"),
"ntTMatrix"), "ntCMatrix"))
## setAs("ntCMatrix", "generalMatrix",
## function(from) ......)
## setMethod("t", signature(x = "ntCMatrix"),
## function(x) .Call(ntCMatrix_trans, x),
## valueClass = "ntCMatrix")
|
/tags/0.999375-23/R/ntCMatrix.R
|
no_license
|
LTLA/Matrix
|
R
| false
| false
| 1,523
|
r
|
#### Logical Sparse Triangular Matrices in Compressed column-oriented format
setAs("ntCMatrix", "matrix",
function(from) as(as(from, "denseMatrix"), "matrix"))
setAs("matrix", "ntCMatrix",
function(from) as(as(from, "dtCMatrix"), "ntCMatrix"))
setAs("ntCMatrix", "TsparseMatrix",
function(from) .Call(Csparse_to_Tsparse, from, TRUE))
setAs("ntCMatrix", "ngCMatrix",
function(from) copyClass(diagU2N(from), "ngCMatrix"))
.ntC2d <- function(from)
new("dtCMatrix", i = from@i, p = from@p,
x = rep.int(1, length(from@i)), uplo = from@uplo,
diag = from@diag, Dim = from@Dim, Dimnames = from@Dimnames)
.ntC2l <- function(from)
new("ltCMatrix", i = from@i, p = from@p,
x = rep.int(TRUE, length(from@i)), uplo = from@uplo,
diag = from@diag, Dim = from@Dim, Dimnames = from@Dimnames)
setAs("ntCMatrix", "dMatrix", .ntC2d)# < instead of "dtCMatrix"
setAs("ntCMatrix", "dsparseMatrix", .ntC2d)
setAs("ntCMatrix", "dtCMatrix", .ntC2d)
setAs("ntCMatrix", "lMatrix", .ntC2l)
setAs("ntCMatrix", "lsparseMatrix", .ntC2l)
setAs("ntCMatrix", "ltCMatrix", .ntC2l)
rm(.ntC2d,.ntC2l) # don't even keep "hidden"
setAs("ngCMatrix", "ntCMatrix", # to triangular, needed for triu,..
function(from) as(as(as(from, "TsparseMatrix"),
"ntTMatrix"), "ntCMatrix"))
## setAs("ntCMatrix", "generalMatrix",
## function(from) ......)
## setMethod("t", signature(x = "ntCMatrix"),
## function(x) .Call(ntCMatrix_trans, x),
## valueClass = "ntCMatrix")
|
library(tidyverse)
library(h2o)
library(lubridate)
library(caTools)
h2o.init()
df = read_rds("Data/WeatherBound.rds")
set.seed(101)
sample = sample.split(df$range_remaining, SplitRatio = .75)
subset(df, sample == TRUE) %>% write_csv("Data/h2odata/train.csv")
subset(df, sample == FALSE) %>% write_csv("Data/h2odata/test.csv")
train <- h2o.importFile("Data/h2odata/train.csv")
test = h2o.importFile("Data/h2odata/test.csv")
m = h2o.automl(x = c("economy", "blue_score",
"start_outside_temp",
"average_speed",
"road_conditions"),
y = "range_remaining",
training_frame = train,
max_models = 15)
dtest = test %>% as_tibble
preds = h2o.predict(m, test, type = "prob")
dtest$prediction = preds[[1]] %>% as.vector
dtest %>%
select(range_remaining, prediction) %>%
mutate(diff = range_remaining - prediction) %>%
pull(diff) %>% mean(na.rm = T)
|
/Code/autoML.R
|
no_license
|
cdmoss/MHC-EV-Dashboard
|
R
| false
| false
| 969
|
r
|
library(tidyverse)
library(h2o)
library(lubridate)
library(caTools)
h2o.init()
df = read_rds("Data/WeatherBound.rds")
set.seed(101)
sample = sample.split(df$range_remaining, SplitRatio = .75)
subset(df, sample == TRUE) %>% write_csv("Data/h2odata/train.csv")
subset(df, sample == FALSE) %>% write_csv("Data/h2odata/test.csv")
train <- h2o.importFile("Data/h2odata/train.csv")
test = h2o.importFile("Data/h2odata/test.csv")
m = h2o.automl(x = c("economy", "blue_score",
"start_outside_temp",
"average_speed",
"road_conditions"),
y = "range_remaining",
training_frame = train,
max_models = 15)
dtest = test %>% as_tibble
preds = h2o.predict(m, test, type = "prob")
dtest$prediction = preds[[1]] %>% as.vector
dtest %>%
select(range_remaining, prediction) %>%
mutate(diff = range_remaining - prediction) %>%
pull(diff) %>% mean(na.rm = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{region_mappings}
\alias{region_mappings}
\title{Default regional mapping}
\format{
A named list of data.tables with 2 columns
\describe{
\item{mapping_name}{Mapping name (example, witch17),
contains the region names in lower case}
\item{iso3}{ISO3 code in upper case}
}
}
\usage{
region_mappings
}
\description{
A collection of regional mappings for 250 ISO3.
}
\keyword{datasets}
|
/man/region_mappings.Rd
|
permissive
|
witch-team/witchtools
|
R
| false
| true
| 487
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{region_mappings}
\alias{region_mappings}
\title{Default regional mapping}
\format{
A named list of data.tables with 2 columns
\describe{
\item{mapping_name}{Mapping name (example, witch17),
contains the region names in lower case}
\item{iso3}{ISO3 code in upper case}
}
}
\usage{
region_mappings
}
\description{
A collection of regional mappings for 250 ISO3.
}
\keyword{datasets}
|
tag_item <- function(x, tag_val) {
if (is.null(x)) x <- list()
attr(x, "tag") <- tag_val
x
}
tag_list_col <- function(x) {
x %<>%
purrr::modify(~tag_item(., "list")) %>%
tag_item("list_col")
x
}
serialize_df <- function(x) {
out <- x %>%
dplyr::mutate(dplyr::across(where(is.list), tag_list_col)) %>%
purrr::transpose() %>%
purrr::set_names( nm = purrr::map_chr(., "name"))
out %<>%
yaml::as.yaml()
out
}
list_handler <- function(x) {
tag_item(x, "list")
}
has_list_tag <- function(x) {
x %<>% attr("tag")
if (rlang::is_empty(x)) return(FALSE)
return("list" %in% x)
}
restore_col_type <- function(x) {
is_list_col <- x %>%
purrr::map_lgl(has_list_tag) %>%
any()
if (is_list_col) {
x %<>%
tag_list_col() %>%
purrr::simplify_all()
} else {
x %<>% unlist()
}
x
}
deserialize_df <- function(x) {
out <- x %>% yaml::yaml.load(handlers = list(list = list_handler))
out %<>% purrr::set_names(NULL)
out %<>%
purrr::transpose() %>%
tibble::as_tibble()
out %<>%
dplyr::mutate(
dplyr::across(
dplyr::everything(), restore_col_type)
)
out
}
default_column_map_input_path <- function() {
"mandrake"
}
#' Load a package colspec to case
#'
#' The package colspec may be consumed by [link_col2doc()], in order to
#' link column names to their metadata.
#' If `lookup_cache` is `NULL`/empty, a new one will be made.
#'
#' @export
#' @param pkg_name the name of the package from which the set of columns
#' should be loaded.
#' @param lookup_cache the `storr::storr` object, generated by
#' `load_package_colspec()`, containing keys (given by column aliases)
#' mapping to column metadata lists.
load_package_colspec <- function(pkg_name, lookup_cache = NULL) {
# If no store is given, make one
st <- lookup_cache
if (rlang::is_empty(st)) st <- storr::storr_environment()
`%||%` <- rlang::`%||%`
pkg_path <- system.file(package = pkg_name, lib.loc = .libPaths())
opts <- roxygen2::load_options(pkg_path)
mandrake_path <- opts$mandrake_output %||%
default_column_map_input_path()
# The directory containing the mapppings
mandrake_path <- file.path(pkg_path, mandrake_path)
message("Adding cols from ", pkg_name, " to lookup cache")
spec_paths <- mandrake_path %>%
list.files(pattern = ".*\\.ya?ml$")
spec_paths %>%
purrr::walk(function(path) {
message("Including ", path, " in lookup cache")
spec <- file.path(mandrake_path, path) %>% load_colspec_file()
spec %>%
dplyr::group_by(name) %>%
dplyr::group_walk(add_entry_to_cache, lookup_cache = st, .keep = TRUE)
invisible(NULL)
})
return(st)
}
#' Load colspec from a single file, to be imported into storr cache
load_colspec_file <- function(path) {
out <- path %>%
readLines() %>%
paste0(collapse = "\n") %>%
deserialize_df()
out
}
add_entry_to_cache <- function(entry, keys, lookup_cache = NULL) {
if (rlang::is_empty(lookup_cache))
stop("Empty lookup cache given to add_entry_to_cache")
grouping_cols <- names(keys)
keys %<>% dplyr::pull()
main_key <- keys
fix_entry_duplication <- function(entry, main_key) {
first_entry <- entry %>% dplyr::slice_head()
topics <- entry$topic %>% jsonlite::toJSON()
first_topic <- first_entry$topic %>% jsonlite::toJSON()
if (nrow(entry) > 1) {
warning(
"Multiple defintions for ", main_key, " given in ",
topics, " keeping only definition from ", first_topic
)
}
return(first_entry)
}
entry %<>% fix_entry_duplication(main_key)
aliases <- entry %>%
dplyr::pull(aliases) %>%
purrr::flatten_chr()
keys %<>% c(aliases)
pkg_ns <- paste0("package:",entry$package)
dest_namespace <- "unique"
src_namespace <- lookup_cache$default_namespace
handle_previous_defs <- function(keys, entry, lookup_cache) {
already_defined <- lookup_cache$exists(keys)
if (any(already_defined)) {
defd_keys <- keys[already_defined]
previous_defs <- defd_keys %>%
lookup_cache$mget() %>%
purrr::map2_dfr(defd_keys, function(entry, key) {
entry %<>%
dplyr::mutate(key = key)
entry
}) %>%
glue::glue_data(
"{key}@{package}::{topic}"
)
warning(
"For entry @ ",
jsonlite::toJSON(entry$topic),
". keys already defined: ",
jsonlite::toJSON(previous_defs))
keys %<>% .[!already_defined]
}
keys
}
keys %<>% handle_previous_defs(entry, lookup_cache)
entry %<>% dplyr::filter(name %in% keys)
# Only bother filling the cache if there's something to fill
if (!rlang::is_empty(keys) & nrow(entry) > 0) {
# Make the value referencable by the formal name, or any of its
# aliases
lookup_cache$fill(keys, entry)
lookup_cache$fill(keys, entry, namespace = pkg_ns)
# Add it to the 1:1 namespace that links formal name to values
# (no alias linkage)
lookup_cache$duplicate(
main_key,
main_key,
namespace_src = src_namespace,
namespace_dest = dest_namespace
)
}
# Drop these before return to ensure functionality
# with group_modify
entry %<>% dplyr::select(-c(grouping_cols))
invisible(entry)
}
|
/R/io_utils.R
|
no_license
|
strazto/mandrake
|
R
| false
| false
| 5,340
|
r
|
tag_item <- function(x, tag_val) {
if (is.null(x)) x <- list()
attr(x, "tag") <- tag_val
x
}
tag_list_col <- function(x) {
x %<>%
purrr::modify(~tag_item(., "list")) %>%
tag_item("list_col")
x
}
serialize_df <- function(x) {
out <- x %>%
dplyr::mutate(dplyr::across(where(is.list), tag_list_col)) %>%
purrr::transpose() %>%
purrr::set_names( nm = purrr::map_chr(., "name"))
out %<>%
yaml::as.yaml()
out
}
list_handler <- function(x) {
tag_item(x, "list")
}
has_list_tag <- function(x) {
x %<>% attr("tag")
if (rlang::is_empty(x)) return(FALSE)
return("list" %in% x)
}
restore_col_type <- function(x) {
is_list_col <- x %>%
purrr::map_lgl(has_list_tag) %>%
any()
if (is_list_col) {
x %<>%
tag_list_col() %>%
purrr::simplify_all()
} else {
x %<>% unlist()
}
x
}
deserialize_df <- function(x) {
out <- x %>% yaml::yaml.load(handlers = list(list = list_handler))
out %<>% purrr::set_names(NULL)
out %<>%
purrr::transpose() %>%
tibble::as_tibble()
out %<>%
dplyr::mutate(
dplyr::across(
dplyr::everything(), restore_col_type)
)
out
}
default_column_map_input_path <- function() {
"mandrake"
}
#' Load a package colspec to case
#'
#' The package colspec may be consumed by [link_col2doc()], in order to
#' link column names to their metadata.
#' If `lookup_cache` is `NULL`/empty, a new one will be made.
#'
#' @export
#' @param pkg_name the name of the package from which the set of columns
#' should be loaded.
#' @param lookup_cache the `storr::storr` object, generated by
#' `load_package_colspec()`, containing keys (given by column aliases)
#' mapping to column metadata lists.
load_package_colspec <- function(pkg_name, lookup_cache = NULL) {
# If no store is given, make one
st <- lookup_cache
if (rlang::is_empty(st)) st <- storr::storr_environment()
`%||%` <- rlang::`%||%`
pkg_path <- system.file(package = pkg_name, lib.loc = .libPaths())
opts <- roxygen2::load_options(pkg_path)
mandrake_path <- opts$mandrake_output %||%
default_column_map_input_path()
# The directory containing the mapppings
mandrake_path <- file.path(pkg_path, mandrake_path)
message("Adding cols from ", pkg_name, " to lookup cache")
spec_paths <- mandrake_path %>%
list.files(pattern = ".*\\.ya?ml$")
spec_paths %>%
purrr::walk(function(path) {
message("Including ", path, " in lookup cache")
spec <- file.path(mandrake_path, path) %>% load_colspec_file()
spec %>%
dplyr::group_by(name) %>%
dplyr::group_walk(add_entry_to_cache, lookup_cache = st, .keep = TRUE)
invisible(NULL)
})
return(st)
}
#' Load colspec from a single file, to be imported into storr cache
load_colspec_file <- function(path) {
out <- path %>%
readLines() %>%
paste0(collapse = "\n") %>%
deserialize_df()
out
}
add_entry_to_cache <- function(entry, keys, lookup_cache = NULL) {
if (rlang::is_empty(lookup_cache))
stop("Empty lookup cache given to add_entry_to_cache")
grouping_cols <- names(keys)
keys %<>% dplyr::pull()
main_key <- keys
fix_entry_duplication <- function(entry, main_key) {
first_entry <- entry %>% dplyr::slice_head()
topics <- entry$topic %>% jsonlite::toJSON()
first_topic <- first_entry$topic %>% jsonlite::toJSON()
if (nrow(entry) > 1) {
warning(
"Multiple defintions for ", main_key, " given in ",
topics, " keeping only definition from ", first_topic
)
}
return(first_entry)
}
entry %<>% fix_entry_duplication(main_key)
aliases <- entry %>%
dplyr::pull(aliases) %>%
purrr::flatten_chr()
keys %<>% c(aliases)
pkg_ns <- paste0("package:",entry$package)
dest_namespace <- "unique"
src_namespace <- lookup_cache$default_namespace
handle_previous_defs <- function(keys, entry, lookup_cache) {
already_defined <- lookup_cache$exists(keys)
if (any(already_defined)) {
defd_keys <- keys[already_defined]
previous_defs <- defd_keys %>%
lookup_cache$mget() %>%
purrr::map2_dfr(defd_keys, function(entry, key) {
entry %<>%
dplyr::mutate(key = key)
entry
}) %>%
glue::glue_data(
"{key}@{package}::{topic}"
)
warning(
"For entry @ ",
jsonlite::toJSON(entry$topic),
". keys already defined: ",
jsonlite::toJSON(previous_defs))
keys %<>% .[!already_defined]
}
keys
}
keys %<>% handle_previous_defs(entry, lookup_cache)
entry %<>% dplyr::filter(name %in% keys)
# Only bother filling the cache if there's something to fill
if (!rlang::is_empty(keys) & nrow(entry) > 0) {
# Make the value referencable by the formal name, or any of its
# aliases
lookup_cache$fill(keys, entry)
lookup_cache$fill(keys, entry, namespace = pkg_ns)
# Add it to the 1:1 namespace that links formal name to values
# (no alias linkage)
lookup_cache$duplicate(
main_key,
main_key,
namespace_src = src_namespace,
namespace_dest = dest_namespace
)
}
# Drop these before return to ensure functionality
# with group_modify
entry %<>% dplyr::select(-c(grouping_cols))
invisible(entry)
}
|
##
# Author: Autogenerated on 2013-11-27 18:13:58
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
simpleFilterTest_leads_83 <- function(conn) {
Log.info("A munge-task R unit test on data <leads> testing the functional unit <>=> ")
Log.info("Uploading leads")
hex <- h2o.uploadFile(conn, locate("../../smalldata/iris/leads.csv"), "rleads.hex")
Log.info("Filtering out rows by >= from dataset leads and column \"class\" using value 1.51746584587")
filterHex <- hex[hex[,c("class")] >= 1.51746584587,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"class" >= 1.51746584587,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 4.91659036911")
filterHex <- hex[hex[,c("desire_idx")] >= 4.91659036911,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 4.91659036911,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 6.02628648628")
filterHex <- hex[hex[,c("desire_idx")] >= 6.02628648628,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 6.02628648628,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 3.20445477305")
filterHex <- hex[hex[,c("desire_idx")] >= 3.20445477305,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 3.20445477305,]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.60393032307")
filterHex <- hex[hex[,c("risk_idx")] >= 1.60393032307,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"risk_idx" >= 1.60393032307,]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.0478651567")
filterHex <- hex[hex[,c("risk_idx")] >= 1.0478651567,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"risk_idx" >= 1.0478651567,]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.87395661538")
filterHex <- hex[hex[,c("item_price")] >= 3.87395661538,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"item_price" >= 3.87395661538,]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.07365074335")
filterHex <- hex[hex[,c("item_price")] >= 3.07365074335,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"item_price" >= 3.07365074335,]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 4.68143396191")
filterHex <- hex[hex[,c("frequent_customer")] >= 4.68143396191,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"frequent_customer" >= 4.68143396191,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 1.46248441333")
filterHex <- hex[hex[,c("desire_idx")] >= 1.46248441333,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 1.46248441333,]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.00538965519, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.00538965519, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.00538965519, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 7.11096357284, and also subsetting columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.11096357284, c("frequent_customer")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.11096357284, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.15708272415, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.15708272415, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.15708272415, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 3.37951301752, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 3.37951301752, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 3.37951301752, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 7.35542359805, and also subsetting columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.35542359805, c("frequent_customer")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.35542359805, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 4.9449671627, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 4.9449671627, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 4.9449671627, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 4.05536958922, and also subsetting columns.")
filterHex <- hex[hex[,c("item_price")] >= 4.05536958922, c("item_price")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("item_price")] >= 4.05536958922, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.48481460378, and also subsetting columns.")
filterHex <- hex[hex[,c("item_price")] >= 3.48481460378, c("item_price")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("item_price")] >= 3.48481460378, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 5.29755002169, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 5.29755002169, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 5.29755002169, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 0.846826419107, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 0.846826419107, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 0.846826419107, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
}
conn = new("H2OClient", ip=myIP, port=myPort)
tryCatch(test_that("simpleFilterTest_ on data leads", simpleFilterTest_leads_83(conn)), warning = function(w) WARN(w), error = function(e) FAIL(e))
PASS()
|
/R/tests/testdir_autoGen/runit_simpleFilterTest_leads_83.R
|
permissive
|
hardikk/h2o
|
R
| false
| false
| 9,453
|
r
|
##
# Author: Autogenerated on 2013-11-27 18:13:58
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
# Autogenerated munge test for the '>=' filter on the <leads> dataset.
# Uploads leads.csv to H2O, then repeatedly row-filters by a numeric
# threshold on one column, exercising both hex[, c("col")] and hex$col
# indexing, and finally row-filters combined with column subsetting.
# Each result overwrites filterHex; only successful evaluation is asserted
# (the enclosing tryCatch in the driver turns errors into FAIL()).
#   conn - an H2OClient connection object
simpleFilterTest_leads_83 <- function(conn) {
Log.info("A munge-task R unit test on data <leads> testing the functional unit <>=> ")
Log.info("Uploading leads")
hex <- h2o.uploadFile(conn, locate("../../smalldata/iris/leads.csv"), "rleads.hex")
# --- Part 1: row filtering only, via matrix-style and $-style indexing ---
Log.info("Filtering out rows by >= from dataset leads and column \"class\" using value 1.51746584587")
filterHex <- hex[hex[,c("class")] >= 1.51746584587,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"class" >= 1.51746584587,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 4.91659036911")
filterHex <- hex[hex[,c("desire_idx")] >= 4.91659036911,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 4.91659036911,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 6.02628648628")
filterHex <- hex[hex[,c("desire_idx")] >= 6.02628648628,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 6.02628648628,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 3.20445477305")
filterHex <- hex[hex[,c("desire_idx")] >= 3.20445477305,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 3.20445477305,]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.60393032307")
filterHex <- hex[hex[,c("risk_idx")] >= 1.60393032307,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"risk_idx" >= 1.60393032307,]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.0478651567")
filterHex <- hex[hex[,c("risk_idx")] >= 1.0478651567,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"risk_idx" >= 1.0478651567,]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.87395661538")
filterHex <- hex[hex[,c("item_price")] >= 3.87395661538,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"item_price" >= 3.87395661538,]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.07365074335")
filterHex <- hex[hex[,c("item_price")] >= 3.07365074335,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"item_price" >= 3.07365074335,]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 4.68143396191")
filterHex <- hex[hex[,c("frequent_customer")] >= 4.68143396191,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"frequent_customer" >= 4.68143396191,]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 1.46248441333")
filterHex <- hex[hex[,c("desire_idx")] >= 1.46248441333,]
Log.info("Perform filtering with the '$' sign also")
filterHex <- hex[hex$"desire_idx" >= 1.46248441333,]
# --- Part 2: row filtering combined with column subsetting ---
# NOTE(review): the "complement of columns" selection actually lists ALL six
# columns (including the filtered one); this mirrors the code generator's
# output -- confirm against the generator if a true complement was intended.
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.00538965519, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.00538965519, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.00538965519, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 7.11096357284, and also subsetting columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.11096357284, c("frequent_customer")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.11096357284, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 1.15708272415, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.15708272415, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 1.15708272415, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 3.37951301752, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 3.37951301752, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 3.37951301752, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"frequent_customer\" using value 7.35542359805, and also subsetting columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.35542359805, c("frequent_customer")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("frequent_customer")] >= 7.35542359805, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 4.9449671627, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 4.9449671627, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 4.9449671627, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 4.05536958922, and also subsetting columns.")
filterHex <- hex[hex[,c("item_price")] >= 4.05536958922, c("item_price")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("item_price")] >= 4.05536958922, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"item_price\" using value 3.48481460378, and also subsetting columns.")
filterHex <- hex[hex[,c("item_price")] >= 3.48481460378, c("item_price")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("item_price")] >= 3.48481460378, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"desire_idx\" using value 5.29755002169, and also subsetting columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 5.29755002169, c("desire_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("desire_idx")] >= 5.29755002169, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
Log.info("Filtering out rows by >= from dataset leads and column \"risk_idx\" using value 0.846826419107, and also subsetting columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 0.846826419107, c("risk_idx")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[hex[,c("risk_idx")] >= 0.846826419107, c("item_price","frequent_customer","desire_idx","risk_idx","class","date")]
}
# Open the H2O connection, run the test, and map outcomes to the harness:
# warning -> WARN(), error -> FAIL(), otherwise PASS().
conn <- new("H2OClient", ip = myIP, port = myPort)
tryCatch(
  test_that("simpleFilterTest_ on data leads", simpleFilterTest_leads_83(conn)),
  warning = function(w) WARN(w),
  error = function(e) FAIL(e)
)
PASS()
|
#Load Data
# Load household power consumption data for 2007-02-01 and 2007-02-02.
#
# Reads "household_power_consumption.txt" (semicolon-separated, "?" marks
# missing values) from the working directory, parses the Date/Time columns,
# and keeps only the two target dates.
#
# Returns: a data.frame with Date (Date), Time (POSIXlt datetime built from
# the original date + time) and seven numeric measurement columns.
loadPowerData <- function() {
  data <- read.table("household_power_consumption.txt",
                     header = TRUE,
                     sep = ";",
                     colClasses = c("character", "character", rep("numeric", 7)),
                     # Bug fix: was `na = "?"`, which only worked through
                     # partial argument matching of `na.strings`.
                     na.strings = "?")
  # Combine Date and Time into a full datetime BEFORE Date is converted,
  # since strptime needs the original "%d/%m/%Y" character form.
  data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
  data$Date <- as.Date(data$Date, "%d/%m/%Y")
  # Keep only the two days of interest.
  dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
  data <- subset(data, Date %in% dates)
  return(data)
}
# Plot Global Active Power against time and save it as plot2.png.
#
# data - optional pre-loaded data.frame (as returned by loadPowerData()).
#        Bug fix: the argument was previously ignored and the data was
#        always reloaded; it is now used when supplied.
# Returns: the value of dev.off() (the device is closed on exit).
plot2 <- function(data = NULL) {
  if (is.null(data)) {
    data <- loadPowerData()
  }
  png("plot2.png", width = 600, height = 600)
  plot(data$Time, data$Global_active_power,
       type = "l",
       xlab = "",
       ylab = "Global Active Power (kilowatts)")
  dev.off()
}
|
/plot2.R
|
no_license
|
wongkaeperng/ExData_Plotting1
|
R
| false
| false
| 956
|
r
|
#Load Data
# Load household power consumption data for 2007-02-01 and 2007-02-02.
#
# Reads "household_power_consumption.txt" (semicolon-separated, "?" marks
# missing values) from the working directory, parses the Date/Time columns,
# and keeps only the two target dates.
#
# Returns: a data.frame with Date (Date), Time (POSIXlt datetime built from
# the original date + time) and seven numeric measurement columns.
loadPowerData <- function() {
  data <- read.table("household_power_consumption.txt",
                     header = TRUE,
                     sep = ";",
                     colClasses = c("character", "character", rep("numeric", 7)),
                     # Bug fix: was `na = "?"`, which only worked through
                     # partial argument matching of `na.strings`.
                     na.strings = "?")
  # Combine Date and Time into a full datetime BEFORE Date is converted,
  # since strptime needs the original "%d/%m/%Y" character form.
  data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
  data$Date <- as.Date(data$Date, "%d/%m/%Y")
  # Keep only the two days of interest.
  dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
  data <- subset(data, Date %in% dates)
  return(data)
}
# Plot Global Active Power against time and save it as plot2.png.
#
# data - optional pre-loaded data.frame (as returned by loadPowerData()).
#        Bug fix: the argument was previously ignored and the data was
#        always reloaded; it is now used when supplied.
# Returns: the value of dev.off() (the device is closed on exit).
plot2 <- function(data = NULL) {
  if (is.null(data)) {
    data <- loadPowerData()
  }
  png("plot2.png", width = 600, height = 600)
  plot(data$Time, data$Global_active_power,
       type = "l",
       xlab = "",
       ylab = "Global Active Power (kilowatts)")
  dev.off()
}
|
# Solving Carina: build an empirical null density f0 for the observed
# velocities v from a histogram of the Milky Way comparison sample.
hobj <- hist(milkyway, breaks = 100, plot = FALSE)
# Evaluate f0 at each observed velocity by linearly interpolating the
# histogram density across its bin midpoints.
f0v <- approx(hobj$mids, hobj$density, v)$y
# Diagnostic overlays, kept for reference:
# hist(milkyway, breaks = 50, freq =FALSE)
# points(v[order(v)], f0v[order(v)], cex = 0.2, col = 'blue', type = 'l')
# EM for a two-groups mixture with a monotone (in x) mixing proportion.
#
# Fits y_i ~ pi(x_i) N(mu, sig^2) + (1 - pi(x_i)) f0(y_i), where pi() is
# constrained to be non-increasing in x via the pool-adjacent-violators
# algorithm (Iso::pava).
#
# y       - observed responses (line-of-sight velocities)
# x       - covariate ordering the monotone mixing proportion
# f0y     - null density f0 evaluated at each y
# tol     - convergence tolerance on mean absolute change in the weights
# maxit   - maximum number of EM iterations
# verbose - show a text progress bar?
# Returns a list: p (pi estimates, original order), mu and sig (Normal
# parameters of the alternative component), w (posterior weights, original
# order).
emcarina <- function(y, x, f0y, tol = 1e-6, maxit = 1e3, verbose = FALSE){
  # Fail loudly if a dependency is absent: the original require() calls only
  # warn, and the function then dies later with a cryptic
  # "could not find function 'pava'" error.
  if (!requireNamespace("Iso", quietly = TRUE)) stop("package 'Iso' is required")
  if (!requireNamespace("Matrix", quietly = TRUE)) stop("package 'Matrix' is required")
  # Sort observations by the covariate so pava() can impose monotonicity;
  # iox undoes the permutation when returning results.
  ox <- order(x)
  yy <- y[ox]
  f0yy <- f0y[ox]
  iox <- Matrix::invPerm(ox)
  # Start with maximally uncertain posterior weights.
  w <- rep(1/2, length(yy))
  itcount <- 0
  if (verbose) pb <- txtProgressBar(max = maxit, style = 3)
  repeat {
    # M-step: weighted Normal MLE for the alternative component f1.
    mu <- sum(w * yy) / sum(w)
    sig <- sqrt(sum(w * (yy - mu)^2) / sum(w))
    # M-step: isotonic (non-increasing in x) estimate of pi.
    pivec <- Iso::pava(w, decreasing = TRUE)
    # E-step: posterior probability each observation comes from f1.
    wnew <- pivec * dnorm(yy, mu, sig) /
      (pivec * dnorm(yy, mu, sig) + (1 - pivec) * f0yy)
    itererr <- mean(abs(wnew - w))
    w <- wnew
    itcount <- itcount + 1
    if (verbose) setTxtProgressBar(pb, itcount)
    # Stop on convergence of the weights or once maxit iterations have run
    # (fixes an off-by-one: the original ran one extra iteration past maxit
    # because it tested itcount before incrementing it).
    if (itererr <= tol || itcount >= maxit) break
  }
  return(list(p = pivec[iox], mu = mu, sig = sig, w = w[iox]))
}
# Fit the covariate model: pi is non-increasing in the distance R from the
# galaxy center; v holds the observed velocities, f0v the null density.
eo = emcarina(v, R, f0v, verbose = TRUE)
# Hard classification from the posterior weights: label 1 = Carina
# (w > 0.5), label 2 = Milky Way (w <= 0.5).
labels_cov = 1 + (eo$w <= 0.5)
# mean(abs(eo$w - postp))
plot(R[order(R)], eo$p[order(R)], type = 'l', ylim = c(0,1),xlab="Distance from center (X)", ylab="Estimated frequency of Carina Stars",cex.lab=1.5,cex.main=1.5)
# carina no covariates
# EM for the plain two-groups model (constant mixing proportion).
#
# Fits y_i ~ p N(mu, sig^2) + (1 - p) f0(y_i) with a single scalar prior
# probability p shared by all observations.
#
# y       - observed responses
# f0y     - null density f0 evaluated at each y
# tol     - convergence tolerance on mean absolute change in the weights
# maxit   - maximum number of EM iterations
# verbose - show a text progress bar?
# Returns a list: p (mixing proportion), mu and sig (Normal parameters of
# the alternative component), w (posterior weights).
emcarina_twog <- function(y, f0y, tol = 1e-6, maxit = 1e3, verbose = FALSE){
  # NOTE(review): the original called require(Iso) and require(Matrix), but
  # this function uses neither package, so those dead loads were removed.
  w <- rep(1/2, length(y))  # start with maximally uncertain weights
  itcount <- 0
  if (verbose) pb <- txtProgressBar(max = maxit, style = 3)
  repeat {
    # M-step: mixing proportion is the mean posterior weight.
    p <- mean(w)
    # M-step: weighted Normal MLE for the alternative component f1.
    mu <- sum(w * y) / sum(w)
    sig <- sqrt(sum(w * (y - mu)^2) / sum(w))
    # E-step: posterior probability of membership in f1.
    wnew <- p * dnorm(y, mu, sig) / (p * dnorm(y, mu, sig) + (1 - p) * f0y)
    itererr <- mean(abs(wnew - w))
    w <- wnew
    itcount <- itcount + 1
    if (verbose) setTxtProgressBar(pb, itcount)
    # Stop on convergence or once maxit iterations have run (fixes the
    # original's off-by-one, which ran one extra iteration past maxit).
    if (itererr <= tol || itcount >= maxit) break
  }
  return(list(p = p, mu = mu, sig = sig, w = w))
}
eo2g = emcarina_twog(v, f0v)
labels_2g = 1 + (eo2g$w <= 0.5)
abline(h=eo2g$p,col="red")
legend(x="topright", legend = c('Covariates model', 'Two-groups'),col=c('blue', 'red'), lty = 1, bty = 'n',lwd=4,cex=2)
# difference in the two solutions
sum(labels_cov != labels_2g)
labels_cov[which(labels_cov != labels_2g)]
labels_2g[which(labels_cov != labels_2g)]
cols = c('red', 'blue')
pdf('carina_class_cov.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R, v, cex = 0.7, col = cols[labels_cov],
xlab = 'Distance from Center (X)',
ylab = 'Line-of-Sigth Velocity (Y)',
main = 'Two-groups with Covariates',pch=20,cex.lab=1.5,cex.main=1.5)
legend(x = 'topleft', legend = c('Carina', 'Milky Way'), col = cols, lwd = 4, bty = 'n',cex=2)
dev.off()
pdf('carina_class_2g.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R, v, cex = 0.7, col = cols[labels_2g],
xlab = 'Distance from Center (X)',
ylab = 'Line-of-Sigth Velocity (Y)',
main = 'Two-groups',pch=20,cex.lab=1.5,cex.main=1.5)
legend(x = 'topleft', legend = c('Carina', 'Milky Way'), col = cols, lwd = 4, bty = 'n', cex=2)
dev.off()
pdf('carina_pi_est.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R[order(R)], eo$p[order(R)], type = 'l',
xlab = 'Distance from Center (X)',
ylab = 'Estimated Frequency of Carina')
abline(h = eo2g$p, col = 'red')
legend(x = 'topright', legend = c('Covariates', 'Two Groups'), col = c('black', 'red'), lwd = 2, bty = 'n')
dev.off()
####### All Plots #######
fmlelfdr=eo$w
scottlfdr=eo2g$w
### 1 ###
fdr_nominal=0.1
lfdr1=fmlelfdr
sl1 = sort(lfdr1)
k1 = sum(cumsum(sl1)/seq_along(sl1) <= fdr_nominal)
sl1[k1]
sl1vec=which(fmlelfdr<sl1[k1])
lfdr2=scott_res$localfdr
sl2 = sort(lfdr2)
k2 = sum(cumsum(sl2)/seq_along(sl2) <= fdr_nominal)
sl2[k2]
sl2vec=which(scott_res$localfdr<sl2[k2])
plot(v,1-eo$w,pch=20,col="blue",cex=0.8,xlab="Test statistic",ylab="lFDR",cex.lab=1.5)
points(v,1-eo2g$w,pch=20,col="red",cex=0.8)
abline(h=sl1[k1],lty=2,col="red")
abline(h=sl2[k2],lty=2,col="blue")
legend(x = "topright", legend = c('2g lFDR', 'fMLE lFDR'), col = c('blue', 'red'), lty = 1, lwd = 3, bty = 'n',cex=2)
### 2 ###
ttx=seq(0,1,0.001)
tty=seq(0,1,0.001)
plot(fmlelfdr,scottlfdr,col="red",pch=20,cex=0.8,cex.lab=1.5)
lines(ttx,tty,col="blue",lty=2,lwd=4)
ind=which(fmlelfdr>scottlfdr)
points(fmlelfdr[ind],scottlfdr[ind],pch=20,cex=0.8)
legend(x = "topleft", legend = c('y=x line'), col = c('blue'), lty = 2, lwd=4, bty = 'n',cex=2)
### 3 ###
hist(v, breaks = 100, prob=TRUE, col='lightgrey', border='grey',
main='Fitted marginals for 2 groups', xlab='line of sight velocity',xlim=c(-100,350),ylim=c(0,0.03),cex.lab=1.5,cex.main=1.5)
rug(v)
m1=eo2g$p*sapply(((v-eo2g$mu)/(eo2g$sig)),dnorm)
m2=(1-eo2g$p)*f0v
m3=m1+m2
points(v[order(v)], m1[order(v)], col='red',ty='l')
points(v[order(v)], m2[order(v)], col='blue',ty='l',lty=2)
points(v[order(v)], m3[order(v)],ty='l',lwd=2)
legend(x = 2.5, y=0.3, bty = 'n',
legend = c(expression(bar(pi)~f[1] + (1-bar(pi))~f[0]),
expression(bar(pi)~f[1]),
expression((1-bar(pi))~f[0])),
col = c('black', 'red', 'blue'), lty= 1,lwd=2,cex=1.3)
# lets plot the histogram overlapped with scaled versions of f0 and f1
#pdf('histogram_fitted_densities_new.pdf', width = 10, height = 10)
### 4 ###
hist(v, breaks = 100, prob=TRUE, col='lightgrey', border='grey',
main='Fitted marginals for covariate model', xlab='line of sight velocity',xlim=c(-100,350),ylim=c(0,0.03),cex.lab=1.5,cex.main=1.5)
rug(v)
m1=eo$p*sapply(((v-eo$mu)/(eo$sig)),dnorm)
m2=(1-eo$p)*f0v
m3=m1+m2
points(v[order(v)], m1[order(v)], col='red',ty='l')
points(v[order(v)], m2[order(v)], col='blue',ty='l',lty=2)
points(v[order(v)], m3[order(v)],ty='l',lwd=2)
legend(x = 2.5, y=0.3, bty = 'n',
legend = c(expression(bar(pi)~f[1] + (1-bar(pi))~f[0]),
expression(bar(pi)~f[1]),
expression((1-bar(pi))~f[0])),
col = c('black', 'red', 'blue'), lty= 1,lwd=2,cex=1.3)
# lets plot the histogram overlapped with scaled versions of f0 and f1
#pdf('histogram_fitted_densities_new.pdf', width = 10, height = 10)
### 5 ###
plot(xtbs$x, am1_res$p[xtbs$ix], type = 'l', ylim = c(0,1), col = 'darkcyan',
ylab = expression(hat(pi)), xlab = expression(x^T~hat(beta)),cex.lab=1.1,cex.axis=1,cex.main=1,ps=12)
lines(xtbs$x, am2_res$p[xtbs$ix], col = 'blue')
lines(xtbs$x, scott_res$priorprob[xtbs$ix], col = 'red')
# write a legend for this plot
legend(x = 'topleft',
legend = c('scott', 'marginal1+fullmle', 'marginal2+fullmle'),
col = c('red', 'darkcyan', 'blue'), lty = 1, bty = 'n',cex=2,pt.cex=1)
### 6 ###
image(x1breaks, x2breaks, mat_combined, xlab = 'Dist', ylab = 'TuningCor',
col = colors_list,cex.lab=1.5,cex.axis=1.5)
legend(x = 2200, y=0.7, legend = c('both methods', 'only fullmle'), col = c('blue', 'green4'), lty = 1, bty = 'n',lwd=3,cex=2)
### 7 ###
os = setdiff(sl2vec,sl1vec)
bm = intersect(sl2vec,sl1vec)
oa = setdiff(sl1vec,sl2vec)
hist(ddfull$z[bm],col="lightgrey",breaks=50,xlim=c(1,6),xlab="Test statistic",cex.lab=1.3,cex.main=1.5,
ylab="Frequency of rejection",main="Histogram on rejection")
hist(ddfull$z[oa],col="red",breaks=50,add=T,xlim=c(1,6))
hist(ddfull$z[os],col="blue",breaks=50,add=T,xlim=c(1,6))
legend(x="topright", legend = c('both methods', 'only fullmle','only FDRreg'),col=c('lightgrey', 'red','blue'), lty = 1, bty = 'n',lwd=4,cex=2)
### new code ###
dim(x)
# Sort by the first column of x (the covariate); column 4 is the response,
# reordered to match. NOTE(review): x's column schema is not visible in this
# script -- confirm columns 1 and 4 are covariate/response as assumed here.
y1=sort(x[,1])
y2=x[,4][order(x[,1])]
ourdat=cbind(y1,y2)
#ourdat=x[,c(1,4)]
# Box constraints for the component standard deviations in optim().
ubd=5
lbd=1
l=length(ourdat[,1])
# Initial (uninformative) weights, iteration count, and the null density
# reordered to the covariate-sorted observations.
w=rep(0.5,l)
niter=50
f0v=f0v[order(x[,1])]
pivec=NULL
denvec=NULL
# Alternating optimisation: isotonic update of pi, then a box-constrained
# numeric MLE for a two-component Normal alternative density f1 (two Normals
# sharing a common mean, different scales).
for (i in 1:niter)
{
  # Isotonic (non-increasing in the covariate) estimate of pi.
  pivec <- pava(w, decreasing = TRUE)
  # Negative mean floored log-likelihood of the alternative density;
  # p = (mixing weight, common mean, sd1, sd2).
  f <- function(p)
  {
    dens <- p[1] * dnorm((ourdat[, 2] - p[2]) / p[3]) +
      (1 - p[1]) * dnorm((ourdat[, 2] - p[2]) / p[4])
    # Bug fix: the original used max(), which collapsed the whole
    # log-likelihood vector to a single scalar; pmax() floors each
    # observation's term at -200 as intended by the mean() below.
    sst <- pmax(log(dens), -200)
    # NOTE(review): dnorm((y - mu)/s) is not divided by s, so these are not
    # proper scaled densities; kept as in the original model -- verify.
    return(-mean(sst))
  }
  est <- optim(c(0.3, 210, 2, 2), f, method = "L-BFGS-B",
               lower = c(0.02, 200, lbd, lbd), upper = c(0.98, 250, ubd, ubd),
               hessian = FALSE)$par
  denvec <- est[1] * dnorm((ourdat[, 2] - est[2]) / est[3]) +
    (1 - est[1]) * dnorm((ourdat[, 2] - est[2]) / est[4])
  # NOTE(review): w is set to the mixture density, not a [0,1] posterior
  # weight as in emcarina(); this mirrors the original -- confirm intent.
  w <- pivec * denvec + (1 - pivec) * f0v
  print(i)
}
# Inspect the fitted f1 parameters and the isotonic pi curve.
est
plot(ourdat[,1],pivec)
# NOTE(review): `ii` is not defined anywhere in this script -- presumably a
# saved parameter vector analogous to `est`; confirm before running.
sst=ii[1]*sapply((ourdat[,2]-ii[2])/ii[3], dnorm)+(1-ii[1])*sapply((ourdat[,2]-ii[2])/ii[4],dnorm)
|
/AstroDataSets/mmfs_data/carina_em_methods.R
|
no_license
|
NabarunD/NPMLEmix
|
R
| false
| false
| 8,805
|
r
|
# solving carina
# need a lookup for f0(v)
hobj = hist(milkyway, breaks = 100, plot = FALSE)
# make a function by linearly interpolating between this fit
f0v = approx(hobj$mids, hobj$density, v)$y
# hist(milkyway, breaks = 50, freq =FALSE)
# points(v[order(v)], f0v[order(v)], cex = 0.2, col = 'blue', type = 'l')
# EM solution for carina
# using only R
# EM for a two-groups mixture with a monotone (in x) mixing proportion.
#
# Fits y_i ~ pi(x_i) N(mu, sig^2) + (1 - pi(x_i)) f0(y_i), where pi() is
# constrained to be non-increasing in x via the pool-adjacent-violators
# algorithm (Iso::pava).
#
# y       - observed responses (line-of-sight velocities)
# x       - covariate ordering the monotone mixing proportion
# f0y     - null density f0 evaluated at each y
# tol     - convergence tolerance on mean absolute change in the weights
# maxit   - maximum number of EM iterations
# verbose - show a text progress bar?
# Returns a list: p (pi estimates, original order), mu and sig (Normal
# parameters of the alternative component), w (posterior weights, original
# order).
emcarina <- function(y, x, f0y, tol = 1e-6, maxit = 1e3, verbose = FALSE){
  # Fail loudly if a dependency is absent: the original require() calls only
  # warn, and the function then dies later with a cryptic
  # "could not find function 'pava'" error.
  if (!requireNamespace("Iso", quietly = TRUE)) stop("package 'Iso' is required")
  if (!requireNamespace("Matrix", quietly = TRUE)) stop("package 'Matrix' is required")
  # Sort observations by the covariate so pava() can impose monotonicity;
  # iox undoes the permutation when returning results.
  ox <- order(x)
  yy <- y[ox]
  f0yy <- f0y[ox]
  iox <- Matrix::invPerm(ox)
  # Start with maximally uncertain posterior weights.
  w <- rep(1/2, length(yy))
  itcount <- 0
  if (verbose) pb <- txtProgressBar(max = maxit, style = 3)
  repeat {
    # M-step: weighted Normal MLE for the alternative component f1.
    mu <- sum(w * yy) / sum(w)
    sig <- sqrt(sum(w * (yy - mu)^2) / sum(w))
    # M-step: isotonic (non-increasing in x) estimate of pi.
    pivec <- Iso::pava(w, decreasing = TRUE)
    # E-step: posterior probability each observation comes from f1.
    wnew <- pivec * dnorm(yy, mu, sig) /
      (pivec * dnorm(yy, mu, sig) + (1 - pivec) * f0yy)
    itererr <- mean(abs(wnew - w))
    w <- wnew
    itcount <- itcount + 1
    if (verbose) setTxtProgressBar(pb, itcount)
    # Stop on convergence of the weights or once maxit iterations have run
    # (fixes an off-by-one: the original ran one extra iteration past maxit
    # because it tested itcount before incrementing it).
    if (itererr <= tol || itcount >= maxit) break
  }
  return(list(p = pivec[iox], mu = mu, sig = sig, w = w[iox]))
}
eo = emcarina(v, R, f0v, verbose = TRUE)
labels_cov = 1 + (eo$w <= 0.5)
# mean(abs(eo$w - postp))
plot(R[order(R)], eo$p[order(R)], type = 'l', ylim = c(0,1),xlab="Distance from center (X)", ylab="Estimated frequency of Carina Stars",cex.lab=1.5,cex.main=1.5)
# carina no covariates
# EM for the plain two-groups model (constant mixing proportion).
#
# Fits y_i ~ p N(mu, sig^2) + (1 - p) f0(y_i) with a single scalar prior
# probability p shared by all observations.
#
# y       - observed responses
# f0y     - null density f0 evaluated at each y
# tol     - convergence tolerance on mean absolute change in the weights
# maxit   - maximum number of EM iterations
# verbose - show a text progress bar?
# Returns a list: p (mixing proportion), mu and sig (Normal parameters of
# the alternative component), w (posterior weights).
emcarina_twog <- function(y, f0y, tol = 1e-6, maxit = 1e3, verbose = FALSE){
  # NOTE(review): the original called require(Iso) and require(Matrix), but
  # this function uses neither package, so those dead loads were removed.
  w <- rep(1/2, length(y))  # start with maximally uncertain weights
  itcount <- 0
  if (verbose) pb <- txtProgressBar(max = maxit, style = 3)
  repeat {
    # M-step: mixing proportion is the mean posterior weight.
    p <- mean(w)
    # M-step: weighted Normal MLE for the alternative component f1.
    mu <- sum(w * y) / sum(w)
    sig <- sqrt(sum(w * (y - mu)^2) / sum(w))
    # E-step: posterior probability of membership in f1.
    wnew <- p * dnorm(y, mu, sig) / (p * dnorm(y, mu, sig) + (1 - p) * f0y)
    itererr <- mean(abs(wnew - w))
    w <- wnew
    itcount <- itcount + 1
    if (verbose) setTxtProgressBar(pb, itcount)
    # Stop on convergence or once maxit iterations have run (fixes the
    # original's off-by-one, which ran one extra iteration past maxit).
    if (itererr <= tol || itcount >= maxit) break
  }
  return(list(p = p, mu = mu, sig = sig, w = w))
}
eo2g = emcarina_twog(v, f0v)
labels_2g = 1 + (eo2g$w <= 0.5)
abline(h=eo2g$p,col="red")
legend(x="topright", legend = c('Covariates model', 'Two-groups'),col=c('blue', 'red'), lty = 1, bty = 'n',lwd=4,cex=2)
# difference in the two solutions
sum(labels_cov != labels_2g)
labels_cov[which(labels_cov != labels_2g)]
labels_2g[which(labels_cov != labels_2g)]
cols = c('red', 'blue')
pdf('carina_class_cov.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R, v, cex = 0.7, col = cols[labels_cov],
xlab = 'Distance from Center (X)',
ylab = 'Line-of-Sigth Velocity (Y)',
main = 'Two-groups with Covariates',pch=20,cex.lab=1.5,cex.main=1.5)
legend(x = 'topleft', legend = c('Carina', 'Milky Way'), col = cols, lwd = 4, bty = 'n',cex=2)
dev.off()
pdf('carina_class_2g.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R, v, cex = 0.7, col = cols[labels_2g],
xlab = 'Distance from Center (X)',
ylab = 'Line-of-Sigth Velocity (Y)',
main = 'Two-groups',pch=20,cex.lab=1.5,cex.main=1.5)
legend(x = 'topleft', legend = c('Carina', 'Milky Way'), col = cols, lwd = 4, bty = 'n', cex=2)
dev.off()
pdf('carina_pi_est.pdf', width = 7, height = 7)
par(bty = 'l')
plot(R[order(R)], eo$p[order(R)], type = 'l',
xlab = 'Distance from Center (X)',
ylab = 'Estimated Frequency of Carina')
abline(h = eo2g$p, col = 'red')
legend(x = 'topright', legend = c('Covariates', 'Two Groups'), col = c('black', 'red'), lwd = 2, bty = 'n')
dev.off()
####### All Plots #######
fmlelfdr=eo$w
scottlfdr=eo2g$w
### 1 ###
fdr_nominal=0.1
lfdr1=fmlelfdr
sl1 = sort(lfdr1)
k1 = sum(cumsum(sl1)/seq_along(sl1) <= fdr_nominal)
sl1[k1]
sl1vec=which(fmlelfdr<sl1[k1])
lfdr2=scott_res$localfdr
sl2 = sort(lfdr2)
k2 = sum(cumsum(sl2)/seq_along(sl2) <= fdr_nominal)
sl2[k2]
sl2vec=which(scott_res$localfdr<sl2[k2])
plot(v,1-eo$w,pch=20,col="blue",cex=0.8,xlab="Test statistic",ylab="lFDR",cex.lab=1.5)
points(v,1-eo2g$w,pch=20,col="red",cex=0.8)
abline(h=sl1[k1],lty=2,col="red")
abline(h=sl2[k2],lty=2,col="blue")
legend(x = "topright", legend = c('2g lFDR', 'fMLE lFDR'), col = c('blue', 'red'), lty = 1, lwd = 3, bty = 'n',cex=2)
### 2 ###
ttx=seq(0,1,0.001)
tty=seq(0,1,0.001)
plot(fmlelfdr,scottlfdr,col="red",pch=20,cex=0.8,cex.lab=1.5)
lines(ttx,tty,col="blue",lty=2,lwd=4)
ind=which(fmlelfdr>scottlfdr)
points(fmlelfdr[ind],scottlfdr[ind],pch=20,cex=0.8)
legend(x = "topleft", legend = c('y=x line'), col = c('blue'), lty = 2, lwd=4, bty = 'n',cex=2)
### 3 ###
hist(v, breaks = 100, prob=TRUE, col='lightgrey', border='grey',
main='Fitted marginals for 2 groups', xlab='line of sight velocity',xlim=c(-100,350),ylim=c(0,0.03),cex.lab=1.5,cex.main=1.5)
rug(v)
m1=eo2g$p*sapply(((v-eo2g$mu)/(eo2g$sig)),dnorm)
m2=(1-eo2g$p)*f0v
m3=m1+m2
points(v[order(v)], m1[order(v)], col='red',ty='l')
points(v[order(v)], m2[order(v)], col='blue',ty='l',lty=2)
points(v[order(v)], m3[order(v)],ty='l',lwd=2)
legend(x = 2.5, y=0.3, bty = 'n',
legend = c(expression(bar(pi)~f[1] + (1-bar(pi))~f[0]),
expression(bar(pi)~f[1]),
expression((1-bar(pi))~f[0])),
col = c('black', 'red', 'blue'), lty= 1,lwd=2,cex=1.3)
# lets plot the histogram overlapped with scaled versions of f0 and f1
#pdf('histogram_fitted_densities_new.pdf', width = 10, height = 10)
### 4 ###
hist(v, breaks = 100, prob=TRUE, col='lightgrey', border='grey',
main='Fitted marginals for covariate model', xlab='line of sight velocity',xlim=c(-100,350),ylim=c(0,0.03),cex.lab=1.5,cex.main=1.5)
rug(v)
m1=eo$p*sapply(((v-eo$mu)/(eo$sig)),dnorm)
m2=(1-eo$p)*f0v
m3=m1+m2
points(v[order(v)], m1[order(v)], col='red',ty='l')
points(v[order(v)], m2[order(v)], col='blue',ty='l',lty=2)
points(v[order(v)], m3[order(v)],ty='l',lwd=2)
legend(x = 2.5, y=0.3, bty = 'n',
legend = c(expression(bar(pi)~f[1] + (1-bar(pi))~f[0]),
expression(bar(pi)~f[1]),
expression((1-bar(pi))~f[0])),
col = c('black', 'red', 'blue'), lty= 1,lwd=2,cex=1.3)
# lets plot the histogram overlapped with scaled versions of f0 and f1
#pdf('histogram_fitted_densities_new.pdf', width = 10, height = 10)
### 5 ###
plot(xtbs$x, am1_res$p[xtbs$ix], type = 'l', ylim = c(0,1), col = 'darkcyan',
ylab = expression(hat(pi)), xlab = expression(x^T~hat(beta)),cex.lab=1.1,cex.axis=1,cex.main=1,ps=12)
lines(xtbs$x, am2_res$p[xtbs$ix], col = 'blue')
lines(xtbs$x, scott_res$priorprob[xtbs$ix], col = 'red')
# write a legend for this plot
legend(x = 'topleft',
legend = c('scott', 'marginal1+fullmle', 'marginal2+fullmle'),
col = c('red', 'darkcyan', 'blue'), lty = 1, bty = 'n',cex=2,pt.cex=1)
### 6 ###
image(x1breaks, x2breaks, mat_combined, xlab = 'Dist', ylab = 'TuningCor',
col = colors_list,cex.lab=1.5,cex.axis=1.5)
legend(x = 2200, y=0.7, legend = c('both methods', 'only fullmle'), col = c('blue', 'green4'), lty = 1, bty = 'n',lwd=3,cex=2)
### 7 ###
os = setdiff(sl2vec,sl1vec)
bm = intersect(sl2vec,sl1vec)
oa = setdiff(sl1vec,sl2vec)
hist(ddfull$z[bm],col="lightgrey",breaks=50,xlim=c(1,6),xlab="Test statistic",cex.lab=1.3,cex.main=1.5,
ylab="Frequency of rejection",main="Histogram on rejection")
hist(ddfull$z[oa],col="red",breaks=50,add=T,xlim=c(1,6))
hist(ddfull$z[os],col="blue",breaks=50,add=T,xlim=c(1,6))
legend(x="topright", legend = c('both methods', 'only fullmle','only FDRreg'),col=c('lightgrey', 'red','blue'), lty = 1, bty = 'n',lwd=4,cex=2)
### new code ###
dim(x)
y1=sort(x[,1])
y2=x[,4][order(x[,1])]
ourdat=cbind(y1,y2)
#ourdat=x[,c(1,4)]
ubd=5
lbd=1
l=length(ourdat[,1])
w=rep(0.5,l)
niter=50
f0v=f0v[order(x[,1])]
pivec=NULL
denvec=NULL
# Alternating optimisation: isotonic update of pi, then a box-constrained
# numeric MLE for a two-component Normal alternative density f1 (two Normals
# sharing a common mean, different scales).
for (i in 1:niter)
{
  # Isotonic (non-increasing in the covariate) estimate of pi.
  pivec <- pava(w, decreasing = TRUE)
  # Negative mean floored log-likelihood of the alternative density;
  # p = (mixing weight, common mean, sd1, sd2).
  f <- function(p)
  {
    dens <- p[1] * dnorm((ourdat[, 2] - p[2]) / p[3]) +
      (1 - p[1]) * dnorm((ourdat[, 2] - p[2]) / p[4])
    # Bug fix: the original used max(), which collapsed the whole
    # log-likelihood vector to a single scalar; pmax() floors each
    # observation's term at -200 as intended by the mean() below.
    sst <- pmax(log(dens), -200)
    # NOTE(review): dnorm((y - mu)/s) is not divided by s, so these are not
    # proper scaled densities; kept as in the original model -- verify.
    return(-mean(sst))
  }
  est <- optim(c(0.3, 210, 2, 2), f, method = "L-BFGS-B",
               lower = c(0.02, 200, lbd, lbd), upper = c(0.98, 250, ubd, ubd),
               hessian = FALSE)$par
  denvec <- est[1] * dnorm((ourdat[, 2] - est[2]) / est[3]) +
    (1 - est[1]) * dnorm((ourdat[, 2] - est[2]) / est[4])
  # NOTE(review): w is set to the mixture density, not a [0,1] posterior
  # weight as in emcarina(); this mirrors the original -- confirm intent.
  w <- pivec * denvec + (1 - pivec) * f0v
  print(i)
}
est
plot(ourdat[,1],pivec)
sst=ii[1]*sapply((ourdat[,2]-ii[2])/ii[3], dnorm)+(1-ii[1])*sapply((ourdat[,2]-ii[2])/ii[4],dnorm)
|
# web scraping figure skater personal bests
# http://www.isuresults.com/bios/fsbiosmen.htm and http://www.isuresults.com/bios/fsbiosladies.htm
# biographies of figure skaters.
library(rvest)
library(stringr)
library(pbapply) # for progress bar
# get URLs for each skater
# Biography links on the ISU index pages are marked with the element id
# "HyperLink_Biography"; grep those lines, pull out the relative
# "/bios/....htm" path, and prepend the host to absolutise it.
male_urls <- "http://www.isuresults.com/bios/fsbiosmen.htm"
males <- readLines(male_urls)
males <- males[grepl(pattern = "HyperLink_Biography", x = males)]
urlsM <- str_extract(males, pattern = "/bios/.+\\.htm")
urlsM <- paste0("http://www.isuresults.com",urlsM)
# Same extraction for the ladies' index page.
female_urls <- "http://www.isuresults.com/bios/fsbiosladies.htm"
females <- readLines(female_urls)
females <- females[grepl(pattern = "HyperLink_Biography", x = females)]
urlsF <- str_extract(females, pattern = "/bios/.+\\.htm")
urlsF <- paste0("http://www.isuresults.com",urlsF)
# allURLS <- rbind(data.frame(URL = urlsF, gender = "Female", stringsAsFactors = F),
# data.frame(URL= urlsM, gender = "Male", stringsAsFactors = F))
# allURLS <- c(urlsF, urlsM)
# Drop intermediates; only urlsF/urlsM are needed downstream.
rm(female_urls, male_urls, females, males)
# function to get URLs for personal bests
# Given the URL (or local path) of a skater biography page, return the
# absolute URL(s) of the linked "Personal Best" page(s).
#
# x - URL/path readable by readLines().
# Returns: character vector of personal-best URLs; NA when a "Personal Best"
# line exists but no .htm target is found; NULL (silently dropped by the
# callers' unlist()) when the page has no "Personal Best" line at all.
getPBurl <- function(x){
  tmp <- readLines(x)
  get <- grepl("Personal Best", tmp)
  if(any(get)){
    tmp <- tmp[get]
    # Split on double quotes so every href attribute value becomes its own
    # token (base strsplit replaces the original stringr::str_split call).
    tmp <- unlist(strsplit(tmp, split = "\"", fixed = TRUE))
    # Bug fix: the original pattern ".htm$" left the dot unescaped, so ANY
    # character before "htm" matched; "\\.htm$" requires a literal ".htm".
    is_htm <- grepl(pattern = "\\.htm$", tmp)
    if(any(is_htm)){
      pburl <- tmp[is_htm]
      pburl <- paste0("http://www.isuresults.com/bios/", pburl)
      pburl
    } else {
      NA
    }
  }
}
# get personal best URLs - female
# unlist() drops the NULLs (pages without a Personal Best line) and
# na.omit() drops the NAs (pages whose link target was missing); the
# trailing counts record the sizes from the original run.
pburlsF <- pblapply(urlsF, getPBurl)
pburlsF <- na.omit(unlist(pburlsF)) # 1090
# get personal best URLs - male
pburlsM <- pblapply(urlsM, getPBurl)
pburlsM <- na.omit(unlist(pburlsM)) # 674
# function to get personal bests
# Scrape one "Personal Best" page and return its results table, annotated
# with the skater's name and gender.
#   x      - URL of a personal-best page
#   gender - label ("Female"/"Male") copied into every returned row
# Returns the table as a data.frame, or NULL (after printing the error
# message) when the page has no parseable table, so callers can drop it.
getPBinfo <- function(x, gender){
  tryCatch({ # to skip empty tables, which throws an error
    tmp <- read_html(x)
    # The first table holds the skater header; cell [1,1] is the name.
    name <- html_table(html_nodes(tmp, "table")[[1]])[1,1]
    # The second table holds the personal-best scores.
    dat <- html_table(html_nodes(tmp, "table")[[2]], fill = TRUE)
    # Drop the trailing row when every cell repeats the first column's value
    # (a footer line such as a date).
    if(all(dat[nrow(dat),1] == dat[nrow(dat),], na.rm = TRUE)){
      dat <- dat[-nrow(dat),] # drop last row if it has a date
    }
    dat$Name <- name
    dat$Gender <- gender
    if(names(dat)[1]==""){ # drop first column if its header is empty
      dat <- dat[,-1]
    }
    dat
  }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
# Scrape every personal-best page (list of data.frames), stack per gender,
# then combine into one table.
pbsF <- pblapply(pburlsF, getPBinfo, gender = "Female")
pbsM <- pblapply(pburlsM, getPBinfo, gender = "Male")
pbsDataF <- do.call(rbind, pbsF)
pbsDataM <- do.call(rbind, pbsM)
pbData <- rbind(pbsDataF, pbsDataM)
# clean up "historical record" rows
# table(pbsDataF$Type)
pbData <- pbData[pbData$Type != "H - Historic Record achieved before 2010/2011 season",]
# Persist both raw and combined objects so the scrape need not be re-run.
save(urlsF, urlsM, pburlsF, pburlsM, pbData, pbsDataF, pbsDataM, file = "figSkate.Rda")
load("figSkate.Rda")
|
/fig-skate-web-scrape.R
|
no_license
|
clayford/fig-skate
|
R
| false
| false
| 2,904
|
r
|
# web scraping figure skater personal bests
# http://www.isuresults.com/bios/fsbiosmen.htm and http://www.isuresults.com/bios/fsbiosladies.htm
# biographies of figure skaters.
library(rvest)
library(stringr)
library(pbapply) # for progress bar
# get URLs for each skater
male_urls <- "http://www.isuresults.com/bios/fsbiosmen.htm"
males <- readLines(male_urls)
males <- males[grepl(pattern = "HyperLink_Biography", x = males)]
urlsM <- str_extract(males, pattern = "/bios/.+\\.htm")
urlsM <- paste0("http://www.isuresults.com",urlsM)
female_urls <- "http://www.isuresults.com/bios/fsbiosladies.htm"
females <- readLines(female_urls)
females <- females[grepl(pattern = "HyperLink_Biography", x = females)]
urlsF <- str_extract(females, pattern = "/bios/.+\\.htm")
urlsF <- paste0("http://www.isuresults.com",urlsF)
# allURLS <- rbind(data.frame(URL = urlsF, gender = "Female", stringsAsFactors = F),
# data.frame(URL= urlsM, gender = "Male", stringsAsFactors = F))
# allURLS <- c(urlsF, urlsM)
rm(female_urls, male_urls, females, males)
# function to get URLs for personal bests
# Given the URL (or local path) of a skater biography page, return the
# absolute URL(s) of the linked "Personal Best" page(s).
#
# x - URL/path readable by readLines().
# Returns: character vector of personal-best URLs; NA when a "Personal Best"
# line exists but no .htm target is found; NULL (silently dropped by the
# callers' unlist()) when the page has no "Personal Best" line at all.
getPBurl <- function(x){
  tmp <- readLines(x)
  get <- grepl("Personal Best", tmp)
  if(any(get)){
    tmp <- tmp[get]
    # Split on double quotes so every href attribute value becomes its own
    # token (base strsplit replaces the original stringr::str_split call).
    tmp <- unlist(strsplit(tmp, split = "\"", fixed = TRUE))
    # Bug fix: the original pattern ".htm$" left the dot unescaped, so ANY
    # character before "htm" matched; "\\.htm$" requires a literal ".htm".
    is_htm <- grepl(pattern = "\\.htm$", tmp)
    if(any(is_htm)){
      pburl <- tmp[is_htm]
      pburl <- paste0("http://www.isuresults.com/bios/", pburl)
      pburl
    } else {
      NA
    }
  }
}
# get personal best URLs - female
pburlsF <- pblapply(urlsF, getPBurl)
pburlsF <- na.omit(unlist(pburlsF)) # 1090
# get personal best URLs - male
pburlsM <- pblapply(urlsM, getPBurl)
pburlsM <- na.omit(unlist(pburlsM)) # 674
# function to get personal bests
getPBinfo <- function(x, gender){
tryCatch({ # to skip empty tables, which throws an error
tmp <- read_html(x)
name <- html_table(html_nodes(tmp, "table")[[1]])[1,1]
dat <- html_table(html_nodes(tmp, "table")[[2]], fill = TRUE)
if(all(dat[nrow(dat),1] == dat[nrow(dat),], na.rm = TRUE)){
dat <- dat[-nrow(dat),] # drop last row if it has a date
}
dat$Name <- name
dat$Gender <- gender
if(names(dat)[1]==""){ # drop 1st 1st column if empty
dat <- dat[,-1]
}
dat
}, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
pbsF <- pblapply(pburlsF, getPBinfo, gender = "Female")
pbsM <- pblapply(pburlsM, getPBinfo, gender = "Male")
pbsDataF <- do.call(rbind, pbsF)
pbsDataM <- do.call(rbind, pbsM)
pbData <- rbind(pbsDataF, pbsDataM)
# clean up "historical record" rows
# table(pbsDataF$Type)
pbData <- pbData[pbData$Type != "H - Historic Record achieved before 2010/2011 season",]
save(urlsF, urlsM, pburlsF, pburlsM, pbData, pbsDataF, pbsDataM, file = "figSkate.Rda")
load("figSkate.Rda")
|
# Fuzzer-generated regression input for biwavelet:::rcpp_row_quantile:
# a 1x6 numeric matrix mixing zeros, denormal-magnitude values and a huge
# value, with quantile q = 0; str() prints the result for inspection.
testlist <- list(data = structure(c(3.63959479422642e-23, 0, 2.47812124169512e-307, 3.52953640547268e+30, 6.06877987683043e-307, 0), .Dim = c(1L, 6L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556060-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 230
|
r
|
testlist <- list(data = structure(c(3.63959479422642e-23, 0, 2.47812124169512e-307, 3.52953640547268e+30, 6.06877987683043e-307, 0), .Dim = c(1L, 6L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/muestra_contribuyentes.R
\name{muestra_contribuyentes}
\alias{muestra_contribuyentes}
\title{Muestra de las y los Contribuyentes}
\usage{
muestra_contribuyentes(n = 500)
}
\arguments{
\item{n}{Un número entero no negativo que indica el número de elementos a
muestrear.}
}
\value{
Un `tibble` con las y los contribuyentes muestreadas.
}
\description{
Toma una muestra, con reemplazo, de las y los contribuyentes de este
repositorio.
}
\examples{
# Tomamos una muestra de 10 contribuyentes.
muestra_contribuyentes(10)
}
|
/man/muestra_contribuyentes.Rd
|
permissive
|
RichDeto/firstContributionsInR
|
R
| false
| true
| 599
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/muestra_contribuyentes.R
\name{muestra_contribuyentes}
\alias{muestra_contribuyentes}
\title{Muestra de las y los Contribuyentes}
\usage{
muestra_contribuyentes(n = 500)
}
\arguments{
\item{n}{Un número entero no negativo que indica el número de elementos a
muestrear.}
}
\value{
Un `tibble` con las y los contribuyentes muestreadas.
}
\description{
Toma una muestra, con reemplazo, de las y los contribuyentes de este
repositorio.
}
\examples{
# Tomamos una muestra de 10 contribuyentes.
muestra_contribuyentes(10)
}
|
##########################
# Have a crack at sample size for
# regional councils based on "heterogeneity"
# of their LCDB classification.
#
# Paul van Dam-Bates
# Decemeber 28, 2018
##########################
# Logic here is that heterogeneity is not a function of "area"
# but a function of heterogeneity of landcover.
# We will model birds as a function of different forest types and
# then act as if we will model them as such.
# Step 1: Add LCDB class to all sites.
setwd("..")
library(data.table)
library(dplyr)
library(readxl)
library(tidyverse)
library(simr)
library(rgdal)
library(sp)
library(rgeos)
library(sf)
library(reshape2)
library(raster) # intersect function to try.
# library(devtools)
# install_github("ogansell/MSampNZ")
library(MSampNZ)
# Helpful fast clip function.
# Fast clip of a Spatial* layer to a bounding box.
#
# shp: a Spatial* object carrying a proj4string.
# bb:  either a 2x2 bounding-box matrix (as returned by sp::bbox()) or any
#      object that raster::extent() understands (e.g. another Spatial*).
# Returns the geometric intersection of `shp` with the box polygon.
gClip <- function(shp, bb){
	# BUG FIX: since R 4.0, class(<matrix>) is c("matrix", "array"), so
	# `class(bb) == "matrix"` yields a length-2 logical and breaks `if`.
	# inherits() is the correct class test.
	if (inherits(bb, "matrix")) {
		b_poly <- as(extent(as.vector(t(bb))), "SpatialPolygons")
	} else {
		b_poly <- as(extent(bb), "SpatialPolygons")
	}
	# Give the clip polygon the same CRS as the input before intersecting.
	proj4string(b_poly) <- proj4string(shp)
	gIntersection(shp, b_poly, byid = TRUE)
}
# Start with Tier 1 data:
load("Data/bdi_meta.Rda") # Object: metadata
load("Data/FiveMinBird.Rda") # Object: FiveMinBird
# Read in the Wellington bird data
well <- data.table(read_excel("Data/Tier I bird count data summary.xlsx"))
setnames(well, "Site", "Place")
# Static master site list; skip = 10 jumps over the file's header rows.
allLocs <- fread("Data/Appendix_1_static_master_data_July2016.csv", skip = 10)
setnames(allLocs, "PlotID", "Place")
# Read in lcdb use map data.
# Keep the data elswhere, too big for github.
lcdb <- readOGR("../../Examples/Data/mfe-lucas-nz-land-use-map-1990-2008-2012-v018-SHP", "lucas-nz-land-use-map-1990-2008-2012-v018")
rc <- readOGR("../../Examples/Data/RC_Boundaries", "RegionalCouncilBoundaries")
pcl <- readOGR("../../Examples/Data/PCL", "PCL_08July2016")
# Lookup table for LUC_NAME aggregation
lookup <- fread("Data/LandMapUseLookUp.csv")
# nztm <- proj4string(lcdb)
# NZ Transverse Mercator projection string (hard-coded rather than read from lcdb).
nztm <- "+proj=tmerc +lat_0=0 +lon_0=173 +k=0.9996 +x_0=1600000 +y_0=10000000 +ellps=GRS80 +units=m +no_defs"
# Promote the master site list to a SpatialPointsDataFrame in NZTM coordinates.
locs <- SpatialPointsDataFrame(SpatialPoints(cbind(allLocs$NZTM_Easting, allLocs$NZTM_Northing), proj4string = CRS(nztm)), data = data.frame(Place = allLocs$Place, Region = allLocs$Region))
# Speed things up by not "rerunning" this bit.
# Cache the slow point-in-polygon overlays; only recompute when the cache file is absent.
if(!any("LocationsLCDB.Rda" %in% dir("Data/"))){
	dat.lcdb <- over(locs, lcdb) # land-use attributes at each site location
	locs@data <- cbind(locs@data, dat.lcdb)
	dat.pcl <- over(locs, pcl)
	# PCL2016 flags whether a site falls on Public Conservation Land.
	locs@data <- cbind(locs@data, data.frame(PCL2016 = !is.na(dat.pcl$Type)))
	save(locs, file = "Data/LocationsLCDB.Rda")
}
load("Data/LocationsLCDB.Rda")
# I also want to know what proportion of rc is pcl.
# Super slow so don't do it if you don't have to.
if(!any("AreaPCL.Rda" %in% dir("Data/"))){
	area.pcl <- data.table()
	# For each regional council, intersect PCL with the council boundary and
	# record both the PCL area inside it and the council's total area.
	for( i in unique(rc$NAME) )
	{
		tmp.shp <- rc[rc$NAME == i,]
		tmp.int <- gIntersection(pcl, tmp.shp, byid = TRUE, drop_lower_td = TRUE)
		area.i <- sum(area(tmp.int))
		area.rc <- sum(area(tmp.shp))
		area.pcl <- rbind(area.pcl, data.table(NAME = i, areaPCL = area.i, areaRC = area.rc))
	}
	area.pcl[, "PropPCL" := areaPCL/areaRC]
	save(area.pcl, file = "Data/AreaPCL.Rda")
}else{
	load("Data/AreaPCL.Rda")
}
# Normalise council names (drop " Region" suffix and apostrophes) so they match lcdb's LUM_REG_NA key.
area.pcl[, "LUM_REG_NA" := gsub(" Region|'", "", NAME)]
# Build tables for the report.
# For stratified sampling need to know the true proportion of Native and Impacted:
# We also need a table with area, proportion of each strata and then proportion of that which is PCL.
#####################################################################################################
lcdb <- merge(lcdb, lookup, by = "LUC_NAME", all.x = TRUE, all.y = FALSE, sort = FALSE) # Need to merge with sp merge for the sake of order.
locs <- merge(locs, lookup, by = "LUC_NAME", all.x = TRUE, all.y = FALSE, sort = FALSE) # Need to merge with sp merge for the sake of order.
dat.lcdb <- data.table(lcdb@data)
# Area and within-council proportion of each land-use Level.
dat.cov <- dat.lcdb[,.(area = sum(AREA_HA)), by = c("LUM_REG_NA", "Level")]
dat.cov[, "prop" := area/sum(area), by = "LUM_REG_NA"]
tab.cover <- dcast.data.table(dat.cov, LUM_REG_NA ~ Level, value.var = "prop")
tab.cover <- merge(tab.cover, area.pcl[,.(LUM_REG_NA, areaRC, PropPCL)], all.x = TRUE, all.y = FALSE)
# Area rescaled by 1e6 (m^2 -> km^2); proportions expressed as percentages.
tab.cover <- tab.cover[,.(Region = LUM_REG_NA, Area = round(areaRC/1000000,1), PCL = round(PropPCL*100, 1), Impacted = round(Impacted*100,1), Native = round(Native*100, 1))]
write.csv(tab.cover, "Data/CoverTypesTable.csv", row.names = FALSE)
# Counts of existing sample sites by region, PCL status and land-use Level.
dat.samp <- data.table(locs@data)
dat.samp <- dat.samp[, .N, by = c("LUM_REG_NA", "PCL2016", "Level")]
tab.sites <- dcast.data.table(dat.samp, LUM_REG_NA ~ Level + PCL2016, value.var = "N")
# Sites on PCL classed as Impacted are folded into the Native-on-PCL column.
tab.sites[!is.na(Impacted_TRUE), Native_TRUE := Native_TRUE + Impacted_TRUE]
tab.sites[, Impacted_TRUE := NULL]
tab.sites[, "Native" := Native_TRUE + Native_FALSE]
tab.sites <- tab.sites[,.(Region = LUM_REG_NA, Native = Native, NativePCL = Native_TRUE, Impacted = Impacted_FALSE)]
tab.sites[is.na(Impacted), Impacted := 0]
tab.sites[, "Total" := Native + Impacted ]
write.csv(tab.sites, "Data/SampleSizeGridTable.csv", row.names = FALSE)
# Allocate total sample size using a 2:1 Native:Impacted weighting on area share.
tab.total <- merge(tab.sites, tab.cover, by = "Region")
tab.total[, "n_Total" := ceiling(Native.x/(2*Native.y/(2*Native.y + 1*Impacted.y)))]
tab.total[,"Native_Ratio" := (2*Native.y/(2*Native.y + 1*Impacted.y))]
tab.total[, "n_Impacted" := n_Total - Native.x]
tab.total <- tab.total[, .(Region, Total = n_Total, Native_PCL = NativePCL, Native_RC = Native.x - NativePCL, Impacted = n_Impacted)]
tab.total[, RC_Total := Impacted + Native_RC]
write.csv(tab.total, "Data/SampleSizeTable.csv", row.names = FALSE)
########################################################
# Now create the map of Southland for Tier 1:
########################################################
rc.south <- rc[rc$NAME == "Southland Region",]
southland.df <- fortify(rc.south) # polygon -> data.frame for ggplot
locs.south <- data.frame(locs[locs$Region == "Southland Region",])
pcl.south <- gClip(pcl, bbox(rc.south))
pcl.df <- fortify(pcl.south)
# Map 1: all existing Southland Tier 1 sites over PCL (grey), shaped by PCL status.
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
	geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
	geom_point(data = locs.south, aes(x = coords.x1, y = coords.x2, shape = PCL2016), group = 1) +
	scale_shape_manual(name = "", values = c("TRUE" = 4, "FALSE" = 16), labels = c( "Non-PCL", "PCL")) +
	theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/Tier1Southland.png")
# Map 2: year-1 subset -- all PCL sites plus a 1-in-5 random draw of non-PCL sites.
locs.y1 <- locs.south[locs.south$PCL2016 == "FALSE",]
locs.y1 <- locs.y1[sample(1:nrow(locs.y1), ceiling(nrow(locs.y1)/5)),]
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
	geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
	geom_point(data = rbind(locs.south[locs.south$PCL2016 == TRUE,], locs.y1), aes(x = coords.x1, y = coords.x2, shape = PCL2016), group = 1) +
	scale_shape_manual(name = "", values = c("TRUE" = 4, "FALSE" = 16), labels = c( "Non-PCL", "PCL")) +
	theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/Tier1SouthlandYear1.png")
# Draw master-sample points separately within the Native and Impacted strata of Southland.
ms.pts.native <- masterSample(N = 500, shp = lcdb[lcdb$LUM_REG_NA == "Southland" & lcdb$Level == "Native",], island = "South", J = c(4,3))
ms.pts.impact <- masterSample(N = 500, shp = lcdb[lcdb$LUM_REG_NA == "Southland" & lcdb$Level == "Impacted",], island = "South", J = c(4,3))
ms.pts.impact$Level <- "Impacted"
ms.pts.native$Level <- "Native"
ms.pts.impact <- spTransform(ms.pts.impact, CRS(proj4string(lcdb)))
ms.pts.native <- spTransform(ms.pts.native, CRS(proj4string(lcdb)))
# Drop master-sample points that fall on PCL (Tier 1 already covers PCL).
pts1 <- ms.pts.impact[pcl,]
pts2 <- ms.pts.native[pcl,]
ms.pts.impact <- ms.pts.impact[!(ms.pts.impact$SiteID %in% pts1$SiteID),]
ms.pts.native <- ms.pts.native[!(ms.pts.native$SiteID %in% c(paste0(pts2$SiteID), "South2521")),] # One point is ending up in Milford sound and I think it's a spatial processing issue.
# Map 3: full stratified design (55 Impacted + 112 Native new sites plus Tier 1 PCL sites).
ms.pts <- rbind(ms.pts.impact[1:55,], ms.pts.native[1:112,])
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
	geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
	geom_point(data = locs.south[locs.south$PCL2016 == TRUE,], aes(x = coords.x1, y = coords.x2), group = 1, shape = 4) +
	geom_point(data = data.frame(ms.pts), aes(x = coords.x1, y = coords.x2, shape = Level, colour = Level), group = 1, size = 1.5) +
	scale_shape_manual(name = "Land Use Type", values = c("Impacted" = 16, "Native" = 17), labels = c("Impacted", "Native")) +
	scale_colour_manual(name = "Land Use Type", values = c("Impacted" = "red", "Native" = "blue"), labels = c("Impacted", "Native")) +
	theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/StratifiedSouthland.png")
# Map 4: year-1 slice of the stratified design (11 Impacted + 23 Native).
ms.pts <- rbind(ms.pts.impact[1:11,], ms.pts.native[1:23,])
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
	geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
	geom_point(data = locs.south[locs.south$PCL2016 == TRUE,], aes(x = coords.x1, y = coords.x2), group = 1, shape = 4) +
	geom_point(data = data.frame(ms.pts), aes(x = coords.x1, y = coords.x2, shape = Level, colour = Level), group = 1, size = 1.5) +
	scale_shape_manual(name = "Land Use Type", values = c("Impacted" = 16, "Native" = 17), labels = c("Impacted", "Native")) +
	scale_colour_manual(name = "Land Use Type", values = c("Impacted" = "red", "Native" = "blue"), labels = c("Impacted", "Native")) +
	theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/StratifiedSouthlandYear1.png")
#####################
#Process Tier 1 data as we do in annual report.
#####################
#Hash out the dates
metadata[, "StartDate" := as.Date(Start.Date, format = "%d/%m/%Y")]
#Need just the most recent of the observed plot data
# A "Season" spans July-June: measurements through June belong to the previous calendar year.
metadata[, "Season" := as.numeric(format(StartDate, '%Y'))]
metadata[as.numeric(format(StartDate, '%m')) <= 06, "Season" := Season - 1]
metadata = metadata[,.SD[Season == max(Season)], by = Place]
metadata <- metadata[!is.na(Wood)]
#Quick averages of 5MBC
# Wide table: one column of summed counts per species (CommonName), zero-filled.
avg.birds <- dcast.data.table(data = FiveMinBird, Place + Season + Station ~ CommonName, value.var = "TotalCount", fun = sum, fill = 0)
avg.birds[,"NA" := NULL] # drop the column created from records with no species name
##########################################
# Do the equivallent with Wellington Data
##########################################
well[, "Season" := as.numeric(format(Date, '%Y'))]
well[as.numeric(format(Date, '%m')) <= 06, "Season" := Season - 1]
avg.birds.w <- dcast.data.table(data = well, Place + Season + Point ~ Species_name, value.var = "Number", fun = sum, fill = 0)
avg.birds.w <- merge(avg.birds.w, locs@data, all.x = TRUE, all.y = FALSE, by = "Place")
avg.birds <- merge(avg.birds, locs@data, all.x = TRUE, all.y = FALSE, by = "Place")
avg.birds <- merge(avg.birds, lookup, by = "LUC_NAME")
t1.birds.w <- avg.birds[Region == "Wellington Region"]
# Stack Tier 1 (tagged PCL = 1) and Wellington council (PCL = 0) counts with
# harmonised column names for four focal species.
wellyFans <- rbind(t1.birds.w[,.(Place, Station, Season, Region, Fantail, Bellbird, GreyWarbler = `Grey Warbler`, Chaff = Chaffinch, LUC_NAME, PCL = 1)],
	avg.birds.w[,.(Place, Station = Point, Season, Region, Fantail, Bellbird, GreyWarbler = `Warbler_Grey`, Chaff = Chaffinch, LUC_NAME, PCL = 0)])
wellyFans[, "time" := Season - min(Season)]
wellyFans <- merge(wellyFans, lookup)
# Here we have combined Tier 1 and Wellington Data for birds.
#------------------------------------------------------------
tmp <- wellyFans[,.(Chaff = mean(Chaff), Bellbird = mean(Bellbird), GreyWarbler = mean(GreyWarbler), Fantail = mean(Fantail)), by = c("Level", "Place", "Region", "PCL")]
tmp[,.N, by = c("PCL", "Level")]
wellyFans[, .(fan = sd(Fantail), bell = sd(Bellbird), greyWarb = sd(GreyWarbler)), by = "Level"]
avg.birds[, .(fan = sd(Fantail), bell = sd(Bellbird), greyWarb = sd(`Grey Warbler`))]
# Poisson GLMM of Grey Warbler counts on land-use Level with a random Place intercept;
# cell-means coding (-1) so each Level gets its own coefficient.
m <- glmer(GreyWarbler ~ -1 + Level + (1|Place), data = wellyFans, family=poisson(link=log), control=glmerControl(optCtrl=list(maxfun=2000000)))
table(doSim(m))
table(wellyFans$GreyWarbler)
# Fails to converge but we simulate realistic data.
# Create data for two scenarios, one full 8-km grid and then for stratified sample.
# full.dat:  95 Native + 32 Impacted places, 5 stations each (full 8-km grid).
# strat.dat: 95 Native + 16 Impacted places (stratified design).
full.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:32))))
strat.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:16))))
results <- data.table()
for(i in 1:1000)
{
	# Simulate counts under the full-grid design; summarise the overall mean and SE.
	getData(m) <- full.dat[,.(Level, Place, Station)]
	full.dat$y <- doSim(m)
	f <- full.dat[,.(m = mean(y)), by = "Place"]
	f <- f[,.(mean = mean(m), SE = sd(m)/sqrt(.N), iter = i, type = "full")]
	# Simulate counts under the stratified design; estimate per stratum then combine.
	getData(m) <- strat.dat[,.(Level, Station, Place)]
	strat.dat$y <- doSim(m)
	# BUG FIX: this summary previously read full.dat, silently discarding the
	# stratified simulation produced on the line above; use strat.dat instead.
	s <- strat.dat[,.(m = mean(y)), by = c("Place", "Level")]
	s <- s[,.(mean = mean(m), SE = sd(m)/sqrt(.N)), by = "Level"]
	# Stratum weights: area share of Native (0.759) vs Impacted (0.241) cover.
	s[, "p" := ifelse(Level == "Native", 0.759, 0.241)]
	s <- s[,.(mean = sum(p*mean), SE = sqrt(sum(p^2*SE^2)), iter = i, type = "strata")]
	results <- rbind(results, s, f)
}
# Compare the SE distribution achieved by the two designs.
results[, .(lower = quantile(SE, 0.025), mean = mean(SE), upper = quantile(SE, 0.975)), by = "type"]
# Let's show an example where this may fail!
##########################################
# Same simulation as above but for Chaffinch, a species where the design comparison may break down.
mc <- glmer(Chaff ~ -1 + Level + (1|Place), data = wellyFans, family=poisson(link=log), control=glmerControl(optCtrl=list(maxfun=2000000)))
table(doSim(mc))
table(wellyFans$Chaff)
# Fails to converge but we simulate realistic data.
# Create data for two scenarios, one full 8-km grid and then for stratified sample.
full.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:32))))
strat.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:16))))
results2 <- data.table()
for(i in 1:1000)
{
	# Full-grid design: simulate and summarise the overall mean and SE.
	getData(mc) <- full.dat[,.(Level, Place, Station)]
	full.dat$y <- doSim(mc)
	f <- full.dat[,.(m = mean(y)), by = "Place"]
	f <- f[,.(mean = mean(m), SE = sd(m)/sqrt(.N), iter = i, type = "full")]
	# Stratified design: simulate, estimate per stratum, then combine.
	getData(mc) <- strat.dat[,.(Level, Station, Place)]
	strat.dat$y <- doSim(mc)
	# BUG FIX: this summary previously read full.dat, silently discarding the
	# stratified simulation produced on the line above; use strat.dat instead.
	s <- strat.dat[,.(m = mean(y)), by = c("Place", "Level")]
	s <- s[,.(mean = mean(m), SE = sd(m)/sqrt(.N)), by = "Level"]
	# Stratum weights: area share of Native (0.759) vs Impacted (0.241) cover.
	s[, "p" := ifelse(Level == "Native", 0.759, 0.241)]
	s <- s[,.(mean = sum(p*mean), SE = sqrt(sum(p^2*SE^2)), iter = i, type = "strata")]
	results2 <- rbind(results2, s, f)
}
# Compare the SE distribution achieved by the two designs.
results2[, .(lower = quantile(SE, 0.025), mean = mean(SE), upper = quantile(SE, 0.975)), by = "type"]
# Build scenarios:
# Scenario 1. Can we find the proportion of effort to put into off of Native cover types?
# Does equal knowledge occur if
#############
results.nat <- data.table()
results.imp <- data.table()
# Sweep the Native/Impacted split from 90/10 to 10/90 places (5 stations each)
# and record the CI width achieved for each Level's coefficient.
for(i in seq(0, 80, by = 5))
{
	X <- expand.grid(Level = c(rep("Native", 90 - i), rep("Impacted", 10 + i)), Station = c("A", "B", "C", "D", "E"))
	getData(m) <- X
	tmpImp <- data.table(getWidths(powerCurve(m, test = ciWidth("LevelImpacted"), along = "Level", nsim = 10)), NI = 10 + i)
	results.imp <- rbind(results.imp, tmpImp)
	tmpNat <- data.table(getWidths(powerCurve(m, test = ciWidth("LevelNative"), along = "Level", nsim = 10)), NI = 10 + i)
	results.nat <- rbind(results.nat, tmpNat)
}
# Wellington land-cover proportions for context.
lcdb.well <- lcdb[lcdb$LUM_REG_NA == "Wellington",]
dt.well <- data.table(lcdb.well@data)
dt.well <- dt.well[, .(totArea = sum(AREA_HA)), by = "Level"]
dt.well[, prop := totArea/sum(totArea)]
results.nat[, "Level" := "Native"]
results.imp[, "Level" := "Impacted"]
# NOTE(review): this overwrites the `results` table built by the earlier
# design simulation; rename if both are still needed downstream.
results <- rbind(results.nat, results.imp)
ggplot(data = results, aes(x = NI, y = mean, colour = Level)) + geom_point() + geom_smooth()
# One final simulated draw to inspect the per-Level variance.
getData(m) <- X
X <- data.table(X)
X$Y <- doSim(m)
# BUG FIX: removed trailing comma inside .() -- `.(v = var(Y),)` passes an
# empty list argument, which is an error in R.
X[, .(v = var(Y)), by = "Level"]
|
/Sample Size/Code/DataProcessing.r
|
no_license
|
paul-vdb/RegionalCouncilDesign
|
R
| false
| false
| 15,989
|
r
|
##########################
# Have a crack at sample size for
# regional councils based on "heterogeneity"
# of their LCDB classification.
#
# Paul van Dam-Bates
# Decemeber 28, 2018
##########################
# Logic here is that heterogeneity is not a function of "area"
# but a function of heterogeneity of landcover.
# We will model birds as a function of different forest types and
# then act as if we will model them as such.
# Step 1: Add LCDB class to all sites.
setwd("..")
library(data.table)
library(dplyr)
library(readxl)
library(tidyverse)
library(simr)
library(rgdal)
library(sp)
library(rgeos)
library(sf)
library(reshape2)
library(raster) # intersect function to try.
# library(devtools)
# install_github("ogansell/MSampNZ")
library(MSampNZ)
# Helpful fast clip function.
# Fast clip of a Spatial* layer to a bounding box.
#
# shp: a Spatial* object carrying a proj4string.
# bb:  either a 2x2 bounding-box matrix (as returned by sp::bbox()) or any
#      object that raster::extent() understands (e.g. another Spatial*).
# Returns the geometric intersection of `shp` with the box polygon.
gClip <- function(shp, bb){
	# BUG FIX: since R 4.0, class(<matrix>) is c("matrix", "array"), so
	# `class(bb) == "matrix"` yields a length-2 logical and breaks `if`.
	# inherits() is the correct class test.
	if (inherits(bb, "matrix")) {
		b_poly <- as(extent(as.vector(t(bb))), "SpatialPolygons")
	} else {
		b_poly <- as(extent(bb), "SpatialPolygons")
	}
	# Give the clip polygon the same CRS as the input before intersecting.
	proj4string(b_poly) <- proj4string(shp)
	gIntersection(shp, b_poly, byid = TRUE)
}
# Start with Tier 1 data:
load("Data/bdi_meta.Rda") # Object: metadata
load("Data/FiveMinBird.Rda") # Object: FiveMinBird
# Read in the Wellington bird data
well <- data.table(read_excel("Data/Tier I bird count data summary.xlsx"))
setnames(well, "Site", "Place")
allLocs <- fread("Data/Appendix_1_static_master_data_July2016.csv", skip = 10)
setnames(allLocs, "PlotID", "Place")
# Read in lcdb use map data.
# Keep the data elswhere, too big for github.
lcdb <- readOGR("../../Examples/Data/mfe-lucas-nz-land-use-map-1990-2008-2012-v018-SHP", "lucas-nz-land-use-map-1990-2008-2012-v018")
rc <- readOGR("../../Examples/Data/RC_Boundaries", "RegionalCouncilBoundaries")
pcl <- readOGR("../../Examples/Data/PCL", "PCL_08July2016")
# Lookup table for LUC_NAME aggregation
lookup <- fread("Data/LandMapUseLookUp.csv")
# nztm <- proj4string(lcdb)
nztm <- "+proj=tmerc +lat_0=0 +lon_0=173 +k=0.9996 +x_0=1600000 +y_0=10000000 +ellps=GRS80 +units=m +no_defs"
locs <- SpatialPointsDataFrame(SpatialPoints(cbind(allLocs$NZTM_Easting, allLocs$NZTM_Northing), proj4string = CRS(nztm)), data = data.frame(Place = allLocs$Place, Region = allLocs$Region))
# Speed things up by not "rerunning" this bit.
if(!any("LocationsLCDB.Rda" %in% dir("Data/"))){
dat.lcdb <- over(locs, lcdb)
locs@data <- cbind(locs@data, dat.lcdb)
dat.pcl <- over(locs, pcl)
locs@data <- cbind(locs@data, data.frame(PCL2016 = !is.na(dat.pcl$Type)))
save(locs, file = "Data/LocationsLCDB.Rda")
}
load("Data/LocationsLCDB.Rda")
# I also want to know what proportion of rc is pcl.
# Super slow so don't do it if you don't have to.
if(!any("AreaPCL.Rda" %in% dir("Data/"))){
area.pcl <- data.table()
for( i in unique(rc$NAME) )
{
tmp.shp <- rc[rc$NAME == i,]
tmp.int <- gIntersection(pcl, tmp.shp, byid = TRUE, drop_lower_td = TRUE)
area.i <- sum(area(tmp.int))
area.rc <- sum(area(tmp.shp))
area.pcl <- rbind(area.pcl, data.table(NAME = i, areaPCL = area.i, areaRC = area.rc))
}
area.pcl[, "PropPCL" := areaPCL/areaRC]
save(area.pcl, file = "Data/AreaPCL.Rda")
}else{
load("Data/AreaPCL.Rda")
}
area.pcl[, "LUM_REG_NA" := gsub(" Region|'", "", NAME)]
# Build tables for the report.
# For stratified sampling need to know the true proportion of Native and Impacted:
# We also need a table with area, proportion of each strata and then proportion of that which is PCL.
#####################################################################################################
lcdb <- merge(lcdb, lookup, by = "LUC_NAME", all.x = TRUE, all.y = FALSE, sort = FALSE) # Need to merge with sp merge for the sake of order.
locs <- merge(locs, lookup, by = "LUC_NAME", all.x = TRUE, all.y = FALSE, sort = FALSE) # Need to merge with sp merge for the sake of order.
dat.lcdb <- data.table(lcdb@data)
dat.cov <- dat.lcdb[,.(area = sum(AREA_HA)), by = c("LUM_REG_NA", "Level")]
dat.cov[, "prop" := area/sum(area), by = "LUM_REG_NA"]
tab.cover <- dcast.data.table(dat.cov, LUM_REG_NA ~ Level, value.var = "prop")
tab.cover <- merge(tab.cover, area.pcl[,.(LUM_REG_NA, areaRC, PropPCL)], all.x = TRUE, all.y = FALSE)
tab.cover <- tab.cover[,.(Region = LUM_REG_NA, Area = round(areaRC/1000000,1), PCL = round(PropPCL*100, 1), Impacted = round(Impacted*100,1), Native = round(Native*100, 1))]
write.csv(tab.cover, "Data/CoverTypesTable.csv", row.names = FALSE)
dat.samp <- data.table(locs@data)
dat.samp <- dat.samp[, .N, by = c("LUM_REG_NA", "PCL2016", "Level")]
tab.sites <- dcast.data.table(dat.samp, LUM_REG_NA ~ Level + PCL2016, value.var = "N")
tab.sites[!is.na(Impacted_TRUE), Native_TRUE := Native_TRUE + Impacted_TRUE]
tab.sites[, Impacted_TRUE := NULL]
tab.sites[, "Native" := Native_TRUE + Native_FALSE]
tab.sites <- tab.sites[,.(Region = LUM_REG_NA, Native = Native, NativePCL = Native_TRUE, Impacted = Impacted_FALSE)]
tab.sites[is.na(Impacted), Impacted := 0]
tab.sites[, "Total" := Native + Impacted ]
write.csv(tab.sites, "Data/SampleSizeGridTable.csv", row.names = FALSE)
tab.total <- merge(tab.sites, tab.cover, by = "Region")
tab.total[, "n_Total" := ceiling(Native.x/(2*Native.y/(2*Native.y + 1*Impacted.y)))]
tab.total[,"Native_Ratio" := (2*Native.y/(2*Native.y + 1*Impacted.y))]
tab.total[, "n_Impacted" := n_Total - Native.x]
tab.total <- tab.total[, .(Region, Total = n_Total, Native_PCL = NativePCL, Native_RC = Native.x - NativePCL, Impacted = n_Impacted)]
tab.total[, RC_Total := Impacted + Native_RC]
write.csv(tab.total, "Data/SampleSizeTable.csv", row.names = FALSE)
########################################################
# Now create the map of Southland for Tier 1:
########################################################
rc.south <- rc[rc$NAME == "Southland Region",]
southland.df <- fortify(rc.south)
locs.south <- data.frame(locs[locs$Region == "Southland Region",])
pcl.south <- gClip(pcl, bbox(rc.south))
pcl.df <- fortify(pcl.south)
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
geom_point(data = locs.south, aes(x = coords.x1, y = coords.x2, shape = PCL2016), group = 1) +
scale_shape_manual(name = "", values = c("TRUE" = 4, "FALSE" = 16), labels = c( "Non-PCL", "PCL")) +
theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/Tier1Southland.png")
locs.y1 <- locs.south[locs.south$PCL2016 == "FALSE",]
locs.y1 <- locs.y1[sample(1:nrow(locs.y1), ceiling(nrow(locs.y1)/5)),]
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
geom_point(data = rbind(locs.south[locs.south$PCL2016 == TRUE,], locs.y1), aes(x = coords.x1, y = coords.x2, shape = PCL2016), group = 1) +
scale_shape_manual(name = "", values = c("TRUE" = 4, "FALSE" = 16), labels = c( "Non-PCL", "PCL")) +
theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/Tier1SouthlandYear1.png")
ms.pts.native <- masterSample(N = 500, shp = lcdb[lcdb$LUM_REG_NA == "Southland" & lcdb$Level == "Native",], island = "South", J = c(4,3))
ms.pts.impact <- masterSample(N = 500, shp = lcdb[lcdb$LUM_REG_NA == "Southland" & lcdb$Level == "Impacted",], island = "South", J = c(4,3))
ms.pts.impact$Level <- "Impacted"
ms.pts.native$Level <- "Native"
ms.pts.impact <- spTransform(ms.pts.impact, CRS(proj4string(lcdb)))
ms.pts.native <- spTransform(ms.pts.native, CRS(proj4string(lcdb)))
pts1 <- ms.pts.impact[pcl,]
pts2 <- ms.pts.native[pcl,]
ms.pts.impact <- ms.pts.impact[!(ms.pts.impact$SiteID %in% pts1$SiteID),]
ms.pts.native <- ms.pts.native[!(ms.pts.native$SiteID %in% c(paste0(pts2$SiteID), "South2521")),] # One point is ending up in Milford sound and I think it's a spatial processing issue.
ms.pts <- rbind(ms.pts.impact[1:55,], ms.pts.native[1:112,])
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
geom_point(data = locs.south[locs.south$PCL2016 == TRUE,], aes(x = coords.x1, y = coords.x2), group = 1, shape = 4) +
geom_point(data = data.frame(ms.pts), aes(x = coords.x1, y = coords.x2, shape = Level, colour = Level), group = 1, size = 1.5) +
scale_shape_manual(name = "Land Use Type", values = c("Impacted" = 16, "Native" = 17), labels = c("Impacted", "Native")) +
scale_colour_manual(name = "Land Use Type", values = c("Impacted" = "red", "Native" = "blue"), labels = c("Impacted", "Native")) +
theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/StratifiedSouthland.png")
ms.pts <- rbind(ms.pts.impact[1:11,], ms.pts.native[1:23,])
ggplot(data = southland.df, aes(long, lat, group = group)) + geom_polygon(fill = "white", colour = "black", size = 1) +
geom_polygon(data = pcl.df, fill = "grey", size = .5, alpha = 0.8) +
geom_point(data = locs.south[locs.south$PCL2016 == TRUE,], aes(x = coords.x1, y = coords.x2), group = 1, shape = 4) +
geom_point(data = data.frame(ms.pts), aes(x = coords.x1, y = coords.x2, shape = Level, colour = Level), group = 1, size = 1.5) +
scale_shape_manual(name = "Land Use Type", values = c("Impacted" = 16, "Native" = 17), labels = c("Impacted", "Native")) +
scale_colour_manual(name = "Land Use Type", values = c("Impacted" = "red", "Native" = "blue"), labels = c("Impacted", "Native")) +
theme_bw() + coord_fixed() + xlab("Easting (m)") + ylab("Northing (m)")
ggsave("Data/StratifiedSouthlandYear1.png")
#####################
#Process Tier 1 data as we do in annual report.
#####################
#Hash out the dates
metadata[, "StartDate" := as.Date(Start.Date, format = "%d/%m/%Y")]
#Need just the most recent of the observed plot data
metadata[, "Season" := as.numeric(format(StartDate, '%Y'))]
metadata[as.numeric(format(StartDate, '%m')) <= 06, "Season" := Season - 1]
metadata = metadata[,.SD[Season == max(Season)], by = Place]
metadata <- metadata[!is.na(Wood)]
#Quick averages of 5MBC
avg.birds <- dcast.data.table(data = FiveMinBird, Place + Season + Station ~ CommonName, value.var = "TotalCount", fun = sum, fill = 0)
avg.birds[,"NA" := NULL]
##########################################
# Do the equivallent with Wellington Data
##########################################
well[, "Season" := as.numeric(format(Date, '%Y'))]
well[as.numeric(format(Date, '%m')) <= 06, "Season" := Season - 1]
avg.birds.w <- dcast.data.table(data = well, Place + Season + Point ~ Species_name, value.var = "Number", fun = sum, fill = 0)
avg.birds.w <- merge(avg.birds.w, locs@data, all.x = TRUE, all.y = FALSE, by = "Place")
avg.birds <- merge(avg.birds, locs@data, all.x = TRUE, all.y = FALSE, by = "Place")
avg.birds <- merge(avg.birds, lookup, by = "LUC_NAME")
t1.birds.w <- avg.birds[Region == "Wellington Region"]
wellyFans <- rbind(t1.birds.w[,.(Place, Station, Season, Region, Fantail, Bellbird, GreyWarbler = `Grey Warbler`, Chaff = Chaffinch, LUC_NAME, PCL = 1)],
avg.birds.w[,.(Place, Station = Point, Season, Region, Fantail, Bellbird, GreyWarbler = `Warbler_Grey`, Chaff = Chaffinch, LUC_NAME, PCL = 0)])
wellyFans[, "time" := Season - min(Season)]
wellyFans <- merge(wellyFans, lookup)
# Here we have combined Tier 1 and Wellington Data for birds.
#------------------------------------------------------------
tmp <- wellyFans[,.(Chaff = mean(Chaff), Bellbird = mean(Bellbird), GreyWarbler = mean(GreyWarbler), Fantail = mean(Fantail)), by = c("Level", "Place", "Region", "PCL")]
tmp[,.N, by = c("PCL", "Level")]
wellyFans[, .(fan = sd(Fantail), bell = sd(Bellbird), greyWarb = sd(GreyWarbler)), by = "Level"]
avg.birds[, .(fan = sd(Fantail), bell = sd(Bellbird), greyWarb = sd(`Grey Warbler`))]
m <- glmer(GreyWarbler ~ -1 + Level + (1|Place), data = wellyFans, family=poisson(link=log), control=glmerControl(optCtrl=list(maxfun=2000000)))
table(doSim(m))
table(wellyFans$GreyWarbler)
# Fails to converge but we simulate realistic data.
# Create data for two scenarios, one full 8-km grid and then for stratified sample.
# full.dat:  95 Native + 32 Impacted places, 5 stations each (full 8-km grid).
# strat.dat: 95 Native + 16 Impacted places (stratified design).
full.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:32))))
strat.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:16))))
results <- data.table()
for(i in 1:1000)
{
	# Simulate counts under the full-grid design; summarise the overall mean and SE.
	getData(m) <- full.dat[,.(Level, Place, Station)]
	full.dat$y <- doSim(m)
	f <- full.dat[,.(m = mean(y)), by = "Place"]
	f <- f[,.(mean = mean(m), SE = sd(m)/sqrt(.N), iter = i, type = "full")]
	# Simulate counts under the stratified design; estimate per stratum then combine.
	getData(m) <- strat.dat[,.(Level, Station, Place)]
	strat.dat$y <- doSim(m)
	# BUG FIX: this summary previously read full.dat, silently discarding the
	# stratified simulation produced on the line above; use strat.dat instead.
	s <- strat.dat[,.(m = mean(y)), by = c("Place", "Level")]
	s <- s[,.(mean = mean(m), SE = sd(m)/sqrt(.N)), by = "Level"]
	# Stratum weights: area share of Native (0.759) vs Impacted (0.241) cover.
	s[, "p" := ifelse(Level == "Native", 0.759, 0.241)]
	s <- s[,.(mean = sum(p*mean), SE = sqrt(sum(p^2*SE^2)), iter = i, type = "strata")]
	results <- rbind(results, s, f)
}
# Compare the SE distribution achieved by the two designs.
results[, .(lower = quantile(SE, 0.025), mean = mean(SE), upper = quantile(SE, 0.975)), by = "type"]
# Let's show an example where this may fail!
##########################################
# Same simulation as above but for Chaffinch, a species where the design comparison may break down.
mc <- glmer(Chaff ~ -1 + Level + (1|Place), data = wellyFans, family=poisson(link=log), control=glmerControl(optCtrl=list(maxfun=2000000)))
table(doSim(mc))
table(wellyFans$Chaff)
# Fails to converge but we simulate realistic data.
# Create data for two scenarios, one full 8-km grid and then for stratified sample.
full.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:32))))
strat.dat <- data.table(rbind(expand.grid(Level = "Native", Station = LETTERS[1:5], Place = paste0("A", 1:95)),
	expand.grid(Level = "Impacted", Station = LETTERS[1:5], Place = paste0("B", 1:16))))
results2 <- data.table()
for(i in 1:1000)
{
	# Full-grid design: simulate and summarise the overall mean and SE.
	getData(mc) <- full.dat[,.(Level, Place, Station)]
	full.dat$y <- doSim(mc)
	f <- full.dat[,.(m = mean(y)), by = "Place"]
	f <- f[,.(mean = mean(m), SE = sd(m)/sqrt(.N), iter = i, type = "full")]
	# Stratified design: simulate, estimate per stratum, then combine.
	getData(mc) <- strat.dat[,.(Level, Station, Place)]
	strat.dat$y <- doSim(mc)
	# BUG FIX: this summary previously read full.dat, silently discarding the
	# stratified simulation produced on the line above; use strat.dat instead.
	s <- strat.dat[,.(m = mean(y)), by = c("Place", "Level")]
	s <- s[,.(mean = mean(m), SE = sd(m)/sqrt(.N)), by = "Level"]
	# Stratum weights: area share of Native (0.759) vs Impacted (0.241) cover.
	s[, "p" := ifelse(Level == "Native", 0.759, 0.241)]
	s <- s[,.(mean = sum(p*mean), SE = sqrt(sum(p^2*SE^2)), iter = i, type = "strata")]
	results2 <- rbind(results2, s, f)
}
# Compare the SE distribution achieved by the two designs.
results2[, .(lower = quantile(SE, 0.025), mean = mean(SE), upper = quantile(SE, 0.975)), by = "type"]
# Build scenarios:
# Scenario 1. Can we find the proportion of effort to put into off of Native cover types?
# Does equal knowledge occur if
#############
results.nat <- data.table()
results.imp <- data.table()
# Sweep the Native/Impacted split from 90/10 to 10/90 places (5 stations each)
# and record the CI width achieved for each Level's coefficient.
for(i in seq(0, 80, by = 5))
{
	X <- expand.grid(Level = c(rep("Native", 90 - i), rep("Impacted", 10 + i)), Station = c("A", "B", "C", "D", "E"))
	getData(m) <- X
	tmpImp <- data.table(getWidths(powerCurve(m, test = ciWidth("LevelImpacted"), along = "Level", nsim = 10)), NI = 10 + i)
	results.imp <- rbind(results.imp, tmpImp)
	tmpNat <- data.table(getWidths(powerCurve(m, test = ciWidth("LevelNative"), along = "Level", nsim = 10)), NI = 10 + i)
	results.nat <- rbind(results.nat, tmpNat)
}
# Wellington land-cover proportions for context.
lcdb.well <- lcdb[lcdb$LUM_REG_NA == "Wellington",]
dt.well <- data.table(lcdb.well@data)
dt.well <- dt.well[, .(totArea = sum(AREA_HA)), by = "Level"]
dt.well[, prop := totArea/sum(totArea)]
results.nat[, "Level" := "Native"]
results.imp[, "Level" := "Impacted"]
# NOTE(review): this overwrites the `results` table built by the earlier
# design simulation; rename if both are still needed downstream.
results <- rbind(results.nat, results.imp)
ggplot(data = results, aes(x = NI, y = mean, colour = Level)) + geom_point() + geom_smooth()
# One final simulated draw to inspect the per-Level variance.
getData(m) <- X
X <- data.table(X)
X$Y <- doSim(m)
# BUG FIX: removed trailing comma inside .() -- `.(v = var(Y),)` passes an
# empty list argument, which is an error in R.
X[, .(v = var(Y)), by = "Level"]
|
# Categorical Data Analysis
# MMEAD - 2017/2018 - 2nd semester
# Generalized Linear Models v1.0
# Ana Sapata no. 39504
# 21/06/2018
dados<-read.table("C:\\Users\\rebis1\\Desktop\\Universidade\\Categorica\\Nova pasta\\loja.txt", header=T)
# NOTE(review): attach() puts the columns on the search path; the bare names
# used below (Client, habit, rendim, ...) rely on it. Prefer dados$... or
# with(dados, ...) -- but changing it would require touching every bare use.
attach(dados)
View(dados)
str(dados)
# -------------------------- Normal (Gaussian) model --------------------------------
# Histogram and summary statistics of the response (customer counts).
hist(dados$Client)
library(fBasics)
basicStats(Client)
# Fit the Gaussian GLM (equivalent to ordinary least squares) to the data.
fit1<-glm(Client ~ habit + rendim + idade + dist_conc +
            dist_loja, family="gaussian", data=dados)
summary(fit1)
# Check the model assumptions:
# 1. Homogeneity of variance
plot(fitted(fit1),rstudent(fit1))
plot(fitted(fit1),rstandard(fit1))
library(lmtest)
bptest(fit1, studentize=F) # p-value = 0.01245 < 0.05, reject H0 (variance not constant)
library (car)
ncvTest (lm(Client ~ habit + rendim + idade + dist_conc +
              dist_loja)) # p-value = 0.0005 < 0.05, reject H0
# The homoscedasticity assumption does not hold.
# 2. Normality of (standardised) residuals
rs<-rstandard(fit1)
qqnorm(rs)
qqline (rs, col=2)
hist(rs, ylab=, xlab=, col=rainbow(60))
boxplot(rs)
library (nortest)
lillie.test(rs) # p-value = 0.4803 > 0.05, fail to reject H0
library (tseries)
jarque.bera.test (rs) # p-value = 0.2536
a<-ad.test (rs)
a # p-value = 0.2721
# The normality assumption holds.
# 3. Multicollinearity
library(corpcor)
mcor<-cor(data.frame(habit, rendim, idade, dist_conc, dist_loja))
mcor # there are some sizeable correlations among the predictors
# so multicollinearity is present among the independent variables.
# 4. Independence
durbinWatsonTest(fit1) # p-value = 0.158 > 0.05, fail to reject H0
# NOTE(review): the original comment concluded independence is NOT verified,
# but p > 0.05 fails to reject H0 of no autocorrelation, i.e. there is no
# evidence against independence -- confirm the intended conclusion.
# -------------------------- Poisson model --------------------------------
# Refit the same linear predictor as a Poisson GLM (count response).
mod1 <- glm(Client ~ habit + rendim + idade + dist_conc +
              dist_loja, family=poisson,dados)
summary(mod1)
exp(mod1$coefficients) # rate ratios on the response scale
# Simplify the saturated model by stepwise AIC.
mod1a<-step(mod1)
summary (mod1a)
# step() returns the same model.
# Adequacy of the link function: linearised response vs linear predictor.
mu <- 2*sqrt(predict (mod1, type="response")) # 2*sqrt(mu), see p. 79 of the GLM course book
z <- predict (mod1)+(Client-mu)/mu
plot(z ~ predict(mod1,type="link"), xlab=expression(hat(eta)), dados,ylab="Resposta linearizada")
# Despite some scatter the points follow roughly a straight line, so the log link is adequate.
# Assessment of the variance function: absolute residuals vs 2*sqrt(mu).
ra<-resid(mod1,type="response")
tr<-2*sqrt(predict(mod1,type="response"))
plot(ra ~ tr, xlab=expression(2*sqrt(hat(mu))),ylab="Resíduos Absolutos")
plot(residuals (mod1) ~ predict(mod1,type="link"), xlab=expression(hat(u)),ylab="Resíduos Absolutos")
lines (lowess (predict(mod1,type="link"), residuals (mod1)), col="red")
# Resíduos deviance
rd<-resid(mod1,type="deviance")
tr<-2*sqrt(predict(mod1,type="response"))
plot(rd ~ tr, xlab=expression(2*sqrt(hat(mu))),ylab="Resíduos Deviance")
abline(h=0, col=2)
plot(rd ~ predict(mod1,type="link"), xlab=expression(hat(eta)),ylab="Resíduos Deviance")
identify(predict(mod1,type="link"), rd)
lines (lowess (predict(mod1,type="link"), rd), col="red")
#Influência
plot(Client,cooks.distance(mod1),xlab="Número de nascimentos",ylab="Distância de Cook")
identify(Client,cooks.distance(mod1))
library(faraway)
halfnorm(cooks.distance(mod1))
#Leverage
library(car)
dadosi <- influence(mod1)
halfnorm(dadosi$hat)
h<-hatvalues(mod1)
dim(dados)
traco<-sum(diag(hatvalues(mod1)))
traco
plot (round(y,0), h*70/10)
abline(h=2,col=2)
identify(round(y,0), h*70/10)
#Outlier
plot (rstudent(mod1))
identify(rstudent(mod1))
plot(rd)
identify(rd)
halfnorm(rstudent(mod1))
Client ~ habit + rendim + idade + dist_conc +
dist_loja
# Estimar o número de nascimentos para uma mulher com um casamento de 5 anos, a viver na cidade e a tirar um mestrado
x0 <- data.frame(habit=500,rendim=38000,idade=45, dist_conc=5, dist_loja=7)
x0
predict(mod1,new=x0,se=T,type="response")
pred<-predict(mod1,new=x0,se=T,type="response")
c(pred$fit-qnorm(0.975)*pred$se.fit,pred$fit+qnorm(0.975)*pred$se.fit)
# Estimar o número de nascimentos para uma mulher com um casamento de 10 anos, a viver na cidade e a tirar um mestrado
x0 <- data.frame(dur="10-14",educn="sec",res="urban", n=1)
x0
predict(mod1,new=x0,se=T,type="response")
pred<-predict(mod1,new=x0,se=T,type="response")
c(pred$fit-qnorm(0.975)*pred$se.fit,pred$fit+qnorm(0.975)*pred$se.fit)
# -------------------------- Modelo Gamma --------------------------------
AG <- c(rep(1,17), rep(0,16)); AG
WBC <- c(2300, 750, 4300, 2600, 6000, 10500, 10000, 17000, 5400, 7000, 9400,
32000, 35000, 100000, 10000, 52000, 100000, 4400, 3000, 4000, 1500,
9000, 5300, 10000, 19000, 27000, 28000, 31000,26000, 21000, 79000,
100000, 100000)
Temp <- c(65, 156, 100, 134, 16, 108, 121, 4, 39, 143, 56, 26, 22, 1, 1, 5,
65, 56, 65, 17, 7, 16, 22, 3, 4, 2, 3, 8, 4, 3, 30, 4, 43)
tab <- data.frame(AG, WBC, Temp)
View(tab)
tab$logWBC <- log(tab$WBC)
tab$AG <- as.factor(tab$AG)
str(tab)
par(mfrow=c(1,2))
hist(tab$Temp, col=rainbow(50))
boxplot (tab$Temp, col="red")
library(fBasics)
basicStats (tab$Temp)
mod1 <- glm(tab$Temp ~ tab$AG + tab$logWBC, family=Gamma(link=log), tab, maxit=100)
summary(mod1)
exp(mod1$coefficients)
# Variação explicada
1-(41.608/30)/(58.138/32) #0.2366 / 23.66%
#Verificação dos pressupostos
# 1.Homogeneidade de variâncias
plot(fitted(mod1),rstudent(mod1))
plot(fitted(mod1),rstandard(mod1))
library(lmtest)
bptest(mod1, studentize=F) #p-value=0.08025>0.05 não se rejeita H0
library (car)
ncvTest (lm(tab$Temp ~ tab$AG + tab$logWBC)) #p-value=0.04<0.05 rejeita-se H0
#O pressuposto não se verifica
# 2.Normalidade
r <- residuals(mod1)
rs<-rstandard(mod1)
qqnorm(rs)
qqline (rs, col=2)
hist(rs, ylab=, xlab=, col=rainbow(60))
boxplot(rs)
library (nortest)
lillie.test(rs) #p-value=0.3912>0.05, não se rejeita H0
library (tseries)
jarque.bera.test (rs) #p-value=0.601
a<-ad.test (rs)
a #p-value=0.3856
#Verifica-se o pressuposto
# 3.Multicolinaridade
library(corpcor)
mcor<-cor(data.frame(as.numeric(tab$AG), tab$logWBC))
mcor #não existe multicolinearidade
# 4.Independência
durbinWatsonTest(mod1) #p-value=0<.05 rejeita-se H0
# Verifica-se a independencia
# Adequabilidade da função de ligação
mu <- 2*log(predict (mod1, type="response"))
z <- predict (mod1)+(tab$Temp-mu)/mu
par(mfrow=c(1,1))
plot(z ~ predict(mod1,type="link"), xlab=expression(hat(eta)), tab,ylab="Resposta linearizada")
#função de ligação não é a mais adequada
# Avaliação da Função de Variância
ra<-resid(mod1,type="response")
tr<-2*log(predict(mod1,type="response"))
plot(ra ~ tr, xlab=expression(2*log(hat(mu))),ylab="Resíduos Absolutos")
lines (lowess (ra~tr), col="red")
rd<-resid(mod1,type="deviance")
plot(rd ~ tr, xlab=expression(2*log(hat(mu))),ylab="Resíduos Deviance")
plot(rd ~ predict(mod1,type="link"), xlab=expression(hat(eta)),ylab="Resíduos Deviance")
lines (lowess (predict(mod1,type="link"), rd), col="red")
#Influência
plot(tab$Temp,cooks.distance(mod1),xlab="tempo",ylab="Distância de Cook")
identify(tab$Temp,cooks.distance(mod1))
library(faraway)
halfnorm(cooks.distance(mod1))
#Leverage
library(car)
tabi <- influence(mod1)
halfnorm(tabi$hat)
h<- hatvalues(mod1)
plot (tab$Temp, h*156/32)
abline(h=1)
identify(tab$Temp, h*156/32)
#Outliers
plot (rstudent(mod1))
identify(rstudent(mod1))
plot(rd)
identify(rd)
halfnorm(rstudent(mod1))
|
/t2.R
|
no_license
|
AnaSapata/ACD_T2
|
R
| false
| false
| 7,619
|
r
|
# Análise Categórica e Dados
# MMEAD - 2017/2018 - 2º semestre
# Modelos Lineares Generalizados v1.0
# Ana Sapata n.º39504
# 21/06/2018
dados<-read.table("C:\\Users\\rebis1\\Desktop\\Universidade\\Categorica\\Nova pasta\\loja.txt", header=T)
attach(dados)
View(dados)
str(dados)
# -------------------------- Modelo Normal --------------------------------
#histograma para a variavel cliente
hist(dados$Client)
library(fBasics)
basicStats(Client)
#aplicação do modelo aos dados
fit1<-glm(Client ~ habit + rendim + idade + dist_conc +
dist_loja, family="gaussian", data=dados)
summary(fit1)
#Verificação dos pressupostos
# 1.Homogeneidade de variâncias
plot(fitted(fit1),rstudent(fit1))
plot(fitted(fit1),rstandard(fit1))
library(lmtest)
bptest(fit1, studentize=F) #p-value=0.01245<0.05 rejeita-se H0
library (car)
ncvTest (lm(Client ~ habit + rendim + idade + dist_conc +
dist_loja)) #p-value=0.0005<0.05 rejeita-se H0
#O pressuposto não se verifica
# 2.Normalidade
rs<-rstandard(fit1)
qqnorm(rs)
qqline (rs, col=2)
hist(rs, ylab=, xlab=, col=rainbow(60))
boxplot(rs)
library (nortest)
lillie.test(rs) #p-value=0.4803>0.05, não se rejeita H0
library (tseries)
jarque.bera.test (rs) #p-value=0.2536
a<-ad.test (rs)
a #p-value=0.2721
#Verifica-se o pressuposto
# 3.Multicolinaridade
library(corpcor)
mcor<-cor(data.frame(habit, rendim, idade, dist_conc, dist_loja))
mcor #existem algumas correlações significativas entre as variaveis
#Pelo que existe multicolinearidade entre as variaveis independentes
# 4.Independência
durbinWatsonTest(fit1) #p-value=0.158>0.05 não se rejeita H0
# Não se verifica a independencia
# -------------------------- Modelo Poisson --------------------------------
mod1 <- glm(Client ~ habit + rendim + idade + dist_conc +
dist_loja, family=poisson,dados)
summary(mod1)
exp(mod1$coefficients)
#Simplificação do modelo saturado
mod1a<-step(mod1)
summary (mod1a)
#dá o mesmo modelo
# Adequabilidade da função de ligação
mu <- 2*sqrt(predict (mod1, type="response")) #2*sqrt(mu) p.79 livro MLG prof
z <- predict (mod1)+(Client-mu)/mu
plot(z ~ predict(mod1,type="link"), xlab=expression(hat(eta)), dados,ylab="Resposta linearizada")
#embora exista alguma dispersão os dados seguem +/- ums linha pelo que é adequada
# Avaliação da Função de Variância
ra<-resid(mod1,type="response")
tr<-2*sqrt(predict(mod1,type="response"))
plot(ra ~ tr, xlab=expression(2*sqrt(hat(mu))),ylab="Resíduos Absolutos")
plot(residuals (mod1) ~ predict(mod1,type="link"), xlab=expression(hat(u)),ylab="Resíduos Absolutos")
lines (lowess (predict(mod1,type="link"), residuals (mod1)), col="red")
# Resíduos deviance
rd<-resid(mod1,type="deviance")
tr<-2*sqrt(predict(mod1,type="response"))
plot(rd ~ tr, xlab=expression(2*sqrt(hat(mu))),ylab="Resíduos Deviance")
abline(h=0, col=2)
plot(rd ~ predict(mod1,type="link"), xlab=expression(hat(eta)),ylab="Resíduos Deviance")
identify(predict(mod1,type="link"), rd)
lines (lowess (predict(mod1,type="link"), rd), col="red")
#Influência
plot(Client,cooks.distance(mod1),xlab="Número de nascimentos",ylab="Distância de Cook")
identify(Client,cooks.distance(mod1))
library(faraway)
halfnorm(cooks.distance(mod1))
#Leverage
library(car)
dadosi <- influence(mod1)
halfnorm(dadosi$hat)
h<-hatvalues(mod1)
dim(dados)
traco<-sum(diag(hatvalues(mod1)))
traco
plot (round(y,0), h*70/10)
abline(h=2,col=2)
identify(round(y,0), h*70/10)
#Outlier
plot (rstudent(mod1))
identify(rstudent(mod1))
plot(rd)
identify(rd)
halfnorm(rstudent(mod1))
Client ~ habit + rendim + idade + dist_conc +
dist_loja
# Estimar o número de nascimentos para uma mulher com um casamento de 5 anos, a viver na cidade e a tirar um mestrado
x0 <- data.frame(habit=500,rendim=38000,idade=45, dist_conc=5, dist_loja=7)
x0
predict(mod1,new=x0,se=T,type="response")
pred<-predict(mod1,new=x0,se=T,type="response")
c(pred$fit-qnorm(0.975)*pred$se.fit,pred$fit+qnorm(0.975)*pred$se.fit)
# Estimar o número de nascimentos para uma mulher com um casamento de 10 anos, a viver na cidade e a tirar um mestrado
x0 <- data.frame(dur="10-14",educn="sec",res="urban", n=1)
x0
predict(mod1,new=x0,se=T,type="response")
pred<-predict(mod1,new=x0,se=T,type="response")
c(pred$fit-qnorm(0.975)*pred$se.fit,pred$fit+qnorm(0.975)*pred$se.fit)
# -------------------------- Modelo Gamma --------------------------------
AG <- c(rep(1,17), rep(0,16)); AG
WBC <- c(2300, 750, 4300, 2600, 6000, 10500, 10000, 17000, 5400, 7000, 9400,
32000, 35000, 100000, 10000, 52000, 100000, 4400, 3000, 4000, 1500,
9000, 5300, 10000, 19000, 27000, 28000, 31000,26000, 21000, 79000,
100000, 100000)
Temp <- c(65, 156, 100, 134, 16, 108, 121, 4, 39, 143, 56, 26, 22, 1, 1, 5,
65, 56, 65, 17, 7, 16, 22, 3, 4, 2, 3, 8, 4, 3, 30, 4, 43)
tab <- data.frame(AG, WBC, Temp)
View(tab)
tab$logWBC <- log(tab$WBC)
tab$AG <- as.factor(tab$AG)
str(tab)
par(mfrow=c(1,2))
hist(tab$Temp, col=rainbow(50))
boxplot (tab$Temp, col="red")
library(fBasics)
basicStats (tab$Temp)
mod1 <- glm(tab$Temp ~ tab$AG + tab$logWBC, family=Gamma(link=log), tab, maxit=100)
summary(mod1)
exp(mod1$coefficients)
# Variação explicada
1-(41.608/30)/(58.138/32) #0.2366 / 23.66%
#Verificação dos pressupostos
# 1.Homogeneidade de variâncias
plot(fitted(mod1),rstudent(mod1))
plot(fitted(mod1),rstandard(mod1))
library(lmtest)
bptest(mod1, studentize=F) #p-value=0.08025>0.05 não se rejeita H0
library (car)
ncvTest (lm(tab$Temp ~ tab$AG + tab$logWBC)) #p-value=0.04<0.05 rejeita-se H0
#O pressuposto não se verifica
# 2.Normalidade
r <- residuals(mod1)
rs<-rstandard(mod1)
qqnorm(rs)
qqline (rs, col=2)
hist(rs, ylab=, xlab=, col=rainbow(60))
boxplot(rs)
library (nortest)
lillie.test(rs) #p-value=0.3912>0.05, não se rejeita H0
library (tseries)
jarque.bera.test (rs) #p-value=0.601
a<-ad.test (rs)
a #p-value=0.3856
#Verifica-se o pressuposto
# 3.Multicolinaridade
library(corpcor)
mcor<-cor(data.frame(as.numeric(tab$AG), tab$logWBC))
mcor #não existe multicolinearidade
# 4.Independência
durbinWatsonTest(mod1) #p-value=0<.05 rejeita-se H0
# Verifica-se a independencia
# Adequabilidade da função de ligação
mu <- 2*log(predict (mod1, type="response"))
z <- predict (mod1)+(tab$Temp-mu)/mu
par(mfrow=c(1,1))
plot(z ~ predict(mod1,type="link"), xlab=expression(hat(eta)), tab,ylab="Resposta linearizada")
#função de ligação não é a mais adequada
# Avaliação da Função de Variância
ra<-resid(mod1,type="response")
tr<-2*log(predict(mod1,type="response"))
plot(ra ~ tr, xlab=expression(2*log(hat(mu))),ylab="Resíduos Absolutos")
lines (lowess (ra~tr), col="red")
rd<-resid(mod1,type="deviance")
plot(rd ~ tr, xlab=expression(2*log(hat(mu))),ylab="Resíduos Deviance")
plot(rd ~ predict(mod1,type="link"), xlab=expression(hat(eta)),ylab="Resíduos Deviance")
lines (lowess (predict(mod1,type="link"), rd), col="red")
#Influência
plot(tab$Temp,cooks.distance(mod1),xlab="tempo",ylab="Distância de Cook")
identify(tab$Temp,cooks.distance(mod1))
library(faraway)
halfnorm(cooks.distance(mod1))
#Leverage
library(car)
tabi <- influence(mod1)
halfnorm(tabi$hat)
h<- hatvalues(mod1)
plot (tab$Temp, h*156/32)
abline(h=1)
identify(tab$Temp, h*156/32)
#Outliers
plot (rstudent(mod1))
identify(rstudent(mod1))
plot(rd)
identify(rd)
halfnorm(rstudent(mod1))
|
library(phylocurve)
### Name: ultraFastAnc
### Title: Ultra-fast maximum likelihood ancestral state reconstruction
### Aliases: ultraFastAnc
### Keywords: ~kwd1 ~kwd2
### ** Examples
require(ape)
tree <- rtree(1e4) # random tree with 10,000 taxa
x <- setNames(rnorm(1e4),tree$tip.label) # random trait data
recon <- ultraFastAnc(phy=tree,x=x,CI=TRUE)
|
/data/genthat_extracted_code/phylocurve/examples/ultraFastAnc.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 358
|
r
|
library(phylocurve)
### Name: ultraFastAnc
### Title: Ultra-fast maximum likelihood ancestral state reconstruction
### Aliases: ultraFastAnc
### Keywords: ~kwd1 ~kwd2
### ** Examples
require(ape)
tree <- rtree(1e4) # random tree with 10,000 taxa
x <- setNames(rnorm(1e4),tree$tip.label) # random trait data
recon <- ultraFastAnc(phy=tree,x=x,CI=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmulti.R
\name{glmmulti}
\alias{glmmulti}
\title{Binomial logistic regression multivariable models: \code{finalfit} model
wrapper}
\usage{
glmmulti(.data, dependent, explanatory)
}
\arguments{
\item{.data}{Dataframe.}
\item{dependent}{Character vector usually of length 1, but can take more than
1 dependent: name of depdendent variable (must have 2 levels).}
\item{explanatory}{Character vector of any length: name(s) of explanatory
variables.}
}
\value{
A list of multivariable \code{\link[stats]{glm}} fitted model
outputs. Output is of class \code{glmlist}.
}
\description{
Using \code{finalfit} conventions, produces multiple multivariable binomial
logistic regression models for a set of explanatory variables against a
binary dependent.
}
\details{
Uses \code{\link[stats]{glm}} with \code{finalfit} modelling conventions.
Output can be passed to \code{\link{fit2df}}. Note that this function can
take multiple \code{dependent} variables as well, but performs multiple
individual models, not a multivariate analysis.
}
\examples{
library(finalfit)
library(dplyr)
explanatory = c("age.factor", "sex.factor", "obstruct.factor", "perfor.factor")
dependent = "mort_5yr"
colon_s \%>\%
glmmulti(dependent, explanatory) \%>\%
fit2df(estimate_suffix=" (univariable)")
}
\seealso{
\code{\link{fit2df}, \link{finalfit_merge}}
Other \code{finalfit} model wrappers: \code{\link{coxphmulti}},
\code{\link{coxphuni}}, \code{\link{glmmixed}},
\code{\link{glmmulti_boot}}, \code{\link{glmuni}},
\code{\link{lmmixed}}, \code{\link{lmmulti}},
\code{\link{lmuni}}
}
|
/man/glmmulti.Rd
|
no_license
|
nemochina2008/finalfit
|
R
| false
| true
| 1,652
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmulti.R
\name{glmmulti}
\alias{glmmulti}
\title{Binomial logistic regression multivariable models: \code{finalfit} model
wrapper}
\usage{
glmmulti(.data, dependent, explanatory)
}
\arguments{
\item{.data}{Dataframe.}
\item{dependent}{Character vector usually of length 1, but can take more than
1 dependent: name of depdendent variable (must have 2 levels).}
\item{explanatory}{Character vector of any length: name(s) of explanatory
variables.}
}
\value{
A list of multivariable \code{\link[stats]{glm}} fitted model
outputs. Output is of class \code{glmlist}.
}
\description{
Using \code{finalfit} conventions, produces multiple multivariable binomial
logistic regression models for a set of explanatory variables against a
binary dependent.
}
\details{
Uses \code{\link[stats]{glm}} with \code{finalfit} modelling conventions.
Output can be passed to \code{\link{fit2df}}. Note that this function can
take multiple \code{dependent} variables as well, but performs multiple
individual models, not a multivariate analysis.
}
\examples{
library(finalfit)
library(dplyr)
explanatory = c("age.factor", "sex.factor", "obstruct.factor", "perfor.factor")
dependent = "mort_5yr"
colon_s \%>\%
glmmulti(dependent, explanatory) \%>\%
fit2df(estimate_suffix=" (univariable)")
}
\seealso{
\code{\link{fit2df}, \link{finalfit_merge}}
Other \code{finalfit} model wrappers: \code{\link{coxphmulti}},
\code{\link{coxphuni}}, \code{\link{glmmixed}},
\code{\link{glmmulti_boot}}, \code{\link{glmuni}},
\code{\link{lmmixed}}, \code{\link{lmmulti}},
\code{\link{lmuni}}
}
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/NSCLC/NSCLC_082.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Correlation/NSCLC/NSCLC_082.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 357
|
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/NSCLC/NSCLC_082.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
\name{fdrtool}
\alias{fdrtool}
\title{Estimate (Local) False Discovery Rates For Diverse Test Statistics}
\usage{
fdrtool(x, statistic=c("normal", "correlation", "pvalue"),
plot=TRUE, color.figure=TRUE, verbose=TRUE,
cutoff.method=c("fndr", "pct0", "locfdr"),
pct0=0.75)
}
\description{
\code{fdrtool} takes a vector of z-scores (or of correlations, p-values,
or t-statistics), and estimates for each case both the tail area-based Fdr
as well as the density-based fdr (=q-value resp. local false discovery rate).
The parameters of the null distribution are
estimated adaptively from the data (except for the case of p-values where
this is not necessary).
}
\arguments{
\item{x}{vector of the observed test statistics.}
\item{statistic}{one of "normal" (default), "correlation", "pvalue".
This species the null model.}
\item{plot}{plot a figure with estimated densities, distribution functions,
and (local) false discovery rates.}
\item{verbose}{print out status messages.}
\item{cutoff.method}{one of "fndr" (default), "pct0", "locfdr".}
\item{pct0}{fraction of data used for fitting null model - only if \code{cutoff.method}="pct0"}
\item{color.figure}{determines whether a color figure or a black and white
figure is produced (defaults to "TRUE", i.e. to color figure).}
}
\details{
The algorithm implemented in this function proceeds as follows:
\enumerate{
\item A suitable cutoff point is determined. If \code{cutoff.method}
is "fndr" then first an approximate null model is fitted and
subsequently a cutoff point is sought with false nondiscovery
rate as small as possible (see \code{\link{fndr.cutoff}}).
If \code{cutoff.method} is "pct0"
then a specified quantile (default value: 0.75) of the data
is used as the cutoff point. If \code{cutoff.method} equals
"locfdr" then the heuristic of the "locfdr" package (version 1.1-6)
is employed to find the cutoff (z-scores and correlations only).
\item The parameters of the null model are estimated from the
data using \code{\link{censored.fit}}. This results
in estimates for scale parameters und and proportion
of null values (\code{eta0}).
\item Subsequently the corresponding p-values are computed, and
a modified \code{\link{grenander}} algorithm is employed
to obtain the overall density and distribution function
(note that this respects the estimated \code{eta0}).
\item Finally, q-values and local fdr values are computed for each case.
}
The assumed null models all have (except for p-values) one free
scale parameter. Note that the z-scores and the correlations
are assumed to have zero mean.
}
\value{
A list with the following components:
\item{pval}{a vector with p-values for each case.}
\item{qval}{a vector with q-values (Fdr) for each case.}
\item{lfdr}{a vector with local fdr values for each case.}
\item{statistic}{the specified type of null model.}
\item{param}{a vector containing the estimated parameters (the null
proportion \code{eta0} and the free parameter of the null model).}
}
\author{
Korbinian Strimmer (\url{https://strimmerlab.github.io}).
}
\references{
Strimmer, K. (2008a). A unified approach to false discovery
rate estimation. BMC Bioinformatics 9: 303.
<DOI:10.1186/1471-2105-9-303>
Strimmer, K. (2008b). fdrtool: a versatile R package for estimating
local and tail area- based false discovery rates.
Bioinformatics 24: 1461-1462.
<DOI:10.1093/bioinformatics/btn209>
}
\seealso{\code{\link{pval.estimate.eta0}}, \code{\link{censored.fit}}.}
\examples{
# load "fdrtool" library and p-values
library("fdrtool")
data(pvalues)
# estimate fdr and Fdr from p-values
data(pvalues)
fdr = fdrtool(pvalues, statistic="pvalue")
fdr$qval # estimated Fdr values
fdr$lfdr # estimated local fdr
# the same but with black and white figure
fdr = fdrtool(pvalues, statistic="pvalue", color.figure=FALSE)
# estimate fdr and Fdr from z-scores
sd.true = 2.232
n = 500
z = rnorm(n, sd=sd.true)
z = c(z, runif(30, 5, 10)) # add some contamination
fdr = fdrtool(z)
# you may change some parameters of the underlying functions
fdr = fdrtool(z, cutoff.method="pct0", pct0=0.9)
}
\keyword{htest}
|
/man/fdrtool.Rd
|
no_license
|
cran/fdrtool
|
R
| false
| false
| 4,382
|
rd
|
\name{fdrtool}
\alias{fdrtool}
\title{Estimate (Local) False Discovery Rates For Diverse Test Statistics}
\usage{
fdrtool(x, statistic=c("normal", "correlation", "pvalue"),
plot=TRUE, color.figure=TRUE, verbose=TRUE,
cutoff.method=c("fndr", "pct0", "locfdr"),
pct0=0.75)
}
\description{
\code{fdrtool} takes a vector of z-scores (or of correlations, p-values,
or t-statistics), and estimates for each case both the tail area-based Fdr
as well as the density-based fdr (=q-value resp. local false discovery rate).
The parameters of the null distribution are
estimated adaptively from the data (except for the case of p-values where
this is not necessary).
}
\arguments{
\item{x}{vector of the observed test statistics.}
\item{statistic}{one of "normal" (default), "correlation", "pvalue".
This species the null model.}
\item{plot}{plot a figure with estimated densities, distribution functions,
and (local) false discovery rates.}
\item{verbose}{print out status messages.}
\item{cutoff.method}{one of "fndr" (default), "pct0", "locfdr".}
\item{pct0}{fraction of data used for fitting null model - only if \code{cutoff.method}="pct0"}
\item{color.figure}{determines whether a color figure or a black and white
figure is produced (defaults to "TRUE", i.e. to color figure).}
}
\details{
The algorithm implemented in this function proceeds as follows:
\enumerate{
\item A suitable cutoff point is determined. If \code{cutoff.method}
is "fndr" then first an approximate null model is fitted and
subsequently a cutoff point is sought with false nondiscovery
rate as small as possible (see \code{\link{fndr.cutoff}}).
If \code{cutoff.method} is "pct0"
then a specified quantile (default value: 0.75) of the data
is used as the cutoff point. If \code{cutoff.method} equals
"locfdr" then the heuristic of the "locfdr" package (version 1.1-6)
is employed to find the cutoff (z-scores and correlations only).
\item The parameters of the null model are estimated from the
data using \code{\link{censored.fit}}. This results
in estimates for scale parameters und and proportion
of null values (\code{eta0}).
\item Subsequently the corresponding p-values are computed, and
a modified \code{\link{grenander}} algorithm is employed
to obtain the overall density and distribution function
(note that this respects the estimated \code{eta0}).
\item Finally, q-values and local fdr values are computed for each case.
}
The assumed null models all have (except for p-values) one free
scale parameter. Note that the z-scores and the correlations
are assumed to have zero mean.
}
\value{
A list with the following components:
\item{pval}{a vector with p-values for each case.}
\item{qval}{a vector with q-values (Fdr) for each case.}
\item{lfdr}{a vector with local fdr values for each case.}
\item{statistic}{the specified type of null model.}
\item{param}{a vector containing the estimated parameters (the null
proportion \code{eta0} and the free parameter of the null model).}
}
\author{
Korbinian Strimmer (\url{https://strimmerlab.github.io}).
}
\references{
Strimmer, K. (2008a). A unified approach to false discovery
rate estimation. BMC Bioinformatics 9: 303.
<DOI:10.1186/1471-2105-9-303>
Strimmer, K. (2008b). fdrtool: a versatile R package for estimating
local and tail area- based false discovery rates.
Bioinformatics 24: 1461-1462.
<DOI:10.1093/bioinformatics/btn209>
}
\seealso{\code{\link{pval.estimate.eta0}}, \code{\link{censored.fit}}.}
\examples{
# load "fdrtool" library and p-values
library("fdrtool")
data(pvalues)
# estimate fdr and Fdr from p-values
data(pvalues)
fdr = fdrtool(pvalues, statistic="pvalue")
fdr$qval # estimated Fdr values
fdr$lfdr # estimated local fdr
# the same but with black and white figure
fdr = fdrtool(pvalues, statistic="pvalue", color.figure=FALSE)
# estimate fdr and Fdr from z-scores
sd.true = 2.232
n = 500
z = rnorm(n, sd=sd.true)
z = c(z, runif(30, 5, 10)) # add some contamination
fdr = fdrtool(z)
# you may change some parameters of the underlying functions
fdr = fdrtool(z, cutoff.method="pct0", pct0=0.9)
}
\keyword{htest}
|
### Jinliang
### May 12th, 2015
#source("~/Documents/Github/zmSNPtools/Rcodes/dsnp2GenABEL.R")
library("data.table")
#library("GenABEL.data", lib="~/bin/Rlib/")
#library("GenABEL", lib="~/bin/Rlib/")
bed2illumina <- function(){
#######==> GBS data for GenABEL
gbs <- fread("~/dbcenter/AllZeaGBS/ZeaGBSv27_Ames282_agpv3.bed5", data.table=FALSE)
nms <- names(gbs)
nms2 <- gsub(":.*", "", nms)
names(gbs) <- nms2
gbs1 <- gbs
gbs1 <- gbs1[, c(-2, -5)]
gbs1 <- gbs1[, c(3,1, 2, 4:ncol(gbs1))]
names(gbs1)[1:3] <- c("Name", "Chr", "Pos")
gbs1[gbs1 == "A"] <- "AA"
gbs1[gbs1 == "T"] <- "TT"
gbs1[gbs1 == "C"] <- "CC"
gbs1[gbs1 == "G"] <- "GG"
gbs1[gbs1 == "-"] <- "--"
gbs1[gbs1 == "N"] <- "00"
message("start to writing ...")
write.table(gbs1, "largedata/10.Dong/ZeaGBSv27_Ames282_agpv3.illumina", sep="\t", row.names=FALSE, quote=FALSE)
}
bed2illumina()
###>>> Read 509572 rows and 293 (of 293) columns from 0.291 GB file in 00:00:14
###>>> Input [ 509572 ] GBS data, after filtering, [ 306190 ] remaining
|
/profiling/10.Dong_etal/10.B.4_GBS_2GenABEL.R
|
no_license
|
yangjl/Misc
|
R
| false
| false
| 1,093
|
r
|
### Jinliang
### May 12th, 2015
#source("~/Documents/Github/zmSNPtools/Rcodes/dsnp2GenABEL.R")
library("data.table")
#library("GenABEL.data", lib="~/bin/Rlib/")
#library("GenABEL", lib="~/bin/Rlib/")
bed2illumina <- function(){
#######==> GBS data for GenABEL
gbs <- fread("~/dbcenter/AllZeaGBS/ZeaGBSv27_Ames282_agpv3.bed5", data.table=FALSE)
nms <- names(gbs)
nms2 <- gsub(":.*", "", nms)
names(gbs) <- nms2
gbs1 <- gbs
gbs1 <- gbs1[, c(-2, -5)]
gbs1 <- gbs1[, c(3,1, 2, 4:ncol(gbs1))]
names(gbs1)[1:3] <- c("Name", "Chr", "Pos")
gbs1[gbs1 == "A"] <- "AA"
gbs1[gbs1 == "T"] <- "TT"
gbs1[gbs1 == "C"] <- "CC"
gbs1[gbs1 == "G"] <- "GG"
gbs1[gbs1 == "-"] <- "--"
gbs1[gbs1 == "N"] <- "00"
message("start to writing ...")
write.table(gbs1, "largedata/10.Dong/ZeaGBSv27_Ames282_agpv3.illumina", sep="\t", row.names=FALSE, quote=FALSE)
}
bed2illumina()
###>>> Read 509572 rows and 293 (of 293) columns from 0.291 GB file in 00:00:14
###>>> Input [ 509572 ] GBS data, after filtering, [ 306190 ] remaining
|
library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0025) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 8
r.sim <- b.var
# Run one complete simulation replicate.
#
# Generates hierarchical functional data (condition-specific subject
# random intercepts plus subject random slopes on the true principal
# component scores), estimates scores by FPCA (refund::fpca.face),
# orthogonalizes each subject's score block via SVD, fits the full and
# reduced mixed models with lme4::lmer and tests the random-slope
# variance component with RLRsim::exactRLRT.
#
# iter : replicate index; also seeds the RNG (iter + 5000) so each
#        cluster worker's replicate is reproducible.
# Reads globals exported to the worker: r.sim (true slope variance) and
# smooth (0 = add measurement error to M, 1 = none).
# Returns a list: realTau, pvalues.bonf (RLRT p-value), Merror.Var,
# smooth, npc (PCs kept by FPCA) and tests2 (full test object).
run_one_sample <- function(iter){
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter + 5000)
  # ---- design constants ----
  D <- 80              # grid points per curve
  nSubj <- 200         # I, the number of subjects (curves)
  nRep <- 50           # repeated measurements per subject
  totalN <- nSubj * nRep
  thetaK.true <- 2     # mean of the true random slopes
  timeGrid <- (1:D)/D
  npc.true <- 3        # number of true principal components
  percent <- 0.95      # proportion of variance explained kept by FPCA
  SNR <- 3             # signal-to-noise ratio (used only when smooth == 0)
  sd.epsilon <- 1      # sd of the residual error (or 0.5)
  delta.true <- 0.5    # fixed intercept
  a.mean <- 0          # mean of the true PC scores
  gamma.true <- 2      # mean of the condition random intercepts
  gammaVar.true <- 1   # their variance
  # Subject random intercepts for the "hot" condition; mapply yields an
  # nSubj x 1 matrix, expanded to one row per repetition.
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # ... and for the "warm" condition.
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # condition indicator
  # ---- true functional covariates: Fourier basis, decaying eigenvalues ----
  lambda.sim <- function(degree) {
    0.5^(degree - 1)
  }
  psi.fourier <- function(t, degree) {
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1:npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each = D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific random slopes on the PC scores; component variances
  # r.sim, r.sim/2, r.sim/4.
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- rowSums(thetaIK.true * ascore.true)
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  # Optional measurement error on the functional covariate.
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR # SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean = 0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # ---- FPCA on the observed curves ----
  t <- (1:D)/D
  knots <- 5 # previous setting 10
  p <- 5     # B-spline degree; previous setting 7
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0)
  npc <- results$npc
  ascore <- results$scores[, 1:npc]/sqrt(D) # rescale scores to the grid
  ###########################################################################
  # ---- per-subject orthogonalization via SVD of A_i A_i' ----
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni <- c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for(k in seq_len(nSubj)){
    idx <- ((k-1)*nRep+1):(k*nRep) # rows belonging to subject k
    Ai <- ascore[idx, ]
    dec <- svd(Ai %*% t(Ai)) # keep the name distinct from base::svd
    u.tra <- t(dec$v)
    d <- (dec$d)[1:npc]
    # Rotate response, fixed covariates and scores into the SVD basis.
    Y[idx] <- u.tra %*% Y[idx]
    dummyX[idx, ] <- u.tra %*% dummyX[idx, ]
    ascore[idx, ] <- rbind(u.tra[1:npc, ] %*% Ai,
                           matrix(0, nrow = nRep - npc, ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  # ---- mixed models and exact RLRT ----
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  designMatrix.lmm <- designMatrix
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  # Full model: fixed PC effects, condition random intercepts and the
  # random slope under test.
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model containing only the random effect being tested.
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: full model minus the tested random slope.
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# Setup parallel ----
#cores <- detectCores()
# Spin up a cluster with 'cores' worker processes.
cluster <- makeCluster(cores)
#clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
# Export the globals that run_one_sample reads on each worker.
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed2_grp200-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# Run simRep independent replicates across the workers; each element is
# the result list returned by run_one_sample.
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# TRUE for each replicate whose RLRT p-value falls below the nominal level.
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
# Empirical power = rejection rate across replicates.
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
# Release the worker processes.
stopCluster(cluster)
|
/full simulation/summer/hetero_power/variance0.0025/seed2/heter_power_0.0025_pca_s_seed2_200_50.R
|
no_license
|
wma9/FMRI-project
|
R
| false
| false
| 9,199
|
r
|
# Power simulation for the exact restricted-likelihood ratio test (RLRT)
# of a functional random-slope variance component, run in parallel.
library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0025) # The set of variances of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if smooth = 1
cores <- 8
r.sim <- b.var
# One complete simulation replicate: generate hierarchical functional
# data, estimate PC scores by FPCA, orthogonalize each subject's score
# block via SVD, then test the random-slope variance component with
# RLRsim::exactRLRT.  'iter' seeds the RNG; the globals r.sim and smooth
# must be exported to each worker.  Returns a list with the true tau,
# the RLRT p-value, the measurement-error variance, the smooth flag, the
# number of selected PCs and the full test object.
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
set.seed(iter+5000)
D <- 80 # grid number total
nSubj <- 200 # 200 # I, the number of curves
nRep <- 50 # 20 # repeated measurements per subject
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal-to-noise ratio
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot condition: subject random intercepts, one row per repetition
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm condition
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
# generate functional covariates (Fourier basis, decaying eigenvalues)
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
# subject-specific random slopes on PC scores: variances r.sim, r.sim/2, r.sim/4
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
# optionally add measurement error to the functional covariate
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
# FPCA on the observed curves
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the B-spline degree we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
# per-subject orthogonalization via SVD of A_i A_i'
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
# rotate response, covariates and scores into the SVD basis
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' models: full, slope-only and null, then the exact RLRT
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# full model: fixed PC effects + condition random intercepts + tested slope
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
# model containing only the random effect being tested
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
# null model: full model minus the tested random slope
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# Setup parallel ----
#cores <- detectCores()
cluster <- makeCluster(cores)
#clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
# export the globals that run_one_sample reads on each worker
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed2_grp200-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# run simRep independent replicates across the workers
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# TRUE for each replicate whose RLRT p-value is below the nominal level
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
# empirical power = rejection rate across replicates
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
# release the worker processes
stopCluster(cluster)
|
##################################
##### 01REAN Exercise 5 #########
#################################
#
# Today's exercise:
# Multivariable linear regression in R
library(lattice)
library(MASS)
library(car)
library(ggplot2)
# NOTE(review): setwd() hard-codes a user-specific path; prefer
# project-relative paths for portability.
setwd("~/Studies/REAN/tutorials/05")
# We use again the trees data to present multivariable linear regression
head(trees)
summary(trees)
# add a fake 4-level "Forest" group (values 1..4) for plotting demos
trees$Forest <- rbinom(nrow(trees),3,0.5)+1
# Basic scatterplot
pairs(trees, main="Basic Scatterplot Matrix") # all variables in data trees
pairs(~(.), data=trees, main="Basic Scatterplot Matrix") # all variables in data trees
pairs(~Girth+Volume,data=trees, main="Basic Scatterplot Matrix") # Girth and Volume variables from data trees
# Scatterplot Matrices from lattice package
splom(trees)
splom(trees, groups=trees$Forest )
splom(trees[c("Girth","Height","Volume")], groups=trees$Forest )
# Scatterplot Matrices from the car package
scatterplotMatrix(~(.)|Forest, data=trees, main="Three Cylinder Options")
# tune colors and remove regression lines
library(RColorBrewer)
my_colors <- brewer.pal(nlevels(as.factor(trees$Forest)), "Set2")
scatterplotMatrix(~(.)|Forest, data=trees ,
reg.line="" , smoother="", col=my_colors , smoother.args=list(col="grey") ,
cex=1.5 , pch=c(15,16,17) , main="Scatter plot with Three Cylinder Options")
# xyplot
xyplot(Volume ~ Girth | Forest , data=trees , pch=20 , cex=2 , col="blue" )
### 4x "Forest" panels because Forest is not yet a factor (categorical variable)
# Forest as factor
summary(trees)
trees$Forest <- as.factor(trees$Forest)
# Different
xyplot(Volume ~ Girth | Forest , data=trees , pch=20 , cex=2 , col="red" )
# How to divide a plot into two parts and have only one title
#Divide the screen in 1 line and 2 columns
#Make the margin around each graph a bit smaller
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
# oma is a vector of the form c(bottom, left, top, right)
# giving the size of the outer margins in lines of text.
#Classical histogram and density
hist(trees$Volume, main="" , breaks=10 , col="gray" , xlab="Volume" , ylab="Number of trees in each bin")
hist(trees$Volume, freq=F, breaks=10, main="", xlab="Volume",xlim=c(10, 80),ylim=c(0, 0.05))
lines(density(trees$Volume), col="red", lwd=2)
#Add only ONE title :
mtext("Histogram and Density plot of Volume", outer = TRUE, cex = 1.4, font=4, col=rgb(0.1,0.3,0.5,0.5) )
## At end of plotting, reset to previous settings:
par(op)
# How to split screen and make nice plots :)
# Divide the screen in 2 lines and 1 column only
new_screen_step1 <- split.screen(c(2, 1))
# Add one graph on the screen number 1 which is on top :
screen(new_screen_step1[1])
plot( Volume~Girth , data=trees, pch=20 , xlab="Girth",ylab="Volume", cex=1 , col="black" )
# Divide the second screen in 2 columns :
new_screen_step1=split.screen(c(1, 2), screen = new_screen_step1[2])
screen(new_screen_step1[1])
hist(trees$Girth , breaks = 8, border=F , col="blue" , main="" , xlab="Distribution of Girth")
screen(new_screen_step1[2])
hist(trees$Volume, breaks = 8, border=F , col="red" , main="" , xlab="Distribution of Volume")
table(trees$Height)
trees$Height.f<-cut(trees$Height, seq(60,90,10), right=FALSE)
# cut creates a categorical (factor) variable from the numeric Height
is.factor(trees$Height.f)
# ggplot version of scatterplot, where color and shape depend on a factor variable
# multiplot with ggplot is different
# more possibilities exist, here is only one
require(gridExtra)
plot1 <- ggplot(trees, aes(x=Girth, y=Volume, color=Forest, shape=Forest)) +
geom_point(size=5, alpha=0.8) + theme_bw()
plot2 <- ggplot(trees, aes(x=Girth, y=Volume, color=Height.f, size=Height.f)) +
geom_point(alpha=0.6) + theme_bw()
grid.arrange(plot1, plot2, ncol=2)
# Reset plot settings
dev.new()
dev.off()
##############################
# Let's start with regression
#### Model selection by adding variables #########
trees_lm1.0 <- lm(Volume ~ (.), data = trees) # use all variables in the data frame
trees_lm1.0 <- lm(Volume ~ (.)^2, data = trees) # use all variables in the data frame
# with second order interactions
# uff - let's start from the simplest model
# only Girth and Height as independent variables
trees_lm1.0 <- lm(Volume ~ Girth + Height, data = trees) # overwrite with the simple model
# nearly all information is in summary
summary(trees_lm1.0)
# Construct X, Y, beta_hat - if you want to analyze it by hand
n = nrow(trees)
p = length(coefficients(trees_lm1.0))
beta_hat = as.matrix(coefficients(trees_lm1.0))
X = as.matrix(cbind(rep(1,times = n), trees[,c(1,2)]))
Y = as.matrix(trees[,3])
# manual residuals should agree with residuals() up to rounding
residuals_lm1.0 = Y - X%*%beta_hat
((residuals_lm1.0 - residuals(trees_lm1.0))<0.00001)
# and so on
summary(trees)
# prediction grid over Girth (with Height varied in parallel)
trees_new <- data.frame(Girth = (seq(5,25,0.1)), Height = seq(60,90,length=length(seq(5,25,0.1))))
conf_new <- predict(trees_lm1.0, newdata = trees_new, interval = "confidence")
pred_new <- predict(trees_lm1.0, newdata = trees_new, interval = "prediction")
height_lim = seq(60,90,length=length(seq(5,25,0.1)))
# side-by-side plots: fit with confidence and prediction bands
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
plot(Volume ~ Girth, data = trees, xlim = c(5,25), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Girth and Volume dependence",xlab="Girth", ylab="Volume")
lines(seq(5,25,0.1), pred_new[,1], col='black')
lines(seq(5,25,0.1), pred_new[,2], col='red')
lines(seq(5,25,0.1), pred_new[,3], col='red')
lines(seq(5,25,0.1), conf_new[,2], col='blue')
lines(seq(5,25,0.1), conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
plot(Volume ~ Height, data = trees, xlim = c(60,90), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Height and Volume dependence",xlab="Height", ylab="Volume")
lines(height_lim, pred_new[,1], col='black')
lines(height_lim, pred_new[,2], col='red')
lines(height_lim, pred_new[,3], col='red')
lines(height_lim, conf_new[,2], col='blue')
lines(height_lim, conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
par(op)
require(scatterplot3d) # for 3d scatter plot
# Another design of scatterplot3d
s3d <-with(trees,scatterplot3d(Girth, Height, Volume, pch=16,
highlight.3d=TRUE, type="h",
main="3D Scatter Plot with Vertical Lines and Regression Planes",
angle=135,scale.y=1, xlab="Girth",ylab="Height",zlab="Volume",
cex.lab=1.5, cex.axis=1.5, cex.main=1.1, cex.sub=1.5))
s3d$plane3d(trees_lm1.0)
# Comparable statistics of the model (See lecture 5)
summary(trees_lm1.0)$r.squared
summary(trees_lm1.0)$adj.r.squared
summary(trees_lm1.0)$fstatistic
# polynomial regression without scaled variables
trees_lm2.0 <- lm(Volume ~ (Girth) + I((Girth)^2), data = trees)
summary(trees_lm2.0)
# polynomial regression with scaled variables
trees_lm2.1 <- lm(Volume ~ scale(Girth) + I(scale(Girth)^2), data = trees)
summary(trees_lm2.1)
plot(Volume ~ scale(Girth), data = trees)
lines(fitted(trees_lm2.1) ~ scale(Girth), data = trees)
conf_new <- predict(trees_lm2.1, newdata = trees_new, interval = "confidence")
# polynomial regression with scaled variable Girth, adding Height
trees_lm2.2 <- lm(Volume ~ scale(Girth) + I(scale(Girth)^2) + Height, data = trees)
summary(trees_lm2.2)
# polynomial regression with scaled variable Girth, adding Height and interaction
trees_lm2.3 <- lm(Volume ~ Height*scale(Girth) + I(scale(Girth)^2) , data = trees)
summary(trees_lm2.3)
# :(
# Qualitative Explanatory Variables - Forest
summary(trees)
# binary indicator: is the tree taller than 76 ft?
trees$Tall <- cut(trees$Height, breaks = c(-Inf, 76, Inf), labels = c("no", "yes"))
treesTall <- split(trees, trees$Tall)
trees_lm_Tall <- lm(Volume ~ Girth + Tall, data = trees)
summary(trees_lm_Tall)
# fitted lines for each Tall group (parallel lines, different intercepts)
treesTall[["yes"]]$Fit <- predict(trees_lm_Tall, treesTall[["yes"]])
treesTall[["no"]]$Fit <- predict(trees_lm_Tall, treesTall[["no"]])
plot(Volume ~ Girth, data = trees, type = "n")
points(Volume ~ Girth, data = treesTall[["yes"]], pch = 1)
points(Volume ~ Girth, data = treesTall[["no"]], pch = 2)
lines(Fit ~ Girth, data = treesTall[["yes"]])
lines(Fit ~ Girth, data = treesTall[["no"]])
#### Model Selection by step function ###########
trees_lm1.0 <- lm(Volume ~ (.), data = trees)
summary(trees_lm1.0)
# Choose a model by AIC in a Stepwise Algorithm
trees_lm1.1 <- step(trees_lm1.0)
summary(trees_lm1.1)
AIC(trees_lm1.1)
# AIC computed by hand for comparison with AIC()
nrow(trees)*(1+log(2*pi*(summary(trees_lm1.1)$sigma)^2)) + (length(coefficients(trees_lm1.1))+2)
require(scatterplot3d) # for 3d scatter plot
# Another design of scatterplot3d
s3d <-with(trees,scatterplot3d(Girth, Height, Volume, pch=16,
highlight.3d=TRUE, type="h",
main="3D Scatter Plot with Vertical Lines and Regression Planes",
angle=135,scale.y=1, xlab="Girth",ylab="Height",zlab="Volume",
cex.lab=1.5, cex.axis=1.5, cex.main=1.1, cex.sub=1.5))
#s3d$plane3d(yield.lm)
# Investigate the "clouds" data, from package HSAUR2,
# collected in the summer of 1975 from an experiment
# to investigate the use of massive amounts of silver iodide
# in cloud seeding to increase rainfall.
# NOTE(review): install.packages() inside a script reinstalls on every
# run; consider guarding with requireNamespace().
install.packages("HSAUR2")
library(HSAUR2)
summary(clouds)
# Example
clouds_lm1 <- lm( rainfall ~ (.)^2, data = clouds)
summary(clouds_lm1)
# Find suitable model describing rainfall as a response variable
# Plot the relationship between rainfall and other variables + add estimated regression lines
# Check: the assumptions of OLS (by hypothesis testing and graphical diagnostic tools):
# constant variance - homoscedasticity, missing autocorrelation
# plot: residuals against each explanatory variable, against order of measurement,
# against fitted values
# normality of error terms, normal probability plot of the residuals.
# identify outliers, remove them and run the analysis again
clouds_lm1 <- step(clouds_lm1)
pairs(clouds)
clouds_lm2 <- lm( rainfall ~ time + prewetness + sne + cloudcover, data = clouds)
summary(clouds_lm2)
# NOTE(review): this plots rainfall against prewetness, but the title and
# xlab say "Time" - presumably one of the two should change; verify.
plot(rainfall ~ prewetness, data = clouds, col = "black", xaxs="i",yaxs="i",
main="Clouds data: Rainfall and Time dependence",xlab="Time", ylab="Rainfall")
summary(clouds)
# prediction grid where all four regressors vary in parallel
clouds_new <- data.frame(time = (seq(0,85,1)), prewetness = seq(0,1.5,length=length(seq(0,85,1))), sne = seq(1,5,length=length(seq(0,85,1))), cloudcover = seq(2,40,length=length(seq(0,85,1))))
conf_new <- predict(clouds_lm2, newdata = clouds_new, interval = "confidence")
pred_new <- predict(clouds_lm2, newdata = clouds_new, interval = "prediction")
# height_lim = seq(60,90,length=length(seq(5,25,0.1)))
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
# NOTE(review): xlim = c(0,40) clips the prediction grid, which runs to 85.
plot(rainfall ~ time, data = clouds, xlim = c(0,40), ylim = c(0,15),pch=20, col = "black", xaxs="i",yaxs="i",
main="Clouds data: Time and Rainfall dependence",xlab="Time", ylab="Rainfall")
lines(seq(0,85,1), pred_new[,1], col='black')
lines(seq(0,85,1), pred_new[,2], col='red')
lines(seq(0,85,1), pred_new[,3], col='red')
lines(seq(0,85,1), conf_new[,2], col='blue')
lines(seq(0,85,1), conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
# NOTE(review): this second panel redraws the trees fit but the bands in
# pred_new/conf_new now come from the clouds model - presumably leftover
# from copy/paste; verify.
plot(Volume ~ Height, data = trees, xlim = c(60,90), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Height and Volume dependence",xlab="Height", ylab="Volume")
lines(height_lim, pred_new[,1], col='black')
lines(height_lim, pred_new[,2], col='red')
lines(height_lim, pred_new[,3], col='red')
lines(height_lim, conf_new[,2], col='blue')
lines(height_lim, conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
par(op)
|
/tutorials/05/REAN2017_Ex05.R
|
no_license
|
salisaresama/REAN
|
R
| false
| false
| 12,195
|
r
|
##################################
##### 01REAN Cviceni 5 ##########
#################################
#
# Todays exercise
# Multivariable linear regression in R
library(lattice)
library(MASS)
library(car)
library(ggplot2)
setwd("~/Studies/REAN/tutorials/05")
# We use again trees data to present Multivariable linear regression
head(trees)
summary(trees)
trees$Forest <- rbinom(nrow(trees),3,0.5)+1
# Basic scatterplot
pairs(trees, main="Basic Scatterplot Matrix") # all variables in data tree
pairs(~(.), data=trees, main="Basic Scatterplot Matrix") # all variables in data tree
pairs(~Girth+Volume,data=trees, main="Basic Scatterplot Matrix") # Girth and Volume variables from data tree
# Scatterplot Matrices from lattice package
splom(trees)
splom(trees, groups=trees$Forest )
splom(trees[c("Girth","Height","Volume")], groups=trees$Forest )
# Scatterplot Matrices from the car package
scatterplotMatrix(~(.)|Forest, data=trees, main="Three Cylinder Options")
# tune colors and remove regression lines
library(RColorBrewer)
my_colors <- brewer.pal(nlevels(as.factor(trees$Forest)), "Set2")
scatterplotMatrix(~(.)|Forest, data=trees ,
reg.line="" , smoother="", col=my_colors , smoother.args=list(col="grey") ,
cex=1.5 , pch=c(15,16,17) , main="Scatter plot with Three Cylinder Options")
# xyplot
xyplot(Volume ~ Girth | Forest , data=trees , pch=20 , cex=2 , col="blue" )
### 4x Forest kvuli tomu, ze Forest neni factor (kategorcika promenna)
# Forest as factor
summary(trees)
trees$Forest <- as.factor(trees$Forest)
# Different
xyplot(Volume ~ Girth | Forest , data=trees , pch=20 , cex=2 , col="red" )
# How to devide plot into two parts and have only one title
#Divide the screen in 1 line and 2 columns
#Make the margin around each graph a bit smaller
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
# oma is a vector of the form c(bottom, left, top, right)
# giving the size of the outer margins in lines of text.
#Classical histogram and density
hist(trees$Volume, main="" , breaks=10 , col="gray" , xlab="Volume" , ylab="Number of trees in each bin")
hist(trees$Volume, freq=F, breaks=10, main="", xlab="Volume",xlim=c(10, 80),ylim=c(0, 0.05))
lines(density(trees$Volume), col="red", lwd=2)
#Add only ONE title :
mtext("Histogram and Density plot of Volume", outer = TRUE, cex = 1.4, font=4, col=rgb(0.1,0.3,0.5,0.5) )
## At end of plotting, reset to previous settings:
par(op)
# How to split screen and make nice plots :)
# Divide the screen in 2 line and 1 column only
new_screen_step1 <- split.screen(c(2, 1))
# Add one graph on the screen number 1 which is on top :
screen(new_screen_step1[1])
plot( Volume~Girth , data=trees, pch=20 , xlab="Girth",ylab="Volume", cex=1 , col="black" )
# I divide the second screen in 2 columns :
new_screen_step1=split.screen(c(1, 2), screen = new_screen_step1[2])
screen(new_screen_step1[1])
hist(trees$Girth , breaks = 8, border=F , col="blue" , main="" , xlab="Distribution of Girth")
screen(new_screen_step1[2])
hist(trees$Volume, breaks = 8, border=F , col="red" , main="" , xlab="Distribution of Volume")
table(trees$Height)
trees$Height.f<-cut(trees$Height, seq(60,90,10), right=FALSE)
# cut creates cathegorical variable
is.factor(trees$Height.f)
# ggplot version of scatterplot, where Color and shape depend on factor variable
# multipolot with glot is different
# more possibilities exist, here is only one
require(gridExtra)
plot1 <- ggplot(trees, aes(x=Girth, y=Volume, color=Forest, shape=Forest)) +
geom_point(size=5, alpha=0.8) + theme_bw()
plot2 <- ggplot(trees, aes(x=Girth, y=Volume, color=Height.f, size=Height.f)) +
geom_point(alpha=0.6) + theme_bw()
grid.arrange(plot1, plot2, ncol=2)
# Reset plot settings
dev.new()
dev.off()
##############################
# Lets start with regression
#### Model selection by adding varibles #########
trees_lm1.0 <- lm(Volume ~ (.), data = trees) # use all what I have in the dataframe
trees_lm1.0 <- lm(Volume ~ (.)^2, data = trees) # use all what I have in the dataframe
# with second order interactions
# uff - lets start from the simplest model
# only Girth and Height as a independent variable
trees_lm1.0 <- lm(Volume ~ Girth + Height, data = trees) # use all what I have in the dataframe
# nearly all information is in summary
summary(trees_lm1.0)
# Constuct X,Y, beta_hat - if you want to analyze it by hand
n = nrow(trees)
p = length(coefficients(trees_lm1.0))
beta_hat = as.matrix(coefficients(trees_lm1.0))
X = as.matrix(cbind(rep(1,times = n), trees[,c(1,2)]))
Y = as.matrix(trees[,3])
residuals_lm1.0 = Y - X%*%beta_hat
((residuals_lm1.0 - residuals(trees_lm1.0))<0.00001)
# and so on
summary(trees)
trees_new <- data.frame(Girth = (seq(5,25,0.1)), Height = seq(60,90,length=length(seq(5,25,0.1))))
conf_new <- predict(trees_lm1.0, newdata = trees_new, interval = "confidence")
pred_new <- predict(trees_lm1.0, newdata = trees_new, interval = "prediction")
height_lim = seq(60,90,length=length(seq(5,25,0.1)))
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
plot(Volume ~ Girth, data = trees, xlim = c(5,25), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Girth and Volume dependence",xlab="Girth", ylab="Volume")
lines(seq(5,25,0.1), pred_new[,1], col='black')
lines(seq(5,25,0.1), pred_new[,2], col='red')
lines(seq(5,25,0.1), pred_new[,3], col='red')
lines(seq(5,25,0.1), conf_new[,2], col='blue')
lines(seq(5,25,0.1), conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
plot(Volume ~ Height, data = trees, xlim = c(60,90), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Height and Volume dependence",xlab="Height", ylab="Volume")
lines(height_lim, pred_new[,1], col='black')
lines(height_lim, pred_new[,2], col='red')
lines(height_lim, pred_new[,3], col='red')
lines(height_lim, conf_new[,2], col='blue')
lines(height_lim, conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
par(op)
require(scatterplot3d) # for 3d scatter plot
# Another design of scatterplot3d
s3d <-with(trees,scatterplot3d(Girth, Height, Volume, pch=16,
highlight.3d=TRUE, type="h",
main="3D Scatter Plot with Vertical Lines and Regression Planes",
angle=135,scale.y=1, xlab="Girth",ylab="Height",zlab="Volume",
cex.lab=1.5, cex.axis=1.5, cex.main=1.1, cex.sub=1.5))
s3d$plane3d(trees_lm1.0)
# Comperable statistics of the model (See lecture 5)
summary(trees_lm1.0)$r.squared
summary(trees_lm1.0)$adj.r.squared
summary(trees_lm1.0)$fstatistic
# polynomila Regression without scaled varialbes
trees_lm2.0 <- lm(Volume ~ (Girth) + I((Girth)^2), data = trees)
summary(trees_lm2.0)
# polynomial Regression with scaled varialbes
trees_lm2.1 <- lm(Volume ~ scale(Girth) + I(scale(Girth)^2), data = trees)
summary(trees_lm2.1)
plot(Volume ~ scale(Girth), data = trees)
lines(fitted(trees_lm2.1) ~ scale(Girth), data = trees)
conf_new <- predict(trees_lm2.1, newdata = trees_new, interval = "confidence")
# polynomial Regression with scaled varialbe Girth and add Height
trees_lm2.2 <- lm(Volume ~ scale(Girth) + I(scale(Girth)^2) + Height, data = trees)
summary(trees_lm2.2)
# polynomial Regression with scaled varialbe Girth and add Height and interaction
trees_lm2.3 <- lm(Volume ~ Height*scale(Girth) + I(scale(Girth)^2) , data = trees)
summary(trees_lm2.3)
# :(
# Qualitative Explanatory Variables - Forest
summary(trees)
trees$Tall <- cut(trees$Height, breaks = c(-Inf, 76, Inf), labels = c("no", "yes"))
treesTall <- split(trees, trees$Tall)
trees_lm_Tall <- lm(Volume ~ Girth + Tall, data = trees)
summary(trees_lm_Tall)
treesTall[["yes"]]$Fit <- predict(trees_lm_Tall, treesTall[["yes"]])
treesTall[["no"]]$Fit <- predict(trees_lm_Tall, treesTall[["no"]])
plot(Volume ~ Girth, data = trees, type = "n")
points(Volume ~ Girth, data = treesTall[["yes"]], pch = 1)
points(Volume ~ Girth, data = treesTall[["no"]], pch = 2)
lines(Fit ~ Girth, data = treesTall[["yes"]])
lines(Fit ~ Girth, data = treesTall[["no"]])
#### Model Selection by step function ###########
# Full model on all remaining columns.  NOTE(review): if the Tall factor was
# added to `trees` earlier in the session, "." includes it here too.
trees_lm1.0 <- lm(Volume ~ (.), data = trees)
summary(trees_lm1.0)
# Choose a model by AIC in a Stepwise Algorithm
trees_lm1.1 <- step(trees_lm1.0)
summary(trees_lm1.1)
AIC(trees_lm1.1)
# Hand-computed AIC for comparison with AIC() above --
# NOTE(review): this uses summary()$sigma (the adjusted residual SD), so it
# will not match AIC()'s ML-based value exactly; verify the intended formula.
nrow(trees)*(1+log(2*pi*(summary(trees_lm1.1)$sigma)^2)) + (length(coefficients(trees_lm1.1))+2)
require(scatterplot3d) # for 3d scatter plot
# Another design of scatterplot3d
# type="h" draws vertical drop lines; highlight.3d colours points by depth.
s3d <-with(trees,scatterplot3d(Girth, Height, Volume, pch=16,
highlight.3d=TRUE, type="h",
main="3D Scatter Plot with Vertical Lines and Regression Planes",
angle=135,scale.y=1, xlab="Girth",ylab="Height",zlab="Volume",
cex.lab=1.5, cex.axis=1.5, cex.main=1.1, cex.sub=1.5))
#s3d$plane3d(yield.lm)
# Investigate the "clouds" data, from package HSAUR2,
# collected in the summer of 1975 from an experiment
# to investigate the use of massive amounts of silver iodide
# in cloud seeding to increase rainfall.
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider guarding with requireNamespace() instead.
install.packages("HSAUR2")
library(HSAUR2)
summary(clouds)
# Example
# Full second-order model: all main effects and pairwise interactions.
clouds_lm1 <- lm( rainfall ~ (.)^2, data = clouds)
summary(clouds_lm1)
# Find suitable model describing rainfall as a response variable
# Plot the relationship between rainfall and other variables + add estimated regression lines
# Check: the assumptions of OLS (by hypothesis testing and graphical diagnostic tools):
# constant variance - homoscedasticity, missing autocorrelation
# plot: residuals against each explanatory variable, against order of measurement,
# against fitted values
# normality of error terms, normal probability plot of the residuals.
# identify outliers, remove them and run the analysis again
# Stepwise AIC reduction of the full interaction model.
clouds_lm1 <- step(clouds_lm1)
pairs(clouds)
# Reduced additive model used for the predictions below.
clouds_lm2 <- lm( rainfall ~ time + prewetness + sne + cloudcover, data = clouds)
summary(clouds_lm2)
# NOTE(review): x-axis is prewetness but the title/xlab strings say "Time" --
# the labels look copy-pasted; confirm which variable was intended.
plot(rainfall ~ prewetness, data = clouds, col = "black", xaxs="i",yaxs="i",
main="Clouds data: Rainfall and Time dependence",xlab="Time", ylab="Rainfall")
summary(clouds)
# 86-row prediction grid (time 0..85) with the other regressors swept linearly.
clouds_new <- data.frame(time = (seq(0,85,1)), prewetness = seq(0,1.5,length=length(seq(0,85,1))), sne = seq(1,5,length=length(seq(0,85,1))), cloudcover = seq(2,40,length=length(seq(0,85,1))))
conf_new <- predict(clouds_lm2, newdata = clouds_new, interval = "confidence")
pred_new <- predict(clouds_lm2, newdata = clouds_new, interval = "prediction")
# Two-panel figure: clouds fit with confidence/prediction bands (left) and
# the trees data overlaid with the same bands (right).
#
# BUG FIX: height_lim was only defined in a commented-out line, so every
# lines(height_lim, ...) call in the second panel failed with
# "object 'height_lim' not found".  It is defined here with one x value per
# prediction row so the x and y vectors passed to lines() match in length.
# NOTE(review): the right panel draws predictions from the *clouds* model
# over the *trees* data -- this looks like leftover template code; confirm.
height_lim <- seq(60, 90, length.out = nrow(pred_new))
op <- par(mfrow=c(1,2),oma = c(0, 0, 2, 0))
plot(rainfall ~ time, data = clouds, xlim = c(0,40), ylim = c(0,15),pch=20, col = "black", xaxs="i",yaxs="i",
main="Clouds data: Time and Rainfall dependence",xlab="Time", ylab="Rainfall")
# Fit line (col 1) plus prediction band (red) and confidence band (blue).
lines(seq(0,85,1), pred_new[,1], col='black')
lines(seq(0,85,1), pred_new[,2], col='red')
lines(seq(0,85,1), pred_new[,3], col='red')
lines(seq(0,85,1), conf_new[,2], col='blue')
lines(seq(0,85,1), conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
plot(Volume ~ Height, data = trees, xlim = c(60,90), ylim = c(5,100),pch=20, col = "black", xaxs="i",yaxs="i",
main="Trees data: Height and Volume dependence",xlab="Height", ylab="Volume")
lines(height_lim, pred_new[,1], col='black')
lines(height_lim, pred_new[,2], col='red')
lines(height_lim, pred_new[,3], col='red')
lines(height_lim, conf_new[,2], col='blue')
lines(height_lim, conf_new[,3], col='blue')
legend("topleft",legend=c("observed","fit","Confidence int","Prediction int"),
pch=c(20,NA,NA,NA),lty = c(NA,1,1,1),col = c("black","black","blue","red"))
# Restore the previous graphics parameters.
par(op)
|
/Prueba variaciones poisson.R
|
no_license
|
gagliu/R-programing
|
R
| false
| false
| 1,118
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common.R
\name{get_translations}
\alias{get_translations}
\title{Translate your text, choosing the engine}
\usage{
get_translations(x, lang_to, lang_from = "en", api_key,
parallelization_strategy = c("sequential", "multicore", "cluster"),
engine = c("google", "microsoft"))
}
\arguments{
\item{x}{A character vector containing text to translate.}
\item{lang_to}{A two letter language code describing the language to
translate to. See \code{MICROSOFT_LANGS} for available values.}
\item{lang_from}{A two letter language code describing the language to
translate from, defaulting to English. See \code{MICROSOFT_LANGS} for
available values.}
\item{api_key}{A string containing a subscription key to the Google Translate
or Microsoft Translator API (depending upon the \code{engine}).}
\item{parallelization_strategy}{A string naming a parallelization strategy,
passed to \code{\link[future]{plan}}.}
\item{engine}{A string naming the translation engine to use. Either "google"
or "microsoft".}
}
\value{
A character vector of translated strings.
}
\description{
Translate text, choosing whether you use Google Translate or Microsoft
Translator.
}
\examples{
\donttest{
# Not tested due to need for Microsoft Cognitive
# Services Translator API key and Google Translate API key
get_translations(
TRANSLATION_QUOTES,
"es",
api_key = Sys.getenv("GOOGLE_TRANSLATE_API_KEY"),
engine = "google"
)
get_translations(
TRANSLATION_QUOTES,
"es",
api_key = Sys.getenv("MICROSOFT_TRANSLATOR_API_KEY"),
engine = "microsoft"
)
}
}
\seealso{
\code{\link{get_google_translations}},
\code{\link{get_microsoft_translations}}
}
|
/man/get_translations.Rd
|
no_license
|
RL10N/autotranslate
|
R
| false
| true
| 1,709
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common.R
\name{get_translations}
\alias{get_translations}
\title{Translate your text, choosing the engine}
\usage{
get_translations(x, lang_to, lang_from = "en", api_key,
parallelization_strategy = c("sequential", "multicore", "cluster"),
engine = c("google", "microsoft"))
}
\arguments{
\item{x}{A character vector containing text to translate.}
\item{lang_to}{A two letter language code describing the language to
translate to. See \code{MICROSOFT_LANGS} for available values.}
\item{lang_from}{A two letter language code describing the language to
translate from, defaulting to English. See \code{MICROSOFT_LANGS} for
available values.}
\item{api_key}{A string containing a subscription key to the Google Translate
or Microsoft Translator API (depending upon the \code{engine}).}
\item{parallelization_strategy}{A string naming a parallelization strategy,
passed to \code{\link[future]{plan}}.}
\item{engine}{A string naming the translation engine to use. Either "google"
or "microsoft".}
}
\value{
A character vector of translated strings.
}
\description{
Translate text, choosing whether you use Google Translate or Microsoft
Translator.
}
\examples{
\donttest{
# Not tested due to need for Microsoft Cognitive
# Services Translator API key and Google Translate API key
get_translations(
TRANSLATION_QUOTES,
"es",
api_key = Sys.getenv("GOOGLE_TRANSLATE_API_KEY"),
engine = "google"
)
get_translations(
TRANSLATION_QUOTES,
"es",
api_key = Sys.getenv("MICROSOFT_TRANSLATOR_API_KEY"),
engine = "microsoft"
)
}
}
\seealso{
\code{\link{get_google_translations}},
\code{\link{get_microsoft_translations}}
}
|
# packages
library(tidyverse)
library(patchwork)
library(lubridate)
library(scales)
# read data (https://github.com/CSSEGISandData/COVID-19)
raw <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
# Wide-to-long: read.csv prefixes date columns with "X" (e.g. X1.22.20),
# so strip that prefix and parse month/day/year.
work <- raw %>%
pivot_longer(cols = starts_with("X"),
names_to = "date") %>%
mutate(date = substr(date,2,8)) %>%
mutate(date = mdy(date))
# define countries
countries <- c("Austria","Italy","Spain","Switzerland", "France", "Germany")
# Keep only whole-country rows (empty Province.State or province == country)
# and drop zero-case days.
work <- work %>%
filter(Country.Region %in% countries,
(Province.State =="" | Province.State %in% countries),
value > 0) %>%
mutate(country = Country.Region,
type = "confirmed")
# Per-country day counter since first reported case.
data <- work %>%
arrange(country, date) %>%
group_by(country) %>%
mutate(day = row_number(),
infected = value)
# Daily new infections, absolute and as percent growth over the prior day.
data <- data %>%
group_by(country) %>%
mutate(new_abs = infected - lag(infected),
new_pct = new_abs / lag(infected) * 100) %>%
ungroup()
# Project confirmed infections forward assuming constant daily growth.
#
# Args:
#   data:           data frame/tibble with numeric columns `day` and `infected`.
#   infection_rate: daily multiplicative growth factor (e.g. 1.33 = +33%/day).
#   days:           projection horizon; days - 1 future values are produced
#                   (kept as-is for backward compatibility with callers).
#
# Returns a tibble with columns `type` (growth label), `day`, `infected`.
predict_corona <- function(data, infection_rate, days) {
  future <- seq(from = max(data$day) + 1, to = max(data$day) + days - 1)
  # FIX: preallocate instead of growing with c() inside the loop (O(n) rather
  # than O(n^2) copies); the unused `infected_predict` local was removed.
  infected_all <- numeric(length(future))
  infected_act <- max(data$infected)
  for (i in seq_along(future)) {
    infected_act <- infected_act * infection_rate
    infected_all[i] <- infected_act
  }
  tibble(type = paste0("growth ", (infection_rate - 1) * 100, "%"),
         day = future,
         infected = infected_all)
} # predict_corona
#############################################
# predict growth
#############################################
predict_days <- 50 #57
# Austrian observed series used as the starting point for all projections.
predict_data <- data %>%
filter(country == "Austria") %>%
select(type, day, infected)
# One projected series per assumed daily growth rate.
# NOTE(review): data_50 is computed but never added to data_plot below --
# confirm whether the 50% scenario was dropped on purpose.
data_50 <- predict_data %>%
predict_corona(infection_rate = 1.50, days = predict_days)
data_40 <- predict_data %>%
predict_corona(infection_rate = 1.40, days = predict_days)
data_33 <- predict_data %>%
predict_corona(infection_rate = 1.33, days = predict_days)
data_20 <- predict_data %>%
predict_corona(infection_rate = 1.20, days = predict_days)
data_15 <- predict_data %>%
predict_corona(infection_rate = 1.15, days = predict_days)
data_10 <- predict_data %>%
predict_corona(infection_rate = 1.10, days = predict_days)
# combine dataset
data_plot <- predict_data %>%
bind_rows(data_40, data_33, data_20, data_15, data_10)
# visualise
last_day <- nrow(predict_data)
# Observed + projected curves in millions; dotted verticals mark today and
# today + 4 weeks.
p0 <- data_plot %>%
mutate(infected_M = infected / 1000000) %>%
ggplot(aes(day, infected_M, color = type)) +
geom_line(size = 1.5) +
geom_vline(xintercept = c(last_day, last_day + 28),
linetype = "dotted") +
ylim(0,5.5) +
xlab("Days since outbreak") +
ylab("Confirmed infections in Mio") +
#ggtitle("Covid-19 outbreak in Austria") +
theme_minimal()+
annotate("text", last_day/2, 5,
label = "until today", size = 2.5) +
annotate("text", last_day+14, 5,
label = "next 4 weeks", size = 2.5)
#############################################
# confirmed infections
#############################################
# data by country (>= 50 confirmed infections)
# Re-base the day counter so day 1 = first day with >= 50 cases per country.
data_countries <- data %>%
filter(infected >= 50) %>%
arrange(country, day) %>%
group_by(country) %>%
mutate(day = row_number()) %>%
ungroup() %>%
mutate(infected_M = infected / 1000000)
# data for reference line 33% growth (Austria)
data_line_start <- tibble(
day = 1,
infected = 55
)
# predict 10%/33% growth (days since 50 cases)
data_line10 <- predict_corona(
data_line_start,
infection_rate = 1.10,
days = 24)
data_line33 <- predict_corona(
data_line_start,
infection_rate = 1.33,
days = 24)
highlight_country <- "Austria"
# infected
# Non-highlighted countries as thin lines, Austria thicker on top, plus the
# two grey reference-growth lines.
p1 <- ggplot(data = data_countries %>% filter(!country %in% highlight_country),
aes(day,infected, colour = country)) +
geom_line(alpha = 0.7, size = 1.1) +
geom_line(data = data_countries %>% filter(country %in% highlight_country),
aes(day,infected, colour = country),
alpha = 1, size = 1.5) +
geom_line(data = data_line10,
aes(day,infected), color = "grey", alpha = 0.7) +
geom_line(data = data_line33,
aes(day,infected), color = "grey", alpha = 0.7) +
scale_y_continuous(labels=function(x) format(x, big.mark = " ", scientific = FALSE)) +
labs(x = "Days since 50 cases",
y = "Confirmed infections") +
ggtitle("Covid-19 outbreak in Europe") +
theme_minimal()
# daily growth infected
# Daily percentage growth bars for the highlighted country with a dotted
# 33%-growth reference line.
p2 <- data_countries %>%
filter(country == highlight_country) %>%
ggplot(aes(day, new_pct)) +
geom_col(fill = "grey") +
geom_text(aes(day, new_pct,
label = paste0(format(new_pct, digits=1),"%")),
size = 2) +
geom_hline(yintercept = 33, linetype = "dotted") +
ylim(c(0,100)) +
xlab("Days since 50 cases") +
ylab("Daily growth in %") +
# ggtitle("Covid-19 outbreak in Austria") +
theme_minimal() +
annotate("text", 2.5, 33,
label = "33% growth",
size = 2,
vjust = "bottom"
)
# combine plots
# patchwork layout: (p1 over p2) beside p0.
p <- ((p1 / p2) | p0) + plot_annotation('Covid-19 outbreak in Austria',
caption = "source: https://github.com/CSSEGISandData/COVID-19")
# plot
p
# save plot
# NOTE(review): this saves only p1 (the Europe panel), not the combined
# figure p displayed above -- confirm which was intended.
p1 %>% ggsave(filename = "covid-19-austria-europe.png",
device = "png",
width = 7, height = 4)
|
/covid19_austria_europe.R
|
no_license
|
manevska18/covid19_austria
|
R
| false
| false
| 5,542
|
r
|
# packages
library(tidyverse)
library(patchwork)
library(lubridate)
library(scales)
# read data (https://github.com/CSSEGISandData/COVID-19)
raw <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
work <- raw %>%
pivot_longer(cols = starts_with("X"),
names_to = "date") %>%
mutate(date = substr(date,2,8)) %>%
mutate(date = mdy(date))
# define countries
countries <- c("Austria","Italy","Spain","Switzerland", "France", "Germany")
# filter countries
work <- work %>%
filter(Country.Region %in% countries,
(Province.State =="" | Province.State %in% countries),
value > 0) %>%
mutate(country = Country.Region,
type = "confirmed")
# days
data <- work %>%
arrange(country, date) %>%
group_by(country) %>%
mutate(day = row_number(),
infected = value)
# new infections
data <- data %>%
group_by(country) %>%
mutate(new_abs = infected - lag(infected),
new_pct = new_abs / lag(infected) * 100) %>%
ungroup()
# predict
# Project confirmed infections forward assuming constant daily growth.
#
# Args:
#   data:           data frame/tibble with numeric columns `day` and `infected`.
#   infection_rate: daily multiplicative growth factor (e.g. 1.33 = +33%/day).
#   days:           projection horizon; days - 1 future values are produced
#                   (kept as-is for backward compatibility with callers).
#
# Returns a tibble with columns `type` (growth label), `day`, `infected`.
predict_corona <- function(data, infection_rate, days) {
  future <- seq(from = max(data$day) + 1, to = max(data$day) + days - 1)
  # FIX: preallocate instead of growing with c() inside the loop (O(n) rather
  # than O(n^2) copies); the unused `infected_predict` local was removed.
  infected_all <- numeric(length(future))
  infected_act <- max(data$infected)
  for (i in seq_along(future)) {
    infected_act <- infected_act * infection_rate
    infected_all[i] <- infected_act
  }
  tibble(type = paste0("growth ", (infection_rate - 1) * 100, "%"),
         day = future,
         infected = infected_all)
} # predict_corona
#############################################
# predict growth
#############################################
predict_days <- 50 #57
predict_data <- data %>%
filter(country == "Austria") %>%
select(type, day, infected)
data_50 <- predict_data %>%
predict_corona(infection_rate = 1.50, days = predict_days)
data_40 <- predict_data %>%
predict_corona(infection_rate = 1.40, days = predict_days)
data_33 <- predict_data %>%
predict_corona(infection_rate = 1.33, days = predict_days)
data_20 <- predict_data %>%
predict_corona(infection_rate = 1.20, days = predict_days)
data_15 <- predict_data %>%
predict_corona(infection_rate = 1.15, days = predict_days)
data_10 <- predict_data %>%
predict_corona(infection_rate = 1.10, days = predict_days)
# combine dataset
data_plot <- predict_data %>%
bind_rows(data_40, data_33, data_20, data_15, data_10)
# visualise
last_day <- nrow(predict_data)
p0 <- data_plot %>%
mutate(infected_M = infected / 1000000) %>%
ggplot(aes(day, infected_M, color = type)) +
geom_line(size = 1.5) +
geom_vline(xintercept = c(last_day, last_day + 28),
linetype = "dotted") +
ylim(0,5.5) +
xlab("Days since outbreak") +
ylab("Confirmed infections in Mio") +
#ggtitle("Covid-19 outbreak in Austria") +
theme_minimal()+
annotate("text", last_day/2, 5,
label = "until today", size = 2.5) +
annotate("text", last_day+14, 5,
label = "next 4 weeks", size = 2.5)
#############################################
# confirmed infections
#############################################
# data by country (>= 50 confirmed infections)
data_countries <- data %>%
filter(infected >= 50) %>%
arrange(country, day) %>%
group_by(country) %>%
mutate(day = row_number()) %>%
ungroup() %>%
mutate(infected_M = infected / 1000000)
# data for reference line 33% growth (Austria)
data_line_start <- tibble(
day = 1,
infected = 55
)
# predict 10%/33% growth (days since 50 cases)
data_line10 <- predict_corona(
data_line_start,
infection_rate = 1.10,
days = 24)
data_line33 <- predict_corona(
data_line_start,
infection_rate = 1.33,
days = 24)
highlight_country <- "Austria"
# infected
p1 <- ggplot(data = data_countries %>% filter(!country %in% highlight_country),
aes(day,infected, colour = country)) +
geom_line(alpha = 0.7, size = 1.1) +
geom_line(data = data_countries %>% filter(country %in% highlight_country),
aes(day,infected, colour = country),
alpha = 1, size = 1.5) +
geom_line(data = data_line10,
aes(day,infected), color = "grey", alpha = 0.7) +
geom_line(data = data_line33,
aes(day,infected), color = "grey", alpha = 0.7) +
scale_y_continuous(labels=function(x) format(x, big.mark = " ", scientific = FALSE)) +
labs(x = "Days since 50 cases",
y = "Confirmed infections") +
ggtitle("Covid-19 outbreak in Europe") +
theme_minimal()
# daily growth infected
p2 <- data_countries %>%
filter(country == highlight_country) %>%
ggplot(aes(day, new_pct)) +
geom_col(fill = "grey") +
geom_text(aes(day, new_pct,
label = paste0(format(new_pct, digits=1),"%")),
size = 2) +
geom_hline(yintercept = 33, linetype = "dotted") +
ylim(c(0,100)) +
xlab("Days since 50 cases") +
ylab("Daily growth in %") +
# ggtitle("Covid-19 outbreak in Austria") +
theme_minimal() +
annotate("text", 2.5, 33,
label = "33% growth",
size = 2,
vjust = "bottom"
)
# combine plots
p <- ((p1 / p2) | p0) + plot_annotation('Covid-19 outbreak in Austria',
caption = "source: https://github.com/CSSEGISandData/COVID-19")
# plot
p
# save plot
p1 %>% ggsave(filename = "covid-19-austria-europe.png",
device = "png",
width = 7, height = 4)
|
# Restrict the raw World Bank extracts (WDI, WGI, Gender Stats) to countries
# that appear in the World Cup summary table, caching each subset as RDS.
wcSumAll <- readRDS('./1_data/WorldCupSummaryAll.RDS')

# World Development Indicators
WDI <- read.csv('./1_data/worldBank/WDI_csv/WDIData.csv')
WDI <- WDI[WDI$Country.Code %in% wcSumAll$codeWB, ]
saveRDS(WDI, './1_data/wdi.RDS')

# Worldwide Governance Indicators
WGI <- read.csv('./1_data/worldBank/WGI_csv/WGIData.csv')
WGI <- WGI[WGI$Country.Code %in% wcSumAll$codeWB, ]
saveRDS(WGI, './1_data/wgi.RDS')

# Gender Statistics
GenderStats <- read.csv('./1_data/worldBank/Gender_Stats_csv/Gender_StatsData.csv')
GenderStats <- GenderStats[GenderStats$Country.Code %in% wcSumAll$codeWB, ]
saveRDS(GenderStats, './1_data/GenderStats.RDS')

# Round-trip the cached files and list the indicators they contain.
GenderStats <- readRDS('./1_data/GenderStats.RDS')
unique(GenderStats$Indicator.Name)
wgi <- readRDS('./1_data/wgi.RDS')
unique(wgi$Indicator.Name)
|
/4_Scripts/deprecated/SubsetWBData.R
|
no_license
|
MaximilianPi/ftbl
|
R
| false
| false
| 768
|
r
|
# Subset World Bank data to complete cases
wcSumAll <- readRDS('./1_data/WorldCupSummaryAll.RDS')
# Read world cup data
WDI <- read.csv('./1_data/worldBank/WDI_csv/WDIData.csv')
WDI <- WDI[WDI$Country.Code %in% wcSumAll$codeWB,]
saveRDS(WDI, './1_data/wdi.RDS')
WGI <- read.csv('./1_data/worldBank/WGI_csv/WGIData.csv')
WGI <- WGI[WGI$Country.Code %in% wcSumAll$codeWB,]
saveRDS(WGI, './1_data/wgi.RDS')
GenderStats <- read.csv('./1_data/worldBank/Gender_Stats_csv/Gender_StatsData.csv')
GenderStats <- GenderStats[GenderStats$Country.Code %in% wcSumAll$codeWB,]
saveRDS(GenderStats, './1_data/GenderStats.RDS')
GenderStats <- readRDS('./1_data/GenderStats.RDS')
unique(GenderStats$Indicator.Name)
wgi <- readRDS('./1_data/wgi.RDS')
unique(wgi$Indicator.Name)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\name{.h2o.__MODEL_BUILDERS}
\alias{.h2o.__MODEL_BUILDERS}
\title{Model Builder Endpoint Generator}
\usage{
.h2o.__MODEL_BUILDERS(algo)
}
\arguments{
\item{algo}{Canonical identifier of H2O algorithm.}
}
\description{
Model Builder Endpoint Generator
}
|
/man/dot-h2o.__MODEL_BUILDERS.Rd
|
no_license
|
cran/h2o
|
R
| false
| true
| 344
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\name{.h2o.__MODEL_BUILDERS}
\alias{.h2o.__MODEL_BUILDERS}
\title{Model Builder Endpoint Generator}
\usage{
.h2o.__MODEL_BUILDERS(algo)
}
\arguments{
\item{algo}{Canonical identifier of H2O algorithm.}
}
\description{
Model Builder Endpoint Generator
}
|
\name{plot.sota}
\alias{plot.sota}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot Function for a SOTA Object}
\description{
'plot.sota' is used to obtain a visual representation of profiles within each individual cluster.
Corresponding cluster average profiles are also available. By default, plots for all clusters
are displayed side by side.
}
\usage{
\method{plot}{sota}(x, cl = 0, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{SOTA object, an object returned by function \code{\link{sota}}.}
\item{cl}{\code{cl} specifies which cluster is to be plotted by setting it to the cluster ID. By default,
\code{cl} is equal to 0 and the function plots all clusters side by
side.}
\item{\dots}{Additional arguments to pass to \code{\link{plot}}.}
}
\references{Herrero, J., Valencia,
A, and Dopazo, J. (2005). A hierarchical unsupervised growing neural
network for clustering gene expression patterns. Bioinformatics, 17, 126-136.}
\author{Vasyl Pihur, Guy Brock, Susmita Datta, Somnath Datta}
\seealso{\code{\link{sota}}, \code{\link{print.sota}} }
\examples{
data(mouse)
express <- mouse[,c("M1","M2","M3","NC1","NC2","NC3")]
rownames(express) <- mouse$ID
sotaCl <- sota(as.matrix(express), 4)
names(sotaCl)
sotaCl
plot(sotaCl)
plot(sotaCl, cl=2)
}
\keyword{cluster}
\keyword{hplot}
|
/man/plot.sota.Rd
|
no_license
|
cran/clValid
|
R
| false
| false
| 1,419
|
rd
|
\name{plot.sota}
\alias{plot.sota}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot Function for a SOTA Object}
\description{
'plot.sota' is used to obtain a visual representation of profiles within each individual cluster.
Corresponding cluster average profiles are also available. By default, plots for all clusters
are displayed side by side.
}
\usage{
\method{plot}{sota}(x, cl = 0, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{SOTA object, an object returned by function \code{\link{sota}}.}
\item{cl}{\code{cl} specifies which cluster is to be plotted by setting it to the cluster ID. By default,
\code{cl} is equal to 0 and the function plots all clusters side by
side.}
\item{\dots}{Additional arguments to pass to \code{\link{plot}}.}
}
\references{Herrero, J., Valencia,
A, and Dopazo, J. (2005). A hierarchical unsupervised growing neural
network for clustering gene expression patterns. Bioinformatics, 17, 126-136.}
\author{Vasyl Pihur, Guy Brock, Susmita Datta, Somnath Datta}
\seealso{\code{\link{sota}}, \code{\link{print.sota}} }
\examples{
data(mouse)
express <- mouse[,c("M1","M2","M3","NC1","NC2","NC3")]
rownames(express) <- mouse$ID
sotaCl <- sota(as.matrix(express), 4)
names(sotaCl)
sotaCl
plot(sotaCl)
plot(sotaCl, cl=2)
}
\keyword{cluster}
\keyword{hplot}
|
# Load GAPIT-format cowpea genotypes; column "Snp" is the marker name,
# columns 2-5 appear to be marker metadata, remaining columns are taxa.
the.genotypes <- read.delim("http://people.beocat.ksu.edu/~omo/Collaborations/Cowpea/Cowpea.GAPIT.GenoFormat.Lipka.txt", header=T)
the.genotypes[1:6,1:6]
# Prefix marker names with "V" so they remain syntactically valid R names.
the.genotypes$Snp <- paste("V", the.genotypes$Snp, sep="")
# Marker map: SNP name, chromosome, position (columns 1, 3, 4).
snp_info<-the.genotypes[c(1,3:4)]
colnames(snp_info)<-c("SNP","Chr","Pos")
head(snp_info)
# Drop metadata columns; keep marker-by-taxon calls with marker rownames.
geno <- the.genotypes[,-c(2:5)]
rownames(geno) <- as.vector(as.matrix(geno[,1]))
geno <- geno[,-1]
geno[1:6,1:6]
geno <- as.matrix(geno)
# Recode missing calls as 1 -- assumes {0,1,2} genotype coding where 1 is
# the heterozygote; TODO confirm against the source file.
geno[which(is.na(geno))] <- 1
#geno[which(geno=="N")] <- 1
# Transpose to taxa-by-marker and shift to {-1,0,1} coding for rrBLUP.
G <- t(geno-1)
library(rrBLUP)
# Read in phenotypic data (flowering-time traits) and align it with the
# genotyped taxa, then impute missing markers with rrBLUP.
phdata <- read.csv("http://people.beocat.ksu.edu/~omo/Collaborations/Cowpea/cowpea.phenotypes.csv", header = TRUE)
head(phdata)
colnames(phdata)[1] <- "Taxa"
dim(phdata)
phenames <- as.vector(colnames(phdata[,-1]))
geno.taxa <- data.frame(colnames(geno))
head(geno.taxa)
colnames(geno.taxa)[1] <- "Taxa"
# Keep only taxa present in both the phenotype and genotype tables.
com.tax <- merge(phdata, geno.taxa, by="Taxa")
head(com.tax)
# BUG FIX: the right-hand sides of the next two assignments were commented
# out ("x <- #..."), chaining three statements into one expression so that
# merge() referenced objects that did not exist yet (runtime error
# "object 'com.tax.FT.SEnvs' not found").  The subsets are restored: columns
# 1:5 per the author's own comment; the BLUP column index 6 is inferred from
# the later use of com.tax[,c(2,6)] (FTFILD in col 2, FT_BLUP in col 6) --
# NOTE(review): confirm against the phenotype file's column order.
com.tax.FT.SEnvs <- com.tax[,c(1:5)]
com.tax.FT.BLUP <- com.tax[,c(1,6)]
com.tax <- merge(com.tax.FT.SEnvs, com.tax.FT.BLUP, by="Taxa")
# match genotypes by common taxa
the.genotypes <- the.genotypes[, c(1:5, match(com.tax$Taxa, colnames(the.genotypes)))]
G[1:6,1:6]
G2 <- G[match(com.tax$Taxa, rownames(G)),]
Phenotypes.FT <- com.tax
# Impute missing marker data with rrBLUP::A.mat (column-mean imputation;
# markers with > 50% missing data are dropped).
impute=A.mat(G2,max.missing=0.5,impute.method="mean",return.imputed=TRUE)
Markers_impute=impute$imputed
Markers_impute[1:6,1:6]
# (removed the no-op "Markers_impute <- Markers_impute" self-assignment)
# Shift from {-1,0,1} back to {0,1,2} coding for the effect calculations.
Markers_impute2 <- Markers_impute+1
Markers_impute2[1:6,1:6]
# Read MLMM GWAS hits (one CSV per flowering-time trait) and stack them.
FTFILD <- read.csv("FTFILD.MLMM.csv", header=T)
FTRILD <- read.csv("FTRILD.MLMM.csv", header=T)
FTFISD <- read.csv("FTFISD.MLMM.csv", header=T)
FTRISD <- read.csv("FTRISD.MLMM.csv", header=T)
FT_BLUP <- read.csv("FT_BLUP.MLMM.csv", header=T)
JL_RES <- rbind(FTFILD, FTRILD, FTFISD, FTRISD, FT_BLUP)
JL_RES$Trait <- as.character(JL_RES$Trait)
JL_RES$SNP <- as.character(JL_RES$SNP)
phenames <- names(com.tax[,-1])
# For each trait: select its significant SNPs, build a taxa x (phenotype,
# SNPs) table, and append per-QTL additive effect and PVE to a text file.
for (l in 1:length(phenames))
#for(l in 1:3)
{
print(paste("-------------- Trait being analysed: ", phenames[l], "!!!!!!!!!!!---------------", sep = ""))
ExplVar200Best <- JL_RES[which(JL_RES$Trait==phenames[l]),]
bSNP<-Markers_impute2[,as.character(ExplVar200Best$SNP)]
phdata <- data.frame(Phenotypes.FT[,1], Phenotypes.FT[,phenames[l]])
colnames(phdata)[2] <- phenames[l]
colnames(phdata)[1] <- "Taxa"
#sP<-as.data.frame(phdata[,phenames[l]])
sP<-phdata
rownames(sP) <- sP$Taxa
da<-as.data.frame(cbind(sP, bSNP))
trait_QTL_Pheno <- da
# Write the header row.  NOTE(review): append=T means stale result files
# must be deleted before re-running, or rows accumulate across runs.
write.table(t(data.frame(c("QTL", "Additive Effect", "PVE"))), paste("Cowpea.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
#APV is the Among population variance in accordance to Wurschum et al. 2011 Heredity
for(i in 3:ncol(trait_QTL_Pheno)){
snp <- colnames(trait_QTL_Pheno)[i]
print(paste("-------------- Trait being analysed: ", phenames[l], "SNP: ", snp, "!!!!!!!!!!!---------------", sep = ""))
trait_QTL_Pheno_2 <- trait_QTL_Pheno[,c(1,2,i)]
# Additive effect = half the difference between the two homozygote class
# means (genotype codes 2 and 0).
AA_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==2),]
AA <- mean(AA_class[,2], na.rm=T)
BB_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==0),]
BB <- mean(BB_class[,2], na.rm=T)
QTL_effect <- (AA-BB)/2
#formula.single <- as.formula(paste("Cd_comb ~ ",paste(as.character(topSNP$SNP), collapse=" + "), sep=" "))
trait_QTL_Pheno_2$QTL <- trait_QTL_Pheno_2[,3]
#QTL <- colnames(trait_QTL_Pheno_2[3])
# Single-marker regression; adjusted R^2 x 100 is reported as the PVE.
fin.anova <- lm(trait_QTL_Pheno_2[,phenames[l]] ~ QTL, data=trait_QTL_Pheno_2, na.action = na.omit)
fin.sum <- summary(fin.anova)
QVar <- round((fin.sum$adj.r.squared)*100, digits=2)#Phenotypes.FT[,phenames[l]]
print(paste("-------------- PVE For SNP: ", snp, "; Trait: ", phenames[l], " == ", QVar, "% !!!!!!!!!!!---------------", sep = ""))
write.table(t(data.frame(c(colnames(trait_QTL_Pheno[i]), round(abs(QTL_effect[1]), 1), QVar[1]))), paste("Cowpea.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
}
}
# Collect the per-trait PVE/effect tables written by the loop above and tag
# each with its trait name before stacking.
FTFILD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTFILD_QTL.txt", header=T)
FTFILD.PVE$Trait <- rep("FTFILD", nrow(FTFILD.PVE))
FTRILD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTRILD_QTL.txt", header=T)
FTRILD.PVE$Trait <- rep("FTRILD", nrow(FTRILD.PVE))
FTFISD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTFISD_QTL.txt", header=T)
FTFISD.PVE$Trait <- rep("FTFISD", nrow(FTFISD.PVE))
FTRISD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTRISD_QTL.txt", header=T)
FTRISD.PVE$Trait <- rep("FTRISD", nrow(FTRISD.PVE))
# BUG FIX: the loop writes "Cowpea.QTL.Effects_FT_BLUP_QTL.txt" (the trait is
# "FT_BLUP"), but this read used "..._FLT_BLUP_..." -- filename typo fixed.
FT_BLUP.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FT_BLUP_QTL.txt", header=T)
FT_BLUP.PVE$Trait <- rep("FT_BLUP", nrow(FT_BLUP.PVE))
FT.Eff <- rbind(FTFILD.PVE, FTRILD.PVE, FTFISD.PVE, FTRISD.PVE, FT_BLUP.PVE)
###################################################################################################################
# For Epistasis Markers
# Same per-QTL effect/PVE procedure as above, but for PATOWAS epistasis hits
# and only two traits (FTFILD and FT_BLUP).
FTFILD <- read.csv("FTFILD.PATOWAS.csv", header=T)
FT_BLUP <- read.csv("FT_BLUP.PATOWAS.csv", header=T)
JL_RES <- rbind(FTFILD, FT_BLUP)
JL_RES$Trait <- as.character(JL_RES$Trait)
JL_RES$SNP <- as.character(JL_RES$SNP)
# Columns 2 and 6 of com.tax: FTFILD and FT_BLUP.
phenames <- names(com.tax[,c(2,6)])
for (l in 1:length(phenames))
#for(l in 1:3)
{
print(paste("-------------- Trait being analysed: ", phenames[l], "!!!!!!!!!!!---------------", sep = ""))
ExplVar200Best <- JL_RES[which(JL_RES$Trait==phenames[l]),]
bSNP<-Markers_impute2[,as.character(ExplVar200Best$SNP)]
phdata <- data.frame(Phenotypes.FT[,1], Phenotypes.FT[,phenames[l]])
colnames(phdata)[2] <- phenames[l]
colnames(phdata)[1] <- "Taxa"
#sP<-as.data.frame(phdata[,phenames[l]])
sP<-phdata
rownames(sP) <- sP$Taxa
da<-as.data.frame(cbind(sP, bSNP))
trait_QTL_Pheno <- da
# Header row; append=T accumulates across runs -- delete stale files first.
write.table(t(data.frame(c("QTL", "Additive Effect", "PVE"))), paste("Cowpea.Epistasis.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
#APV is the Among population variance in accordance to Wurschum et al. 2011 Heredity
for(i in 3:ncol(trait_QTL_Pheno)){
snp <- colnames(trait_QTL_Pheno)[i]
print(paste("-------------- Trait being analysed: ", phenames[l], "SNP: ", snp, "!!!!!!!!!!!---------------", sep = ""))
trait_QTL_Pheno_2 <- trait_QTL_Pheno[,c(1,2,i)]
# Additive effect = half the homozygote class-mean difference (codes 2 vs 0).
AA_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==2),]
AA <- mean(AA_class[,2], na.rm=T)
BB_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==0),]
BB <- mean(BB_class[,2], na.rm=T)
QTL_effect <- (AA-BB)/2
#formula.single <- as.formula(paste("Cd_comb ~ ",paste(as.character(topSNP$SNP), collapse=" + "), sep=" "))
trait_QTL_Pheno_2$QTL <- trait_QTL_Pheno_2[,3]
#QTL <- colnames(trait_QTL_Pheno_2[3])
# Single-marker regression; adjusted R^2 x 100 reported as PVE.
fin.anova <- lm(trait_QTL_Pheno_2[,phenames[l]] ~ QTL, data=trait_QTL_Pheno_2, na.action = na.omit)
fin.sum <- summary(fin.anova)
QVar <- round((fin.sum$adj.r.squared)*100, digits=2)#Phenotypes.FT[,phenames[l]]
print(paste("-------------- PVE For SNP: ", snp, "; Trait: ", phenames[l], " == ", QVar, "% !!!!!!!!!!!---------------", sep = ""))
write.table(t(data.frame(c(colnames(trait_QTL_Pheno[i]), round(abs(QTL_effect[1]), 1), QVar[1]))), paste("Cowpea.Epistasis.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
}
}
# Collect the epistasis PVE tables written by the loop above.
FTFILD.PVE.epi <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.Epistasis.QTL.Effects_FTFILD_QTL.txt", header=T)
FTFILD.PVE.epi$Trait <- rep("FTFILD", nrow(FTFILD.PVE.epi))
# BUG FIX: the loop writes "Cowpea.Epistasis.QTL.Effects_FT_BLUP_QTL.txt"
# (the trait is "FT_BLUP"), but this read used "..._FLT_BLUP_..." -- fixed.
FT_BLUP.PVE.epi <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.Epistasis.QTL.Effects_FT_BLUP_QTL.txt", header=T)
FT_BLUP.PVE.epi$Trait <- rep("FT_BLUP", nrow(FT_BLUP.PVE.epi))
FT.QTL.Epi <- rbind(FTFILD.PVE.epi, FT_BLUP.PVE.epi)
############################### Minor Allele Frequency
Markers_impute2[1:6,1:6]
# calc_snp_stats expects markers in rows, so transpose to marker-by-taxon.
Markers_impute3 <- t(Markers_impute2)
Markers_impute3[1:6,1:6]
# NOTE(review): sourcing executable code over plain http is fragile and
# insecure -- consider vendoring calc_snp_stats.R into the repository.
source("http://evachan.org/calc_snp_stats.R")
QTL.summary <- calc_snp_stats(Markers_impute3)
head(QTL.summary)
QTL.summary$QTL <- rownames(QTL.summary)
# Keep the QTL name plus two statistics columns (14, 5, 6) -- presumably
# name, allele frequency and MAF; verify against calc_snp_stats output.
QTL.summary <- QTL.summary[,c(14,5,6)]
#hist(QTL.summary$maf)
summary(QTL.summary)
# Merge MAF statistics onto the per-QTL effect/PVE table.
FT.info <- merge(FT.Eff, QTL.summary, by="QTL")
head(FT.info)
FT.info$maf <- round(FT.info$maf, 2)
head(FT.info)
# Genetic map: prefix marker names with "V" to match the genotype matrix.
map <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Data/cowpea.map.txt", header=T)
head(map)
map$Marker <- paste("V", map$Marker, sep="")
head(map)
map.q <- map
colnames(map.q)[1] <- "QTL"
FT.info2 <- merge(map.q, FT.info, by="QTL")
head(FT.info2)
write.table(FT.info2, "FT.PVE.ADE.MAF.MLMM.csv", sep=",", quote=F, row.names = F, col.names = T)
# Same merge/round/export for the epistasis QTL table.
FT.Info.Epi <- merge(FT.QTL.Epi, QTL.summary, by="QTL")
FT.Info.Epi$maf <- round(FT.Info.Epi$maf, 2)
write.table(FT.Info.Epi, "FT.Epi.Summary.PVE.MAF.csv", sep=",", quote=F, row.names = F, col.names = T)
|
/Cowpea.QTL.PVE.R
|
no_license
|
skayondo/cowpea
|
R
| false
| false
| 9,437
|
r
|
# --- Load genotype matrix (GAPIT format), recode, and align with phenotypes ---
the.genotypes <- read.delim("http://people.beocat.ksu.edu/~omo/Collaborations/Cowpea/Cowpea.GAPIT.GenoFormat.Lipka.txt", header=T)
the.genotypes[1:6,1:6]
# Prefix SNP names with "V" so they are valid/consistent column names downstream.
the.genotypes$Snp <- paste("V", the.genotypes$Snp, sep="")
snp_info<-the.genotypes[c(1,3:4)]
colnames(snp_info)<-c("SNP","Chr","Pos")
head(snp_info)
# Drop annotation columns; remaining columns are taxa genotype calls.
geno <- the.genotypes[,-c(2:5)]
rownames(geno) <- as.vector(as.matrix(geno[,1]))
geno <- geno[,-1]
geno[1:6,1:6]
geno <- as.matrix(geno)
# Recode missing calls as 1 (the mid/het code in the 0/1/2 scheme).
geno[which(is.na(geno))] <- 1
#geno[which(geno=="N")] <- 1
# Shift to -1/0/1 coding and transpose to taxa-by-marker for rrBLUP::A.mat.
G <- t(geno-1)
library(rrBLUP)
# Read in phenotypic data
phdata <- read.csv("http://people.beocat.ksu.edu/~omo/Collaborations/Cowpea/cowpea.phenotypes.csv", header = T)
head(phdata)
colnames(phdata)[1] <- "Taxa"
dim(phdata)
phenames <- as.vector(colnames(phdata[,-1]))
geno.taxa <- data.frame(colnames(geno))
head(geno.taxa)
colnames(geno.taxa)[1] <- "Taxa"
# Keep only taxa present in both phenotype and genotype data.
com.tax <- merge(phdata, geno.taxa, by="Taxa")
head(com.tax)
# NOTE(review): the right-hand sides of the next two assignments are commented
# out, so R parses the three lines as ONE chained assignment
# (com.tax.FT.SEnvs <- com.tax.FT.BLUP <- com.tax <- merge(...)) and the
# merge() call references com.tax.FT.SEnvs before it exists -- this errors at
# run time. Restore the intended column subsets to fix.
com.tax.FT.SEnvs <- #com.tax[,c(1:5)]
com.tax.FT.BLUP <- #com.tax[,c(1,2)]
com.tax <- merge(com.tax.FT.SEnvs, com.tax.FT.BLUP, by="Taxa")
# match genotypes by common taxa
the.genotypes <- the.genotypes[, c(1:5, match(com.tax$Taxa, colnames(the.genotypes)))]
G[1:6,1:6]
G2 <- G[match(com.tax$Taxa, rownames(G)),]
Phenotypes.FT <- com.tax
# Impute missing data using rrBLUP function
impute=A.mat(G2,max.missing=0.5,impute.method="mean",return.imputed=T)
Markers_impute=impute$imputed
Markers_impute[1:6,1:6]
# NOTE(review): self-assignment below is a no-op; probably left over from an edit.
Markers_impute <- Markers_impute
# Shift back to 0/1/2 coding for the allele-class comparisons below.
Markers_impute2 <- Markers_impute+1
Markers_impute2[1:6,1:6]
# --- Per-trait single-marker QTL effects and PVE from MLMM results ---
# For each trait: regress the phenotype on each selected SNP, report the
# additive effect ((AA mean - BB mean)/2) and PVE (adjusted R^2 * 100).
FTFILD <- read.csv("FTFILD.MLMM.csv", header=T)
FTRILD <- read.csv("FTRILD.MLMM.csv", header=T)
FTFISD <- read.csv("FTFISD.MLMM.csv", header=T)
FTRISD <- read.csv("FTRISD.MLMM.csv", header=T)
FT_BLUP <- read.csv("FT_BLUP.MLMM.csv", header=T)
JL_RES <- rbind(FTFILD, FTRILD, FTFISD, FTRISD, FT_BLUP)
JL_RES$Trait <- as.character(JL_RES$Trait)
JL_RES$SNP <- as.character(JL_RES$SNP)
phenames <- names(com.tax[,-1])
for (l in 1:length(phenames))
#for(l in 1:3)
{
print(paste("-------------- Trait being analysed: ", phenames[l], "!!!!!!!!!!!---------------", sep = ""))
# SNPs selected by MLMM for this trait.
ExplVar200Best <- JL_RES[which(JL_RES$Trait==phenames[l]),]
bSNP<-Markers_impute2[,as.character(ExplVar200Best$SNP)]
phdata <- data.frame(Phenotypes.FT[,1], Phenotypes.FT[,phenames[l]])
colnames(phdata)[2] <- phenames[l]
colnames(phdata)[1] <- "Taxa"
#sP<-as.data.frame(phdata[,phenames[l]])
sP<-phdata
rownames(sP) <- sP$Taxa
da<-as.data.frame(cbind(sP, bSNP))
trait_QTL_Pheno <- da
# NOTE(review): append=T means re-running the script appends a second header
# row to an existing output file; delete old outputs before re-running.
write.table(t(data.frame(c("QTL", "Additive Effect", "PVE"))), paste("Cowpea.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
#APV is the Among population variance in accordance to Wurschum et al. 2011 Heredity
for(i in 3:ncol(trait_QTL_Pheno)){
snp <- colnames(trait_QTL_Pheno)[i]
print(paste("-------------- Trait being analysed: ", phenames[l], "SNP: ", snp, "!!!!!!!!!!!---------------", sep = ""))
trait_QTL_Pheno_2 <- trait_QTL_Pheno[,c(1,2,i)]
# Mean phenotype of the two homozygote classes (2 vs 0 coding).
AA_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==2),]
AA <- mean(AA_class[,2], na.rm=T)
BB_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==0),]
BB <- mean(BB_class[,2], na.rm=T)
QTL_effect <- (AA-BB)/2
#formula.single <- as.formula(paste("Cd_comb ~ ",paste(as.character(topSNP$SNP), collapse=" + "), sep=" "))
trait_QTL_Pheno_2$QTL <- trait_QTL_Pheno_2[,3]
#QTL <- colnames(trait_QTL_Pheno_2[3])
# Single-marker linear model; PVE = adjusted R^2 expressed as a percentage.
fin.anova <- lm(trait_QTL_Pheno_2[,phenames[l]] ~ QTL, data=trait_QTL_Pheno_2, na.action = na.omit)
fin.sum <- summary(fin.anova)
QVar <- round((fin.sum$adj.r.squared)*100, digits=2)#Phenotypes.FT[,phenames[l]]
print(paste("-------------- PVE For SNP: ", snp, "; Trait: ", phenames[l], " == ", QVar, "% !!!!!!!!!!!---------------", sep = ""))
write.table(t(data.frame(c(colnames(trait_QTL_Pheno[i]), round(abs(QTL_effect[1]), 1), QVar[1]))), paste("Cowpea.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
}
}
# --- Collect per-trait effect tables written by the loop above ---
FTFILD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTFILD_QTL.txt", header=T)
FTFILD.PVE$Trait <- rep("FTFILD", nrow(FTFILD.PVE))
FTRILD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTRILD_QTL.txt", header=T)
FTRILD.PVE$Trait <- rep("FTRILD", nrow(FTRILD.PVE))
FTFISD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTFISD_QTL.txt", header=T)
FTFISD.PVE$Trait <- rep("FTFISD", nrow(FTFISD.PVE))
FTRISD.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FTRISD_QTL.txt", header=T)
FTRISD.PVE$Trait <- rep("FTRISD", nrow(FTRISD.PVE))
# NOTE(review): file name says "FLT_BLUP" while the loop wrote "FT_BLUP" files
# -- confirm which name actually exists on disk.
FT_BLUP.PVE <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.QTL.Effects_FLT_BLUP_QTL.txt", header=T)
FT_BLUP.PVE$Trait <- rep("FT_BLUP", nrow(FT_BLUP.PVE))
FT.Eff <- rbind(FTFILD.PVE, FTRILD.PVE, FTFISD.PVE, FTRISD.PVE, FT_BLUP.PVE)
###################################################################################################################
# For Epistasis Markers
FTFILD <- read.csv("FTFILD.PATOWAS.csv", header=T)
FT_BLUP <- read.csv("FT_BLUP.PATOWAS.csv", header=T)
JL_RES <- rbind(FTFILD, FT_BLUP)
JL_RES$Trait <- as.character(JL_RES$Trait)
JL_RES$SNP <- as.character(JL_RES$SNP)
# Only two traits have epistasis results: columns 2 and 6 of com.tax.
phenames <- names(com.tax[,c(2,6)])
# --- Same effect/PVE loop as above, run on epistasis (PATOWAS) markers ---
# NOTE(review): this block duplicates the first loop verbatim except for the
# output file prefix; it could be factored into a function taking the prefix.
for (l in 1:length(phenames))
#for(l in 1:3)
{
print(paste("-------------- Trait being analysed: ", phenames[l], "!!!!!!!!!!!---------------", sep = ""))
ExplVar200Best <- JL_RES[which(JL_RES$Trait==phenames[l]),]
bSNP<-Markers_impute2[,as.character(ExplVar200Best$SNP)]
phdata <- data.frame(Phenotypes.FT[,1], Phenotypes.FT[,phenames[l]])
colnames(phdata)[2] <- phenames[l]
colnames(phdata)[1] <- "Taxa"
#sP<-as.data.frame(phdata[,phenames[l]])
sP<-phdata
rownames(sP) <- sP$Taxa
da<-as.data.frame(cbind(sP, bSNP))
trait_QTL_Pheno <- da
# append=T: stale output files accumulate duplicate header rows on re-runs.
write.table(t(data.frame(c("QTL", "Additive Effect", "PVE"))), paste("Cowpea.Epistasis.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
#APV is the Among population variance in accordance to Wurschum et al. 2011 Heredity
for(i in 3:ncol(trait_QTL_Pheno)){
snp <- colnames(trait_QTL_Pheno)[i]
print(paste("-------------- Trait being analysed: ", phenames[l], "SNP: ", snp, "!!!!!!!!!!!---------------", sep = ""))
trait_QTL_Pheno_2 <- trait_QTL_Pheno[,c(1,2,i)]
# Homozygote class means (genotype codes 2 and 0).
AA_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==2),]
AA <- mean(AA_class[,2], na.rm=T)
BB_class <- trait_QTL_Pheno[which(trait_QTL_Pheno[,i]==0),]
BB <- mean(BB_class[,2], na.rm=T)
QTL_effect <- (AA-BB)/2
#formula.single <- as.formula(paste("Cd_comb ~ ",paste(as.character(topSNP$SNP), collapse=" + "), sep=" "))
trait_QTL_Pheno_2$QTL <- trait_QTL_Pheno_2[,3]
#QTL <- colnames(trait_QTL_Pheno_2[3])
fin.anova <- lm(trait_QTL_Pheno_2[,phenames[l]] ~ QTL, data=trait_QTL_Pheno_2, na.action = na.omit)
fin.sum <- summary(fin.anova)
QVar <- round((fin.sum$adj.r.squared)*100, digits=2)#Phenotypes.FT[,phenames[l]]
print(paste("-------------- PVE For SNP: ", snp, "; Trait: ", phenames[l], " == ", QVar, "% !!!!!!!!!!!---------------", sep = ""))
write.table(t(data.frame(c(colnames(trait_QTL_Pheno[i]), round(abs(QTL_effect[1]), 1), QVar[1]))), paste("Cowpea.Epistasis.QTL.Effects_", phenames[l],"_QTL",".txt", sep=""), sep="\t", append=T, quote=F, row.names=F, col.names=F)
}
}
# Combine per-trait epistasis QTL effect tables into one data frame.
# NOTE(review): the FT_BLUP file is named "FLT_BLUP" -- possible typo in the
# file name; confirm it matches what the epistasis loop wrote out.
FTFILD.PVE.epi <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.Epistasis.QTL.Effects_FTFILD_QTL.txt", header=T)
FTFILD.PVE.epi$Trait <- rep("FTFILD", nrow(FTFILD.PVE.epi))
FT_BLUP.PVE.epi <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Results/Cowpea.Epistasis.QTL.Effects_FLT_BLUP_QTL.txt", header=T)
FT_BLUP.PVE.epi$Trait <- rep("FT_BLUP", nrow(FT_BLUP.PVE.epi))
FT.QTL.Epi <- rbind(FTFILD.PVE.epi, FT_BLUP.PVE.epi)
############################### Minor Allele Frequency
Markers_impute2[1:6,1:6]
# SNPs must be rows for calc_snp_stats, hence the transpose.
Markers_impute3 <- t(Markers_impute2)
Markers_impute3[1:6,1:6]
# NOTE(review): source() over plain http executes unpinned remote code --
# consider vendoring this script locally.
source("http://evachan.org/calc_snp_stats.R")
QTL.summary <- calc_snp_stats(Markers_impute3)
head(QTL.summary)
QTL.summary$QTL <- rownames(QTL.summary)
# Keep marker id plus two statistic columns (presumably maf and a count --
# TODO confirm against calc_snp_stats output layout).
QTL.summary <- QTL.summary[,c(14,5,6)]
#hist(QTL.summary$maf)
summary(QTL.summary)
# Attach MAF to the single-marker QTL effects and write the summary tables.
FT.info <- merge(FT.Eff, QTL.summary, by="QTL")
head(FT.info)
FT.info$maf <- round(FT.info$maf, 2)
head(FT.info)
map <- read.delim("/Users/omo/Google Drive/Post Doc/Collaborative Publications/Cowpea/Analysis/Data/cowpea.map.txt", header=T)
head(map)
# Prefix marker names with "V" to match the genotype column names.
map$Marker <- paste("V", map$Marker, sep="")
head(map)
map.q <- map
colnames(map.q)[1] <- "QTL"
FT.info2 <- merge(map.q, FT.info, by="QTL")
head(FT.info2)
write.table(FT.info2, "FT.PVE.ADE.MAF.MLMM.csv", sep=",", quote=F, row.names = F, col.names = T)
FT.Info.Epi <- merge(FT.QTL.Epi, QTL.summary, by="QTL")
FT.Info.Epi$maf <- round(FT.Info.Epi$maf, 2)
write.table(FT.Info.Epi, "FT.Epi.Summary.PVE.MAF.csv", sep=",", quote=F, row.names = F, col.names = T)
|
#' Hard clustering using k-means
#'
#' This function expects output from custom minimap test dataset that contains original locations of mapped reads in the genome.
#' For every Strand-seq library the per-read directional ratio
#' (plus - minus) / (plus + minus) is computed, collapsed to its sign
#' (-1/0/+1), and the resulting read-by-library matrix is clustered by k-means.
#'
#' @param counts.l A \code{list} of directional read counts per PB read per library.
#' @param nstart Number of random starts passed to \code{\link[stats]{kmeans}}.
#' @param iter.max Maximum number of k-means iterations.
#' @inheritParams SaaRclust
#' @return An \code{integer} vector of k-means cluster assignments, one per PB read.
#' @author David Porubsky
#' @export
hardClust <- function(counts.l=NULL, num.clusters=NULL, nstart=10, iter.max=10) {
  message("Hard clustering")
  ptm <- startTimedMessage(" Kmeans clustering for ", num.clusters, " clusters")
  # One strand-state ratio vector per library (preallocated, not grown).
  ratios.l <- vector("list", length(counts.l))
  for (j in seq_along(counts.l)) {
    counts <- counts.l[[j]]
    # Ratio of WW reads; 0/0 yields NaN (no reads) which is treated as neutral.
    ratios <- (counts[, 2] - counts[, 1]) / (counts[, 2] + counts[, 1])
    ratios[is.nan(ratios)] <- 0
    ratios.l[[j]] <- ratios
  }
  ratios.m <- do.call(cbind, ratios.l)
  # Collapse ratios to their sign so k-means only sees strand states.
  ratios.m <- sign(ratios.m)
  # Hard clustering using kmeans; warnings (e.g. non-convergence) are suppressed.
  km <- suppressWarnings( kmeans(ratios.m, centers = num.clusters, nstart = nstart, iter.max = iter.max) )
  ord <- km$cluster
  stopTimedMessage(ptm)
  return(ord)
}
#' Estimate theta values based on hard clustering
#'
#' This function takes results of hard clustering and estimates majority cell types for each Strand-seq library.
#' Each cluster receives a probability vector with mass 1 - alpha on its most
#' likely cell type and alpha/2 on each of the other two.
#'
#' @param counts.l A \code{list} of directional read counts per PB read per library.
#' @param hard.clust A \code{integer} of cluster assignments for each PacBio read.
#' @param alpha Probability mass spread over the non-majority cell types.
#' @inheritParams SaaRclust
#' @return A \code{list} of estimated theta values for every cluster and cell.
#' @author David Porubsky
#' @export
estimateTheta <- function(counts.l=NULL, hard.clust=NULL, alpha=0.1) {
  ptm <- startTimedMessage("Estimate theta values")
  theta.estim <- vector("list", length(counts.l))
  for (j in seq_along(counts.l)) {
    # Split this library's minus/plus counts by cluster membership.
    minus.c <- split(counts.l[[j]][, 1], hard.clust)
    plus.c <- split(counts.l[[j]][, 2], hard.clust)
    # Per-cluster cell-type likelihoods (countProb is a package helper).
    clust.prob <- mapply(function(X,Y) { countProb(X,Y) }, X=minus.c, Y=plus.c)
    clust.prob.norm <- lapply(clust.prob, function(x) colSums(log(x)))
    # Index (1..3) of the most likely cell type per cluster.
    estimates <- sapply(clust.prob.norm, which.max)
    # Assign cell type probs based on the majority type in each cluster:
    # winning type gets 1 - alpha, the other two get alpha/2 each.
    probs <- vector("list", length(clust.prob))
    for (i in seq_along(clust.prob)) {
      theta <- rep(alpha / 2, 3)
      theta[estimates[i]] <- 1 - alpha
      probs[[i]] <- theta
    }
    theta.estim[[j]] <- do.call(rbind, probs)
  }
  stopTimedMessage(ptm)
  return(theta.estim)
}
#' Hierarchical clustering for merging the kmeans clusters.
#'
#' This function takes as input the kmeans hard clustering output and the initialized thetas and merges the kmeans clusters based on thetas.
#' Clusters are merged by hierarchically clustering the concatenated theta
#' vectors and cutting the resulting tree at \code{k} groups.
#'
#' @param theta.l A \code{list} of estimated theta values for each cluster and cell.
#' @param hard.clust The kmeans hard clustering.
#' @param k Desired number of clusters after merging.
#' @inheritParams SaaRclust
#' @return A new hard clustering with the correct number of clusters
#' @author Maryam Ghareghani
#' @export
mergeClusters <- function(hard.clust, theta.l, k=46)
{
  ptm <- startTimedMessage("Merging clusters")
  # One row per original k-means cluster; columns span all libraries.
  theta.all <- do.call(cbind, theta.l)
  hc <- hclust(dist(theta.all))
  hc.clust <- cutree(hc, k=k)
  stopTimedMessage(ptm)
  # Vectorized lookup of each read's merged cluster id; replaces the original
  # per-element sapply() closure with direct indexing (same result, O(n) once).
  return(hc.clust[hard.clust])
}
|
/R/hardClust.R
|
no_license
|
maryam-ghr/SaaRclust
|
R
| false
| false
| 3,779
|
r
|
#' Hard clustering using k-means
#'
#' This function expects output from custom minimap test dataset that contains original locations of mapped reads in the genome.
#' For every Strand-seq library the per-read directional ratio
#' (plus - minus) / (plus + minus) is computed, collapsed to its sign
#' (-1/0/+1), and the resulting read-by-library matrix is clustered by k-means.
#'
#' @param counts.l A \code{list} of directional read counts per PB read per library.
#' @param nstart Number of random starts passed to \code{\link[stats]{kmeans}}.
#' @param iter.max Maximum number of k-means iterations.
#' @inheritParams SaaRclust
#' @return An \code{integer} vector of k-means cluster assignments, one per PB read.
#' @author David Porubsky
#' @export
hardClust <- function(counts.l=NULL, num.clusters=NULL, nstart=10, iter.max=10) {
  message("Hard clustering")
  ptm <- startTimedMessage(" Kmeans clustering for ", num.clusters, " clusters")
  # One strand-state ratio vector per library (preallocated, not grown).
  ratios.l <- vector("list", length(counts.l))
  for (j in seq_along(counts.l)) {
    counts <- counts.l[[j]]
    # Ratio of WW reads; 0/0 yields NaN (no reads) which is treated as neutral.
    ratios <- (counts[, 2] - counts[, 1]) / (counts[, 2] + counts[, 1])
    ratios[is.nan(ratios)] <- 0
    ratios.l[[j]] <- ratios
  }
  ratios.m <- do.call(cbind, ratios.l)
  # Collapse ratios to their sign so k-means only sees strand states.
  ratios.m <- sign(ratios.m)
  # Hard clustering using kmeans; warnings (e.g. non-convergence) are suppressed.
  km <- suppressWarnings( kmeans(ratios.m, centers = num.clusters, nstart = nstart, iter.max = iter.max) )
  ord <- km$cluster
  stopTimedMessage(ptm)
  return(ord)
}
#' Estimate theta values based on hard clustering
#'
#' This function takes results of hard clustering and estimates majority cell types for each Strand-seq library.
#' Each cluster receives a probability vector with mass 1 - alpha on its most
#' likely cell type and alpha/2 on each of the other two.
#'
#' @param counts.l A \code{list} of directional read counts per PB read per library.
#' @param hard.clust A \code{integer} of cluster assignments for each PacBio read.
#' @param alpha Probability mass spread over the non-majority cell types.
#' @inheritParams SaaRclust
#' @return A \code{list} of estimated theta values for every cluster and cell.
#' @author David Porubsky
#' @export
estimateTheta <- function(counts.l=NULL, hard.clust=NULL, alpha=0.1) {
  ptm <- startTimedMessage("Estimate theta values")
  theta.estim <- vector("list", length(counts.l))
  for (j in seq_along(counts.l)) {
    # Split this library's minus/plus counts by cluster membership.
    minus.c <- split(counts.l[[j]][, 1], hard.clust)
    plus.c <- split(counts.l[[j]][, 2], hard.clust)
    # Per-cluster cell-type likelihoods (countProb is a package helper).
    clust.prob <- mapply(function(X,Y) { countProb(X,Y) }, X=minus.c, Y=plus.c)
    clust.prob.norm <- lapply(clust.prob, function(x) colSums(log(x)))
    # Index (1..3) of the most likely cell type per cluster.
    estimates <- sapply(clust.prob.norm, which.max)
    # Assign cell type probs based on the majority type in each cluster:
    # winning type gets 1 - alpha, the other two get alpha/2 each.
    probs <- vector("list", length(clust.prob))
    for (i in seq_along(clust.prob)) {
      theta <- rep(alpha / 2, 3)
      theta[estimates[i]] <- 1 - alpha
      probs[[i]] <- theta
    }
    theta.estim[[j]] <- do.call(rbind, probs)
  }
  stopTimedMessage(ptm)
  return(theta.estim)
}
#' Hierarchical clustering for merging the kmeans clusters.
#'
#' This function takes as input the kmeans hard clustering output and the initialized thetas and merges the kmeans clusters based on thetas.
#' Clusters are merged by hierarchically clustering the concatenated theta
#' vectors and cutting the resulting tree at \code{k} groups.
#'
#' @param theta.l A \code{list} of estimated theta values for each cluster and cell.
#' @param hard.clust The kmeans hard clustering.
#' @param k Desired number of clusters after merging.
#' @inheritParams SaaRclust
#' @return A new hard clustering with the correct number of clusters
#' @author Maryam Ghareghani
#' @export
mergeClusters <- function(hard.clust, theta.l, k=46)
{
  ptm <- startTimedMessage("Merging clusters")
  # One row per original k-means cluster; columns span all libraries.
  theta.all <- do.call(cbind, theta.l)
  hc <- hclust(dist(theta.all))
  hc.clust <- cutree(hc, k=k)
  stopTimedMessage(ptm)
  # Vectorized lookup of each read's merged cluster id; replaces the original
  # per-element sapply() closure with direct indexing (same result, O(n) once).
  return(hc.clust[hard.clust])
}
|
# Simulates data for a Gaussian linear model and saves it to lmData.RData for
# the Gibbs/Stan bridge regression examples.
# NOTE(review): no set.seed(), so each run produces a different data set --
# add a seed if reproducibility is wanted. setwd()/rm(list = ls()) are also
# script-only conveniences that should not survive into reusable code.
setwd("~/git/Iterative_ML/R/gibbs_stan_bridge_lm")
rm(list = ls())
#Parameters
N = 100
k = 50
beta = runif(k, -10, 10)
sd = 25
#Data creation
# Design matrix: an intercept column plus k-1 uniform covariates on [-10, 10].
X = matrix(c(rep(1, N), runif((length(beta) - 1) * N, -10, 10)), ncol = length(beta))
y = X %*% beta + rnorm(N, sd = sd)
#Save:
save(X, y, file = "lmData.RData")
|
/R/gibbs_stan_bridge_lm/create_data.R
|
no_license
|
tkmckenzie/Iterative_ML
|
R
| false
| false
| 308
|
r
|
# Simulates data for a Gaussian linear model and saves it to lmData.RData for
# the Gibbs/Stan bridge regression examples.
# NOTE(review): no set.seed(), so each run produces a different data set --
# add a seed if reproducibility is wanted. setwd()/rm(list = ls()) are also
# script-only conveniences that should not survive into reusable code.
setwd("~/git/Iterative_ML/R/gibbs_stan_bridge_lm")
rm(list = ls())
#Parameters
N = 100
k = 50
beta = runif(k, -10, 10)
sd = 25
#Data creation
# Design matrix: an intercept column plus k-1 uniform covariates on [-10, 10].
X = matrix(c(rep(1, N), runif((length(beta) - 1) * N, -10, 10)), ncol = length(beta))
y = X %*% beta + rnorm(N, sd = sd)
#Save:
save(X, y, file = "lmData.RData")
|
# rpart {rpart}
# SuperLearner wrapper around rpart: fits a regression tree (method="anova")
# for gaussian outcomes or a classification tree (method="class") for binomial
# outcomes, then predicts on newX.temp.
#
# Y.temp      outcome vector
# X.temp      training predictors (data.frame/matrix)
# newX.temp   predictors to generate predictions for
# family      family object; family$family selects the tree method
# obsWeights  observation weights forwarded to rpart
# cp/minsplit/xval/maxdepth  rpart.control() tuning parameters
#
# Returns a list with $out (predictions) and $fit (class "SL.rpart").
SL.rpart <- function(Y.temp, X.temp, newX.temp, family, obsWeights, cp =0.01, minsplit = 20, xval=10, maxdepth=30,...) {
tryCatch(require(rpart), warning = function(...){ stop("you have selected rpart as a library algorithm but do not have the rpart package installed")})
if(family$family=="gaussian"){
fit.rpart <- rpart(Y.temp~., data=data.frame(Y.temp, X.temp), control = rpart.control(cp=cp, minsplit=minsplit, xval=xval, maxdepth=maxdepth), method="anova", weights = obsWeights)
out <- predict(fit.rpart, newdata=newX.temp)
}
if(family$family=="binomial"){
fit.rpart <- rpart(Y.temp~., data=data.frame(Y.temp, X.temp), control = rpart.control(cp=cp, minsplit=minsplit, xval=xval, maxdepth=maxdepth), method="class", weights = obsWeights)
# Column 2 of the class-probability matrix: predicted P(second class).
out <- predict(fit.rpart, newdata=newX.temp)[, 2]
}
fit <- list(object=fit.rpart)
foo <- list(out=out, fit=fit)
class(foo$fit) <- c("SL.rpart")
return(foo)
}
#
# Predict method for SL.rpart fits: ensures rpart is available, then forwards
# to predict() with the new data. family/X/Y/... are accepted for interface
# compatibility but unused.
# NOTE(review): predict() is dispatched on `object` itself rather than on an
# inner rpart model -- confirm callers pass the bare rpart fit here.
predict.SL.rpart <- function(object, newdata, family, X=NULL, Y=NULL,...) {
  tryCatch(require(rpart), warning = function(...) { stop("you have selected rpart as a library algorithm but do not have the rpart package installed")})
  pred <- predict(object, newdata=newdata)
  pred
}
|
/R/SL.rpart.R
|
no_license
|
tedwestling/SuperLearner_Old
|
R
| false
| false
| 1,224
|
r
|
# rpart {rpart}
# SuperLearner wrapper around rpart: fits a regression tree (method="anova")
# for gaussian outcomes or a classification tree (method="class") for binomial
# outcomes, then predicts on newX.temp.
#
# Y.temp      outcome vector
# X.temp      training predictors (data.frame/matrix)
# newX.temp   predictors to generate predictions for
# family      family object; family$family selects the tree method
# obsWeights  observation weights forwarded to rpart
# cp/minsplit/xval/maxdepth  rpart.control() tuning parameters
#
# Returns a list with $out (predictions) and $fit (class "SL.rpart").
SL.rpart <- function(Y.temp, X.temp, newX.temp, family, obsWeights, cp =0.01, minsplit = 20, xval=10, maxdepth=30,...) {
tryCatch(require(rpart), warning = function(...){ stop("you have selected rpart as a library algorithm but do not have the rpart package installed")})
if(family$family=="gaussian"){
fit.rpart <- rpart(Y.temp~., data=data.frame(Y.temp, X.temp), control = rpart.control(cp=cp, minsplit=minsplit, xval=xval, maxdepth=maxdepth), method="anova", weights = obsWeights)
out <- predict(fit.rpart, newdata=newX.temp)
}
if(family$family=="binomial"){
fit.rpart <- rpart(Y.temp~., data=data.frame(Y.temp, X.temp), control = rpart.control(cp=cp, minsplit=minsplit, xval=xval, maxdepth=maxdepth), method="class", weights = obsWeights)
# Column 2 of the class-probability matrix: predicted P(second class).
out <- predict(fit.rpart, newdata=newX.temp)[, 2]
}
fit <- list(object=fit.rpart)
foo <- list(out=out, fit=fit)
class(foo$fit) <- c("SL.rpart")
return(foo)
}
#
# Predict method for SL.rpart fits: ensures rpart is available, then forwards
# to predict() with the new data. family/X/Y/... are accepted for interface
# compatibility but unused.
# NOTE(review): predict() is dispatched on `object` itself rather than on an
# inner rpart model -- confirm callers pass the bare rpart fit here.
predict.SL.rpart <- function(object, newdata, family, X=NULL, Y=NULL,...) {
  tryCatch(require(rpart), warning = function(...) { stop("you have selected rpart as a library algorithm but do not have the rpart package installed")})
  pred <- predict(object, newdata=newdata)
  pred
}
|
# Expected-value fixture (dput output): a 5-row data.frame of fake REDCap
# records (demographics, health, and race/ethnicity instruments) with
# checkbox columns (race___1..race___6) exported as labels. Used to validate
# a batched read; do not edit values by hand.
structure(list(record_id = c(1, 2, 3, 4, 5), name_first = c("Nutmeg",
"Tumtum", "Marcus", "Trudy", "John Lee"), name_last = c("Nutmouse",
"Nutmouse", "Wood", "DAG", "Walker"), address = c("14 Rose Cottage St.\nKenning UK, 323232",
"14 Rose Cottage Blvd.\nKenning UK 34243", "243 Hill St.\nGuthrie OK 73402",
"342 Elm\nDuncanville TX, 75116", "Hotel Suite\nNew Orleans LA, 70115"
), telephone = c("(405) 321-1111", "(405) 321-2222", "(405) 321-3333",
"(405) 321-4444", "(405) 321-5555"), email = c("nutty@mouse.com",
"tummy@mouse.comm", "mw@mwood.net", "peroxide@blonde.com", "left@hippocket.com"
), dob = structure(c(12294, 12121, -13051, -6269, -5375), class = "Date"),
age = c(11, 11, 80, 61, 59), sex = c("Female", "Male", "Male",
"Female", "Male"), demographics_complete = c("Complete",
"Complete", "Complete", "Complete", "Complete"), height = c(7,
6, 180, 165, 193.04), weight = c(1, 1, 80, 54, 104), bmi = c(204.1,
277.8, 24.7, 19.8, 27.9), comments = c("Character in a book, with some guessing",
"A mouse character from a good book", "completely made up",
"This record doesn't have a DAG assigned\n\nSo call up Trudy on the telephone\nSend her a letter in the mail",
"Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache"
), mugshot = c("mugshot-1.jpg", "mugshot-2.jpg", "mugshot-3.jpg",
"mugshot-4.jpg", "mugshot-5.jpg"), health_complete = c("Unverified",
"Incomplete", "Complete", "Complete", "Incomplete"), race___1 = c(NA,
NA, NA, NA, "American Indian/Alaska Native"), race___2 = c(NA,
NA, NA, "Asian", NA), race___3 = c(NA, "Native Hawaiian or Other Pacific Islander",
NA, NA, NA), race___4 = c(NA, NA, "Black or African American",
NA, NA), race___5 = c("White", "White", "White", "White",
NA), race___6 = c(NA, NA, NA, NA, "Unknown / Not Reported"
), ethnicity = c("NOT Hispanic or Latino", "NOT Hispanic or Latino",
"Unknown / Not Reported", "NOT Hispanic or Latino", "Hispanic or Latino"
), interpreter_needed = c(FALSE, FALSE, TRUE, NA, FALSE),
race_and_ethnicity_complete = c("Complete", "Incomplete",
"Complete", "Complete", "Complete")), row.names = c(NA, -5L
), class = "data.frame")
|
/inst/test-data/specific-redcapr/read-batch-simple/export_checkbox_label.R
|
permissive
|
OuhscBbmc/REDCapR
|
R
| false
| false
| 2,252
|
r
|
# Expected-value fixture (dput output): a 5-row data.frame of fake REDCap
# records (demographics, health, and race/ethnicity instruments) with
# checkbox columns (race___1..race___6) exported as labels. Used to validate
# a batched read; do not edit values by hand.
structure(list(record_id = c(1, 2, 3, 4, 5), name_first = c("Nutmeg",
"Tumtum", "Marcus", "Trudy", "John Lee"), name_last = c("Nutmouse",
"Nutmouse", "Wood", "DAG", "Walker"), address = c("14 Rose Cottage St.\nKenning UK, 323232",
"14 Rose Cottage Blvd.\nKenning UK 34243", "243 Hill St.\nGuthrie OK 73402",
"342 Elm\nDuncanville TX, 75116", "Hotel Suite\nNew Orleans LA, 70115"
), telephone = c("(405) 321-1111", "(405) 321-2222", "(405) 321-3333",
"(405) 321-4444", "(405) 321-5555"), email = c("nutty@mouse.com",
"tummy@mouse.comm", "mw@mwood.net", "peroxide@blonde.com", "left@hippocket.com"
), dob = structure(c(12294, 12121, -13051, -6269, -5375), class = "Date"),
age = c(11, 11, 80, 61, 59), sex = c("Female", "Male", "Male",
"Female", "Male"), demographics_complete = c("Complete",
"Complete", "Complete", "Complete", "Complete"), height = c(7,
6, 180, 165, 193.04), weight = c(1, 1, 80, 54, 104), bmi = c(204.1,
277.8, 24.7, 19.8, 27.9), comments = c("Character in a book, with some guessing",
"A mouse character from a good book", "completely made up",
"This record doesn't have a DAG assigned\n\nSo call up Trudy on the telephone\nSend her a letter in the mail",
"Had a hand for trouble and a eye for cash\n\nHe had a gold watch chain and a black mustache"
), mugshot = c("mugshot-1.jpg", "mugshot-2.jpg", "mugshot-3.jpg",
"mugshot-4.jpg", "mugshot-5.jpg"), health_complete = c("Unverified",
"Incomplete", "Complete", "Complete", "Incomplete"), race___1 = c(NA,
NA, NA, NA, "American Indian/Alaska Native"), race___2 = c(NA,
NA, NA, "Asian", NA), race___3 = c(NA, "Native Hawaiian or Other Pacific Islander",
NA, NA, NA), race___4 = c(NA, NA, "Black or African American",
NA, NA), race___5 = c("White", "White", "White", "White",
NA), race___6 = c(NA, NA, NA, NA, "Unknown / Not Reported"
), ethnicity = c("NOT Hispanic or Latino", "NOT Hispanic or Latino",
"Unknown / Not Reported", "NOT Hispanic or Latino", "Hispanic or Latino"
), interpreter_needed = c(FALSE, FALSE, TRUE, NA, FALSE),
race_and_ethnicity_complete = c("Complete", "Incomplete",
"Complete", "Complete", "Complete")), row.names = c(NA, -5L
), class = "data.frame")
|
#' Interactive Heatmap
#'
#' EXTENSION OFF JOE CHENG'S d3heatmap/rstudio. THIS BY NO MEANS IS COMPLETELY
#' MY OWN WORK. CREDITS TO JOE CHENG!!!
#'
#' @import htmlwidgets
#'
#' @export
#'
NULL
# Null-default infix helper: yields `a` when it is non-NULL, otherwise `b`.
`%||%` <- function(a, b) {
  if (is.null(a)) {
    b
  } else {
    a
  }
}
# Build an interactive heatmap htmlwidget from a numeric matrix.
#
# mainData           numeric matrix to display (row/col names assigned 1..n if missing)
# theme              optional theme name passed through to the JS binding
# width, height      widget dimensions
# colAnnote/rowAnnote  optional annotation tables for columns/rows
# ClustM, distM      hclust agglomeration method and dist metric
# Colv, Rowv         cluster (and reorder by) columns / rows?
#
# Returns an htmlwidgets widget carrying the reordered matrix, serialized
# dendrograms, color scale, and annotation payloads.
iHeatmap <- function(mainData,
theme = NULL,
width = NULL,
height = NULL,
colAnnote=NULL,
rowAnnote=NULL,
ClustM = "complete",
distM = "euclidean",
Colv = TRUE,
Rowv = TRUE,...) {
## sees if rownames/ col names exist for entered matrix
if (length(row.names(mainData))==0) {
row.names(mainData) = c(1:dim(mainData)[1])
}
if (length(colnames(mainData))== 0) {
colnames(mainData) = c(1:dim(mainData)[2])
}
#########FIX THIS!!!
#########FIX THIS!!!
#########FIX THIS!!!
# Row clustering: reorder the matrix (and row annotations) by the hclust
# order and serialize the dendrogram via the package helper HCtoJSON.
# NOTE(review): if rowAnnote has a single column, rowAnnote[order, ] drops to
# a vector -- confirm downstream code tolerates that (missing drop=FALSE).
if (Rowv) {
rowClust <- hclust(dist(mainData,distM),ClustM)
mainData <- mainData[rowClust$order,]
if (!is.null(rowAnnote)) {
rowAnnotes <- rowAnnote[rowClust$order,]
}
rowDend <- HCtoJSON(rowClust)
} else {
rowDend = NULL
rowAnnotes <- rowAnnote
}
### NEED TO RUN EVEN IF METADATA is different dimensions
# Column clustering mirrors the row branch, on the transposed matrix.
if (Colv) {
colClust <- hclust(dist(t(mainData),distM),ClustM)
mainData <- mainData[,colClust$order]
if (!is.null(colAnnote)) {
colAnnotes <- colAnnote[colClust$order,]
}
colDend <- HCtoJSON(colClust)
} else {
colDend = NULL
colAnnotes <- colAnnote
}
# Validate row annotations: keep them only when their row count matches the
# heatmap's row count; otherwise drop annotations and header.
if (!is.null(rowAnnote)) {
if (length(row.names(rowAnnote))==0) {
row.names(rowAnnote) = c(1:dim(rowAnnote)[1])
colnames(rowAnnote) = c(1:dim(rowAnnote)[2])
}
if (length(rowAnnote[,1])==dim(mainData)[1]) {
rowAnnotes <- matrix(rowAnnotes)
rowHead <- colnames(rowAnnote)
} else {
rowAnnotes <- NULL
rowHead <- NULL
}
} else {
rowAnnotes <- rowAnnote
rowHead <- NULL
}
# Same validation for column annotations, against the column count.
if (!is.null(colAnnote)) {
if(length(row.names(colAnnote))==0) {
row.names(colAnnote) = c(1:dim(colAnnote)[1])
colnames(colAnnote) = c(1:dim(colAnnote)[2])
}
if (length(colAnnote[,1])==dim(mainData)[2]) {
colAnnotes <- matrix(colAnnotes)
colHead <- colnames(colAnnote)
} else {
colAnnotes <- NULL
colHead <- NULL
}
} else {
colAnnotes <- colAnnote
colHead <- NULL
}
#########FIX THIS!!!
#########FIX THIS!!!
##Dealing with outliers.. Simple boxplot$out
##rng <- range(mainData[abs(mainData)<min(abs(boxplot(mainData)$out))])
# Color scale: 100-step descending domain from ceiling(max) to floor(min),
# mapped onto heat.colors with the trailing "FF" alpha channel stripped so
# the hex strings are plain #RRGGBB for d3.
rng <- range(mainData)
domain <- seq.int(ceiling(rng[2]), floor(rng[1]), length.out = 100)
colors <- heat.colors(100)
colors <- sub('FF$', '', colors)
colMeta <- list(data = colAnnotes,
header = colHead)
rowMeta <- list(data = rowAnnotes,
header = rowHead)
# Matrix payload is flattened row-major (t() then as.numeric) for the JS side.
matrix <- list(data = as.numeric(t(mainData)),
dim = dim(mainData),
rows = row.names(mainData),
cols = colnames(mainData),
colors = colors,
domain = domain)
x <- list(rows = rowDend, cols = colDend, theme = theme, colMeta = colMeta,rowMeta = rowMeta, matrix = matrix)
# create widget
htmlwidgets::createWidget(
name = 'iHeatmap',
x,
width = width,
height = height,
package = 'iHeatmap',
sizingPolicy = htmlwidgets::sizingPolicy(browser.fill = TRUE)
)
}
#' Widget output function for use in Shiny
#'
#' Reserves a container of the given size for an iHeatmap widget in a Shiny UI.
#'
#' @param outputId Output slot id to read the widget from.
#' @param width,height CSS dimensions of the widget container.
#' @export
iHeatmapOutput <- function(outputId, width = '100%', height = '400px'){
  widget_placeholder <- shinyWidgetOutput(outputId, 'iHeatmap', width, height, package = 'iHeatmap')
  widget_placeholder
}
#' Widget render function for use in Shiny
#'
#' @param expr An expression that generates an iHeatmap widget.
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})?
#' @export
renderIHeatmap <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
shinyRenderWidget(expr, iHeatmapOutput, env, quoted = TRUE)
}
|
/R/iHeatmap.R
|
no_license
|
gvanzin/Interactive-bioPlots
|
R
| false
| false
| 3,998
|
r
|
#' Interactive Heatmap
#'
#' EXTENSION OFF JOE CHENG'S d3heatmap/rstudio. THIS BY NO MEANS IS COMPLETELY
#' MY OWN WORK. CREDITS TO JOE CHENG!!!
#'
#' @import htmlwidgets
#'
#' @export
#'
NULL
# Null-default infix helper: yields `a` when it is non-NULL, otherwise `b`.
`%||%` <- function(a, b) {
  if (is.null(a)) {
    b
  } else {
    a
  }
}
# Build an interactive heatmap htmlwidget from a numeric matrix.
#
# mainData           numeric matrix to display (row/col names assigned 1..n if missing)
# theme              optional theme name passed through to the JS binding
# width, height      widget dimensions
# colAnnote/rowAnnote  optional annotation tables for columns/rows
# ClustM, distM      hclust agglomeration method and dist metric
# Colv, Rowv         cluster (and reorder by) columns / rows?
#
# Returns an htmlwidgets widget carrying the reordered matrix, serialized
# dendrograms, color scale, and annotation payloads.
iHeatmap <- function(mainData,
theme = NULL,
width = NULL,
height = NULL,
colAnnote=NULL,
rowAnnote=NULL,
ClustM = "complete",
distM = "euclidean",
Colv = TRUE,
Rowv = TRUE,...) {
## sees if rownames/ col names exist for entered matrix
if (length(row.names(mainData))==0) {
row.names(mainData) = c(1:dim(mainData)[1])
}
if (length(colnames(mainData))== 0) {
colnames(mainData) = c(1:dim(mainData)[2])
}
#########FIX THIS!!!
#########FIX THIS!!!
#########FIX THIS!!!
# Row clustering: reorder the matrix (and row annotations) by the hclust
# order and serialize the dendrogram via the package helper HCtoJSON.
# NOTE(review): if rowAnnote has a single column, rowAnnote[order, ] drops to
# a vector -- confirm downstream code tolerates that (missing drop=FALSE).
if (Rowv) {
rowClust <- hclust(dist(mainData,distM),ClustM)
mainData <- mainData[rowClust$order,]
if (!is.null(rowAnnote)) {
rowAnnotes <- rowAnnote[rowClust$order,]
}
rowDend <- HCtoJSON(rowClust)
} else {
rowDend = NULL
rowAnnotes <- rowAnnote
}
### NEED TO RUN EVEN IF METADATA is different dimensions
# Column clustering mirrors the row branch, on the transposed matrix.
if (Colv) {
colClust <- hclust(dist(t(mainData),distM),ClustM)
mainData <- mainData[,colClust$order]
if (!is.null(colAnnote)) {
colAnnotes <- colAnnote[colClust$order,]
}
colDend <- HCtoJSON(colClust)
} else {
colDend = NULL
colAnnotes <- colAnnote
}
# Validate row annotations: keep them only when their row count matches the
# heatmap's row count; otherwise drop annotations and header.
if (!is.null(rowAnnote)) {
if (length(row.names(rowAnnote))==0) {
row.names(rowAnnote) = c(1:dim(rowAnnote)[1])
colnames(rowAnnote) = c(1:dim(rowAnnote)[2])
}
if (length(rowAnnote[,1])==dim(mainData)[1]) {
rowAnnotes <- matrix(rowAnnotes)
rowHead <- colnames(rowAnnote)
} else {
rowAnnotes <- NULL
rowHead <- NULL
}
} else {
rowAnnotes <- rowAnnote
rowHead <- NULL
}
# Same validation for column annotations, against the column count.
if (!is.null(colAnnote)) {
if(length(row.names(colAnnote))==0) {
row.names(colAnnote) = c(1:dim(colAnnote)[1])
colnames(colAnnote) = c(1:dim(colAnnote)[2])
}
if (length(colAnnote[,1])==dim(mainData)[2]) {
colAnnotes <- matrix(colAnnotes)
colHead <- colnames(colAnnote)
} else {
colAnnotes <- NULL
colHead <- NULL
}
} else {
colAnnotes <- colAnnote
colHead <- NULL
}
#########FIX THIS!!!
#########FIX THIS!!!
##Dealing with outliers.. Simple boxplot$out
##rng <- range(mainData[abs(mainData)<min(abs(boxplot(mainData)$out))])
# Color scale: 100-step descending domain from ceiling(max) to floor(min),
# mapped onto heat.colors with the trailing "FF" alpha channel stripped so
# the hex strings are plain #RRGGBB for d3.
rng <- range(mainData)
domain <- seq.int(ceiling(rng[2]), floor(rng[1]), length.out = 100)
colors <- heat.colors(100)
colors <- sub('FF$', '', colors)
colMeta <- list(data = colAnnotes,
header = colHead)
rowMeta <- list(data = rowAnnotes,
header = rowHead)
# Matrix payload is flattened row-major (t() then as.numeric) for the JS side.
matrix <- list(data = as.numeric(t(mainData)),
dim = dim(mainData),
rows = row.names(mainData),
cols = colnames(mainData),
colors = colors,
domain = domain)
x <- list(rows = rowDend, cols = colDend, theme = theme, colMeta = colMeta,rowMeta = rowMeta, matrix = matrix)
# create widget
htmlwidgets::createWidget(
name = 'iHeatmap',
x,
width = width,
height = height,
package = 'iHeatmap',
sizingPolicy = htmlwidgets::sizingPolicy(browser.fill = TRUE)
)
}
#' Widget output function for use in Shiny
#'
#' Creates the HTML container in which an iHeatmap widget is rendered by a
#' matching \code{renderIHeatmap()} call in the Shiny server function.
#'
#' @param outputId Output variable to read from (character scalar).
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#'   \code{'400px'}) or a number, which will be coerced to a string and
#'   have \code{'px'} appended.
#' @export
iHeatmapOutput <- function(outputId, width = '100%', height = '400px'){
  # Namespace-qualified so the call works without importing the symbol
  # from htmlwidgets into this package's namespace.
  htmlwidgets::shinyWidgetOutput(outputId, 'iHeatmap', width, height, package = 'iHeatmap')
}
#' Widget render function for use in Shiny
#'
#' Renders an iHeatmap widget inside a container created by
#' \code{iHeatmapOutput()}.
#'
#' @param expr An expression that generates an iHeatmap widget.
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})?
#'   This is useful if you want to save an expression in a variable.
#' @export
renderIHeatmap <- function(expr, env = parent.frame(), quoted = FALSE) {
  if (!quoted) { expr <- substitute(expr) } # force quoted
  # Namespace-qualified so the call works without importing the symbol
  # from htmlwidgets into this package's namespace.
  htmlwidgets::shinyRenderWidget(expr, iHeatmapOutput, env, quoted = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WPSExecuteResponse.R
\docType{class}
\name{WPSExecuteResponse}
\alias{WPSExecuteResponse}
\title{WPSExecuteResponse}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} for modelling a WPS Execute response
}
\description{
WPSExecuteResponse
WPSExecuteResponse
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ExecuteResponse}
\keyword{OGC}
\keyword{WPS}
\section{Super class}{
\code{\link[ows4R:OGCAbstractObject]{ows4R::OGCAbstractObject}} -> \code{WPSExecuteResponse}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{process}}{process}
\item{\code{status}}{status}
\item{\code{statusLocation}}{status location}
\item{\code{statusHistory}}{status history}
\item{\code{processOutputs}}{process outputs}
\item{\code{exception}}{exception}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-WPSExecuteResponse-new}{\code{WPSExecuteResponse$new()}}
\item \href{#method-WPSExecuteResponse-getProcess}{\code{WPSExecuteResponse$getProcess()}}
\item \href{#method-WPSExecuteResponse-getStatus}{\code{WPSExecuteResponse$getStatus()}}
\item \href{#method-WPSExecuteResponse-getStatusLocation}{\code{WPSExecuteResponse$getStatusLocation()}}
\item \href{#method-WPSExecuteResponse-getStatusHistory}{\code{WPSExecuteResponse$getStatusHistory()}}
\item \href{#method-WPSExecuteResponse-getProcessOutputs}{\code{WPSExecuteResponse$getProcessOutputs()}}
\item \href{#method-WPSExecuteResponse-getException}{\code{WPSExecuteResponse$getException()}}
\item \href{#method-WPSExecuteResponse-decode}{\code{WPSExecuteResponse$decode()}}
\item \href{#method-WPSExecuteResponse-update}{\code{WPSExecuteResponse$update()}}
\item \href{#method-WPSExecuteResponse-clone}{\code{WPSExecuteResponse$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="ERROR"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-ERROR'><code>ows4R::OGCAbstractObject$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="INFO"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-INFO'><code>ows4R::OGCAbstractObject$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="WARN"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-WARN'><code>ows4R::OGCAbstractObject$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="encode"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-encode'><code>ows4R::OGCAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClass"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClass'><code>ows4R::OGCAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClassName"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClassName'><code>ows4R::OGCAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getNamespaceDefinition"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getNamespaceDefinition'><code>ows4R::OGCAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="isFieldInheritedFrom"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-isFieldInheritedFrom'><code>ows4R::OGCAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="logger"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-logger'><code>ows4R::OGCAbstractObject$logger()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-new"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-new}{}}}
\subsection{Method \code{new()}}{
Initializes a \link{WPSExecuteResponse}
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$new(
xml,
capabilities,
processDescription = NULL,
logger = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class} from \pkg{XML}}
\item{\code{capabilities}}{object of class \link{WPSCapabilities}}
\item{\code{processDescription}}{process description}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getProcess"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getProcess}{}}}
\subsection{Method \code{getProcess()}}{
Get process
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getProcess()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSProcess}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatus"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatus}{}}}
\subsection{Method \code{getStatus()}}{
Get status
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatus()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSStatus}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatusLocation"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatusLocation}{}}}
\subsection{Method \code{getStatusLocation()}}{
Get status location
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatusLocation()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \code{character}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatusHistory"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatusHistory}{}}}
\subsection{Method \code{getStatusHistory()}}{
Get status history
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatusHistory()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \code{character}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getProcessOutputs"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getProcessOutputs}{}}}
\subsection{Method \code{getProcessOutputs()}}{
Get list of process outputs
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getProcessOutputs()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
a \code{list} of outputs
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getException"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getException}{}}}
\subsection{Method \code{getException()}}{
Get exception
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getException()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSException}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-decode"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-decode}{}}}
\subsection{Method \code{decode()}}{
Decodes an object of class \link{WPSExecuteResponse} from XML
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$decode(xml, capabilities, processDescription, logger)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class} from \pkg{XML}}
\item{\code{capabilities}}{object of class \link{WPSCapabilities}}
\item{\code{processDescription}}{process description}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-update"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-update}{}}}
\subsection{Method \code{update()}}{
Updates status history
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$update(verbose = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{verbose}}{verbose. Default is \code{FALSE}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-clone"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/WPSExecuteResponse.Rd
|
no_license
|
cran/ows4R
|
R
| false
| true
| 9,524
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WPSExecuteResponse.R
\docType{class}
\name{WPSExecuteResponse}
\alias{WPSExecuteResponse}
\title{WPSExecuteResponse}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} for modelling a WPS Execute response
}
\description{
WPSExecuteResponse
WPSExecuteResponse
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{ExecuteResponse}
\keyword{OGC}
\keyword{WPS}
\section{Super class}{
\code{\link[ows4R:OGCAbstractObject]{ows4R::OGCAbstractObject}} -> \code{WPSExecuteResponse}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{process}}{process}
\item{\code{status}}{status}
\item{\code{statusLocation}}{status location}
\item{\code{statusHistory}}{status history}
\item{\code{processOutputs}}{process outputs}
\item{\code{exception}}{exception}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-WPSExecuteResponse-new}{\code{WPSExecuteResponse$new()}}
\item \href{#method-WPSExecuteResponse-getProcess}{\code{WPSExecuteResponse$getProcess()}}
\item \href{#method-WPSExecuteResponse-getStatus}{\code{WPSExecuteResponse$getStatus()}}
\item \href{#method-WPSExecuteResponse-getStatusLocation}{\code{WPSExecuteResponse$getStatusLocation()}}
\item \href{#method-WPSExecuteResponse-getStatusHistory}{\code{WPSExecuteResponse$getStatusHistory()}}
\item \href{#method-WPSExecuteResponse-getProcessOutputs}{\code{WPSExecuteResponse$getProcessOutputs()}}
\item \href{#method-WPSExecuteResponse-getException}{\code{WPSExecuteResponse$getException()}}
\item \href{#method-WPSExecuteResponse-decode}{\code{WPSExecuteResponse$decode()}}
\item \href{#method-WPSExecuteResponse-update}{\code{WPSExecuteResponse$update()}}
\item \href{#method-WPSExecuteResponse-clone}{\code{WPSExecuteResponse$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="ERROR"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-ERROR'><code>ows4R::OGCAbstractObject$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="INFO"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-INFO'><code>ows4R::OGCAbstractObject$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="WARN"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-WARN'><code>ows4R::OGCAbstractObject$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="encode"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-encode'><code>ows4R::OGCAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClass"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClass'><code>ows4R::OGCAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClassName"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClassName'><code>ows4R::OGCAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getNamespaceDefinition"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getNamespaceDefinition'><code>ows4R::OGCAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="isFieldInheritedFrom"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-isFieldInheritedFrom'><code>ows4R::OGCAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="logger"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-logger'><code>ows4R::OGCAbstractObject$logger()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-new"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-new}{}}}
\subsection{Method \code{new()}}{
Initializes a \link{WPSExecuteResponse}
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$new(
xml,
capabilities,
processDescription = NULL,
logger = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class} from \pkg{XML}}
\item{\code{capabilities}}{object of class \link{WPSCapabilities}}
\item{\code{processDescription}}{process description}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getProcess"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getProcess}{}}}
\subsection{Method \code{getProcess()}}{
Get process
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getProcess()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSProcess}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatus"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatus}{}}}
\subsection{Method \code{getStatus()}}{
Get status
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatus()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSStatus}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatusLocation"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatusLocation}{}}}
\subsection{Method \code{getStatusLocation()}}{
Get status location
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatusLocation()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \code{character}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getStatusHistory"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getStatusHistory}{}}}
\subsection{Method \code{getStatusHistory()}}{
Get status history
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getStatusHistory()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \code{character}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getProcessOutputs"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getProcessOutputs}{}}}
\subsection{Method \code{getProcessOutputs()}}{
Get list of process outputs
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getProcessOutputs()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
a \code{list} of outputs
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-getException"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-getException}{}}}
\subsection{Method \code{getException()}}{
Get exception
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$getException()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
an object of class \link{WPSException}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-decode"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-decode}{}}}
\subsection{Method \code{decode()}}{
Decodes an object of class \link{WPSExecuteResponse} from XML
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$decode(xml, capabilities, processDescription, logger)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xml}}{object of class \link{XMLInternalNode-class} from \pkg{XML}}
\item{\code{capabilities}}{object of class \link{WPSCapabilities}}
\item{\code{processDescription}}{process description}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-update"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-update}{}}}
\subsection{Method \code{update()}}{
Updates status history
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$update(verbose = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{verbose}}{verbose. Default is \code{FALSE}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WPSExecuteResponse-clone"></a>}}
\if{latex}{\out{\hypertarget{method-WPSExecuteResponse-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WPSExecuteResponse$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.geoChronR.R
\name{plotLine}
\alias{plotLine}
\title{Plot or add a line to plot}
\usage{
plotLine(add.to.plot = ggplot(), X, Y, color = "black", alp = 1)
}
\arguments{
\item{add.to.plot}{A ggplot object to add these lines to. Default is ggplot() .}
\item{X}{A LiPD variable list to plot, including values, units, names, and more}
\item{Y}{A LiPD variable list to plot, including values, units, names, and more}
\item{color}{Line color (following ggplot rules)}
\item{alp}{Line transparency}
}
\value{
A ggplot object
}
\description{
Plots or adds a line to a plot
}
\seealso{
Other plot:
\code{\link{plotChronEnsDiff}()},
\code{\link{plotChronEns}()},
\code{\link{plotChron}()},
\code{\link{plotCorEns}()},
\code{\link{plotHistEns}()},
\code{\link{plotModelDistributions}()},
\code{\link{plotPcaEns}()},
\code{\link{plotPvalsEnsFdr}()},
\code{\link{plotRegressEns}()},
\code{\link{plotScatterEns}()},
\code{\link{plotScreeEns}()},
\code{\link{plotSpectraEns}()},
\code{\link{plotSpectrum}()},
\code{\link{plotSummaryTs}()},
\code{\link{plotSummary}()},
\code{\link{plotTimeseriesEnsLines}()},
\code{\link{plotTimeseriesEnsRibbons}()},
\code{\link{plotTimeseriesStack}()},
\code{\link{plotTrendLinesEns}()}
}
\author{
Nick McKay
}
\concept{plot}
|
/man/plotLine.Rd
|
permissive
|
nickmckay/GeoChronR
|
R
| false
| true
| 1,335
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.geoChronR.R
\name{plotLine}
\alias{plotLine}
\title{Plot or add a line to plot}
\usage{
plotLine(add.to.plot = ggplot(), X, Y, color = "black", alp = 1)
}
\arguments{
\item{add.to.plot}{A ggplot object to add these lines to. Default is ggplot() .}
\item{X}{A LiPD variable list to plot, including values, units, names, and more}
\item{Y}{A LiPD variable list to plot, including values, units, names, and more}
\item{color}{Line color (following ggplot rules)}
\item{alp}{Line transparency}
}
\value{
A ggplot object
}
\description{
Plots or adds a line to a plot
}
\seealso{
Other plot:
\code{\link{plotChronEnsDiff}()},
\code{\link{plotChronEns}()},
\code{\link{plotChron}()},
\code{\link{plotCorEns}()},
\code{\link{plotHistEns}()},
\code{\link{plotModelDistributions}()},
\code{\link{plotPcaEns}()},
\code{\link{plotPvalsEnsFdr}()},
\code{\link{plotRegressEns}()},
\code{\link{plotScatterEns}()},
\code{\link{plotScreeEns}()},
\code{\link{plotSpectraEns}()},
\code{\link{plotSpectrum}()},
\code{\link{plotSummaryTs}()},
\code{\link{plotSummary}()},
\code{\link{plotTimeseriesEnsLines}()},
\code{\link{plotTimeseriesEnsRibbons}()},
\code{\link{plotTimeseriesStack}()},
\code{\link{plotTrendLinesEns}()}
}
\author{
Nick McKay
}
\concept{plot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fc_locations_transform.R
\name{fc_locations_transform}
\alias{fc_locations_transform}
\title{Transform event locations}
\usage{
fc_locations_transform(data, x, y, dim = c(105, 70), units = "meters")
}
\arguments{
\item{data}{A data frame with event location data.}
\item{x}{A character vector identifying the variables for the x coordinate
locations in \code{data}.}
\item{y}{A character vector identifying the variables for the y coordinate
locations in \code{data}.}
\item{dim}{A numeric vector of length two giving the dimensions of how to
transform the pitch. The x-dimension should be the first component.}
\item{units}{A character vector that provides units for the pitch
dimensions. Use "percent" for when \code{dim = c(100, 100)}.}
}
\value{
A tidy tibble with x and y coordinates transformed according to
\code{dim}. The resulting tibble will have "pitch_dimensions"
and "units" attributes. These will be added or modified depending on if
they exist for \code{data}.
}
\description{
This function transforms the Wyscout event data locations. By default,
the data provided is on a range of 0 - 100\%. Thus, an x-y coordinate pair
of (50, 50) corresponds to midfield.
}
\examples{
# load dplyr for examples
library(dplyr)
# read event data given in package
file_path <- system.file("extdata", "events_england.json", package = "scoutr")
events <- fc_read_events(file_path)
result <- events \%>\%
select(event_sec:end_y) \%>\%
fc_locations_transform(x = c("start_x", "end_x"), y = c("start_y", "end_y"))
# verify attributes
attr(result, "pitch_dimensions")
attr(result, "units")
# transform to meters, then transform back
events \%>\%
select(event_sec:end_y) \%>\%
fc_locations_transform(x = c("start_x", "end_x"),
y = c("start_y", "end_y")) \%>\%
fc_locations_transform(x = c("start_x", "end_x"),
y = c("start_y", "end_y"),
dim = c(100, 100), units = "percent")
}
\references{
\emph{Pappalardo, L., Cintia, P., Rossi, A. et al. A public data
set of spatio-temporal match events in soccer competitions. Sci
Data 6, 236 (2019). \url{https://doi.org/10.1038/s41597-019-0247-7}}
All public Wyscout data is available at \url{https://figshare.com/collections/Soccer_match_event_dataset/4415000/2}
}
|
/man/fc_locations_transform.Rd
|
no_license
|
shawnsanto/scoutr
|
R
| false
| true
| 2,372
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fc_locations_transform.R
\name{fc_locations_transform}
\alias{fc_locations_transform}
\title{Transform event locations}
\usage{
fc_locations_transform(data, x, y, dim = c(105, 70), units = "meters")
}
\arguments{
\item{data}{A data frame with event location data.}
\item{x}{A character vector identifying the variables for the x coordinate
locations in \code{data}.}
\item{y}{A character vector identifying the variables for the y coordinate
locations in \code{data}.}
\item{dim}{A numeric vector of length two giving the dimensions of how to
transform the pitch. The x-dimension should be the first component.}
\item{units}{A character vector that provides units for the pitch
dimensions. Use "percent" for when \code{dim = c(100, 100)}.}
}
\value{
A tidy tibble with x and y coordinates transformed according to
\code{dim}. The resulting tibble will have "pitch_dimensions"
and "units" attributes. These will be added or modified depending on if
they exist for \code{data}.
}
\description{
This function transforms the Wyscout event data locations. By default,
the data provided is on a range of 0 - 100\%. Thus, an x-y coordinate pair
of (50, 50) corresponds to midfield.
}
\examples{
# load dplyr for examples
library(dplyr)
# read event data given in package
file_path <- system.file("extdata", "events_england.json", package = "scoutr")
events <- fc_read_events(file_path)
result <- events \%>\%
select(event_sec:end_y) \%>\%
fc_locations_transform(x = c("start_x", "end_x"), y = c("start_y", "end_y"))
# verify attributes
attr(result, "pitch_dimensions")
attr(result, "units")
# transform to meters, then transform back
events \%>\%
select(event_sec:end_y) \%>\%
fc_locations_transform(x = c("start_x", "end_x"),
y = c("start_y", "end_y")) \%>\%
fc_locations_transform(x = c("start_x", "end_x"),
y = c("start_y", "end_y"),
dim = c(100, 100), units = "percent")
}
\references{
\emph{Pappalardo, L., Cintia, P., Rossi, A. et al. A public data
set of spatio-temporal match events in soccer competitions. Sci
Data 6, 236 (2019). \url{https://doi.org/10.1038/s41597-019-0247-7}}
All public Wyscout data is available at \url{https://figshare.com/collections/Soccer_match_event_dataset/4415000/2}
}
|
# Extract Maize prices from CSVs with ETH commodity prices
#### Libraries ####
library(stringdist) # approximate string matching, used to match market names to coordinates
#### Input Data ####
data.dir.path <- "../ScrapDoc/output" # "data/ETHPriceData"  # directory tree of scraped price CSVs
output.dir.path <- "output/ETHPriceData" # per-source-directory result CSVs are written here
coords.file.path <- "data/ETH_Region_Market_Coords.csv" # market name -> longitude/latitude lookup table
#### Prepare data ####
mrkts.coords <- read.csv(coords.file.path, as.is = TRUE)
# NOTE(review): `pattern` is a regular expression, so "*.csv" is likely intended
# as the glob "\\.csv$" -- confirm it selects only CSV files in practice.
price.csvS.path <- list.files(data.dir.path, full.names = TRUE, recursive = TRUE, pattern = "*.csv")
dir.create(output.dir.path, recursive = TRUE, showWarnings = FALSE) # ensure output directory exists
f <- function(maize.data.row, mnth.year, mrkts.coords) {
  # Build one output record for a single market's maize prices.
  #
  # maize.data.row: character vector -- [1] region, [2] market name,
  #                 [3] unmilled maize price, [4] milled maize price.
  # mnth.year:      file-name fragment such as "May2015.csv" encoding month/year.
  # mrkts.coords:   coordinates lookup; columns 3:6 hold alternative spellings
  #                 of market names, plus Longitude/Latitude columns.
  # Returns a character vector: region, market, month, year, longitude,
  # latitude, unmilled price, milled price.
  rgion <- maize.data.row[1]
  mrkt <- maize.data.row[2]
  # All Addis Ababa sub-markets are collapsed to a single market name.
  if (grepl("addis", tolower(rgion))) mrkt <- "addis ababa"
  # Escaped dot: strip only a literal ".csv" suffix (the unescaped "." matched
  # any character, e.g. it would also strip "Xcsv").
  mnth.year <- sub("\\.csv$", "", mnth.year)
  mnth <- sub("[[:digit:]]+", "", mnth.year)
  yr <- sub("[[:alpha:]]+", "", mnth.year)
  # Try each of the four market-name spelling columns until an exact
  # (distance 0) match is found; otherwise fall through with the 999 sentinel
  # (an out-of-range index, so the coordinates come back NA).
  for (market.names in 3:6) {
    print(market.names) # debug trace -- consider removing in production
    matchdist1 <- stringdist(tolower(mrkt), tolower(mrkts.coords[, market.names]))
    mrkt.row.nos <- which(matchdist1 %in% min(matchdist1))
    if (min(matchdist1) == 0) {
      # Exact match found: report it and stop searching.
      # NOTE(review): if several rows tie at distance 0, mrkt.row.nos has
      # length > 1 and the returned vector gains extra elements -- confirm
      # the coordinates table has unique market names.
      print("AAAAAAAAAAAAAAAAAAAAAAAAA")
      print(mnth.year)
      print(rgion)
      print(mrkt)
      print(market.names)
      print("BBBBBBBBBBBBBBBBBBBBBBBBB")
      break()
    }
    mrkt.row.nos <- 999 # sentinel: no exact match in this column
  }
  mrkt.long <- mrkts.coords$Longitude[mrkt.row.nos]
  mrkt.lati <- mrkts.coords$Latitude[mrkt.row.nos]
  price_UnmilledMaize <- maize.data.row[3]
  price_MilledMaize <- maize.data.row[4]
  return(c(rgion, mrkt, mnth, yr, mrkt.long, mrkt.lati, price_UnmilledMaize, price_MilledMaize))
}
#### Extract Maize information in data files and store in one File ####
maize.info.all <- data.frame() # accumulator for the current source directory
output.csv <- ""               # name of the CSV currently being written
for (price.csv.path in price.csvS.path) {
  # Rows 1:2 of each file hold the region names and market names respectively.
  region.names <- read.csv(price.csv.path, as.is = TRUE, header = FALSE)[1:2, ]
  # Some files cannot be read with the normal header layout; probe once and
  # branch on whether the read failed.
  columns.error <- try(read.csv(price.csv.path, as.is = TRUE, skip = 1))
  if (inherits(columns.error, "try-error")) {
    # Headerless layout: derive the column names from the non-empty market cells.
    price.csv1 <- read.csv(price.csv.path, as.is = TRUE, skip = 2, header = FALSE)
    markts.available1 <- replace(region.names[2, ], region.names[2, ] == "", NA)
    markts.available <- markts.available1[!is.na(markts.available1)]
    price.csv <- price.csv1[, 1:length(markts.available)]
    names(price.csv) <- markts.available
    # Get rows with maize
    maize.rows <- grepl("maize", price.csv$ITEM, ignore.case = TRUE)
    maize.data <- data.frame(t(region.names[, 1:length(markts.available)]),
                             t(price.csv[maize.rows, ]))
  } else {
    price.csv <- read.csv(price.csv.path, as.is = TRUE, skip = 1)
    # Get rows with maize
    maize.rows <- grepl("maize", price.csv$ITEM, ignore.case = TRUE)
    maize.data <- data.frame(t(region.names),
                             t(price.csv[maize.rows, ]))
  }
  # Month/year fragment is encoded at the end of the file name,
  # e.g. "..._12_May2015.csv" -> "May2015.csv".
  mnth.year <- sub("^.+[_][[:digit:]]+[_]", "", basename(price.csv.path))
  # First 4 transposed rows are header metadata; the rest are one row per market.
  maize.info <- apply(tail(maize.data, -4), 1, f, mnth.year = mnth.year, mrkts.coords = mrkts.coords)
  maize.info <- t(maize.info)
  ##### Get Median for Addis Ababa ####
  if (grepl("addis", tolower(maize.info[1]))) {
    # Replace "-" placeholders with NA before computing medians.
    maize.info[, 7][maize.info[, 7] == "-"] <- NA
    maize.info[, 8][maize.info[, 8] == "-"] <- NA
    unmilled.median.price <- median(as.numeric(maize.info[, 7]), na.rm = TRUE)
    milled.median.price <- median(as.numeric(maize.info[, 8]), na.rm = TRUE)
    # Collapse the Addis Ababa sub-markets into a single median row.
    maize.info <- c(maize.info[1, 1:6], unmilled.median.price, milled.median.price)
  }
  #### Append data ####
  # Output is grouped per source directory: reset the accumulator whenever
  # the directory (and hence the target CSV name) changes.
  if (output.csv != paste0(basename(dirname(price.csv.path)), ".csv")) maize.info.all <- data.frame()
  if (length(maize.info.all) > 0) {
    maize.info.all <- rbind(maize.info.all, maize.info)
    colnames(maize.info.all) <- c("Region", "Market", "Month", "Year", "Longitude", "Latitude",
                                  "Unmilled Maize Price Birr.KG", "Milled Maize Price Birr.KG")
    rownames(maize.info.all) <- seq_len(nrow(maize.info.all))
  } else {
    maize.info.all <- maize.info
  }
  ##### Feedback ####
  print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
  print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
  #### Save Data ####
  # Re-written on every iteration so partial results survive an interruption.
  output.csv <- paste0(basename(dirname(price.csv.path)), ".csv")
  write.csv(maize.info.all, file.path(output.dir.path, output.csv))
}
|
/code/Coordinates_prep2.R
|
no_license
|
chamb244/EthPriceModelling
|
R
| false
| false
| 4,760
|
r
|
# Extract Maize prices from CSVs with ETH commodity prices
#### Libraries ####
library(stringdist) # approximate string matching, used to match market names to coordinates
#### Input Data ####
data.dir.path <- "../ScrapDoc/output" # "data/ETHPriceData"  # directory tree of scraped price CSVs
output.dir.path <- "output/ETHPriceData" # per-source-directory result CSVs are written here
coords.file.path <- "data/ETH_Region_Market_Coords.csv" # market name -> longitude/latitude lookup table
#### Prepare data ####
mrkts.coords <- read.csv(coords.file.path, as.is = TRUE)
# NOTE(review): `pattern` is a regular expression, so "*.csv" is likely intended
# as the glob "\\.csv$" -- confirm it selects only CSV files in practice.
price.csvS.path <- list.files(data.dir.path, full.names = TRUE, recursive = TRUE, pattern = "*.csv")
dir.create(output.dir.path, recursive = TRUE, showWarnings = FALSE) # ensure output directory exists
f <- function(maize.data.row, mnth.year, mrkts.coords) {
  # Build one output record for a single market's maize prices.
  #
  # maize.data.row: character vector -- [1] region, [2] market name,
  #                 [3] unmilled maize price, [4] milled maize price.
  # mnth.year:      file-name fragment such as "May2015.csv" encoding month/year.
  # mrkts.coords:   coordinates lookup; columns 3:6 hold alternative spellings
  #                 of market names, plus Longitude/Latitude columns.
  # Returns a character vector: region, market, month, year, longitude,
  # latitude, unmilled price, milled price.
  rgion <- maize.data.row[1]
  mrkt <- maize.data.row[2]
  # All Addis Ababa sub-markets are collapsed to a single market name.
  if (grepl("addis", tolower(rgion))) mrkt <- "addis ababa"
  # Escaped dot: strip only a literal ".csv" suffix (the unescaped "." matched
  # any character, e.g. it would also strip "Xcsv").
  mnth.year <- sub("\\.csv$", "", mnth.year)
  mnth <- sub("[[:digit:]]+", "", mnth.year)
  yr <- sub("[[:alpha:]]+", "", mnth.year)
  # Try each of the four market-name spelling columns until an exact
  # (distance 0) match is found; otherwise fall through with the 999 sentinel
  # (an out-of-range index, so the coordinates come back NA).
  for (market.names in 3:6) {
    print(market.names) # debug trace -- consider removing in production
    matchdist1 <- stringdist(tolower(mrkt), tolower(mrkts.coords[, market.names]))
    mrkt.row.nos <- which(matchdist1 %in% min(matchdist1))
    if (min(matchdist1) == 0) {
      # Exact match found: report it and stop searching.
      # NOTE(review): if several rows tie at distance 0, mrkt.row.nos has
      # length > 1 and the returned vector gains extra elements -- confirm
      # the coordinates table has unique market names.
      print("AAAAAAAAAAAAAAAAAAAAAAAAA")
      print(mnth.year)
      print(rgion)
      print(mrkt)
      print(market.names)
      print("BBBBBBBBBBBBBBBBBBBBBBBBB")
      break()
    }
    mrkt.row.nos <- 999 # sentinel: no exact match in this column
  }
  mrkt.long <- mrkts.coords$Longitude[mrkt.row.nos]
  mrkt.lati <- mrkts.coords$Latitude[mrkt.row.nos]
  price_UnmilledMaize <- maize.data.row[3]
  price_MilledMaize <- maize.data.row[4]
  return(c(rgion, mrkt, mnth, yr, mrkt.long, mrkt.lati, price_UnmilledMaize, price_MilledMaize))
}
#### Extract Maize information in data files and store in one File ####
i = 0
maize.info.all <- data.frame()
output.csv <- ""  # name of the per-directory CSV currently being accumulated
for (price.csv.path in price.csvS.path) {
# First two rows hold the region (row 1) and market (row 2) headers
region.names <- read.csv(price.csv.path, as.is = TRUE, header = FALSE)[1:2,]
# Probe the file layout: if reading with skip = 1 fails, the file uses the
# two-row-header variant handled by the first branch below.
columns.error <- try(read.csv(price.csv.path, as.is = TRUE, skip = 1, ))
if (class(columns.error) == "try-error"){
price.csv1 <- read.csv(price.csv.path, as.is = TRUE, skip = 2, header = FALSE )
# Blank market cells mark unused trailing columns; drop them
markts.available1 <- replace(region.names[2,], region.names[2,]=="", NA)
markts.available <- markts.available1[!is.na(markts.available1)]
price.csv <- price.csv1[,1:length(markts.available)]
names(price.csv) <- markts.available
# Get rows with maize
maize.rows <- grepl("maize", price.csv$ITEM, ignore.case = TRUE)
# Transpose so each market becomes a row: region/market headers first,
# then the maize price columns
maize.data <- data.frame(t(region.names[,1:length(markts.available)]),
t(price.csv[maize.rows, ])
)
}
if (class(columns.error) != "try-error"){
price.csv <- read.csv(price.csv.path, as.is = TRUE, skip = 1, )
head(price.csv)
# Get rows with maize
maize.rows <- grepl("maize", price.csv$ITEM, ignore.case = TRUE)
maize.data <- data.frame(t(region.names),
t(price.csv[maize.rows, ])
)
}
# File names look like "<prefix>_<n>_<Month><Year>.csv"; keep the tail part
mnth.year <- sub("^.+[_][[:digit:]]+[_]", "", basename(price.csv.path))
# Skip the first 4 transposed header rows; f() builds one output row each
maize.info <- apply(tail(maize.data, -4), 1, f, mnth.year = mnth.year, mrkts.coords = mrkts.coords )
maize.info <- t(maize.info)
##### Get Medium for Addis Ababa ###
# Addis Ababa has several sub-markets; collapse them to a single row of
# median prices (columns 7/8 hold the unmilled/milled prices).
if (grepl("addis", tolower(maize.info[1]))) {
# Replace "-" with NA
maize.info[,7][maize.info[,7] == "-"] <- NA
maize.info[,8][maize.info[,8] == "-"] <- NA
unmilled.median.price <- median(as.numeric(maize.info[,7]), na.rm = TRUE)
milled.median.price <- median(as.numeric(maize.info[,8]), na.rm = TRUE)
maize.info <- c(maize.info[1,1:6], unmilled.median.price, milled.median.price)
}
#### Append data ###
# First Check the source directory; Create output CSV per Directory
# A new source directory resets the accumulator so each directory gets its
# own output CSV.
if (output.csv != paste0(basename(dirname(price.csv.path)), ".csv")) maize.info.all <- data.frame()
if(length(maize.info.all) > 0){
maize.info.all <- rbind(maize.info.all, maize.info)
colnames(maize.info.all) <- c("Region", "Market", "Month", "Year", "Longitude", "Latitude",
"Unmilled Maize Price Birr.KG", "Milled Maize Price Birr.KG")
rownames(maize.info.all) <- 1:dim(maize.info.all)[1]
}
if (length(maize.info.all) == 0){
maize.info.all <- maize.info
}
##### Feedback ####
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# print(dirname(price.csv.path))
# print(basename(price.csv.path))
# print(maize.data)
# print(data.frame(maize.info))
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#### Save Data ###
# Re-written every iteration so partial progress is saved per directory
output.csv <- paste0(basename(dirname(price.csv.path)), ".csv")
write.csv( maize.info.all, file.path(output.dir.path, output.csv))
# if (i == 12)break()
# i = i +1
# break()
}
|
#' DislikeAction
#'
#' The act of expressing a negative sentiment about the object. An agent dislikes an object (a proposition, topic or theme) with participants.
#'
#'
#' @param id identifier for the object (URI)
#' @param target (EntryPoint type.) Indicates a target EntryPoint for an Action.
#' @param startTime (DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param result (Thing type.) The result produced in the action. e.g. John wrote *a book*.
#' @param participant (Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.
#' @param object (Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.
#' @param location (Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.
#' @param instrument (Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.
#' @param error (Thing type.) For failed actions, more information on the cause of the failure.
#' @param endTime (DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param agent (Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.
#' @param actionStatus (ActionStatusType type.) Indicates the current disposition of the Action.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:DislikeAction
#'
#' @export
DislikeAction <- function(id = NULL,
                          target = NULL,
                          startTime = NULL,
                          result = NULL,
                          participant = NULL,
                          object = NULL,
                          location = NULL,
                          instrument = NULL,
                          error = NULL,
                          endTime = NULL,
                          agent = NULL,
                          actionStatus = NULL,
                          url = NULL,
                          sameAs = NULL,
                          potentialAction = NULL,
                          name = NULL,
                          mainEntityOfPage = NULL,
                          image = NULL,
                          identifier = NULL,
                          disambiguatingDescription = NULL,
                          description = NULL,
                          alternateName = NULL,
                          additionalType = NULL){
  # Assemble every schema.org property in the canonical order, then drop
  # the entries the caller left unset so the returned list only carries
  # supplied fields (plus the fixed "type" tag).
  fields <- list(
    type = "DislikeAction",
    id = id,
    target = target,
    startTime = startTime,
    result = result,
    participant = participant,
    object = object,
    location = location,
    instrument = instrument,
    error = error,
    endTime = endTime,
    agent = agent,
    actionStatus = actionStatus,
    url = url,
    sameAs = sameAs,
    potentialAction = potentialAction,
    name = name,
    mainEntityOfPage = mainEntityOfPage,
    image = image,
    identifier = identifier,
    disambiguatingDescription = disambiguatingDescription,
    description = description,
    alternateName = alternateName,
    additionalType = additionalType
  )
  fields[!vapply(fields, is.null, logical(1))]
}
|
/R/DislikeAction.R
|
no_license
|
cboettig/schemar
|
R
| false
| false
| 5,604
|
r
|
#' DislikeAction
#'
#' The act of expressing a negative sentiment about the object. An agent dislikes an object (a proposition, topic or theme) with participants.
#'
#'
#' @param id identifier for the object (URI)
#' @param target (EntryPoint type.) Indicates a target EntryPoint for an Action.
#' @param startTime (DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param result (Thing type.) The result produced in the action. e.g. John wrote *a book*.
#' @param participant (Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.
#' @param object (Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.
#' @param location (Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.
#' @param instrument (Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.
#' @param error (Thing type.) For failed actions, more information on the cause of the failure.
#' @param endTime (DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param agent (Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.
#' @param actionStatus (ActionStatusType type.) Indicates the current disposition of the Action.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:DislikeAction
#'
#' @export
DislikeAction <- function(id = NULL,
                          target = NULL,
                          startTime = NULL,
                          result = NULL,
                          participant = NULL,
                          object = NULL,
                          location = NULL,
                          instrument = NULL,
                          error = NULL,
                          endTime = NULL,
                          agent = NULL,
                          actionStatus = NULL,
                          url = NULL,
                          sameAs = NULL,
                          potentialAction = NULL,
                          name = NULL,
                          mainEntityOfPage = NULL,
                          image = NULL,
                          identifier = NULL,
                          disambiguatingDescription = NULL,
                          description = NULL,
                          alternateName = NULL,
                          additionalType = NULL){
  # Assemble every schema.org property in the canonical order, then drop
  # the entries the caller left unset so the returned list only carries
  # supplied fields (plus the fixed "type" tag).
  fields <- list(
    type = "DislikeAction",
    id = id,
    target = target,
    startTime = startTime,
    result = result,
    participant = participant,
    object = object,
    location = location,
    instrument = instrument,
    error = error,
    endTime = endTime,
    agent = agent,
    actionStatus = actionStatus,
    url = url,
    sameAs = sameAs,
    potentialAction = potentialAction,
    name = name,
    mainEntityOfPage = mainEntityOfPage,
    image = image,
    identifier = identifier,
    disambiguatingDescription = disambiguatingDescription,
    description = description,
    alternateName = alternateName,
    additionalType = additionalType
  )
  fields[!vapply(fields, is.null, logical(1))]
}
|
# Plot Global Active Power over 2007-02-01..2007-02-02 as a line chart and
# save it to "plot2.png" (480x480).
#
# Reads "household_power_consumption.txt" (semicolon separated, "?" = NA)
# from the current working directory. Returns the result of dev.off().
plot2 <- function() {
  data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?")
  # BUG FIX: `datetime` was undefined; build the timestamp from the Date
  # and Time columns of the data set.
  datetimed <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
  data <- cbind(data, datetimed)
  # Keep only the two target days (2007-02-01 and 2007-02-02)
  subdata <- data[data$datetimed >= strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S") &
                    data$datetimed < strptime("2007-02-03 00:00:00", "%Y-%m-%d %H:%M:%S"), ]
  png("plot2.png", width = 480, height = 480)
  # Empty frame first so axes are scaled to the full series, then the line
  plot(subdata$datetimed, subdata$Global_active_power, type = "n",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(subdata$datetimed, subdata$Global_active_power)
  dev.off()
}
|
/plot2.R
|
no_license
|
kellyv/ExData_Plotting1
|
R
| false
| false
| 575
|
r
|
# Plot Global Active Power over 2007-02-01..2007-02-02 as a line chart and
# save it to "plot2.png" (480x480).
#
# Reads "household_power_consumption.txt" (semicolon separated, "?" = NA)
# from the current working directory. Returns the result of dev.off().
plot2 <- function() {
  data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?")
  # BUG FIX: `datetime` was undefined; build the timestamp from the Date
  # and Time columns of the data set.
  datetimed <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
  data <- cbind(data, datetimed)
  # Keep only the two target days (2007-02-01 and 2007-02-02)
  subdata <- data[data$datetimed >= strptime("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S") &
                    data$datetimed < strptime("2007-02-03 00:00:00", "%Y-%m-%d %H:%M:%S"), ]
  png("plot2.png", width = 480, height = 480)
  # Empty frame first so axes are scaled to the full series, then the line
  plot(subdata$datetimed, subdata$Global_active_power, type = "n",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(subdata$datetimed, subdata$Global_active_power)
  dev.off()
}
|
# Exercise 2: indexing and filtering vectors
# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20
# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)
# Create a vector `all_numbers` by combining the previous two vectors
# BUG FIX: `first_ten + next_ten` added the vectors element-wise (with a
# length-recycling warning); c() is what combines them.
all_numbers <- c(first_ten, next_ten)
# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]
# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]
# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)
# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
# BUG FIX: `all(even)` only checked that no element is zero/FALSE; the
# evenness test needs the modulo comparison.
all(even %% 2 == 0)
# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)
# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]
# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]
# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
large <- phone_numbers[phone_numbers > 5]
# Replace the values in `phone_numbers` that are larger than 5 with the number 5
# BUG FIX: replace() expects an index (here a logical mask), not the values
# themselves, and its result must be assigned back to take effect.
phone_numbers <- replace(phone_numbers, phone_numbers > 5, 5)
# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers <- replace(phone_numbers, phone_numbers %% 2 == 1, 0)
|
/chapter-07-exercises/exercise-2/exercise.R
|
permissive
|
jqin10/book-exercises
|
R
| false
| false
| 1,636
|
r
|
# Exercise 2: indexing and filtering vectors
# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20
# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)
# Create a vector `all_numbers` by combining the previous two vectors
# BUG FIX: `first_ten + next_ten` added the vectors element-wise (with a
# length-recycling warning); c() is what combines them.
all_numbers <- c(first_ten, next_ten)
# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]
# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]
# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)
# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
# BUG FIX: `all(even)` only checked that no element is zero/FALSE; the
# evenness test needs the modulo comparison.
all(even %% 2 == 0)
# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)
# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]
# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]
# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
large <- phone_numbers[phone_numbers > 5]
# Replace the values in `phone_numbers` that are larger than 5 with the number 5
# BUG FIX: replace() expects an index (here a logical mask), not the values
# themselves, and its result must be assigned back to take effect.
phone_numbers <- replace(phone_numbers, phone_numbers > 5, 5)
# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers <- replace(phone_numbers, phone_numbers %% 2 == 1, 0)
|
# Load the 2018-2020 CONOSCE adjudication workbooks into data frames.
library(readxl)
library(dplyr)
library(tidyr)
library(readr)
library(dplyr)  # NOTE(review): duplicate -- dplyr is already attached above
library(vroom)
library(readstata13)
library(foreign)
library(here)
# Avoid scientific notation when printing large values
options(scipen=999)
# Resolve the xlsx paths below relative to the project root
setwd(here::here("data/2021/2021-01-06/"))
bd2020 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2020_0.xlsx")
bd2019 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2019_0.xlsx")
bd2018 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2018_0.xlsx")
|
/data/2021/2021-01-06/ejemplo.R
|
no_license
|
BESTDATASCIENCE/manos-a-la-data
|
R
| false
| false
| 406
|
r
|
# Load the 2018-2020 CONOSCE adjudication workbooks into data frames.
library(readxl)
library(dplyr)
library(tidyr)
library(readr)
library(dplyr)  # NOTE(review): duplicate -- dplyr is already attached above
library(vroom)
library(readstata13)
library(foreign)
library(here)
# Avoid scientific notation when printing large values
options(scipen=999)
# Resolve the xlsx paths below relative to the project root
setwd(here::here("data/2021/2021-01-06/"))
bd2020 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2020_0.xlsx")
bd2019 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2019_0.xlsx")
bd2018 <- readxl::read_xlsx("CONOSCE_ADJUDICACIONES2018_0.xlsx")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{load_cellSNP_vcf}
\alias{load_cellSNP_vcf}
\title{Load sparse matrices A and D from cellSNP VCF file with filtering SNPs}
\usage{
load_cellSNP_vcf(vcf_file, min_count = 0, min_MAF = 0,
max_other_allele = NULL, rowname_format = "full", keep_GL = FALSE)
}
\arguments{
\item{vcf_file}{character(1), path to VCF file generated from cellSNP}
\item{min_count}{minimum count across all cells, e.g., 20}
\item{min_MAF}{minimum minor allele fraction, e.g., 0.1}
\item{max_other_allele}{maximum ratio of other alleles comparing to REF and
ALT alleles; for cellSNP vcf, we recommend 0.05}
\item{rowname_format}{the format of rowname: NULL is the default from vcfR,
short is CHROM_POS, and full is CHROM_POS_REF_ALT}
\item{keep_GL}{logical(1), if TRUE, check whether GL (genotype probability)
exists; if it does, it will be returned}
}
\description{
Load sparse matrices A and D from cellSNP VCF file with filtering SNPs
}
|
/man/load_cellSNP_vcf.Rd
|
permissive
|
XiaomeiLi1/cardelino
|
R
| false
| true
| 998
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{load_cellSNP_vcf}
\alias{load_cellSNP_vcf}
\title{Load sparse matrices A and D from cellSNP VCF file with filtering SNPs}
\usage{
load_cellSNP_vcf(vcf_file, min_count = 0, min_MAF = 0,
max_other_allele = NULL, rowname_format = "full", keep_GL = FALSE)
}
\arguments{
\item{vcf_file}{character(1), path to VCF file generated from cellSNP}
\item{min_count}{minimum count across all cells, e.g., 20}
\item{min_MAF}{minimum minor allele fraction, e.g., 0.1}
\item{max_other_allele}{maximum ratio of other alleles comparing to REF and
ALT alleles; for cellSNP vcf, we recommend 0.05}
\item{rowname_format}{the format of rowname: NULL is the default from vcfR,
short is CHROM_POS, and full is CHROM_POS_REF_ALT}
\item{keep_GL}{logical(1), if TRUE, check whether GL (genotype probability)
exists; if it does, it will be returned}
}
\description{
Load sparse matrices A and D from cellSNP VCF file with filtering SNPs
}
|
/_gnvcrime/crime_spatial_eda.R
|
no_license
|
cbolch/stories
|
R
| false
| false
| 2,377
|
r
| ||
#!/usr/bin/env Rscript
#
# This file is part of the `OmnipathR` R package
#
# Copyright
# 2018-2021
# Saez Lab, Uniklinik RWTH Aachen, Heidelberg University
#
# File author(s): Alberto Valdeolivas
# Dénes Türei (turei.denes@gmail.com)
# Attila Gábor
#
# Distributed under the MIT (Expat) License.
# See accompanying file `LICENSE` or find a copy at
# https://directory.fsf.org/wiki/License:Expat
#
# Website: https://saezlab.github.io/omnipathr
# Git repo: https://github.com/saezlab/OmnipathR
#
#' Downloads ligand-receptor interactions from Ramilowski et al. 2015
#'
#' Curated ligand-receptor pairs from Supplementary Table 2 of the article
#' "A draft network of ligand-receptor mediated multicellular signaling in
#' human" (\url{https://www.nature.com/articles/ncomms8866}).
#'
#' @return A data frame (tibble) with interactions.
#'
#' @examples
#' rami_interactions <- ramilowski_download()
#' rami_interactions
#' # # A tibble: 2,557 x 16
#' # Pair.Name Ligand.Approved. Ligand.Name Receptor.Approv.
#' # <chr> <chr> <chr> <chr>
#' # 1 A2M_LRP1 A2M alpha-2-ma. LRP1
#' # 2 AANAT_MT. AANAT aralkylami. MTNR1A
#' # 3 AANAT_MT. AANAT aralkylami. MTNR1B
#' # 4 ACE_AGTR2 ACE angiotensi. AGTR2
#' # 5 ACE_BDKR. ACE angiotensi. BDKRB2
#' # # . with 2,547 more rows, and 12 more variables: Receptor.Name <chr>,
#' # # DLRP <chr>, HPMR <chr>, IUPHAR <chr>, HPRD <chr>,
#' # # STRING.binding <chr>, STRING.experiment <chr>, HPMR.Ligand <chr>,
#' # # HPMR.Receptor <chr>, PMID.Manual <chr>, Pair.Source <chr>,
#' # # Pair.Evidence <chr>
#'
#' @export
#' @importFrom magrittr %T>%
ramilowski_download <- function(){

    # Download the 'All.Pairs' sheet of the Ramilowski et al. 2015
    # supplementary xls and keep the resulting tibble.
    rami <- xls_downloader(
        url_key = 'ramilowski',
        sheet = 'All.Pairs',
        resource = 'Ramilowski et al. 2015'
    )

    # The original used the `%T>%` tee-pipe: load_success() is called only
    # for its side effect and the data passes through unchanged.
    load_success(rami)

    rami

}
|
/R/ramilowski.R
|
permissive
|
kerwin12580/OmnipathR
|
R
| false
| false
| 1,927
|
r
|
#!/usr/bin/env Rscript
#
# This file is part of the `OmnipathR` R package
#
# Copyright
# 2018-2021
# Saez Lab, Uniklinik RWTH Aachen, Heidelberg University
#
# File author(s): Alberto Valdeolivas
# Dénes Türei (turei.denes@gmail.com)
# Attila Gábor
#
# Distributed under the MIT (Expat) License.
# See accompanying file `LICENSE` or find a copy at
# https://directory.fsf.org/wiki/License:Expat
#
# Website: https://saezlab.github.io/omnipathr
# Git repo: https://github.com/saezlab/OmnipathR
#
#' Downloads ligand-receptor interactions from Ramilowski et al. 2015
#'
#' Curated ligand-receptor pairs from Supplementary Table 2 of the article
#' "A draft network of ligand-receptor mediated multicellular signaling in
#' human" (\url{https://www.nature.com/articles/ncomms8866}).
#'
#' @return A data frame (tibble) with interactions.
#'
#' @examples
#' rami_interactions <- ramilowski_download()
#' rami_interactions
#' # # A tibble: 2,557 x 16
#' # Pair.Name Ligand.Approved. Ligand.Name Receptor.Approv.
#' # <chr> <chr> <chr> <chr>
#' # 1 A2M_LRP1 A2M alpha-2-ma. LRP1
#' # 2 AANAT_MT. AANAT aralkylami. MTNR1A
#' # 3 AANAT_MT. AANAT aralkylami. MTNR1B
#' # 4 ACE_AGTR2 ACE angiotensi. AGTR2
#' # 5 ACE_BDKR. ACE angiotensi. BDKRB2
#' # # . with 2,547 more rows, and 12 more variables: Receptor.Name <chr>,
#' # # DLRP <chr>, HPMR <chr>, IUPHAR <chr>, HPRD <chr>,
#' # # STRING.binding <chr>, STRING.experiment <chr>, HPMR.Ligand <chr>,
#' # # HPMR.Receptor <chr>, PMID.Manual <chr>, Pair.Source <chr>,
#' # # Pair.Evidence <chr>
#'
#' @export
#' @importFrom magrittr %T>%
ramilowski_download <- function(){

    # Download the 'All.Pairs' sheet of the Ramilowski et al. 2015
    # supplementary xls and keep the resulting tibble.
    rami <- xls_downloader(
        url_key = 'ramilowski',
        sheet = 'All.Pairs',
        resource = 'Ramilowski et al. 2015'
    )

    # The original used the `%T>%` tee-pipe: load_success() is called only
    # for its side effect and the data passes through unchanged.
    load_success(rami)

    rami

}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/attribution.r
\name{attribution}
\alias{attribution}
\title{Add an attribution label to a plot}
\usage{
attribution(text = "Lincoln Mullen <http://lincolnmullen.com>", size = 0.75,
color = "gray10")
}
\arguments{
\item{text}{label text}
\item{size}{size of the label text}
\item{color}{color of the label text}
}
\description{
This function adds an attribution label to a plot. You can customize the
text and appearance of the label. This function is adapted from Kieran
Healy's code.
}
\examples{
plot(faithful)
attribution()
}
|
/man/attribution.Rd
|
no_license
|
Deerluluolivia/mullenMisc
|
R
| false
| false
| 620
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/attribution.r
\name{attribution}
\alias{attribution}
\title{Add an attribution label to a plot}
\usage{
attribution(text = "Lincoln Mullen <http://lincolnmullen.com>", size = 0.75,
color = "gray10")
}
\arguments{
\item{text}{label text}
\item{size}{size of the label text}
\item{color}{color of the label text}
}
\description{
This function adds an attribution label to a plot. You can customize the
text and appearance of the label. This function is adapted from Kieran
Healy's code.
}
\examples{
plot(faithful)
attribution()
}
|
# Plot 1: 480x480 PNG histogram of Global Active Power for 2007-02-01..02.
library("data.table")
# NOTE(review): machine-specific setwd(); run from that directory or adapt.
setwd("~/Documents/DataScienceCoursera/Exploratory_Data_Analysis/project/data")
# "?" encodes missing readings in the source file
powerDT <- data.table::fread(input = "household_power_consumption.txt"
                             , na.strings="?"
)
# Coerce the power column to numeric and parse Date (dd/mm/yyyy)
powerDT[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
powerDT[, Date := lapply(.SD, as.Date, "%d/%m/%Y"), .SDcols = c("Date")]
# Keep only the two target days
powerDT <- powerDT[(Date >= "2007-02-01") & (Date <= "2007-02-02")]
png("image1.png", width=480, height=480)
hist(powerDT[, Global_active_power], main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off()
|
/plot1.r
|
no_license
|
LinaBrand/Exploratory-Data-Analysis---Project
|
R
| false
| false
| 702
|
r
|
# Plot 1: 480x480 PNG histogram of Global Active Power for 2007-02-01..02.
library("data.table")
# NOTE(review): machine-specific setwd(); run from that directory or adapt.
setwd("~/Documents/DataScienceCoursera/Exploratory_Data_Analysis/project/data")
# "?" encodes missing readings in the source file
powerDT <- data.table::fread(input = "household_power_consumption.txt"
                             , na.strings="?"
)
# Coerce the power column to numeric and parse Date (dd/mm/yyyy)
powerDT[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
powerDT[, Date := lapply(.SD, as.Date, "%d/%m/%Y"), .SDcols = c("Date")]
# Keep only the two target days
powerDT <- powerDT[(Date >= "2007-02-01") & (Date <= "2007-02-02")]
png("image1.png", width=480, height=480)
hist(powerDT[, Global_active_power], main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off()
|
#!/usr/bin/env Rscript
# Preprocess COMPAS data for experiments
#
# NOTE(review): the *_PATH / *_RDS constants, TARGET_SITES, LMH_LABELS and
# the tidyverse functions (read_csv, map, %>%, ...) are expected to be
# defined/attached by a driver that sources this script -- confirm.
# Preprocess Compas data ----------------------------------------------------------
compas_df <- read_csv(COMPAS_SCORE_PATH)
# Vignette subset used as the held-out test fold; normalize the awkward
# "charge_degree (misd/fel)" column name.
test_df <- read_csv(VIGN_PATH) %>%
  rename(degree = `charge_degree (misd/fel)`)
# Write violent recidivism data
# Swap the generic outcome for the violent-recidivism flag from COMPAS.
test_df %>%
  select(-outcome) %>%
  left_join(compas_df %>% select(id, outcome = is_violent_recid), by = "id") %>%
  write_csv(VIOLENT_VIGN_PATH)
# Write fulldata rds
# Join cleaned Broward data with COMPAS scores, dropping columns present on
# both sides, and tag each row with its train/test fold.
read_csv(BROWARD_CLEAN_PATH) %>%
  rename(degree = `charge_degree (misd/fel)`) %>%
  left_join(
    compas_df %>%
      select(
        -race,
        -sex,
        -age,-juv_fel_count,
        -juv_misd_count,
        -priors_count
      ),
    by = "id"
  ) %>%
  select(-decile_score_1) %>%
  mutate(fold = if_else(id %in% test_df$id, "test", "train")) %>%
  write_rds(DF_FULL_RDS)
# Preprocess LSI data ----------------------------------------------------------
# Check if raw LSI files exists
# The per-site CSVs live under PRIVATE_DATA_DIR and may be absent in public
# checkouts; the whole LSI step is skipped when any of them is missing.
all_raw_lsi_files_exist <- all(unlist(map(TARGET_SITES,
                                          ~ file.exists(
                                            file.path(
                                              PRIVATE_DATA_DIR,
                                              paste0("individuals/lsi_vignettes_site_",
                                                     .x, ".csv")
                                            )
                                          ))))
# if raw LSI files exists, pre-process the data
if (all_raw_lsi_files_exist) {
  # Read data
  # Stack all sites into one frame and bucket each LSI subscale into
  # low/medium/high categories (cut points differ per subscale).
  lsi_full_df <- map_dfr(
    TARGET_SITES,
    ~
      read_csv(file.path(
        PRIVATE_DATA_DIR,
        paste0("individuals/lsi_vignettes_site_", .x, ".csv")
      )) %>%
      mutate(
        ch_cat = cut(ch, c(-Inf, 1, 3, Inf), LMH_LABELS),
        ee_cat = cut(ee, c(-Inf, 1, 3, Inf), LMH_LABELS),
        fin_cat = cut(fin, c(-Inf, 0, 1, Inf), LMH_LABELS),
        fam_cat = cut(fam, c(-Inf, 0, 1, Inf), LMH_LABELS),
        acc_cat = cut(acc, c(-Inf, 0, 1, Inf), LMH_LABELS),
        leisure_cat = cut(leisure, c(-Inf, 0, 1, Inf), LMH_LABELS),
        peers_cat = cut(peers, c(-Inf, 1, 2, Inf), LMH_LABELS),
        drugs_cat = cut(drugs, c(-Inf, 0, 2, Inf), LMH_LABELS),
        mh_cat = cut(mh, c(-Inf, 0, 1, Inf), LMH_LABELS),
        cog_cat = cut(cog, c(-Inf, 0, 1, Inf), LMH_LABELS)
      ) %>%
      select(-hispanic) # un-used missing column
  )
  # Clean data and compute estimates
  # Keep only rows complete in the modeling columns before caching to RDS.
  complete_rows <- lsi_full_df %>%
    select(recid, age, male, ends_with("_cat")) %>%
    complete.cases
  lsi_full_df <- lsi_full_df %>%
    filter(complete_rows)
  write_rds(lsi_full_df, LSI_ALL_RDS)
} else {
  message("No LSI raw data, skipping creating LSI_ALL_RDS...")
}
|
/src/preprocess.R
|
no_license
|
jballesterosc/recidivism-predictions
|
R
| false
| false
| 2,729
|
r
|
#!/usr/bin/env Rscript
# Preprocess COMPAS data for experiments
#
# NOTE(review): the *_PATH / *_RDS constants, TARGET_SITES, LMH_LABELS and
# the tidyverse functions (read_csv, map, %>%, ...) are expected to be
# defined/attached by a driver that sources this script -- confirm.
# Preprocess Compas data ----------------------------------------------------------
compas_df <- read_csv(COMPAS_SCORE_PATH)
# Vignette subset used as the held-out test fold; normalize the awkward
# "charge_degree (misd/fel)" column name.
test_df <- read_csv(VIGN_PATH) %>%
  rename(degree = `charge_degree (misd/fel)`)
# Write violent recidivism data
# Swap the generic outcome for the violent-recidivism flag from COMPAS.
test_df %>%
  select(-outcome) %>%
  left_join(compas_df %>% select(id, outcome = is_violent_recid), by = "id") %>%
  write_csv(VIOLENT_VIGN_PATH)
# Write fulldata rds
# Join cleaned Broward data with COMPAS scores, dropping columns present on
# both sides, and tag each row with its train/test fold.
read_csv(BROWARD_CLEAN_PATH) %>%
  rename(degree = `charge_degree (misd/fel)`) %>%
  left_join(
    compas_df %>%
      select(
        -race,
        -sex,
        -age,-juv_fel_count,
        -juv_misd_count,
        -priors_count
      ),
    by = "id"
  ) %>%
  select(-decile_score_1) %>%
  mutate(fold = if_else(id %in% test_df$id, "test", "train")) %>%
  write_rds(DF_FULL_RDS)
# Preprocess LSI data ----------------------------------------------------------
# Check if raw LSI files exists
# The per-site CSVs live under PRIVATE_DATA_DIR and may be absent in public
# checkouts; the whole LSI step is skipped when any of them is missing.
all_raw_lsi_files_exist <- all(unlist(map(TARGET_SITES,
                                          ~ file.exists(
                                            file.path(
                                              PRIVATE_DATA_DIR,
                                              paste0("individuals/lsi_vignettes_site_",
                                                     .x, ".csv")
                                            )
                                          ))))
# if raw LSI files exists, pre-process the data
if (all_raw_lsi_files_exist) {
  # Read data
  # Stack all sites into one frame and bucket each LSI subscale into
  # low/medium/high categories (cut points differ per subscale).
  lsi_full_df <- map_dfr(
    TARGET_SITES,
    ~
      read_csv(file.path(
        PRIVATE_DATA_DIR,
        paste0("individuals/lsi_vignettes_site_", .x, ".csv")
      )) %>%
      mutate(
        ch_cat = cut(ch, c(-Inf, 1, 3, Inf), LMH_LABELS),
        ee_cat = cut(ee, c(-Inf, 1, 3, Inf), LMH_LABELS),
        fin_cat = cut(fin, c(-Inf, 0, 1, Inf), LMH_LABELS),
        fam_cat = cut(fam, c(-Inf, 0, 1, Inf), LMH_LABELS),
        acc_cat = cut(acc, c(-Inf, 0, 1, Inf), LMH_LABELS),
        leisure_cat = cut(leisure, c(-Inf, 0, 1, Inf), LMH_LABELS),
        peers_cat = cut(peers, c(-Inf, 1, 2, Inf), LMH_LABELS),
        drugs_cat = cut(drugs, c(-Inf, 0, 2, Inf), LMH_LABELS),
        mh_cat = cut(mh, c(-Inf, 0, 1, Inf), LMH_LABELS),
        cog_cat = cut(cog, c(-Inf, 0, 1, Inf), LMH_LABELS)
      ) %>%
      select(-hispanic) # un-used missing column
  )
  # Clean data and compute estimates
  # Keep only rows complete in the modeling columns before caching to RDS.
  complete_rows <- lsi_full_df %>%
    select(recid, age, male, ends_with("_cat")) %>%
    complete.cases
  lsi_full_df <- lsi_full_df %>%
    filter(complete_rows)
  write_rds(lsi_full_df, LSI_ALL_RDS)
} else {
  message("No LSI raw data, skipping creating LSI_ALL_RDS...")
}
|
#Author: Mallory Barnes
#Date: 08/13/2016
#Input: QA/QC-ed A/CI Curves
#Output: Data frame with the outputs of fit_ACI from plant_ecophys
#Necessary Packages:
library(devtools)
library(PEcAn.photosynthesis)
library(plantecophys)
#Purpose: fitting A/Ci curves to files using Plant_Ecophys.
#Then create summary df for work with stats
#Load up saved datafile
#NOTE(review): absolute Windows path is not portable -- consider a relative
#path (or a configurable data directory) so this runs on other machines.
dat <- read.csv("C:/Users/Mallory/Dropbox/QC_3_5_2017.csv")
#Format dat$fname as factor (fitacis groups curves by this column)
dat$fname <- as.factor(dat$fname)
#Originally did it the hard way until I found "fitacis":
#by(dat, dat$fname, fitaci)
#Fit curves to all A/Ci files (one fitaci fit per fname level)
fits <- fitacis(dat, "fname")
## Trying to figure out 'Photosyn' to find Ci/Ca ratio##
#Single coupled photosynthesis / stomatal-conductance simulation; every
#argument is spelled out explicitly here as a reference for the defaults.
plot(Photosyn(VPD = 1.5, Ca = 400, PPFD = 1500, Tleaf = 25, Patm = 100,
RH = NULL, gsmodel = c("BBOpti", "BBLeuning", "BallBerry"), g1 = 4,
g0 = 0, gk = 0.5, vpdmin = 0.5, D0 = 5, GS = NULL, alpha = 0.24,
theta = 0.85, Jmax = 100, Vcmax = 50, gmeso = NULL, TPU = 1000,
Rd0 = 0.92, Q10 = 1.92, Rd = NULL, TrefR = 25, Rdayfrac = 1,
EaV = 82620.87, EdVC = 0, delsC = 645.1013, EaJ = 39676.89,
EdVJ = 2e+05, delsJ = 641.3615, GammaStar = NULL, Km = NULL,
Ci = NULL, Tcorrect = TRUE, returnParsOnly = FALSE, whichA = c("Ah",
"Amin", "Ac", "Aj")))
#Plot Vcmax by Jmax
with(coef(fits), plot(Vcmax, Jmax))
#Extract 1 curve:
fits[[1]]
plot(fits[[1]])
#Plot all curves separately
plot(fits)
#Plot all curves in 1 plot
plot(fits, how="oneplot")
#Can summarize elements using sapply
rmses <- sapply(fits, "[[", "RMSE")
#Plot worst fitting curve (largest RMSE)
plot(fits[[which.max(rmses)]])
#This is what we want for analysis: fitted Vcmax/Jmax per curve
vcmax_jmax <- coef(fits)
#Parse filename to get Plant ID and Date using substr
vcmax_jmax$fname <- as.character(vcmax_jmax$fname)
#vcmax_jmax$PlantID -> substr(as.character(vcmax_jmax$fname),8,10)
#substr(vcmax_jmax$fname,8,10)
str(vcmax_jmax)
write.csv(vcmax_jmax, "Estimates_3_5_2017.csv")
#Diagnostic: flag fits whose Jmax standard error is large (> 5) for review
plot(vcmax_jmax$Jmax, vcmax_jmax$Jmax_SE)
to_review <- subset(vcmax_jmax, Jmax_SE >5)
|
/04_Fit_Model.R
|
no_license
|
GRSEB9S/Drought_Expt_2016
|
R
| false
| false
| 2,182
|
r
|
#Author: Mallory Barnes
#Date: 08/13/2016
#Input: QA/QC-ed A/CI Curves
#Output: Data frame with the outputs of fit_ACI from plant_ecophys
#Necessary Packages:
library(devtools)
library(PEcAn.photosynthesis)
library(plantecophys)
#Purpose: fitting A/Ci curves to files using Plant_Ecophys.
#Then create summary df for work with stats
#Load up saved datafile
#NOTE(review): absolute Windows path is not portable -- consider a relative
#path (or a configurable data directory) so this runs on other machines.
dat <- read.csv("C:/Users/Mallory/Dropbox/QC_3_5_2017.csv")
#Format dat$fname as factor (fitacis groups curves by this column)
dat$fname <- as.factor(dat$fname)
#Originally did it the hard way until I found "fitacis":
#by(dat, dat$fname, fitaci)
#Fit curves to all A/Ci files (one fitaci fit per fname level)
fits <- fitacis(dat, "fname")
## Trying to figure out 'Photosyn' to find Ci/Ca ratio##
#Single coupled photosynthesis / stomatal-conductance simulation; every
#argument is spelled out explicitly here as a reference for the defaults.
plot(Photosyn(VPD = 1.5, Ca = 400, PPFD = 1500, Tleaf = 25, Patm = 100,
RH = NULL, gsmodel = c("BBOpti", "BBLeuning", "BallBerry"), g1 = 4,
g0 = 0, gk = 0.5, vpdmin = 0.5, D0 = 5, GS = NULL, alpha = 0.24,
theta = 0.85, Jmax = 100, Vcmax = 50, gmeso = NULL, TPU = 1000,
Rd0 = 0.92, Q10 = 1.92, Rd = NULL, TrefR = 25, Rdayfrac = 1,
EaV = 82620.87, EdVC = 0, delsC = 645.1013, EaJ = 39676.89,
EdVJ = 2e+05, delsJ = 641.3615, GammaStar = NULL, Km = NULL,
Ci = NULL, Tcorrect = TRUE, returnParsOnly = FALSE, whichA = c("Ah",
"Amin", "Ac", "Aj")))
#Plot Vcmax by Jmax
with(coef(fits), plot(Vcmax, Jmax))
#Extract 1 curve:
fits[[1]]
plot(fits[[1]])
#Plot all curves separately
plot(fits)
#Plot all curves in 1 plot
plot(fits, how="oneplot")
#Can summarize elements using sapply
rmses <- sapply(fits, "[[", "RMSE")
#Plot worst fitting curve (largest RMSE)
plot(fits[[which.max(rmses)]])
#This is what we want for analysis: fitted Vcmax/Jmax per curve
vcmax_jmax <- coef(fits)
#Parse filename to get Plant ID and Date using substr
vcmax_jmax$fname <- as.character(vcmax_jmax$fname)
#vcmax_jmax$PlantID -> substr(as.character(vcmax_jmax$fname),8,10)
#substr(vcmax_jmax$fname,8,10)
str(vcmax_jmax)
write.csv(vcmax_jmax, "Estimates_3_5_2017.csv")
#Diagnostic: flag fits whose Jmax standard error is large (> 5) for review
plot(vcmax_jmax$Jmax, vcmax_jmax$Jmax_SE)
to_review <- subset(vcmax_jmax, Jmax_SE >5)
|
#
# Author: Casey, Jason P.
# Create Date: 15MAY2015
# Description: Reader script for IPEDS Completions, AY2009-2010
# Notes:
#   CIP codes are 2010 taxonomy.
#   Column classes are explicitly defined because periods (.) mark missing values.
#
# Load necessary add-ins
library(reshape2)
library(plyr)
# infile and outfile are set to the input and output file paths
infile <- "raw-data/completions/c2010_a.csv"
outfile <- "output-data/completions/c_2010_dat.csv"
# Assign academic year (stamped onto every output record)
year <- 2010L
# Layout of c2010_a.csv: unitid (int), cipcode (chr), majornum (int),
# awlevel (int), then 60 pairs of imputation-flag (chr) / count (int) columns.
# This replaces the former 124-element literal vector with an equivalent,
# auditable construction.
col_classes <- c("integer", "character", "integer", "integer",
                 rep(c("character", "integer"), 60))
# Read the data
dat <- read.table(file = infile,
                  header = TRUE,
                  sep = ",",
                  quote = "\"",
                  skip = 0,
                  colClasses = col_classes,
                  row.names = NULL,
                  stringsAsFactors = FALSE,
                  fileEncoding = "utf-8")
# Cleanup
rm(infile)
# Convert names to lower case for consistency
names(dat) <- tolower(names(dat))
# Assign DataYear value
dat$DataYear <- year
# Keep only detailed 7-character CIP codes (drops summary-level rows)
dat <- subset(dat, nchar(cipcode) == 7)
dat$cipcode <- as.numeric(dat$cipcode)
# Rename fields to proper nomenclature
names(dat)[names(dat) == "unitid"] <- "Unitid"
names(dat)[names(dat) == "cipcode"] <- "Cip"
names(dat)[names(dat) == "majornum"] <- "MajorNumber"
names(dat)[names(dat) == "awlevel"] <- "AwardLevel"
# Subset the data, including only needed variables
dat <- subset(dat, select = c("Unitid", "DataYear", "Cip", "MajorNumber",
                              "AwardLevel",
                              "dvcaim", "dvcaiw", "dvcapm", "dvcapw",
                              "dvcbkm", "dvcbkw", "dvchsm", "dvchsw",
                              "dvcwhm", "dvcwhw", "c2morm", "c2morw",
                              "cunknm", "cunknw", "cnralm", "cnralw"))
# Convert from wide to long data format.
# NOTE: renamed from `c` -- naming a data frame `c` shadows base::c() and
# invites confusion even though R's function lookup still resolves c() calls.
completions <- melt(dat,
                    id.vars = c("Unitid", "DataYear", "Cip", "MajorNumber",
                                "AwardLevel"),
                    variable.name = "Variable",
                    value.name = "Awards")
# Cleanup
rm(dat)
# Source column names for the demographic recodes below; defined once and
# reused so the two mapvalues() calls cannot drift out of sync.
variable_levels <- c("dvcaim", "dvcaiw", "dvcapm", "dvcapw",
                     "dvcbkm", "dvcbkw", "dvchsm", "dvchsw",
                     "dvcwhm", "dvcwhw", "c2morm", "c2morw",
                     "cunknm", "cunknw", "cnralm", "cnralw")
# Assign ND Race/Ethnicity codes to RaceEthnicity factor
# N - Nonresident Alien
# A - American Indian or Alaska Native
# B - Black or African-American
# O - Asian
# S - Hispanic
# P - Native Hawaiian or Other Pacific Islander
# C - White
# T - Two or More Races
# U - Unknown
completions$RaceEthnicity <- factor(mapvalues(completions$Variable,
                                              variable_levels,
                                              c("A", "A", "O", "O",
                                                "B", "B", "S", "S",
                                                "C", "C", "T", "T",
                                                "U", "U", "N", "N")))
# Assign ND Gender codes to Sex factor
# F - Female (or Women)
# M - Male (Or Men)
# Columns alternate men/women, so the target codes are simply 8 (M, F) pairs.
completions$Sex <- factor(mapvalues(completions$Variable,
                                    variable_levels,
                                    rep(c("M", "F"), 8)))
# Remove empty fields and totals (CIP family 99 is the grand-total pseudo-code)
completions <- subset(completions,
                      Awards > 0 & Cip < 99,
                      select = c("Unitid", "DataYear", "Cip", "MajorNumber",
                                 "AwardLevel", "RaceEthnicity", "Sex",
                                 "Awards"))
# Write the outfile. Empty fields are left null
write.table(completions,
            file = outfile,
            append = FALSE,
            quote = TRUE,
            sep = ",",
            row.names = FALSE, # If you want to create row numbers, set this to TRUE
            col.names = TRUE,
            na = "")           # Set the missing values to blanks
# Cleanup memory
rm(outfile)
rm(completions)
|
/r-scripts/completions/c_2010.R
|
permissive
|
jasonpcasey/chedb
|
R
| false
| false
| 11,626
|
r
|
#
# Author: Casey, Jason P.
# Create Date: 15MAY2015
# Description: Reader script for IPEDS Completions, AY2009-2010
# Notes:
#   CIP codes are 2010 taxonomy.
#   Column classes are explicitly defined because periods (.) mark missing values.
#
# Load necessary add-ins
library(reshape2)
library(plyr)
# infile and outfile are set to the input and output file paths
infile <- "raw-data/completions/c2010_a.csv"
outfile <- "output-data/completions/c_2010_dat.csv"
# Assign academic year (stamped onto every output record)
year <- 2010L
# Layout of c2010_a.csv: unitid (int), cipcode (chr), majornum (int),
# awlevel (int), then 60 pairs of imputation-flag (chr) / count (int) columns.
# This replaces the former 124-element literal vector with an equivalent,
# auditable construction.
col_classes <- c("integer", "character", "integer", "integer",
                 rep(c("character", "integer"), 60))
# Read the data
dat <- read.table(file = infile,
                  header = TRUE,
                  sep = ",",
                  quote = "\"",
                  skip = 0,
                  colClasses = col_classes,
                  row.names = NULL,
                  stringsAsFactors = FALSE,
                  fileEncoding = "utf-8")
# Cleanup
rm(infile)
# Convert names to lower case for consistency
names(dat) <- tolower(names(dat))
# Assign DataYear value
dat$DataYear <- year
# Keep only detailed 7-character CIP codes (drops summary-level rows)
dat <- subset(dat, nchar(cipcode) == 7)
dat$cipcode <- as.numeric(dat$cipcode)
# Rename fields to proper nomenclature
names(dat)[names(dat) == "unitid"] <- "Unitid"
names(dat)[names(dat) == "cipcode"] <- "Cip"
names(dat)[names(dat) == "majornum"] <- "MajorNumber"
names(dat)[names(dat) == "awlevel"] <- "AwardLevel"
# Subset the data, including only needed variables
dat <- subset(dat, select = c("Unitid", "DataYear", "Cip", "MajorNumber",
                              "AwardLevel",
                              "dvcaim", "dvcaiw", "dvcapm", "dvcapw",
                              "dvcbkm", "dvcbkw", "dvchsm", "dvchsw",
                              "dvcwhm", "dvcwhw", "c2morm", "c2morw",
                              "cunknm", "cunknw", "cnralm", "cnralw"))
# Convert from wide to long data format.
# NOTE: renamed from `c` -- naming a data frame `c` shadows base::c() and
# invites confusion even though R's function lookup still resolves c() calls.
completions <- melt(dat,
                    id.vars = c("Unitid", "DataYear", "Cip", "MajorNumber",
                                "AwardLevel"),
                    variable.name = "Variable",
                    value.name = "Awards")
# Cleanup
rm(dat)
# Source column names for the demographic recodes below; defined once and
# reused so the two mapvalues() calls cannot drift out of sync.
variable_levels <- c("dvcaim", "dvcaiw", "dvcapm", "dvcapw",
                     "dvcbkm", "dvcbkw", "dvchsm", "dvchsw",
                     "dvcwhm", "dvcwhw", "c2morm", "c2morw",
                     "cunknm", "cunknw", "cnralm", "cnralw")
# Assign ND Race/Ethnicity codes to RaceEthnicity factor
# N - Nonresident Alien
# A - American Indian or Alaska Native
# B - Black or African-American
# O - Asian
# S - Hispanic
# P - Native Hawaiian or Other Pacific Islander
# C - White
# T - Two or More Races
# U - Unknown
completions$RaceEthnicity <- factor(mapvalues(completions$Variable,
                                              variable_levels,
                                              c("A", "A", "O", "O",
                                                "B", "B", "S", "S",
                                                "C", "C", "T", "T",
                                                "U", "U", "N", "N")))
# Assign ND Gender codes to Sex factor
# F - Female (or Women)
# M - Male (Or Men)
# Columns alternate men/women, so the target codes are simply 8 (M, F) pairs.
completions$Sex <- factor(mapvalues(completions$Variable,
                                    variable_levels,
                                    rep(c("M", "F"), 8)))
# Remove empty fields and totals (CIP family 99 is the grand-total pseudo-code)
completions <- subset(completions,
                      Awards > 0 & Cip < 99,
                      select = c("Unitid", "DataYear", "Cip", "MajorNumber",
                                 "AwardLevel", "RaceEthnicity", "Sex",
                                 "Awards"))
# Write the outfile. Empty fields are left null
write.table(completions,
            file = outfile,
            append = FALSE,
            quote = TRUE,
            sep = ",",
            row.names = FALSE, # If you want to create row numbers, set this to TRUE
            col.names = TRUE,
            na = "")           # Set the missing values to blanks
# Cleanup memory
rm(outfile)
rm(completions)
|
[
{
"title": "Collinearity and stepwise VIF selection",
"href": "https://beckmw.wordpress.com/2013/02/05/collinearity-and-stepwise-vif-selection/"
},
{
"title": "Open data and ecological fallacy",
"href": "http://blog.free.fr/"
},
{
"title": "R is a cool image editor!",
"href": "http://statistic-on-air.blogspot.com/2010/11/r-is-cool-image-editor.html"
},
{
"title": "Confident package releases in R with crant",
"href": "https://cartesianfaith.com/2012/11/29/confident-package-releases-in-r-with-crant/"
},
{
"title": "Ebola in the Congo",
"href": "http://www.scipirate.com/"
},
{
"title": "National identification number: Finland part2",
"href": "http://xrgb.blogspot.com/2013/01/national-identification-number-finland.html"
},
{
"title": "Seven quick facts about R",
"href": "http://blog.revolutionanalytics.com/2014/04/seven-quick-facts-about-r.html"
},
{
"title": "Mapped: British, Spanish and Dutch Shipping 1750-1800",
"href": "http://spatial.ly/2012/03/mapped-british-shipping-1750-1800/"
},
{
"title": "Trigonometric Pattern Design",
"href": "https://aschinchon.wordpress.com/2015/07/01/trigonometric-pattern-design/"
},
{
"title": "Seattle histogram",
"href": "https://xianblog.wordpress.com/2015/08/16/seattle-histogram/"
},
{
"title": "Initialize yourself",
"href": "https://web.archive.org/web/http://anotherrblog.blogspot.com/2013/01/initialize-yourself.html"
},
{
"title": "Revolution R renamed Microsoft R, available free to developers and students",
"href": "http://blog.revolutionanalytics.com/2016/01/microsoft-r-open.html"
},
{
"title": "Descriptive Analytics-Part 1: Data Formatting Exercises",
"href": "http://r-exercises.com/2016/10/26/descriptive-analytics-part-1-data-formatting/"
},
{
"title": "sixty two-minute r twotorials now available",
"href": "http://www.twotorials.com/2012/04/sixty-two-minute-r-twotorials-now.html"
},
{
"title": "Import Japanese equity data into R with quantmod 0.4-4",
"href": "http://blog.fosstrading.com/2015/03/import-japanese-equity-data-into-r.html"
},
{
"title": "DailyMeteo.org – 2014 Conference",
"href": "http://www.milanor.net/blog/dailymeteo-org-2014-conference/"
},
{
"title": "How to run R in the cloud (for teaching)",
"href": "https://web.archive.org/web/http://blog.datacamp.com/2013/07/23/how-to-run-r-in-the-cloud-for-teaching/"
},
{
"title": "Air Pollution (PM10 and PM2.5) in Different Cities using Interactive Charts",
"href": "http://www.analyticsandvisualization.com/2015/06/air-pollution-pm10-and-pm25-in.html"
},
{
"title": "Some Applications of Item Response Theory in R",
"href": "http://joelcadwell.blogspot.com/2015/01/some-applications-of-item-response.html"
},
{
"title": "Scraping Web Pages With R",
"href": "https://blog.ouseful.info/2015/04/15/scraping-web-pages-with-r/"
},
{
"title": "Conditional Colors and Shapes in plot() with ifelse()",
"href": "http://is-r.tumblr.com/post/33223979190/conditional-colors-and-shapes-in-plot-with"
},
{
"title": "Knowing whether a time-series has been differenced appropriately in order to make it stationary",
"href": "http://costaleconomist.blogspot.com/2010/05/knowing-whether-time-series-has-been.html"
},
{
"title": "Reproducible research is still a challenge",
"href": "http://ropensci.org/blog/2014/06/09/reproducibility/"
},
{
"title": "The Euler Method In R",
"href": "http://www.theresearchkitchen.com/archives/679"
},
{
"title": "What ‘The power of R’ is saying",
"href": "http://using-r-project.blogspot.com/2009/10/what-power-of-r-is-saying.html"
},
{
"title": "Data Mining the California Solar Statistics with R: Part IV",
"href": "http://www.beyondmaxwell.com/?p=217"
},
{
"title": "\"RStudio:Get Started\" Screencasts",
"href": "http://www.r-chart.com/2015/01/rstudioget-started-screencasts.html"
},
{
"title": "The guessing game in R (with a twist, of course)",
"href": "http://www.statisticsblog.com/2010/05/the-guessing-game-in-r-with-a-twist-of-course/"
},
{
"title": "Surveys continue to rank R #1 for Data Mining",
"href": "http://blog.revolutionanalytics.com/2012/08/r-language-popularity-for-data-mining.html"
},
{
"title": "System Testing",
"href": "http://www.copula.de/2013/02/system-testing.html"
},
{
"title": "May 30: 5th MilanoR meeting",
"href": "http://www.milanor.net/blog/may-30-5th-milanor-meeting/"
},
{
"title": "upsetplot in ChIPseeker",
"href": "https://web.archive.org/web/http://ygc.name/2015/07/28/upsetplot-in-chipseeker/"
},
{
"title": "LondonR recap",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/SyjpfqhQam4/"
},
{
"title": "Spatial Critter Swarming Simulation",
"href": "http://www.econometricsbysimulation.com/2013/05/spatial-critter-swarming-simulation.html"
},
{
"title": "useR 2014 Slides for PSAboot and Version 1.1 on CRAN",
"href": "http://jason.bryer.org/posts/2014-07-03/useR_2014_PSAboot_Slides.html"
},
{
"title": "Deploying a car price model using R and AzureML",
"href": "https://longhowlam.wordpress.com/2015/08/20/deploying-a-car-price-model-using-r-and-azureml/"
},
{
"title": "An application of aggregate() and merge()",
"href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/lxkmLjcDxdM/application-of-aggregate-and-merge.html"
},
{
"title": "Diabetes Drug Switching: Why do people switch from one Drug to another?",
"href": "http://scweiss.blogspot.com/2016/04/diabetes-drug-switching-why-do-people.html"
},
{
"title": "How to create random binary matrices in R",
"href": "http://blog.revolutionanalytics.com/2012/09/how-to-create-random-binary-matrices-in-r.html"
},
{
"title": "Principal Components Regression, Pt. 2: Y-Aware Methods",
"href": "http://www.win-vector.com/blog/2016/05/pcr_part2_yaware/"
},
{
"title": "embed images in ggplot2 via subview and annotate a phylogenetic tree with images using inset function",
"href": "http://guangchuangyu.github.io/2016/03/embed-images-in-ggplot2-via-subview-and-annotate-a-phylogenetic-tree-with-images-using-inset-function/"
},
{
"title": "How the New York Times uses R for Data Visualization",
"href": "http://blog.revolutionanalytics.com/2011/03/how-the-new-york-times-uses-r-for-data-visualization.html"
},
{
"title": "Data analysis approaches to modeling changes in primary metabolism",
"href": "https://imdevsoftware.wordpress.com/2013/02/01/data-analysis-approaches-to-modeling-changes-in-primary-metabolism/"
},
{
"title": "R Workflow",
"href": "http://quantitativeecology.blogspot.com/2010/12/r-workflow.html"
},
{
"title": "Opel Corsa Diesel Usage",
"href": "http://wiekvoet.blogspot.com/2013/06/opel-corsa-diesel-usage.html"
},
{
"title": "CRAN might get tenure at Yale?",
"href": "https://web.archive.org/web/http://jackman.stanford.edu/blog/?p=2549"
},
{
"title": "R: single plot with two different y-axes",
"href": "http://www.gettinggeneticsdone.com/2015/04/r-single-plot-with-two-different-y-axes.html"
},
{
"title": "Examples on Clustering with R",
"href": "https://rdatamining.wordpress.com/2011/08/26/examples-on-clustering-with-r/"
},
{
"title": "Basket Option Pricing: Step by Step",
"href": "https://web.archive.org/web/http://stotastic.com/wordpress/2010/05/basket-option-pricing/"
},
{
"title": "A Stata HTML syntax highlighter in R",
"href": "http://www.econometricsbysimulation.com/2013/08/a-stata-html-formatter-in-r.html"
}
]
|
/json/264.r
|
no_license
|
rweekly/rweekly.org
|
R
| false
| false
| 7,849
|
r
|
[
{
"title": "Collinearity and stepwise VIF selection",
"href": "https://beckmw.wordpress.com/2013/02/05/collinearity-and-stepwise-vif-selection/"
},
{
"title": "Open data and ecological fallacy",
"href": "http://blog.free.fr/"
},
{
"title": "R is a cool image editor!",
"href": "http://statistic-on-air.blogspot.com/2010/11/r-is-cool-image-editor.html"
},
{
"title": "Confident package releases in R with crant",
"href": "https://cartesianfaith.com/2012/11/29/confident-package-releases-in-r-with-crant/"
},
{
"title": "Ebola in the Congo",
"href": "http://www.scipirate.com/"
},
{
"title": "National identification number: Finland part2",
"href": "http://xrgb.blogspot.com/2013/01/national-identification-number-finland.html"
},
{
"title": "Seven quick facts about R",
"href": "http://blog.revolutionanalytics.com/2014/04/seven-quick-facts-about-r.html"
},
{
"title": "Mapped: British, Spanish and Dutch Shipping 1750-1800",
"href": "http://spatial.ly/2012/03/mapped-british-shipping-1750-1800/"
},
{
"title": "Trigonometric Pattern Design",
"href": "https://aschinchon.wordpress.com/2015/07/01/trigonometric-pattern-design/"
},
{
"title": "Seattle histogram",
"href": "https://xianblog.wordpress.com/2015/08/16/seattle-histogram/"
},
{
"title": "Initialize yourself",
"href": "https://web.archive.org/web/http://anotherrblog.blogspot.com/2013/01/initialize-yourself.html"
},
{
"title": "Revolution R renamed Microsoft R, available free to developers and students",
"href": "http://blog.revolutionanalytics.com/2016/01/microsoft-r-open.html"
},
{
"title": "Descriptive Analytics-Part 1: Data Formatting Exercises",
"href": "http://r-exercises.com/2016/10/26/descriptive-analytics-part-1-data-formatting/"
},
{
"title": "sixty two-minute r twotorials now available",
"href": "http://www.twotorials.com/2012/04/sixty-two-minute-r-twotorials-now.html"
},
{
"title": "Import Japanese equity data into R with quantmod 0.4-4",
"href": "http://blog.fosstrading.com/2015/03/import-japanese-equity-data-into-r.html"
},
{
"title": "DailyMeteo.org – 2014 Conference",
"href": "http://www.milanor.net/blog/dailymeteo-org-2014-conference/"
},
{
"title": "How to run R in the cloud (for teaching)",
"href": "https://web.archive.org/web/http://blog.datacamp.com/2013/07/23/how-to-run-r-in-the-cloud-for-teaching/"
},
{
"title": "Air Pollution (PM10 and PM2.5) in Different Cities using Interactive Charts",
"href": "http://www.analyticsandvisualization.com/2015/06/air-pollution-pm10-and-pm25-in.html"
},
{
"title": "Some Applications of Item Response Theory in R",
"href": "http://joelcadwell.blogspot.com/2015/01/some-applications-of-item-response.html"
},
{
"title": "Scraping Web Pages With R",
"href": "https://blog.ouseful.info/2015/04/15/scraping-web-pages-with-r/"
},
{
"title": "Conditional Colors and Shapes in plot() with ifelse()",
"href": "http://is-r.tumblr.com/post/33223979190/conditional-colors-and-shapes-in-plot-with"
},
{
"title": "Knowing whether a time-series has been differenced appropriately in order to make it stationary",
"href": "http://costaleconomist.blogspot.com/2010/05/knowing-whether-time-series-has-been.html"
},
{
"title": "Reproducible research is still a challenge",
"href": "http://ropensci.org/blog/2014/06/09/reproducibility/"
},
{
"title": "The Euler Method In R",
"href": "http://www.theresearchkitchen.com/archives/679"
},
{
"title": "What ‘The power of R’ is saying",
"href": "http://using-r-project.blogspot.com/2009/10/what-power-of-r-is-saying.html"
},
{
"title": "Data Mining the California Solar Statistics with R: Part IV",
"href": "http://www.beyondmaxwell.com/?p=217"
},
{
"title": "\"RStudio:Get Started\" Screencasts",
"href": "http://www.r-chart.com/2015/01/rstudioget-started-screencasts.html"
},
{
"title": "The guessing game in R (with a twist, of course)",
"href": "http://www.statisticsblog.com/2010/05/the-guessing-game-in-r-with-a-twist-of-course/"
},
{
"title": "Surveys continue to rank R #1 for Data Mining",
"href": "http://blog.revolutionanalytics.com/2012/08/r-language-popularity-for-data-mining.html"
},
{
"title": "System Testing",
"href": "http://www.copula.de/2013/02/system-testing.html"
},
{
"title": "May 30: 5th MilanoR meeting",
"href": "http://www.milanor.net/blog/may-30-5th-milanor-meeting/"
},
{
"title": "upsetplot in ChIPseeker",
"href": "https://web.archive.org/web/http://ygc.name/2015/07/28/upsetplot-in-chipseeker/"
},
{
"title": "LondonR recap",
"href": "https://feedproxy.google.com/~r/PortfolioProbeRLanguage/~3/SyjpfqhQam4/"
},
{
"title": "Spatial Critter Swarming Simulation",
"href": "http://www.econometricsbysimulation.com/2013/05/spatial-critter-swarming-simulation.html"
},
{
"title": "useR 2014 Slides for PSAboot and Version 1.1 on CRAN",
"href": "http://jason.bryer.org/posts/2014-07-03/useR_2014_PSAboot_Slides.html"
},
{
"title": "Deploying a car price model using R and AzureML",
"href": "https://longhowlam.wordpress.com/2015/08/20/deploying-a-car-price-model-using-r-and-azureml/"
},
{
"title": "An application of aggregate() and merge()",
"href": "https://feedproxy.google.com/~r/CoffeeAndEconometricsInTheMorning/~3/lxkmLjcDxdM/application-of-aggregate-and-merge.html"
},
{
"title": "Diabetes Drug Switching: Why do people switch from one Drug to another?",
"href": "http://scweiss.blogspot.com/2016/04/diabetes-drug-switching-why-do-people.html"
},
{
"title": "How to create random binary matrices in R",
"href": "http://blog.revolutionanalytics.com/2012/09/how-to-create-random-binary-matrices-in-r.html"
},
{
"title": "Principal Components Regression, Pt. 2: Y-Aware Methods",
"href": "http://www.win-vector.com/blog/2016/05/pcr_part2_yaware/"
},
{
"title": "embed images in ggplot2 via subview and annotate a phylogenetic tree with images using inset function",
"href": "http://guangchuangyu.github.io/2016/03/embed-images-in-ggplot2-via-subview-and-annotate-a-phylogenetic-tree-with-images-using-inset-function/"
},
{
"title": "How the New York Times uses R for Data Visualization",
"href": "http://blog.revolutionanalytics.com/2011/03/how-the-new-york-times-uses-r-for-data-visualization.html"
},
{
"title": "Data analysis approaches to modeling changes in primary metabolism",
"href": "https://imdevsoftware.wordpress.com/2013/02/01/data-analysis-approaches-to-modeling-changes-in-primary-metabolism/"
},
{
"title": "R Workflow",
"href": "http://quantitativeecology.blogspot.com/2010/12/r-workflow.html"
},
{
"title": "Opel Corsa Diesel Usage",
"href": "http://wiekvoet.blogspot.com/2013/06/opel-corsa-diesel-usage.html"
},
{
"title": "CRAN might get tenure at Yale?",
"href": "https://web.archive.org/web/http://jackman.stanford.edu/blog/?p=2549"
},
{
"title": "R: single plot with two different y-axes",
"href": "http://www.gettinggeneticsdone.com/2015/04/r-single-plot-with-two-different-y-axes.html"
},
{
"title": "Examples on Clustering with R",
"href": "https://rdatamining.wordpress.com/2011/08/26/examples-on-clustering-with-r/"
},
{
"title": "Basket Option Pricing: Step by Step",
"href": "https://web.archive.org/web/http://stotastic.com/wordpress/2010/05/basket-option-pricing/"
},
{
"title": "A Stata HTML syntax highlighter in R",
"href": "http://www.econometricsbysimulation.com/2013/08/a-stata-html-formatter-in-r.html"
}
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.