content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## Construct a cache-aware matrix object.
## Returns a list of closures that hold a square matrix together with a
## cached inverse.  The inverse is NOT computed here; callers (cacheSolve)
## store it via setInverse() and read it back via getInverse().
## Non-square input is rejected with a message and NULL is returned.
makeCacheMatrix <- function(x = matrix()) {
  # Only square matrices can be inverted, so reject anything else up front.
  if (nrow(x) != ncol(x)) {
    message("Matrix is not square, returning NULL")
    return(NULL)
  }
  # NULL marks the inverse as "not computed yet".
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # replacing the matrix invalidates the cache
    },
    get = function() x,
    setInverse = function(inv) cached_inverse <<- inv,
    getInverse = function() cached_inverse
  )
}
## Return the inverse of a makeCacheMatrix object, computing it at most once.
## If a cached inverse exists it is returned immediately; otherwise the
## inverse is computed with solve(), stored back into `x`, and returned.
## A singular matrix yields NaN (as before).  Singularity is now detected by
## catching the error raised by solve() instead of testing det(data) == 0:
## the old test was an exact floating-point comparison (a near-singular
## matrix slips through and solve() fails anyway) and it also cost an extra
## O(n^3) determinant computation on every cache miss.
## Extra arguments in `...` are forwarded to solve() (previously ignored).
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    message("Inverse read from cache")
    return(inverse)
  }
  message("Inverse not cached, calculating it...")
  data <- x$get()
  inverse <- tryCatch(
    solve(data, ...),
    error = function(e) {
      # solve() raised (e.g. "system is exactly singular"): keep the old
      # contract and return NaN instead of propagating the error.
      message("Singular matrix, cannot invert")
      NaN
    }
  )
  # Cache whatever we got (matrix or NaN) so repeat calls are free.
  x$setInverse(inverse)
  inverse
}
| /cachematrix.R | no_license | tbuanes/ProgrammingAssignment2 | R | false | false | 1,519 | r | ## Put comments here that give an overall description of what your
## functions do
## Make object which holds a matrix and it's inverse
## Inverse is not calculated when creating the object, but should be set by
## calling function
## If input matrix is not square, the function returns NULL
## Return makeCacheMatrix object
## Factory for a matrix wrapper that can cache its inverse.
## (Duplicate copy: this file appears twice in the dump.)
## Returns NULL (with a message) for non-square input.
makeCacheMatrix <- function(x = matrix()) {
# Check if matrix is square
if ( nrow(x)!=ncol(x) ) {
message( "Matrix is not square, returning NULL" )
return(NULL)
}
# NULL marks the inverse as "not yet computed".
inverse <- NULL
set <- function(y) {
x <<- y
# Replacing the matrix invalidates any previously cached inverse.
inverse <<- NULL
}
get <- function() x
setInverse <- function(inv) inverse <<- inv
getInverse <- function() inverse
list( set=set, get=get, setInverse=setInverse, getInverse=getInverse )
}
## Find inverse of matrix of type makeCacheMatrix
## Only calculate inverse if this has not been cached.
## Cache inverse if not previously done
## Return inversed matrix, return NaN if matrix is singular
## Compute (or fetch from cache) the inverse held by a makeCacheMatrix object.
## (Duplicate copy: this file appears twice in the dump.)
## Returns the inverse matrix, or NaN when the matrix is singular.
cacheSolve <- function(x, ...) {
inverse <- x$getInverse()
if ( !is.null(inverse) ) {
message("Inverse read from cache")
return(inverse)
}
message("Inverse not cached, calculating it...")
data <- x$get()
## First check if matrix is singular, and give a proper error message
# NOTE(review): det(data)==0 is an exact floating-point comparison; a
# near-singular matrix can pass this test and still make solve() fail.
if ( det(data)==0 ) {
message( "Singular matrix, cannot invert" )
inverse<-NaN
} else {
inverse <- solve(data)
}
# Cache the result (matrix or NaN) so repeat calls are free.
x$setInverse(inverse)
inverse
}
|
chi2 <- function(a, b, c, d) {
  ## Pearson chi-squared statistic for the 2x2 contingency table
  ## [[a, b], [c, d]].  All arguments may be equal-length vectors, in
  ## which case one statistic is returned per table.
  a <- as.numeric(a)
  b <- as.numeric(b)
  c <- as.numeric(c)
  d <- as.numeric(d)
  total <- a + b + c + d
  # (observed - expected)^2 / expected for one cell.
  contrib <- function(observed, expected) (observed - expected)^2 / expected
  contrib(a, (a + c) * (a + b) / total) +
    contrib(b, (b + d) * (a + b) / total) +
    contrib(c, (a + c) * (c + d) / total) +
    contrib(d, (d + b) * (c + d) / total)
}
corpus.compare <- function(words.x, freq.x=rep(1, length(words.x)), words.y, freq.y=rep(1, length(words.y))) {
  # Compare two corpora, listing per-word frequency, chi-squared and the
  # over-representation of corpus x relative to corpus y.
  #
  # words.x / words.y: character vectors of words (may contain duplicates)
  # freq.x / freq.y: counts per word, defaulting to 1 per occurrence
  #
  # Returns a data frame with columns word, freq.x, freq.y, chi, over.
  # Relies on the sibling chi2() function defined in this file.
  n.x <- aggregate(freq.x, list(words.x), FUN = sum)
  colnames(n.x) <- c("word", "freq.x")
  n.y <- aggregate(freq.y, list(words.y), FUN = sum)
  colnames(n.y) <- c("word", "freq.y")
  # Full outer join on "word".  Fix: spell out TRUE instead of the
  # re-assignable alias T.
  result <- merge(n.x, n.y, all = TRUE)
  # Words absent from one corpus get frequency 0 instead of NA.
  result[is.na(result)] <- 0
  result$chi <- chi2(result$freq.x, result$freq.y,
                     sum(result$freq.x) - result$freq.x,
                     sum(result$freq.y) - result$freq.y)
  # Relative frequency ratio, normalised by the corpus size ratio.
  result$over <- (result$freq.x / result$freq.y) /
    (sum(result$freq.x) / sum(result$freq.y))
  result
}
corpus.split <- function(aid, words, freq = NULL, pattern, ...) {
  ## Split a corpus into the documents whose words match `pattern`
  ## (corpus x) and all remaining documents (corpus y).  `aid` gives the
  ## document id per token; extra arguments are forwarded to grepl().
  ## NOTE(review): the "Selected" count printed below is the number of
  ## matching word occurrences, not unique documents -- confirm intended.
  selected.aid <- aid[grepl(pattern, words, ...)]
  print(paste("Selected", length(selected.aid), "/", length(unique(aid)),
              "using pattern", pattern))
  # Membership mask computed once and reused for both halves.
  in.x <- aid %in% selected.aid
  list(
    words.x = words[in.x],
    freq.x = if (is.null(freq)) NULL else freq[in.x],
    words.y = words[!in.x],
    freq.y = if (is.null(freq)) NULL else freq[!in.x]
  )
}
corpus.freqs <- function(aid, words, freq) {
  ## Term frequency, document frequency, idf and tf-idf per word.
  ## aid: document id per token; words: the token; freq: count per token.
  n.docs <- length(unique(aid))
  # Total count per word, and number of distinct documents per word.
  term.freq <- aggregate(freq, list(words), FUN = sum)
  doc.freq <- aggregate(aid, list(words),
                        FUN = function(ids) length(unique(ids)))
  result <- merge(term.freq, doc.freq, by = "Group.1")
  colnames(result) <- c("word", "tf", "df")
  result$df.prop <- result$df / n.docs
  result$idf <- log(n.docs / result$df)
  result$tfidf <- result$tf * result$idf
  result
}
| /corpus.r | permissive | amcat/amcat-r-tools | R | false | false | 2,201 | r | chi2 <- function(a,b,c,d) {
# Compute the chi^2 statistic for a 2x2 crosstab containing the values
# [[a, b], [c, d]]
ooe <- function(o, e) {(o-e)*(o-e) / e}
tot = 0.0 + a+b+c+d
a = as.numeric(a)
b = as.numeric(b)
c = as.numeric(c)
d = as.numeric(d)
(ooe(a, (a+c)*(a+b)/tot)
+ ooe(b, (b+d)*(a+b)/tot)
+ ooe(c, (a+c)*(c+d)/tot)
+ ooe(d, (d+b)*(c+d)/tot))
}
## (Duplicate copy: this file appears twice in the dump.)
corpus.compare <- function(words.x, freq.x=rep(1, length(words.x)), words.y, freq.y=rep(1, length(words.y))) {
# Compare two corpora, listing relative frequency and chi-squared
# words should be a vector of character with the words for corpus x and y
# freq, if given, should be a vector of the same length as words with the counts per word
# words may be duplicated
# Total count per word in each corpus.
n.x = aggregate(freq.x, list(words.x), FUN=sum)
colnames(n.x) = c("word", "freq.x")
n.y = aggregate(freq.y, list(words.y), FUN=sum)
colnames(n.y) = c("word", "freq.y")
# Full outer join; NOTE(review): prefer TRUE over the re-assignable alias T.
result = merge(n.x, n.y, all=T)
# Words absent from one corpus get frequency 0 instead of NA.
result[is.na(result)] = 0
# Relies on the sibling chi2() function defined above in this file.
result$chi = chi2(result$freq.x, result$freq.y, sum(result$freq.x) - result$freq.x, sum(result$freq.y) - result$freq.y)
result$over = (result$freq.x / result$freq.y) / (sum(result$freq.x) / sum(result$freq.y))
result
}
## Split a corpus into documents whose words match `pattern` (x) and the
## rest (y).  (Duplicate copy: this file appears twice in the dump.)
corpus.split <- function(aid, words, freq=NULL, pattern, ...) {
# Document ids of every token matching the pattern (extra args go to grepl).
selected.aid = aid[grepl(pattern, words, ...)]
# NOTE(review): length(selected.aid) counts matching tokens, not unique
# documents -- confirm the printed ratio is intended.
print(paste("Selected", length(selected.aid),"/", length(unique(aid)),"using pattern", pattern))
words.x = words[aid %in% selected.aid]
freq.x = if (is.null(freq)) NULL else freq[aid %in% selected.aid]
words.y = words[!(aid %in% selected.aid)]
freq.y = if (is.null(freq)) NULL else freq[!(aid %in% selected.aid)]
list(words.x=words.x, freq.x=freq.x, words.y=words.y, freq.y=freq.y)
}
## Term/document frequencies plus idf and tf-idf per word.
## (Duplicate copy: this file appears twice in the dump.)
corpus.freqs <- function(aid, words, freq) {
# compute the document frequency for all words
freqs = aggregate(freq, list(words), FUN=sum)
docfreqs = aggregate(aid, list(words), FUN=function(x) length(unique(x)))
# Join on aggregate's default grouping column name, then relabel.
result = merge(freqs, docfreqs, by='Group.1')
colnames(result) <- c("word", "tf", "df")
result$df.prop = result$df / length(unique(aid))
result$idf = log(length(unique(aid)) / result$df)
result$tfidf = result$tf * result$idf
result
}
|
# Auto-extracted example script for MLML2R::MLML(): estimates 5-mC / 5-hmC
# levels from combinations of BS, oxBS and TAB bisulfite-sequencing counts.
library(MLML2R)
### Name: MLML
### Title: MLE (maximum likelihood estimates) of 5-mC and 5-hmC levels.
### Aliases: MLML
### ** Examples
# load the example datasets from BS, oxBS and TAB methods
data(C_BS_sim)
data(C_OxBS_sim)
data(T_BS_sim)
data(T_OxBS_sim)
data(C_TAB_sim)
data(T_TAB_sim)
# obtain MLE via EM-algorithm for BS+oxBS:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,iterative=TRUE)
# obtain constrained exact MLE for BS+oxBS:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim)
# obtain MLE via EM-algorithm for BS+TAB:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
# obtain constrained exact MLE for BS+TAB:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# obtain MLE via EM-algorithm for oxBS+TAB:
results_em <- MLML(L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
# obtain constrained exact MLE for oxBS+TAB:
results_exact <- MLML(L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# obtain MLE via EM-algorithm for BS+oxBS+TAB:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
# obtain constrained exact MLE (via Lagrange multiplier) for BS+oxBS+TAB:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# Example of datasets with zero counts and missing values:
C_BS_sim[1,1] <- 0
C_OxBS_sim[1,1] <- 0
C_TAB_sim[1,1] <- 0
T_BS_sim[1,1] <- 0
T_OxBS_sim[1,1] <- 0
T_TAB_sim[1,1] <- 0
C_BS_sim[2,2] <- NA
C_OxBS_sim[2,2] <- NA
C_TAB_sim[2,2] <- NA
T_BS_sim[2,2] <- NA
T_OxBS_sim[2,2] <- NA
T_TAB_sim[2,2] <- NA
| /data/genthat_extracted_code/MLML2R/examples/MLML.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,005 | r | library(MLML2R)
### Name: MLML
### Title: MLE (maximum likelihood estimates) of 5-mC and 5-hmC levels.
### Aliases: MLML
### ** Examples
# load the example datasets from BS, oxBS and TAB methods
data(C_BS_sim)
data(C_OxBS_sim)
data(T_BS_sim)
data(T_OxBS_sim)
data(C_TAB_sim)
data(T_TAB_sim)
# obtain MLE via EM-algorithm for BS+oxBS:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,iterative=TRUE)
# obtain constrained exact MLE for BS+oxBS:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim)
# obtain MLE via EM-algorithm for BS+TAB:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
# obtain constrained exact MLE for BS+TAB:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# obtain MLE via EM-algorithm for oxBS+TAB:
results_em <- MLML(L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
# obtain constrained exact MLE for oxBS+TAB:
results_exact <- MLML(L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# obtain MLE via EM-algorithm for BS+oxBS+TAB:
results_em <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim,iterative=TRUE)
#' # obtain MLE via Lagrange multiplier for BS+oxBS+TAB:
results_exact <- MLML(T.matrix = C_BS_sim , U.matrix = T_BS_sim,
L.matrix = T_OxBS_sim, M.matrix = C_OxBS_sim,
G.matrix = T_TAB_sim, H.matrix = C_TAB_sim)
# Example of datasets with zero counts and missing values:
C_BS_sim[1,1] <- 0
C_OxBS_sim[1,1] <- 0
C_TAB_sim[1,1] <- 0
T_BS_sim[1,1] <- 0
T_OxBS_sim[1,1] <- 0
T_TAB_sim[1,1] <- 0
C_BS_sim[2,2] <- NA
C_OxBS_sim[2,2] <- NA
C_TAB_sim[2,2] <- NA
T_BS_sim[2,2] <- NA
T_OxBS_sim[2,2] <- NA
T_TAB_sim[2,2] <- NA
|
# NOTE(review): clearing the workspace and hard-coding an absolute setwd()
# make this script machine-specific and unsafe to source from other code;
# consider dropping rm() and using relative paths instead.
rm(list=ls())
setwd("D:\\Studies\\PhD\\Year1\\STAT548 - Qualifying course\\Paper 1 - TGS\\Code\\Results\\Simulation_Study\\dld\\")
plot_and_save <- function(data, title, vers, width=8, height=6.5){
  # Scatter plot of data$x vs data$y with a y = x reference line, saved to
  # "<title>_<vers>.pdf" (width/height in cm).
  require(ggplot2)
  # Fix: capture the plot in a variable and pass it to ggsave().  The
  # original built the chain without printing or assigning it, so
  # ggsave() fell back to last_plot(), which inside a function may refer
  # to a previously displayed plot.
  p <- ggplot(data, aes(x=x, y=y)) +
    geom_point() + labs(x="", y="", title = title) +
    geom_abline(slope=1, intercept=0) +
    theme(plot.title = element_text(hjust = 0.5))
  # Fix: the original passed height = width, silently ignoring the height
  # argument and always producing square output.
  ggsave(paste(title,"_", vers, ".pdf", sep=""), plot = p,
         width = width, height = height, units = "cm")
}
# Scenario folder under the (machine-specific) results tree.
scenario= "dld"
path = paste("D:/Studies/PhD/Year1/STAT548 - Qualifying course/Paper 1 - TGS/Code/Results/Simulation_Study/", scenario, "/", sep="")
# NOTE(review): `pattern` is a regular expression, not a glob; "*.RData"
# happens to match but "\\.RData$" is presumably what was meant -- confirm.
files <- list.files(path, pattern="*.RData")
res_parsed = list()
count=1
# Load every result file; each .RData is assumed to define an object named
# `res_par` (TODO confirm), which is collected into res_parsed.
for(res in files){
full_path = paste(path, res, sep="")
load(full_path)
res_parsed[count] = list(res_par)
count = count + 1
}
# For each loaded result, pair every entry of the pip_* matrices
# (presumably posterior inclusion probabilities -- confirm) with the other
# rows of the same column, then plot x-vs-y agreement for GS, HBS and wTGS.
for(res in res_parsed){
gs = res$pip_GS
hbs = res$pip_HBS
wtgs = res$pip_wTGS
rows = nrow(gs); cols=ncol(gs)
# Seed each accumulator with one NA row; it is dropped again below.
results_gs = matrix(NA, 1,2)
results_hbs = matrix(NA, 1, 2)
results_wtgs = matrix(NA, 1, 2)
# NOTE(review): growing matrices with rbind inside a double loop is
# O(n^2); preallocating would speed this up considerably.
for(i in 1:rows){
for(j in 1:cols){
x = rep(gs[i, j], rows - 1)
y = gs[-i, j]
results_gs = rbind(results_gs, cbind(x, y))
x = rep(hbs[i, j], rows - 1)
y = hbs[-i, j]
results_hbs = rbind(results_hbs, cbind(x, y))
x = rep(wtgs[i, j], rows - 1)
y = wtgs[-i, j]
results_wtgs = rbind(results_wtgs, cbind(x, y))
}
}
# Drop the NA seed row before plotting.
results_gs = as.data.frame(results_gs[-1,])
results_hbs = as.data.frame(results_hbs[-1,])
results_wtgs = as.data.frame(results_wtgs[-1,])
plot_and_save(results_gs, "GS", res$c_vers)
plot_and_save(results_hbs, "HBS", res$c_vers)
plot_and_save(results_wtgs, "wTGS", res$c_vers)
}
| /process_real_data_results.R | no_license | vittrom/Scalable-Importance-Tempering-and-Bayesian-Variable-Selection | R | false | false | 1,827 | r | rm(list=ls())
setwd("D:\\Studies\\PhD\\Year1\\STAT548 - Qualifying course\\Paper 1 - TGS\\Code\\Results\\Simulation_Study\\dld\\")
plot_and_save <- function(data, title, vers, width=8, height=6.5){
  # Scatter plot of data$x vs data$y with a y = x reference line, saved to
  # "<title>_<vers>.pdf" (width/height in cm).
  require(ggplot2)
  # Fix: capture the plot and pass it to ggsave(); the original relied on
  # last_plot(), which is only updated when a plot is printed.
  p <- ggplot(data, aes(x=x, y=y)) +
    geom_point() + labs(x="", y="", title = title) +
    geom_abline(slope=1, intercept=0) +
    theme(plot.title = element_text(hjust = 0.5))
  # Fix: the original passed height = width, silently ignoring the height
  # argument and always producing square output.
  ggsave(paste(title,"_", vers, ".pdf", sep=""), plot = p,
         width = width, height = height, units = "cm")
}
scenario= "dld"
path = paste("D:/Studies/PhD/Year1/STAT548 - Qualifying course/Paper 1 - TGS/Code/Results/Simulation_Study/", scenario, "/", sep="")
files <- list.files(path, pattern="*.RData")
res_parsed = list()
count=1
for(res in files){
full_path = paste(path, res, sep="")
load(full_path)
res_parsed[count] = list(res_par)
count = count + 1
}
for(res in res_parsed){
gs = res$pip_GS
hbs = res$pip_HBS
wtgs = res$pip_wTGS
rows = nrow(gs); cols=ncol(gs)
results_gs = matrix(NA, 1,2)
results_hbs = matrix(NA, 1, 2)
results_wtgs = matrix(NA, 1, 2)
for(i in 1:rows){
for(j in 1:cols){
x = rep(gs[i, j], rows - 1)
y = gs[-i, j]
results_gs = rbind(results_gs, cbind(x, y))
x = rep(hbs[i, j], rows - 1)
y = hbs[-i, j]
results_hbs = rbind(results_hbs, cbind(x, y))
x = rep(wtgs[i, j], rows - 1)
y = wtgs[-i, j]
results_wtgs = rbind(results_wtgs, cbind(x, y))
}
}
results_gs = as.data.frame(results_gs[-1,])
results_hbs = as.data.frame(results_hbs[-1,])
results_wtgs = as.data.frame(results_wtgs[-1,])
plot_and_save(results_gs, "GS", res$c_vers)
plot_and_save(results_hbs, "HBS", res$c_vers)
plot_and_save(results_wtgs, "wTGS", res$c_vers)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score_teamX.R
\name{score_teamX}
\alias{score_teamX}
\title{List all occurrences of a specific scoreline for a specific team}
\usage{
score_teamX(df = NULL, score = NULL, teamname = NULL)
}
\arguments{
\item{df}{the results dataset}
\item{score}{the scoreline}
\item{teamname}{the team}
}
\value{
a dataframe of games ending in that result
}
\description{
List all occurrences of a specific scoreline for a specific team
}
\examples{
score_teamX(england,'4-4', 'Tottenham Hotspur')
#all 4-4 draws Tottenham Hotspur have played in (home and away)
score_teamX(england,'3-5', 'York City')
#list all 5-3 defeats suffered by York City (regardless of if occurred home/away)
score_teamX(england,'5-3', 'York City')
#list all 5-3 victories by York City (regardless of if occurred home/away)
score_teamX(england,'8-0', 'Arsenal')
#list all 8-0 victories by Arsenal (regardless of if occurred home/away)
score_teamX(england,'0-8', 'Arsenal')
#list all 8-0 defeats suffered by Arsenal (regardless of if occurred home/away)
}
| /man/score_teamX.Rd | no_license | nturaga/engsoccerdata | R | false | true | 1,098 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score_teamX.R
\name{score_teamX}
\alias{score_teamX}
\title{List all occurrences of a specific scoreline for a specific team}
\usage{
score_teamX(df = NULL, score = NULL, teamname = NULL)
}
\arguments{
\item{df}{the results dataset}
\item{score}{the scoreline}
\item{teamname}{the team}
}
\value{
a dataframe of games ending in that result
}
\description{
List all occurrences of a specific scoreline for a specific team
}
\examples{
score_teamX(england,'4-4', 'Tottenham Hotspur')
#all 4-4 draws Tottenham Hotspur have played in (home and away)
score_teamX(england,'3-5', 'York City')
#list all 5-3 defeats suffered by York City (regardless of if occurred home/away)
score_teamX(england,'5-3', 'York City')
#list all 5-3 victories by York City (regardless of if occurred home/away)
score_teamX(england,'8-0', 'Arsenal')
#list all 8-0 victories by Arsenal (regardless of if occurred home/away)
score_teamX(england,'0-8', 'Arsenal')
#list all 8-0 defeats suffered by Arsenal (regardless of if occurred home/away)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcloud.jupyter.notebooks.R
\docType{package}
\name{rcloud.jupyter.noteooks}
\alias{rcloud.jupyter.noteooks}
\alias{rcloud.jupyter.noteooks-package}
\title{rcloud.jupyter.notebooks}
\description{
This package is an extension to RCloud, it allows import and export of Jupyter notebooks.
}
| /man/rcloud.jupyter.noteooks.Rd | no_license | altusnets/rcloud.jupyter.notebooks | R | false | true | 364 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcloud.jupyter.notebooks.R
\docType{package}
\name{rcloud.jupyter.noteooks}
\alias{rcloud.jupyter.noteooks}
\alias{rcloud.jupyter.noteooks-package}
\title{rcloud.jupyter.noteooks}
\description{
This package is an extension to RCloud, it allows import and export of Jupyter notebooks.
}
|
#' @title plotSampleCount function
#' @description Adds a per-sample mutation count bar chart to the
#'   \code{plots} slot of a MutationPlot object and returns the object.
#' @param ReturnClass A MutationPlot object; \code{plotdata$SampleData}
#'   must contain columns \code{SampleCol}, \code{V1} and \code{InfoCol}.
#' @param Colour_List list - optional colours specified for the columns:
#'   a named character vector of colours keyed by mutation type, or
#'   \code{FALSE} (the default) to use ggplot2's discrete palette.
#' @return The input object with a "SampleCount" plot added to its
#'   \code{plots} slot.
#' @keywords Mutation Waterfall
#' @export
#' @examples
#' ## Pass the function a MutationPlot object
#' plotSampleCount(MutationPlot)
## FuncColour=c("frameshift deletion" = "#7CAE00","." = "#00BFC4","nonsynonymous SNV" = "#F8766D")
plotSampleCount <- function(ReturnClass, Colour_List = FALSE) {
  # Choose the fill scale up front: manual colours when the caller supplied
  # a character vector, ggplot2's default discrete palette otherwise.
  # is.character() replaces the original typeof() comparison (same test,
  # idiomatic form), and hoisting the if/else out of the "+" chain avoids
  # relying on a bare trailing if-expression inside the chain.
  fill_scale <- if (is.character(Colour_List)) {
    scale_fill_manual(name = "Type of Mutation", values = Colour_List)
  } else {
    scale_fill_discrete(name = "Type of Mutation")
  }
  ReturnClass@plots[["SampleCount"]] <- ggplot(ReturnClass@plotdata$SampleData,
    aes(
      x = as.factor(SampleCol),
      y = V1,
      fill = InfoCol
    )) +
    geom_bar(stat = "identity") +
    labs(y = "Count") +
    theme(axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.title.x = element_blank(),
          panel.background = element_blank(),
          axis.line.y = element_line()) +
    fill_scale
  ReturnClass
}
| /R/plotSampleCount.R | no_license | findlaycopley/MutPlot | R | false | false | 1,392 | r | #' @title plotSampleCount function
#' @param ReturnClass Description
#' @param Colour_List list - optional colours specified for the columns
#' @keywords Mutation Waterfall
#' @export
#' @examples
#' ## Pass the function a MuationPlot object
#' plotSampleCount(MutationPlot)
## FuncColour=c("frameshift deletion" = "#7CAE00","." = "#00BFC4","nonsynonymous SNV" = "#F8766D")
plotSampleCount <- function(ReturnClass, Colour_List = FALSE) {
  # Adds a per-sample mutation count bar chart to ReturnClass@plots and
  # returns the (modified) object.  Colour_List may be a named character
  # vector of colours per mutation type, or FALSE for the default palette.
  # Fix: test with is.character() instead of typeof(...) == "character",
  # and select the fill scale up front rather than ending the "+" chain
  # in a bare if/else expression.
  fill_scale <- if (is.character(Colour_List)) {
    scale_fill_manual(name = "Type of Mutation", values = Colour_List)
  } else {
    scale_fill_discrete(name = "Type of Mutation")
  }
  ReturnClass@plots[["SampleCount"]] <- ggplot(ReturnClass@plotdata$SampleData,
    aes(
      x = as.factor(SampleCol),
      y = V1,
      fill = InfoCol
    )) +
    geom_bar(stat = "identity") +
    labs(y = "Count") +
    theme(axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.title.x = element_blank(),
          panel.background = element_blank(),
          axis.line.y = element_line()) +
    fill_scale
  ReturnClass
}
|
library(dplyr)
library(stringr)
library(ggplot2)
############ settings ##############
#maze4 <- c('maze4_path_weight100_weight30.csv','Maze4 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze4 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze4 <- c('maze4_path_weight100_weight30.csv','maze4 - 201801222157 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze5 <- c('maze5_path_weight100_weight30.csv','nxcs.testbed.maze5_weighted_sum - 201801222158 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze5 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze6 <- c('maze6_path_weight100_weight30.csv','maze6 - 201801222201 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze6 - 201801151104 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv.csv')
dst <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201803311404 - Trial 0 - TRIAL_NUM - 25000 - TEST.csv')
dst2 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201804182045 - Trial 0 - TRIAL_NUM - 25000 - TEST.csv')
#dst3 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805212152 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst3 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262102 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst32 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262103cp2 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst33 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262103cp1 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
upperBound <- 6000
# traceWeightFilter <- c('0.040000|0.960000'
# , '0.480000|0.520000'
# #, '0.520000|0.480000'
# , '0.960000|0.040000') #c('0.000000|1.000000', '0.560000|0.440000', '1.000000|0.000000')
#
# plot.labels <- list(expression(paste(lambda[1],'=0.04, 0.96',' ',sep=''))
# , expression(paste(lambda[2],'=0.48, 0.52',' ',sep=''))
# #, expression(paste(lambda[3],'=0.52, 0.48',sep=''))
# , expression(paste(lambda[3],'=0.96, 0.04',' ',sep='')) )
traceWeightFilter <- c("0.000000|1.000000",
"0.040000|0.960000",
"0.080000|0.920000",
"0.120000|0.880000",
"0.160000|0.840000"
, "0.200000|0.800000",
"0.240000|0.760000",
"0.280000|0.720000",
"0.320000|0.680000",
"0.360000|0.640000"
, "0.400000|0.600000",
"0.440000|0.560000",
"0.480000|0.520000",
"0.520000|0.480000",
"0.560000|0.440000"
, "0.600000|0.400000",
"0.640000|0.360000",
"0.680000|0.320000",
"0.720000|0.280000",
"0.760000|0.240000"
, "0.800000|0.200000",
"0.840000|0.160000",
"0.880000|0.120000",
"0.920000|0.080000",
"0.960000|0.040000"
, "1.000000|0.000000"
)
plot.upperBound <- 6000
plot.traceWeightFilter <- c(#"0.000000|1.000000",
"0.040000|0.960000",
#"0.080000|0.920000",
# "0.120000|0.880000",
# "0.160000|0.840000"
# , "0.200000|0.800000",
# "0.240000|0.760000",
# "0.280000|0.720000",
# "0.320000|0.680000",
# "0.360000|0.640000"
# , "0.400000|0.600000",
# "0.440000|0.560000",
"0.480000|0.520000",
"0.520000|0.480000",
# "0.560000|0.440000"
# , "0.600000|0.400000",
# "0.640000|0.360000",
# "0.680000|0.320000",
# "0.720000|0.280000",
# "0.760000|0.240000"
# , "0.800000|0.200000",
# "0.840000|0.160000",
# "0.880000|0.120000",
# "0.920000|0.080000",
"0.960000|0.040000"
# , "1.000000|0.000000"
)
plot.labels <- list(#expression(paste(lambda[0],'=0.0, 1.0',' ',sep=''))
#, expression(paste(lambda[1],'=0.11, 0.89',' ',sep=''))
expression(paste(lambda^1,'=0.04, 0.96',' ',sep=''))
#, expression(paste(lambda[3],'=0.33, 0.67',' ',sep=''))
, expression(paste(lambda^2,'=0.48, 0.52',' ',sep=''))
, expression(paste(lambda^3,'=0.52, 0.48',' ',sep=''))
#, expression(paste(lambda[6],'=0.67, 0.33',' ',sep=''))
, expression(paste(lambda^4,'=0.96, 0.04',' ',sep=''))
#, expression(paste(lambda[8],'=0.89, 0.11',' ',sep=''))
#, expression(paste(lambda[9],'=1.0, 0.0',' ',sep=''))
)
plot.targetReward <- c('200.000000|400.000000')
##################
mazeToRun <- dst3
############# begin to read result #############
# NOTE(review): hard-coded absolute setwd() makes the script machine-specific.
setwd("C:/Users/martin.xie/Downloads/Result/dst3/")
# Read one result CSV (all three runs share identical read options).
read_result <- function(path) {
  read.csv(file = path, header = TRUE, sep = ",",
           stringsAsFactors = FALSE, row.names = NULL)
}
raw.data1 <- read_result(mazeToRun[2])
raw.data2 <- read_result(dst32[2])
raw.data3 <- read_result(dst33[2])
# Fix: the original called rbind(raw.data, raw.data2) although only
# raw.data1 existed at that point, so the script stopped with
# "object 'raw.data' not found".  Start the combined frame from raw.data1.
raw.data <- rbind(raw.data1, raw.data2)
raw.data <- rbind(raw.data, raw.data3)
# Keep only the columns used downstream and restrict to the configured
# weight vectors and the first `upperBound` learning problems.
data <- raw.data %>%
select(TrailNumber, Timestamp, TargetWeight, TraceWeight, obj_r1, OpenState, FinalState, steps, hyperVolumn, path) %>%
filter(TraceWeight %in% traceWeightFilter
, Timestamp <= upperBound)
#write.csv(data, file = paste('trim_2_',mazeToRun[2]),row.names=FALSE)
## release memory
#rm(raw.data)
################ check if uid in final state pair ###############
# Unique id per row: open state * final state * step count.
uid <- paste(data$OpenState, data$FinalState, data$steps, sep = "*")
data <- cbind(data, uid)
####match by preset open/final/step
#data$match <- ifelse(data$uid %in% targetSteps$targetId, 1, 0)
rm(uid)
####match by preset open/final/step
# A row counts as a "match" when its recorded path string is short enough;
# the 160-character cutoff presumably bounds the optimal path length --
# TODO confirm against the path encoding.
data$match <- ifelse(nchar(data$path)>160,0,1)
################ calculate match rate ###############
# Per trial/timestamp/weight group: row count, number of matching rows,
# their ratio, and the mean hypervolume.
result <- data %>%
group_by(TrailNumber, Timestamp,TargetWeight,TraceWeight ) %>%
summarise(groupRow = n()
, matchCount = sum(match)
, matchRate =matchCount/groupRow
, hyperVolumn = mean(hyperVolumn))
# uniqTrail <- unique(result$TrailNumber)
# pall <- rep(NULL, nrow(uniqTrail))
# pdata <- NULL
#
# for (i in uniqTrail) {
# pdata <- result %>%
# filter(TrailNumber == i
# #, TraceWeight == '5.000000|5.000000'
# #, TraceWeight == uniqWeight[i] #' 0.000000|1.000000'
# )
# ggplot(pdata, aes(x = Timestamp, y = matchRate, group = TraceWeight, color = TraceWeight, linetype = TraceWeight) )+
# geom_line() +
# labs(title = paste('Trail', i,sep=' '))
# }
#ggplot(pdata, aes(x = Timestamp, y = matchRate, group = TraceWeight, color = c('#41ae76', '#ef6548', '#4292c6'))) +
# geom_line()
################ calculate mean match rate and hyper volume ###############
# Average across trials, keeping min/max in case ribbon bands are wanted.
retdata <- result %>%
group_by(Timestamp, TargetWeight, TraceWeight) %>%
summarise(matchRateAvg = mean(matchRate)
, hyperVolumnAvg = mean(hyperVolumn)
, maxmr = max(matchRate)
, minmr = min(matchRate)
, maxhv = max(hyperVolumn)
, minhv = min(hyperVolumn))
# Quick-look line plot of the averaged match rate (not saved to disk).
plt <- ggplot(retdata, aes(x = Timestamp, y = matchRateAvg, group = TraceWeight, color = TraceWeight, linetype = TraceWeight)) +
geom_line()
########### plot begin ###########
# Global plot styling plus per-series line types, shapes and colours.
theme_set(theme_classic(base_size = 9))
lty = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
lshp = c(1, 2, 3, 4, 5, 6, 7, 8, 9,10)
cbbPalette = c('#e41a1c', '#377eb8', '#4daf4a'
, '#984ea3', '#ff7f00', '#66ff66'
, '#a65628', '#f781bf', '#000000'
,'#f781bf')
################ plot data ###############
# Restrict the averaged results to the weights/time range chosen for plots.
plot.data <- retdata %>% filter(TraceWeight %in% plot.traceWeightFilter
, Timestamp <= plot.upperBound
#, TargetWeight %in% plot.targetReward
)
#write.csv(plot.data, file = paste('plot_',mazeToRun[2]),row.names=FALSE)
################ plot hyper volume ###############
# Hypervolume-over-time curve, one line per weight vector (panel (a), "THV").
phv <- ggplot(data = plot.data, aes(
x = Timestamp,
y = hyperVolumnAvg,
colour = TraceWeight,
group = TraceWeight,
linetype = TraceWeight
)) +
geom_line() +
#geom_ribbon(aes(ymin = minhv, ymax = maxhv, fill = TraceWeight), alpha = 0.2) +
labs(x = 'Number of Learning Problems\n(a)', y = NULL) +
ggtitle("THV") +
theme(axis.title.y = element_text(size = rel(1.1), face = "bold"), axis.title.x = element_text(size = rel(1.1), face = "bold"), title = element_text(size = rel(1.1), face = 'bold')) +
theme(legend.text = element_text(size = rel(1.5), face = "bold")) +
theme(legend.title = element_blank()) +
#theme(legend.position = c(0.63, 0.15))
theme(legend.position = 'bottom') + theme(panel.grid.major = element_line(size = 0.01, linetype = 'dotted',
colour = "black"),
panel.grid.minor = element_line(size = 0.001, linetype = 'dotted',
colour = "black")) +
theme(legend.background = element_rect(fill = alpha('gray', 0.05))) +
theme(axis.text.x = element_text(size = rel(1.4)),
axis.text.y = element_text(size = rel(1.4)),
axis.line.x = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
axis.line.y = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
axis.title = element_text(size = rel(1.2), face = "bold")) +
scale_linetype_manual(values = lty, guide = "none") +
# Human-readable lambda labels replace the raw weight strings; the guides()
# override keeps line types visible in the shared legend.
scale_colour_manual(values = cbbPalette, labels = plot.labels) +
guides(colour=guide_legend(override.aes=list(linetype=1:length(plot.traceWeightFilter))))
################ plot match rate ###############
# Match-rate-over-time curve, one line per weight vector (panel (b), "% OP").
pmr <- ggplot(data = plot.data, aes(
x = Timestamp,
y = matchRateAvg,
colour = TraceWeight,
group = TraceWeight,
linetype = TraceWeight)) +
geom_line() +
#geom_ribbon(aes(ymin = minmr, ymax = maxmr, fill = TraceWeight), alpha = 0.2) +
labs(x = 'Number of Learning Problems\n(b)', y = NULL) +
ggtitle("% OP") +
theme(axis.title.y = element_text(size = rel(1.1), face = "bold"), axis.title.x = element_text(size = rel(1.1), face = "bold"), title = element_text(size = rel(1.1), face = 'bold')) +
theme(legend.text = element_text(size = rel(1), face = "bold")) +
theme(legend.title = element_blank()) +
#theme(legend.position = c(0.63, 0.15))
theme(legend.position = 'bottom') + theme(panel.grid.major = element_line(size = 0.01, linetype = 'dotted',
colour = "black"),
panel.grid.minor = element_line(size = 0.001, linetype = 'dotted',
colour = "black")) +
theme(legend.background = element_rect(fill = alpha('gray', 0.05))) +
theme(axis.text.x = element_text(size = rel(1.4)),
axis.text.y = element_text(size = rel(1.4)),
axis.line.x = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
axis.line.y = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
axis.title = element_text(size = rel(1.2), face = "bold")) +
scale_linetype_manual(values = lty) +
scale_colour_manual(values = cbbPalette)
################ plot arrange plots into one ###############
library(gridExtra)
# Extract the legend grob from a ggplot so one shared legend can be placed
# under both panels.
g_legend <- function(a.gplot) {
tmp <- ggplot_gtable(ggplot_build(a.gplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
mylegend <- g_legend(phv)
# Two panels side by side, with the shared legend in a narrow bottom row.
p3 <- grid.arrange(arrangeGrob(phv + theme(legend.position = "none"),
pmr + theme(legend.position = "none"),
nrow = 1),
mylegend, nrow = 2, heights = c(5, 1) )
p3
| /dst_plot.R | no_license | cx1027/DST | R | false | false | 13,044 | r | library(dplyr)
library(stringr)
library(ggplot2)
############ settings ##############
## Each experiment is a pair: c(<optimal-path file>, <raw result CSV>).
#maze4 <- c('maze4_path_weight100_weight30.csv','Maze4 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze4 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze4 <- c('maze4_path_weight100_weight30.csv','maze4 - 201801222157 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze5 <- c('maze5_path_weight100_weight30.csv','nxcs.testbed.maze5_weighted_sum - 201801222158 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze5 - 201801162151 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv')
maze6 <- c('maze6_path_weight100_weight30.csv','maze6 - 201801222201 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv') #Maze6 - 201801151104 - Trial 0 - TRIAL_NUM - 6000 - TEST.csv.csv')
dst <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201803311404 - Trial 0 - TRIAL_NUM - 25000 - TEST.csv')
dst2 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201804182045 - Trial 0 - TRIAL_NUM - 25000 - TEST.csv')
#dst3 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805212152 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst3 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262102 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst32 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262103cp2 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
dst33 <- c('dst_path.csv','nxcs.testbed.dst_weighted_sum - 201805262103cp1 - Trial 0 - TRIAL_NUM - 150000 - TEST.csv')
## Only rows with Timestamp <= upperBound enter the analysis below.
upperBound <- 6000
# traceWeightFilter <- c('0.040000|0.960000'
# , '0.480000|0.520000'
# #, '0.520000|0.480000'
# , '0.960000|0.040000') #c('0.000000|1.000000', '0.560000|0.440000', '1.000000|0.000000')
#
# plot.labels <- list(expression(paste(lambda[1],'=0.04, 0.96',' ',sep=''))
# , expression(paste(lambda[2],'=0.48, 0.52',' ',sep=''))
# #, expression(paste(lambda[3],'=0.52, 0.48',sep=''))
# , expression(paste(lambda[3],'=0.96, 0.04',' ',sep='')) )
traceWeightFilter <- c("0.000000|1.000000",
"0.040000|0.960000",
"0.080000|0.920000",
"0.120000|0.880000",
"0.160000|0.840000"
, "0.200000|0.800000",
"0.240000|0.760000",
"0.280000|0.720000",
"0.320000|0.680000",
"0.360000|0.640000"
, "0.400000|0.600000",
"0.440000|0.560000",
"0.480000|0.520000",
"0.520000|0.480000",
"0.560000|0.440000"
, "0.600000|0.400000",
"0.640000|0.360000",
"0.680000|0.320000",
"0.720000|0.280000",
"0.760000|0.240000"
, "0.800000|0.200000",
"0.840000|0.160000",
"0.880000|0.120000",
"0.920000|0.080000",
"0.960000|0.040000"
, "1.000000|0.000000"
)
plot.upperBound <- 6000
plot.traceWeightFilter <- c(#"0.000000|1.000000",
"0.040000|0.960000",
#"0.080000|0.920000",
# "0.120000|0.880000",
# "0.160000|0.840000"
# , "0.200000|0.800000",
# "0.240000|0.760000",
# "0.280000|0.720000",
# "0.320000|0.680000",
# "0.360000|0.640000"
# , "0.400000|0.600000",
# "0.440000|0.560000",
"0.480000|0.520000",
"0.520000|0.480000",
# "0.560000|0.440000"
# , "0.600000|0.400000",
# "0.640000|0.360000",
# "0.680000|0.320000",
# "0.720000|0.280000",
# "0.760000|0.240000"
# , "0.800000|0.200000",
# "0.840000|0.160000",
# "0.880000|0.120000",
# "0.920000|0.080000",
"0.960000|0.040000"
# , "1.000000|0.000000"
)
plot.labels <- list(#expression(paste(lambda[0],'=0.0, 1.0',' ',sep=''))
#, expression(paste(lambda[1],'=0.11, 0.89',' ',sep=''))
expression(paste(lambda^1,'=0.04, 0.96',' ',sep=''))
#, expression(paste(lambda[3],'=0.33, 0.67',' ',sep=''))
, expression(paste(lambda^2,'=0.48, 0.52',' ',sep=''))
, expression(paste(lambda^3,'=0.52, 0.48',' ',sep=''))
#, expression(paste(lambda[6],'=0.67, 0.33',' ',sep=''))
, expression(paste(lambda^4,'=0.96, 0.04',' ',sep=''))
#, expression(paste(lambda[8],'=0.89, 0.11',' ',sep=''))
#, expression(paste(lambda[9],'=1.0, 0.0',' ',sep=''))
)
plot.targetReward <- c('200.000000|400.000000')
##################
mazeToRun <- dst3
############# begin to read result #############
#setwd("/Users/773742/Documents/CEC2018/DST2018/")
setwd("C:/Users/martin.xie/Downloads/Result/dst3/")
# targetSteps <- read.csv(file = mazeToRun[1], header = TRUE, sep = ",", stringsAsFactors = FALSE)
# targetId <- paste(targetSteps$open, targetSteps$final, paste(as.character(targetSteps$step), '', sep = ''), sep = '*')
# targetSteps <- cbind(targetSteps, targetId)
#
#setwd("/Users/773742/Documents/CEC2018/DST2018/")
## Read the three independent runs of the experiment.
raw.data1 <- read.csv(file = mazeToRun[2]
                      , header = TRUE, sep = ","
                      , stringsAsFactors = FALSE
                      , row.names = NULL)
raw.data2 <- read.csv(file = dst32[2]
                      , header = TRUE, sep = ","
                      , stringsAsFactors = FALSE
                      , row.names = NULL)
raw.data3 <- read.csv(file = dst33[2]
                      , header = TRUE, sep = ","
                      , stringsAsFactors = FALSE
                      , row.names = NULL)
## BUG FIX: the original called rbind(raw.data, raw.data2) before 'raw.data'
## existed (raw.data1 was read but never used), so the script only worked
## when a stale 'raw.data' was left over in the workspace.  Stack all three
## runs starting from raw.data1.
raw.data <- rbind(raw.data1, raw.data2)
raw.data <- rbind(raw.data, raw.data3)
#h5data <- head(raw.data,10000)
## Keep only the columns needed downstream, restricted to the configured
## weight vectors and to learning problems up to 'upperBound'.
data <- raw.data %>%
select(TrailNumber, Timestamp, TargetWeight, TraceWeight, obj_r1, OpenState, FinalState, steps, hyperVolumn, path) %>%
filter(TraceWeight %in% traceWeightFilter
, Timestamp <= upperBound)
#write.csv(data, file = paste('trim_2_',mazeToRun[2]),row.names=FALSE)
## release memory
#rm(raw.data)
################ check if uid in final state pair ###############
## Unique id per (open state, final state, step count) triple.
uid <- paste(data$OpenState, data$FinalState, data$steps, sep = "*")
data <- cbind(data, uid)
####match by preset open/final/step
#data$match <- ifelse(data$uid %in% targetSteps$targetId, 1, 0)
rm(uid)
####match by preset open/final/step
## A row counts as a match (optimal path) when the recorded path string is
## short; the 160-character cutoff looks experiment-specific - TODO confirm.
data$match <- ifelse(nchar(data$path)>160,0,1)
################ calculate match rate ###############
## Per trial / timestamp / weight vector: fraction of test episodes that
## found an optimal path, plus the mean hypervolume.
result <- data %>%
group_by(TrailNumber, Timestamp,TargetWeight,TraceWeight ) %>%
summarise(groupRow = n()
, matchCount = sum(match)
, matchRate =matchCount/groupRow
, hyperVolumn = mean(hyperVolumn))
# uniqTrail <- unique(result$TrailNumber)
# pall <- rep(NULL, nrow(uniqTrail))
# pdata <- NULL
#
# for (i in uniqTrail) {
# pdata <- result %>%
# filter(TrailNumber == i
# #, TraceWeight == '5.000000|5.000000'
# #, TraceWeight == uniqWeight[i] #' 0.000000|1.000000'
# )
# ggplot(pdata, aes(x = Timestamp, y = matchRate, group = TraceWeight, color = TraceWeight, linetype = TraceWeight) )+
# geom_line() +
# labs(title = paste('Trail', i,sep=' '))
# }
#ggplot(pdata, aes(x = Timestamp, y = matchRate, group = TraceWeight, color = c('#41ae76', '#ef6548', '#4292c6'))) +
# geom_line()
################ calculate mean match rate and hyper volume ###############
## Average the per-trial figures over trials, keeping the min/max envelopes
## for the optional ribbon plots below.
retdata <- result %>%
group_by(Timestamp, TargetWeight, TraceWeight) %>%
summarise(matchRateAvg = mean(matchRate)
, hyperVolumnAvg = mean(hyperVolumn)
, maxmr = max(matchRate)
, minmr = min(matchRate)
, maxhv = max(hyperVolumn)
, minhv = min(hyperVolumn))
## Quick overview plot of the averaged match rate (not part of the final
## figure).
plt <- ggplot(retdata, aes(x = Timestamp, y = matchRateAvg, group = TraceWeight, color = TraceWeight, linetype = TraceWeight)) +
geom_line()
########### plot begin ###########
theme_set(theme_classic(base_size = 9))
## Line types, shapes and the colour palette shared by both final panels.
lty = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
lshp = c(1, 2, 3, 4, 5, 6, 7, 8, 9,10)
cbbPalette = c('#e41a1c', '#377eb8', '#4daf4a'
, '#984ea3', '#ff7f00', '#66ff66'
, '#a65628', '#f781bf', '#000000'
,'#f781bf')
################ plot data ###############
## Subset of the averaged results actually shown in the figure.
plot.data <- retdata %>% filter(TraceWeight %in% plot.traceWeightFilter
, Timestamp <= plot.upperBound
#, TargetWeight %in% plot.targetReward
)
#write.csv(plot.data, file = paste('plot_',mazeToRun[2]),row.names=FALSE)
################ plot hyper volume ###############
## Left panel (a): averaged hypervolume (THV) per weight vector over the
## number of learning problems.  Its legend is reused by g_legend() below.
phv <- ggplot(data = plot.data, aes(
x = Timestamp,
y = hyperVolumnAvg,
colour = TraceWeight,
group = TraceWeight,
linetype = TraceWeight
)) +
geom_line() +
#geom_ribbon(aes(ymin = minhv, ymax = maxhv, fill = TraceWeight), alpha = 0.2) +
labs(x = 'Number of Learning Problems\n(a)', y = NULL) +
ggtitle("THV") +
theme(axis.title.y = element_text(size = rel(1.1), face = "bold"), axis.title.x = element_text(size = rel(1.1), face = "bold"), title = element_text(size = rel(1.1), face = 'bold')) +
theme(legend.text = element_text(size = rel(1.5), face = "bold")) +
theme(legend.title = element_blank()) +
#theme(legend.position = c(0.63, 0.15))
theme(legend.position = 'bottom') + theme(panel.grid.major = element_line(size = 0.01, linetype = 'dotted',
                                                                          colour = "black"),
                                          panel.grid.minor = element_line(size = 0.001, linetype = 'dotted',
                                                                          colour = "black")) +
theme(legend.background = element_rect(fill = alpha('gray', 0.05))) +
theme(axis.text.x = element_text(size = rel(1.4)),
      axis.text.y = element_text(size = rel(1.4)),
      axis.line.x = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
      axis.line.y = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
      axis.title = element_text(size = rel(1.2), face = "bold")) +
scale_linetype_manual(values = lty, guide = "none") +
scale_colour_manual(values = cbbPalette, labels = plot.labels) +
guides(colour=guide_legend(override.aes=list(linetype=1:length(plot.traceWeightFilter))))
################ plot match rate ###############
## Right panel (b): averaged percentage of optimal paths (% OP) per weight
## vector over the number of learning problems.
pmr <- ggplot(data = plot.data, aes(
x = Timestamp,
y = matchRateAvg,
colour = TraceWeight,
group = TraceWeight,
linetype = TraceWeight)) +
geom_line() +
#geom_ribbon(aes(ymin = minmr, ymax = maxmr, fill = TraceWeight), alpha = 0.2) +
labs(x = 'Number of Learning Problems\n(b)', y = NULL) +
ggtitle("% OP") +
theme(axis.title.y = element_text(size = rel(1.1), face = "bold"), axis.title.x = element_text(size = rel(1.1), face = "bold"), title = element_text(size = rel(1.1), face = 'bold')) +
theme(legend.text = element_text(size = rel(1), face = "bold")) +
theme(legend.title = element_blank()) +
#theme(legend.position = c(0.63, 0.15))
theme(legend.position = 'bottom') + theme(panel.grid.major = element_line(size = 0.01, linetype = 'dotted',
                                                                          colour = "black"),
                                          panel.grid.minor = element_line(size = 0.001, linetype = 'dotted',
                                                                          colour = "black")) +
theme(legend.background = element_rect(fill = alpha('gray', 0.05))) +
theme(axis.text.x = element_text(size = rel(1.4)),
      axis.text.y = element_text(size = rel(1.4)),
      axis.line.x = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
      axis.line.y = element_line(size = rel(0.4),colour = 'black',linetype = 'solid'),
      axis.title = element_text(size = rel(1.2), face = "bold")) +
scale_linetype_manual(values = lty) +
scale_colour_manual(values = cbbPalette)
################ plot arrange plots into one ###############
library(gridExtra)
## Extract the legend grob ("guide-box") from a ggplot object so one legend
## can be shared by several panels in a grid.arrange() layout.
g_legend <- function(a.gplot) {
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  ## vapply() instead of sapply() so the grob-name lookup is always a
  ## character vector, even for an empty grob list.
  leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  if (length(leg) == 0) {
    stop("g_legend: plot has no legend ('guide-box' grob not found)", call. = FALSE)
  }
  tmp$grobs[[leg[1]]]
}
## Arrange the two panels side by side with the shared legend underneath
## (5:1 height ratio); the legend is taken from phv.
mylegend <- g_legend(phv)
p3 <- grid.arrange(arrangeGrob(phv + theme(legend.position = "none"),
                               pmr + theme(legend.position = "none"),
                               nrow = 1),
                   mylegend, nrow = 2, heights = c(5, 1) )
p3
|
## Select the best kernel for one view by single-view cluster analysis.
##
## Every candidate kernel function is applied to the view, the resulting
## kernel matrix is clustered and scored against the a-priori decomposition
## built from the module names; the candidate with the smallest path
## difference wins (smaller is better).
##
## datasets     : list of views; each view is a list with at least $mtx.
## index        : which view of `datasets` to evaluate.
## kernel_funcs : list of candidate kernel functions (view -> kernel matrix).
## Returns list(score, kernel_func, kernel_func_index) for the best
## candidate; kernel_func is NULL if every candidate errored.
singleview.clusterer <- function(datasets, index, kernel_funcs) {
  mydata <- datasets[[index]]
  # Build the dendrogram from data set module names
  priori.decomp <- build.dendrogam(rownames(mydata$mtx))
  best_kernel_func <- NULL
  best_kernel_func_index <- 1
  best_score <- Inf
  for (i in seq_along(kernel_funcs)) {
    kernel_func <- kernel_funcs[[i]]
    # Candidate kernels may fail on some inputs; skip those.
    K <- tryCatch(kernel_func(mydata), error = function(e) e)
    if (inherits(K, "error")) next
    ## BUG FIX: the original asserted all(rownames(K) == rownames(mydata)),
    ## but rownames() of a plain list is always NULL, making the check
    ## vacuous - and it ran before the error check.  Compare against the
    ## row names of the underlying matrix instead.
    stopifnot(all(rownames(K) == rownames(mydata$mtx)))
    result <- measure_cluster_analysis(K, priori.decomp)
    score <- result$diff
    # XXX the smaller the score, the better the score
    if (score < best_score) {
      best_score <- score
      best_kernel_func <- kernel_func
      best_kernel_func_index <- i
    }
  }
  list(score = best_score, kernel_func = best_kernel_func, kernel_func_index = best_kernel_func_index)
}
## Score one clustering: build a complete-linkage dendrogram from the kernel
## matrix and compare it against the a-priori decomposition.
## kernel        : module x module kernel (similarity) matrix.
## priori.decomp : reference decomposition with $graph (for the tree edit
##                 distance) and $tree (an ape phylo object).
## Only $diff (path difference, smaller is better) and $treeDistance are
## computed; baker/cophcor/Bk/mojosim are fixed at 0 to keep the historical
## result shape.
measure_cluster_analysis <- function(kernel, priori.decomp){
#compute distance from kernel
myDist <- squared.euclidean.distance.of.kernel.matrix(kernel)
myDist <- as.dist(myDist)
# pinned it to complete linkage
clusters <- hclust(myDist, method = 'complete')
# compute tree distance
treeDistance = compute_tree_edit_distance_for_hc(clusters, priori.decomp$graph)
clusters.tree <- ape::as.phylo(clusters)
priori.tree <- priori.decomp$tree
path.difference <- phangorn::path.dist(clusters.tree, priori.tree, check.labels = T)
return(list(baker=0, cophcor=0, Bk=0, diff=path.difference, mojosim = 0, treeDistance = treeDistance))
}
## Evaluate one multi-view fusion strategy: build a kernel per view (CFG,
## change frequency, lexical), fuse them with `fuse_multi_view_func`, and
## return the path-difference of each of the three fused kernels against the
## a-priori decomposition (smaller is better).
multiview.clusterer <- function(cfgsim_kernel_func, freqsim_kernel_func, lexsim_kernel_func, fuse_multi_view_func, datasets){
## The three views, in the fixed order used throughout this file.
cfg <- datasets[[1]]
freq <- datasets[[2]]
lex <- datasets[[3]]
#Build the dendrogram from data set module names
priori.decomp <- build.dendrogam(rownames(cfg$mtx))
cfgsim_kernel <- cfgsim_kernel_func(cfg)
freqsim_kernel <- freqsim_kernel_func(freq)
lexsim_kernel <- lexsim_kernel_func(lex)
## The fuse function returns a list of three fused kernels.
fused_Ks <- fuse_multi_view_func(list(cfgsim_kernel, freqsim_kernel, lexsim_kernel))
scores <- lapply(fused_Ks, function(K) measure_cluster_analysis(K, priori.decomp))
return(c(scores[[1]]$diff, scores[[2]]$diff, scores[[3]]$diff))
}
perform.clustering <- function(prname, rootFolder="org", pattern = "*.java"){
require(GeLaToLab)
require(proxy)
setwd("~/workspace")
#Read the set of classnames for running the experiment
# classnames <- unlist(read.table(paste("benchmark", prname , paste("MULTIVIEW", "classnames.txt" ,sep="/") , sep="/")) )
#Load the adjacency matrix
extensions= c("java/", "org/xml/", "javax/")
cfg <- import.bunch.matrix(paste("benchmark", prname ,"dep_graph.txt", sep="/"), exclude.ext=extensions)
#cfg <- read.table("benchmark/jedit-5.1.0/cfg.csv", sep=",", row.names = 1, header = TRUE, check.names = FALSE)
# cfg <- unweight.adjacency(cfg)
# cfg <- cfg[which(rownames(cfg) %in% classnames), which(colnames(cfg) %in% classnames)]
cfg <- cfg[order(rownames(cfg)), order(colnames(cfg))]
cfg[cfg > 0] <- 1
#Load the transaction frequency
freq <- read.table(paste("benchmark", prname , "mydata-change-freq-matrix.csv", sep="/"), sep=",", row.names = 1, header = TRUE, check.names = FALSE)
freq <- as.matrix(freq)
#freq <- freq[which(rownames(freq) %in% classnames),]
freq <- freq[order(rownames(freq)),]
freq[is.na(freq)] <- 0
freq[freq> 0] <- 1
no_transactions <- colSums(freq)
freq <- freq[, which(no_transactions > 0)]
no_transactions <- colSums(freq)
freq <- freq[, which(no_transactions <= 30)]
#Load the bag of words
BoW <- load_BoW(prname)
# apply tf-idf mechanism and eliminate features that are lower than some threshold,
# then remove those features from BoW
x <- apply_tf_idf(BoW)
dimnames(x) <- dimnames(BoW)
BoW <- x
# Clean up BoW
no_words <- colSums(BoW)
BoW <- BoW[, which(no_words > 0)]
no_words_in_document <- rowSums(BoW)
BoW <- BoW[which(no_words_in_document > 0),]
#LOAD the text of the source code
#Initialize the text of source files
setwd(paste("benchmark", prname, sep="/"))
txts <- read.text.directory(rootFolder, pattern)
txts <- txts[order(names(txts))]
print("dimensions before intersection")
print(dim(cfg))
print(dim(freq))
print(dim(BoW))
print(length(txts))
#INTERSECT
names <- intersect_all(rownames(cfg), rownames(freq), rownames(BoW), names(txts))
cfg <- cfg[names, names]
freq <- freq[names,]
freq <- freq[, colSums(freq) > 0]
BoW <- BoW[names,]
BoW <- BoW[, colSums(BoW) > 0]
txts <- txts[names]
print("dimensions after intersection")
print(dim(cfg))
print(dim(freq))
print(dim(BoW))
print(length(txts))
#lexsim kernels
#cosine normalized linear kernel
cosine_kernel_func <- function(x) { cos.sim(t(x$mtx))}
# polnomial degree: 1,2,3,4,5
polynomial_params <- c(1,2,3,4,5)
polynomial_kernel_func <- lapply(polynomial_params, function(p) {function(x) polynomial.kernel(x$mtx, p)})
#gaussian parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
gaussian_params <- c(10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2))
gaussian_kernel_func <- lapply(gaussian_params, function(p) {function(x) gaussian.kernel(x$mtx, p)})
# string kernels
# p_spectrum parameters: 1,2,5,10,15,20
p_spectrum_params <- c(1,2,5,10,15,20)
p_spectrum_kernel_func <- lapply(p_spectrum_params, function(p) {function(x) spectrum.string.kernel(x$txt, p)})
# constant kernel
constant_kernel_func <- function(x) { constant.string.kernel(x$txt) }
#exponential decay parameter: 1,2,5,10,15,20
# what is this about?!?
exponential_decay_params <- c(1,2,5,10,15,20)
exponential_decay_kernel_func <- lapply(exponential_decay_params, function(p) {function(x) exponential.decay.kernel(x$txt, p)})
#exponential diffusion parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
exponential_diffusion_params <- c(10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2))
exponential_diffusion_kernel_func <- lapply(exponential_diffusion_params, function(p) {function(x) calc.diffusion.kernel(x$mtx, p, TRUE)})
#laplacian exponential diffusion parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
# what is this about?!?
laplacian_exponential_diffusion_params <- c(1,2,5,10,15,20)
laplacian_exponential_diffusion_kernel_func <- lapply(laplacian_exponential_diffusion_params,
function(p) {function(x) compute.exponential.diffusion.kernel(x$mtx, p)})
# commute time kernel
commute_time_kernel_func <- function(x) {
x <- x$mtx
if (!isSymmetric(x)){
x <- (x + t(x))/2
}
d <- apply(abs(x),1,sum)
D <- diag(d)
compute.avg.commute.time.kernel(x, D)
}
# exponential_decay_kernel_func
lex_kernel_parameters_list <- list(0, polynomial_params, p_spectrum_params, 0)
lexsim_kernel_funcs <- unlist(list(cosine_kernel_func, polynomial_kernel_func, p_spectrum_kernel_func, constant_kernel_func))
freq_kernel_parameters_list <- list(polynomial_params, gaussian_params)
freqsim_kernel_funcs <- unlist(list(polynomial_kernel_func, gaussian_kernel_func))
#commute_time_kernel_func
cfg_kernel_parameters_list <- list(exponential_diffusion_params, laplacian_exponential_diffusion_params)
cfgsim_kernel_funcs <- unlist(list(exponential_diffusion_kernel_func, laplacian_exponential_diffusion_kernel_func))
add_kernel_func <- function(Ks) {
r <- add.kernels(Ks)
list(r, r, r)
}
product_kernel_func <- function(Ks) {
r <- product.kernels(Ks)
list(r, r, r)
}
#perform nested cross validation
datasets = list(cfg=list(mtx=cfg), freq=list(mtx=freq), lex=list(mtx=BoW, txt=txts))
cfg_single_view <- singleview.clusterer(datasets, 1, cfgsim_kernel_funcs)
freq_single_view <- singleview.clusterer(datasets, 2, freqsim_kernel_funcs)
lex_single_view <- singleview.clusterer(datasets, 3, lexsim_kernel_funcs)
cfgsim <- cfg_single_view$score
freqsim <- freq_single_view$score
lexsim <- lex_single_view$score
best_cfg_eval_func <- cfg_single_view$kernel_func
best_freq_eval_func <- freq_single_view$kernel_func
best_lex_eval_func <- lex_single_view$kernel_func
cotraining_kernel_func <- function(Ks) {
cotraining(Ks, 50)
}
MKL_multiview_fuse_funcs <- list(add_kernel_func, product_kernel_func, cotraining_kernel_func, rgcca_func)
MKL.multiview.clusterers <- lapply(MKL_multiview_fuse_funcs, function(MKL_multiview_fuse_func) {
function(datasets)
multiview.clusterer(best_cfg_eval_func, best_freq_eval_func, best_lex_eval_func, MKL_multiview_fuse_func, datasets)
})
MKL_add <- MKL.multiview.clusterers[[1]](datasets)
MKL_product <- MKL.multiview.clusterers[[2]](datasets)
co_training <- MKL.multiview.clusterers[[3]](datasets)
k_cca <- MKL.multiview.clusterers[[4]](datasets)
result = list(cfgsim=cfgsim, freqsim=freqsim, lexsim=lexsim, MKL_add=min(MKL_add), MKL_product=min(MKL_product), co_training=min(co_training), kcca=min(k_cca))
setwd("~/workspace")
#Create results directory, if it doesn't exist
dir.create(file.path(getwd(), paste("benchmark", prname, "Multiview/Results", sep="/")), showWarnings = FALSE)
print_clustering_results(prname, result, txt.file = "Multiview/Results/Clustering.txt", rnd=2)
#FIND BEST EVAL FUNCTIONS
best_cfg_eval_func_idx <- cfg_single_view$kernel_func_index
best_freq_eval_func_idx <- freq_single_view$kernel_func_index
best_lex_eval_func_idx <- lex_single_view$kernel_func_index
# evalFuncToString = function(func_index, type, parameters)
cfg_eval_string <- evalFuncToString(best_cfg_eval_func_idx, "cfg", cfg_kernel_parameters_list)
write(cfg_eval_string, file = paste("benchmark", prname, "Multiview/Results/CFG_EVAL.txt", sep="/"))
freq_eval_string <- evalFuncToString(best_freq_eval_func_idx, "freq", freq_kernel_parameters_list)
write(freq_eval_string, file = paste("benchmark", prname, "Multiview/Results/FREQ_EVAL.txt", sep="/"))
lex_eval_string <- evalFuncToString(best_lex_eval_func_idx, "lex", lex_kernel_parameters_list)
write(lex_eval_string, file = paste("benchmark", prname, "Multiview/Results/FREQ_EVAL.txt", sep="/"))
# result = list(cfgsim=cfgsim, freqsim=freqsim, lexsim=lexsim, MKL_add=min(MKL_add), MKL_product=min(MKL_product), co_training=min(co_training), kcca=min(k_cca))
all_string <- paste(round(cfgsim,2),cfg_eval_string,round(freqsim,2), freq_eval_string, round(lexsim,2), lex_eval_string, round(min(MKL_add),2), round(min(co_training),2), round(min(k_cca),2), sep="&")
write(all_string, file = paste("benchmark", prname, "Multiview/Results/All_EVAL.txt", sep="/"))
} | /R/multiview/clusterValidate.R | no_license | amirms/GeLaToLab | R | false | false | 11,212 | r | singleview.clusterer <- function(datasets, index, kernel_funcs) {
mydata = datasets[[index]]
#Build the dendrogram from data set module names
priori.decomp <- build.dendrogam(rownames(mydata$mtx))
best_kernel_func = NULL
best_kernel_func_index = 1
best_score = Inf
for (i in 1:length(kernel_funcs)) {
kernel_func <- kernel_funcs[[i]]
# print(kernel_func)
K <- tryCatch(kernel_func(mydata), error=function(e) e)
stopifnot(all(rownames(K) == rownames(mydata)))
if(inherits(K, "error")) next
result <- measure_cluster_analysis(K, priori.decomp)
score <- result$diff
# XXX the smaller the score, the better the score
if (score < best_score){
best_score = score
best_kernel_func = kernel_func
best_kernel_func_index = i
}
}
return(list(score=best_score, kernel_func = best_kernel_func, kernel_func_index=best_kernel_func_index))
}
## Score one clustering: build a complete-linkage dendrogram from the kernel
## matrix and compare it against the a-priori decomposition.
## kernel        : module x module kernel (similarity) matrix.
## priori.decomp : reference decomposition with $graph (for the tree edit
##                 distance) and $tree (an ape phylo object).
## Only $diff (path difference, smaller is better) and $treeDistance are
## computed; baker/cophcor/Bk/mojosim are fixed at 0 to keep the historical
## result shape expected by callers.
measure_cluster_analysis <- function(kernel, priori.decomp) {
  # Turn the kernel (similarity) matrix into squared Euclidean distances.
  myDist <- squared.euclidean.distance.of.kernel.matrix(kernel)
  myDist <- as.dist(myDist)
  # Pinned to complete linkage.
  clusters <- hclust(myDist, method = "complete")
  # Tree edit distance between the dendrogram and the reference graph.
  treeDistance <- compute_tree_edit_distance_for_hc(clusters, priori.decomp$graph)
  clusters.tree <- ape::as.phylo(clusters)
  priori.tree <- priori.decomp$tree
  # TRUE instead of the reassignable shorthand T.
  path.difference <- phangorn::path.dist(clusters.tree, priori.tree, check.labels = TRUE)
  return(list(baker = 0, cophcor = 0, Bk = 0, diff = path.difference,
              mojosim = 0, treeDistance = treeDistance))
}
## Evaluate one multi-view fusion strategy: build a kernel per view with the
## supplied per-view kernel functions, fuse the three kernels, and return
## the path-difference score of each fused kernel against the a-priori
## decomposition derived from the module names (smaller is better).
multiview.clusterer <- function(cfgsim_kernel_func, freqsim_kernel_func, lexsim_kernel_func, fuse_multi_view_func, datasets){
  # The views arrive in a fixed order: dependency graph, change frequency,
  # lexical.
  view.cfg <- datasets[[1]]
  view.freq <- datasets[[2]]
  view.lex <- datasets[[3]]
  reference <- build.dendrogam(rownames(view.cfg$mtx))
  # One kernel per view, fused into a list of three kernels.
  fused_Ks <- fuse_multi_view_func(list(cfgsim_kernel_func(view.cfg),
                                        freqsim_kernel_func(view.freq),
                                        lexsim_kernel_func(view.lex)))
  per_kernel <- lapply(fused_Ks, function(K) measure_cluster_analysis(K, reference))
  c(per_kernel[[1]]$diff, per_kernel[[2]]$diff, per_kernel[[3]]$diff)
}
perform.clustering <- function(prname, rootFolder="org", pattern = "*.java"){
require(GeLaToLab)
require(proxy)
setwd("~/workspace")
#Read the set of classnames for running the experiment
# classnames <- unlist(read.table(paste("benchmark", prname , paste("MULTIVIEW", "classnames.txt" ,sep="/") , sep="/")) )
#Load the adjacency matrix
extensions= c("java/", "org/xml/", "javax/")
cfg <- import.bunch.matrix(paste("benchmark", prname ,"dep_graph.txt", sep="/"), exclude.ext=extensions)
#cfg <- read.table("benchmark/jedit-5.1.0/cfg.csv", sep=",", row.names = 1, header = TRUE, check.names = FALSE)
# cfg <- unweight.adjacency(cfg)
# cfg <- cfg[which(rownames(cfg) %in% classnames), which(colnames(cfg) %in% classnames)]
cfg <- cfg[order(rownames(cfg)), order(colnames(cfg))]
cfg[cfg > 0] <- 1
#Load the transaction frequency
freq <- read.table(paste("benchmark", prname , "mydata-change-freq-matrix.csv", sep="/"), sep=",", row.names = 1, header = TRUE, check.names = FALSE)
freq <- as.matrix(freq)
#freq <- freq[which(rownames(freq) %in% classnames),]
freq <- freq[order(rownames(freq)),]
freq[is.na(freq)] <- 0
freq[freq> 0] <- 1
no_transactions <- colSums(freq)
freq <- freq[, which(no_transactions > 0)]
no_transactions <- colSums(freq)
freq <- freq[, which(no_transactions <= 30)]
#Load the bag of words
BoW <- load_BoW(prname)
# apply tf-idf mechanism and eliminate features that are lower than some threshold,
# then remove those features from BoW
x <- apply_tf_idf(BoW)
dimnames(x) <- dimnames(BoW)
BoW <- x
# Clean up BoW
no_words <- colSums(BoW)
BoW <- BoW[, which(no_words > 0)]
no_words_in_document <- rowSums(BoW)
BoW <- BoW[which(no_words_in_document > 0),]
#LOAD the text of the source code
#Initialize the text of source files
setwd(paste("benchmark", prname, sep="/"))
txts <- read.text.directory(rootFolder, pattern)
txts <- txts[order(names(txts))]
print("dimensions before intersection")
print(dim(cfg))
print(dim(freq))
print(dim(BoW))
print(length(txts))
#INTERSECT
names <- intersect_all(rownames(cfg), rownames(freq), rownames(BoW), names(txts))
cfg <- cfg[names, names]
freq <- freq[names,]
freq <- freq[, colSums(freq) > 0]
BoW <- BoW[names,]
BoW <- BoW[, colSums(BoW) > 0]
txts <- txts[names]
print("dimensions after intersection")
print(dim(cfg))
print(dim(freq))
print(dim(BoW))
print(length(txts))
#lexsim kernels
#cosine normalized linear kernel
cosine_kernel_func <- function(x) { cos.sim(t(x$mtx))}
# polnomial degree: 1,2,3,4,5
polynomial_params <- c(1,2,3,4,5)
polynomial_kernel_func <- lapply(polynomial_params, function(p) {function(x) polynomial.kernel(x$mtx, p)})
#gaussian parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
gaussian_params <- c(10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2))
gaussian_kernel_func <- lapply(gaussian_params, function(p) {function(x) gaussian.kernel(x$mtx, p)})
# string kernels
# p_spectrum parameters: 1,2,5,10,15,20
p_spectrum_params <- c(1,2,5,10,15,20)
p_spectrum_kernel_func <- lapply(p_spectrum_params, function(p) {function(x) spectrum.string.kernel(x$txt, p)})
# constant kernel
constant_kernel_func <- function(x) { constant.string.kernel(x$txt) }
#exponential decay parameter: 1,2,5,10,15,20
# what is this about?!?
exponential_decay_params <- c(1,2,5,10,15,20)
exponential_decay_kernel_func <- lapply(exponential_decay_params, function(p) {function(x) exponential.decay.kernel(x$txt, p)})
#exponential diffusion parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
exponential_diffusion_params <- c(10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2))
exponential_diffusion_kernel_func <- lapply(exponential_diffusion_params, function(p) {function(x) calc.diffusion.kernel(x$mtx, p, TRUE)})
#laplacian exponential diffusion parameter: 10^(-5),10^(-4),10^(-3),10^(-2),10^(-1),10^(0),10^(1),10^(2)
# what is this about?!?
laplacian_exponential_diffusion_params <- c(1,2,5,10,15,20)
laplacian_exponential_diffusion_kernel_func <- lapply(laplacian_exponential_diffusion_params,
function(p) {function(x) compute.exponential.diffusion.kernel(x$mtx, p)})
# commute time kernel
commute_time_kernel_func <- function(x) {
x <- x$mtx
if (!isSymmetric(x)){
x <- (x + t(x))/2
}
d <- apply(abs(x),1,sum)
D <- diag(d)
compute.avg.commute.time.kernel(x, D)
}
# exponential_decay_kernel_func
lex_kernel_parameters_list <- list(0, polynomial_params, p_spectrum_params, 0)
lexsim_kernel_funcs <- unlist(list(cosine_kernel_func, polynomial_kernel_func, p_spectrum_kernel_func, constant_kernel_func))
freq_kernel_parameters_list <- list(polynomial_params, gaussian_params)
freqsim_kernel_funcs <- unlist(list(polynomial_kernel_func, gaussian_kernel_func))
#commute_time_kernel_func
cfg_kernel_parameters_list <- list(exponential_diffusion_params, laplacian_exponential_diffusion_params)
cfgsim_kernel_funcs <- unlist(list(exponential_diffusion_kernel_func, laplacian_exponential_diffusion_kernel_func))
add_kernel_func <- function(Ks) {
r <- add.kernels(Ks)
list(r, r, r)
}
product_kernel_func <- function(Ks) {
r <- product.kernels(Ks)
list(r, r, r)
}
#perform nested cross validation
datasets = list(cfg=list(mtx=cfg), freq=list(mtx=freq), lex=list(mtx=BoW, txt=txts))
cfg_single_view <- singleview.clusterer(datasets, 1, cfgsim_kernel_funcs)
freq_single_view <- singleview.clusterer(datasets, 2, freqsim_kernel_funcs)
lex_single_view <- singleview.clusterer(datasets, 3, lexsim_kernel_funcs)
cfgsim <- cfg_single_view$score
freqsim <- freq_single_view$score
lexsim <- lex_single_view$score
best_cfg_eval_func <- cfg_single_view$kernel_func
best_freq_eval_func <- freq_single_view$kernel_func
best_lex_eval_func <- lex_single_view$kernel_func
cotraining_kernel_func <- function(Ks) {
cotraining(Ks, 50)
}
MKL_multiview_fuse_funcs <- list(add_kernel_func, product_kernel_func, cotraining_kernel_func, rgcca_func)
MKL.multiview.clusterers <- lapply(MKL_multiview_fuse_funcs, function(MKL_multiview_fuse_func) {
function(datasets)
multiview.clusterer(best_cfg_eval_func, best_freq_eval_func, best_lex_eval_func, MKL_multiview_fuse_func, datasets)
})
MKL_add <- MKL.multiview.clusterers[[1]](datasets)
MKL_product <- MKL.multiview.clusterers[[2]](datasets)
co_training <- MKL.multiview.clusterers[[3]](datasets)
k_cca <- MKL.multiview.clusterers[[4]](datasets)
result = list(cfgsim=cfgsim, freqsim=freqsim, lexsim=lexsim, MKL_add=min(MKL_add), MKL_product=min(MKL_product), co_training=min(co_training), kcca=min(k_cca))
setwd("~/workspace")
#Create results directory, if it doesn't exist
dir.create(file.path(getwd(), paste("benchmark", prname, "Multiview/Results", sep="/")), showWarnings = FALSE)
print_clustering_results(prname, result, txt.file = "Multiview/Results/Clustering.txt", rnd=2)
#FIND BEST EVAL FUNCTIONS
best_cfg_eval_func_idx <- cfg_single_view$kernel_func_index
best_freq_eval_func_idx <- freq_single_view$kernel_func_index
best_lex_eval_func_idx <- lex_single_view$kernel_func_index
# evalFuncToString = function(func_index, type, parameters)
cfg_eval_string <- evalFuncToString(best_cfg_eval_func_idx, "cfg", cfg_kernel_parameters_list)
write(cfg_eval_string, file = paste("benchmark", prname, "Multiview/Results/CFG_EVAL.txt", sep="/"))
freq_eval_string <- evalFuncToString(best_freq_eval_func_idx, "freq", freq_kernel_parameters_list)
write(freq_eval_string, file = paste("benchmark", prname, "Multiview/Results/FREQ_EVAL.txt", sep="/"))
lex_eval_string <- evalFuncToString(best_lex_eval_func_idx, "lex", lex_kernel_parameters_list)
# Bug fix: this previously wrote to FREQ_EVAL.txt, clobbering the
# frequency-view summary written just above; the lexical-view summary
# belongs in its own LEX_EVAL.txt file.
write(lex_eval_string, file = paste("benchmark", prname, "Multiview/Results/LEX_EVAL.txt", sep="/"))
# result = list(cfgsim=cfgsim, freqsim=freqsim, lexsim=lexsim, MKL_add=min(MKL_add), MKL_product=min(MKL_product), co_training=min(co_training), kcca=min(k_cca))
all_string <- paste(round(cfgsim,2),cfg_eval_string,round(freqsim,2), freq_eval_string, round(lexsim,2), lex_eval_string, round(min(MKL_add),2), round(min(co_training),2), round(min(k_cca),2), sep="&")
write(all_string, file = paste("benchmark", prname, "Multiview/Results/All_EVAL.txt", sep="/"))
} |
#' Performs Mercator or Lambert projection of data.
#'
#' Performs Mercator, Lambert or no projection of data, as a default it will
#' perform the projection used in current plot.
#'
#'
#' @param a,b The input data to be projected, may be given as two vectors or as
#' list attributes, lat and lon (x and y if projection = none).
#' @param scale The scale used for the projection, (m, km or miles). Default is
#' the scale defined in geopar (the scale defined when the plot is
#' initialized).
#' @param b0 if projection = mercator b0 is the center of the mercator
#' projection. If projection = "Lambert" b0 and b1 are lattitudes defining the
#' Lambert projection. Default are the b0 and b1 defined in geopar.
#' @param b1 Second defining latitute for Lambert projection
#' @param l1 The longitude defining the Lambert projection, default is the l1
#' defined in geopar.
#' @param projection The projection of the data, legal projections are
#' "Mercator", "Lambert" and "none".
#' @param col.names This has to be set to the default value of c("lon", "lat"),
#' otherwise projection will be set to "none".
#' @return The function returns a list containing if projection = "none" x and
#' y, if projection is "Mercator" or "Lambert" it includes the projection
#' (projection), the scale (scale), lat and lon and x and y (the
#' distance in scale from point (0,0) in spherical coordinates.
#' @seealso \code{\link{invProj}}, \code{\link{geopar}}, \code{\link{geoplot}}.
#' @examples
#'
#' # For an example of use for this function see i.e. init() where
#' # it is called:
#' \dontrun{
#' xgr <- Proj(lat, lon, scale, b0, b1, l1, projection)
#' }
#'
#' @export Proj
Proj <-
  function(a, b = 0, scale = getOption("geopar")$scale,
           b0 = getOption("geopar")$b0, b1 = getOption("geopar")$b1,
           l1 = getOption("geopar")$l1,
           projection = getOption("geopar")$projection,
           col.names = c("lon", "lat"))
{
  # Non lon/lat column names signal plain x/y data: force no projection.
  if (col.names[1] != "lon" || col.names[2] != "lat")
    projection <- "none"
  # List input: pull the coordinate vectors out of the appropriate
  # components (x/y for unprojected data, lat/lon otherwise).
  if (is.list(a)) {
    if (projection == "none") {
      b <- a$y
      a <- a$x
    } else {
      b <- a$lon
      a <- a$lat
    }
  }
  if (projection == "Lambert") {
    x <- lambert(a, b, b0, l1, b1, scale, old = TRUE)
  } else if (projection == "Mercator") {
    x <- mercator(a, b, scale, b0)
  } else if (projection == "none") {
    x <- list(x = a, y = b)
  } else {
    # Bug fix: an unrecognised projection previously fell through and the
    # function returned NULL invisibly; fail fast with a clear error.
    stop("unknown projection: ", projection, call. = FALSE)
  }
  # Return the projected coordinates explicitly (visible to callers);
  # previously the result was the invisible value of the assignment.
  x
}
| /R/Proj.R | no_license | Hafro/geo | R | false | false | 2,352 | r | #' Performs Mercator or Lambert projection of data.
#'
#' Performs Mercator, Lambert or no projection of data, as a default it will
#' perform the projection used in current plot.
#'
#'
#' @param a,b The input data to be projected, may be given as two vectors or as
#' list attributes, lat and lon (x and y if projection = none).
#' @param scale The scale used for the projection, (m, km or miles). Default is
#' the scale defined in geopar (the scale defined when the plot is
#' initialized).
#' @param b0 if projection = mercator b0 is the center of the mercator
#' projection. If projection = "Lambert" b0 and b1 are lattitudes defining the
#' Lambert projection. Default are the b0 and b1 defined in geopar.
#' @param b1 Second defining latitute for Lambert projection
#' @param l1 The longitude defining the Lambert projection, default is the l1
#' defined in geopar.
#' @param projection The projection of the data, legal projections are
#' "Mercator", "Lambert" and "none".
#' @param col.names This has to be set to the default value of c("lon", "lat"),
#' otherwise projection will be set to "none".
#' @return The function returns a list containing if projection = "none" x and
#' y, if projection is "Mercator" or "Lambert" it includes the projection
#' (projection), the scale (scale), lat and lon and x and y (the
#' distance in scale from point (0,0) in spherical coordinates.
#' @seealso \code{\link{invProj}}, \code{\link{geopar}}, \code{\link{geoplot}}.
#' @examples
#'
#' # For an example of use for this function see i.e. init() where
#' # it is called:
#' \dontrun{
#' xgr <- Proj(lat, lon, scale, b0, b1, l1, projection)
#' }
#'
#' @export Proj
Proj <-
  function(a, b = 0, scale = getOption("geopar")$scale,
           b0 = getOption("geopar")$b0, b1 = getOption("geopar")$b1,
           l1 = getOption("geopar")$l1,
           projection = getOption("geopar")$projection,
           col.names = c("lon", "lat"))
{
  # Non lon/lat column names signal plain x/y data: force no projection.
  if (col.names[1] != "lon" || col.names[2] != "lat")
    projection <- "none"
  # List input: pull the coordinate vectors out of the appropriate
  # components (x/y for unprojected data, lat/lon otherwise).
  if (is.list(a)) {
    if (projection == "none") {
      b <- a$y
      a <- a$x
    } else {
      b <- a$lon
      a <- a$lat
    }
  }
  if (projection == "Lambert") {
    x <- lambert(a, b, b0, l1, b1, scale, old = TRUE)
  } else if (projection == "Mercator") {
    x <- mercator(a, b, scale, b0)
  } else if (projection == "none") {
    x <- list(x = a, y = b)
  } else {
    # Bug fix: an unrecognised projection previously fell through and the
    # function returned NULL invisibly; fail fast with a clear error.
    stop("unknown projection: ", projection, call. = FALSE)
  }
  # Return the projected coordinates explicitly (visible to callers);
  # previously the result was the invisible value of the assignment.
  x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/20_watson.R
\name{watson}
\alias{watson}
\title{Watson Function}
\usage{
watson()
}
\value{
A list containing:
\itemize{
\item \code{fn} Objective function which calculates the value given input
parameter vector.
\item \code{gr} Gradient function which calculates the gradient vector
given input parameter vector.
\item \code{he} If available, the hessian matrix (second derivatives)
of the function w.r.t. the parameters at the given values.
\item \code{fg} A function which, given the parameter vector, calculates
both the objective value and gradient, returning a list with members
\code{fn} and \code{gr}, respectively.
\item \code{x0} Function returning the standard starting point, given
\code{n}, the number of variables desired.
}
}
\description{
Test function 20 from the More', Garbow and Hillstrom paper.
}
\details{
The objective function is the sum of \code{m} functions, each of \code{n}
parameters.
\itemize{
\item Dimensions: Number of parameters \code{2 <= n <= 31}, number of
summand functions \code{m = 31}.
\item Minima: \code{f = 2.28767...e-3} if \code{n = 6};
\code{f = 1.39976...e-6} if \code{n = 9};
\code{f = 4.72238...e-10} if \code{n = 12}
}
The number of parameters, \code{n}, in the objective function is not
specified when invoking this function. It is implicitly set by the length of
the parameter vector passed to the objective and gradient functions that this
function creates. See the 'Examples' section.
}
\examples{
wat <- watson()
# 6 variable problem using the standard starting point
x0_6 <- wat$x0(6)
res_6 <- stats::optim(x0_6, wat$fn, wat$gr, method = "L-BFGS-B")
#' # Standard starting point with 9 variables
x0_9 <- wat$x0(9)
res_9 <- stats::optim(x0_9, wat$fn, wat$gr, method = "L-BFGS-B")
# Create your own 3 variable starting point
res_3 <- stats::optim(c(0.1, 0.2, 0.3), wat$fn, wat$gr, method = "L-BFGS-B")
}
\references{
More', J. J., Garbow, B. S., & Hillstrom, K. E. (1981).
Testing unconstrained optimization software.
\emph{ACM Transactions on Mathematical Software (TOMS)}, \emph{7}(1), 17-41.
\doi{10.1145/355934.355936}
Kowalik, J. S., & Osborne, M. R. (1968).
\emph{Methods for unconstrained optimization problems.}
New York, NY: Elsevier North-Holland.
}
| /man/watson.Rd | permissive | jlmelville/funconstrain | R | false | true | 2,333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/20_watson.R
\name{watson}
\alias{watson}
\title{Watson Function}
\usage{
watson()
}
\value{
A list containing:
\itemize{
\item \code{fn} Objective function which calculates the value given input
parameter vector.
\item \code{gr} Gradient function which calculates the gradient vector
given input parameter vector.
\item \code{he} If available, the hessian matrix (second derivatives)
of the function w.r.t. the parameters at the given values.
\item \code{fg} A function which, given the parameter vector, calculates
both the objective value and gradient, returning a list with members
\code{fn} and \code{gr}, respectively.
\item \code{x0} Function returning the standard starting point, given
\code{n}, the number of variables desired.
}
}
\description{
Test function 20 from the More', Garbow and Hillstrom paper.
}
\details{
The objective function is the sum of \code{m} functions, each of \code{n}
parameters.
\itemize{
\item Dimensions: Number of parameters \code{2 <= n <= 31}, number of
summand functions \code{m = 31}.
\item Minima: \code{f = 2.28767...e-3} if \code{n = 6};
\code{f = 1.39976...e-6} if \code{n = 9};
\code{f = 4.72238...e-10} if \code{n = 12}
}
The number of parameters, \code{n}, in the objective function is not
specified when invoking this function. It is implicitly set by the length of
the parameter vector passed to the objective and gradient functions that this
function creates. See the 'Examples' section.
}
\examples{
wat <- watson()
# 6 variable problem using the standard starting point
x0_6 <- wat$x0(6)
res_6 <- stats::optim(x0_6, wat$fn, wat$gr, method = "L-BFGS-B")
#' # Standard starting point with 9 variables
x0_9 <- wat$x0(9)
res_9 <- stats::optim(x0_9, wat$fn, wat$gr, method = "L-BFGS-B")
# Create your own 3 variable starting point
res_3 <- stats::optim(c(0.1, 0.2, 0.3), wat$fn, wat$gr, method = "L-BFGS-B")
}
\references{
More', J. J., Garbow, B. S., & Hillstrom, K. E. (1981).
Testing unconstrained optimization software.
\emph{ACM Transactions on Mathematical Software (TOMS)}, \emph{7}(1), 17-41.
\doi{10.1145/355934.355936}
Kowalik, J. S., & Osborne, M. R. (1968).
\emph{Methods for unconstrained optimization problems.}
New York, NY: Elsevier North-Holland.
}
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## read.csv.sql() comes from the sqldf package; load it explicitly so the
## script runs standalone.
library(sqldf)

# Read only the two target dates straight from the raw semicolon-separated file.
data <- read.csv.sql("household_power_consumption.txt",
                     sql = "select * from file where date in ( '1/2/2007' , '2/2/2007')",
                     sep = ";")

# Combine date and time into a single timestamp column.
data$DIT <- paste(data$Date, data$Time)
data$DIT <- strptime(data$DIT, "%d/%m/%Y %H:%M:%S")

# Bug fix: the Date column is in day/month/year form (e.g. "1/2/2007"), so
# as.Date() needs an explicit format; the default ("%Y-%m-%d") fails here.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Render the histogram to a 480x480 PNG.
png("plot1.png", height = 480, width = 480)
hist(data$Global_active_power, main = "Global active power",
     col = "red", xlab = "Global active power (kilowatts)", ylim = c(0, 1200))
dev.off()
| /plot1.R | no_license | katarzynakedzierska/ExData_Plotting1 | R | false | false | 481 | r | data<- read.csv.sql("household_power_consumption.txt",
sql= "select * from file where date in ( '1/2/2007' , '2/2/2007')" ,
sep=";" )
# Combine date and time into a single timestamp column.
data$DIT<-paste(data$Date, data$Time)
data$DIT<-strptime(data$DIT, "%d/%m/%Y %H:%M:%S")
# Bug fix: the Date column is day/month/year, so as.Date needs an explicit
# format; the default ("%Y-%m-%d") fails on e.g. "1/2/2007".
data$Date<-as.Date(data$Date, format = "%d/%m/%Y")
png("plot1.png", height=480, width=480)
hist(data$Global_active_power, main="Global active power",
col="red", xlab="Global active power (kilowatts)", ylim=c(0,1200))
dev.off()
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##This funtion will define all the functions necessary to work on
##inverting the matrix in the cacheSolve Function
## Construct a special "matrix" object that can cache its inverse.
## The returned list exposes set/get for the matrix itself and
## seti/geti for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Store a freshly computed inverse in the cache.
  seti <- function(inverse) {
    cached_inverse <<- inverse
  }
  ## Retrieve the cached inverse (NULL when not yet computed).
  geti <- function() {
    cached_inverse
  }
  list(set = set, get = get, seti = seti, geti = geti)
}
## Write a short comment describing this function
## This function will determine whether the matrix has already been
##inverted or not. If not then the appropriate funtion is called which
##are defined in the makeCacheMatrix function and then use solve
##function to invert the matrix.
## Compute the inverse of a makeCacheMatrix object, reusing the cached
## result when one is available and caching a fresh one otherwise.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$geti()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$seti(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | rklarpit/ProgrammingAssignment2 | R | false | false | 1,094 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##This funtion will define all the functions necessary to work on
##inverting the matrix in the cacheSolve Function
## Construct a special "matrix" object that can cache its inverse.
## The returned list exposes set/get for the matrix itself and
## seti/geti for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Store a freshly computed inverse in the cache.
  seti <- function(inverse) {
    cached_inverse <<- inverse
  }
  ## Retrieve the cached inverse (NULL when not yet computed).
  geti <- function() {
    cached_inverse
  }
  list(set = set, get = get, seti = seti, geti = geti)
}
## Write a short comment describing this function
## This function will determine whether the matrix has already been
##inverted or not. If not then the appropriate funtion is called which
##are defined in the makeCacheMatrix function and then use solve
##function to invert the matrix.
## Compute the inverse of a makeCacheMatrix object, reusing the cached
## result when one is available and caching a fresh one otherwise.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$geti()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$seti(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
#' NA to Zeros
#'
#' Specify a dataframe and a vector of column names. If there are NAs in those columns,
#' convert them to 0s and return a dataframe. This is slower than using na_to_zero.R with purrr:: map().
#' @param df A dataframe.
#' @param cols A vector of quoted column names.
#' @param ... Other arguments
#' @keywords zero
#' @export
#' @examples
#' sample_df <- list(a = 1:3,
#' b = letters[1:3],
#' c = c(NA, "foo", "bar")) %>%
#' tibble::as_tibble()
#' cols_to_zero <- c("b", "c")
#' na_to_zero_df(sample_df, cols_to_zero)
## Replace NA values with 0 in the named columns of a data frame and
## return the modified data frame. Note: assigning 0 into a character
## column coerces it to the string "0".
na_to_zero_df <- function(df, cols, ...) {
  for (column_name in cols) {
    missing <- is.na(df[[column_name]])
    df[[column_name]][missing] <- 0
  }
  df
}
| /R/na_to_zero_df.R | no_license | aedobbyn/dobtools | R | false | false | 699 | r |
#' NA to Zeros
#'
#' Specify a dataframe and a vector of column names. If there are NAs in those columns,
#' convert them to 0s and return a dataframe. This is slower than using na_to_zero.R with purrr:: map().
#' @param df A dataframe.
#' @param cols A vector of quoted column names.
#' @param ... Other arguments
#' @keywords zero
#' @export
#' @examples
#' sample_df <- list(a = 1:3,
#' b = letters[1:3],
#' c = c(NA, "foo", "bar")) %>%
#' tibble::as_tibble()
#' cols_to_zero <- c("b", "c")
#' na_to_zero_df(sample_df, cols_to_zero)
## Replace NA values with 0 in the named columns of a data frame and
## return the modified data frame. Note: assigning 0 into a character
## column coerces it to the string "0".
na_to_zero_df <- function(df, cols, ...) {
  for (column_name in cols) {
    missing <- is.na(df[[column_name]])
    df[[column_name]][missing] <- 0
  }
  df
}
|
## Volcano plot of RNA-seq DEGs annotated with ChIP-seq DiffBind categories.
library(chipmine)
library(org.AFumigatus.Af293.eg.db)
library(ggpubr)
library(ggrepel)
library(cowplot)
# NOTE(review): rm(list = ls()) wipes the caller's workspace as a side
# effect; consider removing it and running the script in a fresh session.
rm(list = ls())
# source("E:/Chris_UM/GitHub/omics_util/RNAseq_scripts/DESeq2_functions.R")
# source(file = "E:/Chris_UM/GitHub/omics_util/GO_enrichment/topGO_functions.R")
# "CEA17_AA_vs_CEA17_C", "5A9_AA_vs_5A9_C", "5A9_C_vs_CEA17_C", "5A9_AA_vs_CEA17_AA"
## Analysis configuration: which DESeq2 contrast to integrate and where to
## write results.
analysisName <- "5A9_AA_vs_5A9_C"
diffPair <- "5A9_AA_vs_5A9_C"
outDir <- here::here("analysis", "integration_analysis", "diffbind_DESeq2_volcano", analysisName)
outPrefix <- paste(outDir, "/", analysisName, ".diffbind_goodPeaks", sep = "")
##################################################################################
## ChIP samples and the two DiffBind conditions being compared.
chipSamples <- c("CREEHA_CONTROL4", "CREEHA_10MMAA4")
diffbindCompare <- c("CREEHA_CONTROL", "CREEHA_10MMAA")
## Input files: DiffBind peak-target table and DESeq2 DEG table.
file_diffbindTargets <- here::here("analysis", "ChIPseq_analysis",
"peak_targets", "diffbind_allPeak_targets.tab")
file_deseq2 <- paste(diffPair, ".DEG_all.txt", sep = "")
file_degs <- here::here("analysis", "RNAseq_data", diffPair, file_deseq2)
## Create the output directory if it does not exist yet.
if(!dir.exists(outDir)){
dir.create(path = outDir, recursive = TRUE)
}
## Organism annotation database and GO mapping (currently unused below —
## presumably for downstream GO enrichment; confirm before removing).
orgDb <- org.AFumigatus.Af293.eg.db
file_goMap <- "E:/Chris_UM/Database/A_fumigatus_293_version_s03-m05-r06/A_fumigatus_Af293_orgDb/geneid2go.AFumigatus_Af293.topGO.map"
##################################################################################
## Group labels for the two DiffBind conditions being compared.
grp1 <- diffbindCompare[1]
grp2 <- diffbindCompare[2]
## DiffBind occupancy-category labels, e.g. "CREEHA_CONTROL:enriched".
## (Assignments normalised from `=` to `<-` for consistency with the file.)
grp1Enrich <- paste(grp1, ":enriched", sep = "")
grp2Enrich <- paste(grp2, ":enriched", sep = "")
grp1Specific <- paste(grp1, ":specific", sep = "")
grp2Specific <- paste(grp2, ":specific", sep = "")
## Lookup of per-sample column names, e.g. tfCols$hasPeak["CREEHA_CONTROL4"]
## == "hasPeak.CREEHA_CONTROL4", for every peak-related column prefix.
tfCols <- sapply(
  X = c("hasPeak", "peakId", "peakEnrichment", "peakPval", "peakQval", "peakSummit", "peakDist", "summitDist",
        "peakType", "bidirectional", "featureCovFrac", "relativeSummitPos", "peakRegion", "peakPosition",
        "peakCoverage", "pvalFiltered"),
  FUN = function(x){ structure(paste(x, ".", chipSamples, sep = ""), names = chipSamples) },
  simplify = FALSE, USE.NAMES = TRUE  # TRUE/FALSE spelled out (T/F are reassignable)
)
## DEG classification columns and thresholds.
fdr_col <- "padj"
lfc_col <- "shrinkLog2FC"
ylimit <- 50          # cap for the -log10(FDR) axis
xlimit <- c(-5, 5)    # cap for the log2 fold-change axis
FDR_cut <- 0.05
lfc_cut <- 0.585      # ~log2(1.5)
up_cut <- lfc_cut
down_cut <- lfc_cut * -1
##################################################################################
## prepare dataset
## DiffBind peak-to-gene targets: drop raw summit-sequence columns and keep
## only peaks that survived the p-value filter in at least one sample.
diffbindRes <- suppressMessages(readr::read_tsv(file = file_diffbindTargets, col_names = TRUE)) %>%
  dplyr::select(-starts_with("summitSeq."))
diffbindRes <- dplyr::filter(diffbindRes, pvalFilteredN > 0)
## DESeq2 results for the chosen contrast.
degData <- suppressMessages(readr::read_tsv(file = file_degs, col_names = TRUE))
## Join DEGs with peak targets; genes without a peak get hasPeak.* = FALSE,
## then classify each gene by FDR and shrunken log2 fold-change.
mergedData <- dplyr::left_join(x = degData, y = diffbindRes, by = c("geneId" = "geneId")) %>%
  tidyr::replace_na(purrr::set_names(list(FALSE, FALSE), unname(tfCols$hasPeak[chipSamples]))) %>%
  dplyr::mutate(
    categoryRNAseq = dplyr::case_when(
      !! as.name(fdr_col) < !! FDR_cut & !! as.name(lfc_col) >= !! up_cut ~ "Significant Up",
      !! as.name(fdr_col) < !! FDR_cut & !! as.name(lfc_col) <= !! down_cut ~ "Significant Down",
      !! as.name(fdr_col) < !! FDR_cut ~ "Significant",
      TRUE ~ "Non-significant"
    )
  )
## store data
## Bug fix: paste() with its default separator inserted a space into the
## file name ("<prefix> .data.tab"); use paste0() instead.
readr::write_tsv(x = mergedData, path = paste0(outPrefix, ".data.tab"))
##################################################################################
## select best peak for a gene which has multiple peaks
## Keep one row per gene: the peak with the smallest absolute distance in
## either sample, ties broken by the best p-value.
plotData <- mergedData %>%
dplyr::mutate(
minPeakDist = pmin(abs(!!as.name(tfCols$peakDist[chipSamples[1]])),
abs(!!as.name(tfCols$peakDist[chipSamples[2]])),
na.rm = TRUE
)
) %>%
dplyr::group_by(geneId) %>%
dplyr::arrange(abs(minPeakDist), desc(bestPval)) %>%
dplyr::slice(1L) %>%
dplyr::ungroup()
## generate summary stats
## Per-category gene counts for genes with a peak in each sample.
## NOTE(review): summary_tf1 and summary_tf2 are computed but never used
## below (summary_combined joins only the *_enrich/common tables).
summary_tf1 <- dplyr::filter(plotData, !! as.name(unname(tfCols$hasPeak[chipSamples[1]])) == TRUE) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! unname(tfCols$hasPeak[chipSamples[1]]) := n()) %>%
dplyr::ungroup()
summary_tf2 <- dplyr::filter(plotData, !! as.name(unname(tfCols$hasPeak[chipSamples[2]])) == TRUE) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! unname(tfCols$hasPeak[chipSamples[2]]) := n()) %>%
dplyr::ungroup()
## Gene counts per DEG category for peaks enriched/specific in each group.
summary_tf1_enrich <- dplyr::filter(plotData, categoryDiffbind %in% c(grp1Enrich, grp1Specific)) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! chipSamples[1] := n()) %>%
dplyr::ungroup()
summary_tf2_enrich <- dplyr::filter(plotData, categoryDiffbind %in% c(grp2Enrich, grp2Specific)) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! chipSamples[2] := n()) %>%
dplyr::ungroup()
## Gene counts per DEG category for peaks shared by both groups.
summary_common <- dplyr::filter(plotData, categoryDiffbind == "common") %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(common = n()) %>%
dplyr::ungroup()
## Combine totals with the per-group counts into one summary table.
summary_combined <- dplyr::group_by(plotData, categoryRNAseq) %>%
summarise(all_genes = n()) %>%
dplyr::ungroup() %>%
dplyr::left_join(y = summary_tf1_enrich, by = "categoryRNAseq") %>%
dplyr::left_join(y = summary_common, by = "categoryRNAseq") %>%
dplyr::left_join(y = summary_tf2_enrich, by = "categoryRNAseq")
## Append a column-sum "total" row and render as a ggpubr text table for the
## final figure.
summary_table <- dplyr::bind_rows(
summary_combined,
dplyr::summarise_if(.tbl = summary_combined, .predicate = is.numeric, .funs = sum, na.rm = TRUE) %>%
mutate(categoryRNAseq = "total")) %>%
ggpubr::ggtexttable(rows = NULL, theme = ttheme(base_style = "classic", base_size = 10))
##################################################################################
plotTitle <- paste(diffPair, "DEGs marked with ChIPseq target DiffBind change", sep = " ")
# NOTE(review): this markGenes value is dead code — it is overwritten with
# c() further down before it is ever used.
markGenes <- plotData$geneId[ which(plotData[, tfCols$hasPeak[chipSamples[1]], drop = TRUE ] ) ]
## squish the value to limits
## Cap -log10(FDR) and log2FC at the plot limits so extreme points sit on
## the axis edges instead of being dropped.
plotData$log10FDR <- -log10(plotData[[fdr_col]])
plotData$log10FDR <- scales::squish(x = plotData$log10FDR, range = c(0, ylimit))
plotData[[lfc_col]] <- scales::squish(x = plotData[[lfc_col]], range = xlimit)
## show any specific categoryRNAseq only
## Only significant up/down genes get DiffBind colouring in the plot.
significantData <- dplyr::filter(plotData, categoryRNAseq %in% c("Significant Up", "Significant Down"))
# significantData <- plotData
# targetColors <- c("specific:CREEHA_CONTROL:down" = "#a50026",
# "specific:CREEHA_CONTROL:noDiff" = "#f46d43",
# "common:down" = "#fee08b",
# "common:noDiff" = "yellow",
# "common:up" = "#e0f3f8",
# "specific:CREEHA_10MMAA:noDiff" = "#74add1",
# "specific:CREEHA_10MMAA:up" = "#006837")
#
# diffbindColors <- c("down" = "red", "noDiff" = "green", "up" = "blue")
# targetTypeShape <- c("specific:CREEHA_CONTROL" = 25,
# "common" = 22,
# "specific:CREEHA_10MMAA" = 24)
## Red-to-blue palette keyed by DiffBind category: group-1 specific/enriched,
## common, group-2 enriched/specific.
diffbindCategoryColors <- structure(
c("#e60000", "#ff4d4d", "green", "#6666ff", "#0000e6"),
names = c(grp1Specific, grp1Enrich, "common", grp2Enrich, grp2Specific))
#draw Volcano plot
## Grey background layer = all genes; coloured layer = significant genes
## with a DiffBind category; dashed lines mark the FDR and LFC cut-offs.
vpt <- ggplot(mapping = aes(x = !! as.name(lfc_col), y = log10FDR)) +
geom_hline(yintercept = -log10(FDR_cut), color = "black", linetype = "dashed") +
geom_vline(xintercept = -lfc_cut, color = "black", linetype = "dashed") +
geom_vline(xintercept = lfc_cut, color = "black", linetype = "dashed") +
geom_point(data = plotData, color = "grey", alpha=0.5, size=2) +
geom_point(
data = dplyr::filter(significantData, !is.na(categoryDiffbind)),
mapping = aes(color = categoryDiffbind), alpha=0.7, size=2
) +
scale_color_manual(
name = "DiffBind change", values = diffbindCategoryColors,
breaks = names(diffbindCategoryColors)
) +
# geom_point(
#   data = dplyr::filter(significantData, !is.na(categoryDiffbind)),
#   mapping = aes(color = diffBind, fill = diffBind, shape = peakOccupancy),
#   alpha=0.7, size=2
# ) +
# scale_color_manual(
#   name = "DiffBind change", values = diffbindColors, breaks = names(diffbindColors)
# ) +
# scale_fill_manual(
#   values = diffbindColors, breaks = names(diffbindColors), guide = FALSE
# ) +
# scale_shape_manual(
#   name = "peak occupancy", values = targetTypeShape, breaks = names(targetTypeShape)
# ) +
# NOTE(review): expand_scale() is superseded by expansion() in newer
# ggplot2 releases — confirm against the installed version.
scale_x_continuous(name = "log2(fold_change)", limits = xlimit, expand = expand_scale(mult = 0.02)) +
scale_y_continuous(name = "-log10(q-value)", limits = c(0, ylimit), expand = expand_scale(mult = 0.02)) +
guides(color = guide_legend(override.aes = list(size = 5)),
shape = guide_legend(override.aes = list(size = 5, fill = "black"))) +
theme_bw() +
theme(legend.background = element_rect(colour = "black"),
legend.text = element_text(size = 12),
legend.title = element_text(face = "bold", size = 14),
panel.grid = element_blank(),
plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
plot.margin = unit(rep(0.5, 4),"cm"),
axis.title = element_text(size = 30, face = "bold"),
axis.text = element_text(size = 30)) +
ggtitle(plotTitle)
# plot(vpt)
## mark gene of interest
# NOTE(review): this empties markGenes, discarding the hasPeak-based list
# computed earlier, so the highlighting block below never runs. Remove this
# line (or populate the vector) to enable gene labelling.
markGenes <- c()
showNames <- TRUE
## highlight genes of interest
if(length(markGenes) > 0){
tmpDf <- dplyr::filter(plotData, geneId %in% markGenes)
## draw the points
vpt <- vpt + geom_point(data = tmpDf, color = "black", shape = 1)
## show the gene lables
if(isTRUE(showNames)){
vpt <- vpt +
geom_text_repel(data = tmpDf, mapping = aes(label = geneId),
segment.color = '#cccccc',
segment.size = 1,
size = 4)
}
}
## Stack the volcano plot above the summary table and write PNG + PDF copies.
mergedPt <- ggarrange(
vpt, summary_table,
nrow = 2, heights = c(1, 0.2)
)
png(filename = paste(outPrefix, ".significant.volcano.png", sep = ""), width = 3500, height = 3500, res = 300)
plot(mergedPt)
dev.off()
pdf(file = paste(outPrefix, ".significant.volcano.pdf", sep = ""), width = 12, height = 12)
plot(mergedPt)
dev.off()
| /integrate_ChIP_RNA/diffbind_DESeq2_volcano.R | no_license | lakhanp1/omics_utils | R | false | false | 9,862 | r | library(chipmine)
library(org.AFumigatus.Af293.eg.db)
library(ggpubr)
library(ggrepel)
library(cowplot)
rm(list = ls())
# source("E:/Chris_UM/GitHub/omics_util/RNAseq_scripts/DESeq2_functions.R")
# source(file = "E:/Chris_UM/GitHub/omics_util/GO_enrichment/topGO_functions.R")
# "CEA17_AA_vs_CEA17_C", "5A9_AA_vs_5A9_C", "5A9_C_vs_CEA17_C", "5A9_AA_vs_CEA17_AA"
analysisName <- "5A9_AA_vs_5A9_C"
diffPair <- "5A9_AA_vs_5A9_C"
outDir <- here::here("analysis", "integration_analysis", "diffbind_DESeq2_volcano", analysisName)
outPrefix <- paste(outDir, "/", analysisName, ".diffbind_goodPeaks", sep = "")
##################################################################################
chipSamples <- c("CREEHA_CONTROL4", "CREEHA_10MMAA4")
diffbindCompare <- c("CREEHA_CONTROL", "CREEHA_10MMAA")
file_diffbindTargets <- here::here("analysis", "ChIPseq_analysis",
"peak_targets", "diffbind_allPeak_targets.tab")
file_deseq2 <- paste(diffPair, ".DEG_all.txt", sep = "")
file_degs <- here::here("analysis", "RNAseq_data", diffPair, file_deseq2)
if(!dir.exists(outDir)){
dir.create(path = outDir, recursive = TRUE)
}
orgDb <- org.AFumigatus.Af293.eg.db
file_goMap <- "E:/Chris_UM/Database/A_fumigatus_293_version_s03-m05-r06/A_fumigatus_Af293_orgDb/geneid2go.AFumigatus_Af293.topGO.map"
##################################################################################
grp1 <- diffbindCompare[1]
grp2 <- diffbindCompare[2]
grp1Enrich = paste(grp1, ":enriched", sep = "")
grp2Enrich = paste(grp2, ":enriched", sep = "")
grp1Specific = paste(grp1, ":specific", sep = "")
grp2Specific = paste(grp2, ":specific", sep = "")
tfCols <- sapply(
X = c("hasPeak", "peakId", "peakEnrichment", "peakPval", "peakQval", "peakSummit", "peakDist", "summitDist",
"peakType", "bidirectional", "featureCovFrac", "relativeSummitPos", "peakRegion", "peakPosition",
"peakCoverage", "pvalFiltered"),
FUN = function(x){ structure(paste(x, ".", chipSamples, sep = ""), names = chipSamples) },
simplify = F, USE.NAMES = T
)
fdr_col = "padj"
lfc_col = "shrinkLog2FC"
ylimit = 50
xlimit = c(-5, 5)
FDR_cut <- 0.05
lfc_cut <- 0.585
up_cut <- lfc_cut
down_cut <- lfc_cut * -1
##################################################################################
## prepare dataset
## DiffBind peak-to-gene targets: drop raw summit-sequence columns and keep
## only peaks that survived the p-value filter in at least one sample.
diffbindRes <- suppressMessages(readr::read_tsv(file = file_diffbindTargets, col_names = TRUE)) %>%
  dplyr::select(-starts_with("summitSeq."))
diffbindRes <- dplyr::filter(diffbindRes, pvalFilteredN > 0)
## DESeq2 results for the chosen contrast.
degData <- suppressMessages(readr::read_tsv(file = file_degs, col_names = TRUE))
## Join DEGs with peak targets; genes without a peak get hasPeak.* = FALSE,
## then classify each gene by FDR and shrunken log2 fold-change.
mergedData <- dplyr::left_join(x = degData, y = diffbindRes, by = c("geneId" = "geneId")) %>%
  tidyr::replace_na(purrr::set_names(list(FALSE, FALSE), unname(tfCols$hasPeak[chipSamples]))) %>%
  dplyr::mutate(
    categoryRNAseq = dplyr::case_when(
      !! as.name(fdr_col) < !! FDR_cut & !! as.name(lfc_col) >= !! up_cut ~ "Significant Up",
      !! as.name(fdr_col) < !! FDR_cut & !! as.name(lfc_col) <= !! down_cut ~ "Significant Down",
      !! as.name(fdr_col) < !! FDR_cut ~ "Significant",
      TRUE ~ "Non-significant"
    )
  )
## store data
## Bug fix: paste() with its default separator inserted a space into the
## file name ("<prefix> .data.tab"); use paste0() instead.
readr::write_tsv(x = mergedData, path = paste0(outPrefix, ".data.tab"))
##################################################################################
## select best peak for a gene which has multiple peaks
plotData <- mergedData %>%
dplyr::mutate(
minPeakDist = pmin(abs(!!as.name(tfCols$peakDist[chipSamples[1]])),
abs(!!as.name(tfCols$peakDist[chipSamples[2]])),
na.rm = TRUE
)
) %>%
dplyr::group_by(geneId) %>%
dplyr::arrange(abs(minPeakDist), desc(bestPval)) %>%
dplyr::slice(1L) %>%
dplyr::ungroup()
## generate summary stats
summary_tf1 <- dplyr::filter(plotData, !! as.name(unname(tfCols$hasPeak[chipSamples[1]])) == TRUE) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! unname(tfCols$hasPeak[chipSamples[1]]) := n()) %>%
dplyr::ungroup()
summary_tf2 <- dplyr::filter(plotData, !! as.name(unname(tfCols$hasPeak[chipSamples[2]])) == TRUE) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! unname(tfCols$hasPeak[chipSamples[2]]) := n()) %>%
dplyr::ungroup()
summary_tf1_enrich <- dplyr::filter(plotData, categoryDiffbind %in% c(grp1Enrich, grp1Specific)) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! chipSamples[1] := n()) %>%
dplyr::ungroup()
summary_tf2_enrich <- dplyr::filter(plotData, categoryDiffbind %in% c(grp2Enrich, grp2Specific)) %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(!! chipSamples[2] := n()) %>%
dplyr::ungroup()
summary_common <- dplyr::filter(plotData, categoryDiffbind == "common") %>%
dplyr::group_by(categoryRNAseq) %>%
summarise(common = n()) %>%
dplyr::ungroup()
summary_combined <- dplyr::group_by(plotData, categoryRNAseq) %>%
summarise(all_genes = n()) %>%
dplyr::ungroup() %>%
dplyr::left_join(y = summary_tf1_enrich, by = "categoryRNAseq") %>%
dplyr::left_join(y = summary_common, by = "categoryRNAseq") %>%
dplyr::left_join(y = summary_tf2_enrich, by = "categoryRNAseq")
summary_table <- dplyr::bind_rows(
summary_combined,
dplyr::summarise_if(.tbl = summary_combined, .predicate = is.numeric, .funs = sum, na.rm = TRUE) %>%
mutate(categoryRNAseq = "total")) %>%
ggpubr::ggtexttable(rows = NULL, theme = ttheme(base_style = "classic", base_size = 10))
##################################################################################
## Volcano plot of DEGs, colored by ChIPseq DiffBind category.
plotTitle <- paste(diffPair, "DEGs marked with ChIPseq target DiffBind change", sep = " ")
## NOTE(review): this markGenes value is dead code — it is overwritten by
## `markGenes <- c()` further below before it is ever used. Confirm intent.
markGenes <- plotData$geneId[ which(plotData[, tfCols$hasPeak[chipSamples[1]], drop = TRUE ] ) ]
## squish the value to limits: cap -log10(FDR) at ylimit and LFC at xlimit so
## extreme points sit on the plot border instead of being dropped.
plotData$log10FDR <- -log10(plotData[[fdr_col]])
plotData$log10FDR <- scales::squish(x = plotData$log10FDR, range = c(0, ylimit))
plotData[[lfc_col]] <- scales::squish(x = plotData[[lfc_col]], range = xlimit)
## show any specific categoryRNAseq only
significantData <- dplyr::filter(plotData, categoryRNAseq %in% c("Significant Up", "Significant Down"))
# significantData <- plotData
# targetColors <- c("specific:CREEHA_CONTROL:down" = "#a50026",
#                   "specific:CREEHA_CONTROL:noDiff" = "#f46d43",
#                   "common:down" = "#fee08b",
#                   "common:noDiff" = "yellow",
#                   "common:up" = "#e0f3f8",
#                   "specific:CREEHA_10MMAA:noDiff" = "#74add1",
#                   "specific:CREEHA_10MMAA:up" = "#006837")
#
# diffbindColors <- c("down" = "red", "noDiff" = "green", "up" = "blue")
# targetTypeShape <- c("specific:CREEHA_CONTROL" = 25,
#                      "common" = 22,
#                      "specific:CREEHA_10MMAA" = 24)
## Red-to-blue palette keyed on the five DiffBind categories (group1-specific
## through group2-specific).
diffbindCategoryColors <- structure(
  c("#e60000", "#ff4d4d", "green", "#6666ff", "#0000e6"),
  names = c(grp1Specific, grp1Enrich, "common", grp2Enrich, grp2Specific))
#draw Volcano plot: grey background points for all genes; colored points only
#for significant genes with a DiffBind category; dashed significance cutoffs.
vpt <- ggplot(mapping = aes(x = !! as.name(lfc_col), y = log10FDR)) +
  geom_hline(yintercept = -log10(FDR_cut), color = "black", linetype = "dashed") +
  geom_vline(xintercept = -lfc_cut, color = "black", linetype = "dashed") +
  geom_vline(xintercept = lfc_cut, color = "black", linetype = "dashed") +
  geom_point(data = plotData, color = "grey", alpha=0.5, size=2) +
  geom_point(
    data = dplyr::filter(significantData, !is.na(categoryDiffbind)),
    mapping = aes(color = categoryDiffbind), alpha=0.7, size=2
  ) +
  scale_color_manual(
    name = "DiffBind change", values = diffbindCategoryColors,
    breaks = names(diffbindCategoryColors)
  ) +
  # geom_point(
  #   data = dplyr::filter(significantData, !is.na(categoryDiffbind)),
  #   mapping = aes(color = diffBind, fill = diffBind, shape = peakOccupancy),
  #   alpha=0.7, size=2
  # ) +
  # scale_color_manual(
  #   name = "DiffBind change", values = diffbindColors, breaks = names(diffbindColors)
  # ) +
  # scale_fill_manual(
  #   values = diffbindColors, breaks = names(diffbindColors), guide = FALSE
  # ) +
  # scale_shape_manual(
  #   name = "peak occupancy", values = targetTypeShape, breaks = names(targetTypeShape)
  # ) +
  scale_x_continuous(name = "log2(fold_change)", limits = xlimit, expand = expand_scale(mult = 0.02)) +
  scale_y_continuous(name = "-log10(q-value)", limits = c(0, ylimit), expand = expand_scale(mult = 0.02)) +
  guides(color = guide_legend(override.aes = list(size = 5)),
         shape = guide_legend(override.aes = list(size = 5, fill = "black"))) +
  theme_bw() +
  theme(legend.background = element_rect(colour = "black"),
        legend.text = element_text(size = 12),
        legend.title = element_text(face = "bold", size = 14),
        panel.grid = element_blank(),
        plot.title = element_text(size = 14, face = "bold", hjust = 0.5),
        plot.margin = unit(rep(0.5, 4),"cm"),
        axis.title = element_text(size = 30, face = "bold"),
        axis.text = element_text(size = 30)) +
  ggtitle(plotTitle)
# plot(vpt)
## mark gene of interest
## NOTE(review): this reset discards the markGenes computed above, so the
## highlighting block below never runs — confirm this is intentional.
markGenes <- c()
showNames <- TRUE
## highlight genes of interest: circle them and optionally label with
## ggrepel so labels do not overlap.
if(length(markGenes) > 0){
  tmpDf <- dplyr::filter(plotData, geneId %in% markGenes)
  ## draw the points
  vpt <- vpt + geom_point(data = tmpDf, color = "black", shape = 1)
  ## show the gene lables
  if(isTRUE(showNames)){
    vpt <- vpt +
      geom_text_repel(data = tmpDf, mapping = aes(label = geneId),
                      segment.color = '#cccccc',
                      segment.size = 1,
                      size = 4)
  }
}
## Stack the volcano plot above the summary table (plot gets 5x the height)
## and write the combined figure as both PNG and PDF next to outPrefix.
mergedPt <- ggarrange(
  vpt, summary_table,
  nrow = 2, heights = c(1, 0.2)
)
png(filename = paste(outPrefix, ".significant.volcano.png", sep = ""), width = 3500, height = 3500, res = 300)
plot(mergedPt)
dev.off()
pdf(file = paste(outPrefix, ".significant.volcano.pdf", sep = ""), width = 12, height = 12)
plot(mergedPt)
dev.off()
|
#' To perform QTL mapping with Wen method
#'
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw1 linkage map matrix.
#' @param WalkSpeed Walk speed for Genome-wide Scanning.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param dir file path in your computer.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=TRUE)
#' DoResult<-Dodata(fileFormat="GCIM",
#' Population="F2",method="GCIM-QEI",
#' Model="Random",readraw,MultiEnv=TRUE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,
#' mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,
#' dir=tempdir())
## Build F2 conditional genotype-probability matrices (AA / Aa / aa) with
## R/qtl, inserting pseudo markers when the largest marker gap exceeds
## WalkSpeed. Writes a temporary rotated cross file ("listeria_rotY.csv")
## into `dir` so read.cross() can parse phenotype+genotype together.
## Returns genoname/mapRaw plus the probability matrices with (Ax/Hx/Bx)
## and without (Ax0/Hx0/Bx0) pseudo markers.
ZhouF<-function(pheRaw=NULL,genRaw=NULL,mapRaw1=NULL,WalkSpeed=NULL,CriLOD=NULL,dir=NULL){
  cl<-WalkSpeed   # step size (cM) used later for pseudo-marker insertion
  sLOD<-CriLOD    # critical LOD; only validated here, not otherwise used
  # yygg<-NULL
  # mx=NULL;phe=NULL;chr_name=NULL;v.map=NULL
  ## ---- input validation: warnings only; execution continues, so truly
  ## ---- missing inputs will still fail further down.
  if(is.null(genRaw)==TRUE){
    warning("Please input correct genotype dataset!")
  }
  if(is.null(pheRaw)==TRUE){
    warning("Please input correct phenotype dataset!")
  }
  if(is.null(mapRaw1)==TRUE){
    warning("Please input correct linkage map dataset!")
  }
  if((is.null(genRaw)==FALSE)&&(is.null(pheRaw)==FALSE)&&(is.null(mapRaw1)==FALSE)&&(cl<0)){
    warning("Please input Walk Speed: >0!")
  }
  if((is.null(genRaw)==FALSE)&&(is.null(pheRaw)==FALSE)&&(is.null(mapRaw1)==FALSE)&&(cl>0)&&(sLOD<0)){
    warning("Please input critical LOD score: >0!")
  }
  ## ---- renumber chromosomes as consecutive integers 1..chr; mm holds the
  ## ---- marker count per chromosome.
  mapRaw<-as.matrix(mapRaw1)
  chr_name<-unique(mapRaw[,2])
  chr_secon<-as.matrix(mapRaw[,2])
  mm<-numeric()
  map_chr<-numeric()
  for(i in 1:length(chr_name)){
    chr_i<-length(which(chr_secon[]==chr_name[i]))
    mm<-c(mm,chr_i)
    chr_name[i]<-i
    map_chr<-c(map_chr,rep(i,chr_i))
  }
  mm<-matrix(mm,ncol=1)
  map_chr<-matrix(map_chr,ncol=1)
  mapRaw[,2]<-map_chr
  chr<-length(chr_name)
  ## ---- scan chromosomes for duplicated marker positions; the loop stops
  ## ---- at the first chromosome that contains a duplicate.
  for(i in 1:chr){
    pos1<-as.matrix(mapRaw[which(mapRaw[,2]==i),3])
    delerow<-which(duplicated(pos1))
    if(length(delerow)!=0){
      break
    }
  }
  if(length(delerow)!=0){
    ## NOTE(review): only a warning is raised here and the else-branch is
    ## skipped, so Ax/Hx/Bx are never created and the final list() below
    ## will fail with "object not found" — confirm this is intended.
    warning("Please check linkage maps (linkage groups) to make sure whether all the marker positions are different!")
  }else{
    ## ---- assemble the rotated "csvr" cross file: 3 blank header rows with
    ## ---- phenotype names, transposed phenotypes, then map+genotype rows.
    blank<-matrix("",nrow=3,ncol=dim(pheRaw)[2])
    blank[1,]<-colnames(pheRaw)
    # p1<-as.matrix(pheRaw)
    p1<-pheRaw
    colnames(p1)<-NULL
    p1<-t(rbind(blank,p1))
    g1<-cbind(mapRaw,genRaw)
    colnames(p1)<-NULL
    colnames(g1)<-NULL
    pgcombine<-rbind(p1,g1)
    write.table(pgcombine,file=paste(dir,"/listeria_rotY",".csv",sep=""),sep=",",row.names = F,col.names = F)
    ########## calculate conditional probability for K matrix
    f2<-read.cross("csvr",dir,"listeria_rotY.csv",genotypes=c("A","H","B","D","C"),na.strings = "-",crosstype="f2")
    # f2<-jittermap(f2)# Jitter the marker positions in a genetic map so that no two markers are on top of each other # jittermap(object, amount=1e-6)
    simf2<-calc.genoprob(f2, step=0,error.prob = 0.0001)
    ########## Access to chromosome information
    genoname<-apply(mapRaw[,2:3],2,as.numeric)
    genoname<-data.frame(marker=mapRaw[,1],chr=genoname[,1],pos=genoname[,2])
    chr_n<-as.numeric(genoname[,2])
    chr_n<-chr_n[!duplicated(chr_n)]
    ## largest gap (cM) between adjacent markers over all chromosomes
    maxdistance<-0
    for(i in 1:length(chr_n)){
      maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
    }
    ########## calculate conditional probability for K matrix
    ## step=0 probabilities at observed markers only (used for kinship)
    Ax0<-NULL;Hx0<-NULL;Bx0<-NULL
    for(i in 1:length(chr_n)){
      map_gen<-simf2$geno[[i]]$prob
      A_gen<-round(map_gen[,,1],digits=15)
      H_gen<-round(map_gen[,,2],digits=15)
      B_gen<-round(map_gen[,,3],digits=15)
      Ax0<-cbind(Ax0,A_gen)
      Hx0<-cbind(Hx0,H_gen)
      Bx0<-cbind(Bx0,B_gen)
    }# dim(Ax0) # mn<-dim(Ax)[2]
    ########## Whether need to insert markers
    if(maxdistance>cl){#user's options
      ## re-run with step=cl to insert pseudo markers into large gaps
      simf2<-calc.genoprob(f2, step=cl,error.prob = 0.0001)
      Ax<-NULL;Hx<-NULL;Bx<-NULL;regenoname<-NULL
      for(i in 1:length(chr_n)){
        map_gen<-simf2$geno[[i]]$prob
        A_gen<-round(map_gen[,,1],digits=15)
        H_gen<-round(map_gen[,,2],digits=15)
        B_gen<-round(map_gen[,,3],digits=15)
        Ax<-cbind(Ax,A_gen)
        Hx<-cbind(Hx,H_gen)
        Bx<-cbind(Bx,B_gen)
        nowpos<-attr(map_gen,"map")
        nowbin<-names(nowpos)
        nowchr<-rep(as.numeric(chr_n[i]),length(nowpos))
        nowdata<-data.frame(marker=nowbin,chr=nowchr,pos=nowpos)
        regenoname<-rbind(regenoname,nowdata)
      }# dim(Ax)
      rownames(regenoname)<-NULL
      regenoname<-cbind(regenoname,seq(1,dim(regenoname)[1],1))
      colnames(regenoname)<-c("marker","chr","pos","id.all")
      # mn<-dim(regenoname)[1]
      genoname<-regenoname
    }else{
      ## gaps already small enough: reuse the step=0 matrices unchanged
      Ax<-Ax0;Hx<-Hx0;Bx<-Bx0
      genoname<-cbind(genoname,seq(1,nrow(genoname),by=1))
      colnames(genoname)<-c("marker","chr","pos","id.all")
    }
  }
  output<-list(genoname=genoname,mapRaw=mapRaw,
               Ax0=Ax0,Hx0=Hx0,Bx0=Bx0,Ax=Ax,Hx=Hx,Bx=Bx)# yygg=yygg,pheRaw=pheRaw,chr_n=chr_n,
  return(output)
}
#' The second step of Zhou method for single environment
#'
#' @param Model Random or fixed model.
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw linkage map matrix.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param NUM The serial number of the trait to be analyzed.
#' @param yygg covariate matrix.
#' @param genoname linkage map matrix with pseudo markers inserted.
#' @param Ax0 AA genotype matrix.
#' @param Hx0 Aa genotype matrix.
#' @param Bx0 aa genotype matrix.
#' @param Ax AA genotype matrix with pseudo markers inserted.
#' @param Hx Aa genotype matrix with pseudo markers inserted.
#' @param Bx aa genotype matrix with pseudo markers inserted.
#' @param dir file storage path.
#' @param CriDis The distance of optimization.
#' @param CLO Number of CPUs.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=FALSE)
#' DoResult<-Dodata(fileFormat="GCIM",Population="F2",
#' method="GCIM-QEI",Model="Random",
#' readraw,MultiEnv=FALSE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,dir=tempdir())
#' OutputZhou<-ZhouMethod_single_env(Model="Random",
#' pheRaw=DoResult$pheRaw,genRaw=DoResult$genRaw,
#' mapRaw=ZhouMatrices$mapRaw,CriLOD=3,NUM=1,
#' yygg=DoResult$yygg1,genoname=ZhouMatrices$genoname,
#' Ax0=ZhouMatrices$Ax0,Hx0=ZhouMatrices$Hx0,
#' Bx0=ZhouMatrices$Bx0,Ax=ZhouMatrices$Ax,
#' Hx=ZhouMatrices$Hx,Bx=ZhouMatrices$Bx,
#' dir=tempdir(),CriDis=5,CLO=2)
ZhouMethod_single_env<-function(Model=NULL,pheRaw=NULL,genRaw=NULL,mapRaw=NULL,CriLOD=NULL,NUM=NULL,yygg=NULL,genoname=NULL,
Ax0=NULL,Hx0=NULL,Bx0=NULL,Ax=NULL,Hx=NULL,Bx=NULL,dir=NULL,CriDis=NULL,CLO=NULL){# chr_n=NULL,
######################################### function #########################################
kinship_every<-function(coded_gen){
kk<-coded_gen%*%t(coded_gen)
kk<-kk/mn
return(kk)
}
  ## P3D ("population parameters previously determined") step: estimate the
  ## variance ratio lambda once from the null model y = X*beta + xi + e with
  ## var(xi) = K*sigma2*lambda, and return lambda together with the
  ## eigen-decomposition of K for reuse in every single-locus scan.
  p3d_method<-function(x,y,kinship){
    # estimate the value of lambda; kinship = K
    P3D<-function(x,y,kinship){
      # negative restricted log-likelihood in ga = log(lambda);
      # H = K*lambda + I is diagonalized by the eigenvectors of K, so only
      # the eigenvalues enter (diag_element).
      iter_p3d<-function(ga){
        lambda<-exp(ga)
        diag_element<-lambda*K_value+1
        logH<-sum(log(diag_element))
        RH_value<-1/(diag_element)
        yuRHyu<-sum(yu*RH_value*yu)
        yuRHxu<-matrix(0,nrow = 1,ncol = q)
        xuRHxu<-matrix(0,nrow = q,ncol = q)
        for(i in 1:q){
          yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
          for(j in 1:q){
            xuRHxu[i,j]<-sum(xu[,i]*RH_value*xu[,j])
          }
        }
        logxuRHxu<-log(det(xuRHxu))
        logyuPyu<-log(yuRHyu-yuRHxu%*%tcrossprod(solve(xuRHxu),yuRHxu))
        output<- -0.5*(logH+logxuRHxu+(n-q)*logyuPyu)
        return(-output)  # optim() minimizes, so return the negative
      }
      q<-ncol(x)
      n<-nrow(y)
      eigenK<-eigen(kinship)
      K_vector<-eigenK$vectors
      K_value<-eigenK$values
      # rm(eigenK);gc()
      # rotate data into the eigenbasis of K so H becomes diagonal
      xu<-crossprod(K_vector,x)
      yu<-crossprod(K_vector,y)
      ga0<-0
      optimp3d<-optim(par=ga0,fn=iter_p3d,hessian = TRUE,method="L-BFGS-B",lower=-50,upper=10)
      lambda<-exp(optimp3d$par)
      return(list(lambda,K_vector,K_value))
    }
    q<-ncol(x)
    value1<-P3D(x=x,y=y,kinship=kinship)
    lambda<-value1[[1]]
    uu<-as.matrix(value1[[2]])# The eigenvector of K matrix
    vv<-value1[[3]] # The eigenvalue of K matrix
    # RH_value<-1/(vv*lambda+1) # rm(value1);gc()
    return(list(lambda=lambda,k_vector=uu,k_value=vv))
  }
  ## Random-model single-locus genome scan. For each of the mn loci (three
  ## genotype columns each in zz), fit y = X*beta + Z_k*gamma_k + xi + e with
  ## polygenic lambda fixed from p3d_method, estimate the locus variance
  ## ratio lambdak by REML, and compute a gamma-approximated p-value for the
  ## 2-df additive/dominance contrast. Runs loci in parallel via foreach.
  single_locus_model<-function(x,y,zz,lambda,uu,vv,CLO){
    # L matrix mapping (AA,Aa,aa) genotype effects to (additive, dominance)
    value2<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
    # negative restricted log-likelihood in ga = log(lambdak) for one locus;
    # R = Z_k t(Z_k)*lambdak + K*lambda + I, handled via rotated quadratics.
    rqtl<-function(ga){
      lambdak<-exp(ga)
      Hk_term<-zuRHzu*lambdak+diag(1,3)
      logHk<-sum(log(lambda*vv+1))+log(det(Hk_term))
      RHk_term<-solve(Hk_term)*lambdak
      yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
      yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
      xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
      yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
      rqtl<- -0.5*( logHk + log(det(xuRHkxu)) + (n-q)*log(yuPkyu) )
      return(-rqtl)
    }
    # BLUP of the locus effect gamma, fixed effects beta, their variance,
    # and the locus variance component phi.k, given lambdak.
    gamma_estimate<-function(lambdak){
      Hk_term<-zuRHzu*lambdak+diag(1,3)
      RHk_term<-solve(Hk_term)*lambdak
      yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
      yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
      xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
      zuRHkxu<-t(xuRHzu)-zuRHzu%*%tcrossprod(RHk_term,xuRHzu)
      zuRHkyu<-t(yuRHzu)-zuRHzu%*%tcrossprod(RHk_term,yuRHzu)
      zuRHkzu<-zuRHzu-zuRHzu%*%RHk_term%*%zuRHzu
      beta<-solve(xuRHkxu,t(yuRHkxu))
      beta<-matrix(beta,ncol = 1)
      yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
      sigma<-yuPkyu/(n-q)
      sigma<-as.numeric(sigma)
      gamma<-lambdak*zuRHkyu-lambdak*zuRHkxu%*%beta
      var<-abs((lambdak*diag(1,3)-lambdak*zuRHkzu*lambdak)*sigma)
      stderr<-sqrt(diag(var))
      phi.k<-sigma*lambdak# locus variance component phi_k
      return(list(gamma,beta,var,phi.k,sigma,stderr))
    }
    # Gamma-distribution approximation of the Wald statistic's tail for the
    # contrast L%*%gamma (pn = contrast df); returns p and -log10(p).
    logp_estimate<-function(L, g_k, pn){
      var.1<-L%*%tcrossprod(var_ga,L)
      Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
      rank1<-qr(L)$rank
      tr1<-sum(diag(ginv(tcrossprod(L,L))%*%var.1))
      dk1<-rank1-tr1/phi_k
      p1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = FALSE)
      log1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = TRUE)
      log1<--log1*log10(exp(1))
      return(list(p=p1,log=log1))
    }
    RH_value<-1/(vv*lambda+1)
    mn<-ncol(zz)/3
    n<-nrow(y)
    q<-ncol(x)
    # rotate into the eigenbasis of K (uu) so H is diagonal
    xu<-crossprod(uu,x)
    yu<-crossprod(uu,y)
    # precompute quadratic forms shared by every locus
    yuRHyu<-sum(yu*RH_value*yu)
    yuRHxu<-matrix(0,nrow=1,ncol=q)
    xuRHxu<-matrix(0,nrow=q,ncol=q)
    for(i in 1:q){
      yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
      for(j in 1:q){
        xuRHxu[j,i]<-sum(xu[,j]*RH_value*xu[,i])
      }
    }
    # yuRHyu<-crossprod(yu,diag(RH_value))%*%yu
    # yuRHxu<-crossprod(yu,diag(RH_value))%*%xu
    # xuRHxu<-crossprod(xu,diag(RH_value))%*%xu
    # choose worker count: CLO if given, otherwise up to 10 (cores-1)
    if(is.null(CLO)==TRUE){
      cl.cores <- detectCores()
      if(cl.cores<=2){
        cl.cores<-1
      }else{
        if(cl.cores>10){
          cl.cores <-10
        }else{
          cl.cores <- detectCores()-1
        }
      }
      # cl.cores<-2
    }else{
      cl.cores <-CLO
    }
    cl <- makeCluster(cl.cores)
    registerDoParallel(cl)
    SR_i<-numeric()
    result<-foreach(SR_i=1:mn,.combine=rbind)%dopar%{
      # library(MASS)
      z<-zz[,((SR_i-1)*3+1):(SR_i*3),drop=F]
      zu<-crossprod(uu,z)
      xuRHzu<-matrix(0,nrow=q,ncol=3)
      yuRHzu<-matrix(0,nrow=1,ncol=3)
      zuRHzu<-matrix(0,nrow=3,ncol=3)
      for(i in 1:3){
        yuRHzu[,i]<-sum(yu*RH_value*zu[,i])
        for(j in 1:q){
          xuRHzu[j,i]<-sum(xu[,j]*RH_value*zu[,i])
        }
        for(j in 1:3){
          zuRHzu[j,i]<-sum(zu[,j]*RH_value*zu[,i])
        }
      }
      # xuRHzu<-crossprod(xu,diag(RH_value))%*%zu
      # yuRHzu<-crossprod(yu,diag(RH_value))%*%zu
      # zuRHzu<-crossprod(zu,diag(RH_value))%*%zu
      ga<-0
      par<-optim(par=ga,fn=rqtl,hessian = TRUE,method="L-BFGS-B",lower=-10,upper=10)
      lambdak<-exp(par$par)
      value3<-gamma_estimate(lambdak)
      gamma_k<-value3[[1]]
      beta<-value3[[2]]#estimate beta (y=X*beta+Z*gamma+xi+e)
      var_ga<-value3[[3]]
      phi_k<-value3[[4]]
      sigma_2<-value3[[5]]
      main_effect<-value2%*%gamma_k
      logvalue1<-logp_estimate(L=value2, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
      p1<-logvalue1$p
      log1<-logvalue1$log
      # NOTE(review): the column layout below assumes q == 1 (intercept-only
      # X after covariate reduction); with q > 1 cbind would recycle oddly.
      result<-cbind(lambdak,beta,matrix(gamma_k,1,3),p1,log1,phi_k,sigma_2)
    }
    stopCluster(cl)
    lambda_k<-result[,1]
    mu_beta<-result[,2]
    gamma_all<-result[,3:5]
    p1<-result[,6]
    log_p1<-result[,7]
    phi_k<-result[,8]
    sigma_2<-result[,9]
    return(list(lambda_k=lambda_k, fixed=mu_beta, gamma=gamma_all,
                p1=p1, log1=log_p1, phi_k=phi_k, sigma_2=sigma_2))
  }
  ## Fixed-model single-locus scan: treat each locus's three genotype
  ## effects as fixed, solve by generalized least squares with the
  ## polygenic structure absorbed through lambda/uu/vv, and test the 2-df
  ## additive/dominance contrast with a chi-square Wald statistic.
  single_locus_model_Fixed<-function(x,y,zz,lambda,uu,vv,CLO){
    # L matrix mapping (AA,Aa,aa) effects to (additive, dominance)
    value2<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
    # Wald chi-square p-value and -log10(p) for contrast L%*%gamma (df = pn)
    logp_estimate<-function(L, g_k, pn){
      var.1<-L%*%tcrossprod(var_ga,L)
      Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k# Test statistic
      p1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = FALSE)
      log1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = TRUE)
      log1<--log1*log10(exp(1))
      return(list(p=p1,log=log1))
    }
    Hsolve<-1/(vv*lambda+1)  # inverse of diagonalized H = K*lambda + I
    mn<-ncol(zz)/3
    n<-nrow(y)
    q<-ncol(x)
    # xu<-crossprod(uu,x)
    yu<-crossprod(uu,y)# rotate y into the eigenbasis of K
    # choose worker count: CLO if given, otherwise up to 10 (cores-1)
    if(is.null(CLO)==TRUE){
      cl.cores <- detectCores()
      if(cl.cores<=2){
        cl.cores<-1
      }else{
        if(cl.cores>10){
          cl.cores <-10
        }else{
          cl.cores <- detectCores()-1
        }
      }
    }else{
      cl.cores <-CLO
    }
    cl <- makeCluster(cl.cores)
    registerDoParallel(cl)
    SF_i<-numeric()
    result<-foreach(SF_i=1:mn,.combine=rbind)%dopar%{
      # library(MASS)
      z<-zz[,((SF_i-1)*3+1):(SF_i*3),drop=F]
      uxz<-crossprod(uu,cbind(x,z))
      # GLS estimates of (beta, gamma) jointly
      x_gamma<-ginv(t(uxz)%*%diag(Hsolve)%*%uxz)%*%t(uxz)%*%diag(Hsolve)%*%yu
      q<-qr(uxz)$rank
      sig_e2<-as.numeric(t(yu-uxz%*%x_gamma)%*%diag(Hsolve)%*%(yu-uxz%*%x_gamma)/(dim(uxz)[1]-q))
      x_gamma_covmatr<-sig_e2*ginv(t(uxz)%*%diag(Hsolve)%*%uxz)
      gamma<-x_gamma[-c(1:dim(x)[2])]
      var_ga<-x_gamma_covmatr[-c(1:dim(x)[2]),-c(1:dim(x)[2]),drop=F]
      main_effect<-value2%*%gamma
      logvalue1<-logp_estimate(L=value2, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
      p1<-logvalue1$p
      log1<-logvalue1$log
      result<-cbind(x_gamma[c(1:dim(x)[2])],matrix(gamma,1,3),p1,log1,sig_e2)
    }
    stopCluster(cl)
    mu_beta<-result[,1:dim(x)[2]]
    gamma_all<-result[,(dim(x)[2]+1):(dim(x)[2]+3)]
    p1<-result[,(dim(x)[2]+4)]
    log_p1<-result[,(dim(x)[2]+5)]
    sigma_2<-result[,(dim(x)[2]+6)]
    return(list(fixed=mu_beta, gamma=gamma_all, p1=p1, log1=log_p1, sigma_2=sigma_2))
  }
peak_selection<-function(log_value,genoname){
peak_pos<-function(Lod.temp){
m<-length(Lod.temp)
optids<-vector(length=0)
if(Lod.temp[1]>Lod.temp[2]) optids<-append(optids,1)
for(j in 2:(m-1)){
if ((Lod.temp[j-1]<Lod.temp[j]) & (Lod.temp[j]>Lod.temp[j+1])) {
optids<-append(optids,j)
}
}
if(Lod.temp[m]>Lod.temp[m-1]) optids<-append(optids,m)
return(optids)
}
chr_all<-as.matrix(genoname[,2])
chr_kind<-chr_all[!duplicated(chr_all)]
id_pos<-NULL
for(jjj in chr_kind){
now_id<-which(chr_all%in%jjj)
id_pos<-c(id_pos,now_id[peak_pos(log_value[now_id])])
}
return(sort(id_pos))
}
multi_peak_new<-function(gencoded,peak_id){
enk<-3
mut_peak_id<-NULL
term1<-seq(1,3,1)
for(i in 1:length(peak_id)){
mut_peak_id<-c(mut_peak_id,(rep(peak_id[i],enk)-1)*enk+term1)
}
return(list(z=gencoded[,sort(mut_peak_id)],order=sort(rep(peak_id,enk))))
}
  ## Prune the candidate peak set with LARS when there are at least n peaks
  ## (more candidates than observations): keep loci whose coefficients are
  ## nonzero after `larstep` LARS steps. Otherwise pass the peaks through
  ## unchanged. `y` is taken from the enclosing scope; `lars` comes from
  ## the lars package.
  Zhou_lars<-function(peak,CodeMatrix,n){
    multi_value<-multi_peak_new(CodeMatrix,peak)
    DesignMatrix<-multi_value$z
    order0<-multi_value$order# length(order0); length(peak_id)
    if(length(peak)>=n){
      # cap the number of LARS steps at (number of columns)/15
      larstep<-length(order0)%/%(3*5)
      lar_result<-lars(x=DesignMatrix, y=y, type = "lar",trace = FALSE, normalize = TRUE, intercept = TRUE, eps = .Machine$double.eps, use.Gram=FALSE,max.steps = larstep)
      # coefficients at the final LARS step
      lar_result.0<-lar_result$beta[nrow(lar_result$beta),]
      lar_pos0<-order0[which(lar_result.0!=0)]
      lar_pos<-lar_pos0[!duplicated(lar_pos0)]# length(lar_pos)
      multi_value1<-multi_peak_new(CodeMatrix,lar_pos)
      DesignMatrix1<-multi_value1$z # coefficient matrix of selected peak loci
      order1<-multi_value1$order
    }else{
      lar_pos<-peak
      DesignMatrix1<-DesignMatrix
      order1<-order0
    }# length(lar_pos)
    return(list(lar_pos=lar_pos,Matrix=DesignMatrix1,order=order1))
  }
  ## Sparse Bayesian learning (coordinate-descent) regression of y on fixed
  ## covariates x and marker matrix z; `t` tunes the shrinkage prior.
  ## Iterates until coefficient change < min.err or max.iter sweeps.
  ## Returns the iteration trace, final parameters, and per-marker BLUPs
  ## with Wald statistics and p-values.
  sblgwas<-function(x,y,z,t,max.iter=200,min.err=1e-6){
    x<-as.matrix(x)
    y<-as.matrix(y)
    z<-as.matrix(z)
    n<-length(y)
    q<-ncol(x)
    m<-ncol(z)
    # OLS start for the residual variance; beta itself restarts at zero
    b0<-solve(t(x)%*%x,tol=1e-50)%*%(t(x)%*%y)
    s2<-sum((y-x%*%b0)^2)/(n-q)
    b0<-matrix(0,q,1)
    b<-b0
    g0<-matrix(0,m,1)
    g<-g0
    lambda<-matrix(0,m,1)
    tau<-g0
    v<-g0
    # column sums of squares / cross-products, precomputed once
    xx<-NULL
    xy<-NULL
    for(i in 1:q){
      xx<-c(xx,sum(x[,i]^2))
      xy<-c(xy,sum(x[,i]*y))
    }
    zz<-NULL
    zy<-NULL
    for(k in 1:m){
      zz<-c(zz,sum(z[,k]^2))
      zy<-c(zy,sum(z[,k]*y))
    }
    d<-numeric(m)
    a<-matrix(0,n,1)   # running fitted values X*b + Z*g
    iter<-0
    err<-1e8
    my.iter<-NULL
    while(iter < max.iter & err > min.err){
      # update each fixed effect with the others held at current values
      for(i in 1:q){
        a<-a-x[,i]*b0[i]
        ai<-sum(x[,i]*a)
        b[i]<-(xy[i]-ai)/xx[i]
        a<-a+x[,i]*b[i]
      }
      df<-0
      # update each marker effect: closed-form tau from a quadratic in the
      # prior parameter, then ridge-type shrinkage of the effect
      for(k in 1:m){
        a<-a-z[,k]*g0[k]
        ak<-sum(z[,k]*a)
        c1<- -(t+3)*zz[k]^2
        c2<- -(2*t+5)*zz[k]+(zy[k]-ak)^2
        c3<- -(t+2)
        if( ((c2^2-4*c1*c3) < 0) | (c2 < 0) ){
          tau[k]<-0
        } else {
          tau[k]<-(-c2-sqrt(c2^2-4*c1*c3))/(2*c1)
        }
        lambda[k]<-tau[k]/s2
        g[k]<-lambda[k]*(zy[k]-ak)-lambda[k]^2*zz[k]*(zy[k]-ak)/(lambda[k]*zz[k]+1)
        d[k]<-lambda[k]*(zz[k]-lambda[k]*zz[k]^2/(lambda[k]*zz[k]+1))
        v[k]<-tau[k]-tau[k]*d[k]
        df<-df+d[k]
        a<-a+z[,k]*g[k]
      }
      # residual variance with effective df; fall back to n-q if df too big
      if((n-q-df) > 0){s2<-sum((y-a)^2)/(n-q-df)
      }else{
        s2<-sum((y-a)^2)/(n-q)
      }
      iter<-iter+1
      err<-sum((g-g0)^2)/m
      g0<-g
      b0<-b
      my.iter<-rbind(my.iter,cbind(iter,err,s2,t(b),t(g)))
    }
    my.parm<-data.frame(iter,err,s2,b,df)
    names(my.parm)<-c("iter","error","s2","beta","df")
    # Wald tests only where the effect variance is nonzero
    posv<-which(v!=0)
    m<-length(g)
    wald<-c(rep(0,m))
    gg<-g[posv]
    vv<-v[posv]
    wald[posv]<-gg^2/vv
    p<-pchisq(wald,1,lower.tail=FALSE)
    my.blup<-data.frame(g,v,wald,p)
    names(my.blup)<-c("gamma","vg","wald","p_wald")
    var.beta<-NULL
    for(i in 1:q){
      var.beta<-c(var.beta,paste("beta",i,sep=""))
    }
    var.gamma<-NULL
    for(k in 1:m){
      var.gamma<-c(var.gamma,paste("gamma",k,sep=""))
    }
    var.names<-c(c("iter","error","s2"),var.beta,var.gamma)
    my.iter<-data.frame(my.iter)
    names(my.iter)<-var.names
    out<-list(my.iter,my.parm,my.blup)
    names(out)<-c("iteration","parm","blup")
    return(out)
  }
selection<-function(posx,genoname,svrad){
chose_peak<-c(posx[1])
order_now<-1
while(order_now<length(posx)){
order_now<-order_now+1
repeat_pos<-which( abs(chose_peak-as.numeric(posx[order_now]))<=(svrad) )
if(length(repeat_pos)>0){
if_condition<-length(which( genoname[chose_peak[repeat_pos],2]==as.numeric(genoname[posx[order_now],2]) ))==0
if(if_condition){
chose_peak<-c(chose_peak,posx[order_now])
}
}else{
chose_peak<-c(chose_peak,posx[order_now])
}
}
return(chose_peak)
}
selection2<-function(posx1,posx2,genoname,svrad){
chose_peak<-NULL
order_now<-0
while(order_now<length(posx1)){
order_now<-order_now+1
repeat_pos<-which( abs(posx2-as.numeric(posx1[order_now]))<=(svrad) )
if(length(repeat_pos)>0){
if_condition<-length(which( genoname[posx2[repeat_pos],2]==as.numeric(genoname[posx1[order_now],2]) ))==0
if(if_condition){
chose_peak<-c(chose_peak,posx1[order_now])
}
}else{
chose_peak<-c(chose_peak,posx1[order_now])
}
}
return(chose_peak)
}
  ## Empirical-Bayes EM for the multi-locus model y = X*b + sum_k Z_k*g_k + e,
  ## where each locus k contributes mk = 3 genotype columns of z with its own
  ## variance v[k] (scaled inverse prior with parameter tau). Iterates until
  ## parameter change < err_max or 500 iterations. Returns fixed effects b,
  ## locus BLUPs u, 2-df contrast effects u1, sigma2, and per-locus p-values.
  ebayes_EM<-function(x,z,y,v0,v,tau,err_max){
    n<-nrow(z);k<-ncol(z)
    mk<-3; kn<-k/mk   # mk columns per locus, kn loci total
    v0<-as.numeric(v0)
    v<-matrix(v,ncol=1)
    # initial beta via (regularized) normal equations, with a
    # chol2inv -> solve -> ginv fallback chain for ill-conditioned X'X
    if(abs(min(eigen(crossprod(x,x))$values))<1e-6){
      try_b<-try({ b<-chol2inv(chol(crossprod(x,x)+diag(ncol(x))*1e-8))%*%crossprod(x,y) },silent=TRUE)
      if('try-error' %in% class(try_b)){
        try_c<-try({ b<-solve(crossprod(x,x))%*%crossprod(x,y) },silent=TRUE)
        if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%crossprod(x,y) }
      }
    }else{
      try_b<-try({ b<-chol2inv(chol(crossprod(x,x)))%*%(crossprod(x,y)) },silent=TRUE)
      if('try-error' %in% class(try_b)){
        try_c<-try({ b<-solve(crossprod(x,x))%*%(crossprod(x,y)) },silent=TRUE)
        if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%(crossprod(x,y)) }
      }
    }
    u<-matrix(0,nrow=mk,ncol=kn)# E(gamma_k)
    w<-matrix(0,nrow=mk,ncol=k)# var(gamma_k)
    s<-matrix(0,nrow=kn,ncol=1)# tr(var(gamma_k))
    # marginal covariance V = sum_k Z_k Z_k' v_k + I*v0
    vv<-matrix(0,n,n)
    for(i in 1:kn){
      nc<-( (i-1)*mk+1 ):(i*mk)
      zz<-z[,nc]# Zk
      vv=vv+tcrossprod(zz,zz)*v[i,]
    }
    vv<-vv+diag(n)*v0 # V
    # contrast from 3 genotype effects to (additive, dominance)
    L<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
    rank_1<-qr(L)$rank
    iter<-0;err<-1000;iter_max<-500;
    omega<-0
    while( (iter<iter_max)&&(err>err_max) ){
      iter<-iter+1
      v01<-v0# v01 is the previous sigma^2
      v1<-v# v1 is the previous sigma_k^2
      b1<-b# b1 is the previous beta
      #s1<-s
      try_a<-try({ vi<-chol2inv(chol(vv)) },silent=TRUE)# solve(V)
      if('try-error' %in% class(try_a)){
        try_aa<-try({ vi<-solve(vv) },silent=TRUE)
        if('try-error' %in% class(try_aa)){ vi<-ginv(vv) }
      }
      xtv<-crossprod(x,vi)# t(X)%*%solve(V)
      # GLS update of beta, again with a solve-fallback chain
      if(ncol(x)==1){
        b<-((xtv%*%x)^(-1))*(xtv%*%y)
      }else{
        if(abs(min(Mod(eigen(xtv%*%x)$values)))<1e-6){
          try_b<-try({ b<-chol2inv(chol((xtv%*%x)+diag(ncol(x))*1e-8))%*%(xtv%*%y) },silent=TRUE)
          if('try-error' %in% class(try_b)){
            try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
            if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
          }
        }else{
          try_b<-try({ b<-chol2inv(chol(xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
          if('try-error' %in% class(try_b)){
            try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
            if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
          }
        }
      }
      r<-y-x%*%b# y-X*beta
      ss<-matrix(0,nrow=n,ncol=1)
      vv<-matrix(0,n,n)# new V
      # E-step per locus: posterior mean/variance, then M-step update of v_k
      for(i in 1:kn){
        nc<-( (i-1)*mk+1 ):(i*mk)
        zz<-z[,nc]# Zk
        zztvi<-crossprod(zz,vi)# t(Zk)%*%solve(V)
        u[,i]<-v[i,]*zztvi%*%r# E(gamma_k)
        w[,nc]<-v[i,]*( diag(1,mk)-zztvi%*%zz*v[i,] )# var(gamma_k)
        s[i,]<-sum(diag(w[,nc]))# tr(var(gamma_k))
        v[i,]<-(crossprod(u[,i,drop=F],u[,i,drop=F])+s[i,]+omega)/(tau+2+mk)
        ss<-ss+zz%*%u[,i,drop=F]
        vv<-vv+tcrossprod(zz,zz)*v[i,]# sum( Zk%*%t(Zk)*(sigma_k^2) )
      }
      v0<-as.numeric(crossprod(r,(r-ss))/n)# new sigma^2
      vv<-vv+diag(n)*v0# new V
      err<-(crossprod((b1-b),(b1-b))+(v01-v0)^2+crossprod((v1-v),(v1-v)))/(1+ncol(x)+kn)
      beta<-t(b)
      sigma2<-v0
    }
    # per-locus chi-square (2 df) p-values for the contrast L%*%gamma_k
    u1<-matrix(0,nrow=2,ncol=kn)# main-E(gamma_k)
    p1<-matrix(1,kn,1)
    # pvalue<-matrix(1,kn,1)
    for(i in 1:kn){
      nc<-( (i-1)*mk+1 ):(i*mk)
      gammak<-u[,i,drop=F]
      u1[,i]<-L%*%gammak
      var_1<-L%*%w[,nc,drop=F]%*%t(L)
      tr_1<-sum(diag(ginv(tcrossprod(L))%*%var_1))##tr[...]
      dk1<-abs(rank_1-tr_1/v[i,])
      p1[i,]<-1-pchisq( t(u1[,i,drop=F])%*%ginv(L%*%w[,nc]%*%t(L))%*%u1[,i,drop=F], 2)
    }
    return(list(b=b,u=u,u1=u1,sigma2=sigma2,p1=p1,iter=iter))
  }
  ## Second-stage locus selection: run sblgwas() on the LARS-selected design,
  ## split loci by Wald p-value into a strong set (id1) and a weaker set
  ## (id2), confirm them with ebayes_EM, and prune near-duplicate positions
  ## on dense maps (max gap <= 1 cM). Uses x and y from the enclosing scope.
  ## Returns the fixed (confirmed) loci, the full candidate set, and the
  ## design matrix x|Z for the fixed loci.
  Zhou_sbl<-function(peak,Order,DesignMatrix,CodeMatrix,genoname,sbl_t,sbl_p,tau,err_max,fix_p,Sigma,SigmaK){
    chr_n<-as.numeric(genoname[,2])
    chr_n<-chr_n[!duplicated(chr_n)]
    # largest adjacent-marker gap over all chromosomes; used to decide
    # whether position-based pruning (selection/selection2) is meaningful
    maxdistance<-0
    for(i in 1:length(chr_n)){
      maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
    }
    result_sblgwas<-sblgwas(x,y,DesignMatrix,sbl_t)
    sbl_p_wald<-result_sblgwas$blup[,4]#sbl_par<-result_sblgwas$blup[,1]# sbl_p_wald<-p.adjust(sbl_p_wald, method = "bonferroni")
    sbl_pos_order<-Order[order(sbl_p_wald)]
    p_order<-sort(sbl_p_wald)
    # thresholds expressed on the LOD scale: p < P(chisq_1 > 2*ln(10)*sbl_p)
    id1<-which( p_order< (1-pchisq(sbl_p*2*log(10),1)))
    id2<-which((p_order>=(1-pchisq(sbl_p*2*log(10),1)))&(p_order<1))
    if(length(id1)>0){
      sbl_pos_order1<-sbl_pos_order[id1]
      sbl_pos_order2<-sbl_pos_order[id2]
      sbl_pos_order1<-sbl_pos_order1[!duplicated(sbl_pos_order1)]
      sbl_pos_order2<-sbl_pos_order2[!duplicated(sbl_pos_order2)]
      sort_order1<-sort(sbl_pos_order1)
      # confirm the strong set with empirical-Bayes EM
      result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,sort_order1)$z,y,Sigma,SigmaK[sort_order1],tau,err_max)
      emID1<-which(result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))
      emID2<-order(result_emba$p1)[seq(1,5,1)]   # top-5 fallback
      if(length(emID1)>5){
        emID<-sort(emID1)
        fix_pos<-sort_order1[emID]
        if(maxdistance<=1){
          # dense map: drop candidates within 1 position of a fixed/kept locus
          sbl_pos_order1<-selection2(sbl_pos_order1,fix_pos,genoname,1)
          sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
          sbl_pos_order2<-selection2(sbl_pos_order2,fix_pos,genoname,1)
          sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
          sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
        }
      }else{
        emID<-sort(union(emID1,emID2))
        fix_pos<-sort_order1[emID]
        if(maxdistance<=1){
          sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
          sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
          sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
        }
      }
      sbl_pos<-sort(c(sbl_pos_order1,sbl_pos_order2))# length(union(sbl_pos,fix_pos))
    }else{
      # no locus passed the sbl threshold: fall back to the raw peak list
      result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,peak)$z,y,Sigma,SigmaK[peak],tau,err_max)
      emID1<-which(result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))
      emID2<-order(result_emba$p1)[seq(1,5,1)]
      if(length(emID1)>5){
        emID<-sort(emID1)
        fix_pos<-peak[emID]
        if(maxdistance<=1){
          sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
          sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
        }else{
          sbl_pos_order1<-peak[-emID]
        }
      }else{
        emID<-sort(union(emID1,emID2))
        fix_pos<-peak[emID]
        if(maxdistance<=1){
          sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
          sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
        }else{
          sbl_pos_order1<-peak[-emID]
        }
      }
      sbl_pos<-sort(sbl_pos_order1)
    }
    sbl_fix_pos<-sort(fix_pos)
    xin<-cbind(x,multi_peak_new(CodeMatrix,sbl_fix_pos)$z)
    return(list(fix=sbl_fix_pos,pos=sbl_pos,xin=xin))
  }
into_vector<-function(xmatrix){
xvector<-NULL
for(i in 1:dim(xmatrix)[2]){
xvector<-c(xvector,xmatrix[,i])
}
return(xvector)
}
multinormal<-function(y,mean,sigma){
pdf_value<-(1/sqrt(2*3.14159265358979323846*sigma))*exp(-(y-mean)*(y-mean)/(2*sigma));
return (pdf_value)
}
  ## Likelihood-ratio LOD scores for each of the kn loci in xxx (mk columns
  ## per locus): compare the full model (all loci) against the model with
  ## locus m's columns removed, assuming normal errors. `par` optionally
  ## supplies precomputed coefficients; otherwise OLS is used.
  LRT_F2<-function(xxn,xxx,yn,par,mk){
    # mk<-2+2*(en-1)# the number of genotypes at per locus
    xn<-ncol(as.matrix(xxn))
    nq<-ncol(xxx)
    ns<-nrow(yn)
    kn<-nq/mk
    at1<-nq
    # full design: covariates plus all locus columns (when any exist)
    ad<-if(at1>0.5) cbind(xxn,xxx) else xxn
    if(length(par)==0){
      bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,yn) else solve(crossprod(ad,ad))%*%crossprod(ad,yn)
    }else{
      bb<-par
    }
    # full-model log-likelihood
    vv<-as.numeric(crossprod((yn-ad%*%bb),(yn-ad%*%bb))/ns)##y-(X Z)t(beta gamma)
    ll<-sum(log(abs(multinormal(yn,ad%*%bb,vv))))
    lod<-matrix(0,kn,1)
    if(at1>0.5){
      for(m in 1:kn){
        # drop locus m's mk columns and refit
        i1<-(((m-1)*mk+1):(m*mk));# i2<-((m-1)*mk+1):((m-1)*mk+2); i3<-((m-1)*mk+3):(m*mk)
        m1<-seq(1,ncol(ad),1)[-c(i1+xn)];# m2<-sub[-c(i2+xn)]; m3<-sub[-c(i3+xn)]
        ad1<-ad[,m1,drop=F]
        if(length(par)==0){
          bb1<-if(abs(min(eigen(crossprod(ad1,ad1))$values))<1e-6) solve(crossprod(ad1,ad1)+diag(ncol(ad1))*1e-8)%*%crossprod(ad1,yn) else solve(crossprod(ad1,ad1))%*%crossprod(ad1,yn)
        }else{
          bb1<-par[m1]
        }
        vv1<-as.numeric(crossprod((yn-ad1%*%bb1),(yn-ad1%*%bb1))/ns)
        ll1<-sum(log(abs(multinormal(yn,ad1%*%bb1,vv1))))
        # LOD = -2*(ll_reduced - ll_full) / (2*ln(10))
        lod[m,]<--2.0*(ll1-ll)/(2.0*log(10))
      }
    }
    return(lod)
  }
optimize_every_posx<-function(xpos,z,yn,genoname,rr,tau,err_max){
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
if(maxdistance<rr){
rr<-rr/maxdistance
ad<-cbind(x,multi_peak_new(z,xpos)$z)
if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6){
try_a<-try({ bb<- chol2inv(chol(crossprod(ad,ad)+diag(ncol(ad))*1e-8))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}else{
try_a<-try({ bb<-chol2inv(chol(crossprod(ad,ad)))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}
par<-bb[-c(1:dim(x)[2])]
result_pos<-xpos
chr_sum<-NULL
for(i in 1:length(chr_n)){
chr_sum<-c(chr_sum,length(which(genoname[,2]==i)))
}
chr_sum<-c(0,chr_sum)
for(i in 1:length(xpos)){
yy<-y-multi_peak_new(z,xpos[-i])$z%*%par[-seq((i-1)*3+1,i*3,1)]
chr_now<-apply(genoname[,2,drop=F],2,as.numeric)[xpos[i]]
if(i==1){
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}else{
if(genoname[xpos[i-1],2]==genoname[xpos[i],2]){
left_rr<-min(0.5*(xpos[i]-xpos[i-1]),rr)
}else{
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}
}
if(i==length(xpos)){
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}else{
if(genoname[xpos[i+1],2]==genoname[xpos[i],2]){
right_rr<-min(0.5*(xpos[i+1]-xpos[i]),rr)
}else{
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}
}
left_rr<-floor(left_rr)
right_rr<-floor(right_rr)
least_pos<-xpos[-i]
now_pos<-c((xpos[i]-left_rr):(xpos[i]+right_rr))
try_x<-try({
result_embax<-ebayes_EM(x,multi_peak_new(z,now_pos)$z,yy,initial_sigma,initial_sigmak[now_pos],tau,err_max)
},silent=TRUE)
if('try-error' %in% class(try_x)){
max_pos<-now_pos[which.min(result_embax$p1)]
result_pos[i]<-max_pos# rm(result_embax)
}
}
}else{
result_pos<-xpos
}
return(result_pos)
}
multi_code_classic<-function(n_id,peak_id){
mk<-2# the number of genotypes at per locus
lengthpeak<-length(peak_id)
gen_A<-(Ax-Bx)[n_id,peak_id,drop=F]
gen_D<-Hx[n_id,peak_id,drop=F]
adgen3<-matrix(0,nrow=n,ncol=lengthpeak*mk)
adgen3[,seq(1,lengthpeak*mk,mk)]<-gen_A
adgen3[,seq(2,lengthpeak*mk,mk)]<-gen_D
return(adgen3)
}
  ## Estimate additive/dominance effects for the final loci via OLS on the
  ## classic coding, plus per-locus genetic variances and the percentage of
  ## phenotypic variance explained (PVE). Uses x and y from the enclosing
  ## scope. Returns list(effects, pve, sig_Q, sig_e2, sig_y).
  effect_estimation<-function(n_id,xpos){
    xmatrix<-multi_code_classic(n_id,xpos)
    ad<-cbind(x,xmatrix)
    # OLS with a small ridge when X'X is near-singular
    bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,y) else solve(crossprod(ad,ad))%*%crossprod(ad,y)
    sig_e2<-as.numeric(crossprod(y-ad%*%bb)/length(y))
    # NOTE(review): dropping only bb[1] assumes x is intercept-only (one
    # column); with extra covariates their effects would leak into the
    # locus-effect matrix below — confirm.
    bb<-bb[-1]
    # reshape the coefficient vector into row_n x (length/row_n), filling
    # column by column
    Into_matrix<-function(vector_x,row_n){
      col_n<-length(vector_x)/row_n
      result_x<-matrix(0,nrow=row_n,ncol=col_n)
      for(i in 1:col_n){
        result_x[,i]<-vector_x[((i-1)*row_n+1):(i*row_n)]
      }
      return(result_x)
    }
    effect_all<-t(Into_matrix(bb,2))   # one row per locus: (additive, dominance)
    ef_Q<-effect_all
    # F2 variance contribution per locus: a^2/2 + d^2/4
    sig_Q<-0.5*(ef_Q[,1])^2+0.25*(ef_Q[,2])^2
    sig_y<-max(var(y),(sum(sig_Q)+sig_e2))
    pve<-(sig_Q/sig_y)*100
    return(list(effect_all,pve,sig_Q,sig_e2,sig_y))
  }
LeftRight_marker<-function(map,ChrPos){
LR_result<-NULL
for(i in 1:dim(ChrPos)[1]){
now_id<-which(as.numeric(map[,2])==as.numeric(ChrPos[i,1]))
now_pos<-as.numeric(ChrPos[i,2])
all_pos<-as.numeric(map[now_id,3])
if(now_pos<min(all_pos)){
left_mar<-""
}else{
left_id<-max(which(all_pos<=now_pos))
left_mar<-map[now_id,1][left_id]
}
if(now_pos>max(all_pos)){
right_mar<-""
}else{
right_id<-min(which(all_pos>=now_pos))
right_mar<-map[now_id,1][right_id]
}
LR_result<-rbind(LR_result,c(left_mar,right_mar))
}
return(LR_result)
}
  ######################################### input and basic setup #########################################
  #*######### environment and phenotype #
  ## keep only individuals with a non-missing ("-") phenotype for trait NUM
  pheno<-pheRaw[,NUM,drop=F]
  yes_id<-which(pheno!="-")
  y<-as.numeric(pheno[yes_id])
  n<-length(y)
  y<-as.matrix(y)
  #*######### genotype #
  # genRaw<-as.matrix(genRaw)
  #*######### calculate Z matrix for K matrix #
  ## interleave AA/Aa/aa probabilities (no pseudo markers) as 3 columns per
  ## locus; this Z feeds the kinship matrix only
  mn<-dim(Ax0)[2]
  Z<-matrix(0,nrow=n,ncol=mn*3)
  Z[,seq(1,mn*3,3) ]<-Ax0[yes_id,]
  Z[,seq(2,mn*3,3) ]<-Hx0[yes_id,]
  Z[,seq(3,mn*3,3) ]<-Bx0[yes_id,]# dim(Z)
  #*######### calculate K matrix #
  K<-kinship_every(Z)
  #*######### calculate Z matrix for the subsequent algorithm #
  ## rebuild Z from the pseudo-marker-augmented matrices for scanning
  mn<-dim(Ax)[2]
  Z<-matrix(0,nrow=n,ncol=mn*3)
  Z[,seq(1,mn*3,3) ]<-Ax[yes_id,]
  Z[,seq(2,mn*3,3) ]<-Hx[yes_id,]
  Z[,seq(3,mn*3,3) ]<-Bx[yes_id,]# dim(Z)
  #*######### X matrix; y=Xb+Zg+xi+e #
  x<-matrix(1,nrow=n,ncol=1)#
  if(is.null(yygg)==FALSE){
    x<-cbind(x,yygg[yes_id,,drop=F])
  }# dim(x)
  if(det(crossprod(x,x))==0){
    warning("X is singular")
  }
  ## absorb covariate effects into y so the scans can use an
  ## intercept-only X (q = 1), which the downstream code assumes
  ReduceDim_x<-TRUE
  if(ReduceDim_x){
    x_effect<-if(abs(min(eigen(crossprod(x,x))$values))<1e-6) solve(crossprod(x,x)+diag(ncol(x))*1e-8)%*%crossprod(x,y) else solve(crossprod(x,x))%*%crossprod(x,y)
    yygg_effect<-x_effect[-1,1,drop=F]
    y<-y-x[,-1,drop=F]%*%yygg_effect
    x<-matrix(1,nrow=n,ncol=1)
  }
  #*######### name #
  ######################################### single_locus_scanning #########################################
  #*######### single locus scanning #
  p3d_result<-p3d_method(x,y,K)
  ## Random model: per-locus variance components; Fixed model: GLS effects.
  ## NOTE(review): if Model is neither value, only a warning is raised and
  ## single_locus_model_result stays undefined, so the next line errors.
  if(Model=="Random"){
    single_locus_model_result<-single_locus_model(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,CLO=CLO)
    initial_sigma<-mean(single_locus_model_result$sigma_2)
    initial_sigmak<-single_locus_model_result$phi_k
  }else if(Model=="Fixed"){
    single_locus_model_result<-single_locus_model_Fixed(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,CLO=CLO)
    initial_sigma<-mean(single_locus_model_result$sigma_2)
    initial_sigmak<-rep(1,mn)
  }else{
    warning("Please enter Model!")
  }
  #*######### pick the peaks #
  peak_id<-peak_selection(single_locus_model_result$log1,genoname)# length(peak_id)
  ######################################### multi_locus_scanning #########################################
  multi_locus_result1<-Zhou_lars(peak_id,Z,n) # length(multi_locus_result1$lar_pos)
multi_locus_result2<-Zhou_sbl(peak=multi_locus_result1$lar_pos,Order=multi_locus_result1$order,
DesignMatrix=multi_locus_result1$Matrix,CodeMatrix=Z,
genoname=genoname,
sbl_t=-1,sbl_p=3,tau=0,err_max=1e-6,fix_p=1.5,
Sigma=initial_sigma,SigmaK=initial_sigmak)# larpos=multi_locus_result1$lar_pos0,larbeta=multi_locus_result1$beta
emba_p<-3
result_emba<-ebayes_EM(multi_locus_result2$xin,multi_peak_new(Z,multi_locus_result2$pos)$z,
y,initial_sigma,initial_sigmak[multi_locus_result2$pos],
tau=-2,err_max=1e-8)
emba_pos0<-which(result_emba$p1<(1-pchisq(emba_p*2*log(10),2)))# cbind(result_emba$p1,result_emba$p2)
emba_all_pos<-sort(c(multi_locus_result2$fix,multi_locus_result2$pos[emba_pos0]))
result_emba1<-ebayes_EM(x,multi_peak_new(Z,emba_all_pos)$z,
y,initial_sigma,initial_sigmak[emba_all_pos],
tau=-2,err_max=1e-8)
emba1_pos<-emba_all_pos
emba1_par_E<-c(result_emba1$b)
emba1_par<-result_emba1$u
emba1_par<-into_vector(emba1_par)
multi_value4<-multi_peak_new(Z,emba1_pos)
z_M4<-multi_value4$z
order4<-multi_value4$order
LRT_lod<-LRT_F2(xxn=x,xxx=z_M4, yn=y,par=c(emba1_par_E,emba1_par),mk=1)# cbind(order4,LRT_lod)
lrt_pos<-order4[which(LRT_lod>2.5)]
lrt_pos<-lrt_pos[!duplicated(lrt_pos)]# length(lrt_pos)
######################################### Optimization and output #########################################
if(length(lrt_pos)>0){
if(CriDis<=4){
optimize_pos<-optimize_every_posx(xpos=lrt_pos,z=Z,yn=y,genoname,rr=CriDis,tau=0,err_max=1e-8)
}else{
optimize_pos<-lrt_pos
}
emba3_pos<-optimize_pos
# CriLOD<-3
lod_Q<-LRT_F2(xxn=x,xxx=multi_peak_new(Z,emba3_pos)$z, yn=y,par=NULL,mk=3)# cbind(emba3_pos,lod_Q)
lrt2_pos<-emba3_pos[which(lod_Q>=CriLOD)]# length(lrt2_pos)
last_lod<-lod_Q[which(lod_Q>=CriLOD)]
if(length(lrt2_pos)>0){
IC_data<-cbind(x,multi_peak_new(Z,lrt2_pos)$z)
lm_IC<-lm(y~IC_data-1)
AIC(lm_IC)
BIC(lm_IC)
LR_marker<-LeftRight_marker(map=mapRaw,ChrPos=genoname[lrt2_pos,2:3,drop=F])
result_all<-effect_estimation(yes_id,lrt2_pos)
var_e<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_y<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_e[1]<-round(result_all[[4]],4)
var_y[1]<-round(result_all[[5]],4)
data.all<-data.frame(genoname[lrt2_pos,2:3,drop=F],
round(result_all[[1]],4),round(last_lod,4),
LR_marker,
round(result_all[[3]],4),
round(result_all[[2]],4),
var_e,var_y)
# rep(AIC(lm_IC),length(lrt2_pos)),
# rep(BIC(lm_IC),length(lrt2_pos)))
rownames(data.all)<-NULL
colnames(data.all)<-c("Chr","Position(cM)","Effect.a","Effect.d","LOD",
"Left_marker","right_marker",
"Var_Genet","r2(%)",
"Var_Error","Var_Phen(total)")
reslt_list<-list(result=data.all,p_Q=single_locus_model_result$log1)
}else{
reslt_list<-NULL
warning("No QTL were detected!")
}
}else{
reslt_list<-NULL
warning("No QTL were detected!")
}
return(reslt_list)
}
#' The second step of Zhou method for multiple environments
#'
#' @param Model the model type: "Random" for the random QTL-effect model or "Fixed" for the fixed QTL-effect model.
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw linkage map matrix.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param NUM The serial number of the trait to be analyzed.
#' @param EnvNum The number of environments for each trait is a vector.
#' @param yygg covariate matrix.
#' @param genoname linkage map matrix with pseudo markers inserted.
#' @param Ax0 AA genotype matrix.
#' @param Hx0 Aa genotype matrix.
#' @param Bx0 aa genotype matrix.
#' @param Ax AA genotype matrix with pseudo markers inserted.
#' @param Hx Aa genotype matrix with pseudo markers inserted.
#' @param Bx aa genotype matrix with pseudo markers inserted.
#' @param dir file storage path.
#' @param CriDis The distance of optimization.
#' @param CLO Number of CPUs.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=TRUE)
#' DoResult<-Dodata(fileFormat="GCIM",
#' Population="F2",method="GCIM-QEI",
#' Model="Random",readraw,MultiEnv=TRUE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,dir=tempdir())
#' OutputZhou<-ZhouMethod(Model="Random",
#' pheRaw=DoResult$pheRaw,genRaw=DoResult$genRaw,
#' mapRaw=ZhouMatrices$mapRaw,CriLOD=3,NUM=1,
#' EnvNum=DoResult$EnvNum,yygg=DoResult$yygg1,
#' genoname=ZhouMatrices$genoname,
#' Ax0=ZhouMatrices$Ax0,Hx0=ZhouMatrices$Hx0,
#' Bx0=ZhouMatrices$Bx0,Ax=ZhouMatrices$Ax,
#' Hx=ZhouMatrices$Hx,Bx=ZhouMatrices$Bx,
#' dir=tempdir(),CriDis=5,CLO=2)
ZhouMethod<-function(Model=NULL,pheRaw=NULL,genRaw=NULL,mapRaw=NULL,CriLOD=NULL,NUM=NULL,EnvNum=NULL,yygg=NULL,genoname=NULL,
Ax0=NULL,Hx0=NULL,Bx0=NULL,Ax=NULL,Hx=NULL,Bx=NULL,dir=NULL,CriDis=NULL,CLO=NULL){# chr_n=NULL,
######################################### function #########################################
# Block-diagonal kinship matrix across environments: individuals observed
# in different environments are treated as unrelated (off-diagonal blocks
# stay zero).  coded_gen rows are stacked in the order of n_id, and n
# (closure variable) is the total number of phenotyped individuals.
kinship_all <- function(coded_gen, n_id, en) {
  # marker-based relationship for one environment's rows, scaled by the
  # effective number of loci (ncol / en / 3)
  kin_one <- function(gmat) {
    tcrossprod(gmat) / (ncol(gmat) / en / 3)
  }
  k_all <- matrix(0, n, n)
  offset <- 0
  for (e in 1:en) {
    idx <- (offset + 1):(offset + length(n_id[[e]]))
    k_all[idx, idx] <- kin_one(coded_gen[idx, ])
    offset <- offset + length(n_id[[e]])
  }
  k_all
}
# Fixed-effect design matrix for the multi-environment model: an intercept
# column plus en-1 environment-contrast columns (sum-to-zero coding:
# environment i gets +1 in column i, the last environment gets -1 in every
# contrast column).  n (closure variable) is the total number of rows.
fixed_x <- function(n_id, en) {
  intercept <- matrix(1, n, 1)
  env_contrast <- matrix(0, nrow = n, ncol = en - 1)
  # rows belonging to the last environment carry -1 across all contrasts
  env_contrast[(n - length(n_id[[en]]) + 1):n, ] <- -1
  offset <- 0
  for (e in 1:(en - 1)) {
    env_contrast[(offset + 1):(offset + length(n_id[[e]])), e] <- 1
    offset <- offset + length(n_id[[e]])
  }
  cbind(intercept, env_contrast)
}
# Column labels for the effect table: the main additive/dominance effects
# followed by the environment-specific (QEI) a/d effect pair for each of
# the en environments.
name_function <- function(en) {
  qei <- as.vector(vapply(
    seq_len(en),
    function(e) c(paste0("Effect.aE", e), paste0("Effect.dE", e)),
    character(2)
  ))
  c("Effect.a", "Effect.d", qei)
}
# P3D ("population parameters previously determined"): estimate the
# polygenic variance ratio lambda = sigma_g^2 / sigma_e^2 once from the
# kinship matrix, so the per-locus scans can reuse the same spectral
# decomposition.  Returns list(lambda, k_vector = eigenvectors of K,
# k_value = eigenvalues of K).
p3d_method<-function(x,y,kinship){
# estimate the value of λ & λk kinship=K;
P3D<-function(x,y,kinship){
# Negative restricted log-likelihood as a function of ga = log(lambda);
# works in the eigenbasis of K so H = lambda*K + I is diagonal.
iter_p3d<-function(ga){
lambda<-exp(ga)
diag_element<-lambda*K_value+1
logH<-sum(log(diag_element))
RH_value<-1/(diag_element)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow = 1,ncol = q)
xuRHxu<-matrix(0,nrow = q,ncol = q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[i,j]<-sum(xu[,i]*RH_value*xu[,j])
}
}
logxuRHxu<-log(det(xuRHxu))
logyuPyu<-log(yuRHyu-yuRHxu%*%tcrossprod(solve(xuRHxu),yuRHxu))
output<- -0.5*(logH+logxuRHxu+(n-q)*logyuPyu)
return(-output)
}
q<-ncol(x)
n<-nrow(y)
# Rotate x and y into the eigenbasis of the kinship matrix.
eigenK<-eigen(kinship)
K_vector<-eigenK$vectors
K_value<-eigenK$values# rm(eigenK);gc()
xu<-crossprod(K_vector,x)
yu<-crossprod(K_vector,y)
ga0<-0
# Optimize log(lambda) over a bounded range to keep exp() finite.
optimp3d<-optim(par=ga0,fn=iter_p3d,hessian = TRUE,method="L-BFGS-B",lower=-50,upper=10)
lambda<-exp(optimp3d$par)
return(list(lambda,K_vector,K_value))
}
q<-ncol(x)
value1<-P3D(x=x,y=y,kinship=kinship)
lambda<-value1[[1]]
uu<-as.matrix(value1[[2]])
vv<-value1[[3]]
# RH_value<-1/(vv*lambda+1)
rm(value1);gc()
return(list(lambda=lambda,k_vector=uu,k_value=vv))
}
# Genome scan under the RANDOM single-locus model: for every locus, fit a
# mixed model with the locus genotype classes (3 per environment) as a
# random effect on top of the P3D polygenic background, and report Wald
# tests for the main QTL effect (p1/log1), the environment effect
# (p2/log2) and the QTL-by-environment interaction (p3/log3).
# Runs the per-locus loop in parallel via foreach/%dopar%.
single_locus_model<-function(x,y,zz,lambda,uu,vv,en,CLO){
# genotype effect transform to additive dominance(transformation matrix)
# Builds the linear contrasts that map the 3*en genotype-class effects to
# main (C1), environment (C2) and interaction (C3) components; LL1/LL3
# additionally convert genotype-class means to additive/dominance scale.
L_coefficient<-function(en){
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
value2<-L_coefficient(en)# L coefficient matrix # rm(L_coefficient);gc()
# iteration function(R=zu%*%t(zu)*λk+D*λ+I);estimate λk
# Negative restricted log-likelihood in ga = log(lambdak) for one locus.
rqtl<-function(ga){
lambdak<-exp(ga)
Hk_term<-zuRHzu*lambdak+diag(1,en*3)
logHk<-sum(log(lambda*vv+1))+log(det(Hk_term))
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
rqtl<- -0.5*( logHk + log(det(xuRHkxu)) + (n-q)*log(yuPkyu) )
return(-rqtl)
}
# estimate the value of γ
# BLUP of the locus genotype effects plus their covariance, given lambdak.
gamma_estimate<-function(lambdak){
Hk_term<-zuRHzu*lambdak+diag(1,en*3)
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkxu<-t(xuRHzu)-zuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkyu<-t(yuRHzu)-zuRHzu%*%tcrossprod(RHk_term,yuRHzu)
zuRHkzu<-zuRHzu-zuRHzu%*%RHk_term%*%zuRHzu
beta<-solve(xuRHkxu,t(yuRHkxu))
beta<-matrix(beta,ncol = 1)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
sigma<-yuPkyu/(n-q)
sigma<-as.numeric(sigma)
gamma<-lambdak*zuRHkyu-lambdak*zuRHkxu%*%beta
var<-abs((lambdak*diag(1,en*3)-lambdak*zuRHkzu*lambdak)*sigma)
stderr<-sqrt(diag(var))
phi.k<-sigma*lambdak # Φk
return(list(gamma,beta,var,phi.k,sigma,stderr))
}
# estimate the value of logp
# Wald-type test of the contrast L %*% gamma with an effective-df gamma
# approximation; returns both p and -log10(p).  Reads var_ga and phi_k
# from the calling (per-locus) environment.
logp_estimate<-function(L, g_k, pn){
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
rank1<-qr(L)$rank
tr1<-sum(diag(ginv(tcrossprod(L,L))%*%var.1))
dk1<-rank1-tr1/phi_k
p1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = FALSE)
log1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))
return(list(p=p1,log=log1))
}
# Precompute the rotated data and the invariant cross-products shared by
# every locus; RH_value is diag(lambda*K + I)^(-1) in the eigenbasis.
RH_value<-1/(vv*lambda+1)
mn<-ncol(zz)/(en*3)
n<-nrow(y)
q<-ncol(x)
xu<-crossprod(uu,x)
yu<-crossprod(uu,y)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow=1,ncol=q)
xuRHxu<-matrix(0,nrow=q,ncol=q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[j,i]<-sum(xu[,j]*RH_value*xu[,i])
}
}
# yuRHyu<-crossprod(yu,diag(RH_value))%*%yu
# yuRHxu<-crossprod(yu,diag(RH_value))%*%xu
# xuRHxu<-crossprod(xu,diag(RH_value))%*%xu
# Choose the worker count: explicit CLO wins, otherwise use up to 10 cores.
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
MR_i<-numeric()
result<-foreach(MR_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((MR_i-1)*3*en+1):(MR_i*3*en),drop=F]
zu<-crossprod(uu,z)
xuRHzu<-matrix(0,nrow=q,ncol=3*en)
yuRHzu<-matrix(0,nrow=1,ncol=3*en)
zuRHzu<-matrix(0,nrow=3*en,ncol=3*en)
for(i in 1:(3*en)){
yuRHzu[,i]<-sum(yu*RH_value*zu[,i])
for(j in 1:q){
xuRHzu[j,i]<-sum(xu[,j]*RH_value*zu[,i])
}
for(j in 1:(3*en)){
zuRHzu[j,i]<-sum(zu[,j]*RH_value*zu[,i])
}
}
# xuRHzu<-crossprod(xu,diag(RH_value))%*%zu
# yuRHzu<-crossprod(yu,diag(RH_value))%*%zu
# zuRHzu<-crossprod(zu,diag(RH_value))%*%zu
ga<-0
par<-optim(par=ga,fn=rqtl,hessian = TRUE,method="L-BFGS-B",lower=-10,upper=10)
lambdak<-exp(par$par)
value3<-gamma_estimate(lambdak)
gamma_k<-value3[[1]]
beta<-value3[[2]]# estimate β
var_ga<-value3[[3]]
phi_k<-value3[[4]]
sigma_2<-value3[[5]]
# stderr<-value3[[6]]
gamma_k2<-gamma_k
gamma_main_k <-value2$matrix_C1%*%gamma_k2
gamma_env_k <-value2$matrix_C2%*%gamma_k2
gamma_inter_k<-value2$matrix_C3%*%gamma_k2
main_effect<-value2$L1%*%gamma_main_k
interact_effect<-value2$L3%*%gamma_inter_k
logvalue1<-logp_estimate(L=value2$LL1, g_k=main_effect, pn=2)# the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
logvalue2<-logp_estimate(L=value2$matrix_C2, g_k=gamma_env_k, pn=en-1)# the -log(10)p of environment effect
p2<-logvalue2$p
log2<-logvalue2$log
logvalue3<-logp_estimate(L=value2$LL3, g_k=interact_effect, pn=2*(en-1))# the -log(10)p of interaction effect
p3<-logvalue3$p
log3<-logvalue3$log
result<-cbind(lambdak,matrix(beta,1,en),matrix(gamma_k,1,3*en),p1,p2,p3,log1,log2,log3,phi_k,sigma_2)
}
stopCluster(cl)
# rm(RH_value);gc()
# Unpack the per-locus result rows into named vectors/matrices.
lambda_k<-result[,1]
mu_beta<-result[,2:(1+en)]
gamma_all<-result[,(2+en):(1+4*en)]
p1<-result[,(2+4*en)]
p2<-result[,(3+4*en)]
p3<-result[,(4+4*en)]
log_p1<-result[,(5+4*en)]
log_p2<-result[,(6+4*en)]
log_p3<-result[,(7+4*en)]
phi_k<-result[,(8+4*en)]
sigma_2<-result[,(9+4*en)]
return(list(lambda_k=lambda_k, fixed=mu_beta, gamma=gamma_all,
p1=p1, p2=p2, p3=p3,
log1=log_p1, log2=log_p2, log3=log_p3,
phi_k=phi_k, sigma_2=sigma_2))
}
# Genome scan under the FIXED single-locus model: generalized least squares
# with the polygenic covariance fixed at the P3D estimate; the locus
# genotype-class effects enter as fixed effects and are tested by
# chi-square Wald tests (main, environment, interaction).
# Runs the per-locus loop in parallel via foreach/%dopar%.
# NOTE(review): reads mn (number of loci) from the enclosing scope.
single_locus_model_Fixed<-function(x,y,zz,lambda,uu,vv,en,CLO){
# genotype effect transform to additive dominance(transformation matrix)
# Same contrast construction as in single_locus_model: C1/C2/C3 split the
# 3*en class effects into main/environment/interaction parts.
L_coefficient<-function(en){
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
value2<-L_coefficient(en)# L coefficient matrix # rm(L_coefficient);gc()
# Chi-square Wald test of the contrast L %*% gamma; reads var_ga from the
# calling (per-locus) environment.  Returns p and -log10(p).
logp_estimate<-function(L, g_k, pn){
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
p1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = FALSE)
log1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))
return(list(p=p1,log=log1))
}
# Rotate y into the eigenbasis; Hsolve is diag(lambda*K + I)^(-1) there.
yu<-crossprod(uu,y)
Hsolve<-1/(vv*lambda+1)
# Choose the worker count: explicit CLO wins, otherwise use up to 10 cores.
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
# cl.cores<-2
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
MF_i<-numeric()
result<-foreach(MF_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((MF_i-1)*3*en+1):(MF_i*3*en),drop=F]
uxz<-crossprod(uu,cbind(x,z))
# GLS estimate of (beta, gamma) with weight matrix diag(Hsolve).
x_gamma<-ginv(t(uxz)%*%diag(Hsolve)%*%uxz)%*%t(uxz)%*%diag(Hsolve)%*%yu
q<-qr(uxz)$rank
sig_e2<-as.numeric(t(yu-uxz%*%x_gamma)%*%diag(Hsolve)%*%(yu-uxz%*%x_gamma)/(dim(uxz)[1]-q))
x_gamma_covmatr<-sig_e2*ginv(t(uxz)%*%diag(Hsolve)%*%uxz)
gamma<-x_gamma[-c(1:dim(x)[2])]
var_ga<-x_gamma_covmatr[-c(1:dim(x)[2]),-c(1:dim(x)[2]),drop=F]
gamma_main_k <-value2$matrix_C1%*%gamma
gamma_env_k <-value2$matrix_C2%*%gamma
gamma_inter_k<-value2$matrix_C3%*%gamma
main_effect<-value2$L1%*%gamma_main_k
interact_effect<-value2$L3%*%gamma_inter_k
logvalue1<-logp_estimate(L=value2$LL1, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
logvalue2<-logp_estimate(L=value2$matrix_C2, g_k=gamma_env_k, pn=en-1)#the -log(10)p of environment effect
p2<-logvalue2$p
log2<-logvalue2$log
logvalue3<-logp_estimate(L=value2$LL3, g_k=interact_effect, pn=2*(en-1))#the -log(10)p of interaction effect
p3<-logvalue3$p
log3<-logvalue3$log
result<-cbind(matrix(x_gamma[c(1:dim(x)[2])],1,en),matrix(gamma,1,3*en),p1,p2,p3,log1,log2,log3,sig_e2)
}
stopCluster(cl)
# rm(RH_value);gc()
# Unpack the per-locus result rows into named vectors/matrices.
mu_beta<-result[,1:dim(x)[2]]
gamma_all<-result[,(dim(x)[2]+1):(dim(x)[2]+3*en)]
p1<-result[,(dim(x)[2]+3*en+1)]
p2<-result[,(dim(x)[2]+3*en+2)]
p3<-result[,(dim(x)[2]+3*en+3)]
log_p1<-result[,(dim(x)[2]+3*en+4)]
log_p2<-result[,(dim(x)[2]+3*en+5)]
log_p3<-result[,(dim(x)[2]+3*en+6)]
sigma_2<-result[,(dim(x)[2]+3*en+7)]
return(list(fixed=mu_beta, gamma=gamma_all,
p1=p1, p2=p2, p3=p3,
log1=log_p1, log2=log_p2, log3=log_p3,
sigma_2=sigma_2))
}
# Select candidate QTL positions as the per-chromosome local maxima of a
# -log10(p) score profile.
#
# log_value: numeric scores, one per genome position (same order as genoname).
# genoname:  position table whose 2nd column is the chromosome identifier.
# Returns the sorted indices (into log_value) of all local peaks.
peak_selection<-function(log_value,genoname){
  # Indices of the local maxima of one chromosome's profile, endpoints
  # included.  Fixed: the original crashed on chromosomes with fewer than
  # three positions (Lod.temp[2] is NA when m==1, and 2:(m-1) runs
  # backwards when m==2, indexing Lod.temp[0]).
  peak_pos<-function(Lod.temp){
    m<-length(Lod.temp)
    if(m==1) return(1)
    optids<-vector(length=0)
    if(Lod.temp[1]>Lod.temp[2]) optids<-append(optids,1)
    if(m>2){
      for(j in 2:(m-1)){
        if ((Lod.temp[j-1]<Lod.temp[j]) & (Lod.temp[j]>Lod.temp[j+1])) {
          optids<-append(optids,j)
        }
      }
    }
    if(Lod.temp[m]>Lod.temp[m-1]) optids<-append(optids,m)
    return(optids)
  }
  chr_all<-as.matrix(genoname[,2])
  chr_kind<-chr_all[!duplicated(chr_all)]
  id_pos<-NULL
  # Collect peaks chromosome by chromosome, mapping local indices back to
  # genome-wide positions.
  for(jjj in chr_kind){
    now_id<-which(chr_all%in%jjj)
    id_pos<-c(id_pos,now_id[peak_pos(log_value[now_id])])
  }
  return(sort(id_pos))
}
# Expand locus indices into the corresponding column indices of the coded
# genotype matrix (3*en columns per locus).  Returns the column-subset
# matrix z together with `order`, the locus index each column belongs to.
multi_peak_new <- function(gencoded, peak_id, en) {
  enk <- 3 * en
  within_locus <- seq_len(enk)
  cols <- unlist(lapply(peak_id, function(p) (p - 1) * enk + within_locus))
  list(z = gencoded[, sort(cols)], order = sort(rep(peak_id, enk)))
}
# LARS pre-screening stage: when there are at least as many candidate loci
# as individuals (length(peak) >= n), run least-angle regression on the
# candidates' design columns and keep only loci with a nonzero coefficient
# in the final LARS step; otherwise pass the candidates through unchanged.
# Uses closure variables x (fixed design) and y, and the lars package.
Zhou_lars<-function(peak,CodeMatrix,n,en){
multi_value<-multi_peak_new(CodeMatrix,peak,en)
DesignMatrix<-multi_value$z
order0<-multi_value$order# length(order0); length(peak_id)
if(length(peak)>=n){
lar_result<-lars(x=cbind(x[,-1,drop=F],DesignMatrix), y=y, type = "lar",trace = FALSE, normalize = TRUE, intercept = TRUE, eps = .Machine$double.eps, use.Gram=FALSE)
# Last row of the LARS coefficient path; drop the covariate columns.
# NOTE(review): -c(1:(dim(x)[2]-1)) assumes dim(x)[2] >= 2 — when x has a
# single column this drops the first genotype coefficient; confirm x
# always carries the environment contrasts here.
lar_result.0<-lar_result$beta[nrow(lar_result$beta),][-c(1:(dim(x)[2]-1))]
lar_pos0<-order0[which(lar_result.0!=0)]
lar_pos<-lar_pos0[!duplicated(lar_pos0)]# length(lar_pos)
multi_value1<-multi_peak_new(CodeMatrix,lar_pos,en)
DesignMatrix1<-multi_value1$z
order1<-multi_value1$order
}else{
lar_pos<-peak
DesignMatrix1<-DesignMatrix
order1<-order0
}# length(lar_pos)
return(list(lar_pos=lar_pos,Matrix=DesignMatrix1,order=order1))
}
# Sparse Bayesian learning regression (coordinate-descent): iteratively
# shrinks the marker coefficients gamma with marker-specific variances tau,
# leaving most at exactly zero.  t tunes the shrinkage prior; returns the
# iteration trace, the final model parameters and per-marker BLUPs with
# Wald tests.
sblgwas<-function(x,y,z,t,max.iter=200,min.err=1e-6){
x<-as.matrix(x)
y<-as.matrix(y)
z<-as.matrix(z)
n<-length(y)
q<-ncol(x)
m<-ncol(z)
# Initial residual variance from the fixed-effects-only fit.
b0<-solve(t(x)%*%x,tol=1e-50)%*%(t(x)%*%y)
s2<-sum((y-x%*%b0)^2)/(n-q)
b0<-matrix(0,q,1)
b<-b0
g0<-matrix(0,m,1)
g<-g0
lambda<-matrix(0,m,1)
tau<-g0
v<-g0
# Precompute the column norms and x'y / z'y inner products.
xx<-NULL
xy<-NULL
for(i in 1:q){
xx<-c(xx,sum(x[,i]^2))
xy<-c(xy,sum(x[,i]*y))
}
zz<-NULL
zy<-NULL
for(k in 1:m){
zz<-c(zz,sum(z[,k]^2))
zy<-c(zy,sum(z[,k]*y))
}
d<-numeric(m)
a<-matrix(0,n,1)
iter<-0
err<-1e8
my.iter<-NULL
# a holds the current fitted values; each coordinate is removed from a,
# re-estimated against the partial residual, then added back.
while(iter < max.iter & err > min.err){
for(i in 1:q){
a<-a-x[,i]*b0[i]
ai<-sum(x[,i]*a)
b[i]<-(xy[i]-ai)/xx[i]
a<-a+x[,i]*b[i]
}
df<-0
for(k in 1:m){
a<-a-z[,k]*g0[k]
ak<-sum(z[,k]*a)
# Closed-form update of the marker variance tau[k] (root of a quadratic);
# a negative discriminant or c2 forces tau[k] (and hence gamma[k]) to 0.
c1<- -(t+3)*zz[k]^2
c2<- -(2*t+5)*zz[k]+(zy[k]-ak)^2
c3<- -(t+2)
if( ((c2^2-4*c1*c3) < 0) | (c2 < 0) ){
tau[k]<-0
} else {
tau[k]<-(-c2-sqrt(c2^2-4*c1*c3))/(2*c1)
}
lambda[k]<-tau[k]/s2
g[k]<-lambda[k]*(zy[k]-ak)-lambda[k]^2*zz[k]*(zy[k]-ak)/(lambda[k]*zz[k]+1)
d[k]<-lambda[k]*(zz[k]-lambda[k]*zz[k]^2/(lambda[k]*zz[k]+1))
v[k]<-tau[k]-tau[k]*d[k]
df<-df+d[k]
a<-a+z[,k]*g[k]
}
# Residual variance with the model degrees of freedom df, guarded so the
# denominator stays positive.
if((n-q-df) > 0){s2<-sum((y-a)^2)/(n-q-df)
}else{
s2<-sum((y-a)^2)/(n-q)
}
iter<-iter+1
err<-sum((g-g0)^2)/m
g0<-g
b0<-b
my.iter<-rbind(my.iter,cbind(iter,err,s2,t(b),t(g)))
}
my.parm<-data.frame(iter,err,s2,b,df)
names(my.parm)<-c("iter","error","s2","beta","df")
# Wald statistics only for markers with nonzero prediction variance v.
posv<-which(v!=0)
m<-length(g)
wald<-c(rep(0,m))
gg<-g[posv]
vv<-v[posv]
wald[posv]<-gg^2/vv
p<-pchisq(wald,1,lower.tail=FALSE)
my.blup<-data.frame(g,v,wald,p)
names(my.blup)<-c("gamma","vg","wald","p_wald")
var.beta<-NULL
for(i in 1:q){
var.beta<-c(var.beta,paste("beta",i,sep=""))
}
var.gamma<-NULL
for(k in 1:m){
var.gamma<-c(var.gamma,paste("gamma",k,sep=""))
}
var.names<-c(c("iter","error","s2"),var.beta,var.gamma)
my.iter<-data.frame(my.iter)
names(my.iter)<-var.names
out<-list(my.iter,my.parm,my.blup)
names(out)<-c("iteration","parm","blup")
return(out)
}
# Thin a list of candidate positions: a candidate is kept unless a
# previously kept position on the SAME chromosome lies within svrad index
# units of it.  The first position is always kept.
selection <- function(posx, genoname, svrad) {
  kept <- posx[1]
  for (cand in posx[-1]) {
    near <- which(abs(kept - as.numeric(cand)) <= svrad)
    same_chr <- length(which(genoname[kept[near], 2] ==
                               as.numeric(genoname[cand, 2]))) > 0
    if (length(near) == 0 || !same_chr) {
      kept <- c(kept, cand)
    }
  }
  kept
}
# Filter posx1 against a reference set posx2: drop any position in posx1
# that lies within svrad index units of a posx2 position on the SAME
# chromosome; everything else is kept.  Returns NULL when nothing survives.
selection2 <- function(posx1, posx2, genoname, svrad) {
  kept <- NULL
  for (cand in posx1) {
    near <- which(abs(posx2 - as.numeric(cand)) <= svrad)
    same_chr <- length(which(genoname[posx2[near], 2] ==
                               as.numeric(genoname[cand, 2]))) > 0
    if (length(near) == 0 || !same_chr) {
      kept <- c(kept, cand)
    }
  }
  kept
}
# Empirical-Bayes EM for the multi-locus, multi-environment model: each
# locus contributes a block of 3*en random genotype-class effects with its
# own variance v[k].  Iterates E-step BLUPs and M-step variance updates
# until convergence, then tests the main (p1) and interaction (p2)
# contrasts of each locus.  v0 = residual variance, tau = prior parameter.
ebayes_EM<-function(x,z,y,en,v0,v,tau,err_max){
n<-nrow(z);k<-ncol(z)
mk<-3*en; kn<-k/mk
v0<-as.numeric(v0)
v<-matrix(v,ncol=1)
# Rough initial estimate of the fixed effects b, with a ridge / solve /
# ginv fallback chain for ill-conditioned X'X.
if(abs(min(eigen(crossprod(x,x))$values))<1e-6){
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)+diag(ncol(x))*1e-8))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%crossprod(x,y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%(crossprod(x,y)) }
}
}# β: fixed effect-rough estimate
u<-matrix(0,nrow=mk,ncol=kn)# E(γk)
w<-matrix(0,nrow=mk,ncol=k)# var(γk)
s<-matrix(0,nrow=kn,ncol=1)# tr(var(γk))
vv<-matrix(0,n,n)# V
# Assemble the marginal covariance V = sum_k Zk Zk' v[k] + I v0.
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
vv=vv+tcrossprod(zz,zz)*v[i,]# ∑( Zk%*%t(Zk)*(σk^2) )
}
vv<-vv+diag(n)*v0# V : the covariance matrix for y
# genotype effect transform to additive dominance(transformation matrix)
# Same contrast construction as in the single-locus models.
L_coefficient<-function(en){
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
L<-L_coefficient(en)#L coefficient matrix # rm(L_coefficient);gc()
rank_1<-qr(L$LL1)$rank
rank_2<-qr(L$LL3)$rank
iter<-0;err<-1000;iter_max<-500;
omega<-0
# EM loop: converged when the squared change in (b, v0, v) is small.
while( (iter<iter_max)&&(err>err_max) ){
iter<-iter+1
v01<-v0# v01 is the initial σ^2
v1<-v# v1 is the initial σk^2
b1<-b# b1 is the initial β
#s1<-s
try_a<-try({ vi<-chol2inv(chol(vv)) },silent=TRUE)# solve(V)
if('try-error' %in% class(try_a)){
try_aa<-try({ vi<-solve(vv) },silent=TRUE)
if('try-error' %in% class(try_aa)){ vi<-ginv(vv) }
}
xtv<-crossprod(x,vi)# t(X)%*%solve(V)
# GLS update of the fixed effects, again with a fallback chain.
if(ncol(x)==1){
b<-((xtv%*%x)^(-1))*(xtv%*%y)
}else{
if(abs(min(Mod(eigen(xtv%*%x)$values)))<1e-6){
try_b<-try({ b<-chol2inv(chol((xtv%*%x)+diag(ncol(x))*1e-8))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}
}
r<-y-x%*%b# y-Xβ
ss<-matrix(0,nrow=n,ncol=1)
vv<-matrix(0,n,n)# new V
# E-step per locus: posterior mean u and covariance w of gamma_k, then
# M-step update of the locus variance v[k].
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
zztvi<-crossprod(zz,vi)# t(Zk)%*%solve(V)
u[,i]<-v[i,]*zztvi%*%r# E(γk)
w[,nc]<-v[i,]*( diag(1,mk)-zztvi%*%zz*v[i,] )# var(γk)
s[i,]<-sum(diag(w[,nc]))# tr(var(γk))
v[i,]<-(crossprod(u[,i,drop=F],u[,i,drop=F])+s[i,]+omega)/(tau+2+mk)# new (σk^2)
ss<-ss+zz%*%u[,i,drop=F]
vv<-vv+tcrossprod(zz,zz)*v[i,]# ∑( Zk%*%t(Zk)*(σk^2) )
}
v0<-as.numeric(crossprod(r,(r-ss))/n)# new σ^2
vv<-vv+diag(n)*v0# new V
err<-(crossprod((b1-b),(b1-b))+(v01-v0)^2+crossprod((v1-v),(v1-v)))/(1+ncol(x)+kn)
beta<-t(b)
sigma2<-v0
}
# Post-EM tests: chi-square Wald statistics for the main (u1) and
# interaction (u2) contrasts of each locus.
u1<-matrix(0,nrow=2,ncol=kn)# main-E(γk)
u2<-matrix(0,nrow=2*en,ncol=kn)# interaction-E(γk)
p1<-matrix(1,kn,1)
p2<-matrix(1,kn,1)
# pvalue<-matrix(1,kn,1)
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
gammak<-u[,i,drop=F]
u1[,i]<-L$LL1%*%gammak
u2[,i]<-L$LL3%*%gammak
var_1<-L$LL1%*%w[,nc]%*%t(L$LL1)
tr_1<-sum(diag(ginv(tcrossprod(L$LL1))%*%var_1))
dk1<-abs(rank_1-tr_1/v[i,])
var_2<-L$LL3%*%w[,nc]%*%t(L$LL3)
tr_2<-sum(diag(ginv(tcrossprod(L$LL3))%*%var_2))
dk2<-abs(rank_2-tr_2/v[i,])
p1[i,]<-1-pchisq( t(u1[,i,drop=F])%*%ginv(L$LL1%*%w[,nc]%*%t(L$LL1))%*%u1[,i,drop=F], 2)
p2[i,]<-1-pchisq( t(u2[,i,drop=F])%*%ginv(L$LL3%*%w[,nc]%*%t(L$LL3))%*%u2[,i,drop=F], 2*(en-1))
}
return(list(b=b,u=u,u1=u1,u2=u2,sigma2=sigma2,p1=p1,p2=p2,iter=iter))
}
# Sparse-Bayesian-learning stage of the multi-locus scan: rank candidate
# loci by sblgwas() Wald p-values, split them into a strong set (p below
# the sbl_p threshold) and a weak set, confirm the strong set with an
# ebayes_EM refit (threshold fix_p) to obtain the "fixed" loci, and thin
# neighbouring positions (selection/selection2) on dense maps.
# Returns list(fix = confirmed loci, pos = remaining candidates,
# xin = design matrix of x plus the confirmed loci's columns).
# Uses closure variables x and y.
Zhou_sbl<-function(peak,Order,DesignMatrix,CodeMatrix,genoname,en,sbl_t,sbl_p,tau,err_max,fix_p,Sigma,SigmaK){
# Largest gap (in index units) between adjacent map positions; thinning is
# only applied when the map is dense (maxdistance <= 1).
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
result_sblgwas<-sblgwas(x=x,y=y,z=DesignMatrix,t=sbl_t)
sbl_p_wald<-result_sblgwas$blup[,4]# sbl_par<-result_sblgwas$blup[,1]# sbl_p_wald<-p.adjust(sbl_p_wald, method = "bonferroni")
sbl_pos_order<-Order[order(sbl_p_wald)]
p_order<-sort(sbl_p_wald)
id1<-which( p_order< (1-pchisq(sbl_p*2*log(10),1)))
id2<-which((p_order>=(1-pchisq(sbl_p*2*log(10),1)))&(p_order<1))
if(length(id1)>0){
sbl_pos_order1<-sbl_pos_order[id1]
sbl_pos_order2<-sbl_pos_order[id2]
sbl_pos_order1<-sbl_pos_order1[!duplicated(sbl_pos_order1)]
sbl_pos_order2<-sbl_pos_order2[!duplicated(sbl_pos_order2)]
sort_order1<-sort(sbl_pos_order1)
# Confirm the strong set with an EM refit; a locus is "fixed" when its
# main OR interaction p-value clears the fix_p threshold.
result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,sort_order1,en)$z,y,en,Sigma,SigmaK[sort_order1],tau,err_max)
emID1<-which((result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(fix_p*2*log(10),2*(en-1)))))
emID2<-union(order(result_emba$p1)[seq(1,5,1)],order(result_emba$p2)[seq(1,5,1)])
if(length(emID1)>5){
emID<-sort(emID1)
fix_pos<-sort_order1[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(sbl_pos_order1,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,fix_pos,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
}
}else{
# Too few confirmed loci: pad with the 5 best by main/interaction p-value.
emID<-sort(union(emID1,emID2))
fix_pos<-sort_order1[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
}
}
sbl_pos<-sort(c(sbl_pos_order1,sbl_pos_order2))# length(union(sbl_pos,fix_pos))
}else{
# No locus cleared the sbl_p threshold: confirm directly from the full
# candidate set instead.
result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,peak,en)$z,y,en,Sigma,SigmaK[peak],tau,err_max)
emID1<-which((result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(fix_p*2*log(10),2*(en-1)))))
emID2<-union(order(result_emba$p1)[seq(1,5,1)],order(result_emba$p2)[seq(1,5,1)])
if(length(emID1)>5){
emID<-sort(emID1)
fix_pos<-peak[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
}else{
sbl_pos_order1<-peak[-emID]
}
}else{
emID<-sort(union(emID1,emID2))
fix_pos<-peak[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
}else{
sbl_pos_order1<-peak[-emID]
}
}
sbl_pos<-sort(sbl_pos_order1)
}
sbl_fix_pos<-sort(fix_pos)
xin<-cbind(x,multi_peak_new(CodeMatrix,sbl_fix_pos,en)$z)
return(list(fix=sbl_fix_pos,pos=sbl_pos,xin=xin))
}
# Classical a/d coding for the multi-environment model.  Per locus the
# columns are [a, d, aE1, dE1, ..., aE(en-1), dE(en-1)], where the QEI
# columns are the main-effect codes multiplied column-wise by the
# environment indicator matrix ee.  Uses closure variables en, n, Ax, Hx,
# Bx; genotype rows are stacked in the environment order given by n_id.
multi_code_classic <- function(peak, ee, n_id) {
  cols_per_locus <- 2 + 2 * (en - 1)
  n_loci <- length(peak)
  add_code <- NULL
  dom_code <- NULL
  for (e in 1:en) {
    add_code <- rbind(add_code, (Ax - Bx)[n_id[[e]], peak, drop = FALSE])
    dom_code <- rbind(dom_code, Hx[n_id[[e]], peak, drop = FALSE])
  }
  design <- matrix(0, nrow = n, ncol = n_loci * cols_per_locus)
  design[, seq(1, n_loci * cols_per_locus, cols_per_locus)] <- add_code
  design[, seq(2, n_loci * cols_per_locus, cols_per_locus)] <- dom_code
  for (i in 1:n_loci) {
    qei_a <- seq((i - 1) * cols_per_locus + 3, i * cols_per_locus, by = 2)
    qei_d <- seq((i - 1) * cols_per_locus + 4, i * cols_per_locus, by = 2)
    design[, qei_a] <- add_code[, i] * ee
    design[, qei_d] <- dom_code[, i] * ee
  }
  design
}
# Univariate normal density evaluated element-wise; note that `sigma` is
# the VARIANCE, not the standard deviation.
multinormal <- function(y, mean, sigma) {
  exp(-(y - mean)^2 / (2 * sigma)) / sqrt(2 * pi * sigma)
}
# LOD scores by likelihood-ratio test: for each locus (mk design columns),
# compare the full-model log-likelihood against the model with that
# locus's columns removed.  par optionally supplies the coefficient
# vector; when NULL a (ridge-stabilised) least-squares fit is used.
# Returns a kn x 1 matrix of LOD values.  Calls the sibling helper
# multinormal() for the Gaussian likelihood.
LRT_F2<-function(xxn,xxx,yn,par,mk){
# mk<-2+2*(en-1)# the number of genotypes at per locus
xn<-ncol(as.matrix(xxn))
nq<-ncol(xxx)
ns<-nrow(yn)
kn<-nq/mk
at1<-nq
ad<-if(at1>0.5) cbind(xxn,xxx) else xxn
if(length(par)==0){
bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,yn) else solve(crossprod(ad,ad))%*%crossprod(ad,yn)
}else{
bb<-par
}
# Full-model residual variance and log-likelihood.
vv<-as.numeric(crossprod((yn-ad%*%bb),(yn-ad%*%bb))/ns)# y-(X Z)t(β γ)
ll<-sum(log(abs(multinormal(yn,ad%*%bb,vv))))
lod<-matrix(0,kn,1)
if(at1>0.5){
# Drop each locus in turn and convert the likelihood drop to a LOD.
for(m in 1:kn){
i1<-(((m-1)*mk+1):(m*mk));# i2<-((m-1)*mk+1):((m-1)*mk+2); i3<-((m-1)*mk+3):(m*mk)
m1<-seq(1,ncol(ad),1)[-c(i1+xn)];# m2<-sub[-c(i2+xn)]; m3<-sub[-c(i3+xn)]
ad1<-ad[,m1,drop=F]
if(length(par)==0){
bb1<-if(abs(min(eigen(crossprod(ad1,ad1))$values))<1e-6) solve(crossprod(ad1,ad1)+diag(ncol(ad1))*1e-8)%*%crossprod(ad1,yn) else solve(crossprod(ad1,ad1))%*%crossprod(ad1,yn)
}else{
bb1<-par[m1]
}
vv1<-as.numeric(crossprod((yn-ad1%*%bb1),(yn-ad1%*%bb1))/ns)
ll1<-sum(log(abs(multinormal(yn,ad1%*%bb1,vv1))))
lod[m,]<--2.0*(ll1-ll)/(2.0*log(10))
}
}
return(lod)
}
# Local re-optimization of each detected position: on a dense map, slide
# each position within a window of +/- rr (converted from cM to index
# units) while holding the other loci's fitted contributions fixed, and
# move it to the window position with the smallest ebayes_EM p-value.
# Uses closure variables x, y, initial_sigma, initial_sigmak and the
# sibling helpers multi_peak_new()/ebayes_EM().  Returns the (possibly
# shifted) positions; on sparse maps the input is returned unchanged.
optimize_every_posx<-function(xpos,z,yn,genoname,en,rr,tau,err_max){
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
# Largest inter-position gap; optimization only makes sense when the map
# is denser than the search radius.
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
if(maxdistance<rr){
rr<-rr/maxdistance
# Joint least-squares fit of all current loci (ridge / solve / ginv
# fallback chain), used to compute leave-one-out adjusted phenotypes.
ad<-cbind(x,multi_peak_new(z,xpos,en)$z)
if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6){
try_a<-try({ bb<- chol2inv(chol(crossprod(ad,ad)+diag(ncol(ad))*1e-8))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}else{
try_a<-try({ bb<-chol2inv(chol(crossprod(ad,ad)))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}
par<-bb[-c(1:dim(x)[2])]
result_pos<-xpos
# Cumulative position counts per chromosome, for clamping windows at
# chromosome boundaries.
chr_sum<-NULL
for(i in 1:length(chr_n)){
chr_sum<-c(chr_sum,length(which(genoname[,2]==i)))
}
chr_sum<-c(0,chr_sum)
for(i in 1:length(xpos)){
# Phenotype adjusted for every locus except the one being optimized.
yy<-y-multi_peak_new(z,xpos[-i],en)$z%*%par[-seq((i-1)*3*en+1,i*3*en,1)]
chr_now<-apply(genoname[,2,drop=F],2,as.numeric)[xpos[i]]
# Window half-widths, clamped to the chromosome boundary and to half the
# distance to the neighbouring detected locus.
if(i==1){
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}else{
if(genoname[xpos[i-1],2]==genoname[xpos[i],2]){
left_rr<-min(0.5*(xpos[i]-xpos[i-1]),rr)
}else{
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}
}
if(i==length(xpos)){
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}else{
if(genoname[xpos[i+1],2]==genoname[xpos[i],2]){
right_rr<-min(0.5*(xpos[i+1]-xpos[i]),rr)
}else{
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}
}
left_rr<-floor(left_rr)
right_rr<-floor(right_rr)
least_pos<-xpos[-i]
now_pos<-c((xpos[i]-left_rr):(xpos[i]+right_rr))
# Best-effort: a numerically failing EM run leaves the position as-is.
try({
result_emba2<-ebayes_EM(x,multi_peak_new(z,now_pos,en)$z,yy,en,initial_sigma,initial_sigmak[now_pos],tau,err_max)
maxp1<-min(result_emba2$p1)
maxp2<-min(result_emba2$p2)
max_pos1<-now_pos[which.min(result_emba2$p1)]
max_pos2<-now_pos[which.min(result_emba2$p2)]
max_pos1
max_pos2
if((maxp1!=1)|(maxp2!=1)){
if(max_pos1==max_pos2){
result_pos[i]<-max_pos1
}else{
result_pos[i]<-c(max_pos1,max_pos2)[which.min(c(maxp1,maxp2))]
}
}
})
}
}else{
result_pos<-xpos
}
return(result_pos)
}
effect_estimation<-function(n_id,xpos,lod,en,ee){
# Estimate QTL main effects, QTL-by-environment (QEI) effects and the
# corresponding variance components / r2 for the final positions `xpos`.
# Columns of `lod` flag which positions carry a significant QTL (col 2) or
# QEI (col 3) effect; the design columns of non-significant parts are zeroed
# before estimation.
#   n_id: per-environment indices of non-missing individuals; xpos: QTL
#   positions; lod: LOD matrix (total/QTL/QEI); en: number of environments;
#   ee: environment indicator columns of the fixed-effect matrix.
# NOTE(review): uses the free variables `x` (fixed-effect matrix) and `y`
# (phenotype vector) from the enclosing scope.
xmatrix<-multi_code_classic(xpos,ee,n_id)
na_id1<-which(lod[,2]==0)# positions without a significant QTL effect
na_id2<-which(lod[,3]==0)# positions without a significant QEI effect
mk<-2+2*(en-1)
na_id_Q<-sort(c((na_id1-1)*mk+1,(na_id1-1)*mk+2))# additive/dominance columns
na_id_QE<-NULL
for(jj in 1:length(na_id2)){
na_id_QE<-c(na_id_QE,sort(rep((na_id2[jj]-1)*mk,2*(en-1))+seq(3,mk,1)))
}
xmatrix0<-xmatrix
xmatrix0<-cbind(x,xmatrix0)
xmatrix[,c(na_id_Q,na_id_QE)]<-0# zero out non-significant design columns
ad<-cbind(x,xmatrix)
bb0<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,y) else solve(crossprod(ad,ad))%*%crossprod(ad,y)
bb<-bb0[-seq(1,dim(x)[2],1)]# drop fixed effects, keep QTL/QEI effects
sig_e2<-as.numeric(crossprod(y-xmatrix0%*%bb0)/length(y))# residual variance
# reshape the coefficient vector into one row per locus
Into_matrix<-function(vector_x,row_n){
col_n<-length(vector_x)/row_n
result_x<-matrix(0,nrow=row_n,ncol=col_n)
for(i in 1:col_n){
result_x[,i]<-vector_x[((i-1)*row_n+1):(i*row_n)]
}
return(result_x)
}
effect_all_0<-t(Into_matrix(bb,2+2*(en-1)))
# the effect of the last environment is minus the sum of the others
last_effect<-function(effect){
a<-effect[,seq(3,2+2*(en-1),2),drop=F]
d<-effect[,seq(4,2+2*(en-1),2),drop=F]
last_matrix<-matrix(0,nrow=dim(a)[1],ncol=2)
for(i in 1:(en-1)){
last_matrix[,1]<-last_matrix[,1]-a[,i]
last_matrix[,2]<-last_matrix[,2]-d[,i]
}
return(last_matrix)
}
effect_all<-cbind(effect_all_0,last_effect(effect_all_0))# dim(effect_all)
ef_Q<-effect_all[,c(1:2),drop=F]
ef_QE<-effect_all[,-c(1:2),drop=F]
sig_Q<-0.5*(ef_Q[,1])^2+0.25*(ef_Q[,2])^2# genetic variance of QTL effects
sig_QE<-matrix(0,nrow=dim(effect_all)[1],ncol=1)
for(i in 1:en){
sig_QE<-sig_QE+(1/en)*0.5*(ef_QE[,1+(i-1)*2])^2+(1/en)*0.5*(ef_QE[,2+(i-1)*2])^2
}
sig_y<-max(var(y),(sum(sig_Q)+sum(sig_QE)+sig_e2))# total phenotypic variance
pve<-cbind((sig_Q/sig_y)*100,(sig_QE/sig_y)*100)# r2 (%) for QTL and QEI
pve<-cbind(as.matrix(pve[,1]+pve[,2]),pve)
return(list(effect_all,pve,sig_Q,sig_QE,sig_e2,sig_y))
}
LeftRight_marker<-function(map,ChrPos){
# For every (chromosome, position) row in ChrPos, return the names of the
# flanking markers on the linkage map `map` (col 1 = marker name,
# col 2 = chromosome, col 3 = position). "" marks a missing flank when the
# position lies before the first / after the last marker of the chromosome.
flank <- NULL
for (k in 1:dim(ChrPos)[1]) {
on_chr  <- which(as.numeric(map[, 2]) == as.numeric(ChrPos[k, 1]))
qtl_pos <- as.numeric(ChrPos[k, 2])
mar_pos <- as.numeric(map[on_chr, 3])
mar_nam <- map[on_chr, 1]
if (qtl_pos < min(mar_pos)) {
left_name <- ""
} else {
left_name <- mar_nam[max(which(mar_pos <= qtl_pos))]
}
if (qtl_pos > max(mar_pos)) {
right_name <- ""
} else {
right_name <- mar_nam[min(which(mar_pos >= qtl_pos))]
}
flank <- rbind(flank, c(left_name, right_name))
}
return(flank)
}
######################################### input and basic setup #########################################
#*######### environment and phenotype #
# Extract the `en` environment columns of trait NUM from pheRaw; individuals
# with a "-" phenotype are dropped per environment and their retained indices
# are kept in yes_id (one integer vector per environment).
en<-EnvNum[NUM]
sum_en<-sum(EnvNum[0:(NUM-1)])
pheno<-t(pheRaw[,(sum_en+1):(sum_en+en),drop=F])
# rownames(pheno)<-NULL
yes_id<-NULL
for(i in 1:dim(pheno)[1]){
yes_id[[i]]<-which(pheno[i,]!="-")
}
# pheno<-as.matrix(pheno)
y<-NULL;yall<-NULL
for(i in 1:dim(pheno)[1]){
y<-c(y,as.numeric(pheno[i,yes_id[[i]]]))
yall<-c(yall,pheno[i,])
}
n0<-dim(pheno)[2]# The number of individuals in each environment
n<-length(y)# The number of individuals in all environments after deleting the missing values
nn<-length(yall)# The number of individuals in all environments before deleting the missing values
y<-as.matrix(y) # rm(pheno);gc()
#*######### genotype #
# genRaw<-as.matrix(genRaw)
#*######### calculate Z matrix for K matrix #
# Z interleaves the A/H/B genotype-probability columns of every marker with
# one triplet per environment, stacking environments row-wise.
mn<-dim(Ax0)[2]
Z<-matrix(0,nrow=n,ncol=mn*en*3)
sum_n<-0
for(j in 1:en){
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+1,mn*en*3,3*en) ]<-Ax0[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+2,mn*en*3,3*en) ]<-Hx0[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+3,mn*en*3,3*en) ]<-Bx0[yes_id[[j]],]
sum_n<-sum_n+length(yes_id[[j]])
}# dim(Z)
#*######### calculate K matrix #
K<-kinship_all(Z,yes_id,en)# rm(kinship_every,kinship_all,K0);gc()
#*######### calculate Z matrix for the subsequent algorithm #
# Rebuild Z with the scanning genotype matrices Ax/Hx/Bx (which may contain
# inserted pseudo markers), same layout as above.
mn<-dim(Ax)[2]
Z<-matrix(0,nrow=n,ncol=mn*en*3)
sum_n<-0
for(j in 1:en){
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+1,mn*en*3,3*en) ]<-Ax[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+2,mn*en*3,3*en) ]<-Hx[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+3,mn*en*3,3*en) ]<-Bx[yes_id[[j]],]
sum_n<-sum_n+length(yes_id[[j]])
}# dim(Z)
#*######### X matrix; y=Xβ+Zγ+ξ+ε #
x<-fixed_x(yes_id,en)
if(is.null(yygg)==FALSE){
# append user-supplied covariates, stacked per environment
yygg_x<-NULL
for(i in 1:en){
yygg_x<-rbind(yygg_x,yygg[yes_id[[i]],])
}# dim(yygg_x)
x<-cbind(x,yygg_x)
}# dim(x)
if(det(crossprod(x,x))==0){
warning("X is singular")
}
ReduceDim_x<-TRUE
if(ReduceDim_x){
# Regress the covariate effects out of y, then keep only the per-environment
# intercept columns in x for the subsequent scans.
x_effect<-if(abs(min(eigen(crossprod(x,x))$values))<1e-6) solve(crossprod(x,x)+diag(ncol(x))*1e-8)%*%crossprod(x,y) else solve(crossprod(x,x))%*%crossprod(x,y)
# fix_ef<-bb[seq(1,dim(x)[2],1)]
yygg_effect<-x_effect[-seq(1,en,1),1,drop=F]
y<-y-x[,-seq(1,en,1),drop=F]%*%yygg_effect
x<-fixed_x(yes_id,en)
}
#*######### name #
effect_name<-name_function(en)
######################################### single_locus_scanning #########################################
#*######### single locus scanning #
# Genome-wide single-locus scan under the polygenic background K.
# NOTE(review): log1/log3 and the single_locus_model used here come from the
# multi-environment scan functions defined earlier in this file (not shown).
p3d_result<-p3d_method(x,y,K)
if(Model=="Random"){
single_locus_model_result<-single_locus_model(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,en=en,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-single_locus_model_result$phi_k# write.table(single_locus_model_result,file=paste("single_locus_model_result.csv",sep = ""),sep=",",row.names = F,col.names = T)
}else if(Model=="Fixed"){
single_locus_model_result<-single_locus_model_Fixed(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,en=en,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-rep(1,mn)
}else{
warning("Please enter Model!")
}
#*######### pick the peaks #
# keep the local maxima of the -log10(p) profiles of the QTL and QEI tests
peak_id1<-peak_selection(single_locus_model_result$log1,genoname)
peak_id3<-peak_selection(single_locus_model_result$log3,genoname)
peak_id<-sort(union(peak_id1,peak_id3))# length(peak_id)
######################################### multi_locus_scanning #########################################
# Multi-locus step: pre-select peaks with LARS, shrink with the sparse
# Bayesian routine, refine with empirical-Bayes EM, then confirm each
# retained position by a likelihood-ratio (LOD > 2.5) test for the QTL and
# QEI parts separately.
multi_locus_result1<-Zhou_lars(peak_id,Z,n,en)
# length(multi_locus_result1$lar_pos)
multi_locus_result2<-Zhou_sbl(peak=multi_locus_result1$lar_pos,Order=multi_locus_result1$order,
DesignMatrix=multi_locus_result1$Matrix,CodeMatrix=Z,
genoname=genoname,en=en,
sbl_t=-1,sbl_p=3,tau=0,err_max=1e-6,fix_p=1.5,
Sigma=initial_sigma,SigmaK=initial_sigmak)# larpos=multi_locus_result1$lar_pos0,larbeta=multi_locus_result1$beta
emba_p<-1.5# -log10(p) threshold used for the EM screen below
t1<-proc.time()
result_emba<-ebayes_EM(multi_locus_result2$xin,multi_peak_new(Z,multi_locus_result2$pos,en)$z,
y,en,initial_sigma,initial_sigmak[multi_locus_result2$pos],
tau=-2,err_max=1e-6)
t2<-proc.time()
(t2-t1)[3]
emba_pos0<-which((result_emba$p1<(1-pchisq(emba_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(emba_p*2*log(10),2*(en-1)))))# cbind(result_emba$p1,result_emba$p2)
emba_all_pos<-sort(c(multi_locus_result2$fix,multi_locus_result2$pos[emba_pos0]))
if(length(multi_locus_result2$pos[emba_pos0])>0){
emba_pos_Q<-multi_locus_result2$pos[emba_pos0]
emba_pos_QE<-multi_locus_result2$pos[emba_pos0]
emba_pos<-multi_locus_result2$pos[emba_pos0]
if(length(emba_pos_Q)>0){
# LRT of the QTL (additive/dominance) columns only
z_M4_Q <-multi_code_classic(peak=emba_pos_Q, ee=x[,seq(2,en),drop=F],n_id=yes_id)
order_Q<-sort(c(seq(1,dim(z_M4_Q)[2],2+2*(en-1)),seq(2,dim(z_M4_Q)[2],2+2*(en-1))))
z_M4_Q<-z_M4_Q[,order_Q,drop=F]
lod_Q<-LRT_F2(xxn=multi_locus_result2$xin,xxx=z_M4_Q, yn=y,par=NULL,mk=2)# cbind(emba_pos_Q,lod_Q)
lrt_pos_Q<-emba_pos_Q[which(lod_Q>2.5)]
emba_pos_Q[which(lod_Q>2.5)]# cbind(emba2_pos_Q[which(lod_Q>2.5)],lod_Q[which(lod_Q>2.5)])
}else{
lrt_pos_Q<-NULL
}
if(length(emba_pos_QE)>0){
# LRT of the QEI columns only (all columns except the QTL pair per locus)
z_M4_QE<-multi_code_classic(peak=emba_pos_QE,ee=x[,seq(2,en),drop=F],n_id=yes_id)
order_QE<-sort(c(seq(1,dim(z_M4_QE)[2],2+2*(en-1)),seq(2,dim(z_M4_QE)[2],2+2*(en-1))))
z_M4_QE<-z_M4_QE[,-order_QE,drop=F]
lod_QE<-LRT_F2(xxn=multi_locus_result2$xin,xxx=z_M4_QE, yn=y,par=NULL,mk=2*en-2)# cbind(emba_pos_QE,lod_QE)
lrt_pos_QE<-emba_pos_QE[which(lod_QE>2.5)]
emba_pos_QE[which(lod_QE>2.5)]# cbind(emba2_pos_QE[which(lod_QE>2.5)],lod_QE[which(lod_QE>2.5)])
}else{
lrt_pos_QE<-NULL
}
lrt_pos<-sort(union(lrt_pos_Q,lrt_pos_QE))
lrt_pos<-sort(union(multi_locus_result2$fix,lrt_pos))# length(lrt_pos)
}else{
lrt_pos<-multi_locus_result2$fix# length(lrt_pos)
}
######################################### Optimization and output #########################################
# Final stage: optimize each retained position within its local window,
# re-screen with the EM routine, confirm QTL/QEI parts by LRT at the
# user-supplied CriLOD, estimate effects/variances and assemble the result
# table. Returns NULL (with a warning) when nothing survives.
if(length(lrt_pos)>0){
optimize_pos<-optimize_every_posx(xpos=lrt_pos,z=Z,yn=y,genoname,en,rr=CriDis,tau=0,err_max=1e-6)
emba3_p<-3# -log10(p) threshold of the post-optimization EM screen
result_emba3<-ebayes_EM(x,multi_peak_new(Z,optimize_pos,en)$z,y,en,initial_sigma,initial_sigmak[optimize_pos],tau=0,err_max = 1e-8)
emba3_pos1<-optimize_pos[which(result_emba3$p1<(1-pchisq(emba3_p*2*log(10),2)))]
emba3_pos2<-optimize_pos[which(result_emba3$p2<(1-pchisq(emba3_p*2*log(10),2*(en-1))))]
emba3_pos3<-optimize_pos[which((result_emba3$p1>=(1-pchisq(emba3_p*2*log(10),2)))&(result_emba3$p2>=(1-pchisq(emba3_p*2*log(10),2*(en-1)))))]
emba3_pos_Q<-sort(union(emba3_pos1,emba3_pos3))
emba3_pos_QE<-sort(union(emba3_pos2,emba3_pos3))
emba3_pos<-sort(union(emba3_pos_Q,emba3_pos_QE))
# CriLOD<-3
if(length(emba3_pos_Q)>0){
z_M5_Q<-multi_code_classic(emba3_pos_Q,x[,seq(2,en),drop=F],yes_id)
order_Q<-sort(c(seq(1,dim(z_M5_Q)[2],2+2*(en-1)),seq(2,dim(z_M5_Q)[2],2+2*(en-1))))
z_M5_Q<-z_M5_Q[,order_Q]
}else{
z_M5_Q<-NULL
}
if(length(emba3_pos_QE)>0){
z_M5_QE<-multi_code_classic(emba3_pos_QE,x[,seq(2,en),drop=F],yes_id)
order_QE<-sort(c(seq(1,dim(z_M5_QE)[2],2+2*(en-1)),seq(2,dim(z_M5_QE)[2],2+2*(en-1))))
z_M5_QE<-z_M5_QE[,-order_QE,drop=F]
}else{
z_M5_QE<-NULL
}
# final LRTs: each part tested with the other part kept in the null model
if(length(emba3_pos_Q)>0){
lod_Q<-LRT_F2(xxn=cbind(x,z_M5_QE),xxx=z_M5_Q, yn=y,par=NULL,mk=2)# cbind(emba3_pos_Q,lod_Q)
lrt_pos_Q<-emba3_pos_Q[which(lod_Q>=CriLOD)]
}else{
lrt_pos_Q<-NULL
}
if(length(emba3_pos_QE)>0){
lod_QE<-LRT_F2(xxn=cbind(x,z_M5_Q),xxx=z_M5_QE, yn=y,par=NULL,mk=2*en-2)# cbind(emba3_pos_QE,lod_QE)
lrt_pos_QE<-emba3_pos_QE[which(lod_QE>=CriLOD)]
}else{
lrt_pos_QE<-NULL
}
lrt2_pos<-sort(union(lrt_pos_Q,lrt_pos_QE))
if(length(lrt2_pos)>0){
# assemble the LOD table: col 1 total, col 2 QTL part, col 3 QEI part
last_lod<-matrix(0,nrow=length(lrt2_pos),ncol=3)
last_lod[which(lrt2_pos%in%lrt_pos_Q),2]<-lod_Q[which(lod_Q>=CriLOD)]
last_lod[which(lrt2_pos%in%lrt_pos_QE),3]<-lod_QE[which(lod_QE>=CriLOD)]
last_lod[,1]<-last_lod[,2]+last_lod[,3]
IC_data<-cbind(x,multi_peak_new(Z,lrt2_pos,en)$z)
lm_IC<-lm(y~IC_data-1)
AIC(lm_IC)
BIC(lm_IC)
if(length(lrt_pos_Q)>0){
zM_Q<-multi_code_classic(lrt_pos_Q,x[,seq(2,en),drop=F],yes_id)
order_Q<-sort(c(seq(1,dim(zM_Q)[2],2+2*(en-1)),seq(2,dim(zM_Q)[2],2+2*(en-1))))
zM_Q<-zM_Q[,order_Q,drop=F]
}else{
zM_Q<-NULL
}
if(length(lrt_pos_QE)>0){
zM_QE<-multi_code_classic(lrt_pos_QE,x[,seq(2,en),drop=F],yes_id)
order_QE<-sort(c(seq(1,dim(zM_QE)[2],2+2*(en-1)),seq(2,dim(zM_QE)[2],2+2*(en-1))))
zM_QE<-zM_QE[,-order_QE,drop=F]
}else{
zM_QE<-NULL
}
IC_data<-cbind(cbind(x,zM_Q),zM_QE)
lm_IC<-lm(y~IC_data-1)
AIC(lm_IC)
BIC(lm_IC)
LR_marker<-LeftRight_marker(map=mapRaw,ChrPos=genoname[lrt2_pos,2:3,drop=F])
result_all<-effect_estimation(n_id=yes_id,xpos=lrt2_pos,lod=last_lod,en,ee=x[,seq(2,en),drop=F])
# residual and phenotypic variances are reported once (first row only)
var_e<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_y<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_e[1]<-round(result_all[[5]],4)
var_y[1]<-round(result_all[[6]],4)
data.all<-data.frame(genoname[lrt2_pos,2:3,drop=F],
round(result_all[[1]],4),round(last_lod,4),
LR_marker,
round(result_all[[3]]+result_all[[4]],4),
round(result_all[[3]],4),round(result_all[[4]],4),
round(result_all[[2]],4),
var_e,var_y)
# rep(AIC(lm_IC),length(lrt2_pos)),
# rep(BIC(lm_IC),length(lrt2_pos)))
rownames(data.all)<-NULL
colnames(data.all)<-c("Chr","Position(cM)",effect_name,
"LOD","LOD_QTL","LOD_QEI",
"Left_marker","right_marker",
"Var_Genet","Var_Genet_QTL","Var_Genet_QEI",
"r2(%)","r2_QTL(%)","r2_QEI(%)",
"Var_Error","Var_Phen(total)")
reslt_list<-list(result=data.all,p_Q=single_locus_model_result$log1,p_QE=single_locus_model_result$log3)
}else{
reslt_list<-NULL
warning("No QTL or QEI were detected!")
}
}else{
reslt_list<-NULL
warning("No QTL or QEI were detected!")
}
return(reslt_list)
}
| /R/ZhouF.R | no_license | cran/QTL.gCIMapping | R | false | false | 90,494 | r | #' To perform QTL mapping with Wen method
#'
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw1 linkage map matrix.
#' @param WalkSpeed Walk speed for Genome-wide Scanning.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param dir file path in your computer.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=TRUE)
#' DoResult<-Dodata(fileFormat="GCIM",
#' Population="F2",method="GCIM-QEI",
#' Model="Random",readraw,MultiEnv=TRUE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,
#' mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,
#' dir=tempdir())
ZhouF<-function(pheRaw=NULL,genRaw=NULL,mapRaw1=NULL,WalkSpeed=NULL,CriLOD=NULL,dir=NULL){
# Prepare the genotype-probability matrices for the Zhou (GCIM-QEI) method:
# validate the inputs, renumber chromosomes consecutively, write a combined
# phenotype/genotype csv to `dir`, compute the A/H/B conditional genotype
# probabilities with R/qtl (read.cross + calc.genoprob) and, when the
# largest marker gap exceeds WalkSpeed, insert pseudo markers at WalkSpeed
# steps. Returns the map and both probability sets (Ax0/Hx0/Bx0 for the
# kinship matrix, Ax/Hx/Bx for scanning).
cl<-WalkSpeed
sLOD<-CriLOD
# yygg<-NULL
# mx=NULL;phe=NULL;chr_name=NULL;v.map=NULL
if(is.null(genRaw)==TRUE){
warning("Please input correct genotype dataset!")
}
if(is.null(pheRaw)==TRUE){
warning("Please input correct phenotype dataset!")
}
if(is.null(mapRaw1)==TRUE){
warning("Please input correct linkage map dataset!")
}
if((is.null(genRaw)==FALSE)&&(is.null(pheRaw)==FALSE)&&(is.null(mapRaw1)==FALSE)&&(cl<0)){
warning("Please input Walk Speed: >0!")
}
if((is.null(genRaw)==FALSE)&&(is.null(pheRaw)==FALSE)&&(is.null(mapRaw1)==FALSE)&&(cl>0)&&(sLOD<0)){
warning("Please input critical LOD score: >0!")
}
mapRaw<-as.matrix(mapRaw1)
# renumber chromosomes as 1..chr in order of first appearance
chr_name<-unique(mapRaw[,2])
chr_secon<-as.matrix(mapRaw[,2])
mm<-numeric()
map_chr<-numeric()
for(i in 1:length(chr_name)){
chr_i<-length(which(chr_secon[]==chr_name[i]))
mm<-c(mm,chr_i)
chr_name[i]<-i
map_chr<-c(map_chr,rep(i,chr_i))
}
mm<-matrix(mm,ncol=1)
map_chr<-matrix(map_chr,ncol=1)
mapRaw[,2]<-map_chr
chr<-length(chr_name)
# detect duplicated marker positions within any chromosome
for(i in 1:chr){
pos1<-as.matrix(mapRaw[which(mapRaw[,2]==i),3])
delerow<-which(duplicated(pos1))
if(length(delerow)!=0){
break
}
}
if(length(delerow)!=0){
warning("Please check linkage maps (linkage groups) to make sure whether all the marker positions are different!")
}else{
# write phenotype + genotype in the rotated csv layout expected by
# qtl::read.cross(format = "csvr")
blank<-matrix("",nrow=3,ncol=dim(pheRaw)[2])
blank[1,]<-colnames(pheRaw)
# p1<-as.matrix(pheRaw)
p1<-pheRaw
colnames(p1)<-NULL
p1<-t(rbind(blank,p1))
g1<-cbind(mapRaw,genRaw)
colnames(p1)<-NULL
colnames(g1)<-NULL
pgcombine<-rbind(p1,g1)
write.table(pgcombine,file=paste(dir,"/listeria_rotY",".csv",sep=""),sep=",",row.names = F,col.names = F)
########## calculate conditional probability for K matrix
f2<-read.cross("csvr",dir,"listeria_rotY.csv",genotypes=c("A","H","B","D","C"),na.strings = "-",crosstype="f2")
# f2<-jittermap(f2)# Jitter the marker positions in a genetic map so that no two markers are on top of each other # jittermap(object, amount=1e-6)
simf2<-calc.genoprob(f2, step=0,error.prob = 0.0001)
########## Access to chromosome information
genoname<-apply(mapRaw[,2:3],2,as.numeric)
genoname<-data.frame(marker=mapRaw[,1],chr=genoname[,1],pos=genoname[,2])
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
########## calculate conditional probability for K matrix
# probabilities at the observed markers only (step=0)
Ax0<-NULL;Hx0<-NULL;Bx0<-NULL
for(i in 1:length(chr_n)){
map_gen<-simf2$geno[[i]]$prob
A_gen<-round(map_gen[,,1],digits=15)
H_gen<-round(map_gen[,,2],digits=15)
B_gen<-round(map_gen[,,3],digits=15)
Ax0<-cbind(Ax0,A_gen)
Hx0<-cbind(Hx0,H_gen)
Bx0<-cbind(Bx0,B_gen)
}# dim(Ax0) # mn<-dim(Ax)[2]
########## Whether need to insert markers
if(maxdistance>cl){#user's options
# rerun with step=cl so pseudo markers are inserted every cl cM
simf2<-calc.genoprob(f2, step=cl,error.prob = 0.0001)
Ax<-NULL;Hx<-NULL;Bx<-NULL;regenoname<-NULL
for(i in 1:length(chr_n)){
map_gen<-simf2$geno[[i]]$prob
A_gen<-round(map_gen[,,1],digits=15)
H_gen<-round(map_gen[,,2],digits=15)
B_gen<-round(map_gen[,,3],digits=15)
Ax<-cbind(Ax,A_gen)
Hx<-cbind(Hx,H_gen)
Bx<-cbind(Bx,B_gen)
nowpos<-attr(map_gen,"map")
nowbin<-names(nowpos)
nowchr<-rep(as.numeric(chr_n[i]),length(nowpos))
nowdata<-data.frame(marker=nowbin,chr=nowchr,pos=nowpos)
regenoname<-rbind(regenoname,nowdata)
}# dim(Ax)
rownames(regenoname)<-NULL
regenoname<-cbind(regenoname,seq(1,dim(regenoname)[1],1))
colnames(regenoname)<-c("marker","chr","pos","id.all")
# mn<-dim(regenoname)[1]
genoname<-regenoname
}else{
Ax<-Ax0;Hx<-Hx0;Bx<-Bx0
genoname<-cbind(genoname,seq(1,nrow(genoname),by=1))
colnames(genoname)<-c("marker","chr","pos","id.all")
}
}
# NOTE(review): when duplicated positions are found above, the objects below
# are never created and this list construction will fail after the warning.
output<-list(genoname=genoname,mapRaw=mapRaw,
Ax0=Ax0,Hx0=Hx0,Bx0=Bx0,Ax=Ax,Hx=Hx,Bx=Bx)# yygg=yygg,pheRaw=pheRaw,chr_n=chr_n,
return(output)
}
#' The second step of Zhou method for single environment
#'
#' @param Model Random or fixed model.
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw linkage map matrix.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param NUM The serial number of the trait to be analyzed.
#' @param yygg covariate matrix.
#' @param genoname linkage map matrix with pseudo markers inserted.
#' @param Ax0 AA genotype matrix.
#' @param Hx0 Aa genotype matrix.
#' @param Bx0 aa genotype matrix.
#' @param Ax AA genotype matrix with pseudo markers inserted.
#' @param Hx Aa genotype matrix with pseudo markers inserted.
#' @param Bx aa genotype matrix with pseudo markers inserted.
#' @param dir file storage path.
#' @param CriDis The distance of optimization.
#' @param CLO Number of CPUs.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=FALSE)
#' DoResult<-Dodata(fileFormat="GCIM",Population="F2",
#' method="GCIM-QEI",Model="Random",
#' readraw,MultiEnv=FALSE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,dir=tempdir())
#' OutputZhou<-ZhouMethod_single_env(Model="Random",
#' pheRaw=DoResult$pheRaw,genRaw=DoResult$genRaw,
#' mapRaw=ZhouMatrices$mapRaw,CriLOD=3,NUM=1,
#' yygg=DoResult$yygg1,genoname=ZhouMatrices$genoname,
#' Ax0=ZhouMatrices$Ax0,Hx0=ZhouMatrices$Hx0,
#' Bx0=ZhouMatrices$Bx0,Ax=ZhouMatrices$Ax,
#' Hx=ZhouMatrices$Hx,Bx=ZhouMatrices$Bx,
#' dir=tempdir(),CriDis=5,CLO=2)
ZhouMethod_single_env<-function(Model=NULL,pheRaw=NULL,genRaw=NULL,mapRaw=NULL,CriLOD=NULL,NUM=NULL,yygg=NULL,genoname=NULL,
Ax0=NULL,Hx0=NULL,Bx0=NULL,Ax=NULL,Hx=NULL,Bx=NULL,dir=NULL,CriDis=NULL,CLO=NULL){# chr_n=NULL,
######################################### function #########################################
kinship_every<-function(coded_gen){
# Relationship matrix from one genotype-probability block:
# coded_gen %*% t(coded_gen) scaled by the total marker count `mn`,
# which is resolved from the enclosing scope.
tcrossprod(coded_gen)/mn
}
p3d_method<-function(x,y,kinship){
# P3D step: estimate the polygenic variance ratio lambda once, under the
# null model y = X beta + u, u ~ N(0, K*lambda*sigma2), by maximizing the
# restricted profile likelihood over the eigen-decomposition of K.
# Returns list(lambda, k_vector = eigenvectors of K, k_value = eigenvalues).
# estimate the value of λ & λk kinship=K;
P3D<-function(x,y,kinship){
# iteration function(H=K*λ+I);estimate λ
iter_p3d<-function(ga){
lambda<-exp(ga)# optimize on the log scale so lambda stays positive
diag_element<-lambda*K_value+1
logH<-sum(log(diag_element))
RH_value<-1/(diag_element)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow = 1,ncol = q)
xuRHxu<-matrix(0,nrow = q,ncol = q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[i,j]<-sum(xu[,i]*RH_value*xu[,j])
}
}
logxuRHxu<-log(det(xuRHxu))
logyuPyu<-log(yuRHyu-yuRHxu%*%tcrossprod(solve(xuRHxu),yuRHxu))
output<- -0.5*(logH+logxuRHxu+(n-q)*logyuPyu)# restricted log-likelihood
return(-output)# optim() minimizes
}
q<-ncol(x)
n<-nrow(y)
eigenK<-eigen(kinship)
K_vector<-eigenK$vectors
K_value<-eigenK$values
# rm(eigenK);gc()
xu<-crossprod(K_vector,x)# rotate X and y into the eigen basis of K
yu<-crossprod(K_vector,y)
ga0<-0
optimp3d<-optim(par=ga0,fn=iter_p3d,hessian = TRUE,method="L-BFGS-B",lower=-50,upper=10)
lambda<-exp(optimp3d$par)
return(list(lambda,K_vector,K_value))
}
q<-ncol(x)
value1<-P3D(x=x,y=y,kinship=kinship)
lambda<-value1[[1]]
uu<-as.matrix(value1[[2]])# The eigenvector of K matrix
vv<-value1[[3]] # The eigenvalue of K matrix
# RH_value<-1/(vv*lambda+1) # rm(value1);gc()
return(list(lambda=lambda,k_vector=uu,k_value=vv))
}
single_locus_model<-function(x,y,zz,lambda,uu,vv,CLO){
# Genome-wide single-locus random-model scan. Each locus occupies three
# adjacent columns of `zz` (A/H/B probabilities); its variance ratio
# lambdak is estimated by restricted likelihood, then the additive/dominance
# contrast (rows of `value2`) is tested. lambda/uu/vv come from p3d_method.
# CLO caps the number of worker CPUs (auto-detected when NULL).
# Returns per-locus lambda_k, fixed effects, gamma (3 genotype effects),
# p-values p1 with -log10 values log1, phi_k and sigma_2.
value2<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)# contrast: additive & dominance
# iteration function(R=zu%*%t(zu)*λk+D*λ+I);estimate λk
rqtl<-function(ga){
lambdak<-exp(ga)
Hk_term<-zuRHzu*lambdak+diag(1,3)
logHk<-sum(log(lambda*vv+1))+log(det(Hk_term))
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
rqtl<- -0.5*( logHk + log(det(xuRHkxu)) + (n-q)*log(yuPkyu) )
return(-rqtl)
}
#estimate the value of γ
gamma_estimate<-function(lambdak){
Hk_term<-zuRHzu*lambdak+diag(1,3)
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkxu<-t(xuRHzu)-zuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkyu<-t(yuRHzu)-zuRHzu%*%tcrossprod(RHk_term,yuRHzu)
zuRHkzu<-zuRHzu-zuRHzu%*%RHk_term%*%zuRHzu
beta<-solve(xuRHkxu,t(yuRHkxu))
beta<-matrix(beta,ncol = 1)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
sigma<-yuPkyu/(n-q)
sigma<-as.numeric(sigma)
gamma<-lambdak*zuRHkyu-lambdak*zuRHkxu%*%beta# BLUP of the locus effects
var<-abs((lambdak*diag(1,3)-lambdak*zuRHkzu*lambdak)*sigma)
stderr<-sqrt(diag(var))
phi.k<-sigma*lambdak#Φk
return(list(gamma,beta,var,phi.k,sigma,stderr))
}
# estimate the value of logp
logp_estimate<-function(L, g_k, pn){
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
rank1<-qr(L)$rank
tr1<-sum(diag(ginv(tcrossprod(L,L))%*%var.1))
dk1<-rank1-tr1/phi_k
p1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = FALSE)
log1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))# natural-log p converted to -log10(p)
return(list(p=p1,log=log1))
}
RH_value<-1/(vv*lambda+1)
mn<-ncol(zz)/3
n<-nrow(y)
q<-ncol(x)
xu<-crossprod(uu,x)# rotate into the eigen basis of K
yu<-crossprod(uu,y)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow=1,ncol=q)
xuRHxu<-matrix(0,nrow=q,ncol=q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[j,i]<-sum(xu[,j]*RH_value*xu[,i])
}
}
# yuRHyu<-crossprod(yu,diag(RH_value))%*%yu
# yuRHxu<-crossprod(yu,diag(RH_value))%*%xu
# xuRHxu<-crossprod(xu,diag(RH_value))%*%xu
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
# cl.cores<-2
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
SR_i<-numeric()
result<-foreach(SR_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((SR_i-1)*3+1):(SR_i*3),drop=F]# the 3 columns of locus SR_i
zu<-crossprod(uu,z)
xuRHzu<-matrix(0,nrow=q,ncol=3)
yuRHzu<-matrix(0,nrow=1,ncol=3)
zuRHzu<-matrix(0,nrow=3,ncol=3)
for(i in 1:3){
yuRHzu[,i]<-sum(yu*RH_value*zu[,i])
for(j in 1:q){
xuRHzu[j,i]<-sum(xu[,j]*RH_value*zu[,i])
}
for(j in 1:3){
zuRHzu[j,i]<-sum(zu[,j]*RH_value*zu[,i])
}
}
# xuRHzu<-crossprod(xu,diag(RH_value))%*%zu
# yuRHzu<-crossprod(yu,diag(RH_value))%*%zu
# zuRHzu<-crossprod(zu,diag(RH_value))%*%zu
ga<-0
par<-optim(par=ga,fn=rqtl,hessian = TRUE,method="L-BFGS-B",lower=-10,upper=10)
lambdak<-exp(par$par)
value3<-gamma_estimate(lambdak)
gamma_k<-value3[[1]]
beta<-value3[[2]]#estimate β (y=Xβ+Zγ+ξ+ε)
var_ga<-value3[[3]]
phi_k<-value3[[4]]
sigma_2<-value3[[5]]
main_effect<-value2%*%gamma_k
logvalue1<-logp_estimate(L=value2, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
result<-cbind(lambdak,beta,matrix(gamma_k,1,3),p1,log1,phi_k,sigma_2)
}
stopCluster(cl)
lambda_k<-result[,1]
mu_beta<-result[,2]
gamma_all<-result[,3:5]
p1<-result[,6]
log_p1<-result[,7]
phi_k<-result[,8]
sigma_2<-result[,9]
return(list(lambda_k=lambda_k, fixed=mu_beta, gamma=gamma_all,
p1=p1, log1=log_p1, phi_k=phi_k, sigma_2=sigma_2))
}
single_locus_model_Fixed<-function(x,y,zz,lambda,uu,vv,CLO){
# Fixed-model counterpart of single_locus_model: each locus (3 columns of
# `zz`) is fitted as fixed effects by weighted least squares in the eigen
# basis of K, and the additive/dominance contrast is tested with a
# chi-square Wald statistic. CLO caps the number of worker CPUs.
# Returns per-locus fixed effects, gamma, p1, -log10(p) log1 and sigma_2.
value2<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)# contrast: additive & dominance
# estimate the value of logp
logp_estimate<-function(L, g_k, pn){
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k# Test statistic
p1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = FALSE)
log1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))# natural-log p converted to -log10(p)
return(list(p=p1,log=log1))
}
Hsolve<-1/(vv*lambda+1)
mn<-ncol(zz)/3
n<-nrow(y)
q<-ncol(x)
# xu<-crossprod(uu,x)
yu<-crossprod(uu,y)# Equation deformation
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
SF_i<-numeric()
result<-foreach(SF_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((SF_i-1)*3+1):(SF_i*3),drop=F]# the 3 columns of locus SF_i
uxz<-crossprod(uu,cbind(x,z))
x_gamma<-ginv(t(uxz)%*%diag(Hsolve)%*%uxz)%*%t(uxz)%*%diag(Hsolve)%*%yu
q<-qr(uxz)$rank
sig_e2<-as.numeric(t(yu-uxz%*%x_gamma)%*%diag(Hsolve)%*%(yu-uxz%*%x_gamma)/(dim(uxz)[1]-q))
x_gamma_covmatr<-sig_e2*ginv(t(uxz)%*%diag(Hsolve)%*%uxz)
gamma<-x_gamma[-c(1:dim(x)[2])]
var_ga<-x_gamma_covmatr[-c(1:dim(x)[2]),-c(1:dim(x)[2]),drop=F]
main_effect<-value2%*%gamma
logvalue1<-logp_estimate(L=value2, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
result<-cbind(x_gamma[c(1:dim(x)[2])],matrix(gamma,1,3),p1,log1,sig_e2)
}
stopCluster(cl)
mu_beta<-result[,1:dim(x)[2]]
gamma_all<-result[,(dim(x)[2]+1):(dim(x)[2]+3)]
p1<-result[,(dim(x)[2]+4)]
log_p1<-result[,(dim(x)[2]+5)]
sigma_2<-result[,(dim(x)[2]+6)]
return(list(fixed=mu_beta, gamma=gamma_all, p1=p1, log1=log_p1, sigma_2=sigma_2))
}
peak_selection<-function(log_value,genoname){
# Return the sorted indices of the local maxima of the score profile
# `log_value`, searching each chromosome (column 2 of `genoname`)
# separately; chromosome endpoints count as peaks when they exceed their
# single neighbour.
local_maxima <- function(score) {
m <- length(score)
hits <- vector(length = 0)
if (score[1] > score[2]) hits <- append(hits, 1)
for (k in 2:(m - 1)) {
if ((score[k - 1] < score[k]) & (score[k] > score[k + 1])) {
hits <- append(hits, k)
}
}
if (score[m] > score[m - 1]) hits <- append(hits, m)
return(hits)
}
chr_col <- as.matrix(genoname[, 2])
chr_levels <- chr_col[!duplicated(chr_col)]
picked <- NULL
for (chr_now in chr_levels) {
idx <- which(chr_col %in% chr_now)
picked <- c(picked, idx[local_maxima(log_value[idx])])
}
return(sort(picked))
}
multi_peak_new<-function(gencoded,peak_id){
# Expand locus indices to genotype-column indices: locus p occupies the 3
# adjacent columns (p-1)*3 + 1:3 (A/H/B probabilities) of `gencoded`.
# Returns the selected columns (sorted) and the locus index of each column.
enk<-3
offsets<-seq(1,3,1)
col_id<-NULL
for(k in 1:length(peak_id)){
col_id<-c(col_id,(peak_id[k]-1)*enk+offsets)
}
return(list(z=gencoded[,sort(col_id)],order=sort(rep(peak_id,enk))))
}
Zhou_lars<-function(peak,CodeMatrix,n){
# Pre-select candidate loci when there are more peaks than individuals:
# run least-angle regression (lars) on the expanded genotype columns and
# keep the loci with a nonzero coefficient at the last step. When the
# number of peaks is below n the peaks are returned unchanged.
# NOTE(review): uses the free variable `y` (phenotype) from the enclosing
# scope and the lars() function from the `lars` package.
multi_value<-multi_peak_new(CodeMatrix,peak)
DesignMatrix<-multi_value$z
order0<-multi_value$order# length(order0); length(peak_id)
if(length(peak)>=n){
larstep<-length(order0)%/%(3*5)# cap the number of LARS steps
lar_result<-lars(x=DesignMatrix, y=y, type = "lar",trace = FALSE, normalize = TRUE, intercept = TRUE, eps = .Machine$double.eps, use.Gram=FALSE,max.steps = larstep)
lar_result.0<-lar_result$beta[nrow(lar_result$beta),]
lar_pos0<-order0[which(lar_result.0!=0)]
lar_pos<-lar_pos0[!duplicated(lar_pos0)]# length(lar_pos)
multi_value1<-multi_peak_new(CodeMatrix,lar_pos)
DesignMatrix1<-multi_value1$z # coefficient matrix of selected peak loci
order1<-multi_value1$order
}else{
lar_pos<-peak
DesignMatrix1<-DesignMatrix
order1<-order0
}# length(lar_pos)
return(list(lar_pos=lar_pos,Matrix=DesignMatrix1,order=order1))
}
sblgwas<-function(x,y,z,t,max.iter=200,min.err=1e-6){
# Coordinate-wise shrinkage regression of y on fixed effects x and marker
# effects z (one column per effect). Each marker variance tau[k] is updated
# in closed form with the prior degree parameter `t`, effects are updated
# one at a time against the running residual `a`, and iteration stops when
# the mean squared change of the effects falls below min.err.
# Returns list(iteration = per-iteration trace, parm = final beta/s2/df,
# blup = per-marker effect, variance, Wald statistic and p-value).
x<-as.matrix(x)
y<-as.matrix(y)
z<-as.matrix(z)
n<-length(y)
q<-ncol(x)
m<-ncol(z)
b0<-solve(t(x)%*%x,tol=1e-50)%*%(t(x)%*%y)
s2<-sum((y-x%*%b0)^2)/(n-q)# initial residual variance
b0<-matrix(0,q,1)
b<-b0
g0<-matrix(0,m,1)
g<-g0
lambda<-matrix(0,m,1)
tau<-g0
v<-g0
xx<-NULL
xy<-NULL
for(i in 1:q){
xx<-c(xx,sum(x[,i]^2))
xy<-c(xy,sum(x[,i]*y))
}
zz<-NULL
zy<-NULL
for(k in 1:m){
zz<-c(zz,sum(z[,k]^2))
zy<-c(zy,sum(z[,k]*y))
}
d<-numeric(m)
a<-matrix(0,n,1)# running fitted values (residual bookkeeping)
iter<-0
err<-1e8
my.iter<-NULL
while(iter < max.iter & err > min.err){
# update each fixed effect against the current residual
for(i in 1:q){
a<-a-x[,i]*b0[i]
ai<-sum(x[,i]*a)
b[i]<-(xy[i]-ai)/xx[i]
a<-a+x[,i]*b[i]
}
df<-0
for(k in 1:m){
a<-a-z[,k]*g0[k]
ak<-sum(z[,k]*a)
# closed-form update of tau[k] from the quadratic in the prior with
# degree parameter t; tau[k]=0 when no admissible root exists
c1<- -(t+3)*zz[k]^2
c2<- -(2*t+5)*zz[k]+(zy[k]-ak)^2
c3<- -(t+2)
if( ((c2^2-4*c1*c3) < 0) | (c2 < 0) ){
tau[k]<-0
} else {
tau[k]<-(-c2-sqrt(c2^2-4*c1*c3))/(2*c1)
}
lambda[k]<-tau[k]/s2
g[k]<-lambda[k]*(zy[k]-ak)-lambda[k]^2*zz[k]*(zy[k]-ak)/(lambda[k]*zz[k]+1)
d[k]<-lambda[k]*(zz[k]-lambda[k]*zz[k]^2/(lambda[k]*zz[k]+1))
v[k]<-tau[k]-tau[k]*d[k]
df<-df+d[k]
a<-a+z[,k]*g[k]
}
if((n-q-df) > 0){s2<-sum((y-a)^2)/(n-q-df)
}else{
s2<-sum((y-a)^2)/(n-q)
}
iter<-iter+1
err<-sum((g-g0)^2)/m
g0<-g
b0<-b
my.iter<-rbind(my.iter,cbind(iter,err,s2,t(b),t(g)))
}
my.parm<-data.frame(iter,err,s2,b,df)
names(my.parm)<-c("iter","error","s2","beta","df")
# Wald statistics only for markers with a nonzero effect variance
posv<-which(v!=0)
m<-length(g)
wald<-c(rep(0,m))
gg<-g[posv]
vv<-v[posv]
wald[posv]<-gg^2/vv
p<-pchisq(wald,1,lower.tail=FALSE)
my.blup<-data.frame(g,v,wald,p)
names(my.blup)<-c("gamma","vg","wald","p_wald")
var.beta<-NULL
for(i in 1:q){
var.beta<-c(var.beta,paste("beta",i,sep=""))
}
var.gamma<-NULL
for(k in 1:m){
var.gamma<-c(var.gamma,paste("gamma",k,sep=""))
}
var.names<-c(c("iter","error","s2"),var.beta,var.gamma)
my.iter<-data.frame(my.iter)
names(my.iter)<-var.names
out<-list(my.iter,my.parm,my.blup)
names(out)<-c("iteration","parm","blup")
return(out)
}
selection<-function(posx,genoname,svrad){
# Greedily thin candidate positions: walk through `posx` in order and keep
# a position unless an already-kept position on the SAME chromosome
# (column 2 of `genoname`) lies within `svrad` index steps of it.
kept<-c(posx[1])
k<-1
while(k<length(posx)){
k<-k+1
near<-which(abs(kept-as.numeric(posx[k]))<=(svrad))
if(length(near)>0){
no_clash<-length(which(genoname[kept[near],2]==as.numeric(genoname[posx[k],2])))==0
if(no_clash){
kept<-c(kept,posx[k])
}
}else{
kept<-c(kept,posx[k])
}
}
return(kept)
}
selection2<-function(posx1,posx2,genoname,svrad){
# Keep each position of `posx1` unless some position of `posx2` on the SAME
# chromosome (column 2 of `genoname`) lies within `svrad` index steps of it.
kept<-NULL
for(k in seq_along(posx1)){
near<-which(abs(posx2-as.numeric(posx1[k]))<=(svrad))
if(length(near)>0){
no_clash<-length(which(genoname[posx2[near],2]==as.numeric(genoname[posx1[k],2])))==0
if(no_clash){
kept<-c(kept,posx1[k])
}
}else{
kept<-c(kept,posx1[k])
}
}
return(kept)
}
ebayes_EM<-function(x,z,y,v0,v,tau,err_max){
# Empirical-Bayes EM for the multi-locus model y = X b + sum_k Z_k g_k + e,
# where each locus k has 3 design columns in `z` (mk=3) and its own variance
# v[k]; v0 is the residual variance. Iterates posterior updates of g_k,
# variance updates with inverse-chi-square prior parameter `tau`, and GLS
# updates of b, until the parameter change drops below err_max (max 500
# iterations). Afterwards each locus's additive/dominance contrast
# (rows of L) gets a chi-square p-value.
# Returns list(b, u = posterior locus effects, u1 = contrasted effects,
# sigma2, p1 = per-locus p-values, iter).
n<-nrow(z);k<-ncol(z)
mk<-3; kn<-k/mk
v0<-as.numeric(v0)
v<-matrix(v,ncol=1)
# initial OLS estimate of b, with ridge/ginv fallbacks for singular X'X
if(abs(min(eigen(crossprod(x,x))$values))<1e-6){
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)+diag(ncol(x))*1e-8))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%crossprod(x,y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%(crossprod(x,y)) }
}
}
u<-matrix(0,nrow=mk,ncol=kn)# E(γk)
w<-matrix(0,nrow=mk,ncol=k)# var(γk)
s<-matrix(0,nrow=kn,ncol=1)# tr(var(γk))
vv<-matrix(0,n,n)
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
vv=vv+tcrossprod(zz,zz)*v[i,]
}
vv<-vv+diag(n)*v0 # V
L<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)# additive & dominance contrasts
rank_1<-qr(L)$rank
iter<-0;err<-1000;iter_max<-500;
omega<-0
while( (iter<iter_max)&&(err>err_max) ){
iter<-iter+1
v01<-v0# v01 is the initial σ^2
v1<-v# v1 is the initial σk^2
b1<-b# b1 is the initial β
#s1<-s
try_a<-try({ vi<-chol2inv(chol(vv)) },silent=TRUE)# solve(V)
if('try-error' %in% class(try_a)){
try_aa<-try({ vi<-solve(vv) },silent=TRUE)
if('try-error' %in% class(try_aa)){ vi<-ginv(vv) }
}
xtv<-crossprod(x,vi)# t(X)%*%solve(V)
# GLS update of b, again with ridge/ginv fallbacks
if(ncol(x)==1){
b<-((xtv%*%x)^(-1))*(xtv%*%y)
}else{
if(abs(min(Mod(eigen(xtv%*%x)$values)))<1e-6){
try_b<-try({ b<-chol2inv(chol((xtv%*%x)+diag(ncol(x))*1e-8))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}
}
r<-y-x%*%b# y-Xβ
ss<-matrix(0,nrow=n,ncol=1)
vv<-matrix(0,n,n)# new V
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
zztvi<-crossprod(zz,vi)# t(Zk)%*%solve(V)
u[,i]<-v[i,]*zztvi%*%r# E(γk)
w[,nc]<-v[i,]*( diag(1,mk)-zztvi%*%zz*v[i,] )# var(γk)
s[i,]<-sum(diag(w[,nc]))# tr(var(γk))
v[i,]<-(crossprod(u[,i,drop=F],u[,i,drop=F])+s[i,]+omega)/(tau+2+mk)
ss<-ss+zz%*%u[,i,drop=F]
vv<-vv+tcrossprod(zz,zz)*v[i,]# ∑( Zk%*%t(Zk)*(σk^2) )
}
v0<-as.numeric(crossprod(r,(r-ss))/n)# new σ^2
vv<-vv+diag(n)*v0# new V
err<-(crossprod((b1-b),(b1-b))+(v01-v0)^2+crossprod((v1-v),(v1-v)))/(1+ncol(x)+kn)
beta<-t(b)
sigma2<-v0
}
# per-locus chi-square test of the contrasted effects
u1<-matrix(0,nrow=2,ncol=kn)# main-E(γk)
p1<-matrix(1,kn,1)
# pvalue<-matrix(1,kn,1)
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
gammak<-u[,i,drop=F]
u1[,i]<-L%*%gammak
var_1<-L%*%w[,nc,drop=F]%*%t(L)
tr_1<-sum(diag(ginv(tcrossprod(L))%*%var_1))##tr[...]
dk1<-abs(rank_1-tr_1/v[i,])
p1[i,]<-1-pchisq( t(u1[,i,drop=F])%*%ginv(L%*%w[,nc]%*%t(L))%*%u1[,i,drop=F], 2)
}
return(list(b=b,u=u,u1=u1,sigma2=sigma2,p1=p1,iter=iter))
}
Zhou_sbl<-function(peak,Order,DesignMatrix,CodeMatrix,genoname,sbl_t,sbl_p,tau,err_max,fix_p,Sigma,SigmaK){
# Second multi-locus screening stage: sparse Bayesian learning (sblgwas) on the
# LARS-selected design, followed by empirical-Bayes EM re-estimation, then
# de-duplication of nearby same-chromosome positions via selection/selection2.
# NOTE(review): reads `x` and `y` from the enclosing environment rather than
# taking them as arguments — consistent with the rest of this file.
# Returns list(fix = positions fixed into the model, pos = remaining candidate
# positions, xin = cbind(x, genotype columns of the fixed positions)).
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
# Largest gap between adjacent map positions on any chromosome; used to decide
# whether index-distance pruning (radius 1) is meaningful.
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
result_sblgwas<-sblgwas(x,y,DesignMatrix,sbl_t)
sbl_p_wald<-result_sblgwas$blup[,4]#sbl_par<-result_sblgwas$blup[,1]# sbl_p_wald<-p.adjust(sbl_p_wald, method = "bonferroni")
sbl_pos_order<-Order[order(sbl_p_wald)]
p_order<-sort(sbl_p_wald)
# Threshold the Wald p-values at the chi-square(1) tail corresponding to a
# -log10(p) score of sbl_p; id2 catches the weaker (but non-trivial) signals.
id1<-which( p_order< (1-pchisq(sbl_p*2*log(10),1)))
id2<-which((p_order>=(1-pchisq(sbl_p*2*log(10),1)))&(p_order<1))
if(length(id1)>0){
sbl_pos_order1<-sbl_pos_order[id1]
sbl_pos_order2<-sbl_pos_order[id2]
sbl_pos_order1<-sbl_pos_order1[!duplicated(sbl_pos_order1)]
sbl_pos_order2<-sbl_pos_order2[!duplicated(sbl_pos_order2)]
sort_order1<-sort(sbl_pos_order1)
result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,sort_order1)$z,y,Sigma,SigmaK[sort_order1],tau,err_max)
# emID1: loci passing the fix_p threshold; emID2: the 5 smallest p-values.
emID1<-which(result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))
emID2<-order(result_emba$p1)[seq(1,5,1)]
if(length(emID1)>5){
emID<-sort(emID1)
fix_pos<-sort_order1[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(sbl_pos_order1,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,fix_pos,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
}
}else{
emID<-sort(union(emID1,emID2))
fix_pos<-sort_order1[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
}
}
sbl_pos<-sort(c(sbl_pos_order1,sbl_pos_order2))# length(union(sbl_pos,fix_pos))
}else{
# No SBL signal at all: fall back to EM over the original peak set.
result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,peak)$z,y,Sigma,SigmaK[peak],tau,err_max)
emID1<-which(result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))
emID2<-order(result_emba$p1)[seq(1,5,1)]
if(length(emID1)>5){
emID<-sort(emID1)
fix_pos<-peak[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
}else{
sbl_pos_order1<-peak[-emID]
}
}else{
emID<-sort(union(emID1,emID2))
fix_pos<-peak[emID]
if(maxdistance<=1){
sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
}else{
sbl_pos_order1<-peak[-emID]
}
}
sbl_pos<-sort(sbl_pos_order1)
}
sbl_fix_pos<-sort(fix_pos)
xin<-cbind(x,multi_peak_new(CodeMatrix,sbl_fix_pos)$z)
return(list(fix=sbl_fix_pos,pos=sbl_pos,xin=xin))
}
into_vector<-function(xmatrix){
  # Flatten a matrix into a single vector in column-major order.
  # Replaces the original column-appending loop (O(n^2) grow-by-c
  # concatenation) with as.vector(), which yields the same values in the
  # same order; any incidental dimnames are dropped, which the numeric
  # callers in this file ignore.
  return(as.vector(xmatrix))
}
multinormal<-function(y,mean,sigma){
  # Normal density of y with mean `mean` and VARIANCE `sigma` (not SD).
  # Delegates to stats::dnorm (which takes an SD, hence sqrt(sigma))
  # instead of the hand-rolled formula with a hard-coded pi literal.
  pdf_value<-dnorm(y,mean=mean,sd=sqrt(sigma));
  return (pdf_value)
}
LRT_F2<-function(xxn,xxx,yn,par,mk){
# Likelihood-ratio LOD score for each putative locus: compare the full model
# (all loci) against the model with that locus's mk design columns removed.
#   xxn: fixed-effect design; xxx: genotype design (mk columns per locus)
#   yn: phenotype; par: optional pre-estimated coefficients for cbind(xxn,xxx)
#       (when NULL/empty, OLS estimates are computed here); mk: columns/locus
# Returns a kn x 1 matrix of LOD scores.
# mk<-2+2*(en-1)# the number of genotypes at per locus
xn<-ncol(as.matrix(xxn))
nq<-ncol(xxx)
ns<-nrow(yn)
kn<-nq/mk
at1<-nq
ad<-if(at1>0.5) cbind(xxn,xxx) else xxn
if(length(par)==0){
# Ridge-stabilised OLS when t(ad)%*%ad is near-singular.
bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,yn) else solve(crossprod(ad,ad))%*%crossprod(ad,yn)
}else{
bb<-par
}
vv<-as.numeric(crossprod((yn-ad%*%bb),(yn-ad%*%bb))/ns)##y-(X Z)t(β γ)
# Full-model log-likelihood; abs() guards the log, the density itself is
# non-negative so abs() is effectively a no-op here.
ll<-sum(log(abs(multinormal(yn,ad%*%bb,vv))))
lod<-matrix(0,kn,1)
if(at1>0.5){
for(m in 1:kn){
i1<-(((m-1)*mk+1):(m*mk));# i2<-((m-1)*mk+1):((m-1)*mk+2); i3<-((m-1)*mk+3):(m*mk)
m1<-seq(1,ncol(ad),1)[-c(i1+xn)];# m2<-sub[-c(i2+xn)]; m3<-sub[-c(i3+xn)]
ad1<-ad[,m1,drop=F]
if(length(par)==0){
bb1<-if(abs(min(eigen(crossprod(ad1,ad1))$values))<1e-6) solve(crossprod(ad1,ad1)+diag(ncol(ad1))*1e-8)%*%crossprod(ad1,yn) else solve(crossprod(ad1,ad1))%*%crossprod(ad1,yn)
}else{
# Reuse the supplied coefficients for the remaining columns.
bb1<-par[m1]
}
vv1<-as.numeric(crossprod((yn-ad1%*%bb1),(yn-ad1%*%bb1))/ns)
ll1<-sum(log(abs(multinormal(yn,ad1%*%bb1,vv1))))
# LOD = LR statistic / (2 ln 10).
lod[m,]<--2.0*(ll1-ll)/(2.0*log(10))
}
}
return(lod)
}
optimize_every_posx<-function(xpos,z,yn,genoname,rr,tau,err_max){
# Local refinement of each detected position: for position i, remove its
# effect from the phenotype, rerun ebayes_EM over a window of neighbouring
# pseudo-marker indices (bounded by chromosome ends and half the distance to
# the adjacent detected positions), and move the position to the window's
# minimum-p location.
# NOTE(review): uses file-level `x`, `y`, `initial_sigma`, `initial_sigmak`;
# `yy` is built from the global `y`, not the `yn` argument — confirm intended.
chr_n<-as.numeric(genoname[,2])
chr_n<-chr_n[!duplicated(chr_n)]
maxdistance<-0
for(i in 1:length(chr_n)){
maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
}
if(maxdistance<rr){
# Convert the cM radius into an index radius on the pseudo-marker grid.
rr<-rr/maxdistance
ad<-cbind(x,multi_peak_new(z,xpos)$z)
if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6){
try_a<-try({ bb<- chol2inv(chol(crossprod(ad,ad)+diag(ncol(ad))*1e-8))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}else{
try_a<-try({ bb<-chol2inv(chol(crossprod(ad,ad)))%*%crossprod(ad,yn) },silent=TRUE)
if('try-error' %in% class(try_a)){
try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
}
}
par<-bb[-c(1:dim(x)[2])]# genotype-effect coefficients only
result_pos<-xpos
# chr_sum: cumulative marker counts per chromosome (0-prefixed), used to
# clamp the search window at chromosome boundaries.
chr_sum<-NULL
for(i in 1:length(chr_n)){
chr_sum<-c(chr_sum,length(which(genoname[,2]==i)))
}
chr_sum<-c(0,chr_sum)
for(i in 1:length(xpos)){
# Residual phenotype with position i's effect left in (all others removed).
yy<-y-multi_peak_new(z,xpos[-i])$z%*%par[-seq((i-1)*3+1,i*3,1)]
chr_now<-apply(genoname[,2,drop=F],2,as.numeric)[xpos[i]]
if(i==1){
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}else{
if(genoname[xpos[i-1],2]==genoname[xpos[i],2]){
left_rr<-min(0.5*(xpos[i]-xpos[i-1]),rr)
}else{
left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
}
}
if(i==length(xpos)){
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}else{
if(genoname[xpos[i+1],2]==genoname[xpos[i],2]){
right_rr<-min(0.5*(xpos[i+1]-xpos[i]),rr)
}else{
right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
}
}
left_rr<-floor(left_rr)
right_rr<-floor(right_rr)
least_pos<-xpos[-i]
now_pos<-c((xpos[i]-left_rr):(xpos[i]+right_rr))
try_x<-try({
result_embax<-ebayes_EM(x,multi_peak_new(z,now_pos)$z,yy,initial_sigma,initial_sigmak[now_pos],tau,err_max)
},silent=TRUE)
# BUG FIX: the original condition was inverted — it updated only when the
# try() FAILED, in which case `result_embax` does not even exist. Update
# the position only when the EM run succeeded.
if(!('try-error' %in% class(try_x))){
max_pos<-now_pos[which.min(result_embax$p1)]
result_pos[i]<-max_pos# rm(result_embax)
}
}
}else{
result_pos<-xpos
}
return(result_pos)
}
multi_code_classic<-function(n_id,peak_id){
  # Build the classic additive/dominance design for the loci in `peak_id`,
  # restricted to the individuals in `n_id`. Uses the file-level genotype
  # matrices Ax, Hx, Bx and the sample count n.
  # NOTE(review): the output is allocated with n rows — assumes
  # length(n_id) == n, which holds at the call site (n_id = yes_id).
  mk<-2# additive + dominance column per locus
  lengthpeak<-length(peak_id)
  # Subset BEFORE subtracting: avoids materialising the full Ax - Bx
  # difference matrix just to keep a few columns (same values as the
  # original (Ax-Bx)[n_id, peak_id]).
  gen_A<-Ax[n_id,peak_id,drop=F]-Bx[n_id,peak_id,drop=F]
  gen_D<-Hx[n_id,peak_id,drop=F]
  adgen3<-matrix(0,nrow=n,ncol=lengthpeak*mk)
  adgen3[,seq(1,lengthpeak*mk,mk)]<-gen_A
  adgen3[,seq(2,lengthpeak*mk,mk)]<-gen_D
  return(adgen3)
}
effect_estimation<-function(n_id,xpos){
# Final effect/variance estimation for the detected QTL at positions `xpos`:
# OLS fit of the additive/dominance design, then per-QTL genetic variance
# sigma_Q = 0.5*a^2 + 0.25*d^2 (F2 expectation) and percent variance explained.
# Uses the file-level `x` and `y`. Returns list(effects, pve, sig_Q, sig_e2, sig_y).
xmatrix<-multi_code_classic(n_id,xpos)
ad<-cbind(x,xmatrix)
# Ridge-stabilised OLS when the design is near-singular.
bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,y) else solve(crossprod(ad,ad))%*%crossprod(ad,y)
sig_e2<-as.numeric(crossprod(y-ad%*%bb)/length(y))
bb<-bb[-1]# drop the intercept, keep the (a, d) pairs
Into_matrix<-function(vector_x,row_n){
# Reshape a vector into a row_n x (length/row_n) matrix, column by column.
col_n<-length(vector_x)/row_n
result_x<-matrix(0,nrow=row_n,ncol=col_n)
for(i in 1:col_n){
result_x[,i]<-vector_x[((i-1)*row_n+1):(i*row_n)]
}
return(result_x)
}
effect_all<-t(Into_matrix(bb,2))# one row per QTL: (additive, dominance)
ef_Q<-effect_all
sig_Q<-0.5*(ef_Q[,1])^2+0.25*(ef_Q[,2])^2
# Total phenotypic variance: never report less than the sum of components.
sig_y<-max(var(y),(sum(sig_Q)+sig_e2))
pve<-(sig_Q/sig_y)*100
return(list(effect_all,pve,sig_Q,sig_e2,sig_y))
}
LeftRight_marker<-function(map,ChrPos){
  # For every (chromosome, position) row of ChrPos, report the names of the
  # flanking markers from `map` (columns: name, chromosome, position).
  # A position outside the mapped range yields "" on the out-of-range side.
  flank_one<-function(chr,pos){
    rows<-which(as.numeric(map[,2])==chr)
    marker_pos<-as.numeric(map[rows,3])
    marker_name<-map[rows,1]
    left<-if(pos<min(marker_pos)) "" else marker_name[max(which(marker_pos<=pos))]
    right<-if(pos>max(marker_pos)) "" else marker_name[min(which(marker_pos>=pos))]
    c(left,right)
  }
  LR_result<-NULL
  for(k in seq_len(dim(ChrPos)[1])){
    LR_result<-rbind(LR_result,flank_one(as.numeric(ChrPos[k,1]),as.numeric(ChrPos[k,2])))
  }
  return(LR_result)
}
######################################### input and basic setup #########################################
#*######### environment and phenotype #
# Missing phenotypes are coded "-"; keep only scored individuals.
pheno<-pheRaw[,NUM,drop=F]
yes_id<-which(pheno!="-")
y<-as.numeric(pheno[yes_id])
n<-length(y)
y<-as.matrix(y)
#*######### genotype #
# genRaw<-as.matrix(genRaw)
#*######### calculate Z matrix for K matrix #
# Interleave the AA/Aa/aa indicator matrices: 3 consecutive columns per marker.
mn<-dim(Ax0)[2]
Z<-matrix(0,nrow=n,ncol=mn*3)
Z[,seq(1,mn*3,3) ]<-Ax0[yes_id,]
Z[,seq(2,mn*3,3) ]<-Hx0[yes_id,]
Z[,seq(3,mn*3,3) ]<-Bx0[yes_id,]# dim(Z)
#*######### calculate K matrix #
K<-kinship_every(Z)
#*######### calculate Z matrix for the subsequent algorithm #
# Rebuild Z on the pseudo-marker-augmented genotype matrices (Ax/Hx/Bx).
mn<-dim(Ax)[2]
Z<-matrix(0,nrow=n,ncol=mn*3)
Z[,seq(1,mn*3,3) ]<-Ax[yes_id,]
Z[,seq(2,mn*3,3) ]<-Hx[yes_id,]
Z[,seq(3,mn*3,3) ]<-Bx[yes_id,]# dim(Z)
#*######### X matrix; y=Xβ+Zγ+ξ+ε #
x<-matrix(1,nrow=n,ncol=1)#
if(is.null(yygg)==FALSE){
x<-cbind(x,yygg[yes_id,,drop=F])
}# dim(x)
if(det(crossprod(x,x))==0){
warning("X is singular")
}
# Pre-adjust y for the covariates and reduce X to the intercept, so the
# downstream scans only carry a single fixed-effect column.
ReduceDim_x<-TRUE
if(ReduceDim_x){
x_effect<-if(abs(min(eigen(crossprod(x,x))$values))<1e-6) solve(crossprod(x,x)+diag(ncol(x))*1e-8)%*%crossprod(x,y) else solve(crossprod(x,x))%*%crossprod(x,y)
yygg_effect<-x_effect[-1,1,drop=F]
y<-y-x[,-1,drop=F]%*%yygg_effect
x<-matrix(1,nrow=n,ncol=1)
}
######################################### single_locus_scanning #########################################
#*######### single locus scanning #
p3d_result<-p3d_method(x,y,K)
if(Model=="Random"){
single_locus_model_result<-single_locus_model(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-single_locus_model_result$phi_k
}else if(Model=="Fixed"){
single_locus_model_result<-single_locus_model_Fixed(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-rep(1,mn)
}else{
warning("Please enter Model!")
}
#*######### pick the peaks #
peak_id<-peak_selection(single_locus_model_result$log1,genoname)# length(peak_id)
######################################### multi_locus_scanning #########################################
# Stage 1: LARS pre-screen of the single-locus peaks.
multi_locus_result1<-Zhou_lars(peak_id,Z,n) # length(multi_locus_result1$lar_pos)
# Stage 2: sparse Bayesian learning + EM-based pruning of the LARS survivors.
multi_locus_result2<-Zhou_sbl(peak=multi_locus_result1$lar_pos,Order=multi_locus_result1$order,
DesignMatrix=multi_locus_result1$Matrix,CodeMatrix=Z,
genoname=genoname,
sbl_t=-1,sbl_p=3,tau=0,err_max=1e-6,fix_p=1.5,
Sigma=initial_sigma,SigmaK=initial_sigmak)# larpos=multi_locus_result1$lar_pos0,larbeta=multi_locus_result1$beta
# Stage 3: empirical-Bayes EM with the "fixed" positions absorbed into X,
# thresholded at -log10(p) = emba_p.
emba_p<-3
result_emba<-ebayes_EM(multi_locus_result2$xin,multi_peak_new(Z,multi_locus_result2$pos)$z,
y,initial_sigma,initial_sigmak[multi_locus_result2$pos],
tau=-2,err_max=1e-8)
emba_pos0<-which(result_emba$p1<(1-pchisq(emba_p*2*log(10),2)))# cbind(result_emba$p1,result_emba$p2)
emba_all_pos<-sort(c(multi_locus_result2$fix,multi_locus_result2$pos[emba_pos0]))
# Re-estimate all surviving positions jointly against the intercept-only X.
result_emba1<-ebayes_EM(x,multi_peak_new(Z,emba_all_pos)$z,
y,initial_sigma,initial_sigmak[emba_all_pos],
tau=-2,err_max=1e-8)
emba1_pos<-emba_all_pos
emba1_par_E<-c(result_emba1$b)
emba1_par<-result_emba1$u
emba1_par<-into_vector(emba1_par)
multi_value4<-multi_peak_new(Z,emba1_pos)
z_M4<-multi_value4$z
order4<-multi_value4$order
# Per-column LRT (mk = 1): a position survives if any of its genotype columns
# clears a LOD of 2.5.
LRT_lod<-LRT_F2(xxn=x,xxx=z_M4, yn=y,par=c(emba1_par_E,emba1_par),mk=1)# cbind(order4,LRT_lod)
lrt_pos<-order4[which(LRT_lod>2.5)]
lrt_pos<-lrt_pos[!duplicated(lrt_pos)]# length(lrt_pos)
######################################### Optimization and output #########################################
if(length(lrt_pos)>0){
# Refine positions within a CriDis window (only for dense maps).
if(CriDis<=4){
optimize_pos<-optimize_every_posx(xpos=lrt_pos,z=Z,yn=y,genoname,rr=CriDis,tau=0,err_max=1e-8)
}else{
optimize_pos<-lrt_pos
}
emba3_pos<-optimize_pos
# CriLOD<-3
# Final per-locus LOD (mk = 3 genotype columns per locus); keep loci at or
# above the user threshold CriLOD.
lod_Q<-LRT_F2(xxn=x,xxx=multi_peak_new(Z,emba3_pos)$z, yn=y,par=NULL,mk=3)# cbind(emba3_pos,lod_Q)
lrt2_pos<-emba3_pos[which(lod_Q>=CriLOD)]# length(lrt2_pos)
last_lod<-lod_Q[which(lod_Q>=CriLOD)]
if(length(lrt2_pos)>0){
IC_data<-cbind(x,multi_peak_new(Z,lrt2_pos)$z)
lm_IC<-lm(y~IC_data-1)
# NOTE(review): AIC/BIC values are computed but discarded (see the
# commented-out data.frame columns below).
AIC(lm_IC)
BIC(lm_IC)
LR_marker<-LeftRight_marker(map=mapRaw,ChrPos=genoname[lrt2_pos,2:3,drop=F])
result_all<-effect_estimation(yes_id,lrt2_pos)
# Residual and total variance are reported once, on the first row only.
var_e<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_y<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_e[1]<-round(result_all[[4]],4)
var_y[1]<-round(result_all[[5]],4)
data.all<-data.frame(genoname[lrt2_pos,2:3,drop=F],
round(result_all[[1]],4),round(last_lod,4),
LR_marker,
round(result_all[[3]],4),
round(result_all[[2]],4),
var_e,var_y)
# rep(AIC(lm_IC),length(lrt2_pos)),
# rep(BIC(lm_IC),length(lrt2_pos)))
rownames(data.all)<-NULL
colnames(data.all)<-c("Chr","Position(cM)","Effect.a","Effect.d","LOD",
"Left_marker","right_marker",
"Var_Genet","r2(%)",
"Var_Error","Var_Phen(total)")
reslt_list<-list(result=data.all,p_Q=single_locus_model_result$log1)
}else{
reslt_list<-NULL
warning("No QTL were detected!")
}
}else{
reslt_list<-NULL
warning("No QTL were detected!")
}
return(reslt_list)
}
#' The second step of Zhou method for multiple environments
#'
#' @param Model Random or fixed model.
#' @param pheRaw phenotype matrix.
#' @param genRaw genotype matrix.
#' @param mapRaw linkage map matrix.
#' @param CriLOD Critical LOD scores for significant QTL.
#' @param NUM The serial number of the trait to be analyzed.
#' @param EnvNum A vector giving the number of environments for each trait.
#' @param yygg covariate matrix.
#' @param genoname linkage map matrix with pseudo markers inserted.
#' @param Ax0 AA genotype matrix.
#' @param Hx0 Aa genotype matrix.
#' @param Bx0 aa genotype matrix.
#' @param Ax AA genotype matrix with pseudo markers inserted.
#' @param Hx Aa genotype matrix with pseudo markers inserted.
#' @param Bx aa genotype matrix with pseudo markers inserted.
#' @param dir File storage path.
#' @param CriDis The distance of optimization.
#' @param CLO Number of CPUs.
#'
#' @return a list
#' @export
#'
#' @examples
#' data(F2data)
#' readraw<-Readdata(file=F2data,fileFormat="GCIM",
#' method="GCIM-QEI",filecov=NULL,
#' MCIMmap=NULL,MultiEnv=TRUE)
#' DoResult<-Dodata(fileFormat="GCIM",
#' Population="F2",method="GCIM-QEI",
#' Model="Random",readraw,MultiEnv=TRUE)
#' ZhouMatrices<-ZhouF(pheRaw=DoResult$pheRaw,
#' genRaw=DoResult$genRaw,mapRaw1=DoResult$mapRaw1,
#' WalkSpeed=1,CriLOD=3,dir=tempdir())
#' OutputZhou<-ZhouMethod(Model="Random",
#' pheRaw=DoResult$pheRaw,genRaw=DoResult$genRaw,
#' mapRaw=ZhouMatrices$mapRaw,CriLOD=3,NUM=1,
#' EnvNum=DoResult$EnvNum,yygg=DoResult$yygg1,
#' genoname=ZhouMatrices$genoname,
#' Ax0=ZhouMatrices$Ax0,Hx0=ZhouMatrices$Hx0,
#' Bx0=ZhouMatrices$Bx0,Ax=ZhouMatrices$Ax,
#' Hx=ZhouMatrices$Hx,Bx=ZhouMatrices$Bx,
#' dir=tempdir(),CriDis=5,CLO=2)
ZhouMethod<-function(Model=NULL,pheRaw=NULL,genRaw=NULL,mapRaw=NULL,CriLOD=NULL,NUM=NULL,EnvNum=NULL,yygg=NULL,genoname=NULL,
Ax0=NULL,Hx0=NULL,Bx0=NULL,Ax=NULL,Hx=NULL,Bx=NULL,dir=NULL,CriDis=NULL,CLO=NULL){# chr_n=NULL,
######################################### function #########################################
kinship_all<-function(coded_gen,n_id,en){
  # Block-diagonal kinship matrix: one marker-normalised kinship block per
  # environment. Rows of `coded_gen` are grouped by environment; `n_id` is a
  # list whose i-th element holds the sample ids of environment i. Uses the
  # file-level sample count `n` for the output dimension.
  block_kinship<-function(geno_block){
    # divisor = number of markers (columns / environments / 3 genotypes)
    kk<-geno_block%*%t(geno_block)
    kk/(ncol(geno_block)/en/3)
  }
  k_all<-matrix(0,n,n)
  offset<-0
  for(env in 1:en){
    size<-length(n_id[[env]])
    idx<-(offset+1):(offset+size)
    k_all[idx,idx]<-block_kinship(coded_gen[idx,])
    offset<-offset+size
  }
  return(k_all)
}
fixed_x<-function(n_id,en){
  # Fixed-effect design: intercept plus en-1 sum-to-zero environment
  # contrast columns (individuals of the last environment coded -1).
  # Uses the file-level sample count `n`; rows are grouped by environment.
  env_cols<-matrix(0,nrow = n,ncol = en-1)
  env_cols[(n-length(n_id[[en]])+1):n,]<- -1
  offset<-0
  for(env in 1:(en-1)){
    rows<-(offset+1):(offset+length(n_id[[env]]))
    env_cols[rows,env]<-1
    offset<-offset+length(n_id[[env]])
  }
  return(cbind(matrix(1,n,1),env_cols))
}
name_function<-function(en){
  # Column labels for the effect-estimate table: overall additive/dominance
  # effects first, then per-environment interaction effects
  # ("Effect.aE<i>", "Effect.dE<i>" for i = 1..en).
  # Vectorised paste0 replaces the original grow-in-loop concatenation;
  # as.vector(rbind(...)) interleaves the a/d labels per environment.
  per_env<-as.vector(rbind(paste0("Effect.aE",seq_len(en)),
                           paste0("Effect.dE",seq_len(en))))
  return(c("Effect.a","Effect.d",per_env))
}
p3d_method<-function(x,y,kinship){
# P3D ("population parameters previously determined") step: estimate the
# polygenic-to-residual variance ratio lambda once, by REML on the kinship
# eigendecomposition, so the per-marker scans can reuse it.
# Returns list(lambda, k_vector = kinship eigenvectors, k_value = eigenvalues).
# estimate the value of λ & λk kinship=K;
P3D<-function(x,y,kinship){
# Negative restricted log-likelihood profiled over ga = log(lambda).
iter_p3d<-function(ga){
lambda<-exp(ga)
diag_element<-lambda*K_value+1
logH<-sum(log(diag_element))
RH_value<-1/(diag_element)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow = 1,ncol = q)
xuRHxu<-matrix(0,nrow = q,ncol = q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[i,j]<-sum(xu[,i]*RH_value*xu[,j])
}
}
logxuRHxu<-log(det(xuRHxu))
logyuPyu<-log(yuRHyu-yuRHxu%*%tcrossprod(solve(xuRHxu),yuRHxu))
output<- -0.5*(logH+logxuRHxu+(n-q)*logyuPyu)
return(-output)
}
q<-ncol(x)
n<-nrow(y)
# Rotate x and y into the kinship eigenbasis so H becomes diagonal.
eigenK<-eigen(kinship)
K_vector<-eigenK$vectors
K_value<-eigenK$values# rm(eigenK);gc()
xu<-crossprod(K_vector,x)
yu<-crossprod(K_vector,y)
ga0<-0
# Bounded 1-D optimisation on the log scale keeps lambda positive.
optimp3d<-optim(par=ga0,fn=iter_p3d,hessian = TRUE,method="L-BFGS-B",lower=-50,upper=10)
lambda<-exp(optimp3d$par)
return(list(lambda,K_vector,K_value))
}
q<-ncol(x)
value1<-P3D(x=x,y=y,kinship=kinship)
lambda<-value1[[1]]
uu<-as.matrix(value1[[2]])
vv<-value1[[3]]
# RH_value<-1/(vv*lambda+1)
rm(value1);gc()
return(list(lambda=lambda,k_vector=uu,k_value=vv))
}
single_locus_model<-function(x,y,zz,lambda,uu,vv,en,CLO){
# Genome scan under the random-QTL-effect model, one marker at a time and in
# parallel (foreach/%dopar%). For each marker: REML-estimate the marker
# variance ratio lambda_k, compute effect estimates, then Wald-type p-values
# for the QTL main effect (p1/log1), environment effect (p2/log2) and
# QTL-by-environment interaction (p3/log3).
#   x/y: fixed design and phenotype; zz: genotype design, 3*en cols/marker
#   lambda/uu/vv: P3D variance ratio and kinship eigenvectors/eigenvalues
#   en: number of environments; CLO: CPU count (NULL = auto-detect)
# genotype effect transform to additive dominance(transformation matrix)
L_coefficient<-function(en){
# Builds the contrast matrices that split the 3*en genotype effects into
# main (C1), environment (C2) and interaction (C3) parts, plus their
# additive/dominance reductions LL1 and LL3.
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
value2<-L_coefficient(en)# L coefficient matrix # rm(L_coefficient);gc()
# iteration function(R=zu%*%t(zu)*λk+D*λ+I);estimate λk
rqtl<-function(ga){
# Negative restricted log-likelihood profiled over ga = log(lambda_k).
lambdak<-exp(ga)
Hk_term<-zuRHzu*lambdak+diag(1,en*3)
logHk<-sum(log(lambda*vv+1))+log(det(Hk_term))
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
rqtl<- -0.5*( logHk + log(det(xuRHkxu)) + (n-q)*log(yuPkyu) )
return(-rqtl)
}
# estimate the value of γ
gamma_estimate<-function(lambdak){
# BLUP of the marker effects gamma, their covariance, beta and sigma^2
# at the optimised lambda_k (Woodbury-style updates of the RH products).
Hk_term<-zuRHzu*lambdak+diag(1,en*3)
RHk_term<-solve(Hk_term)*lambdak
yuRHkyu<-yuRHyu-yuRHzu%*%tcrossprod(RHk_term,yuRHzu)
yuRHkxu<-yuRHxu-yuRHzu%*%tcrossprod(RHk_term,xuRHzu)
xuRHkxu<-xuRHxu-xuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkxu<-t(xuRHzu)-zuRHzu%*%tcrossprod(RHk_term,xuRHzu)
zuRHkyu<-t(yuRHzu)-zuRHzu%*%tcrossprod(RHk_term,yuRHzu)
zuRHkzu<-zuRHzu-zuRHzu%*%RHk_term%*%zuRHzu
beta<-solve(xuRHkxu,t(yuRHkxu))
beta<-matrix(beta,ncol = 1)
yuPkyu<-yuRHkyu-yuRHkxu%*%tcrossprod(solve(xuRHkxu),yuRHkxu)
sigma<-yuPkyu/(n-q)
sigma<-as.numeric(sigma)
gamma<-lambdak*zuRHkyu-lambdak*zuRHkxu%*%beta
var<-abs((lambdak*diag(1,en*3)-lambdak*zuRHkzu*lambdak)*sigma)
stderr<-sqrt(diag(var))
phi.k<-sigma*lambdak # Φk
return(list(gamma,beta,var,phi.k,sigma,stderr))
}
# estimate the value of logp
logp_estimate<-function(L, g_k, pn){
# Wald statistic for the contrast L, referred to a gamma distribution with
# effective scale 2*dk1 (accounts for shrinkage in the random-effect BLUPs).
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
rank1<-qr(L)$rank
tr1<-sum(diag(ginv(tcrossprod(L,L))%*%var.1))
dk1<-rank1-tr1/phi_k
p1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = FALSE)
log1<-pgamma(Wk.1,shape=pn/2,scale=2*dk1,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))# convert natural-log p to -log10(p)
return(list(p=p1,log=log1))
}
RH_value<-1/(vv*lambda+1)
mn<-ncol(zz)/(en*3)# number of markers
n<-nrow(y)
q<-ncol(x)
# Rotate into the kinship eigenbasis (uu) and precompute marker-independent
# cross-products; the per-marker loop only adds the z-dependent terms.
xu<-crossprod(uu,x)
yu<-crossprod(uu,y)
yuRHyu<-sum(yu*RH_value*yu)
yuRHxu<-matrix(0,nrow=1,ncol=q)
xuRHxu<-matrix(0,nrow=q,ncol=q)
for(i in 1:q){
yuRHxu[,i]<-sum(yu*RH_value*xu[,i])
for(j in 1:q){
xuRHxu[j,i]<-sum(xu[,j]*RH_value*xu[,i])
}
}
# yuRHyu<-crossprod(yu,diag(RH_value))%*%yu
# yuRHxu<-crossprod(yu,diag(RH_value))%*%xu
# xuRHxu<-crossprod(xu,diag(RH_value))%*%xu
# Worker count: user-specified CLO, otherwise capped auto-detection.
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
MR_i<-numeric()
result<-foreach(MR_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((MR_i-1)*3*en+1):(MR_i*3*en),drop=F]
zu<-crossprod(uu,z)
xuRHzu<-matrix(0,nrow=q,ncol=3*en)
yuRHzu<-matrix(0,nrow=1,ncol=3*en)
zuRHzu<-matrix(0,nrow=3*en,ncol=3*en)
for(i in 1:(3*en)){
yuRHzu[,i]<-sum(yu*RH_value*zu[,i])
for(j in 1:q){
xuRHzu[j,i]<-sum(xu[,j]*RH_value*zu[,i])
}
for(j in 1:(3*en)){
zuRHzu[j,i]<-sum(zu[,j]*RH_value*zu[,i])
}
}
# xuRHzu<-crossprod(xu,diag(RH_value))%*%zu
# yuRHzu<-crossprod(yu,diag(RH_value))%*%zu
# zuRHzu<-crossprod(zu,diag(RH_value))%*%zu
ga<-0
par<-optim(par=ga,fn=rqtl,hessian = TRUE,method="L-BFGS-B",lower=-10,upper=10)
lambdak<-exp(par$par)
value3<-gamma_estimate(lambdak)
gamma_k<-value3[[1]]
beta<-value3[[2]]# estimate β
var_ga<-value3[[3]]
phi_k<-value3[[4]]
sigma_2<-value3[[5]]
# stderr<-value3[[6]]
gamma_k2<-gamma_k
gamma_main_k <-value2$matrix_C1%*%gamma_k2
gamma_env_k <-value2$matrix_C2%*%gamma_k2
gamma_inter_k<-value2$matrix_C3%*%gamma_k2
main_effect<-value2$L1%*%gamma_main_k
interact_effect<-value2$L3%*%gamma_inter_k
logvalue1<-logp_estimate(L=value2$LL1, g_k=main_effect, pn=2)# the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
logvalue2<-logp_estimate(L=value2$matrix_C2, g_k=gamma_env_k, pn=en-1)# the -log(10)p of environment effect
p2<-logvalue2$p
log2<-logvalue2$log
logvalue3<-logp_estimate(L=value2$LL3, g_k=interact_effect, pn=2*(en-1))# the -log(10)p of interaction effect
p3<-logvalue3$p
log3<-logvalue3$log
result<-cbind(lambdak,matrix(beta,1,en),matrix(gamma_k,1,3*en),p1,p2,p3,log1,log2,log3,phi_k,sigma_2)
}
stopCluster(cl)
# rm(RH_value);gc()
# Unpack the rbind-ed per-marker rows by fixed column layout.
lambda_k<-result[,1]
mu_beta<-result[,2:(1+en)]
gamma_all<-result[,(2+en):(1+4*en)]
p1<-result[,(2+4*en)]
p2<-result[,(3+4*en)]
p3<-result[,(4+4*en)]
log_p1<-result[,(5+4*en)]
log_p2<-result[,(6+4*en)]
log_p3<-result[,(7+4*en)]
phi_k<-result[,(8+4*en)]
sigma_2<-result[,(9+4*en)]
return(list(lambda_k=lambda_k, fixed=mu_beta, gamma=gamma_all,
p1=p1, p2=p2, p3=p3,
log1=log_p1, log2=log_p2, log3=log_p3,
phi_k=phi_k, sigma_2=sigma_2))
}
single_locus_model_Fixed<-function(x,y,zz,lambda,uu,vv,en,CLO){
# Genome scan under the FIXED-QTL-effect model (GLS per marker, in parallel).
# For each marker: generalised least squares of cbind(x, z) in the kinship
# eigenbasis, then chi-square Wald tests for QTL main effect (p1/log1),
# environment effect (p2/log2) and QTL-by-environment interaction (p3/log3).
#   x/y: fixed design and phenotype; zz: genotype design, 3*en cols/marker
#   lambda/uu/vv: P3D variance ratio and kinship eigenvectors/eigenvalues
#   en: number of environments; CLO: CPU count (NULL = auto-detect)
# genotype effect transform to additive dominance(transformation matrix)
L_coefficient<-function(en){
# Contrast matrices splitting 3*en genotype effects into main (C1),
# environment (C2) and interaction (C3) parts, plus reductions LL1/LL3.
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
value2<-L_coefficient(en)# L coefficient matrix # rm(L_coefficient);gc()
# CONSISTENCY FIX: compute the marker count locally (as the sibling
# single_locus_model does) instead of relying on the file-level global `mn`.
mn<-ncol(zz)/(en*3)
logp_estimate<-function(L, g_k, pn){
# Plain chi-square Wald test (fixed effects, no shrinkage correction).
var.1<-L%*%tcrossprod(var_ga,L)
Wk.1<-crossprod(g_k,ginv(var.1))%*%g_k
p1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = FALSE)
log1<-pchisq(Wk.1,df=pn,lower.tail = FALSE,log.p = TRUE)
log1<--log1*log10(exp(1))# convert natural-log p to -log10(p)
return(list(p=p1,log=log1))
}
yu<-crossprod(uu,y)
Hsolve<-1/(vv*lambda+1)# inverse of the diagonalised covariance
# Worker count: user-specified CLO, otherwise capped auto-detection.
if(is.null(CLO)==TRUE){
cl.cores <- detectCores()
if(cl.cores<=2){
cl.cores<-1
}else{
if(cl.cores>10){
cl.cores <-10
}else{
cl.cores <- detectCores()-1
}
}
# cl.cores<-2
}else{
cl.cores <-CLO
}
cl <- makeCluster(cl.cores)
registerDoParallel(cl)
MF_i<-numeric()
result<-foreach(MF_i=1:mn,.combine=rbind)%dopar%{
# library(MASS)
z<-zz[,((MF_i-1)*3*en+1):(MF_i*3*en),drop=F]
uxz<-crossprod(uu,cbind(x,z))
# GLS estimate of (beta, gamma) with weights Hsolve; ginv handles the
# rank-deficient designs that arise at monomorphic pseudo-markers.
x_gamma<-ginv(t(uxz)%*%diag(Hsolve)%*%uxz)%*%t(uxz)%*%diag(Hsolve)%*%yu
q<-qr(uxz)$rank
sig_e2<-as.numeric(t(yu-uxz%*%x_gamma)%*%diag(Hsolve)%*%(yu-uxz%*%x_gamma)/(dim(uxz)[1]-q))
x_gamma_covmatr<-sig_e2*ginv(t(uxz)%*%diag(Hsolve)%*%uxz)
gamma<-x_gamma[-c(1:dim(x)[2])]
var_ga<-x_gamma_covmatr[-c(1:dim(x)[2]),-c(1:dim(x)[2]),drop=F]
gamma_main_k <-value2$matrix_C1%*%gamma
gamma_env_k <-value2$matrix_C2%*%gamma
gamma_inter_k<-value2$matrix_C3%*%gamma
main_effect<-value2$L1%*%gamma_main_k
interact_effect<-value2$L3%*%gamma_inter_k
logvalue1<-logp_estimate(L=value2$LL1, g_k=main_effect, pn=2)#the -log(10)p of qtl effect
p1<-logvalue1$p
log1<-logvalue1$log
logvalue2<-logp_estimate(L=value2$matrix_C2, g_k=gamma_env_k, pn=en-1)#the -log(10)p of environment effect
p2<-logvalue2$p
log2<-logvalue2$log
logvalue3<-logp_estimate(L=value2$LL3, g_k=interact_effect, pn=2*(en-1))#the -log(10)p of interaction effect
p3<-logvalue3$p
log3<-logvalue3$log
result<-cbind(matrix(x_gamma[c(1:dim(x)[2])],1,en),matrix(gamma,1,3*en),p1,p2,p3,log1,log2,log3,sig_e2)
}
stopCluster(cl)
# rm(RH_value);gc()
# Unpack the rbind-ed per-marker rows by fixed column layout.
mu_beta<-result[,1:dim(x)[2]]
gamma_all<-result[,(dim(x)[2]+1):(dim(x)[2]+3*en)]
p1<-result[,(dim(x)[2]+3*en+1)]
p2<-result[,(dim(x)[2]+3*en+2)]
p3<-result[,(dim(x)[2]+3*en+3)]
log_p1<-result[,(dim(x)[2]+3*en+4)]
log_p2<-result[,(dim(x)[2]+3*en+5)]
log_p3<-result[,(dim(x)[2]+3*en+6)]
sigma_2<-result[,(dim(x)[2]+3*en+7)]
return(list(fixed=mu_beta, gamma=gamma_all,
p1=p1, p2=p2, p3=p3,
log1=log_p1, log2=log_p2, log3=log_p3,
sigma_2=sigma_2))
}
peak_selection<-function(log_value,genoname){
  # Indices of the local maxima of `log_value`, found separately within each
  # chromosome (column 2 of `genoname`). An endpoint counts as a peak when it
  # exceeds its single neighbour. Returns the sorted global indices.
  local_peaks<-function(profile){
    m<-length(profile)
    hits<-vector(length=0)
    if(profile[1]>profile[2]) hits<-append(hits,1)
    for(idx in 2:(m-1)){
      if((profile[idx-1]<profile[idx])&(profile[idx]>profile[idx+1])){
        hits<-append(hits,idx)
      }
    }
    if(profile[m]>profile[m-1]) hits<-append(hits,m)
    return(hits)
  }
  chr_col<-as.matrix(genoname[,2])
  id_pos<-NULL
  for(chr in chr_col[!duplicated(chr_col)]){
    rows<-which(chr_col%in%chr)
    id_pos<-c(id_pos,rows[local_peaks(log_value[rows])])
  }
  return(sort(id_pos))
}
multi_peak_new<-function(gencoded,peak_id,en){
  # Extract the genotype-design columns belonging to the loci in `peak_id`.
  # Each locus occupies 3*en consecutive columns of `gencoded`; the returned
  # `order` vector repeats every locus id once per extracted column (sorted).
  enk<-3*en
  col_ids<-sort(as.vector(outer(seq(1,enk,1),(peak_id-1)*enk,"+")))
  return(list(z=gencoded[,col_ids],order=sort(rep(peak_id,enk))))
}
Zhou_lars<-function(peak,CodeMatrix,n,en){
# LARS pre-screen of the single-locus peaks: when there are at least as many
# peaks as samples, run least-angle regression (lars package) on the peak
# design and keep only the loci whose final-step coefficients are non-zero.
# Otherwise the peak set passes through unchanged.
# NOTE(review): reads `x` and `y` from the enclosing environment.
# Returns list(lar_pos = surviving loci, Matrix = their design columns,
# order = locus id per design column).
multi_value<-multi_peak_new(CodeMatrix,peak,en)
DesignMatrix<-multi_value$z
order0<-multi_value$order# length(order0); length(peak_id)
if(length(peak)>=n){
lar_result<-lars(x=cbind(x[,-1,drop=F],DesignMatrix), y=y, type = "lar",trace = FALSE, normalize = TRUE, intercept = TRUE, eps = .Machine$double.eps, use.Gram=FALSE)
# Coefficients of the last LARS step, with the covariate columns dropped.
lar_result.0<-lar_result$beta[nrow(lar_result$beta),][-c(1:(dim(x)[2]-1))]
lar_pos0<-order0[which(lar_result.0!=0)]
lar_pos<-lar_pos0[!duplicated(lar_pos0)]# length(lar_pos)
multi_value1<-multi_peak_new(CodeMatrix,lar_pos,en)
DesignMatrix1<-multi_value1$z
order1<-multi_value1$order
}else{
lar_pos<-peak
DesignMatrix1<-DesignMatrix
order1<-order0
}# length(lar_pos)
return(list(lar_pos=lar_pos,Matrix=DesignMatrix1,order=order1))
}
sblgwas<-function(x,y,z,t,max.iter=200,min.err=1e-6){
# Sparse Bayesian learning for GWAS: coordinate-wise EM shrinkage estimation
# of marker effects g (columns of z) alongside fixed effects b (columns of x).
#   t: prior hyper-parameter in the per-marker variance update
#   max.iter / min.err: iteration cap and convergence tolerance on g
# Returns list(iteration = per-iteration trace, parm = final scalars,
# blup = per-marker data.frame(gamma, vg, wald, p_wald)).
x<-as.matrix(x)
y<-as.matrix(y)
z<-as.matrix(z)
n<-length(y)
q<-ncol(x)
m<-ncol(z)
# Initial residual variance from the fixed-effects-only OLS fit.
b0<-solve(t(x)%*%x,tol=1e-50)%*%(t(x)%*%y)
s2<-sum((y-x%*%b0)^2)/(n-q)
b0<-matrix(0,q,1)
b<-b0
g0<-matrix(0,m,1)
g<-g0
lambda<-matrix(0,m,1)
tau<-g0
v<-g0
xx<-NULL
xy<-NULL
for(i in 1:q){
xx<-c(xx,sum(x[,i]^2))
xy<-c(xy,sum(x[,i]*y))
}
zz<-NULL
zy<-NULL
for(k in 1:m){
zz<-c(zz,sum(z[,k]^2))
zy<-c(zy,sum(z[,k]*y))
}
d<-numeric(m)
a<-matrix(0,n,1)# running fitted values X%*%b + Z%*%g
iter<-0
err<-1e8
my.iter<-NULL
while(iter < max.iter & err > min.err){
# Coordinate update of each fixed effect, keeping `a` in sync.
for(i in 1:q){
a<-a-x[,i]*b0[i]
ai<-sum(x[,i]*a)
b[i]<-(xy[i]-ai)/xx[i]
a<-a+x[,i]*b[i]
}
df<-0
# Coordinate update of each marker: closed-form root for the prior
# variance tau[k] (0 when the quadratic has no admissible root, i.e. the
# marker is shrunk out), then the shrunken effect g[k].
for(k in 1:m){
a<-a-z[,k]*g0[k]
ak<-sum(z[,k]*a)
c1<- -(t+3)*zz[k]^2
c2<- -(2*t+5)*zz[k]+(zy[k]-ak)^2
c3<- -(t+2)
if( ((c2^2-4*c1*c3) < 0) | (c2 < 0) ){
tau[k]<-0
} else {
tau[k]<-(-c2-sqrt(c2^2-4*c1*c3))/(2*c1)
}
lambda[k]<-tau[k]/s2
g[k]<-lambda[k]*(zy[k]-ak)-lambda[k]^2*zz[k]*(zy[k]-ak)/(lambda[k]*zz[k]+1)
d[k]<-lambda[k]*(zz[k]-lambda[k]*zz[k]^2/(lambda[k]*zz[k]+1))
v[k]<-tau[k]-tau[k]*d[k]
df<-df+d[k]
a<-a+z[,k]*g[k]
}
# Residual variance with effective degrees of freedom df (guarded).
if((n-q-df) > 0){s2<-sum((y-a)^2)/(n-q-df)
}else{
s2<-sum((y-a)^2)/(n-q)
}
iter<-iter+1
err<-sum((g-g0)^2)/m
g0<-g
b0<-b
my.iter<-rbind(my.iter,cbind(iter,err,s2,t(b),t(g)))
}
my.parm<-data.frame(iter,err,s2,b,df)
names(my.parm)<-c("iter","error","s2","beta","df")
# Wald tests only for markers with non-zero posterior variance; the rest
# keep wald = 0 and p = 1.
posv<-which(v!=0)
m<-length(g)
wald<-c(rep(0,m))
gg<-g[posv]
vv<-v[posv]
wald[posv]<-gg^2/vv
p<-pchisq(wald,1,lower.tail=FALSE)
my.blup<-data.frame(g,v,wald,p)
names(my.blup)<-c("gamma","vg","wald","p_wald")
var.beta<-NULL
for(i in 1:q){
var.beta<-c(var.beta,paste("beta",i,sep=""))
}
var.gamma<-NULL
for(k in 1:m){
var.gamma<-c(var.gamma,paste("gamma",k,sep=""))
}
var.names<-c(c("iter","error","s2"),var.beta,var.gamma)
my.iter<-data.frame(my.iter)
names(my.iter)<-var.names
out<-list(my.iter,my.parm,my.blup)
names(out)<-c("iteration","parm","blup")
return(out)
}
selection<-function(posx,genoname,svrad){
  # Greedy de-duplication of candidate positions: walk `posx` in order and
  # keep a position unless an already-kept one lies within `svrad` index
  # units AND on the same chromosome (column 2 of `genoname`).
  kept<-c(posx[1])
  for(idx in seq_along(posx)[-1]){
    cand<-as.numeric(posx[idx])
    near<-which(abs(kept-cand)<=svrad)
    if(length(near)==0){
      kept<-c(kept,posx[idx])
    }else{
      clash<-length(which(genoname[kept[near],2]==as.numeric(genoname[posx[idx],2])))>0
      if(!clash){
        kept<-c(kept,posx[idx])
      }
    }
  }
  return(kept)
}
selection2<-function(posx1,posx2,genoname,svrad){
  # Filter candidate positions `posx1` against a reference set `posx2`.
  # A candidate is dropped only when some reference position lies within
  # `svrad` index units AND on the same chromosome (column 2 of `genoname`).
  # Returns surviving positions in input order, or NULL when posx1 is empty.
  kept<-NULL
  for(idx in seq_len(length(posx1))){
    cand<-as.numeric(posx1[idx])
    near<-which(abs(posx2-cand)<=svrad)
    if(length(near)==0){
      kept<-c(kept,posx1[idx])
    }else{
      same_chr<-length(which(genoname[posx2[near],2]==as.numeric(genoname[posx1[idx],2])))>0
      if(!same_chr){
        kept<-c(kept,posx1[idx])
      }
    }
  }
  return(kept)
}
ebayes_EM<-function(x,z,y,en,v0,v,tau,err_max){
n<-nrow(z);k<-ncol(z)
mk<-3*en; kn<-k/mk
v0<-as.numeric(v0)
v<-matrix(v,ncol=1)
if(abs(min(eigen(crossprod(x,x))$values))<1e-6){
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)+diag(ncol(x))*1e-8))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%crossprod(x,y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%crossprod(x,y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(crossprod(x,x)))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve(crossprod(x,x))%*%(crossprod(x,y)) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv(crossprod(x,x))%*%(crossprod(x,y)) }
}
}# β: fixed effect-rough estimate
u<-matrix(0,nrow=mk,ncol=kn)# E(γk)
w<-matrix(0,nrow=mk,ncol=k)# var(γk)
s<-matrix(0,nrow=kn,ncol=1)# tr(var(γk))
vv<-matrix(0,n,n)# V
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
vv=vv+tcrossprod(zz,zz)*v[i,]# ∑( Zk%*%t(Zk)*(σk^2) )
}
vv<-vv+diag(n)*v0# V : the covariance matrix for y
# genotype effect transform to additive dominance(transformation matrix)
L_coefficient<-function(en){
e.seq<-rep(1,3*en)
a11<-matrix(0,1,3*en)
a12<-matrix(0,1,3*en)
a13<-matrix(0,1,3*en)
a.seq<-seq(1,3*en,by=3)
a11[a.seq]<-1
a12[a.seq+1]<-1
a13[a.seq+2]<-1
a123<-rbind(a11,a12,a13)
a1<-1/en*a11-1/(3*en)*e.seq
a2<-1/en*a12-1/(3*en)*e.seq
a3<-1/en*a13-1/(3*en)*e.seq
L1<-rbind(a1,a2,a3)
a4<-matrix(c(0.5,-0.5,0,1,-0.5,-0.5),2,3)
LL1<-a4%*%L1
L2<-matrix(0,en,3*en)
L3<-matrix(0,3*en,3*en)
c4<-matrix(0,2*en,3*en)
for(i in 1:en){
b11<-matrix(0,1,3*en)
b11[((i-1)*3+1):(i*3)]<-1
L2[i,]<-1/3*b11-1/(3*en)*e.seq
c4[((i-1)*2+1):(i*2),((i-1)*3+1):(i*3)]<-a4
for(i0 in 1:3){
seq.c<-(i-1)*3+i0
c0<-matrix(0,1,3*en)
c0[seq.c]<-1
L3[seq.c,]<--1/en*a123[i0,]-1/3*b11+1/(3*en)*e.seq+c0
}
}
LL3<-c4%*%L3
return(list(matrix_C1=L1, matrix_C2=L2, matrix_C3=L3 , LL1=LL1, LL3=LL3, L1=a4, L3=c4))
}
L<-L_coefficient(en)#L coefficient matrix # rm(L_coefficient);gc()
rank_1<-qr(L$LL1)$rank
rank_2<-qr(L$LL3)$rank
iter<-0;err<-1000;iter_max<-500;
omega<-0
while( (iter<iter_max)&&(err>err_max) ){
iter<-iter+1
v01<-v0# v01 is the initial σ^2
v1<-v# v1 is the initial σk^2
b1<-b# b1 is the initial β
#s1<-s
try_a<-try({ vi<-chol2inv(chol(vv)) },silent=TRUE)# solve(V)
if('try-error' %in% class(try_a)){
try_aa<-try({ vi<-solve(vv) },silent=TRUE)
if('try-error' %in% class(try_aa)){ vi<-ginv(vv) }
}
xtv<-crossprod(x,vi)# t(X)%*%solve(V)
if(ncol(x)==1){
b<-((xtv%*%x)^(-1))*(xtv%*%y)
}else{
if(abs(min(Mod(eigen(xtv%*%x)$values)))<1e-6){
try_b<-try({ b<-chol2inv(chol((xtv%*%x)+diag(ncol(x))*1e-8))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}else{
try_b<-try({ b<-chol2inv(chol(xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_b)){
try_c<-try({ b<-solve((xtv%*%x))%*%(xtv%*%y) },silent=TRUE)
if('try-error' %in% class(try_c)){ b<-ginv((xtv%*%x))%*%(xtv%*%y) }
}
}
}
r<-y-x%*%b# y-Xβ
ss<-matrix(0,nrow=n,ncol=1)
vv<-matrix(0,n,n)# new V
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
zz<-z[,nc]# Zk
zztvi<-crossprod(zz,vi)# t(Zk)%*%solve(V)
u[,i]<-v[i,]*zztvi%*%r# E(γk)
w[,nc]<-v[i,]*( diag(1,mk)-zztvi%*%zz*v[i,] )# var(γk)
s[i,]<-sum(diag(w[,nc]))# tr(var(γk))
v[i,]<-(crossprod(u[,i,drop=F],u[,i,drop=F])+s[i,]+omega)/(tau+2+mk)# new (σk^2)
ss<-ss+zz%*%u[,i,drop=F]
vv<-vv+tcrossprod(zz,zz)*v[i,]# ∑( Zk%*%t(Zk)*(σk^2) )
}
v0<-as.numeric(crossprod(r,(r-ss))/n)# new σ^2
vv<-vv+diag(n)*v0# new V
err<-(crossprod((b1-b),(b1-b))+(v01-v0)^2+crossprod((v1-v),(v1-v)))/(1+ncol(x)+kn)
beta<-t(b)
sigma2<-v0
}
u1<-matrix(0,nrow=2,ncol=kn)# main-E(γk)
u2<-matrix(0,nrow=2*en,ncol=kn)# interaction-E(γk)
p1<-matrix(1,kn,1)
p2<-matrix(1,kn,1)
# pvalue<-matrix(1,kn,1)
for(i in 1:kn){
nc<-( (i-1)*mk+1 ):(i*mk)
gammak<-u[,i,drop=F]
u1[,i]<-L$LL1%*%gammak
u2[,i]<-L$LL3%*%gammak
var_1<-L$LL1%*%w[,nc]%*%t(L$LL1)
tr_1<-sum(diag(ginv(tcrossprod(L$LL1))%*%var_1))
dk1<-abs(rank_1-tr_1/v[i,])
var_2<-L$LL3%*%w[,nc]%*%t(L$LL3)
tr_2<-sum(diag(ginv(tcrossprod(L$LL3))%*%var_2))
dk2<-abs(rank_2-tr_2/v[i,])
p1[i,]<-1-pchisq( t(u1[,i,drop=F])%*%ginv(L$LL1%*%w[,nc]%*%t(L$LL1))%*%u1[,i,drop=F], 2)
p2[i,]<-1-pchisq( t(u2[,i,drop=F])%*%ginv(L$LL3%*%w[,nc]%*%t(L$LL3))%*%u2[,i,drop=F], 2*(en-1))
}
return(list(b=b,u=u,u1=u1,u2=u2,sigma2=sigma2,p1=p1,p2=p2,iter=iter))
}
## Multi-locus candidate screening around the SBL (sparse Bayesian learning)
## scan. Ranks the LARS-selected loci by their sblgwas Wald p-values, promotes
## strongly significant loci to "fixed" effects via an empirical-Bayes EM fit,
## and prunes neighbouring candidates on dense maps.
##
## NOTE(review): uses x, y, sblgwas, ebayes_EM, multi_peak_new, selection and
## selection2 from the enclosing scope -- confirm before reusing standalone.
##
## Returns list(fix = fixed (strong) loci, pos = remaining candidate loci,
##              xin = x augmented with the fixed loci's genotype codes).
Zhou_sbl<-function(peak,Order,DesignMatrix,CodeMatrix,genoname,en,sbl_t,sbl_p,tau,err_max,fix_p,Sigma,SigmaK){
  # Largest spacing (map distance) between adjacent markers on any chromosome;
  # the distance-based pruning below is applied only on dense maps (gap <= 1).
  chr_n<-as.numeric(genoname[,2])
  chr_n<-chr_n[!duplicated(chr_n)]
  maxdistance<-0
  for(i in 1:length(chr_n)){
    maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
  }
  result_sblgwas<-sblgwas(x=x,y=y,z=DesignMatrix,t=sbl_t)
  sbl_p_wald<-result_sblgwas$blup[,4]# sbl_par<-result_sblgwas$blup[,1]# sbl_p_wald<-p.adjust(sbl_p_wald, method = "bonferroni")
  # Candidate loci ordered from most to least significant.
  sbl_pos_order<-Order[order(sbl_p_wald)]
  p_order<-sort(sbl_p_wald)
  # id1: loci passing the 10^(-sbl_p) chi-square threshold; id2: the rest (<1).
  id1<-which( p_order< (1-pchisq(sbl_p*2*log(10),1)))
  id2<-which((p_order>=(1-pchisq(sbl_p*2*log(10),1)))&(p_order<1))
  if(length(id1)>0){
    sbl_pos_order1<-sbl_pos_order[id1]
    sbl_pos_order2<-sbl_pos_order[id2]
    sbl_pos_order1<-sbl_pos_order1[!duplicated(sbl_pos_order1)]
    sbl_pos_order2<-sbl_pos_order2[!duplicated(sbl_pos_order2)]
    sort_order1<-sort(sbl_pos_order1)
    # Joint empirical-Bayes refit of the significant set: emID1 keeps loci with
    # a clear QTL (p1) or QEI (p2) signal; emID2 is a top-5-by-p fallback.
    result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,sort_order1,en)$z,y,en,Sigma,SigmaK[sort_order1],tau,err_max)
    emID1<-which((result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(fix_p*2*log(10),2*(en-1)))))
    emID2<-union(order(result_emba$p1)[seq(1,5,1)],order(result_emba$p2)[seq(1,5,1)])
    if(length(emID1)>5){
      emID<-sort(emID1)
      fix_pos<-sort_order1[emID]
      # Dense map: drop candidates within 1 map unit of a fixed locus
      # (selection2) and thin candidates against each other (selection).
      if(maxdistance<=1){
        sbl_pos_order1<-selection2(sbl_pos_order1,fix_pos,genoname,1)
        sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
        sbl_pos_order2<-selection2(sbl_pos_order2,fix_pos,genoname,1)
        sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
        sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
      }
    }else{
      emID<-sort(union(emID1,emID2))
      fix_pos<-sort_order1[emID]
      if(maxdistance<=1){
        sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
        sbl_pos_order2<-selection2(sbl_pos_order2,sbl_pos_order1,genoname,1)
        sbl_pos_order2<-selection(sbl_pos_order2,genoname,1)
      }
    }
    sbl_pos<-sort(c(sbl_pos_order1,sbl_pos_order2))# length(union(sbl_pos,fix_pos))
  }else{
    # No locus passed the SBL threshold: fall back to screening the original
    # peak set with the empirical-Bayes EM.
    result_emba<-ebayes_EM(x,multi_peak_new(CodeMatrix,peak,en)$z,y,en,Sigma,SigmaK[peak],tau,err_max)
    emID1<-which((result_emba$p1<(1-pchisq(fix_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(fix_p*2*log(10),2*(en-1)))))
    emID2<-union(order(result_emba$p1)[seq(1,5,1)],order(result_emba$p2)[seq(1,5,1)])
    if(length(emID1)>5){
      emID<-sort(emID1)
      fix_pos<-peak[emID]
      if(maxdistance<=1){
        sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
        sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
      }else{
        sbl_pos_order1<-peak[-emID]
      }
    }else{
      emID<-sort(union(emID1,emID2))
      fix_pos<-peak[emID]
      if(maxdistance<=1){
        sbl_pos_order1<-selection2(peak,fix_pos,genoname,1)
        sbl_pos_order1<-selection(sbl_pos_order1,genoname,1)
      }else{
        sbl_pos_order1<-peak[-emID]
      }
    }
    sbl_pos<-sort(sbl_pos_order1)
  }
  sbl_fix_pos<-sort(fix_pos)
  # Fixed-effect design: covariates plus genotype codes of the fixed loci.
  xin<-cbind(x,multi_peak_new(CodeMatrix,sbl_fix_pos,en)$z)
  return(list(fix=sbl_fix_pos,pos=sbl_pos,xin=xin))
}
## Build the classic additive/dominance design matrix for the given loci.
## For each locus the block of columns is:
##   [additive main, dominance main, additive-by-env..., dominance-by-env...],
## where the interaction columns multiply the genotype score by the
## environment indicators in `ee`.
## NOTE(review): relies on en, Ax, Bx, Hx and n from the enclosing scope.
multi_code_classic<-function(peak,ee,n_id){
  # Columns per locus: 2 main effects + 2*(en-1) environment interactions.
  cols_per_locus<-2+2*(en-1)
  n_loci<-length(peak)
  # Stack per-environment genotype scores row-wise (same row order as y):
  # additive score = Ax - Bx, dominance score = Hx.
  add_score<-do.call(rbind, lapply(1:en, function(env) (Ax-Bx)[n_id[[env]],peak,drop=F]))
  dom_score<-do.call(rbind, lapply(1:en, function(env) Hx[n_id[[env]],peak,drop=F]))
  design<-matrix(0,nrow=n,ncol=n_loci*cols_per_locus)
  design[,seq(1,n_loci*cols_per_locus,cols_per_locus)]<-add_score
  design[,seq(2,n_loci*cols_per_locus,cols_per_locus)]<-dom_score
  for(locus in seq_len(n_loci)){
    a_cols<-seq( ((locus-1)*cols_per_locus+3),(locus*cols_per_locus),by=2 )
    d_cols<-seq( ((locus-1)*cols_per_locus+4),(locus*cols_per_locus),by=2 )
    design[,a_cols]<-add_score[,locus]*ee
    design[,d_cols]<-dom_score[,locus]*ee
  }
  design
}
## Univariate normal density.
##
## Args:
##   y     - observation(s); vectorized.
##   mean  - mean of the distribution.
##   sigma - VARIANCE of the distribution (not the standard deviation).
## Returns the density value(s), same length as y.
## Delegates to stats::dnorm instead of hand-rolling the formula with a
## hard-coded decimal approximation of pi.
multinormal<-function(y,mean,sigma){
  dnorm(y, mean = mean, sd = sqrt(sigma))
}
## Likelihood-ratio test, reported as a LOD score, for each locus in the
## candidate design matrix.
##
## Args:
##   xxn - design matrix of terms kept in both the full and the reduced model.
##   xxx - candidate design matrix; every consecutive block of `mk` columns
##         belongs to one locus.
##   yn  - phenotype column vector.
##   par - optional pre-estimated coefficients for cbind(xxn, xxx); when empty
##         (length 0) coefficients are re-estimated by least squares.
##   mk  - number of design columns per locus.
## Returns a (number of loci) x 1 matrix of LOD scores; entry m compares the
## full fit against a fit with locus m's `mk` columns removed.
LRT_F2<-function(xxn,xxx,yn,par,mk){
  # mk<-2+2*(en-1)# the number of genotypes at per locus
  xn<-ncol(as.matrix(xxn))
  nq<-ncol(xxx)
  ns<-nrow(yn)
  kn<-nq/mk
  at1<-nq
  # Full design: shared terms plus all candidate loci (at1>0.5 == any loci).
  ad<-if(at1>0.5) cbind(xxn,xxx) else xxn
  # Least-squares fit; a tiny ridge (1e-8) is added when X'X is near-singular.
  if(length(par)==0){
    bb<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,yn) else solve(crossprod(ad,ad))%*%crossprod(ad,yn)
  }else{
    bb<-par
  }
  vv<-as.numeric(crossprod((yn-ad%*%bb),(yn-ad%*%bb))/ns)# y-(X Z)t(β γ)
  # Full-model log-likelihood under a normal residual with variance vv.
  # NOTE(review): abs() around the density looks redundant (densities are >= 0).
  ll<-sum(log(abs(multinormal(yn,ad%*%bb,vv))))
  lod<-matrix(0,kn,1)
  if(at1>0.5){
    # LOD for locus m: refit without its mk columns and compare likelihoods;
    # -2*(ll1-ll)/(2*log(10)) == (ll - ll1)/log(10).
    for(m in 1:kn){
      i1<-(((m-1)*mk+1):(m*mk));# i2<-((m-1)*mk+1):((m-1)*mk+2); i3<-((m-1)*mk+3):(m*mk)
      m1<-seq(1,ncol(ad),1)[-c(i1+xn)];# m2<-sub[-c(i2+xn)]; m3<-sub[-c(i3+xn)]
      ad1<-ad[,m1,drop=F]
      if(length(par)==0){
        bb1<-if(abs(min(eigen(crossprod(ad1,ad1))$values))<1e-6) solve(crossprod(ad1,ad1)+diag(ncol(ad1))*1e-8)%*%crossprod(ad1,yn) else solve(crossprod(ad1,ad1))%*%crossprod(ad1,yn)
      }else{
        bb1<-par[m1]
      }
      vv1<-as.numeric(crossprod((yn-ad1%*%bb1),(yn-ad1%*%bb1))/ns)
      ll1<-sum(log(abs(multinormal(yn,ad1%*%bb1,vv1))))
      lod[m,]<--2.0*(ll1-ll)/(2.0*log(10))
    }
  }
  return(lod)
}
## Local refinement of the detected QTL positions.
## For every position in xpos, scans the surrounding window of markers
## (up to +/- rr map units, clipped at chromosome ends and at the midpoint to
## the neighbouring QTL), refits that window with ebayes_EM against the
## phenotype adjusted for all other QTL, and moves the position to the most
## significant marker found. On sparse maps (largest marker gap >= rr) the
## positions are returned unchanged.
##
## NOTE(review): uses x, y, initial_sigma, initial_sigmak, multi_peak_new and
## ebayes_EM from the enclosing scope -- confirm before reusing standalone.
optimize_every_posx<-function(xpos,z,yn,genoname,en,rr,tau,err_max){
  # Largest spacing between adjacent markers on any chromosome.
  chr_n<-as.numeric(genoname[,2])
  chr_n<-chr_n[!duplicated(chr_n)]
  maxdistance<-0
  for(i in 1:length(chr_n)){
    maxdistance<-max(maxdistance, max(diff(as.matrix(genoname[which(genoname[,2]==i),3]))))
  }
  if(maxdistance<rr){
    # Convert the search radius from map distance to a number of marker steps.
    rr<-rr/maxdistance
    ad<-cbind(x,multi_peak_new(z,xpos,en)$z)
    # Rough joint effect estimates, with chol2inv -> solve -> ginv fallbacks
    # (ridge 1e-8 added when X'X is near-singular).
    if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6){
      try_a<-try({ bb<- chol2inv(chol(crossprod(ad,ad)+diag(ncol(ad))*1e-8))%*%crossprod(ad,yn) },silent=TRUE)
      if('try-error' %in% class(try_a)){
        try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
        if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
      }
    }else{
      try_a<-try({ bb<-chol2inv(chol(crossprod(ad,ad)))%*%crossprod(ad,yn) },silent=TRUE)
      if('try-error' %in% class(try_a)){
        try_aa<-try({bb<- solve(crossprod(ad,ad))%*%crossprod(ad,yn)},silent=TRUE)
        if('try-error' %in% class(try_aa)){ bb<- ginv(crossprod(ad,ad))%*%crossprod(ad,yn)}
      }
    }
    # Drop the covariate part; keep the per-locus effects (3*en per locus).
    par<-bb[-c(1:dim(x)[2])]
    result_pos<-xpos
    # chr_sum: marker counts per chromosome, with a leading 0 so cumulative
    # sums give each chromosome's starting offset.
    chr_sum<-NULL
    for(i in 1:length(chr_n)){
      chr_sum<-c(chr_sum,length(which(genoname[,2]==i)))
    }
    chr_sum<-c(0,chr_sum)
    for(i in 1:length(xpos)){
      # Phenotype adjusted for the contribution of all OTHER QTL.
      yy<-y-multi_peak_new(z,xpos[-i],en)$z%*%par[-seq((i-1)*3*en+1,i*3*en,1)]
      chr_now<-apply(genoname[,2,drop=F],2,as.numeric)[xpos[i]]
      # Window half-widths: clipped at the chromosome start/end, or halfway to
      # the neighbouring QTL when it sits on the same chromosome.
      if(i==1){
        left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
      }else{
        if(genoname[xpos[i-1],2]==genoname[xpos[i],2]){
          left_rr<-min(0.5*(xpos[i]-xpos[i-1]),rr)
        }else{
          left_rr<-min(xpos[i]-1-sum(chr_sum[seq(1,chr_now,1)]),rr)
        }
      }
      if(i==length(xpos)){
        right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
      }else{
        if(genoname[xpos[i+1],2]==genoname[xpos[i],2]){
          right_rr<-min(0.5*(xpos[i+1]-xpos[i]),rr)
        }else{
          right_rr<-min(sum(chr_sum[seq(1,chr_now+1,1)])-xpos[i],rr)
        }
      }
      left_rr<-floor(left_rr)
      right_rr<-floor(right_rr)
      least_pos<-xpos[-i]
      now_pos<-c((xpos[i]-left_rr):(xpos[i]+right_rr))
      # Refit the window; if anything fails, keep the original position.
      try({
        result_emba2<-ebayes_EM(x,multi_peak_new(z,now_pos,en)$z,yy,en,initial_sigma,initial_sigmak[now_pos],tau,err_max)
        maxp1<-min(result_emba2$p1)
        maxp2<-min(result_emba2$p2)
        max_pos1<-now_pos[which.min(result_emba2$p1)]
        max_pos2<-now_pos[which.min(result_emba2$p2)]
        max_pos1
        max_pos2
        # Move to the window's best marker, preferring whichever of the QTL
        # (p1) / QEI (p2) signals is stronger when they disagree.
        if((maxp1!=1)|(maxp2!=1)){
          if(max_pos1==max_pos2){
            result_pos[i]<-max_pos1
          }else{
            result_pos[i]<-c(max_pos1,max_pos2)[which.min(c(maxp1,maxp2))]
          }
        }
      })
    }
  }else{
    result_pos<-xpos
  }
  return(result_pos)
}
## Final effect and variance-component estimation for the detected QTL.
##
## Args:
##   n_id - per-environment row indices of non-missing phenotypes.
##   xpos - final QTL positions.
##   lod  - LOD matrix; a 0 in column 2 (QTL) or column 3 (QEI) marks effects
##          that were not declared significant and are zeroed before fitting.
##   en   - number of environments.
##   ee   - environment indicator columns used for interaction terms.
## Returns an unnamed list:
##   [[1]] effect matrix (additive/dominance main effects plus per-environment
##         interaction effects; the last environment is reconstructed from the
##         sum-to-zero constraint),
##   [[2]] r2 percentages (total, QTL, QEI) per locus,
##   [[3]] QTL genetic variances, [[4]] QEI genetic variances,
##   [[5]] residual variance, [[6]] phenotypic variance.
## NOTE(review): uses x, y and (via multi_code_classic) en, Ax, Bx, Hx, n from
## the enclosing scope -- confirm before reusing standalone.
effect_estimation<-function(n_id,xpos,lod,en,ee){
  xmatrix<-multi_code_classic(xpos,ee,n_id)
  # Loci whose QTL (na_id1) or QEI (na_id2) effects were not declared.
  na_id1<-which(lod[,2]==0)
  na_id2<-which(lod[,3]==0)
  mk<-2+2*(en-1)
  # Column indices of the zeroed main (Q) and interaction (QE) effects.
  na_id_Q<-sort(c((na_id1-1)*mk+1,(na_id1-1)*mk+2))
  na_id_QE<-NULL
  for(jj in 1:length(na_id2)){
    na_id_QE<-c(na_id_QE,sort(rep((na_id2[jj]-1)*mk,2*(en-1))+seq(3,mk,1)))
  }
  xmatrix0<-xmatrix
  xmatrix0<-cbind(x,xmatrix0)
  xmatrix[,c(na_id_Q,na_id_QE)]<-0
  ad<-cbind(x,xmatrix)
  # Least-squares fit; tiny ridge added when X'X is near-singular.
  bb0<-if(abs(min(eigen(crossprod(ad,ad))$values))<1e-6) solve(crossprod(ad,ad)+diag(ncol(ad))*1e-8)%*%crossprod(ad,y) else solve(crossprod(ad,ad))%*%crossprod(ad,y)
  bb<-bb0[-seq(1,dim(x)[2],1)]
  sig_e2<-as.numeric(crossprod(y-xmatrix0%*%bb0)/length(y))
  # Reshape a vector into a matrix, filling column by column.
  Into_matrix<-function(vector_x,row_n){
    col_n<-length(vector_x)/row_n
    result_x<-matrix(0,nrow=row_n,ncol=col_n)
    for(i in 1:col_n){
      result_x[,i]<-vector_x[((i-1)*row_n+1):(i*row_n)]
    }
    return(result_x)
  }
  # One row per locus, mk columns of effects.
  effect_all_0<-t(Into_matrix(bb,2+2*(en-1)))
  # Recover the last environment's interaction effects from the sum-to-zero
  # constraint (negative sum of the first en-1 environments).
  last_effect<-function(effect){
    a<-effect[,seq(3,2+2*(en-1),2),drop=F]
    d<-effect[,seq(4,2+2*(en-1),2),drop=F]
    last_matrix<-matrix(0,nrow=dim(a)[1],ncol=2)
    for(i in 1:(en-1)){
      last_matrix[,1]<-last_matrix[,1]-a[,i]
      last_matrix[,2]<-last_matrix[,2]-d[,i]
    }
    return(last_matrix)
  }
  effect_all<-cbind(effect_all_0,last_effect(effect_all_0))# dim(effect_all)
  ef_Q<-effect_all[,c(1:2),drop=F]
  ef_QE<-effect_all[,-c(1:2),drop=F]
  # Genetic variances: 0.5a^2 + 0.25d^2 for main effects; env-averaged halves
  # of the interaction effects for QEI.
  sig_Q<-0.5*(ef_Q[,1])^2+0.25*(ef_Q[,2])^2
  sig_QE<-matrix(0,nrow=dim(effect_all)[1],ncol=1)
  for(i in 1:en){
    sig_QE<-sig_QE+(1/en)*0.5*(ef_QE[,1+(i-1)*2])^2+(1/en)*0.5*(ef_QE[,2+(i-1)*2])^2
  }
  # Phenotypic variance: at least the sum of the fitted components.
  sig_y<-max(var(y),(sum(sig_Q)+sum(sig_QE)+sig_e2))
  pve<-cbind((sig_Q/sig_y)*100,(sig_QE/sig_y)*100)
  pve<-cbind(as.matrix(pve[,1]+pve[,2]),pve)
  return(list(effect_all,pve,sig_Q,sig_QE,sig_e2,sig_y))
}
## Find the flanking markers for each queried genome position.
##
## Args:
##   map    - marker map; column 1 = marker name, column 2 = chromosome,
##            column 3 = position.
##   ChrPos - two-column matrix of queries: chromosome, position.
## Returns a character matrix with one row per query, columns
## c(left marker, right marker). A side with no marker (query outside the
## mapped range on that chromosome) is ""; a query sitting exactly on a
## marker returns that marker on both sides. Returns NULL for zero queries.
LeftRight_marker<-function(map,ChrPos){
  # seq_len() (rather than 1:n) makes the zero-query case return NULL
  # instead of erroring on ChrPos[1, ].
  flank<-lapply(seq_len(dim(ChrPos)[1]), function(q){
    on_chr<-which(as.numeric(map[,2])==as.numeric(ChrPos[q,1]))
    query_pos<-as.numeric(ChrPos[q,2])
    marker_pos<-as.numeric(map[on_chr,3])
    marker_name<-map[on_chr,1]
    left_mar<-if(query_pos<min(marker_pos)) "" else
      marker_name[max(which(marker_pos<=query_pos))]
    right_mar<-if(query_pos>max(marker_pos)) "" else
      marker_name[min(which(marker_pos>=query_pos))]
    c(left_mar,right_mar)
  })
  # Assemble all rows at once instead of growing the result with rbind.
  do.call(rbind,flank)
}
######################################### input and basic setup #########################################
#*######### environment and phenotype #
en<-EnvNum[NUM]
sum_en<-sum(EnvNum[0:(NUM-1)])
pheno<-t(pheRaw[,(sum_en+1):(sum_en+en),drop=F])
# rownames(pheno)<-NULL
yes_id<-NULL
for(i in 1:dim(pheno)[1]){
yes_id[[i]]<-which(pheno[i,]!="-")
}
# pheno<-as.matrix(pheno)
y<-NULL;yall<-NULL
for(i in 1:dim(pheno)[1]){
y<-c(y,as.numeric(pheno[i,yes_id[[i]]]))
yall<-c(yall,pheno[i,])
}
n0<-dim(pheno)[2]# The number of individuals in each environment
n<-length(y)# The number of individuals in all environments after deleting the missing values
nn<-length(yall)# The number of individuals in all environments before deleting the missing values
y<-as.matrix(y) # rm(pheno);gc()
#*######### genotype #
# genRaw<-as.matrix(genRaw)
#*######### calculate Z matrix for K matrix #
mn<-dim(Ax0)[2]
Z<-matrix(0,nrow=n,ncol=mn*en*3)
sum_n<-0
for(j in 1:en){
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+1,mn*en*3,3*en) ]<-Ax0[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+2,mn*en*3,3*en) ]<-Hx0[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+3,mn*en*3,3*en) ]<-Bx0[yes_id[[j]],]
sum_n<-sum_n+length(yes_id[[j]])
}# dim(Z)
#*######### calculate K matrix #
K<-kinship_all(Z,yes_id,en)# rm(kinship_every,kinship_all,K0);gc()
#*######### calculate Z matrix for the subsequent algorithm #
mn<-dim(Ax)[2]
Z<-matrix(0,nrow=n,ncol=mn*en*3)
sum_n<-0
for(j in 1:en){
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+1,mn*en*3,3*en) ]<-Ax[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+2,mn*en*3,3*en) ]<-Hx[yes_id[[j]],]
Z[ (sum_n+1):(sum_n+length(yes_id[[j]])), seq((j-1)*3+3,mn*en*3,3*en) ]<-Bx[yes_id[[j]],]
sum_n<-sum_n+length(yes_id[[j]])
}# dim(Z)
#*######### X matrix; y=Xβ+Zγ+ξ+ε #
x<-fixed_x(yes_id,en)
if(is.null(yygg)==FALSE){
yygg_x<-NULL
for(i in 1:en){
yygg_x<-rbind(yygg_x,yygg[yes_id[[i]],])
}# dim(yygg_x)
x<-cbind(x,yygg_x)
}# dim(x)
if(det(crossprod(x,x))==0){
warning("X is singular")
}
ReduceDim_x<-TRUE
if(ReduceDim_x){
x_effect<-if(abs(min(eigen(crossprod(x,x))$values))<1e-6) solve(crossprod(x,x)+diag(ncol(x))*1e-8)%*%crossprod(x,y) else solve(crossprod(x,x))%*%crossprod(x,y)
# fix_ef<-bb[seq(1,dim(x)[2],1)]
yygg_effect<-x_effect[-seq(1,en,1),1,drop=F]
y<-y-x[,-seq(1,en,1),drop=F]%*%yygg_effect
x<-fixed_x(yes_id,en)
}
#*######### name #
effect_name<-name_function(en)
######################################### single_locus_scanning #########################################
#*######### single locus scanning #
p3d_result<-p3d_method(x,y,K)
if(Model=="Random"){
single_locus_model_result<-single_locus_model(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,en=en,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-single_locus_model_result$phi_k# write.table(single_locus_model_result,file=paste("single_locus_model_result.csv",sep = ""),sep=",",row.names = F,col.names = T)
}else if(Model=="Fixed"){
single_locus_model_result<-single_locus_model_Fixed(x=x,y=y,zz=Z,lambda=p3d_result$lambda,uu=p3d_result$k_vector,vv=p3d_result$k_value,en=en,CLO=CLO)
initial_sigma<-mean(single_locus_model_result$sigma_2)
initial_sigmak<-rep(1,mn)
}else{
warning("Please enter Model!")
}
#*######### pick the peaks #
peak_id1<-peak_selection(single_locus_model_result$log1,genoname)
peak_id3<-peak_selection(single_locus_model_result$log3,genoname)
peak_id<-sort(union(peak_id1,peak_id3))# length(peak_id)
######################################### multi_locus_scanning #########################################
multi_locus_result1<-Zhou_lars(peak_id,Z,n,en)
# length(multi_locus_result1$lar_pos)
multi_locus_result2<-Zhou_sbl(peak=multi_locus_result1$lar_pos,Order=multi_locus_result1$order,
DesignMatrix=multi_locus_result1$Matrix,CodeMatrix=Z,
genoname=genoname,en=en,
sbl_t=-1,sbl_p=3,tau=0,err_max=1e-6,fix_p=1.5,
Sigma=initial_sigma,SigmaK=initial_sigmak)# larpos=multi_locus_result1$lar_pos0,larbeta=multi_locus_result1$beta
emba_p<-1.5
t1<-proc.time()
result_emba<-ebayes_EM(multi_locus_result2$xin,multi_peak_new(Z,multi_locus_result2$pos,en)$z,
y,en,initial_sigma,initial_sigmak[multi_locus_result2$pos],
tau=-2,err_max=1e-6)
t2<-proc.time()
(t2-t1)[3]
emba_pos0<-which((result_emba$p1<(1-pchisq(emba_p*2*log(10),2)))|(result_emba$p2<(1-pchisq(emba_p*2*log(10),2*(en-1)))))# cbind(result_emba$p1,result_emba$p2)
emba_all_pos<-sort(c(multi_locus_result2$fix,multi_locus_result2$pos[emba_pos0]))
if(length(multi_locus_result2$pos[emba_pos0])>0){
emba_pos_Q<-multi_locus_result2$pos[emba_pos0]
emba_pos_QE<-multi_locus_result2$pos[emba_pos0]
emba_pos<-multi_locus_result2$pos[emba_pos0]
if(length(emba_pos_Q)>0){
z_M4_Q <-multi_code_classic(peak=emba_pos_Q, ee=x[,seq(2,en),drop=F],n_id=yes_id)
order_Q<-sort(c(seq(1,dim(z_M4_Q)[2],2+2*(en-1)),seq(2,dim(z_M4_Q)[2],2+2*(en-1))))
z_M4_Q<-z_M4_Q[,order_Q,drop=F]
lod_Q<-LRT_F2(xxn=multi_locus_result2$xin,xxx=z_M4_Q, yn=y,par=NULL,mk=2)# cbind(emba_pos_Q,lod_Q)
lrt_pos_Q<-emba_pos_Q[which(lod_Q>2.5)]
emba_pos_Q[which(lod_Q>2.5)]# cbind(emba2_pos_Q[which(lod_Q>2.5)],lod_Q[which(lod_Q>2.5)])
}else{
lrt_pos_Q<-NULL
}
if(length(emba_pos_QE)>0){
z_M4_QE<-multi_code_classic(peak=emba_pos_QE,ee=x[,seq(2,en),drop=F],n_id=yes_id)
order_QE<-sort(c(seq(1,dim(z_M4_QE)[2],2+2*(en-1)),seq(2,dim(z_M4_QE)[2],2+2*(en-1))))
z_M4_QE<-z_M4_QE[,-order_QE,drop=F]
lod_QE<-LRT_F2(xxn=multi_locus_result2$xin,xxx=z_M4_QE, yn=y,par=NULL,mk=2*en-2)# cbind(emba_pos_QE,lod_QE)
lrt_pos_QE<-emba_pos_QE[which(lod_QE>2.5)]
emba_pos_QE[which(lod_QE>2.5)]# cbind(emba2_pos_QE[which(lod_QE>2.5)],lod_QE[which(lod_QE>2.5)])
}else{
lrt_pos_QE<-NULL
}
lrt_pos<-sort(union(lrt_pos_Q,lrt_pos_QE))
lrt_pos<-sort(union(multi_locus_result2$fix,lrt_pos))# length(lrt_pos)
}else{
lrt_pos<-multi_locus_result2$fix# length(lrt_pos)
}
######################################### Optimization and output #########################################
if(length(lrt_pos)>0){
optimize_pos<-optimize_every_posx(xpos=lrt_pos,z=Z,yn=y,genoname,en,rr=CriDis,tau=0,err_max=1e-6)
emba3_p<-3
result_emba3<-ebayes_EM(x,multi_peak_new(Z,optimize_pos,en)$z,y,en,initial_sigma,initial_sigmak[optimize_pos],tau=0,err_max = 1e-8)
emba3_pos1<-optimize_pos[which(result_emba3$p1<(1-pchisq(emba3_p*2*log(10),2)))]
emba3_pos2<-optimize_pos[which(result_emba3$p2<(1-pchisq(emba3_p*2*log(10),2*(en-1))))]
emba3_pos3<-optimize_pos[which((result_emba3$p1>=(1-pchisq(emba3_p*2*log(10),2)))&(result_emba3$p2>=(1-pchisq(emba3_p*2*log(10),2*(en-1)))))]
emba3_pos_Q<-sort(union(emba3_pos1,emba3_pos3))
emba3_pos_QE<-sort(union(emba3_pos2,emba3_pos3))
emba3_pos<-sort(union(emba3_pos_Q,emba3_pos_QE))
# CriLOD<-3
if(length(emba3_pos_Q)>0){
z_M5_Q<-multi_code_classic(emba3_pos_Q,x[,seq(2,en),drop=F],yes_id)
order_Q<-sort(c(seq(1,dim(z_M5_Q)[2],2+2*(en-1)),seq(2,dim(z_M5_Q)[2],2+2*(en-1))))
z_M5_Q<-z_M5_Q[,order_Q]
}else{
z_M5_Q<-NULL
}
if(length(emba3_pos_QE)>0){
z_M5_QE<-multi_code_classic(emba3_pos_QE,x[,seq(2,en),drop=F],yes_id)
order_QE<-sort(c(seq(1,dim(z_M5_QE)[2],2+2*(en-1)),seq(2,dim(z_M5_QE)[2],2+2*(en-1))))
z_M5_QE<-z_M5_QE[,-order_QE,drop=F]
}else{
z_M5_QE<-NULL
}
if(length(emba3_pos_Q)>0){
lod_Q<-LRT_F2(xxn=cbind(x,z_M5_QE),xxx=z_M5_Q, yn=y,par=NULL,mk=2)# cbind(emba3_pos_Q,lod_Q)
lrt_pos_Q<-emba3_pos_Q[which(lod_Q>=CriLOD)]
}else{
lrt_pos_Q<-NULL
}
if(length(emba3_pos_QE)>0){
lod_QE<-LRT_F2(xxn=cbind(x,z_M5_Q),xxx=z_M5_QE, yn=y,par=NULL,mk=2*en-2)# cbind(emba3_pos_QE,lod_QE)
lrt_pos_QE<-emba3_pos_QE[which(lod_QE>=CriLOD)]
}else{
lrt_pos_QE<-NULL
}
lrt2_pos<-sort(union(lrt_pos_Q,lrt_pos_QE))
if(length(lrt2_pos)>0){
last_lod<-matrix(0,nrow=length(lrt2_pos),ncol=3)
last_lod[which(lrt2_pos%in%lrt_pos_Q),2]<-lod_Q[which(lod_Q>=CriLOD)]
last_lod[which(lrt2_pos%in%lrt_pos_QE),3]<-lod_QE[which(lod_QE>=CriLOD)]
last_lod[,1]<-last_lod[,2]+last_lod[,3]
IC_data<-cbind(x,multi_peak_new(Z,lrt2_pos,en)$z)
lm_IC<-lm(y~IC_data-1)
AIC(lm_IC)
BIC(lm_IC)
if(length(lrt_pos_Q)>0){
zM_Q<-multi_code_classic(lrt_pos_Q,x[,seq(2,en),drop=F],yes_id)
order_Q<-sort(c(seq(1,dim(zM_Q)[2],2+2*(en-1)),seq(2,dim(zM_Q)[2],2+2*(en-1))))
zM_Q<-zM_Q[,order_Q,drop=F]
}else{
zM_Q<-NULL
}
if(length(lrt_pos_QE)>0){
zM_QE<-multi_code_classic(lrt_pos_QE,x[,seq(2,en),drop=F],yes_id)
order_QE<-sort(c(seq(1,dim(zM_QE)[2],2+2*(en-1)),seq(2,dim(zM_QE)[2],2+2*(en-1))))
zM_QE<-zM_QE[,-order_QE,drop=F]
}else{
zM_QE<-NULL
}
IC_data<-cbind(cbind(x,zM_Q),zM_QE)
lm_IC<-lm(y~IC_data-1)
AIC(lm_IC)
BIC(lm_IC)
LR_marker<-LeftRight_marker(map=mapRaw,ChrPos=genoname[lrt2_pos,2:3,drop=F])
result_all<-effect_estimation(n_id=yes_id,xpos=lrt2_pos,lod=last_lod,en,ee=x[,seq(2,en),drop=F])
var_e<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_y<-matrix("",nrow=length(lrt2_pos),ncol=1)
var_e[1]<-round(result_all[[5]],4)
var_y[1]<-round(result_all[[6]],4)
data.all<-data.frame(genoname[lrt2_pos,2:3,drop=F],
round(result_all[[1]],4),round(last_lod,4),
LR_marker,
round(result_all[[3]]+result_all[[4]],4),
round(result_all[[3]],4),round(result_all[[4]],4),
round(result_all[[2]],4),
var_e,var_y)
# rep(AIC(lm_IC),length(lrt2_pos)),
# rep(BIC(lm_IC),length(lrt2_pos)))
rownames(data.all)<-NULL
colnames(data.all)<-c("Chr","Position(cM)",effect_name,
"LOD","LOD_QTL","LOD_QEI",
"Left_marker","right_marker",
"Var_Genet","Var_Genet_QTL","Var_Genet_QEI",
"r2(%)","r2_QTL(%)","r2_QEI(%)",
"Var_Error","Var_Phen(total)")
reslt_list<-list(result=data.all,p_Q=single_locus_model_result$log1,p_QE=single_locus_model_result$log3)
}else{
reslt_list<-NULL
warning("No QTL or QEI were detected!")
}
}else{
reslt_list<-NULL
warning("No QTL or QEI were detected!")
}
return(reslt_list)
}
|
## Rank hospitals in every state by 30-day mortality for a given outcome.
##
## Args:
##   outcome - one of "heart attack", "heart failure", "pneumonia".
##   num     - "best", "worst", or a rank number.
## Reads "outcome-of-care-measures.csv" from the working directory. Prints and
## returns (invisibly, via print) a data frame with one row per state and
## columns c(hospital, state); the hospital entry is NA when the requested
## rank does not exist in that state.
rankall <- function(outcome, num = "best") {
  # Column holding the 30-day mortality rate for each supported outcome.
  # Validates `outcome` up front, before reading the data, instead of
  # duplicating the whole ranking routine once per outcome.
  rate_col <- switch(outcome,
                     "heart attack"  = 11L,
                     "heart failure" = 17L,
                     "pneumonia"     = 23L,
                     stop("invalid outcome"))
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data[, 7] <- as.factor(data[, 7])    # column 7 = state abbreviation
  states <- levels(data[, 7])
  rnkHospital <- matrix(NA_character_, nrow = length(states), ncol = 2,
                        dimnames = list(states, c("hospital", "state")))
  rnkHospital[, 2] <- states
  for (st in states) {
    state_rows <- data[data[, 7] == st, ]
    # "Not Available" coerces to NA (with a warning), as in the original.
    state_rows[, rate_col] <- as.numeric(state_rows[, rate_col])
    # Rank by mortality rate, breaking ties alphabetically by hospital name
    # (column 2); drop hospitals with no rate.
    ranked <- state_rows[order(state_rows[, rate_col], state_rows[, 2]), c(2, rate_col)]
    ranked <- ranked[complete.cases(ranked), ]
    idx <- if (identical(num, "best")) 1L
           else if (identical(num, "worst")) nrow(ranked)
           else num
    rnkHospital[st, 1] <- ranked[idx, 1]   # NA when idx is out of range
  }
  y <- as.data.frame(rnkHospital)
  print(y)
}
testlist <- list(lambda = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), logq = numeric(0), nu = numeric(0), tol = 0, ymax = 0)
result <- do.call(COMPoissonReg:::qcmp_cpp,testlist)
str(result) | /COMPoissonReg/inst/testfiles/qcmp_cpp/libFuzzer_qcmp_cpp/qcmp_cpp_valgrind_files/1612728719-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 296 | r | testlist <- list(lambda = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), logq = numeric(0), nu = numeric(0), tol = 0, ymax = 0)
result <- do.call(COMPoissonReg:::qcmp_cpp,testlist)
str(result) |
testlist <- list(Beta = 0, CVLinf = -1.37672047637255e-268, FM = 2.78530958780685e-307, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 4305175L, nlen = -819047800L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615830358-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 486 | r | testlist <- list(Beta = 0, CVLinf = -1.37672047637255e-268, FM = 2.78530958780685e-307, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 4305175L, nlen = -819047800L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_handling.r
\name{aggr_data}
\alias{aggr_data}
\title{convert individual event information to aggregated information per network node}
\usage{
aggr_data(dat, from = NULL, cumsum = TRUE)
}
\arguments{
\item{dat}{\code{data.frame} with variables \code{'node'}, \code{'time'}, \code{'delay'}; event data with one row per single event and its count magnitude}
\item{from}{character in \code{\link{strftime}} format, e.g. \code{"2014-06-12 16:15"}, data is subsetted accordingly before aggregation}
\item{cumsum}{logical indicating whether data is aggregated by cumulative sum, default is \code{TRUE}}
}
\value{
\code{data.frame} of dimension \code{(TxK)}, where \code{T} is the number of observation times and \code{K} the number of network nodes. Thus, each row represents a snapshot of the spreading process at a specific observation time with the event magnitude observed at the network nodes. Rownames are observation times, colnames are node names.
}
\description{
convert individual event information to aggregated information per network node
}
\seealso{
Other data_handling:
\code{\link{read_DB_data}()}
}
\concept{data_handling}
| /man/aggr_data.Rd | no_license | cran/NetOrigin | R | false | true | 1,206 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_handling.r
\name{aggr_data}
\alias{aggr_data}
\title{convert individual event information to aggregated information per network node}
\usage{
aggr_data(dat, from = NULL, cumsum = TRUE)
}
\arguments{
\item{dat}{\code{data.frame} with variables \code{'node'}, \code{'time'}, \code{'delay'}; event data with one row per single event and its count magnitude}
\item{from}{character in \code{\link{strftime}} format, e.g. \code{"2014-06-12 16:15"}, data is subsetted accordingly before aggregation}
\item{cumsum}{logical indicating whether data is aggregated by cumulative sum, default is \code{TRUE}}
}
\value{
\code{data.frame} of dimension \code{(TxK)}, where \code{T} is the number of observation times and \code{K} the number of network nodes. Thus, each row represents a snapshot of the spreading process at a specific observation time with the event magnitude observed at the network nodes. Rownames are observation times, colnames are node names.
}
\description{
convert individual event information to aggregated information per network node
}
\seealso{
Other data_handling:
\code{\link{read_DB_data}()}
}
\concept{data_handling}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/well_row.R
\name{well_row}
\alias{well_row}
\title{Get row number from a well label}
\usage{
well_row(well)
}
\arguments{
\item{well}{A well label (e.g., "A4")}
}
\value{
Integer value for the corresponding row
}
\description{
Convert a well label such as \code{"A4"} to the integer index of its row on the plate.
}
\note{
This currently does not fully support 1536-well plates
}
\examples{
well_row("C6")
}
\seealso{
\code{\link{well_column}}
}
| /man/well_row.Rd | permissive | briandconnelly/microtiterr | R | false | true | 443 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/well_row.R
\name{well_row}
\alias{well_row}
\title{Get row number from a well label}
\usage{
well_row(well)
}
\arguments{
\item{well}{A well label (e.g., "A4")}
}
\value{
Integer value for the corresponding row
}
\description{
Convert a well label such as \code{"A4"} to the integer index of its row on the plate.
}
\note{
This currently does not fully support 1536-well plates
}
\examples{
well_row("C6")
}
\seealso{
\code{\link{well_column}}
}
|
# Regression test: icomp's trace implementation (icomp:::tr) must agree with
# the obvious base-R definition sum(diag(x)), including for non-square
# matrices, where diag() returns the leading min(nrow, ncol) diagonal.
tr1 <- function(x) sum(diag(x))
tr2 <- icomp:::tr
# Fixed seed so the check is reproducible.
set.seed(1234)
x <- matrix(rnorm(30), 10)  # 10 x 3: deliberately non-square
stopifnot(all.equal(tr1(x), tr2(x)))
x <- t(x)  # 3 x 10: the transpose shares the same leading diagonal
stopifnot(all.equal(tr1(x), tr2(x)))
| /tests/trace.R | no_license | wrathematics/icomp | R | false | false | 179 | r | tr1 <- function(x) sum(diag(x))
tr2 <- icomp:::tr
set.seed(1234)
x <- matrix(rnorm(30), 10)
stopifnot(all.equal(tr1(x), tr2(x)))
x <- t(x)
stopifnot(all.equal(tr1(x), tr2(x)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occCitation.R
\name{occCitation}
\alias{occCitation}
\title{Occurrence Citations}
\usage{
occCitation(x = NULL)
}
\arguments{
\item{x}{An object of class \code{\link{occCiteData}}}
}
\value{
A data frame with citation information for the occurrence records
}
\description{
Harvests citations from GBIF for occurrence data
}
\examples{
myCitations <- occCitation(x = myoccCiteObject);
}
| /man/occCitation.Rd | no_license | cmerow/occCite | R | false | true | 454 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occCitation.R
\name{occCitation}
\alias{occCitation}
\title{Occurrence Citations}
\usage{
occCitation(x = NULL)
}
\arguments{
\item{x}{An object of class \code{\link{occCiteData}}}
}
\value{
A data frame with citation information for the occurrence records
}
\description{
Harvests citations from GBIF for occurrence data
}
\examples{
myCitations <- occCitation(x = myoccCiteObject);
}
|
####################################
### Methods to calculate LLs of
### OTU structures
### And then to plot them using the network package
###
### Jarrett Byrnes
### Last updated: 4/11/2013
###
### Changelog
### 4/11/2013 - added poisson LL for LikColDensity
####################################
library(bipartite)
library(network)
library(plyr)
library(parallel)
## Get the LL based on all sequences being assumed to have
## equal abundances within a group
## Likelihood (or log-likelihood) of one site column assuming the column's
## total count is spread evenly across its cells.
##
## Args:
##   acol      - numeric vector of counts for one site column within a group.
##   useLogLik - if TRUE return the summed log-likelihood, otherwise the
##               product of the raw likelihoods.
##   dist      - "binom": counts ~ Binomial(sum(acol), 1/length(acol));
##               "pois":  counts ~ Poisson(sum(acol)/length(acol)).
## Returns a single numeric value; 0 for an all-zero column (0*log(0) treated
## as 0, following Allesina and Pascual 2009).
LikColDensity <- function(acol, useLogLik=TRUE, dist=c("binom", "pois")){
	# Fail fast with a clear message on a bad `dist` (previously an unmatched
	# value fell through to an obscure "object 'LL' not found" error).
	dist <- match.arg(dist)
	if(sum(acol)==0) return(0) #defining 0*log(0) as 0 as in Allesina and Pascual 2009
	s <- sum(acol)
	LL <- switch(dist,
		binom = dbinom(acol, size=s, prob=1/length(acol), log=useLogLik),
		pois  = dpois(acol, s/length(acol), log=useLogLik))
	if(useLogLik) sum(LL) else prod(LL)
}
## Get the LL based on network structure
## Bernoulli (presence/absence) likelihood of one site column under a single
## occupancy probability p = (# occupied cells) / (# cells).
## Returns 0 for an all-zero column (0*log(0) treated as 0, as in Allesina
## and Pascual 2009) and 0 for a fully occupied column (log(1-p) would be
## log(0)). With useLogLik=FALSE the raw likelihood is returned instead.
LikColNet <- function(acol, useLogLik=T){
	if(sum(acol)==0) return(0)
	present <- sum(acol > 0)
	total <- length(acol)
	if(present == total) return(0)
	p_occ <- present / total
	if(useLogLik){
		present * log(p_occ) + (total - present) * log(1 - p_occ)
	}else{
		p_occ^present * (1 - p_occ)^(total - present)
	}
}
## Take a OTU column and calculate the LL of the grouping structure
## Total log-likelihood of an OTU grouping.
## For each unique OTU label in column `otuCol` of `adf`, sums per-site column
## log-likelihoods over the member sequences:
##   getNet=T adds the presence/absence (network) term via LikColNet;
##   binary=F additionally adds the abundance term via LikColDensity.
## Per-OTU terms are computed in parallel with parallel::mclapply.
## NOTE(review): with getNet=FALSE and binary=TRUE every group scores 0.
getLogLik <- function(adf, otuCol, ids=4:6, getNet=T, binary=T, mc.cores=2, dist="binom"){
	# Replace the column NAME with the column's values.
	otuCol <-adf[[otuCol]]
	liks <- mclapply(unique(otuCol), function(x){
		# All sequences assigned to this OTU.
		reducedDF <- adf[which(otuCol==x),]
		LL <- 0
		if(getNet) LL <- LL + sum(apply(reducedDF[,ids], 2, LikColNet))
		if(!binary) LL <- LL + sum(apply(reducedDF[,ids], 2, LikColDensity, dist=dist))
		LL
	}, mc.cores=mc.cores)
	liks <- as.vector(simplify2array(liks))
	sum(liks)
}
# loc <- matrix(c(1,0,0,
# 1,0,0,
# 0,1,1,
# 0,1,1,
# 1,0,1), ncol=3, byrow=T)
# names(loc) = c("one", "two", "three")
# mdf <- data.frame(low=1:5, med = c(1,1,2,2,3), high=c(1,1,2,2,2))
# mdf <- cbind(mdf, loc)
### Uses the bipartite library to make a bipirtate graph
### although I'm note a fan of how it looks
## Draw the OTU-by-site structure as a bipartite web with bipartite::plotweb,
## suppressing the upper-level (OTU) labels. Extra arguments are passed on to
## plotweb.
netPlot <- function(adf, otuCol, ids=4:6, ...){
	# One row of per-site totals per OTU group, labelled by the group value.
	otu_totals <- ddply(adf, otuCol, function(grp) colSums(grp[,ids]))
	rownames(otu_totals) <- otu_totals[,1]
	plotweb(t(otu_totals[,-1]), high.lablength=0, ...)
}
#netPlot(mdf, "low")
#netPlot(mdf, "med")
#netPlot(mdf, "high")
### Uses the network library to make a network graph
### with the sites as the blue nodes. Can accept
### other arguments to plot.network
getOTUAbundMat <- function(adf, otuCol, ids=20:27) ddply(adf, otuCol, function(x) colSums(x[,ids]))
# Plot the OTU-group / site network with the `network` package: sites are
# one set of vertices, OTU groups another, edges weighted by the
# group x site abundance matrix.
#
# adf:     data frame of sequences with site count columns
# otuCol:  name of the grouping column, passed to getOTUAbundMat()
# ids:     indices of the site columns (used here for vertex bookkeeping)
# setCol:  "site" (default) or "occurance" to color OTU vertices by the
#          number of sites they occur in
# setSize: "same" (default) or "abundance" to scale OTU vertex size by
#          total abundance
# other arguments set colors/sizes and `...` is forwarded to plot.network()
netplot2 <- function(adf, otuCol, ids=4:6, edge.scale=20,
                     setCol="site",
                     setSize="same",
                     seqCol = "red",
                     siteCol = rep("grey", length(ids)),
                     site.cex=1,
                     edge.col="black",
                     size.scale=2,
                     site.sides=50,
                     occur.cols=rainbow(length(ids)),
                     ...){
  # NOTE(review): getOTUAbundMat() is called with its own default ids
  # (20:27), not this function's `ids` argument -- confirm intentional
  groupWeb <- getOTUAbundMat(adf, otuCol)
  groupWeb <- groupWeb[,-1]
  # build a square block adjacency matrix: empty site block on top,
  # group x site abundances below, zero-padded on the right
  zmat1 <- matrix(rep(0, length(ids)^2), ncol=length(ids))
  g <- rbind(zmat1, as.matrix(groupWeb))
  zempty <- matrix(rep(0, nrow(g)*nrow(groupWeb)), nrow=nrow(g))
  g <- cbind(g, zempty)
  n_plots <- rowSums(g>0)  # number of sites each vertex connects to
  abund <- rowSums(g)      # total abundance per vertex
  g2 <- network(as.matrix(g))
  # default appearance: site vertices first, then OTU-group vertices
  vertex.col <- c(siteCol, rep(seqCol, nrow(groupWeb)))
  vertex.cex <- c(rep(site.cex, length(ids)), rep(1, nrow(groupWeb)))
  vertex.sides <- c(rep(site.sides,length(ids)), rep(50, nrow(groupWeb)))
  if(setCol=="occurance"){
    # color OTU vertices by occurrence count; give sites polygonal shapes
    vertex.col <- c(siteCol, occur.cols[n_plots])
    vertex.cex <- c(rep(site.cex, length(ids)), rep(1, nrow(groupWeb)))
    vertex.sides <- c(3:(3+length(ids)), rep(50, nrow(groupWeb)))
  }
  if(setSize == "abundance") vertex.cex <- c(rep(site.cex, length(ids)), size.scale*abund/max(abund))
  #note, I still don't like how edges work here...need to figure out a better scheme
  plot(g2, vertex.col=vertex.col,
       vertex.cex = vertex.cex,
       vertex.sides = vertex.sides,
       # edge.lwd=g/max(g)*edge.scale,
       usearrows=FALSE,
       edge.col=edge.col,
       ...)
}
| /getNetworkAIC.R | no_license | el-ee/otu-frontiers | R | false | false | 4,355 | r | ####################################
### Methods to calculate LLs of
### OTU structures
### And then to plot them using the network package
###
### Jarrett Byrnes
### Last updated: 4/11/2013
###
### Changelog
### 4/11/2013 - added poisson LL for LikColDensity
####################################
library(bipartite)
library(network)
library(plyr)
library(parallel)
## Get the LL based on all sequences being assumed to have
## equal abundances within a group
## Log-likelihood of the observed counts in one OTU column under the
## assumption that abundance is spread evenly across the group.
##
## acol      numeric vector of per-sequence counts for one OTU column
## useLogLik if TRUE (default) return the summed log-likelihood,
##           otherwise the product of the raw likelihoods
## dist      "binom" for a binomial model with prob 1/length(acol),
##           "pois" for a Poisson model with mean sum(acol)/length(acol)
##
## Returns a single numeric value; 0 for an all-zero column, defining
## 0*log(0) as 0 as in Allesina and Pascual 2009.
LikColDensity <- function(acol, useLogLik=TRUE, dist="binom"){
  s <- sum(acol)
  if(s == 0) return(0) # defining 0*log(0) as 0 as in Allesina and Pascual 2009
  if(dist == "binom"){
    LL <- dbinom(acol, size=s, prob=1/length(acol), log=useLogLik)
  } else if(dist == "pois"){
    LL <- dpois(acol, s/length(acol), log=useLogLik)
  } else {
    # fail loudly instead of the cryptic "object 'LL' not found"
    stop("dist must be 'binom' or 'pois'")
  }
  if(useLogLik) return(sum(LL))
  prod(LL)
}
## Get the LL based on network structure
## Log-likelihood of the presence/absence pattern in one OTU column.
## Each of the s entries is treated as occupied independently with
## probability p = l/s, where l is the number of non-zero entries.
##
## acol      numeric vector of counts; only presence (> 0) is used
## useLogLik if TRUE (default) return the log-likelihood, otherwise the
##           raw likelihood p^l * (1-p)^(s-l)
##
## Returns 0 when the column is all zero, or when every entry is present
## (l == s), where the log form would involve log(0); 0*log(0) is treated
## as 0 as in Allesina and Pascual 2009.
LikColNet <- function(acol, useLogLik=TRUE){
  if(sum(acol) == 0) return(0)
  occ <- as.numeric(acol > 0)
  s <- length(occ)
  l <- sum(occ)
  if(s == l) return(0) # perfect match; avoid log(0) problems
  p <- l/s
  if(useLogLik) return(l*log(p) + (s - l)*log(1 - p))
  p^l * (1 - p)^(s - l)
}
## Take a OTU column and calculate the LL of the grouping structure
## Total log-likelihood of an OTU grouping across all groups.
##
## adf:      data frame with one row per sequence and site count columns
## otuCol:   name of the column in `adf` holding the group assignment
## ids:      indices of the site/count columns
## getNet:   include the presence/absence (LikColNet) component
## binary:   if FALSE, also add the count-density (LikColDensity) component
## mc.cores: cores for mclapply (parallel package)
## dist:     passed through to LikColDensity
## Returns the log-likelihood summed over all unique groups.
getLogLik <- function(adf, otuCol, ids=4:6, getNet=T, binary=T, mc.cores=2, dist="binom"){
  # replace the column name with the actual vector of group labels
  otuCol <-adf[[otuCol]]
  # one log-likelihood per OTU group, computed in parallel
  liks <- mclapply(unique(otuCol), function(x){
    reducedDF <- adf[which(otuCol==x),]
    LL <- 0
    # presence/absence component, one term per site column
    if(getNet) LL <- LL + sum(apply(reducedDF[,ids], 2, LikColNet))
    # abundance component only when counts are not treated as binary
    if(!binary) LL <- LL + sum(apply(reducedDF[,ids], 2, LikColDensity, dist=dist))
    LL
  }, mc.cores=mc.cores)
  liks <- as.vector(simplify2array(liks))
  sum(liks)
}
# loc <- matrix(c(1,0,0,
# 1,0,0,
# 0,1,1,
# 0,1,1,
# 1,0,1), ncol=3, byrow=T)
# names(loc) = c("one", "two", "three")
# mdf <- data.frame(low=1:5, med = c(1,1,2,2,3), high=c(1,1,2,2,2))
# mdf <- cbind(mdf, loc)
### Uses the bipartite library to make a bipartite graph
### although I'm not a fan of how it looks
# Plot the OTU-group x site web as a bipartite graph via bipartite::plotweb.
#
# adf:    data frame of sequences with site count columns
# otuCol: name of the grouping column (passed to plyr::ddply)
# ids:    indices of the site columns to sum per group
# ...:    forwarded to plotweb()
netPlot <- function(adf, otuCol, ids=4:6, ...){
  # group x site abundance matrix; first ddply column is the group label
  groupWeb <- ddply(adf, otuCol, function(x) colSums(x[,ids]))
  rownames(groupWeb) <- groupWeb[,1]
  groupWeb <- groupWeb[,-1]
  # transpose so sites are rows; hide the labels on the upper level
  plotweb(t(groupWeb), high.lablength=0,...)
}
#netPlot(mdf, "low")
#netPlot(mdf, "med")
#netPlot(mdf, "high")
### Uses the network library to make a network graph
### with the sites as the blue nodes. Can accept
### other arguments to plot.network
getOTUAbundMat <- function(adf, otuCol, ids=20:27) ddply(adf, otuCol, function(x) colSums(x[,ids]))
# Plot the OTU-group / site network with the `network` package: sites are
# one set of vertices, OTU groups another, edges weighted by the
# group x site abundance matrix.
#
# adf:     data frame of sequences with site count columns
# otuCol:  name of the grouping column, passed to getOTUAbundMat()
# ids:     indices of the site columns (used here for vertex bookkeeping)
# setCol:  "site" (default) or "occurance" to color OTU vertices by the
#          number of sites they occur in
# setSize: "same" (default) or "abundance" to scale OTU vertex size by
#          total abundance
# other arguments set colors/sizes and `...` is forwarded to plot.network()
netplot2 <- function(adf, otuCol, ids=4:6, edge.scale=20,
                     setCol="site",
                     setSize="same",
                     seqCol = "red",
                     siteCol = rep("grey", length(ids)),
                     site.cex=1,
                     edge.col="black",
                     size.scale=2,
                     site.sides=50,
                     occur.cols=rainbow(length(ids)),
                     ...){
  # NOTE(review): getOTUAbundMat() is called with its own default ids
  # (20:27), not this function's `ids` argument -- confirm intentional
  groupWeb <- getOTUAbundMat(adf, otuCol)
  groupWeb <- groupWeb[,-1]
  # build a square block adjacency matrix: empty site block on top,
  # group x site abundances below, zero-padded on the right
  zmat1 <- matrix(rep(0, length(ids)^2), ncol=length(ids))
  g <- rbind(zmat1, as.matrix(groupWeb))
  zempty <- matrix(rep(0, nrow(g)*nrow(groupWeb)), nrow=nrow(g))
  g <- cbind(g, zempty)
  n_plots <- rowSums(g>0)  # number of sites each vertex connects to
  abund <- rowSums(g)      # total abundance per vertex
  g2 <- network(as.matrix(g))
  # default appearance: site vertices first, then OTU-group vertices
  vertex.col <- c(siteCol, rep(seqCol, nrow(groupWeb)))
  vertex.cex <- c(rep(site.cex, length(ids)), rep(1, nrow(groupWeb)))
  vertex.sides <- c(rep(site.sides,length(ids)), rep(50, nrow(groupWeb)))
  if(setCol=="occurance"){
    # color OTU vertices by occurrence count; give sites polygonal shapes
    vertex.col <- c(siteCol, occur.cols[n_plots])
    vertex.cex <- c(rep(site.cex, length(ids)), rep(1, nrow(groupWeb)))
    vertex.sides <- c(3:(3+length(ids)), rep(50, nrow(groupWeb)))
  }
  if(setSize == "abundance") vertex.cex <- c(rep(site.cex, length(ids)), size.scale*abund/max(abund))
  #note, I still don't like how edges work here...need to figure out a better scheme
  plot(g2, vertex.col=vertex.col,
       vertex.cex = vertex.cex,
       vertex.sides = vertex.sides,
       # edge.lwd=g/max(g)*edge.scale,
       usearrows=FALSE,
       edge.col=edge.col,
       ...)
}
|
# Read one electrogustometry staircase file and return a tidy data frame.
#
# fileName:   path to a tab-separated gustometer log file
# timeStamps: if TRUE, return the "Time: " stamp lines instead of the data
#
# The file may contain restarts; each restart writes a "Time: " stamp and
# the last two stamps are assumed to bracket the run actually used.
# Returns NULL when fewer than two stamps are found or the bracketed
# block is too short.
gustometerData <- function(fileName, timeStamps = FALSE) {
  # Read in an electrogustometry file
  # If fewer than two time stamps are read, the function returns empty
  # Otherwise assumes that last two stamps delineate the data of interest
  # i.e. there was a restart before the set that was used
  # Output is dataframe of file contents with new column of 'Correct'
  # If timeStamps is TRUE then the list of timestamps is returned
  fileData <-readLines(fileName)
  #verify that there are at least two time stamps - indicating completed data
  searchS = 'Time: '
  times = as.numeric(grepl(searchS,fileData))
  numStamps <- sum(times)
  if (numStamps <2) {
    return()
  }
  startnend = which(times == 1)
  # a usable run needs at least 18 rows between the first two stamps
  if (startnend[2]-startnend[1]<18) {
    return()
  }
  if (timeStamps) {
    return(fileData[startnend])
  }
  if (numStamps > 2) {
    startnend<-rev(rev(startnend)[1:2])
    #this takes the last two elements of startnend
  }
  # parse the rows between the chosen pair of time stamps
  thisData <- read.table(
    fileName,
    header = FALSE,
    sep = "\t",
    col.names = c("Stim","nLev","Resp"),
    nrows = startnend[2]-startnend[1]-1,
    skip = startnend[1]
  )
  # actStim = stimulus level carried over from the previous same-side
  # trial; sideNum = per-side running trial counter. The first three
  # trials are seeded with fixed values below.
  actStim <- c(6,6,6)
  sideNum <- c(1,2,1)
  rNum <- 2
  lNum <- 3
  # NOTE(review): assumes Resp is a two-level factor whose levels map to
  # L/R in this order -- confirm against the file format
  levels(thisData$Resp) <- c("L", "R")
  if (thisData$Stim[1] == thisData$Resp[1]) {
    actStim[2] = 6
  } else {actStim[2] = 8}
  for (i in 4:length(thisData$Stim)){
    # per-side running trial number
    if (thisData$Stim[i] == "R") {
      sideNum[i] <- rNum
      rNum <- rNum+1
    } else {sideNum[i] <- lNum
      lNum <- lNum+1}
    # look back up to 4 trials for the most recent same-side stimulus and
    # carry its level forward as the current track value
    if (thisData$Stim[i] == thisData$Stim[i-1]) {
      actStim[i] = thisData$nLev[i-1]
    } else if (thisData$Stim[i] == thisData$Stim[i-2]) {
      actStim[i] = thisData$nLev[i-2]
    } else if (thisData$Stim[i] == thisData$Stim[i-3]) {
      actStim[i] = thisData$nLev[i-3]
    } else if (thisData$Stim[i] == thisData$Stim[i-4]) {
      actStim[i] = thisData$nLev[i-4] }
  }
  # correct = response side matches stimulus side
  thisData$corr <- as.numeric(thisData$Stim == thisData$Resp)
  thisData$track <- actStim
  thisData$num <- sideNum
  return(thisData)
} | /gustometerData.R | no_license | pd73/R_TSFB | R | false | false | 1,922 | r | gustometerData <- function(fileName, timeStamps = FALSE) {
# Read in an electrogustometry file
# If fewer than two time stamps are read, the function returns empty
# Otherwise assumes that last two stamps delineate the data of interest
# i.e. there was a restart before the set that was used
# Output is dataframe of file contents with new column of 'Correct'
# If timeStamps is TRUE then the list of timestamps is returned
fileData <-readLines(fileName)
#verify that there are at least two time stamps - indicating completed data
searchS = 'Time: '
times = as.numeric(grepl(searchS,fileData))
numStamps <- sum(times)
if (numStamps <2) {
return()
}
startnend = which(times == 1)
if (startnend[2]-startnend[1]<18) {
return()
}
if (timeStamps) {
return(fileData[startnend])
}
if (numStamps > 2) {
startnend<-rev(rev(startnend)[1:2])
#this takes the last two elements of startnend
}
thisData <- read.table(
fileName,
header = FALSE,
sep = "\t",
col.names = c("Stim","nLev","Resp"),
nrows = startnend[2]-startnend[1]-1,
skip = startnend[1]
)
actStim <- c(6,6,6)
sideNum <- c(1,2,1)
rNum <- 2
lNum <- 3
levels(thisData$Resp) <- c("L", "R")
if (thisData$Stim[1] == thisData$Resp[1]) {
actStim[2] = 6
} else {actStim[2] = 8}
for (i in 4:length(thisData$Stim)){
if (thisData$Stim[i] == "R") {
sideNum[i] <- rNum
rNum <- rNum+1
} else {sideNum[i] <- lNum
lNum <- lNum+1}
if (thisData$Stim[i] == thisData$Stim[i-1]) {
actStim[i] = thisData$nLev[i-1]
} else if (thisData$Stim[i] == thisData$Stim[i-2]) {
actStim[i] = thisData$nLev[i-2]
} else if (thisData$Stim[i] == thisData$Stim[i-3]) {
actStim[i] = thisData$nLev[i-3]
} else if (thisData$Stim[i] == thisData$Stim[i-4]) {
actStim[i] = thisData$nLev[i-4] }
}
thisData$corr <- as.numeric(thisData$Stim == thisData$Resp)
thisData$track <- actStim
thisData$num <- sideNum
return(thisData)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DerivedValues.R
\name{DerivedValues}
\alias{DerivedValues}
\title{Function to post-process MCMC results of the year-specific sampler}
\usage{
DerivedValues(N, gamma, phi, psi, M, extrap = 0)
}
\arguments{
\item{N}{matrix containing the posteriors for abundance}
\item{gamma}{matrix or vector containing the posteriors for gamma(s)}
\item{phi}{matrix or vector containing the posteriors for phi(s)}
\item{psi}{vector containing the posterior for psi}
\item{M}{an integer indicating the i dimension of the augmented data}
\item{extrap}{an optional integer indicating how many primary periods to extrapolate beyond the
observed primary periods for EN. These are abundance projections from the population growth model}
}
\value{
a list containing realized and expected population growth rates and population sizes.
}
\description{
This function derives realized and expected population growth rates and population sizes
from the inputted MCMC chains.
}
\examples{
\dontrun{
#Run a model
t=3
N=50
p0=0.5
lam0=-log(1-p0)
sigma=0.750
phi=0.7
gamma=0.3
buff=3
X=list(expand.grid(4:9,4:9),expand.grid(4:9,4:9),expand.grid(4:9,4:9)) #Note we need 3 trap objects stuffed into the trap list
K=c(10,10,10) #and 3 numbers of occasions within primary period
M=225
data=simOpenSCR(N=N,phi=phi,gamma=gamma,lam0=lam0,sigma=sigma,K=K,X=X,t=t,M=M,buff=buff,obstype="bernoulli")
inits=list(lam0=lam0,sigma=sigma,gamma=gamma,phi=phi,psi=N[1]/M)
niter=1000
nburn=0
nthin=1
proppars=list(lam0=0.025,sigma=0.025,gamma=0.1,phi=0.1,s1x=0.2,s1y=0.2,propz=c(30,30)) #Need 1 more propz
out=mcmc.OpenSCR(data,niter=niter,nburn=nburn, nthin=nthin, M =M, inits=inits,proppars=proppars,Rcpp=TRUE)
DV=DerivedValues(out$out[,5:7],out$out[,3],out$out[,4],out$out[,8],M)
str(DV)
library(coda)
plot(mcmc(DV$EN))
plot(mcmc(DV$lambda))
plot(mcmc(DV$Elambda))
#should discard burnin
}
}
\author{
Ben Augustine
}
| /man/DerivedValues.Rd | no_license | benaug/OpenPopSCR | R | false | true | 1,953 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DerivedValues.R
\name{DerivedValues}
\alias{DerivedValues}
\title{Function to post-process MCMC results of the year-specific sampler}
\usage{
DerivedValues(N, gamma, phi, psi, M, extrap = 0)
}
\arguments{
\item{N}{matrix containing the posteriors for abundance}
\item{gamma}{matrix or vector containing the posteriors for gamma(s)}
\item{phi}{matrix or vector containing the posteriors for phi(s)}
\item{psi}{vector containing the posterior for psi}
\item{M}{an integer indicating the i dimension of the augmented data}
\item{extrap}{an optional integer indicating how many primary periods to extrapolate beyond the
observed primary periods for EN. These are abundance projections from the population growth model}
}
\value{
a list containing realized and expected population growth rates and population sizes,
}
\description{
This function derives realized and expected population growth rates and population sizes
from the inputted MCMC chains.
}
\examples{
\dontrun{
#Run a model
t=3
N=50
p0=0.5
lam0=-log(1-p0)
sigma=0.750
phi=0.7
gamma=0.3
buff=3
X=list(expand.grid(4:9,4:9),expand.grid(4:9,4:9),expand.grid(4:9,4:9)) #Note we need 3 trap objects stuffed into the trap list
K=c(10,10,10) #and 3 numbers of occasions within primary period
M=225
data=simOpenSCR(N=N,phi=phi,gamma=gamma,lam0=lam0,sigma=sigma,K=K,X=X,t=t,M=M,buff=buff,obstype="bernoulli")
inits=list(lam0=lam0,sigma=sigma,gamma=gamma,phi=phi,psi=N[1]/M)
niter=1000
nburn=0
nthin=1
proppars=list(lam0=0.025,sigma=0.025,gamma=0.1,phi=0.1,s1x=0.2,s1y=0.2,propz=c(30,30)) #Need 1 more propz
out=mcmc.OpenSCR(data,niter=niter,nburn=nburn, nthin=nthin, M =M, inits=inits,proppars=proppars,Rcpp=TRUE)
DV=DerivedValues(out$out[,5:7],out$out[,3],out$out[,4],out$out[,8],M)
str(DV)
library(coda)
plot(mcmc(DV$EN))
plot(mcmc(DV$lambda))
plot(mcmc(DV$Elambda))
#should discard burnin
}
}
\author{
Ben Augustine
}
|
#' InternetProvider
#'
#' @export
#' @keywords internal
#' @param locale (character) the locale to use. See
#' `internet_provider_locales` for locales supported (default: en_US)
#' @details
#' **Methods**
#'
#' - `to_ascii(x)` - convert to ascii
#' - `email(domain)` - get an email address
#' - `safe_email()` - get a safe email address
#' - `free_email()` - free email address
#' - `company_email()` - company email address
#' - `ascii_email()` - ascii email address
#' - `ascii_safe_email()` - safe ascii email address
#' - `ascii_free_email()` - an ascii free email address
#' - `ascii_company_email()` - ascii company email address
#' - `user_name()` - a user name
#' - `tld()` - a tld
#' - `free_email_domain()` - free email domain
#' - `url(schemes)` - get a url
#' - `domain_name(levels)` - get a domain name
#' - `domain_word()` - get a domain word
#' - `ipv4()` - get a random ipv4 address
#' - `mac_address()` - get a mac address
#' - `uri_page()` - get a uri page
#' - `uri_path()` - get a uri path
#' - `uri_extension()` - get a uri extension
#' - `image_url(width, height)` - get an image url
#'
#' @format NULL
#' @usage NULL
#'
#' @note Note that if a locale you set doesn't have a locale specific set
#' of data for [PersonProvider] or [CompanyProvider] we fall back to
#' `en_US`
#'
#' @examples
#' (x <- InternetProvider$new())
#' x$locale
#'
#' # uri/url/tld/etc.
#' x$tld()
#' x$slug()
#' x$domain_word()
#' x$domain_name()
#' x$domain_name(levels = 2)
#' x$domain_name(levels = 3)
#' x$domain_name(levels = 10)
#' ## url's
#' x$url()
#' x$url(schemes = c('hbbp', 'hggp'))
#' x$image_url()
#' ## uri's
#' x$uri()
#' x$uri_page()
#' x$uri_extension()
#' x$uri_path()
#' x$uri_path(deep = 1)
#' x$uri_path(deep = 2)
#' x$uri_path(deep = 3)
#' x$uri_path(deep = 4)
#'
#' # user name
#' x$user_name()
#'
#' # emails
#' x$email()
#' x$safe_email()
#' x$free_email()
#' x$company_email()
#' x$free_email_domain()
#' x$ascii_email()
#' x$ascii_safe_email()
#' x$ascii_free_email()
#' x$ascii_company_email()
#'
#' # addresses, mac, ipv4
#' x$mac_address()
#' if (requireNamespace("iptools", quietly=TRUE)) {
#' x$ipv4()
#' }
#'
#' # different locales
#' (x <- InternetProvider$new(locale = "en_AU"))
#' x$locale
#' x$tld()
#' x$email()
#' x$free_email_domain()
#'
#' (x <- InternetProvider$new(locale = "de_DE"))
#' x$locale
#' x$tld()
#' x$uri()
#' x$email()
#' x$ascii_email()
#'
#' (x <- InternetProvider$new(locale = "bg_BG"))
#' x$locale
#' x$tld()
#' x$uri()
#' x$url()
#' x$user_name()
#' x$email()
#' x$ascii_email()
#'
#' (x <- InternetProvider$new(locale = "cs_CZ"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' (x <- InternetProvider$new(locale = "fa_IR"))
#' x$url()
#'
#' (x <- InternetProvider$new(locale = "fr_FR"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' (x <- InternetProvider$new(locale = "hr_HR"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' # convert a string to ascii with stringi pkg
#' if (requireNamespace("stringi", quietly=TRUE)) {
#' x$to_ascii("anï")
#' }
InternetProvider <- R6::R6Class(
  inherit = BaseProvider,
  'InternetProvider',
  public = list(
    # locale in use, e.g. "en_US"; set in initialize()
    locale = NULL,
    safe_email_tlds = c('org', 'com', 'net'),
    free_email_domains = c('gmail.com', 'yahoo.com', 'hotmail.com'),
    # 'com' is repeated so random_element() picks it more often
    tlds = c(
      'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'
    ),
    uri_pages = c(
      'index', 'home', 'search', 'main', 'post', 'homepage', 'category',
      'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'
    ),
    uri_paths = c(
      'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',
      'tags', 'blog', 'posts', 'list', 'explore'
    ),
    uri_extensions = c(
      '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',
      '.asp'
    ),
    user_name_formats = c(
      '{{last_names}}.{{first_names}}',
      '{{first_names}}.{{last_names}}',
      '{{first_names}}##',
      '?{{last_names}}'
    ),
    email_formats = c('{{user_name}}@{{domain_name}}',
                      '{{user_name}}@{{free_email_domain}}'),
    url_formats = c('www.{{domain_name}}/', '{{domain_name}}/'),
    uri_formats = c(
      '{{url}}',
      '{{url}}{{uri_page}}/',
      '{{url}}{{uri_page}}{{uri_extension}}',
      '{{url}}{{uri_path}}/{{uri_page}}/',
      '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}'
    ),
    # BUG FIX: first template previously read "&w={{width}" (unbalanced
    # braces), which whisker could not substitute
    image_placeholder_services = c(
      'https://placeholdit.imgix.net/~text?txtsize=55&txt={{width}}x{{height}}&w={{width}}&h={{height}}',
      'https://www.lorempixel.com/{{width}}/{{height}}',
      'https://dummyimage.com/{{width}}x{{height}}'
    ),
    # locale-specific c(search, replace) pairs applied by to_ascii()
    replacements = list(),

    # Validate the requested locale and load any locale-specific data,
    # falling back to the en_US defaults defined above.
    initialize = function(locale = NULL) {
      if (!is.null(locale)) {
        # check global locales
        super$check_locale(locale)
        # check internet provider locales
        check_locale_(locale, internet_provider_locales)
        self$locale <- locale
      } else {
        self$locale <- 'en_US'
      }
      private$parse_eval_safe('free_email_domains')
      private$parse_eval_safe('safe_email_tlds')
      private$parse_eval_safe('email_formats')
      private$parse_eval_safe('user_name_formats')
      private$parse_eval_safe('tlds')
      private$parse_eval_safe('replacements')
    },

    # Convert a string to ASCII: apply the locale's literal replacement
    # pairs, then transliterate the remainder via stringi.
    to_ascii = function(x) {
      if (length(self$replacements) != 0) {
        # BUG FIX: previously indexed self$replacements[1]/[2] on every
        # iteration, so only the first pair was ever applied. Each element
        # is a c(search, replace) pair, cf. the python-faker loop:
        # for search, replace in self.replacements:
        #   string = string.replace(search, replace)
        for (i in seq_along(self$replacements)) {
          x <- gsub(self$replacements[[i]][1], self$replacements[[i]][2], x)
        }
      }
      check4pkg("stringi")
      stringi::stri_trans_general(x, "latin-ascii")
    },

    # An email address; uses `domain` verbatim when given, otherwise a
    # random template from email_formats.
    email = function(domain = NULL) {
      if (!is.null(domain)) {
        sprintf('%s@%s', self$user_name(), domain)
      } else {
        pattern <- super$random_element(self$email_formats)
        out <- list(
          user_name = self$user_name(),
          domain_name = self$domain_name(),
          free_email_domain = self$free_email_domain()
        )
        whisker::whisker.render(pattern, data = out)
      }
    },

    # An address under the reserved example.* domains (safe to publish)
    safe_email = function() {
      tolower(sprintf('%s@example.%s',
        self$user_name(), super$random_element(self$safe_email_tlds)
      ))
    },
    free_email = function() {
      tolower(paste0(self$user_name(), '@', self$free_email_domain()))
    },
    company_email = function() {
      tolower(paste0(self$user_name(), '@', self$domain_name()))
    },

    # ascii_* variants additionally run the result through to_ascii()
    ascii_email = function() {
      pattern <- super$random_element(self$email_formats)
      out <- list(
        user_name = self$user_name(),
        domain_name = self$domain_name(),
        free_email_domain = self$free_email_domain()
      )
      tolower(self$to_ascii(whisker::whisker.render(pattern, data = out)))
    },
    ascii_safe_email = function() {
      tolower(self$to_ascii(
        paste0(self$user_name(), '@example.',
               super$random_element(self$safe_email_tlds))
      ))
    },
    ascii_free_email = function() {
      tolower(self$to_ascii(
        paste0(self$user_name(), '@', self$free_email_domain())
      ))
    },
    ascii_company_email = function() {
      tolower(self$to_ascii(paste0(self$user_name(), '@', self$domain_name())))
    },

    # A user name built from locale person names; falls back to en_US
    # when the current locale has no person data.
    user_name = function() {
      pattern <- super$random_element(self$user_name_formats)
      loc <- if (private$has_locale(self$locale, person_provider_locales)) {
        self$locale
      } else {
        "en_US"
      }
      out <- PersonProvider$new(locale = loc)$render(pattern)
      self$to_ascii(tolower(super$bothify(out)))
    },

    tld = function() {
      super$random_element(self$tlds)
    },
    free_email_domain = function() {
      super$random_element(self$free_email_domains)
    },

    # A url such as "https://www.example.com/"; `schemes` defaults to
    # the common web schemes.
    url = function(schemes = NULL) {
      if (is.null(schemes)) schemes <- c('http', 'https')
      # schemes is guaranteed non-NULL here, so pick one directly
      # (previously re-checked is.null in a dead branch)
      pattern <- sprintf(
        '%s://%s',
        super$random_element(schemes),
        super$random_element(self$url_formats)
      )
      whisker::whisker.render(
        template = pattern,
        data = list(domain_name = self$domain_name()))
    },

    # Produce an Internet domain name with the specified number of
    # subdomain levels (recursively prepends one word per level).
    domain_name = function(levels = 1) {
      if (levels < 1) {
        stop("levels must be greater than or equal to 1")
      } else if (levels == 1) {
        paste0(self$domain_word(), '.', self$tld())
      } else {
        paste0(self$domain_word(), '.', self$domain_name(levels - 1))
      }
    },

    # Second word of a random company name, lower-cased and ASCII-fied
    domain_word = function() {
      company <- CompanyProvider$new()
      xx <- company$company()
      xxx <- strsplit(xx, split = "\\s|-")[[1]]
      tolower(self$to_ascii(xxx[2]))
    },

    ipv4 = function() {
      check4pkg("iptools")
      iptools::ip_random(1)
      # FIXME: try to do network address later
      # if network:
      #   address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))
      #   address = str(ip_network(address, strict=False))
      # return address
    },
    # def ipv6(self, network=False):
    #   """Produce a random IPv6 address or network with a valid CIDR"""
    #   address = str(ip_address(self.generator.random.randint(
    #     2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))
    #   if network:
    #     address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))
    #     address = str(ip_network(address, strict=False))
    #   return address

    # A random MAC address. BUG FIX: a MAC (EUI-48) has 6 two-digit hex
    # octets; previously 7 were generated.
    mac_address = function() {
      mac <- replicate(6, super$random_int(0, 255))
      paste0(sprintf("%02x", mac), collapse = ":")
    },

    uri_page = function() {
      super$random_element(self$uri_pages)
    },
    # Random path of `deep` (1-4 by default) segments joined by "/"
    uri_path = function(deep = NULL) {
      deep <- if (!is.null(deep)) deep else super$random_int(1, 4)
      paste0(
        replicate(deep, super$random_element(self$uri_paths)),
        collapse = "/"
      )
    },
    uri_extension = function() {
      super$random_element(self$uri_extensions)
    },
    # A full uri built from one of the uri_formats templates
    # (a stray unused string literal was removed here)
    uri = function() {
      pattern <- super$random_element(self$uri_formats)
      dat <- list(
        url = self$url(),
        uri_path = self$uri_path(),
        uri_page = self$uri_page(),
        uri_extension = self$uri_extension()
      )
      tolower(self$to_ascii(whisker::whisker.render(pattern, data = dat)))
    },
    # A url slug; random lorem words when no value is supplied
    slug = function(value = NULL) {
      if (is.null(value))
        value <- paste0(LoremProvider$new()$words(), collapse = "-")
      return(value)
    },
    # Returns URL to placeholder image - Example: http://placehold.it/640x480
    image_url = function(width = NULL, height = NULL) {
      width_ <- if (!is.null(width)) width else super$random_int(max = 1024)
      height_ <- if (!is.null(height)) height else super$random_int(max = 1024)
      placeholder_url <- super$random_element(self$image_placeholder_services)
      whisker::whisker.render(placeholder_url,
        data = list(width = width_, height = height_))
    }
  ),
  private = list(
    # is `locale` among the locales supported by `provider`?
    has_locale = function(locale, provider) locale %in% provider,
    # overwrite field `name` with locale-specific data when it exists
    parse_eval_safe = function(name) {
      if (self$locale != "en_US") {
        tmp <- parse_eval(sprintf("int_%s_", name), self$locale)
        if (!is.null(tmp)) self[[name]] <- tmp
      }
    }
  )
)
#' @export
#' @rdname InternetProvider
# Locales with internet-provider-specific data; checked by
# InternetProvider$new() via check_locale_().
internet_provider_locales <- c(
  "en_US", "en_AU", "de_DE", "bg_BG", "cs_CZ", "fa_IR", "fr_FR",
  "hr_HR"
)
| /R/internet-provider.R | permissive | ktaranov/charlatan | R | false | false | 11,606 | r | #' InternetProvider
#'
#' @export
#' @keywords internal
#' @param locale (character) the locale to use. See
#' `internet_provider_locales` for locales supported (default: en_US)
#' @details
#' **Methods**
#'
#' - `to_ascii(x)` - convert to ascii
#' - `email(domain)` - get an email address
#' - `safe_email()` - get a safe email address
#' - `free_email()` - free email address
#' - `company_email()` - company email address
#' - `ascii_email()` - ascii email address
#' - `ascii_safe_email()` - safe ascii email address
#' - `ascii_free_email()` - an ascii free email address
#' - `ascii_company_email()` - ascii company email address
#' - `user_name()` - a user name
#' - `tld()` - a tld
#' - `free_email_domain()` - free email domain
#' - `url(schemes)` - get a url
#' - `domain_name(levels)` - get a domain name
#' - `domain_word()` - get a domain word
#' - `ipv4()` - get a random ipv4 address
#' - `mac_address()` - get a mac address
#' - `uri_page()` - get a uri page
#' - `uri_path()` - get a uri path
#' - `uri_extension()` - get a uri extension
#' - `image_url(width, height)` - get an image url
#'
#' @format NULL
#' @usage NULL
#'
#' @note Note that if a locale you set doesn't have a locale specific set
#' of data for [PersonProvider] or [CompanyProvider] we fall back to
#' `en_US`
#'
#' @examples
#' (x <- InternetProvider$new())
#' x$locale
#'
#' # uri/url/tld/etc.
#' x$tld()
#' x$slug()
#' x$domain_word()
#' x$domain_name()
#' x$domain_name(levels = 2)
#' x$domain_name(levels = 3)
#' x$domain_name(levels = 10)
#' ## url's
#' x$url()
#' x$url(schemes = c('hbbp', 'hggp'))
#' x$image_url()
#' ## uri's
#' x$uri()
#' x$uri_page()
#' x$uri_extension()
#' x$uri_path()
#' x$uri_path(deep = 1)
#' x$uri_path(deep = 2)
#' x$uri_path(deep = 3)
#' x$uri_path(deep = 4)
#'
#' # user name
#' x$user_name()
#'
#' # emails
#' x$email()
#' x$safe_email()
#' x$free_email()
#' x$company_email()
#' x$free_email_domain()
#' x$ascii_email()
#' x$ascii_safe_email()
#' x$ascii_free_email()
#' x$ascii_company_email()
#'
#' # addresses, mac, ipv4
#' x$mac_address()
#' if (requireNamespace("iptools", quietly=TRUE)) {
#' x$ipv4()
#' }
#'
#' # different locales
#' (x <- InternetProvider$new(locale = "en_AU"))
#' x$locale
#' x$tld()
#' x$email()
#' x$free_email_domain()
#'
#' (x <- InternetProvider$new(locale = "de_DE"))
#' x$locale
#' x$tld()
#' x$uri()
#' x$email()
#' x$ascii_email()
#'
#' (x <- InternetProvider$new(locale = "bg_BG"))
#' x$locale
#' x$tld()
#' x$uri()
#' x$url()
#' x$user_name()
#' x$email()
#' x$ascii_email()
#'
#' (x <- InternetProvider$new(locale = "cs_CZ"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' (x <- InternetProvider$new(locale = "fa_IR"))
#' x$url()
#'
#' (x <- InternetProvider$new(locale = "fr_FR"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' (x <- InternetProvider$new(locale = "hr_HR"))
#' x$url()
#' x$user_name()
#' x$email()
#'
#' # convert a string to ascii with stringi pkg
#' if (requireNamespace("stringi", quietly=TRUE)) {
#' x$to_ascii("anï")
#' }
InternetProvider <- R6::R6Class(
inherit = BaseProvider,
'InternetProvider',
public = list(
locale = NULL,
safe_email_tlds = c('org', 'com', 'net'),
free_email_domains = c('gmail.com', 'yahoo.com', 'hotmail.com'),
tlds = c(
'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'
),
uri_pages = c(
'index', 'home', 'search', 'main', 'post', 'homepage', 'category',
'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'
),
uri_paths = c(
'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',
'tags', 'blog', 'posts', 'list', 'explore'
),
uri_extensions = c(
'.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',
'.asp'
),
user_name_formats = c(
'{{last_names}}.{{first_names}}',
'{{first_names}}.{{last_names}}',
'{{first_names}}##',
'?{{last_names}}'
),
email_formats = c('{{user_name}}@{{domain_name}}',
'{{user_name}}@{{free_email_domain}}'),
url_formats = c('www.{{domain_name}}/', '{{domain_name}}/'),
uri_formats = c(
'{{url}}',
'{{url}}{{uri_page}}/',
'{{url}}{{uri_page}}{{uri_extension}}',
'{{url}}{{uri_path}}/{{uri_page}}/',
'{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}'
),
image_placeholder_services = c(
'https://placeholdit.imgix.net/~text?txtsize=55&txt={{width}}x{{height}}&w={{width}&h={{height}}',
'https://www.lorempixel.com/{{width}}/{{height}}',
'https://dummyimage.com/{{width}}x{{height}}'
),
replacements = list(),
initialize = function(locale = NULL) {
if (!is.null(locale)) {
# check global locales
super$check_locale(locale)
# check person provider locales
check_locale_(locale, internet_provider_locales)
self$locale <- locale
} else {
self$locale <- 'en_US'
}
private$parse_eval_safe('free_email_domains')
private$parse_eval_safe('safe_email_tlds')
private$parse_eval_safe('email_formats')
private$parse_eval_safe('user_name_formats')
private$parse_eval_safe('tlds')
private$parse_eval_safe('replacements')
private$parse_eval_safe('safe_email_tlds')
},
to_ascii = function(x) {
if (length(self$replacements) != 0) {
for (i in seq_along(self$replacements)) {
x <- gsub(self$replacements[1], self$replacements[2], x)
}
# for search, replace in self.replacements:
# string = string.replace(search, replace)
}
check4pkg("stringi")
stringi::stri_trans_general(x, "latin-ascii")
},
email = function(domain = NULL) {
if (!is.null(domain)) {
sprintf('%s@%s', self$user_name(), domain)
} else {
pattern <- super$random_element(self$email_formats)
out <- list(
user_name = self$user_name(),
domain_name = self$domain_name(),
free_email_domain = self$free_email_domain()
)
whisker::whisker.render(pattern, data = out)
}
},
safe_email = function() {
tolower(sprintf('%s@example.%s',
self$user_name(), super$random_element(self$safe_email_tlds)
))
},
free_email = function() {
tolower(paste0(self$user_name(), '@', self$free_email_domain()))
},
company_email = function() {
tolower(paste0(self$user_name(), '@', self$domain_name()))
},
ascii_email = function() {
pattern <- super$random_element(self$email_formats)
out <- list(
user_name = self$user_name(),
domain_name = self$domain_name(),
free_email_domain = self$free_email_domain()
)
tolower(self$to_ascii(whisker::whisker.render(pattern, data = out)))
},
ascii_safe_email = function() {
tolower(self$to_ascii(
paste0(self$user_name(), '@example.',
super$random_element(self$safe_email_tlds))
))
},
ascii_free_email = function() {
tolower(self$to_ascii(
paste0(self$user_name(), '@', self$free_email_domain())
))
},
ascii_company_email = function() {
tolower(self$to_ascii(paste0(self$user_name(), '@', self$domain_name())))
},
user_name = function() {
pattern <- super$random_element(self$user_name_formats)
loc <- if (private$has_locale(self$locale, person_provider_locales)) {
self$locale
} else {
"en_US"
}
out <- PersonProvider$new(locale = loc)$render(pattern)
self$to_ascii(tolower(super$bothify(out)))
},
tld = function() {
super$random_element(self$tlds)
},
free_email_domain = function() {
super$random_element(self$free_email_domains)
},
url = function(schemes = NULL) {
if (is.null(schemes)) schemes <- c('http', 'https')
pattern <- sprintf(
'%s://%s',
if (!is.null(schemes)) super$random_element(schemes) else "",
super$random_element(self$url_formats)
)
whisker::whisker.render(
template = pattern,
data = list(domain_name = self$domain_name()))
},
# Produce an Internet domain name with the specified number of
# subdomain levels
domain_name = function(levels = 1) {
if (levels < 1) {
stop("levels must be greater than or equal to 1")
} else if (levels == 1) {
paste0(self$domain_word(), '.', self$tld())
} else {
paste0(self$domain_word(), '.', self$domain_name(levels - 1))
}
},
domain_word = function() {
company <- CompanyProvider$new()
xx <- company$company()
xxx <- strsplit(xx, split = "\\s|-")[[1]]
tolower(self$to_ascii(xxx[2]))
},
ipv4 = function() {
check4pkg("iptools")
iptools::ip_random(1)
# FIXME: try to do network address later
# if network:
# address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))
# address = str(ip_network(address, strict=False))
# return address
},
# def ipv6(self, network=False):
# """Produce a random IPv6 address or network with a valid CIDR"""
# address = str(ip_address(self.generator.random.randint(
# 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))
# if network:
# address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))
# address = str(ip_network(address, strict=False))
# return address
mac_address = function() {
mac = replicate(7, super$random_int(0, 255))
paste0(sprintf("%02x", mac), collapse = ":")
},
uri_page = function() {
super$random_element(self$uri_pages)
},
uri_path = function(deep = NULL) {
deep <- if (!is.null(deep)) deep else super$random_int(1, 4)
paste0(
replicate(deep, super$random_element(self$uri_paths)),
collapse = "/"
)
},
uri_extension = function() {
super$random_element(self$uri_extensions)
},
uri = function() {
pattern <- super$random_element(self$uri_formats)
'{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}'
dat <- list(
url = self$url(),
uri_path = self$uri_path(),
uri_page = self$uri_page(),
uri_extension = self$uri_extension()
)
tolower(self$to_ascii(whisker::whisker.render(pattern, data = dat)))
},
slug = function(value = NULL) {
if (is.null(value))
value <- paste0(LoremProvider$new()$words(), collapse = "-")
return(value)
},
# Returns URL to placeholder image - Example: http://placehold.it/640x480
image_url = function(width = NULL, height = NULL) {
width_ = if (!is.null(width)) width else super$random_int(max = 1024)
height_ = if (!is.null(height)) height else super$random_int(max = 1024)
placeholder_url = super$random_element(self$image_placeholder_services)
whisker::whisker.render(placeholder_url,
data = list(width = width_, height = height_))
}
),
  private = list(
    # TRUE if `locale` appears in `provider`, the vector of supported locales.
    has_locale = function(locale, provider) locale %in% provider,
    # Replace the field `name` with its locale-specific variant (an object
    # named "int_<name>_<locale>", looked up by parse_eval) if one exists;
    # the en_US locale keeps the defaults untouched.
    parse_eval_safe = function(name) {
      if (self$locale != "en_US") {
        tmp <- parse_eval(sprintf("int_%s_", name), self$locale)
        if (!is.null(tmp)) self[[name]] <- tmp
      }
    }
  )
)
#' @export
#' @rdname InternetProvider
# Locales for which locale-specific internet data is bundled with the package.
internet_provider_locales <- c(
  "en_US", "en_AU", "de_DE", "bg_BG", "cs_CZ", "fa_IR", "fr_FR",
  "hr_HR"
)
|
# Convert polar vectors (column 1 = module/length, column 2 = azimuth in
# degrees) to rectangular (x, y) coordinates, preserving the input's shape.
VectorsToRectangular <- function (vectors)
{
  result <- vectors
  azimuths <- vectors[, 2]
  modules <- vectors[, 1]
  rads <- ToRadians(azimuths)
  # Azimuth is measured from north (clockwise), so x uses sin and y uses cos.
  result[, 1] <- sin(rads) * modules
  result[, 2] <- cos(rads) * modules
  return(result)
}
| /VecStatGraphs2D/R/VectorsToRectangular.R | no_license | ingted/R-Examples | R | false | false | 345 | r | VectorsToRectangular <- function (vectors)
{
rectangular_vectors = vectors
grades <- vectors[, 2]
module <- vectors[, 1]
radians <- ToRadians(grades)
x1 = sin(radians) * module
y1 = cos(radians) * module
rectangular_vectors[, 1] <- x1
rectangular_vectors[, 2] <- y1
return(rectangular_vectors)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_scan_pvl.R
\name{add_phenames}
\alias{add_phenames}
\title{Replace old (default) trait names with true trait names}
\usage{
add_phenames(charvec, phe1, phe2)
}
\arguments{
\item{charvec}{a character vector containing entries like "profile1" and "profile2"}
\item{phe1}{character vector of length 1 that is the true trait name for the first trait}
\item{phe2}{character vector of length 1 that is the true trait name for the second trait}
}
\description{
Replace old (default) trait names with true trait names
}
| /man/add_phenames.Rd | permissive | kbroman/qtl2pleio | R | false | true | 597 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_scan_pvl.R
\name{add_phenames}
\alias{add_phenames}
\title{Replace old (default) trait names with true trait names}
\usage{
add_phenames(charvec, phe1, phe2)
}
\arguments{
\item{charvec}{a character vector containing entries like "profile1" and "profile2"}
\item{phe1}{character vector of length 1 that is the true trait name for the first trait}
\item{phe2}{character vector of length 1 that is the true trait name for the second trait}
}
\description{
Replace old (default) trait names with true trait names
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/requests.R
\name{post_request}
\alias{post_request}
\title{post_request}
\usage{
post_request(endpoint_url, payload = list())
}
\arguments{
\item{endpoint_url}{the url for the endpoint}
\item{payload}{the parameters needed for the post request}
}
\value{
the contents of the response
}
\description{
A POST request helper used for sending a data request to bema endpoints. Takes
a \code{payload} argument: a list of settings documented by the service.
}
| /man/post_request.Rd | no_license | cunybpl/bplclientR | R | false | true | 537 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/requests.R
\name{post_request}
\alias{post_request}
\title{post_request}
\usage{
post_request(endpoint_url, payload = list())
}
\arguments{
\item{endpoint_url}{the url for the endpoint}
\item{payload}{the parameters needed for the post request}
}
\value{
the contents of the response
}
\description{
A post request route used for sending a data request to bema endpoints. Takes
a payload argument with a list for various settings documented in the service
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vizGeomorphicComponent.R
\name{vizGeomorphicComponent}
\alias{vizGeomorphicComponent}
\title{Visual Summary of Hill Landform Positions}
\usage{
vizGeomorphicComponent(
x,
s = NULL,
annotations = TRUE,
annotation.cex = 0.75,
cols = c("#D53E4F", "#FC8D59", "#FEE08B", "#E6F598", "#99D594", "#3288BD"),
...
)
}
\arguments{
\item{x}{\code{data.frame} as created by \code{soilDB::fetchOSD(..., extended=TRUE)}, see details}
\item{s}{an optional soil series name, highlighted in the figure}
\item{annotations}{logical, add number of record and normalized Shannon entropy values}
\item{annotation.cex}{annotation label scaling factor}
\item{cols}{vector of colors}
\item{\dots}{additional arguments to \verb{[iterateHydOrder]}: \verb{target = 0.9, maxIter = 20, j.amount = 0.05, verbose = FALSE}}
}
\value{
A \code{list} with the following elements:
\itemize{
\item \code{fig}: lattice object (the figure)
\item \code{order}: 1D ordering from \code{cluster::diana}
\item \code{clust}: \code{hclust} object
\item \code{match.rate}: fraction of series matching target hydrologic ordering, after clustering + rotation
}
}
\description{
A unique display of landform position probability.
}
\details{
See the \href{http://ncss-tech.github.io/AQP/soilDB/soil-series-query-functions.html}{Soil Series Query Functions} tutorial for more information.
}
\author{
D.E. Beaudette
}
| /man/vizGeomorphicComponent.Rd | no_license | cran/sharpshootR | R | false | true | 1,504 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vizGeomorphicComponent.R
\name{vizGeomorphicComponent}
\alias{vizGeomorphicComponent}
\title{Visual Summary of Hill Landform Positions}
\usage{
vizGeomorphicComponent(
x,
s = NULL,
annotations = TRUE,
annotation.cex = 0.75,
cols = c("#D53E4F", "#FC8D59", "#FEE08B", "#E6F598", "#99D594", "#3288BD"),
...
)
}
\arguments{
\item{x}{\code{data.frame} as created by \code{soilDB::fetchOSD(..., extended=TRUE)}, see details}
\item{s}{an optional soil series name, highlighted in the figure}
\item{annotations}{logical, add number of record and normalized Shannon entropy values}
\item{annotation.cex}{annotation label scaling factor}
\item{cols}{vector of colors}
\item{\dots}{additional arguments to \verb{[iterateHydOrder]}: \verb{target = 0.9, maxIter = 20, j.amount = 0.05, verbose = FALSE}}
}
\value{
A \code{list} with the following elements:
\itemize{
\item \code{fig}: lattice object (the figure)
\item \code{order}: 1D ordering from \code{cluster::diana}
\item \code{clust}: \code{hclust} object
\item \code{match.rate}: fraction of series matching target hydrologic ordering, after clustering + rotation
}
}
\description{
A unique display of landform position probability.
}
\details{
See the \href{http://ncss-tech.github.io/AQP/soilDB/soil-series-query-functions.html}{Soil Series Query Functions} tutorial for more information.
}
\author{
D.E. Beaudette
}
|
#
# Get beer data
#
# Author: Jitender Aswani, Co-Founder @datadolph.in
# Date: 3/15/2013
# Copyright (c) 2011, under the Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0) License
# For more information see: https://creativecommons.org/licenses/by-nc/3.0/
# All rights reserved.
require(data.table)
# NOTE(review): this startup() references CrunchBase paths and loaders,
# not the beer data below -- it looks like copy-paste leftover from
# another script; confirm whether it is ever called here.
startup <- function() {
  #initialize system
  initializeSystem(0)
  # publish file locations and dataset name as globals for the loaders below
  assign("folder.path", "./pads/raw-data/crunchbase/", envir=.GlobalEnv)
  assign("comps.file", "cb_companies_june_2013.csv", envir=.GlobalEnv)
  assign("comps.rounds.file", "cb_companies_rounds.csv", envir=.GlobalEnv)
  assign("investors.file", "cb_investors.csv", envir=.GlobalEnv)
  assign("dataset", "crunchbase", envir=.GlobalEnv)
  #assign("verbose", TRUE, envir=.GlobalEnv)
  # loaders are defined elsewhere in the project
  loadCompaniesStats()
  loadCompanyRounds()
  loadInvestosStats()
  #prepare pad meta data
  series <- list()
  series["source"] <- "CrunchBase"
  series["category"] <- "Financial Sector"
  series["subcategory"] <- "Investment"
  series["tags"] <- tolower(paste(series$source, "VC, venture capital, startups, US startups, investments, angel, series-a, series-b, series-c, funding, seed, biotech, ecommerce, enterprise, software, mobile, web", sep=","))
  series["pagetag"] <- "crunchbase"
  series["desc"] <- "Built using data from CrunchBase extracted on June 6, 2013."
  assign("series", series, envir=.GlobalEnv)
}
# Location of the raw beer-review data (read.csv decompresses .gz transparently)
assign("folder.path", "./pads/raw-data/beer-data/", envir=.GlobalEnv)
assign("data.file", "beer_reviews.csv.gz", envir=.GlobalEnv)
beer.data <- data.table(read.csv(paste0(folder.path, data.file)))
# key by beer id for fast grouping and the merge below
setkey(beer.data, beer_beerid)
#get beers with 500 or more reviews; .N is data.table's per-group row count
b.d.500 <- beer.data[, list(no_of_reviews=.N), by=beer_beerid][no_of_reviews >= 500]
setkey(b.d.500, beer_beerid)
#merge to get all records of beers which have 500 or more reviews
b.d.500 <- merge(b.d.500, beer.data, all=F) | /FetchData/FetchBeerRatings.R | no_license | shapang/R-1 | R | false | false | 1,902 | r | #
# Get beer data
#
# Author: Jitender Aswani, Co-Founder @datadolph.in
# Date: 3/15/2013
# Copyright (c) 2011, under the Creative Commons Attribution-NonCommercial 3.0 Unported (CC BY-NC 3.0) License
# For more information see: https://creativecommons.org/licenses/by-nc/3.0/
# All rights reserved.
require(data.table)
startup <- function() {
#initialize system
initializeSystem(0)
assign("folder.path", "./pads/raw-data/crunchbase/", envir=.GlobalEnv)
assign("comps.file", "cb_companies_june_2013.csv", envir=.GlobalEnv)
assign("comps.rounds.file", "cb_companies_rounds.csv", envir=.GlobalEnv)
assign("investors.file", "cb_investors.csv", envir=.GlobalEnv)
assign("dataset", "crunchbase", envir=.GlobalEnv)
#assign("verbose", TRUE, envir=.GlobalEnv)
loadCompaniesStats()
loadCompanyRounds()
loadInvestosStats()
#prepare pad meta data
series <- list()
series["source"] <- "CrunchBase"
series["category"] <- "Financial Sector"
series["subcategory"] <- "Investment"
series["tags"] <- tolower(paste(series$source, "VC, venture capital, startups, US startups, investments, angel, series-a, series-b, series-c, funding, seed, biotech, ecommerce, enterprise, software, mobile, web", sep=","))
series["pagetag"] <- "crunchbase"
series["desc"] <- "Built using data from CrunchBase extracted on June 6, 2013."
assign("series", series, envir=.GlobalEnv)
}
assign("folder.path", "./pads/raw-data/beer-data/", envir=.GlobalEnv)
assign("data.file", "beer_reviews.csv.gz", envir=.GlobalEnv)
beer.data <- data.table(read.csv(paste(folder.path, data.file, sep="")))
setkey(beer.data, beer_beerid)
#get beers with 500 or more reviews
b.d.500 <- beer.data[, list(no_of_reviews=nrow(.SD)), by=beer_beerid][no_of_reviews >= 500]
setkey(b.d.500, beer_beerid)
#merge to get all records ofbeers which have 500 or more reviews
b.d.500 <- merge(b.d.500, beer.data, all=F) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{setEdgeTargetArrowShapeBypass}
\alias{setEdgeTargetArrowShapeBypass}
\title{Set Edge Target Arrow Shape Bypass}
\usage{
setEdgeTargetArrowShapeBypass(
edge.names,
new.shapes,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{edge.names}{List of edge names}
\item{new.shapes}{List of values to set, or single value. See \link{getArrowShapes}.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Override the target arrow shape for particular edges.
}
\details{
This method permanently overrides any default values or mappings
defined for this visual property of the edge or edges specified. This method
ultimately calls the generic function, \link{setEdgePropertyBypass}, which
can be used to set any visual property. To restore defaults and mappings, use
\link{clearEdgePropertyBypass}.
}
\examples{
\donttest{
setEdgeTargetArrowShapeBypass()
}
}
\seealso{
{
\link{setEdgePropertyBypass},
\link{clearEdgePropertyBypass}
}
}
| /man/setEdgeTargetArrowShapeBypass.Rd | permissive | kumonismo/RCy3 | R | false | true | 1,378 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleBypasses.R
\name{setEdgeTargetArrowShapeBypass}
\alias{setEdgeTargetArrowShapeBypass}
\title{Set Edge Target Arrow Shape Bypass}
\usage{
setEdgeTargetArrowShapeBypass(
edge.names,
new.shapes,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{edge.names}{List of edge names}
\item{new.shapes}{List of values to set, or single value. See \link{getArrowShapes}.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Override the target arrow shape for particular edges.
}
\details{
This method permanently overrides any default values or mappings
defined for this visual property of the edge or edges specified. This method
ultimately calls the generic function, \link{setEdgePropertyBypass}, which
can be used to set any visual property. To restore defaults and mappings, use
\link{clearEdgePropertyBypass}.
}
\examples{
\donttest{
setEdgeTargetArrowShapeBypass()
}
}
\seealso{
{
\link{setEdgePropertyBypass},
\link{clearEdgePropertyBypass}
}
}
|
acontext("PredictedPeaks data set")
require(httr)
# Download the example .RData from the animint-examples repo to a temp file.
PredictedPeaks.RData <- file.path(tempdir(), "PredictedPeaks.RData")
request <- GET("http://github.com/tdhock/animint-examples/blob/master/data/PredictedPeaks.RData?raw=true")
stop_for_status(request)
writeBin(content(request), PredictedPeaks.RData)
## If we don't load this data set into the global environment, then we
## get Error in eval(expr, envir, enclos) (from helper-functions.R#5)
## : object 'PredictedPeaks' not found
load(PredictedPeaks.RData, .GlobalEnv)
# Rows whose cell type matches the facet row; used as hover dots in viz 2.
hover.dots <- subset(PredictedPeaks$chromCounts, nonInputType==type)
## First interactive visualization: three linked plots (oneChrom, chroms,
## scatter) sharing the selection variables dotID and chrom. The oneChrom
## labels carry an href to the UCSC genome browser; the older
## clickSelects variant is kept below as commented-out reference.
viz <- list(
  oneChrom=ggplot()+
    ggtitle("PeakSegJoint detections on selected chromosome")+
    theme_bw()+
    coord_cartesian(xlim=c(0, 1))+
    theme_animint(width=1500, height=100)+
    theme(axis.line.x=element_blank(), axis.text.x=element_blank(),
          axis.ticks.x=element_blank(), axis.title.x=element_blank())+
    ## geom_text(aes(relative.middle, type.fac, label=samples.up,
    ##               clickSelects=peak.name,
    ##               showSelected2=chrom,
    ##               showSelected=dotID),
    ##           size=11,
    ##           data=PredictedPeaks$chromCounts)+
    geom_text(aes(relative.middle, type.fac, label=samples.up,
                  href=paste0(
                    "http://genome.ucsc.edu/cgi-bin/hgTracks?db=hg19&position=",
                    chrom, ":", zoomStart, "-", zoomEnd),
                  showSelected2=chrom,
                  showSelected=dotID),
              size=11,
              data=PredictedPeaks$chromCounts)+
    scale_y_discrete("cell type", drop=FALSE),
  ## Chromosome overview: click a segment, point, or count label to
  ## choose a chromosome.
  chroms=ggplot()+
    theme_bw()+
    theme_animint(width=1500, height=330)+
    scale_y_discrete("chromosome", drop=FALSE)+
    scale_x_continuous("position on chromosome (mega bases)")+
    geom_text(aes(0, chrom, label=paste0(peaks, "_"),
                  clickSelects=chrom,
                  showSelected=dotID),
              hjust=1,
              size=11,
              data=PredictedPeaks$countsByChrom)+
    geom_segment(aes(chromStart/1e6, chrom,
                     clickSelects=chrom,
                     xend=chromEnd/1e6, yend=chrom),
                 size=9,
                 data=PredictedPeaks$chrom.ranges)+
    geom_point(aes(chromEnd/1e6, chrom,
                   id=chrom,
                   clickSelects=chrom),
               size=5,
               data=PredictedPeaks$chrom.ranges)+
    geom_text(aes(max(PredictedPeaks$chrom.ranges$chromEnd)/2e6, chrom,
                  showSelected=dotID,
                  label=totals),
              data=PredictedPeaks$scatter.text),
  ## Scatter heat-map: samples-with-peak counts per cell type, filled by
  ## log10 count; clicking a rect selects a dotID.
  scatter=ggplot()+
    geom_hline(aes(yintercept=N),
               color="grey",
               data=PredictedPeaks$counts.Input)+
    scale_x_continuous("number of samples with a peak")+
    facet_grid(nonInputType ~ .)+
    theme_bw()+
    scale_fill_gradient(low="grey", high="red")+
    theme_animint(width=1500)+
    theme(panel.margin=grid::unit(0, "cm"))+
    geom_vline(aes(xintercept=N),
               color="grey",
               data=PredictedPeaks$counts.not.Input)+
    geom_rect(aes(xmin=up-size, xmax=up+size,
                  ymin=Input-size, ymax=Input+size,
                  tooltip=totals,
                  clickSelects=dotID,
                  showSelected=chrom,
                  fill=log10(count)),
              color="transparent",
              data=PredictedPeaks$bg.rect),
  ## Initial selection state.
  first=list(dotID="38 neutro samples, 1 Input samples", chrom="chr16"))
# Render the first animint and parse the resulting HTML for inspection.
info <- animint2HTML(viz)
## Simulate mouseover using javascript?
## myScript <- 'myObj = document.getElementById("chrM");
## myArray = [];
## for(var b in myObj) {
##   myArray.push(b);
## }
## return myArray;'
## remDr$executeScript(myScript)
## remDr$executeScript('return document.getElementById("chrM").onmouseover();')
## Simulate mouseover using RSelenium?
## e <- remDr$findElement("id", "chrM")
## remDr$mouseMoveToLocation(webElement=e)
## e <- remDr$findElement("id", "chrY")
## remDr$mouseMoveToLocation(webElement=e)
## getStyleValue(getHTML(), '//g[@class="geom4_point_chroms"]//circle', "opacity")
## getNodeSet(getHTML(), '//g[@class="geom4_point_chroms"]//circle')
# Only the chrom selector should render a widget (no selectize option given).
test_that("without selectize option, only render chrom widget", {
  widget.vec <- getSelectorWidgets(info$html)
  expect_identical(widget.vec, "chrom")
})
# Collect the numeric text labels currently rendered in the oneChrom plot
# and return them as a sorted numeric vector.
getSorted <- function(){
  text.list <- getNodeSet(getHTML(), '//g[@class="geom1_text_oneChrom"]//text')
  # vapply guarantees a character vector even for an empty node set,
  # unlike sapply which would return list().
  value.vec <- vapply(text.list, xmlValue, character(1))
  sort(as.numeric(value.vec))
}
# With the initial selection, exactly two labels (1 and 38) are drawn.
test_that("initially 2 text elements rendered", {
  num.vec <- getSorted()
  expect_equal(num.vec, c(1, 38))
})
# Select chrM and give the transition time to finish.
clickID("chrM")
Sys.sleep(1)
exp.vec <- c(1, 14, 38)
test_that("3 elements rendered (first time)", {
  num.vec <- getSorted()
  expect_equal(num.vec, exp.vec)
})
# Toggle away to chrY and back to chrM; the same labels must re-render.
clickID("chrY")
Sys.sleep(1)
clickID("chrM")
Sys.sleep(1)
test_that("3 elements rendered (second time)", {
  num.vec <- getSorted()
  expect_equal(num.vec, exp.vec)
})
## Data for the second visualization: a manual "specific" threshold plus
## "max samples" thresholds attached to the count tables, so both kinds
## of threshold line share one color legend.
thresh.df <- data.frame(max.input.samples=9, thresh.type="specific")
PredictedPeaks$counts.not.Input$thresh.type <- "max samples"
PredictedPeaks$counts.Input$thresh.type <- "max samples"
## Second visualization: like the first, but with threshold lines in a
## color legend, hover dots on the scatter, and explicit selectize
## options (dotID widget on, chrom widget off).
## NOTE(review): show_guide is deprecated in modern ggplot2 (replaced by
## show.legend); presumably fine for the ggplot2 version animint targets
## -- confirm.
viz <- list(
  oneChrom=ggplot()+
    ggtitle("PeakSegJoint detections on selected chromosome")+
    theme_bw()+
    coord_cartesian(xlim=c(0, 1))+
    theme_animint(width=1500, height=100)+
    theme(axis.line.x=element_blank(), axis.text.x=element_blank(),
          axis.ticks.x=element_blank(), axis.title.x=element_blank())+
    geom_text(aes(relative.middle, type.fac, label=samples.up,
                  clickSelects=peak.name,
                  showSelected2=chrom,
                  showSelected=dotID),
              size=11,
              data=PredictedPeaks$chromCounts)+
    scale_y_discrete("cell type", drop=FALSE),
  chroms=ggplot()+
    theme_bw()+
    theme_animint(width=1500, height=330)+
    scale_y_discrete("chromosome", drop=FALSE)+
    scale_x_continuous("position on chromosome (mega bases)")+
    geom_text(aes(0, chrom, label=paste0(peaks, "_"),
                  clickSelects=chrom,
                  showSelected=dotID),
              hjust=1,
              size=11,
              data=PredictedPeaks$countsByChrom)+
    geom_segment(aes(chromStart/1e6, chrom,
                     clickSelects=chrom,
                     xend=chromEnd/1e6, yend=chrom),
                 size=9,
                 data=PredictedPeaks$chrom.ranges)+
    geom_point(aes(chromEnd/1e6, chrom,
                   id=chrom,
                   clickSelects=chrom),
               size=5,
               data=PredictedPeaks$chrom.ranges)+
    geom_text(aes(max(PredictedPeaks$chrom.ranges$chromEnd)/2e6, chrom,
                  showSelected=dotID,
                  label=totals),
              data=PredictedPeaks$scatter.text),
  scatter=ggplot()+
    geom_vline(aes(xintercept=N, color=thresh.type),
               data=PredictedPeaks$counts.not.Input)+
    scale_color_manual("threshold", values=c(
      "max samples"="grey",
      specific="grey30"))+
    geom_hline(aes(yintercept=max.input.samples+0.5, color=thresh.type),
               show_guide=TRUE,
               data=thresh.df)+
    geom_hline(aes(yintercept=N, color=thresh.type),
               show_guide=TRUE,
               data=PredictedPeaks$counts.Input)+
    scale_x_continuous("number of samples with a peak")+
    facet_grid(nonInputType ~ .)+
    theme_bw()+
    scale_fill_gradient(low="grey", high="red")+
    theme_animint(width=1500)+
    theme(panel.margin=grid::unit(0, "cm"))+
    geom_rect(aes(xmin=up-size, xmax=up+size,
                  ymin=Input-size, ymax=Input+size,
                  tooltip=totals,
                  clickSelects=dotID,
                  showSelected=chrom,
                  fill=log10(count)),
              color="transparent",
              data=PredictedPeaks$bg.rect)+
    ## Hover dots drawn on top of the rects for the selected peak.
    geom_point(aes(up, Input,
                   showSelected=peak.name),
               data=hover.dots),
  selectize=list(dotID=TRUE, chrom=FALSE),
  first=list(dotID="38 neutro samples, 1 Input samples", chrom="chr16"))
## TODO:href + hoverselects!
# Render the second animint and parse its HTML.
info <- animint2HTML(viz)
test_that("selectize option respected", {
  widget.vec <- getSelectorWidgets(info$html)
  expected.widgets <- c("dotID", "thresh.type")
  expect_identical(sort(widget.vec), sort(expected.widgets))
})
# The continuous fill legend should be binned into 5 rects, with no lines.
test_that("rects rendered in fill legend", {
  rect.list <- getNodeSet(info$html, '//tr[@class="log10(count)"]//rect')
  expect_equal(length(rect.list), 5)
})
test_that("no lines rendered in fill legend", {
  line.list <- getNodeSet(info$html, '//tr[@class="log10(count)"]//line')
  expect_equal(length(line.list), 0)
})
# The discrete color legend has one line per threshold type.
test_that("lines in color legend", {
  line.list <- getNodeSet(info$html, '//tr[@class="thresh_type"]//line')
  expect_equal(length(line.list), 2)
})
# hline elements drawn by the "specific" threshold geom in the scatter plot.
specific_hlines <- function(html=getHTML()){
  getNodeSet(html, '//g[@class="geom7_hline_scatter"]//line')
}
# Computed opacity of the "specific" legend entry, as a number.
specific_opacity <- function(html=getHTML()){
  opacity.chr <- getStyleValue(html, '//td[@id="specific"]', "opacity")
  as.numeric(opacity.chr)
}
# Initially the "specific" hlines are drawn and the legend entry is opaque.
test_that("initially rendered hlines", {
  line.list <- specific_hlines(info$html)
  expect_equal(length(line.list), 2)
  computed.opacity <- specific_opacity(info$html)
  expect_equal(computed.opacity, 1)
})
# Clicking the "specific" legend entry hides its hlines and dims the entry.
test_that("hlines after clicking specific", {
  html <- clickHTML(id="specific")
  line.list <- specific_hlines(html)
  expect_equal(length(line.list), 0)
  computed.opacity <- specific_opacity(html)
  expect_equal(computed.opacity, 0.5)
})
# A second click restores the hlines and full opacity.
test_that("hlines after clicking specific again", {
  html <- clickHTML(id="specific")
  line.list <- specific_hlines(html)
  expect_equal(length(line.list), 2)
  computed.opacity <- specific_opacity(html)
  expect_equal(computed.opacity, 1)
})
## e <- remDr$findElement("class name", "show_hide_selector_widgets")
## e$clickElement()
## remDr$findElements("class name", "selectize-input")
## It takes a long time to render the selectize widget with many
## levels, why?
| /tests/testthat/test-renderer2-PredictedPeaks.R | no_license | cpsievert/animint | R | false | false | 10,024 | r | acontext("PredictedPeaks data set")
require(httr)
PredictedPeaks.RData <- file.path(tempdir(), "PredictedPeaks.RData")
request <- GET("http://github.com/tdhock/animint-examples/blob/master/data/PredictedPeaks.RData?raw=true")
stop_for_status(request)
writeBin(content(request), PredictedPeaks.RData)
## If we don't load this data set into the global environment, then we
## get Error in eval(expr, envir, enclos) (from helper-functions.R#5)
## : object 'PredictedPeaks' not found
load(PredictedPeaks.RData, .GlobalEnv)
hover.dots <- subset(PredictedPeaks$chromCounts, nonInputType==type)
viz <- list(
oneChrom=ggplot()+
ggtitle("PeakSegJoint detections on selected chromosome")+
theme_bw()+
coord_cartesian(xlim=c(0, 1))+
theme_animint(width=1500, height=100)+
theme(axis.line.x=element_blank(), axis.text.x=element_blank(),
axis.ticks.x=element_blank(), axis.title.x=element_blank())+
## geom_text(aes(relative.middle, type.fac, label=samples.up,
## clickSelects=peak.name,
## showSelected2=chrom,
## showSelected=dotID),
## size=11,
## data=PredictedPeaks$chromCounts)+
geom_text(aes(relative.middle, type.fac, label=samples.up,
href=paste0(
"http://genome.ucsc.edu/cgi-bin/hgTracks?db=hg19&position=",
chrom, ":", zoomStart, "-", zoomEnd),
showSelected2=chrom,
showSelected=dotID),
size=11,
data=PredictedPeaks$chromCounts)+
scale_y_discrete("cell type", drop=FALSE),
chroms=ggplot()+
theme_bw()+
theme_animint(width=1500, height=330)+
scale_y_discrete("chromosome", drop=FALSE)+
scale_x_continuous("position on chromosome (mega bases)")+
geom_text(aes(0, chrom, label=paste0(peaks, "_"),
clickSelects=chrom,
showSelected=dotID),
hjust=1,
size=11,
data=PredictedPeaks$countsByChrom)+
geom_segment(aes(chromStart/1e6, chrom,
clickSelects=chrom,
xend=chromEnd/1e6, yend=chrom),
size=9,
data=PredictedPeaks$chrom.ranges)+
geom_point(aes(chromEnd/1e6, chrom,
id=chrom,
clickSelects=chrom),
size=5,
data=PredictedPeaks$chrom.ranges)+
geom_text(aes(max(PredictedPeaks$chrom.ranges$chromEnd)/2e6, chrom,
showSelected=dotID,
label=totals),
data=PredictedPeaks$scatter.text),
scatter=ggplot()+
geom_hline(aes(yintercept=N),
color="grey",
data=PredictedPeaks$counts.Input)+
scale_x_continuous("number of samples with a peak")+
facet_grid(nonInputType ~ .)+
theme_bw()+
scale_fill_gradient(low="grey", high="red")+
theme_animint(width=1500)+
theme(panel.margin=grid::unit(0, "cm"))+
geom_vline(aes(xintercept=N),
color="grey",
data=PredictedPeaks$counts.not.Input)+
geom_rect(aes(xmin=up-size, xmax=up+size,
ymin=Input-size, ymax=Input+size,
tooltip=totals,
clickSelects=dotID,
showSelected=chrom,
fill=log10(count)),
color="transparent",
data=PredictedPeaks$bg.rect),
first=list(dotID="38 neutro samples, 1 Input samples", chrom="chr16"))
info <- animint2HTML(viz)
## Simulate mouseover using javascript?
## myScript <- 'myObj = document.getElementById("chrM");
## myArray = [];
## for(var b in myObj) {
## myArray.push(b);
## }
## return myArray;'
## remDr$executeScript(myScript)
## remDr$executeScript('return document.getElementById("chrM").onmouseover();')
## Simulate mouseover using RSelenium?
## e <- remDr$findElement("id", "chrM")
## remDr$mouseMoveToLocation(webElement=e)
## e <- remDr$findElement("id", "chrY")
## remDr$mouseMoveToLocation(webElement=e)
## getStyleValue(getHTML(), '//g[@class="geom4_point_chroms"]//circle', "opacity")
## getNodeSet(getHTML(), '//g[@class="geom4_point_chroms"]//circle')
test_that("without selectize option, only render chrom widget", {
widget.vec <- getSelectorWidgets(info$html)
expect_identical(widget.vec, "chrom")
})
getSorted <- function(){
text.list <- getNodeSet(getHTML(), '//g[@class="geom1_text_oneChrom"]//text')
value.vec <- sapply(text.list, xmlValue)
sort(as.numeric(value.vec))
}
test_that("initially 2 text elements rendered", {
num.vec <- getSorted()
expect_equal(num.vec, c(1, 38))
})
clickID("chrM")
Sys.sleep(1)
exp.vec <- c(1, 14, 38)
test_that("3 elements rendered (first time)", {
num.vec <- getSorted()
expect_equal(num.vec, exp.vec)
})
clickID("chrY")
Sys.sleep(1)
clickID("chrM")
Sys.sleep(1)
test_that("3 elements rendered (second time)", {
num.vec <- getSorted()
expect_equal(num.vec, exp.vec)
})
thresh.df <- data.frame(max.input.samples=9, thresh.type="specific")
PredictedPeaks$counts.not.Input$thresh.type <- "max samples"
PredictedPeaks$counts.Input$thresh.type <- "max samples"
viz <- list(
oneChrom=ggplot()+
ggtitle("PeakSegJoint detections on selected chromosome")+
theme_bw()+
coord_cartesian(xlim=c(0, 1))+
theme_animint(width=1500, height=100)+
theme(axis.line.x=element_blank(), axis.text.x=element_blank(),
axis.ticks.x=element_blank(), axis.title.x=element_blank())+
geom_text(aes(relative.middle, type.fac, label=samples.up,
clickSelects=peak.name,
showSelected2=chrom,
showSelected=dotID),
size=11,
data=PredictedPeaks$chromCounts)+
scale_y_discrete("cell type", drop=FALSE),
chroms=ggplot()+
theme_bw()+
theme_animint(width=1500, height=330)+
scale_y_discrete("chromosome", drop=FALSE)+
scale_x_continuous("position on chromosome (mega bases)")+
geom_text(aes(0, chrom, label=paste0(peaks, "_"),
clickSelects=chrom,
showSelected=dotID),
hjust=1,
size=11,
data=PredictedPeaks$countsByChrom)+
geom_segment(aes(chromStart/1e6, chrom,
clickSelects=chrom,
xend=chromEnd/1e6, yend=chrom),
size=9,
data=PredictedPeaks$chrom.ranges)+
geom_point(aes(chromEnd/1e6, chrom,
id=chrom,
clickSelects=chrom),
size=5,
data=PredictedPeaks$chrom.ranges)+
geom_text(aes(max(PredictedPeaks$chrom.ranges$chromEnd)/2e6, chrom,
showSelected=dotID,
label=totals),
data=PredictedPeaks$scatter.text),
scatter=ggplot()+
geom_vline(aes(xintercept=N, color=thresh.type),
data=PredictedPeaks$counts.not.Input)+
scale_color_manual("threshold", values=c(
"max samples"="grey",
specific="grey30"))+
geom_hline(aes(yintercept=max.input.samples+0.5, color=thresh.type),
show_guide=TRUE,
data=thresh.df)+
geom_hline(aes(yintercept=N, color=thresh.type),
show_guide=TRUE,
data=PredictedPeaks$counts.Input)+
scale_x_continuous("number of samples with a peak")+
facet_grid(nonInputType ~ .)+
theme_bw()+
scale_fill_gradient(low="grey", high="red")+
theme_animint(width=1500)+
theme(panel.margin=grid::unit(0, "cm"))+
geom_rect(aes(xmin=up-size, xmax=up+size,
ymin=Input-size, ymax=Input+size,
tooltip=totals,
clickSelects=dotID,
showSelected=chrom,
fill=log10(count)),
color="transparent",
data=PredictedPeaks$bg.rect)+
geom_point(aes(up, Input,
showSelected=peak.name),
data=hover.dots),
selectize=list(dotID=TRUE, chrom=FALSE),
first=list(dotID="38 neutro samples, 1 Input samples", chrom="chr16"))
## TODO:href + hoverselects!
info <- animint2HTML(viz)
test_that("selectize option respected", {
widget.vec <- getSelectorWidgets(info$html)
expected.widgets <- c("dotID", "thresh.type")
expect_identical(sort(widget.vec), sort(expected.widgets))
})
test_that("rects rendered in fill legend", {
rect.list <- getNodeSet(info$html, '//tr[@class="log10(count)"]//rect')
expect_equal(length(rect.list), 5)
})
test_that("no lines rendered in fill legend", {
line.list <- getNodeSet(info$html, '//tr[@class="log10(count)"]//line')
expect_equal(length(line.list), 0)
})
test_that("lines in color legend", {
line.list <- getNodeSet(info$html, '//tr[@class="thresh_type"]//line')
expect_equal(length(line.list), 2)
})
specific_hlines <- function(html=getHTML()){
getNodeSet(html, '//g[@class="geom7_hline_scatter"]//line')
}
specific_opacity <- function(html=getHTML()){
as.numeric(getStyleValue(html, '//td[@id="specific"]', "opacity"))
}
test_that("initially rendered hlines", {
line.list <- specific_hlines(info$html)
expect_equal(length(line.list), 2)
computed.opacity <- specific_opacity(info$html)
expect_equal(computed.opacity, 1)
})
test_that("hlines after clicking specific", {
html <- clickHTML(id="specific")
line.list <- specific_hlines(html)
expect_equal(length(line.list), 0)
computed.opacity <- specific_opacity(html)
expect_equal(computed.opacity, 0.5)
})
test_that("hlines after clicking specific again", {
html <- clickHTML(id="specific")
line.list <- specific_hlines(html)
expect_equal(length(line.list), 2)
computed.opacity <- specific_opacity(html)
expect_equal(computed.opacity, 1)
})
## e <- remDr$findElement("class name", "show_hide_selector_widgets")
## e$clickElement()
## remDr$findElements("class name", "selectize-input")
## It takes a long time to render the selectize widget with many
## levels, why?
|
library(lcc)
### Name: hue
### Title: Hue color data
### Aliases: hue
### Keywords: datasets
### ** Examples
# Load and inspect the example data set.
data(hue)
summary(hue)
str(hue)
## Second degree polynomial model with random intercept, slope and
## quadratic term including an exponential variance function using
## time as covariate.
model<-lcc(dataset = hue, subject = "Fruit", resp = "H_mean",
           method = "Method", time = "Time", qf = 2, qr = 2,
           components = TRUE, time_lcc = list(from = min(hue$Time),
           to = max(hue$Time), n=40), var.class=varExp,
           weights.form="time")
# Model-level and lcc-level summaries of the fitted object.
summary(model, type="model")
summary(model, type="lcc")
## for discussion on the analysis of complete data set,
## see Oliveira et al. (2018)
| /data/genthat_extracted_code/lcc/examples/hue.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 721 | r | library(lcc)
### Name: hue
### Title: Hue color data
### Aliases: hue
### Keywords: datasets
### ** Examples
data(hue)
summary(hue)
str(hue)
## Second degree polynomial model with random intercept, slope and
## quadratic term including an exponential variance function using
## time as covariate.
model<-lcc(dataset = hue, subject = "Fruit", resp = "H_mean",
method = "Method", time = "Time", qf = 2, qr = 2,
components = TRUE, time_lcc = list(from = min(hue$Time),
to = max(hue$Time), n=40), var.class=varExp,
weights.form="time")
summary(model, type="model")
summary(model, type="lcc")
## for discussion on the analysis of complete data set,
## see Oliveira et al. (2018)
|
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
# Simulation settings: inner replicates per call, number of outer
# repetitions, and the community size picked from a log-spaced grid
# (10^2 .. 10^5, 13 values, reversed -- element 10).
reps <- 50
outerreps <- 1000
size <- rev(round(10^seq(2, 5, 0.25)))[10]
# Parallel backend: 12 multisession workers (future/furrr).
nc <- 12
plan(strategy=multisession, workers=nc)
# Run the outer repetitions in reverse order, writing one CSV per
# repetition; seq_len() is the safe idiom in place of 1:outerreps.
map(rev(seq_len(outerreps)), function(x){
  start <- Sys.time()
  out <- checkplot_inf(flatten(flatten(SADs_list))[[28]], l=1, inds=size, reps=reps)
  write.csv(out, paste("/scratch/mr984/SAD28","l",1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
  rm(out)
  print(Sys.time()-start)
})
| /scripts/checkplots_for_parallel_amarel/asy_839.R | no_license | dushoff/diversity_metrics | R | false | false | 536 | r | source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
10
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[28]], l=1, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD28","l",1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
|
#' @title Metric: Number of Traces with Selfloop
#'
#' @description Returns the number of traces in which one or more selfloops occur, both in absolute and relative numbers.
#'
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#'
#' @export number_of_traces_with_selfloop
number_of_traces_with_selfloop <- function(eventlog) {
	stop_eventlog(eventlog)
	## Per-trace selfloop counts: one row per trace in the log.
	selfloops_per_trace <- number_of_selfloops(eventlog, level_of_analysis = "trace")
	n_traces <- nrow(selfloops_per_trace)
	## Traces containing at least one selfloop.
	n_with_selfloop <- nrow(selfloops_per_trace %>% filter(absolute > 0))
	## One-row tibble: absolute count and the fraction of all traces.
	tbl_df(data.frame(absolute = as.numeric(n_with_selfloop),
	                  relative = n_with_selfloop / n_traces))
}
| /edeaR/R/number_of_traces_with_selfloop.R | no_license | ingted/R-Examples | R | false | false | 708 | r | #' @title Metric: Number of Traces with Selfloop
#'
#' @description Returns the number of traces in which one or more selfloops occur, both in absolute and relative numbers.
#'
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#'
#' @export number_of_traces_with_selfloop
number_of_traces_with_selfloop <- function(eventlog) {
stop_eventlog(eventlog)
r <- number_of_selfloops(eventlog, level_of_analysis = "trace")
ntraces <- nrow(r)
r <- r %>% filter(absolute > 0)
a <- nrow(r)
r <- data.frame(c(a, a/ntraces))
r <- as.data.frame(t(r))
colnames(r) <- c("absolute","relative")
row.names(r) <- NULL
r <- tbl_df(r)
return(r)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hic_loess.R
\name{hic_loess}
\alias{hic_loess}
\title{Perform joint loess normalization on two Hi-C datasets}
\usage{
hic_loess(hic.table, degree = 1, span = NA, loess.criterion = "gcv",
Plot = FALSE, parallel = FALSE, BP_param = bpparam(),
check.differences = FALSE, diff.thresh = "auto", iterations = 10000)
}
\arguments{
\item{hic.table}{A hic.table, or a list of hic.tables, generated from
the create.hic.table function.
If you want to perform
normalization over multiple chromosomes from each cell line at
once utilizing parallel computing enter a list of hic.tables and
set parallel = TRUE.}
\item{degree}{Degree of polynomial to be used for loess. Options
are 0, 1, 2. The default setting is degree = 1.}
\item{span}{User set span for loess. If set to NA, the span will
be selected automatically using the setting of loess.criterion.
Defaults to NA so that
automatic span selection is performed.}
\item{loess.criterion}{Automatic span selection criterion. Can use either
'gcv' for generalized cross-validation or 'aicc' for Akaike Information
Criterion.
Span selection uses a slightly modified version of the \code{loess.as()}
function from the \code{fANCOVA} package. Defaults to 'gcv'.}
\item{Plot}{Logical, should the MD plot showing before/after loess
normalization be output? Defaults to FALSE.}
\item{parallel}{Logical, set to TRUE to utilize the \code{parallel} package's
parallelized computing. Only works on unix operating systems. Only useful if
entering a list of hic.tables. Defauts to FALSE.}
\item{BP_param}{Parameters for BiocParallel. Defaults to bpparam(), see help
for BiocParallel for more information
\url{http://bioconductor.org/packages/release/bioc/vignettes/BiocParallel/
inst/doc/Introduction_To_BiocParallel.pdf}}
\item{check.differences}{Logical, should difference detection be performed? If TRUE,
the same procedure as hic_diff will be performed. If FALSE,
only normalization will be performed on the entered data. Defaults to FALSE.}
\item{diff.thresh}{Fold change threshold desired to call a detected difference
clinically significant. Set to "auto" by default to indicate that the
difference threshold will be automatically calculated as 2 standard deviations
of all the adjusted M values. For no p-value adjustment
set diff.thresh = NA. To set your own threshold enter a numeric value i.e.
diff.thresh = 1. If set to "auto" or a numeric value, a check will
be made as follows: if permutation p-value < 0.05 AND M < diff.thresh (the log2
fold change for the difference between IF1 and IF2) then
the p-value will be set to 0.5. Defaults to 'auto'.}
\item{iterations}{Number of iterations for the permuation test. Will only be used
if check.differences set to TRUE. Defaults to 10000.}
}
\value{
An updated hic.table is returned with the additional columns of adj.IF1,
adj.IF2 for the respective normalized IFs, an adj.M column for the
adjusted M, and mc for the loess correction factor. If \code{check.differences}
is set to TRUE a column containing the p-values for the
significance of the difference between the two datasets will also be returned.
}
\description{
Perform joint loess normalization on two Hi-C datasets
}
\details{
The function takes in a hic.table or a list of hic.table objects created
with the \code{create.hic.loess} function. If you wish to perform joint
normalization on Hi-C data for multiple chromosomes use a list of hic.tables.
The process can be parallelized using the \code{parallel}
setting. The data is fist transformed into what is termed an MD plot (similar
to the MA plot/Bland-Altman plot). M is the log difference log2(x/y) between
the two datasets. D is the unit distance in the contact matrix. The MD plot can
be visualized with the \code{Plot} option. Loess regression is then
performed on the MD plot to model any biases between the two Hi-C datasets. An
adjusted IF is then calculated for each dataset along with an adjusted M.
See methods section of Stansfield & Dozmorov 2017 for more details. Note:
if you receive the warning "In simpleLoess(y, x, w, span, degree = degree,
parametric = parametric, ... :pseudoinverse used..." it should not effect
your results, however it can be avoided by manually setting the span to
a larger value using the span option.
}
\examples{
# Create hic.table object using included Hi-C data in sparse upper
# triangular matrix format
data("HMEC.chr22")
data("NHEK.chr22")
hic.table <- create.hic.table(HMEC.chr22, NHEK.chr22, chr= 'chr22')
# Plug hic.table into hic_loess()
result <- hic_loess(hic.table, Plot = TRUE)
# View result
result
}
| /man/hic_loess.Rd | permissive | dozmorovlab/HiCdiff | R | false | true | 4,883 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hic_loess.R
\name{hic_loess}
\alias{hic_loess}
\title{Perform joint loess normalization on two Hi-C datasets}
\usage{
hic_loess(hic.table, degree = 1, span = NA, loess.criterion = "gcv",
Plot = FALSE, parallel = FALSE, BP_param = bpparam(),
check.differences = FALSE, diff.thresh = "auto", iterations = 10000)
}
\arguments{
\item{hic.table}{hic.table or a list of hic.tables generated from
the create.hic.table function.
list of hic.tables generated from the create.hic.table function.
If you want to perform
normalization over multiple chromosomes from each cell line at
once utilizing parallel computing enter a list of hic.tables and
set parallel = TRUE.}
\item{degree}{Degree of polynomial to be used for loess. Options
are 0, 1, 2. The default setting is degree = 1.}
\item{span}{User set span for loess. If set to NA, the span will
be selected automatically using the setting of loess.criterion.
Defaults to NA so that
automatic span selection is performed.}
\item{loess.criterion}{Automatic span selection criterion. Can use either
'gcv' for generalized cross-validation or 'aicc' for Akaike Information
Criterion.
Span selection uses a slightly modified version of the \code{loess.as()}
function from the \code{fANCOVA} package. Defaults to 'gcv'.}
\item{Plot}{Logical, should the MD plot showing before/after loess
normalization be output? Defaults to FALSE.}
\item{parallel}{Logical, set to TRUE to utilize the \code{parallel} package's
parallelized computing. Only works on unix operating systems. Only useful if
entering a list of hic.tables. Defauts to FALSE.}
\item{BP_param}{Parameters for BiocParallel. Defaults to bpparam(), see help
for BiocParallel for more information
\url{http://bioconductor.org/packages/release/bioc/vignettes/BiocParallel/
inst/doc/Introduction_To_BiocParallel.pdf}}
\item{check.differences}{Logical, should difference detection be performed? If TRUE,
the same procedure as hic_diff will be performed. If FALSE,
only normalization will be performed on the entered data. Defaults to FALSE.}
\item{diff.thresh}{Fold change threshold desired to call a detected difference
clinically significant. Set to "auto" by default to indicate that the
difference threshold will be automatically calculated as 2 standard deviations
of all the adjusted M values. For no p-value adjustment
set diff.thresh = NA. To set your own threshold enter a numeric value i.e.
diff.thresh = 1. If set to "auto" or a numeric value, a check will
be made as follows: if permutation p-value < 0.05 AND M < diff.thresh (the log2
fold change for the difference between IF1 and IF2) then
the p-value will be set to 0.5. Defaults to 'auto'.}
\item{iterations}{Number of iterations for the permuation test. Will only be used
if check.differences set to TRUE. Defaults to 10000.}
}
\value{
An updated hic.table is returned with the additional columns of adj.IF1,
adj.IF2 for the respective normalized IFs, an adj.M column for the
adjusted M, and mc for the loess correction factor. If \code{check.differences}
is set to TRUE a column containing the p-values for the
significance of the difference between the two datasets will also be returned.
}
\description{
Perform joint loess normalization on two Hi-C datasets
}
\details{
The function takes in a hic.table or a list of hic.table objects created
with the \code{create.hic.loess} function. If you wish to perform joint
normalization on Hi-C data for multiple chromosomes use a list of hic.tables.
The process can be parallelized using the \code{parallel}
setting. The data is fist transformed into what is termed an MD plot (similar
to the MA plot/Bland-Altman plot). M is the log difference log2(x/y) between
the two datasets. D is the unit distance in the contact matrix. The MD plot can
be visualized with the \code{Plot} option. Loess regression is then
performed on the MD plot to model any biases between the two Hi-C datasets. An
adjusted IF is then calculated for each dataset along with an adjusted M.
See methods section of Stansfield & Dozmorov 2017 for more details. Note:
if you receive the warning "In simpleLoess(y, x, w, span, degree = degree,
parametric = parametric, ... :pseudoinverse used..." it should not effect
your results, however it can be avoided by manually setting the span to
a larger value using the span option.
}
\examples{
# Create hic.table object using included Hi-C data in sparse upper
# triangular matrix format
data("HMEC.chr22")
data("NHEK.chr22")
hic.table <- create.hic.table(HMEC.chr22, NHEK.chr22, chr= 'chr22')
# Plug hic.table into hic_loess()
result <- hic_loess(hic.table, Plot = TRUE)
# View result
result
}
|
#' @title df_line45()
#'
#' @description Cette fonction crée un dataframe avec 2 observations pour créer une ligne à 45 degrés dans un ggplot
#' @examples +geom_line(data = df_line45(),aes(x=x, y=y), color = "lightgray")
#' @export
df_line45 <- function(label="label"){
data.frame( x= c(-Inf, Inf), y = c(-Inf, Inf), label = factor(label))
}
| /R/df_line45.R | permissive | SimonCoulombe/pkgsimon | R | false | false | 349 | r | #' @title df_line45()
#'
#' @description Cette fonction crée un dataframe avec 2 observations pour créer une ligne à 45 degrés dans un ggplot
#' @examples +geom_line(data = df_line45(),aes(x=x, y=y), color = "lightgray")
#' @export
df_line45 <- function(label="label"){
data.frame( x= c(-Inf, Inf), y = c(-Inf, Inf), label = factor(label))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{gs8}
\alias{gs8}
\title{Gold Standard 8}
\usage{
gs8
}
\description{
Gold standard segmentation for subject 8.
This data was created by Alessandra Valcarcel and is artificial for example purposes.
}
| /man/gs8.Rd | no_license | avalcarcel9/rtapas | R | false | true | 305 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{gs8}
\alias{gs8}
\title{Gold Standard 8}
\usage{
gs8
}
\description{
Gold standard segmentation for subject 8.
This data was created by Alessandra Valcarcel and is artificial for example purposes.
}
|
# Lecture 9
# Illustration of exercises 5 and 6 of lecture 8
vienna_model_estimate <- lm(price ~ distance, data = my_vienna_data)
# This will give some coefficients, e.g. distance para is 2.35
# Ex 5
# What do these parameters predict for London?
# Ex 6
london_model_estimate <-lm(price ~ distance, data = my_london_data)
# you might get a parameter of 3.7
library(modelr)
library(ggplot2)
library(tidyverse)
sim1
sim1_mod <- lm(y ~ x, data = sim1)
sim1_mod
sim1_mod_no_intercept <- lm(y ~ x - 1, data = sim1)
sim1_mod_no_intercept
coef(sim1_mod)
sim1_mod$coefficients
# Add predictions and residuals, defined in modelr
(df <- sim1 %>%
add_predictions(sim1_mod) %>%
add_residuals(sim1_mod))
# Plot it
ggplot(df, aes(x, y)) +
geom_point() +
geom_point(aes(x, pred), color = "red") +
geom_abline(aes(intercept = coef(sim1_mod)[1],
slope = coef(sim1_mod)[2]),
color = "red")
# They have to have mean 0, because we minimize MSE
ggplot(df, aes(x, resid)) +
geom_point()
# Base R for predict and resid
(df <- df %>%
mutate(
pred2 = predict(sim1_mod),
resid2 = residuals(sim1_mod)
))
# Other linear models and how to specify them
ggplot(sim3, aes(x1, y, color = x2)) +
geom_point()
mod1 <- lm(y ~ x1 + x2, data = sim3)
mod2 <- lm(y ~ x1 * x2, data = sim3)
sim3 <- sim3 %>%
add_predictions(mod1, var = "mod1") %>%
add_predictions(mod2, var = "mod2")
# x1 * x2: Means try a different x1 for every value of x2
ggplot(sim3, aes(x = x1, y = y, color = x2)) +
geom_point() +
geom_line(aes(y = mod2))
ggplot(sim3, aes(x = x1, y = y, color = x2)) +
geom_point() +
geom_line(aes(y = mod1))
mod1
mod2
summary(mod1)
# You can use transforms
dm1 <- lm(log(price) ~ carat, data = diamonds)
summary(dm1)
dm2 <- lm(price ~ carat + carat^2, data = diamonds)
dm3 <- lm(price ~ carat + I(carat^2), data = diamonds)
dm4 <- lm(price ~ carat + carat*carat, data = diamonds)
dm5 <- lm(price ~ carat + I(carat*carat), data = diamonds)
diamonds %>%
add_residuals(dm5) %>%
add_predictions(dm5) %>%
ggplot(aes(carat, price)) +
geom_point() +
geom_point(aes(x = carat, y = pred), color = 'red')
# Whenever there are complicated transformations, just do it
# in the dataframe (i.e. before doing lm(...))
# Functions, iterations, conditional and all that
square_func <- function(x) {
x^2
}
square_func(4)
square_func(12)
cube_func <- function(my_silly_name) {
return(my_silly_name^3)
}
cube_func(4)
my_sqrt <- function(x) {
if (x < 0) {
"not allowed"
} else {
return(sqrt(x))
}
}
my_sqrt(4)
my_sqrt(-4)
# Based on 'R skills' by Gergo Daroczi, lecture 1 and 2
# Simulate brownian motions (BM)
# BM: you go forward or backward 1 step every period
# With equal probability
# How far are you after N steps (can be negative)
# Important in physics, in finance, in lots of things
# Get 1 step +1 or -1 with equal probability
sign(runif(1, min = -0.5, max = 0.5))
# We want to get multiple steps
# Set the seed for pseudo-random number generation to make
# debugging (and testing) easier, because non-random
set.seed(42)
x <- 0
step1 <- sign(runif(1) - 0.5)
x <- x + step1
step2 <- sign(runif(1) - 0.5)
x <- x + step2
next_step <- function() {
sign(runif(1) - 0.5)
}
(x <- x + next_step())
(x <- x + next_step())
# loop approach
set.seed(42)
x <- 0
for (i in 1:25) {
print(i)
print(x)
x <- x + next_step()
}
x
## vectorized approach
set.seed(42)
(all_steps <- sign(runif(25) - 0.5))
(x <- sum(all_steps))
# What if we want intermediate steps:
cumsum(all_steps)
ggplot(mapping = aes(x = 1:25, y = cumsum(all_steps))) +
geom_point() +
geom_line()
# What if we want BMs of different lengths:
plot_BM <- function(N) {
# TODO: Refactor
all_steps <- sign(runif(N) - 0.5)
all_xs <- cumsum(all_steps)
df <- tibble(time = 1:N, position = all_xs)
ggplot(df, aes(x = time, y = position)) +
# geom_point() +
geom_line()
}
plot_BM(25)
plot_BM(100)
plot_BM(5000)
# What if we want to use the data of the BMs plotted?
# Bad design of function
# A function should (ideally) do one thing
# Assignment
# Optional Exercise 0 for non-programmers (if you get stuck):
# If you wonder how to write functions, read chpater 19 in R4DS.
#
# Important note: It is very hard to write functions well, since at the deep level it is about
# how to organize your code, which is about understanding what it is you want to do
# and how to express this succinctly, clearly, and correctly. Defining a single simple function
# is very easy - such as defining a function that computes the square. But knowing
# which functions to write, how to make them play nicely with each other, how to not
# repeat code, etc etc is hard. I say this so you realize that it is normal to be confused,
# and to remain confused. I certainly am confused by some of the higher-level functions,
# by modules/libraries/packages (which are a kind of mega-function), by macros (another
# type of mega-function, but in a different direction), etc etc. So be patient with yourself,
# try to take it one small step at a time and to get the job done, without expecting to
# understand everything.
#
# Optional Exercise 0 (no need to report on it, but I recommend it for educational purposes):
# Read https://r4ds.had.co.nz/iteration.html#the-map-functions, section 21.5 on map functions,
# especially if you come from imperative or object-oriented languages. If you know how to use
# map functions, the pipe and functional style starts to become substantially more powerful,
# while if you still think in OO ways, you will constantly fight the way the tidyverse works.
# This is not to say that this type of functional programming is better, but that it is the
# way the tidyverse is organized, and that it has a lot going for it. If after you grok maps
# you still don't like it, that's fine. At least you know what you don't like.
# Exercise 1: Map each coefficient from mod1 and mod2 to a feature of the plot
# with two facets. For instance, what is x1 in summaryd(mod2)? Where could you
# read it off (roughly) from the graph? Etc for x1:x2b and so on. If you get
# stuck, do ask for specific questions on Discourse. Correct answers for any
# parameter look like this:
# x1 is the [slope/intercept/difference between slopes/intercepts of] for ...
# Since it is [positive/negative] this means that ... is [larger/smaller] than ...
# Exercise 2: Do the faceting with gather_predictions and if needed with data_grid.
# Look at chapter 23 for help.
# Exercise 3: Read/Skim 21.2, 21.3, and 21.4 so you are aware of some issues.
# Pick a short example from the notes that you feel you want to understand better
# and use some other use case to illustrate it (using the Vienna data, or
# diamonds, or the same but in a different way.)
| /lecture_9/lecture9-class.R | no_license | kovacskokokornel/Rcoding_CEU | R | false | false | 6,836 | r | # Lecture 9
# Illustration of exercises 5 and 6 of lecture 8
vienna_model_estimate <- lm(price ~ distance, data = my_vienna_data)
# This will give some coefficients, e.g. distance para is 2.35
# Ex 5
# What do these parameters predict for London?
# Ex 6
london_model_estimate <-lm(price ~ distance, data = my_london_data)
# you might get a parameter of 3.7
library(modelr)
library(ggplot2)
library(tidyverse)
sim1
sim1_mod <- lm(y ~ x, data = sim1)
sim1_mod
sim1_mod_no_intercept <- lm(y ~ x - 1, data = sim1)
sim1_mod_no_intercept
coef(sim1_mod)
sim1_mod$coefficients
# Add predictions and residuals, defined in modelr
(df <- sim1 %>%
add_predictions(sim1_mod) %>%
add_residuals(sim1_mod))
# Plot it
ggplot(df, aes(x, y)) +
geom_point() +
geom_point(aes(x, pred), color = "red") +
geom_abline(aes(intercept = coef(sim1_mod)[1],
slope = coef(sim1_mod)[2]),
color = "red")
# They have to have mean 0, because we minimize MSE
ggplot(df, aes(x, resid)) +
geom_point()
# Base R for predict and resid
(df <- df %>%
mutate(
pred2 = predict(sim1_mod),
resid2 = residuals(sim1_mod)
))
# Other linear models and how to specify them
ggplot(sim3, aes(x1, y, color = x2)) +
geom_point()
mod1 <- lm(y ~ x1 + x2, data = sim3)
mod2 <- lm(y ~ x1 * x2, data = sim3)
sim3 <- sim3 %>%
add_predictions(mod1, var = "mod1") %>%
add_predictions(mod2, var = "mod2")
# x1 * x2: Means try a different x1 for every value of x2
ggplot(sim3, aes(x = x1, y = y, color = x2)) +
geom_point() +
geom_line(aes(y = mod2))
ggplot(sim3, aes(x = x1, y = y, color = x2)) +
geom_point() +
geom_line(aes(y = mod1))
mod1
mod2
summary(mod1)
# You can use transforms
dm1 <- lm(log(price) ~ carat, data = diamonds)
summary(dm1)
dm2 <- lm(price ~ carat + carat^2, data = diamonds)
dm3 <- lm(price ~ carat + I(carat^2), data = diamonds)
dm4 <- lm(price ~ carat + carat*carat, data = diamonds)
dm5 <- lm(price ~ carat + I(carat*carat), data = diamonds)
diamonds %>%
add_residuals(dm5) %>%
add_predictions(dm5) %>%
ggplot(aes(carat, price)) +
geom_point() +
geom_point(aes(x = carat, y = pred), color = 'red')
# Whenever there are complicated transformations, just do it
# in the dataframe (i.e. before doing lm(...))
# Functions, iterations, conditional and all that
square_func <- function(x) {
x^2
}
square_func(4)
square_func(12)
cube_func <- function(my_silly_name) {
return(my_silly_name^3)
}
cube_func(4)
my_sqrt <- function(x) {
if (x < 0) {
"not allowed"
} else {
return(sqrt(x))
}
}
my_sqrt(4)
my_sqrt(-4)
# Based on 'R skills' by Gergo Daroczi, lecture 1 and 2
# Simulate brownian motions (BM)
# BM: you go forward or backward 1 step every period
# With equal probability
# How far are you after N steps (can be negative)
# Important in physics, in finance, in lots of things
# Get 1 step +1 or -1 with equal probability
sign(runif(1, min = -0.5, max = 0.5))
# We want to get multiple steps
# Set the seed for pseudo-random number generation to make
# debugging (and testing) easier, because non-random
set.seed(42)
x <- 0
step1 <- sign(runif(1) - 0.5)
x <- x + step1
step2 <- sign(runif(1) - 0.5)
x <- x + step2
next_step <- function() {
sign(runif(1) - 0.5)
}
(x <- x + next_step())
(x <- x + next_step())
# loop approach
set.seed(42)
x <- 0
for (i in 1:25) {
print(i)
print(x)
x <- x + next_step()
}
x
## vectorized approach
set.seed(42)
(all_steps <- sign(runif(25) - 0.5))
(x <- sum(all_steps))
# What if we want intermediate steps:
cumsum(all_steps)
ggplot(mapping = aes(x = 1:25, y = cumsum(all_steps))) +
geom_point() +
geom_line()
# What if we want BMs of different lengths:
plot_BM <- function(N) {
# TODO: Refactor
all_steps <- sign(runif(N) - 0.5)
all_xs <- cumsum(all_steps)
df <- tibble(time = 1:N, position = all_xs)
ggplot(df, aes(x = time, y = position)) +
# geom_point() +
geom_line()
}
plot_BM(25)
plot_BM(100)
plot_BM(5000)
# What if we want to use the data of the BMs plotted?
# Bad design of function
# A function should (ideally) do one thing
# Assignment
# Optional Exercise 0 for non-programmers (if you get stuck):
# If you wonder how to write functions, read chpater 19 in R4DS.
#
# Important note: It is very hard to write functions well, since at the deep level it is about
# how to organize your code, which is about understanding what it is you want to do
# and how to express this succinctly, clearly, and correctly. Defining a single simple function
# is very easy - such as defining a function that computes the square. But knowing
# which functions to write, how to make them play nicely with each other, how to not
# repeat code, etc etc is hard. I say this so you realize that it is normal to be confused,
# and to remain confused. I certainly am confused by some of the higher-level functions,
# by modules/libraries/packages (which are a kind of mega-function), by macros (another
# type of mega-function, but in a different direction), etc etc. So be patient with yourself,
# try to take it one small step at a time and to get the job done, without expecting to
# understand everything.
#
# Optional Exercise 0 (no need to report on it, but I recommend it for educational purposes):
# Read https://r4ds.had.co.nz/iteration.html#the-map-functions, section 21.5 on map functions,
# especially if you come from imperative or object-oriented languages. If you know how to use
# map functions, the pipe and functional style starts to become substantially more powerful,
# while if you still think in OO ways, you will constantly fight the way the tidyverse works.
# This is not to say that this type of functional programming is better, but that it is the
# way the tidyverse is organized, and that it has a lot going for it. If after you grok maps
# you still don't like it, that's fine. At least you know what you don't like.
# Exercise 1: Map each coefficient from mod1 and mod2 to a feature of the plot
# with two facets. For instance, what is x1 in summaryd(mod2)? Where could you
# read it off (roughly) from the graph? Etc for x1:x2b and so on. If you get
# stuck, do ask for specific questions on Discourse. Correct answers for any
# parameter look like this:
# x1 is the [slope/intercept/difference between slopes/intercepts of] for ...
# Since it is [positive/negative] this means that ... is [larger/smaller] than ...
# Exercise 2: Do the faceting with gather_predictions and if needed with data_grid.
# Look at chapter 23 for help.
# Exercise 3: Read/Skim 21.2, 21.3, and 21.4 so you are aware of some issues.
# Pick a short example from the notes that you feel you want to understand better
# and use some other use case to illustrate it (using the Vienna data, or
# diamonds, or the same but in a different way.)
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Diabetes care: Eye exam
FYC <- FYC %>%
mutate(past_year = (DSEY.yy.53==1 | DSEY.ya.53==1),
more_year = (DSEY.yb.53==1 | DSEB.yb.53==1),
never_chk = (DSEYNV53 == 1),
non_resp = (DSEY.yy.53 %in% c(-7,-8,-9))
)
FYC <- FYC %>%
mutate(
diab_eye = as.factor(case_when(
.$past_year ~ "In the past year",
.$more_year ~ "More than 1 year ago",
.$never_chk ~ "Never had eye exam",
.$non_resp ~ "Don\'t know/Non-response",
TRUE ~ "Missing")))
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
DIABdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~DIABW.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~diab_eye, FUN = svytotal, by = ~education, design = DIABdsgn)
print(results)
| /mepstrends/hc_care/json/code/r/totPOP__education__diab_eye__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 2,235 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Diabetes care: Eye exam
FYC <- FYC %>%
mutate(past_year = (DSEY.yy.53==1 | DSEY.ya.53==1),
more_year = (DSEY.yb.53==1 | DSEB.yb.53==1),
never_chk = (DSEYNV53 == 1),
non_resp = (DSEY.yy.53 %in% c(-7,-8,-9))
)
FYC <- FYC %>%
mutate(
diab_eye = as.factor(case_when(
.$past_year ~ "In the past year",
.$more_year ~ "More than 1 year ago",
.$never_chk ~ "Never had eye exam",
.$non_resp ~ "Don\'t know/Non-response",
TRUE ~ "Missing")))
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
DIABdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~DIABW.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~diab_eye, FUN = svytotal, by = ~education, design = DIABdsgn)
print(results)
|
make_percentage <- function(numerator, denominator, digits = 1){
round(numerator / denominator * 100, digits)
} | /R/make_percentage.R | no_license | sbha/handier | R | false | false | 113 | r | make_percentage <- function(numerator, denominator, digits = 1){
round(numerator / denominator * 100, digits)
} |
#' @include DBDriver.R
NULL
#' An S4 class to represent a SQL Server connection
#'
#' This class extends the \code{\link[RJDBC:JDBCConnection-class]{JDBCConnection}}
#' class to represent a SQL Server connection.
#'
#' @slot jc Java object representing the connection.
#' @slot identifier.quote quote character for a SQL Server identifier can be a
#' single quotation mark (\code{\'}), a left or right bracket (\code{[]}), or a
#' double quotation mark (\code{\"}). Usually inherited from
#' \code{\linkS4class{SQLServerDriver}}.
#' @export
setClass("SQLServerConnection", contains = 'JDBCConnection')
#' Connect to/disconnect from a SQL Server database.
#'
#' @param drv An objected generated by \code{\link{SQLServer}}, or an existing
#' \code{\linkS4class{SQLServerConnection}}. If a connection, the connection
#' will be cloned.
#' @param server The address of the server to connect to.
#' @param ... One or more \href{http://jtds.sourceforge.net/faq.html}{optional connection properties.}.
#' Note if you intend to set the \code{useNTLMv2} property to \code{'true'}
#' from the default API value of \code{'false'}, you will need to make a specific
#' authentication driver available to the SQL Server driver. See
#' \code{\link{RSQLServer}} for more details
#' @return a \code{linkS4Class{SQLServerConnection}} object
#' @examples
#' \dontrun{
#' dbConnect(SQLServer(), 'ServerName')
#' }
#' @export
setMethod(f = 'dbConnect', signature = "SQLServerDriver",
definition = function (drv, server, ...)
{
url <- build_url(server, ...)
properties <- .jnew('java/util/Properties')
jc <- .jcall(drv@jdrv, "Ljava/sql/Connection;", "connect", url,
properties)
new("SQLServerConnection", jc = jc, identifier.quote = drv@identifier.quote)
}
)
#' Send query to SQL Server
#'
#' This is basically a copy of RJDBC's \code{\link[RJDBC:JDBCConnection-methods]{dbSendQuery}}
#' method for JDBCConnection.
#'
#' @param conn connection object
#' @param statement SQL statement to execute
#' @param ... additional arguments to prepared statement substituted for "?"
#' @param list undocumented
#' @return a \code{\linkS4class{SQLServerResult}} object
#' @export
setMethod("dbSendQuery",
  signature(conn = "SQLServerConnection", statement = "character"),
  def = function (conn, statement, ..., list=NULL)
  {
    # Only the first element of a character vector is executed.
    statement <- as.character(statement)[1L]
    # Path 1: JDBC escape syntax for stored procedures -- "{call ...}" or
    # "{?= call ...}" -- is routed through a java.sql.CallableStatement.
    if (isTRUE(as.logical(grepl("^\\{(call|\\?= *call)", statement))))
    {
      s <- .jcall(conn@jc, "Ljava/sql/CallableStatement;", "prepareCall",
        statement, check=FALSE)
      .verify.JDBC.result(s, "Unable to execute JDBC callable statement ",
        statement)
      # Bind "?" placeholders from `...` and/or the `list` argument.
      if (length(list(...)))
        .fillStatementParameters(s, list(...))
      if (!is.null(list))
        .fillStatementParameters(s, list)
      r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery", check=FALSE)
      .verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
        statement)
    # Path 2: bind parameters supplied -> java.sql.PreparedStatement.
    } else if (length(list(...)) || length(list))
    {
      s <- .jcall(conn@jc, "Ljava/sql/PreparedStatement;", "prepareStatement",
        statement, check=FALSE)
      .verify.JDBC.result(s, "Unable to execute JDBC prepared statement ",
        statement)
      if (length(list(...)))
        .fillStatementParameters(s, list(...))
      if (!is.null(list))
        .fillStatementParameters(s, list)
      r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery", check=FALSE)
      .verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
        statement)
    # Path 3: plain SQL, no parameters -> simple java.sql.Statement.
    } else
    {
      s <- .jcall(conn@jc, "Ljava/sql/Statement;", "createStatement")
      .verify.JDBC.result(s, "Unable to create simple JDBC statement ",
        statement)
      r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery",
        as.character(statement)[1], check=FALSE)
      .verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
        statement)
    }
    # Result-set metadata is fetched eagerly so column types can be mapped
    # when rows are later pulled from the result object.
    md <- .jcall(r, "Ljava/sql/ResultSetMetaData;", "getMetaData", check=FALSE)
    .verify.JDBC.result(md, "Unable to retrieve JDBC result set meta data for ",
      statement, " in dbSendQuery")
    new("SQLServerResult", jr=r, md=md, stat=s, pull=.jnull())
  }
)
#' Get connection info
#'
#' @param dbObj Object of type \code{\linkS4class{SQLServerConnection}} representing a
#' connection
#' @param ... other arguments to methods. Not used here.
#' @return a named list containing database product name, database version,
#' user, and whether the connection is read only.
#' @examples
#' \dontrun{
#' dbGetInfo(dbConnect(SQLServer(), 'DatabaseName'))
#' }
#' @export
setMethod(f = 'dbGetInfo', signature = 'SQLServerConnection',
  definition = function (dbObj, ...)
  {
    # Query the connection's java.sql.DatabaseMetaData object through rJava's
    # reflection interface ($ dispatch on the jobjRef).
    meta <- dbObj@jc$getMetaData()
    # Return a plain named list of the commonly-inspected properties.
    list(dbname = meta$getDatabaseProductName(),
      db.version = meta$getDatabaseMajorVersion(),
      user = meta$getUserName(),
      is.read.only = meta$isReadOnly())
  }
)
#' Check whether a connection is still valid
#'
#' Implements the \code{DBI::dbIsValid} contract: \code{TRUE} while the
#' underlying JDBC connection is open and usable, \code{FALSE} once it has
#' been closed.
#'
#' @param dbObj An object inheriting from \code{\linkS4class{SQLServerConnection}}.
#' @param ... other parameters. Not used.
#' @return logical \code{TRUE} if the connection is open (valid) and
#'   \code{FALSE} if it has been closed.
#' @export
setMethod(f = 'dbIsValid', signature = 'SQLServerConnection',
  definition = function (dbObj, ...)
  {
    # DBI's dbIsValid() generic promises TRUE for a *usable* object, so the
    # result of java.sql.Connection#isClosed() must be negated. The previous
    # implementation returned isClosed() directly, inverting the contract.
    !dbObj@jc$isClosed()
  }
)
# dbDisconnect: Inherits from JDBCConnection
# dbGetQuery: Inherits from JDBCConnection
# dbGetException: Inherits from JDBCConnection
# dbListResults: Inherits from JDBCConnection
# dbListTables: Inherits from JDBCConnection
# dbReadTable: Inherits from JDBCConnection
# dbWriteTable: Inherits from JDBCConnection
# dbExistsTable: Inherits from JDBCConnection
# dbRemoveTable: Inherits from JDBCConnection
# dbListFields: Inherits from JDBCConnection
# dbCommit: Inherits from JDBCConnection
# dbRollback: Inherits from JDBCConnection
# dbCallProc: Not yet implemented
# Copied from RJDBC:
# https://github.com/s-u/RJDBC/blob/01c55dfe76e039a37ccda732d7332325222da8c8/R/class.R
# Validate the return value of a JDBC call made with check=FALSE.
# If `result` is a null Java reference, raise an R error built from `...`,
# appending the message of the pending Java exception when one is available.
# A non-null reference passes through silently.
.verify.JDBC.result <- function(result, ...) {
  # Fast path: a non-null Java reference means the JDBC call succeeded.
  if (!is.jnull(result)) {
    return(invisible(NULL))
  }
  # Poll (and clear) the pending Java exception for a diagnostic message.
  pending <- .jgetEx(TRUE)
  if (is.jnull(pending)) {
    stop(...)
  }
  stop(..., " (", .jcall(pending, "S", "getMessage"), ")")
}
# Bind the elements of list `l` to the "?" placeholders of the JDBC
# prepared/callable statement `s` (placeholder indices are 1-based).
# Only the first element of each value is bound. NA (or an empty/NULL value)
# is mapped to an SQL NULL whose type is inferred from the R type:
# 4 = java.sql.Types.INTEGER, 8 = DOUBLE, 12 = VARCHAR.
# (Adapted from RJDBC, with the empty-list iteration bug fixed.)
.fillStatementParameters <- function(s, l) {
  # seq_along(): the original 1:length(l) iterated over c(1, 0) for an empty
  # list and crashed on l[[1]].
  for (i in seq_along(l)) {
    v <- l[[i]]
    # Test only v[1]: the setters below never use more than the first
    # element, and a length-zero value is treated as SQL NULL instead of
    # erroring inside if().
    if (length(v) == 0L || is.na(v[1])) { # map NAs to NULLs (courtesy of Axel Klenk)
      sqlType <- if (is.integer(v)) 4 else if (is.numeric(v)) 8 else 12
      .jcall(s, "V", "setNull", i, as.integer(sqlType))
    } else if (is.integer(v))
      .jcall(s, "V", "setInt", i, v[1])
    else if (is.numeric(v))
      .jcall(s, "V", "setDouble", i, as.double(v)[1])
    else
      .jcall(s, "V", "setString", i, as.character(v)[1])
  }
}
| /R/DBConnection.R | no_license | mnel/RSQLServer | R | false | false | 6,679 | r | #' @include DBDriver.R
NULL
#' An S4 class to represent a SQL Server connection
#'
#' This class extends the \code{\link[RJDBC:JDBCConnection-class]{JDBCConnection}}
#' class to represent a SQL Server connection.
#'
#' @slot jc Java object representing the connection.
#' @slot identifier.quote quote character for a SQL Server identifier can be a
#' single quotation mark (\code{\'}), a left or right bracket (\code{[]}), or a
#' double quotation mark (\code{\"}). Usually inherited from
#' \code{\linkS4class{SQLServerDriver}}.
#' @export
setClass("SQLServerConnection", contains = 'JDBCConnection')
#' Connect to/disconnect from a SQL Server database.
#'
#' @param drv An objected generated by \code{\link{SQLServer}}, or an existing
#' \code{\linkS4class{SQLServerConnection}}. If a connection, the connection
#' will be cloned.
#' @param server The address of the server to connect to.
#' @param ... One or more \href{http://jtds.sourceforge.net/faq.html}{optional connection properties.}.
#' Note if you intend to set the \code{useNTLMv2} property to \code{'true'}
#' from the default API value of \code{'false'}, you will need to make a specific
#' authentication driver available to the SQL Server driver. See
#' \code{\link{RSQLServer}} for more details
#' @return a \code{linkS4Class{SQLServerConnection}} object
#' @examples
#' \dontrun{
#' dbConnect(SQLServer(), 'ServerName')
#' }
#' @export
setMethod(f = 'dbConnect', signature = "SQLServerDriver",
definition = function (drv, server, ...)
{
url <- build_url(server, ...)
properties <- .jnew('java/util/Properties')
jc <- .jcall(drv@jdrv, "Ljava/sql/Connection;", "connect", url,
properties)
new("SQLServerConnection", jc = jc, identifier.quote = drv@identifier.quote)
}
)
#' Send query to SQL Server
#'
#' This is basically a copy of RJDBC's \code{\link[RJDBC:JDBCConnection-methods]{dbSendQuery}}
#' method for JDBCConnection.
#'
#' @param conn connection object
#' @param statement SQL statement to execute
#' @param ... additional arguments to prepared statement substituted for "?"
#' @param list undocumented
#' @return a \code{\linkS4class{SQLServerResult}} object
#' @export
setMethod("dbSendQuery",
signature(conn = "SQLServerConnection", statement = "character"),
def = function (conn, statement, ..., list=NULL)
{
statement <- as.character(statement)[1L]
if (isTRUE(as.logical(grepl("^\\{(call|\\?= *call)", statement))))
{
s <- .jcall(conn@jc, "Ljava/sql/CallableStatement;", "prepareCall",
statement, check=FALSE)
.verify.JDBC.result(s, "Unable to execute JDBC callable statement ",
statement)
if (length(list(...)))
.fillStatementParameters(s, list(...))
if (!is.null(list))
.fillStatementParameters(s, list)
r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery", check=FALSE)
.verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
statement)
} else if (length(list(...)) || length(list))
{
s <- .jcall(conn@jc, "Ljava/sql/PreparedStatement;", "prepareStatement",
statement, check=FALSE)
.verify.JDBC.result(s, "Unable to execute JDBC prepared statement ",
statement)
if (length(list(...)))
.fillStatementParameters(s, list(...))
if (!is.null(list))
.fillStatementParameters(s, list)
r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery", check=FALSE)
.verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
statement)
} else
{
s <- .jcall(conn@jc, "Ljava/sql/Statement;", "createStatement")
.verify.JDBC.result(s, "Unable to create simple JDBC statement ",
statement)
r <- .jcall(s, "Ljava/sql/ResultSet;", "executeQuery",
as.character(statement)[1], check=FALSE)
.verify.JDBC.result(r, "Unable to retrieve JDBC result set for ",
statement)
}
md <- .jcall(r, "Ljava/sql/ResultSetMetaData;", "getMetaData", check=FALSE)
.verify.JDBC.result(md, "Unable to retrieve JDBC result set meta data for ",
statement, " in dbSendQuery")
new("SQLServerResult", jr=r, md=md, stat=s, pull=.jnull())
}
)
#' Get connection info
#'
#' @param dbObj Object of type \code{\linkS4class{SQLServerConnection}} representing a
#' connection
#' @param ... other arguments to methods. Not used here.
#' @return a named list containing database product name, database version,
#' user, and whether the connection is read only.
#' @examples
#' \dontrun{
#' dbGetInfo(dbConnect(SQLServer(), 'DatabaseName'))
#' }
#' @export
setMethod(f = 'dbGetInfo', signature = 'SQLServerConnection',
definition = function (dbObj, ...)
{
meta <- dbObj@jc$getMetaData()
list(dbname = meta$getDatabaseProductName(),
db.version = meta$getDatabaseMajorVersion(),
user = meta$getUserName(),
is.read.only = meta$isReadOnly())
}
)
#' Checks whether Connection is closed
#'
#' @param dbObj An object inheriting from \code{\linkS4class{SQLServerConnection}}.
#' @param ... other parameters. Not used.
#' @return logical \code{TRUE} if the connection is closed and vice-versa
#' @export
setMethod(f = 'dbIsValid', signature = 'SQLServerConnection',
definition = function (dbObj, ...)
{
dbObj@jc$isClosed()
}
)
# dbDisconnect: Inherits from JDBCConnection
# dbGetQuery: Inherits from JDBCConnection
# dbGetException: Inherits from JDBCConnection
# dbListResults: Inherits from JDBCConnection
# dbListTables: Inherits from JDBCConnection
# dbReadTable: Inherits from JDBCConnection
# dbWriteTable: Inherits from JDBCConnection
# dbExistsTable: Inherits from JDBCConnection
# dbRemoveTable: Inherits from JDBCConnection
# dbListFields: Inherits from JDBCConnection
# dbCommit: Inherits from JDBCConnection
# dbRollback: Inherits from JDBCConnection
# dbCallProc: Not yet implemented
# Copied from RJDBC:
# https://github.com/s-u/RJDBC/blob/01c55dfe76e039a37ccda732d7332325222da8c8/R/class.R
.verify.JDBC.result <- function (result, ...) {
if (is.jnull(result)) {
x <- .jgetEx(TRUE)
if (is.jnull(x))
stop(...)
else
stop(...," (",.jcall(x, "S", "getMessage"),")")
}
}
.fillStatementParameters <- function(s, l) {
for (i in 1:length(l)) {
v <- l[[i]]
if (is.na(v)) { # map NAs to NULLs (courtesy of Axel Klenk)
sqlType <- if (is.integer(v)) 4 else if (is.numeric(v)) 8 else 12
.jcall(s, "V", "setNull", i, as.integer(sqlType))
} else if (is.integer(v))
.jcall(s, "V", "setInt", i, v[1])
else if (is.numeric(v))
.jcall(s, "V", "setDouble", i, as.double(v)[1])
else
.jcall(s, "V", "setString", i, as.character(v)[1])
}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{left_fill}
\alias{left_fill}
\title{Fills values from left}
\usage{
left_fill(vec, str, by = NULL)
}
\description{
Fills values from left
}
| /man/left_fill.Rd | no_license | crubba/crmisc | R | false | false | 241 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{left_fill}
\alias{left_fill}
\title{Fills values from left}
\usage{
left_fill(vec, str, by = NULL)
}
\description{
Fills values from left
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScoreTestSupport.r
\name{ScoreTestSupport}
\alias{ScoreTestSupport}
\title{Title}
\usage{
ScoreTestSupport(
y,
baselineonly = NULL,
additive = NULL,
pairwise.interaction = NULL,
saturated = NULL,
missingTumorIndicator = NULL
)
}
\arguments{
\item{missingTumorIndicator}{}
}
\description{
Title
}
| /man/ScoreTestSupport.Rd | no_license | andrewhaoyu/TOP | R | false | true | 386 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ScoreTestSupport.r
\name{ScoreTestSupport}
\alias{ScoreTestSupport}
\title{Title}
\usage{
ScoreTestSupport(
y,
baselineonly = NULL,
additive = NULL,
pairwise.interaction = NULL,
saturated = NULL,
missingTumorIndicator = NULL
)
}
\arguments{
\item{missingTumorIndicator}{}
}
\description{
Title
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{h_deparsed_sys_call}
\alias{h_deparsed_sys_call}
\title{Simply converts a call into a character}
\usage{
h_deparsed_sys_call(which)
}
\arguments{
\item{which}{see \link{sys.call}. However the function bounds it by
the number of enclosing environments.}
}
\value{
the call of the corresponding environment as character
}
\description{
Simply converts a call into a character
}
| /man/h_deparsed_sys_call.Rd | no_license | cran/sanityTracker | R | false | true | 469 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{h_deparsed_sys_call}
\alias{h_deparsed_sys_call}
\title{Simply converts a call into a character}
\usage{
h_deparsed_sys_call(which)
}
\arguments{
\item{which}{see \link{sys.call}. However the function bounds it by
the number of encolsing environments.}
}
\value{
the call of the corresponding environment as character
}
\description{
Simply converts a call into a character
}
|
# Calculate the level of each design parameter (DP) required to cope with the
# given operating-environment (OE) data, propagated through the QFD matrix.
#
# Args:
#   oeData: operating-environment levels, one value per function (coercible
#           to a one-column matrix); 1 represents the baseline level.
#   qfd:    QFD relationship matrix (functions x design parameters).
#
# Returns:
#   A 1 x ncol(qfd) matrix with the optimal level of each design parameter.
calculateRequiredDesign <- function(oeData, qfd) {
  numberOfDP <- ncol(qfd)
  requiredDesign <- matrix(data = 0, ncol = numberOfDP)
  # Deviation of the current OE from the baseline level of 1, replicated into
  # one column per design parameter so it can be divided by `qfd` elementwise.
  diffFuncs <- as.matrix(oeData) - 1
  diffFuncs <- diffFuncs[, rep(1, numberOfDP)]
  # DP level each function would need in isolation; cells with no QFD
  # relationship produce NaN/Inf and are zeroed out (treated as "no demand").
  # NOTE: the stray browser() debugging call that used to sit here has been
  # removed -- it halted every non-interactive run of this function.
  requiredDP <- diffFuncs / qfd
  requiredDP <- requiredDP + 1
  requiredDP[is.nan(requiredDP) | is.infinite(requiredDP)] <- 0
  # Pick, per DP, the single level that best satisfies all related functions.
  # seq_len() (not 1:numberOfDP) so a zero-column qfd is handled gracefully.
  for (i in seq_len(numberOfDP)) {
    requiredDesign[, i] <- calcuateOptimalDPvalue(qfd[, i], requiredDP[, i])
  }
  return(requiredDesign)
}
# Find the single design-parameter (DP) level that best satisfies all
# functions related to it through the QFD column `qfd`.
#
# Args:
#   qfd:             one column of the QFD matrix (relationship weights of
#                    this DP to each function); zeros mean "unrelated".
#   requiredDPvalue: the DP level each function would need in isolation.
#
# Returns:
#   The candidate level (scanned in 0.01 steps between the smallest and
#   largest required level) with the smallest clamped total shortfall, i.e.
#   the value minimising sum_f min(0, required_f - achieved_f(candidate)).
calcuateOptimalDPvalue <- function(qfd, requiredDPvalue) {
  # Narrow down to the functions actually related to this DP.
  related <- qfd != 0
  required <- requiredDPvalue[related]
  weights <- qfd[related]
  # Enumerate candidate DP levels between the per-function extremes.
  candidates <- seq(min(required), max(required), 0.01)
  # achieved[f, c] = (candidates[c] - 1) * weights[f] + 1, built in one
  # vectorised step instead of growing a vector with c() inside a loop
  # (the original O(n^2) accumulation pattern).
  achieved <- outer(weights, candidates - 1) + 1
  # Shortfall per function/candidate column; surplus (positive) achievement
  # is neutralised so that only unmet demand counts.
  shortfall <- required - achieved  # `required` recycles down each column
  shortfall[shortfall > 0] <- 0
  # which.min() returns the first minimum, matching the original scan order.
  candidates[which.min(colSums(shortfall))]
}
#calculate the level of function from configuration design (via QFD)
#design as ()
#qfd as matrix
# Maps a configuration design back to the function levels it achieves.
# NOTE(review): assumes `configurationDesign` is a one-row matrix/data.frame
# (it is replicated row-wise below) -- TODO confirm with callers.
# NOTE(review): geoMean() is not defined in this file and is not base R;
# presumably provided by an attached package (e.g. EnvStats/psych) -- verify.
calculateAchievedFunction <- function(configurationDesign, qfd){
  numberOfFunctions <- nrow(qfd);
  achievedFunction <- matrix(data=0, nrow = numberOfFunctions);
  configurationDesign <- (configurationDesign - 1); #the difference from standard design
  # Replicate the single design row so it aligns with every function row.
  configurationDesign <- configurationDesign[rep(1, numberOfFunctions), ];
  # Indicator matrix: 1 wherever a QFD relationship exists, 0 elsewhere.
  temp <- qfd;
  temp[which(temp!=0)] <- 1
  achievedFunction <- configurationDesign * qfd;
  achievedFunction <- achievedFunction + temp; #restore the actual level of functions
  # Per function: geometric mean over the related (non-zero) design entries.
  achievedFunction <- apply(achievedFunction, 1, function(x) geoMean(x[x!=0]));
  return(achievedFunction);
}
| /qfdCalculation.R | no_license | calculusGura/paper2 | R | false | false | 2,649 | r | #calculate the level of design parameta from operation environment (via QFD)
#oe ()
#qfd as matrix
calculateRequiredDesign <- function(oeData, qfd){
numberOfDP <- ncol(qfd);
requiredDesign <- matrix(data=0, ncol = numberOfDP);
diffFuncs <- as.matrix(oeData) - 1; #the difference between initial OE and current OE
diffFuncs <- diffFuncs[,rep(1,numberOfDP)]; #resize for caluating with qfd
browser();
#required DP Levels for each functions
requiredDP <- diffFuncs / qfd;
requiredDP <- requiredDP + 1;
requiredDP[is.nan(requiredDP) | is.infinite(requiredDP)] <- 0;
#calculate required DP levels to maximize the achived function.
for(i in 1:numberOfDP){
requiredDesign[,i] <- calcuateOptimalDPvalue(qfd[,i], requiredDP[,i]);
}
return(requiredDesign);
}
calcuateOptimalDPvalue <- function(qfd, requiredDPvalue){
result <- NULL;
dpValues <- NULL;
achievedFuncList <- NULL;
#narrow down to only related function to the DP
requiredDPvalue <- as.matrix(requiredDPvalue[qfd != 0]);
qfd <- as.matrix(qfd[qfd != 0]);
#enumerate the required values from max to min pby 0.01
candidateDPvalues <- seq(min(requiredDPvalue), max(requiredDPvalue), 0.01);
#the list of achived functions by each candidates of DP level
for(i in candidateDPvalues){
aCandidateDP <- i;
diffDP <- aCandidateDP - 1;
achievedFunc <- (diffDP * qfd) + 1;
achievedFuncList <- c(achievedFuncList, achievedFunc);
}
achievedFuncList <- matrix(achievedFuncList, nrow=length(qfd));
#find optimal value ????
##difference between requiend and achieved
result <- (requiredDPvalue[,rep(1,length(candidateDPvalues))]) - achievedFuncList;
##neutralize the suplus achived function
result[result>0] <- 0;
##find the minized
result <- colSums(result);
optimalDPValue <- candidateDPvalues[which.min(result)];
return(optimalDPValue);
}
#calculate the level of function from configuration design (via QFD)
#design as ()
#qfd as matrix
calculateAchievedFunction <- function(configurationDesign, qfd){
numberOfFunctions <- nrow(qfd);
achievedFunction <- matrix(data=0, nrow = numberOfFunctions);
configurationDesign <- (configurationDesign - 1); #the difference from standard design
configurationDesign <- configurationDesign[rep(1, numberOfFunctions), ];
temp <- qfd;
temp[which(temp!=0)] <- 1
achievedFunction <- configurationDesign * qfd;
achievedFunction <- achievedFunction + temp; #restore the actual level of functions
achievedFunction <- apply(achievedFunction, 1, function(x) geoMean(x[x!=0]));
return(achievedFunction);
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_deliver_config_snapshot}
\alias{configservice_deliver_config_snapshot}
\title{Schedules delivery of a configuration snapshot to the Amazon S3 bucket
in the specified delivery channel}
\usage{
configservice_deliver_config_snapshot(deliveryChannelName)
}
\arguments{
\item{deliveryChannelName}{[required] The name of the delivery channel through which the snapshot is
delivered.}
}
\description{
Schedules delivery of a configuration snapshot to the Amazon S3 bucket in the specified delivery channel. After the delivery has started, Config sends the following notifications using an Amazon SNS topic that you have specified.
See \url{https://www.paws-r-sdk.com/docs/configservice_deliver_config_snapshot/} for full documentation.
}
\keyword{internal}
| /cran/paws.management/man/configservice_deliver_config_snapshot.Rd | permissive | paws-r/paws | R | false | true | 876 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_deliver_config_snapshot}
\alias{configservice_deliver_config_snapshot}
\title{Schedules delivery of a configuration snapshot to the Amazon S3 bucket
in the specified delivery channel}
\usage{
configservice_deliver_config_snapshot(deliveryChannelName)
}
\arguments{
\item{deliveryChannelName}{[required] The name of the delivery channel through which the snapshot is
delivered.}
}
\description{
Schedules delivery of a configuration snapshot to the Amazon S3 bucket in the specified delivery channel. After the delivery has started, Config sends the following notifications using an Amazon SNS topic that you have specified.
See \url{https://www.paws-r-sdk.com/docs/configservice_deliver_config_snapshot/} for full documentation.
}
\keyword{internal}
|
rm(list = ls())  # NOTE(review): wiping the global environment in a script is fragile; avoid in shared code
gc()
#loading packages
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(ggsci)
require(data.table)
require(cowplot)
#sort function from oncoprint complexHeatmap
# Sort a binary mutation matrix for oncoprint-style display (adapted from
# ComplexHeatmap's oncoPrint): genes (rows) are ordered by mutation
# frequency, then samples (columns) are ordered by a binary weighting so that
# samples mutated in the most frequent genes cluster to the left.
#
# Args:
#   M: numeric/logical matrix, genes x samples; non-zero means "mutated".
# Returns:
#   M with rows and columns permuted.
memoSort <- function(M) {
  # Rows ordered by total mutation count, most frequent first.
  geneOrder <- sort(rowSums(M), decreasing=TRUE, index.return=TRUE)$ix;
  # Binary weighting: a mutation in a higher-ranked gene dominates all
  # lower-ranked genes combined (weight 2^(n - i) for row i of n).
  # Vectorised replacement for the original 1:length(x) accumulation loop.
  scoreCol <- function(x) {
    sum(2^(length(x) - which(as.logical(x))))
  }
  # drop = FALSE keeps matrix shape even for a single-gene input, which made
  # the original apply() call fail.
  scores <- apply(M[geneOrder, , drop = FALSE], 2, scoreCol);
  sampleOrder <- sort(scores, decreasing=TRUE, index.return=TRUE)$ix;
  return(M[geneOrder, sampleOrder, drop = FALSE]);
}
#read data
# NOTE(review): relative paths assume the working directory is the project
# root. Barcodes are prefixed with "S_" to force syntactically valid names.
met <- fread('scriptsWithData/tumor_vs_met/All_metastasis_filter_mut.maf')[,Tumor_Sample_Barcode := paste0('S_', Tumor_Sample_Barcode)]
tum <- fread('scriptsWithData/tumor_vs_met/All_primary_filter_mut.maf')[,Tumor_Sample_Barcode := paste0('S_', Tumor_Sample_Barcode)]
#retain mutations potentially functionally
retaind_features=c("frameshift deletion","frameshift insertion","stopgain","nonsynonymous SNV","splicing","stoploss")
met=met[met$Variant_Classification %in% retaind_features,]
tum=tum[tum$Variant_Classification %in% retaind_features,]
#format data
# For each primary (tum) mutation, flag whether the same sample barcode and
# the same genomic change also occur in the metastasis (met) set, and vice
# versa; columns are (barcode, shared?, gene symbol).
tum.VS.met <- data.table(tum$Tumor_Sample_Barcode, tum$Tumor_Sample_Barcode %in% met$Tumor_Sample_Barcode & tum$ChromChange %in% met$ChromChange, tum$Hugo_Symbol)
met.VS.tum <- data.table(met$Tumor_Sample_Barcode, met$Tumor_Sample_Barcode %in% tum$Tumor_Sample_Barcode & met$ChromChange %in% tum$ChromChange, met$Hugo_Symbol)
# All barcodes seen in either cohort; column 16 is presumably
# Tumor_Sample_Barcode -- TODO confirm against the MAF column layout.
fuck <- unique(as.vector(rbind(met[,16], tum[,16])))
# Per-sample counts: Initial = primary-only, Common = shared,
# Recurrence = metastasis-only mutations.
Initial <- apply(fuck, 1, function(x) table(tum.VS.met[V1 == x, -3, with = FALSE])[1])
Common <- apply(fuck, 1, function(x) unique(table(met.VS.tum[V1 == x, -3, with = FALSE])[2], table(tum.VS.met[V1 == x])[2]))
Recurrence <- apply(fuck, 1, function(x) table(met.VS.tum[V1 == x, -3, with = FALSE])[1])
mutations <- data.table(Initial = Initial, Common = Common, Recurrence = Recurrence, Samples = as.vector(t(fuck)))
rm(Initial, Common, Recurrence, fuck)
#data format for bar plot
mutations <- melt(mutations, id = 'Samples', variable.name = 'Types')
mutations$Samples=factor(mutations$Samples,levels =unique(mutations$Samples) )
#data format for oncoplot
# Recode the logical "shared?" flag into category labels per direction.
met.VS.tum[, V2 := as.character(V2)][V2 == 'FALSE', V2 := "Recurrence"][V2 == 'TRUE', V2 := "Common"]
tum.VS.met[, V2 := as.character(V2)][V2 == 'FALSE', V2 := "Initial"][V2 == 'TRUE', V2 := "Common"]
mutations_onco <- rbind(met.VS.tum, tum.VS.met)
names(mutations_onco) <- c("Samples", "Types", "Genes")
# reorder genes and samples based on mutation numbers
oncomatrix= data.frame(dcast(mutations_onco, Genes ~ Samples))
row.names(oncomatrix)=oncomatrix$Genes
oncomatrix=oncomatrix[,-1]
#retain top genes
# Keep genes altered in more than two samples, then memo-sort for display.
filteredGenes=row.names(oncomatrix[rowSums(oncomatrix!=0)>2,])
oncomatrix=oncomatrix[filteredGenes,]
oncomatrix=memoSort(oncomatrix!=0)
filteredGenes=row.names(oncomatrix)
mutations_onco=mutations_onco[mutations_onco$Genes %in% filteredGenes,]
mutations_onco$Genes=factor(mutations_onco$Genes,levels = filteredGenes)
mutations$Samples=factor(mutations$Samples,levels =unique(colnames(oncomatrix)))
# Top panel: stacked bar chart of mutation counts per sample.
mbar <- ggplot() +
  geom_col(data = mutations, aes(x = Samples, y = value, fill = Types)) +
  ylab('Somantic mutations') +
  theme(axis.text.x = element_text(angle = 45, vjust = 0.7))+
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank())+
  theme(legend.position="top")+
  scale_fill_lancet()
#set background
# Grey tiles for every gene/sample pair so non-mutated cells stay visible
# behind the coloured mutation tiles.
backgroud=expand.grid(Samples=unique(mutations_onco$Samples),Genes=unique(filteredGenes))
backgroud$Samples=factor(backgroud$Samples,levels =colnames(oncomatrix) )
backgroud$Genes=factor(backgroud$Genes,levels =rev(filteredGenes))
mheatmap <- ggplot() +
  geom_tile(data = backgroud, aes(x = Samples, y = Genes),fill = "Gray", width = 0.9, height = 0.9, size = 1) +
  geom_tile(data = mutations_onco, aes(x = Samples, y = Genes, fill = Types), width = 0.9, height = 0.9, size = 1) +
  theme(axis.text.x = element_text(angle = 45, vjust = 1,hjust = 1)) +
  theme(axis.text.y = element_text(face="bold.italic")) +
  theme(legend.position="none") +
  scale_fill_lancet(limits = c('Initial', 'Common', 'Recurrence'))
# Combine panels: counts on top (1/4 height), oncoprint below (3/4).
plot_grid(mbar,mheatmap,ncol = 1, align = 'v',rel_heights = c(1/4,3/4))
# ggdraw() + draw_plot(mbar, 0.1, 0.7, 1, 0.3) + draw_plot(mheatmap, 0, 0, 1, 0.7) + draw_plot_label(c("a", "b"), c(0, 0), c(1, .5), size = 15)
save_plot('images/met_VS_tum.png', ggplot2::last_plot(), base_width = 11, base_height = 9)
save_plot('images/met_VS_tum.png', ggplot2::last_plot(), base_width = 11, base_height = 9) | /scriptsWithData/tumor_vs_met/Tumor vs Met.R | no_license | Jingjing-Qi/RscriptCollection | R | false | false | 4,492 | r | rm(list = ls())
gc()
#loading packages
require(ggsci)
require(data.table)
require(cowplot)
#sort function from oncoprint complexHeatmap
memoSort <- function(M) {
geneOrder <- sort(rowSums(M), decreasing=TRUE, index.return=TRUE)$ix;
scoreCol <- function(x) {
score <- 0;
for(i in 1:length(x)) {
if(x[i]) {
score <- score + 2^(length(x)-i);
}
}
return(score);
}
scores <- apply(M[geneOrder, ], 2, scoreCol);
sampleOrder <- sort(scores, decreasing=TRUE, index.return=TRUE)$ix;
return(M[geneOrder, sampleOrder]);
}
#read data
met <- fread('scriptsWithData/tumor_vs_met/All_metastasis_filter_mut.maf')[,Tumor_Sample_Barcode := paste0('S_', Tumor_Sample_Barcode)]
tum <- fread('scriptsWithData/tumor_vs_met/All_primary_filter_mut.maf')[,Tumor_Sample_Barcode := paste0('S_', Tumor_Sample_Barcode)]
#retain mutations potentially functionally
retaind_features=c("frameshift deletion","frameshift insertion","stopgain","nonsynonymous SNV","splicing","stoploss")
met=met[met$Variant_Classification %in% retaind_features,]
tum=tum[tum$Variant_Classification %in% retaind_features,]
#format data
tum.VS.met <- data.table(tum$Tumor_Sample_Barcode, tum$Tumor_Sample_Barcode %in% met$Tumor_Sample_Barcode & tum$ChromChange %in% met$ChromChange, tum$Hugo_Symbol)
met.VS.tum <- data.table(met$Tumor_Sample_Barcode, met$Tumor_Sample_Barcode %in% tum$Tumor_Sample_Barcode & met$ChromChange %in% tum$ChromChange, met$Hugo_Symbol)
fuck <- unique(as.vector(rbind(met[,16], tum[,16])))
Initial <- apply(fuck, 1, function(x) table(tum.VS.met[V1 == x, -3, with = FALSE])[1])
Common <- apply(fuck, 1, function(x) unique(table(met.VS.tum[V1 == x, -3, with = FALSE])[2], table(tum.VS.met[V1 == x])[2]))
Recurrence <- apply(fuck, 1, function(x) table(met.VS.tum[V1 == x, -3, with = FALSE])[1])
mutations <- data.table(Initial = Initial, Common = Common, Recurrence = Recurrence, Samples = as.vector(t(fuck)))
rm(Initial, Common, Recurrence, fuck)
#data format for bar plot
mutations <- melt(mutations, id = 'Samples', variable.name = 'Types')
mutations$Samples=factor(mutations$Samples,levels =unique(mutations$Samples) )
#data format for oncoplot
met.VS.tum[, V2 := as.character(V2)][V2 == 'FALSE', V2 := "Recurrence"][V2 == 'TRUE', V2 := "Common"]
tum.VS.met[, V2 := as.character(V2)][V2 == 'FALSE', V2 := "Initial"][V2 == 'TRUE', V2 := "Common"]
mutations_onco <- rbind(met.VS.tum, tum.VS.met)
names(mutations_onco) <- c("Samples", "Types", "Genes")
# reorder genes and samples based on mutation numbers
oncomatrix= data.frame(dcast(mutations_onco, Genes ~ Samples))
row.names(oncomatrix)=oncomatrix$Genes
oncomatrix=oncomatrix[,-1]
#retain top genes
filteredGenes=row.names(oncomatrix[rowSums(oncomatrix!=0)>2,])
oncomatrix=oncomatrix[filteredGenes,]
oncomatrix=memoSort(oncomatrix!=0)
filteredGenes=row.names(oncomatrix)
mutations_onco=mutations_onco[mutations_onco$Genes %in% filteredGenes,]
mutations_onco$Genes=factor(mutations_onco$Genes,levels = filteredGenes)
mutations$Samples=factor(mutations$Samples,levels =unique(colnames(oncomatrix)))
mbar <- ggplot() +
geom_col(data = mutations, aes(x = Samples, y = value, fill = Types)) +
ylab('Somantic mutations') +
theme(axis.text.x = element_text(angle = 45, vjust = 0.7))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())+
theme(legend.position="top")+
scale_fill_lancet()
#set background
backgroud=expand.grid(Samples=unique(mutations_onco$Samples),Genes=unique(filteredGenes))
backgroud$Samples=factor(backgroud$Samples,levels =colnames(oncomatrix) )
backgroud$Genes=factor(backgroud$Genes,levels =rev(filteredGenes))
mheatmap <- ggplot() +
geom_tile(data = backgroud, aes(x = Samples, y = Genes),fill = "Gray", width = 0.9, height = 0.9, size = 1) +
geom_tile(data = mutations_onco, aes(x = Samples, y = Genes, fill = Types), width = 0.9, height = 0.9, size = 1) +
theme(axis.text.x = element_text(angle = 45, vjust = 1,hjust = 1)) +
theme(axis.text.y = element_text(face="bold.italic")) +
theme(legend.position="none") +
scale_fill_lancet(limits = c('Initial', 'Common', 'Recurrence'))
plot_grid(mbar,mheatmap,ncol = 1, align = 'v',rel_heights = c(1/4,3/4))
# ggdraw() + draw_plot(mbar, 0.1, 0.7, 1, 0.3) + draw_plot(mheatmap, 0, 0, 1, 0.7) + draw_plot_label(c("a", "b"), c(0, 0), c(1, .5), size = 15)
save_plot('images/met_VS_tum.png', ggplot2::last_plot(), base_width = 11, base_height = 9) |
#' Force dataframe columns to character
#'
## Copyright(c) 2017-2020 R. Mark Sharp
## This file is part of nprcgenekeepr
#' Converts designated columns of a dataframe to character. Defaults to
#' converting columns \code{id}, \code{sire}, and \code{dam}.
#'
#' @return A dataframe with the specified columns converted to class
#' "character" for display with xtables (in shiny)
#'
#' @examples
#' \donttest{
#' library(nprcgenekeepr)
#' pedGood <- nprcgenekeepr::pedGood
#' names(pedGood) <- c("id", "sire", "dam", "sex", "birth")
#' class(pedGood[["id"]])
#' pedGood <- toCharacter(pedGood)
#' class(pedGood[["id"]])
#' }
#'
#' @param df a dataframe where the first three columns can be coerced to
#' character.
#' @param headers character vector with the columns to be converted to
#' character class. Defaults to \code{c("id", "sire", "dam")}/
#' @export
toCharacter <- function(df, headers = c("id", "sire", "dam")) {
  # Convert only the requested columns that actually exist in the dataframe;
  # all other columns keep their original class.
  for (column_name in intersect(names(df), headers)) {
    df[[column_name]] <- as.character(df[[column_name]])
  }
  df
}
| /R/toCharacter.R | permissive | jhagberg/nprcgenekeepr | R | false | false | 1,051 | r | #' Force dataframe columns to character
#'
## Copyright(c) 2017-2020 R. Mark Sharp
## This file is part of nprcgenekeepr
#' Converts designated columns of a dataframe to character. Defaults to
#' converting columns \code{id}, \code{sire}, and \code{dam}.
#'
#' @return A dataframe with the specified columns converted to class
#' "character" for display with xtables (in shiny)
#'
#' @examples
#' \donttest{
#' library(nprcgenekeepr)
#' pedGood <- nprcgenekeepr::pedGood
#' names(pedGood) <- c("id", "sire", "dam", "sex", "birth")
#' class(pedGood[["id"]])
#' pedGood <- toCharacter(pedGood)
#' class(pedGood[["id"]])
#' }
#'
#' @param df a dataframe where the first three columns can be coerced to
#' character.
#' @param headers character vector with the columns to be converted to
#' character class. Defaults to \code{c("id", "sire", "dam")}/
#' @export
toCharacter <- function(df, headers = c("id", "sire", "dam")) {
headers <- intersect(names(df), headers)
for (col in headers) {
df[[col]] <- as.character(df[[col]])
}
return(df)
}
|
#'Runs the simulation for a a range of die sizes
#'
#'The function was just an additional little experiment I wanted to try, and is not part of the core functions of the package.
#'\code{roll_probabilities} creates a new transition matrix, runs the markov chain simulation, and produces a new probability distribution
#'for each die size in the range. The maximum value of the output of \code{finish_game_chance} is recorded in the vector \code{optimizedroll}.
#'This produces an interesting graph structure when plotted versus the die size.
#'
#'@param maxroll The maximum dize. Essentially, the number of iterations of this function.
#'@return A plot of the die size versus the max values of the vector returned by \code{finish_game_chance} after each simulation.
#'@examples
#'roll_probabilities(maxroll = 100)
roll_probabilities <- function(maxroll = 100) {
  # NOTE(review): depends on create_transmatrix(), markov_chain() and
  # finish_game_chance() from this package, and on a global `modified_board`
  # board definition -- TODO confirm the latter is exported/loaded.
  #creates an empty vector of size maxroll. This will be filled in the subsequent for loop and returned.
  optimizedroll<-numeric(maxroll)
  #This for loops runs through the creation of the transition matrix, the markov chain, and the derivatation for each die size
  #Applies the max value of the output of finish_game_chance to each entry in optimizedroll
  # tau = 100 is the simulated turn horizon for each die size.
  for (movemax in 1:maxroll) {
    itertrans<-create_transmatrix(input = modified_board, roll = movemax)
    iterdist<-markov_chain(transmat = itertrans, tau = 100)
    optimizedroll[movemax]<-max(finish_game_chance(probabilities = iterdist, tau = 100))
  }
  # Plot die size (1..maxroll) against the best per-turn finishing chance.
  optimizedrollgraph <-
    ggplot(as.data.frame(optimizedroll), aes(x = seq(1, length(optimizedroll)), y = optimizedroll)) +
    xlab("Die size") +
    ylab("% chance of finishing game at most probable winning turn") +
    geom_line()
  return(optimizedrollgraph)
}
| /R/roll_probabilities.R | no_license | iawindham/snakesandladders | R | false | false | 1,782 | r | #'Runs the simulation for a a range of die sizes
#'
#'The function was just an additional little experiment I wanted to try, and is not part of the core functions of the package.
#'\code{roll_probabilities} creates a new transition matrix, runs the markov chain simulation, and produces a new probability distribution
#'for each die size in the range. The maximum value of the output of \code{finish_game_chance} is recorded in the vector \code{optimizedroll}.
#'This produces an interesting graph structure when plotted versus the die size.
#'
#'@param maxroll The maximum dize. Essentially, the number of iterations of this function.
#'@return A plot of the die size versus the max values of the vector returned by \code{finish_game_chance} after each simulation.
#'@examples
#'roll_probabilities(maxroll = 100)
roll_probabilities <- function(maxroll = 100) {
  # Best finishing probability found for each candidate die size,
  # preallocated to the number of die sizes we will try.
  finish_chance_by_die <- numeric(maxroll)
  for (die_size in 1:maxroll) {
    # Pipeline per die size: transition matrix -> chain distribution ->
    # per-turn finishing chances; keep the maximum.
    trans <- create_transmatrix(input = modified_board, roll = die_size)
    dist <- markov_chain(transmat = trans, tau = 100)
    finish_chance_by_die[die_size] <- max(finish_game_chance(probabilities = dist, tau = 100))
  }
  # Plot die size against the best finishing chance and return the plot.
  ggplot(as.data.frame(finish_chance_by_die),
         aes(x = seq(1, length(finish_chance_by_die)), y = finish_chance_by_die)) +
    xlab("Die size") +
    ylab("% chance of finishing game at most probable winning turn") +
    geom_line()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unbmom2.R
\name{uM4pool}
\alias{uM4pool}
\title{Pooled central moment estimates - two-sample}
\usage{
uM4pool(m2, m4, n_x, n_y)
}
\arguments{
\item{m2}{naive biased variance estimate \eqn{m_2 = 1/(n_x + n_y) \sum_{i =
1}^{n_x} ((X_i - \bar{X})^2 + \sum_{i = 1}^{n_y} ((Y_i - \bar{Y})^2}{m[2] =
mean(c((X - X-bar)^2, (Y - Y-bar)^2))} for vectors \code{X} and \code{Y}.}
\item{m4}{naive biased fourth central moment estimate \eqn{m_4 = 1/(n_x +
n_y) \sum_{i = 1}^{n_x} ((X_i - \bar{X})^4 + \sum_{i = 1}^{n_y} ((Y_i -
\bar{Y})^4}{m[4] = mean(c((X - X-bar)^4, (Y - Y-bar)^4))} for vectors
\code{X} and \code{Y}.}
\item{n_x}{number of observations in the first group.}
\item{n_y}{number of observations in the second group.}
}
\value{
Pooled estimate of a fourth central moment.
}
\description{
Calculate pooled unbiased estimates of central moments and their powers and
products.
}
\examples{
nx <- 10
ny <- 8
shp <- 3
smpx <- rgamma(nx, shape = shp) - shp
smpy <- rgamma(ny, shape = shp)
mx <- mean(smpx)
my <- mean(smpy)
m <- numeric(4)
for (j in 2:4) {
m[j] <- mean(c((smpx - mx)^j, (smpy - my)^j))
}
uM4pool(m[2], m[4], nx, ny)
}
\seealso{
Other pooled estimates (two-sample): \code{\link{uM2M3pool}},
\code{\link{uM2M4pool}}, \code{\link{uM2pool}},
\code{\link{uM2pow2pool}}, \code{\link{uM2pow3pool}},
\code{\link{uM3pool}}, \code{\link{uM3pow2pool}},
\code{\link{uM5pool}}, \code{\link{uM6pool}}
}
\concept{pooled estimates (two-sample)}
| /man/uM4pool.Rd | no_license | cran/Umoments | R | false | true | 1,533 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unbmom2.R
\name{uM4pool}
\alias{uM4pool}
\title{Pooled central moment estimates - two-sample}
\usage{
uM4pool(m2, m4, n_x, n_y)
}
\arguments{
\item{m2}{naive biased variance estimate \eqn{m_2 = 1/(n_x + n_y) \sum_{i =
1}^{n_x} ((X_i - \bar{X})^2 + \sum_{i = 1}^{n_y} ((Y_i - \bar{Y})^2}{m[2] =
mean(c((X - X-bar)^2, (Y - Y-bar)^2))} for vectors \code{X} and \code{Y}.}
\item{m4}{naive biased fourth central moment estimate \eqn{m_4 = 1/(n_x +
n_y) \sum_{i = 1}^{n_x} ((X_i - \bar{X})^4 + \sum_{i = 1}^{n_y} ((Y_i -
\bar{Y})^4}{m[4] = mean(c((X - X-bar)^4, (Y - Y-bar)^4))} for vectors
\code{X} and \code{Y}.}
\item{n_x}{number of observations in the first group.}
\item{n_y}{number of observations in the second group.}
}
\value{
Pooled estimate of a fourth central moment.
}
\description{
Calculate pooled unbiased estimates of central moments and their powers and
products.
}
\examples{
nx <- 10
ny <- 8
shp <- 3
smpx <- rgamma(nx, shape = shp) - shp
smpy <- rgamma(ny, shape = shp)
mx <- mean(smpx)
my <- mean(smpy)
m <- numeric(4)
for (j in 2:4) {
m[j] <- mean(c((smpx - mx)^j, (smpy - my)^j))
}
uM4pool(m[2], m[4], nx, ny)
}
\seealso{
Other pooled estimates (two-sample): \code{\link{uM2M3pool}},
\code{\link{uM2M4pool}}, \code{\link{uM2pool}},
\code{\link{uM2pow2pool}}, \code{\link{uM2pow3pool}},
\code{\link{uM3pool}}, \code{\link{uM3pow2pool}},
\code{\link{uM5pool}}, \code{\link{uM6pool}}
}
\concept{pooled estimates (two-sample)}
|
############# Anspach and Carlson ##############
# Replication script: does commentary attached to a shared article preview
# change trust in the news outlet, the author/poster, and the cited poll?
rm(list = ls())  # NOTE(review): wipes the session; acceptable in a one-off script
options(stringsAsFactors = FALSE)
library(dplyr)
library(stargazer)
library(ggplot2)
library(gridExtra)
# BUG FIX: plot_model() is called later in this script but its package was
# never attached, so the script failed at the first plot.
library(sjPlot)
# set working directory
setwd("C:/Users/kevin/Desktop/rap_replication")
# load data - (choose sample to analyze)
load("data/q.Rdata") # 10759 X 112
# # w/o mid attention check
# load("data/q_noMid.Rdata") # 11783 X 111
# q <- q_noMid
# # w/o mid attention check & only the 1st 9000
# load("data/q_9k.Rdata") # 9000 X 111
# q <- q_9k
# Create factor variable and labels for the randomized condition
# (Block3_DO records which question block the respondent was shown)
q$ac_condition <- factor(q$Block3_DO,
                         levels = c("Q4.1|Q59",
                                    "Q4.2|Q59",
                                    "Q4.3|Q59"),
                         labels = c("Preview + con. commentary",
                                    "Preview + lib. commentary",
                                    "Article preview"))
# relevel and make no commentary - 'Article preview' - the reference group
q$ac_condition <- relevel(q$ac_condition,
                          ref = "Article preview")
table(q$ac_condition, useNA = "always")
# party affiliation == q$leaning - this is a continuous variable
# Divide age into 4 age groups - based on quartiles
summary(q$age_self)
# Min=18, 1st Qu.=31, Median=45, 3rd Qu.=61, Max=99
# NOTE(review): the bin labels overlap at the boundaries ('18-31' actually
# covers 18-30, etc.); respondents with NA age remain NA.
q$age_group <- NA
q$age_group[q$age_self < 31] <- '18-31'
q$age_group[q$age_self >= 31 & q$age_self < 45] <- '31-45'
q$age_group[q$age_self >= 45 & q$age_self < 61] <- '45-61'
q$age_group[q$age_self >= 61] <- '61-99'
# Divide dl (digital literacy, harg_mean) into 4 groups - based on quartiles
summary(q$harg_mean)
# Min=1, 1st Qu.=3.1, Median=3.9, 3rd Qu.=4.5, Max = 5
q$dl_group <- NA
q$dl_group[q$harg_mean < 3.1] <- '1-3.1'
q$dl_group[q$harg_mean >= 3.1 & q$harg_mean < 3.9] <- '3.1-3.9'
q$dl_group[q$harg_mean >= 3.9 & q$harg_mean < 4.5] <- '3.9-4.5'
q$dl_group[q$harg_mean >= 4.5] <- '4.5-5'
table(q$dl_group, useNA = "always")
## trust outcome coding (used as regression DVs below)
# Trim all white space so the factor levels below match exactly
q$Q5.4 <- trimws(q$Q5.4)
q$Q5.5 <- trimws(q$Q5.5)
q$Q5.6 <- trimws(q$Q5.6)
# Code Q5.4 -- trust in person. Likert responses map to 1-5
# (1 = "Very untrustworthy" ... 5 = "Very trustworthy"); any other
# response becomes NA via the unmatched factor level.
q$trust_person<-as.numeric(factor(q$Q5.4, levels = c("Very untrustworthy",
                                                     "Somewhat untrustworthy",
                                                     "Not sure",
                                                     "Somewhat trustworthy",
                                                     "Very trustworthy")))
# Code Q5.5 -- trust in outlet (same 1-5 coding)
q$trust_outlet<-as.numeric(factor(q$Q5.5, levels = c("Very untrustworthy",
                                                     "Somewhat untrustworthy",
                                                     "Not sure",
                                                     "Somewhat trustworthy",
                                                     "Very trustworthy")))
# Q5.6 -- trust in poll (same 1-5 coding)
q$trust_poll<-as.numeric(factor(q$Q5.6, levels = c("Very untrustworthy",
                                                   "Somewhat untrustworthy",
                                                   "Not sure",
                                                   "Somewhat trustworthy",
                                                   "Very trustworthy")))
# reg trust - age: condition main effects only
fit.trust_outlet_noInteractions <- lm(trust_outlet ~ ac_condition + age_self, data = q)
fit.trust_poll_noInteractions <- lm(trust_poll ~ ac_condition + age_self, data = q)
fit.trust_person_noInteractions <- lm(trust_person ~ ac_condition + age_self, data = q)
# reg trust - age: condition x age interaction
fit.trust_outlet <- lm(trust_outlet ~ ac_condition * age_self, data = q)
fit.trust_poll <- lm(trust_poll ~ ac_condition * age_self, data = q)
fit.trust_person <- lm(trust_person ~ ac_condition * age_self, data = q)
# reg trust - age: interaction plus demographic controls
fit.trust_outlet_controls <- lm(trust_outlet ~ ac_condition * age_self +
                                  male + white + leaning,
                                data = q)
fit.trust_poll_controls <- lm(trust_poll ~ ac_condition * age_self +
                                male + white + leaning,
                              data = q)
fit.trust_person_controls <- lm(trust_person ~ ac_condition * age_self +
                                  male + white + leaning,
                                data = q)
# Regression table: three specifications per outcome.
# BUG FIX 1: the 4th model was fit.trust_outlet_noInteractions (copy-paste
# slip), so the middle column block repeated an outlet model instead of
# starting the poll models.
# BUG FIX 2: dep.var.labels follow the order in which distinct outcomes
# appear (trust_outlet, trust_poll, trust_person); given the Q5.4/Q5.5/Q5.6
# coding above, "Cited poll" belongs to trust_poll and "Author/poster" to
# trust_person, so the last two labels were swapped.
stargazer(fit.trust_outlet_noInteractions, fit.trust_outlet, fit.trust_outlet_controls,
          fit.trust_poll_noInteractions, fit.trust_poll, fit.trust_poll_controls,
          fit.trust_person_noInteractions, fit.trust_person, fit.trust_person_controls,
          covariate.labels = c("Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Age",
                               "Male",
                               "White",
                               "Party",
                               "Preview + con. commentary * Age",
                               "Preview + lib. commentary * Age",
                               "Constant"),
          dep.var.labels = c("News Outlet", "Cited poll", "Author/poster"),
          dep.var.caption = c(""),
          model.numbers = FALSE,
          single.row = TRUE,
          #type='html',
          #out = "tables/fit_age.html"
          type = 'latex',
          out = "tables/fit_age.tex"
          )
# reg trust - dl: condition main effects only
harg.trust_outlet_noInteractions <- lm(trust_outlet ~ ac_condition + harg_mean, data = q)
harg.trust_poll_noInteractions <- lm(trust_poll ~ ac_condition + harg_mean, data = q)
harg.trust_person_noInteractions <- lm(trust_person ~ ac_condition + harg_mean, data = q)
# reg trust - dl: condition x digital-literacy interaction
harg.trust_outlet <- lm(trust_outlet ~ ac_condition * harg_mean, data = q)
harg.trust_poll <- lm(trust_poll ~ ac_condition * harg_mean, data = q)
harg.trust_person <- lm(trust_person ~ ac_condition * harg_mean, data = q)
# reg trust - dl: interaction plus demographic controls
harg.trust_outlet_controls <- lm(trust_outlet ~ ac_condition * harg_mean +
                                   male + white + leaning,
                                 data = q)
harg.trust_poll_controls <- lm(trust_poll ~ ac_condition * harg_mean +
                                 male + white + leaning,
                               data = q)
harg.trust_person_controls <- lm(trust_person ~ ac_condition * harg_mean +
                                   male + white + leaning,
                                 data = q)
# Regression table for the digital-literacy models.
# FIX: "New Outlet" -> "News Outlet" (typo; now consistent with the age
# table), and labels reordered so "Cited poll" matches trust_poll and
# "Author/Poster" matches trust_person (see Q5.4/Q5.6 coding above).
stargazer(harg.trust_outlet_noInteractions, harg.trust_outlet, harg.trust_outlet_controls,
          harg.trust_poll_noInteractions, harg.trust_poll, harg.trust_poll_controls,
          harg.trust_person_noInteractions, harg.trust_person, harg.trust_person_controls,
          covariate.labels = c("Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Digital Literacy",
                               "Male",
                               "White",
                               "Party",
                               "Preview + con. commentary * Digital Literacy",
                               "Preview + lib. commentary * Digital Literacy",
                               "Constant"),
          dep.var.labels = c("News Outlet", "Cited poll", "Author/Poster"),
          dep.var.caption = c(""),
          model.numbers = FALSE,
          single.row = TRUE,
          type = 'latex',
          out = "tables/fit_dl.tex"
          #type='html',
          #out = "tables/fit_dl.html"
          )
# reg trust on power_mean
# NOTE(review): these three models are fitted but never reported anywhere
# below - either table them or drop them.
power.trust_outlet <- lm(trust_outlet ~ ac_condition * power_mean, data = q)
power.trust_poll <- lm(trust_poll ~ ac_condition * power_mean, data = q)
power.trust_person <- lm(trust_person ~ ac_condition * power_mean, data = q)
# Indicator variables for the poll-recall question (Q5.2).
# correct answer
q$poll_36<-0
q$poll_36[q$Q5.2 == "36%"]<-1
# wrong answer conservative commentary
q$poll_49<-0
q$poll_49[q$Q5.2 == "49%"]<-1
# wrong answer liberal commentary
q$poll_23<-0
q$poll_23[q$Q5.2 == "23%"]<-1
# What predicts choosing the correct answer - 36% - for the three treatments?
# Linear probability model: binary outcome regressed with lm().
choose36_age <- lm(poll_36 ~ age_self*ac_condition, data=q)
stargazer(choose36_age,
          covariate.labels = c("Age",
                               "Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Preview + con. commentary * Age",
                               "Preview + lib. commentary * Age",
                               "Constant"),
          dep.var.labels=c(""),
          dep.var.caption = c("Choose correct Answer - 36%"),
          model.numbers = FALSE,
          single.row = TRUE,
          type='latex',
          out = "tables/choose36_age.tex"
          #type='html',
          #out = "tables/choose36_age.html"
          )
# Interaction plot; plot_model() comes from sjPlot, which must be attached
# before this point (NOTE(review): the script header does not load it - confirm).
p36_age <- plot_model(choose36_age, type = 'int', colors = c("grey", "red", "blue")) +
  labs(title = "",
       x = "Age",
       y = "Choose 36% \n (0=did not choose 36%; 1=choose 36%)",
       color = "Treatment") + theme(legend.position = "none")
choose36_dl <- lm(poll_36 ~ harg_mean*ac_condition, data=q)
# NOTE(review): "Digit Literacy" in the labels below is likely a typo for
# "Digital Literacy" (these strings appear in the published table).
stargazer(choose36_dl,
          covariate.labels = c("Digit Literacy",
                               "Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Preview + con. commentary * Digit Literacy",
                               "Preview + lib. commentary * Digit Literacy",
                               "Constant"),
          dep.var.labels=c(""),
          dep.var.caption = c("Choose correct Answer - 36%"),
          model.numbers = FALSE,
          single.row = TRUE,
          # type='html',
          #out = "tables/choose36_dl.html"
          type='latex',
          out = "tables/choose36_dl.tex"
          )
p36_dl <- plot_model(choose36_dl, type = 'int', colors = c("grey", "red", "blue")) +
  labs(title = "",
       x = "Digital Literacy",
       y = "Choose 36% \n (0=did not choose 36%; 1=choose 36%)",
       color = "Treatment") + theme(legend.position = "bottom")
# function to add all plots to a single page with one shared legend
# Extracts the legend grob ("guide-box") from a ggplot so it can be drawn
# once beneath a grid of plots.
g_legend <- function(a.gplot) {
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  # vapply (not sapply) so an unexpected grob structure fails loudly
  # instead of silently returning a list.
  leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  tmp$grobs[[leg]]
}
mylegend <- g_legend(p36_dl)
# Two interaction plots side by side, shared legend underneath.
p36_combo <- grid.arrange(arrangeGrob(p36_age + theme(legend.position = "none"),
                                      p36_dl + theme(legend.position = "none"),
                                      nrow = 1),
                          mylegend,
                          nrow = 2,
                          heights = c(10, 1))
# Replicate the bar plot from Anspach and Carlson (2018)
# Create df for each type of stimuli
# fig_t: Q5.2 answer (rows) x condition (columns) counts.
fig_t <- table(q$Q5.2, q$ac_condition, useNA = "no")
# BUG FIX: the original `fig_t/colSums(fig_t)` recycled the 3 column sums
# column-major down the 4x3 table (12 cells / 3 sums divides evenly, so no
# warning), silently dividing most cells by the wrong condition's total.
# prop.table(..., margin = 2) computes true within-condition percentages.
fig_t <- round(prop.table(fig_t, margin = 2) * 100, 2)
# Drop the first row (presumably the blank/non-response category - confirm),
# transpose to condition x answer, then order answer columns 36%, 23%, 49%.
fig_t <- fig_t[2:4, ] %>% t()
fig_t <- fig_t[, c(2, 1, 3)]
# Conditions appear in factor order: Article preview, con. commentary (the
# 49% claim), lib. commentary (the 23% claim) - matching the axis labels.
d1 <- fig_t[, 1] %>% data.frame()
names(d1) <- "perct"
d1$name <- rownames(d1)
p1 <- d1 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Correctly Identified 36% as Correct Rating",
       y = "% of Participants Selecting Each Opinion")
d2 <- fig_t[, 2] %>% data.frame()
names(d2) <- "perct"
d2$name <- rownames(d2)
p2 <- d2 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Mistakenly Identified 23% as Correct Rating",
       y = "")
d3 <- fig_t[, 3] %>% data.frame()
names(d3) <- "perct"
d3$name <- rownames(d3)
p3 <- d3 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Mistakenly Identified 49% as Correct Rating",
       y = "")
p4 <- grid.arrange(p1, p2, p3, nrow = 1)
# subset the dataset for manipulation checks (exact ac_condition labels)
# Commentary = conservative slant - "surveyed more dems"
qcons<-filter(q, ac_condition =="Preview + con. commentary")
# Commentary = liberal slant
qlib<-filter(q, ac_condition == "Preview + lib. commentary" )
# No Commentary
qnor<-filter(q, ac_condition =="Article preview")
## manipulation check - Overall
# cited flaw in the poll: share of each Q5.3 response within each condition
flaw_cons <- round(prop.table(table(qcons$Q5.3)), 2)*100
flaw_lib <- round(prop.table(table(qlib$Q5.3)), 2)*100
flaw_nor <- round(prop.table(table(qnor$Q5.3)), 2)*100
# Merge the three one-way tables on the response category (Var1) so every
# perceived flaw appears once per condition (NA where a condition lacks it).
l <- list(flaw_nor,flaw_lib, flaw_cons)
l <- lapply(l, as.data.frame)
f <- function(x, y) merge(x, y, by="Var1", all=TRUE)
l <- Reduce(f, l)
stargazer(l,
          summary = FALSE,
          dep.var.caption = "Type of commentary",
          covariate.labels = c("Perceived Flaw",
                               "Article Preview(%)",
                               "'Oversampled Republicans' comment (%)",
                               "'Oversampled Democrats' comment (%)"),
          rownames = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary.tex"
          #type = "html",
          #out = "tables/manipulation_check_commentary.html"
          )
## manipulation check by Age
# NOTE(review): prop.table() without a margin gives joint percentages over
# the whole two-way table, not percentages within each age group - confirm
# this is intended (margin = 2 would condition on age group).
flaw_cons_age <- (round(prop.table(table(qcons$Q5.3, qcons$age_group)), 2)*100)
flaw_lib_age <- (round(prop.table(table(qlib$Q5.3, qlib$age_group)), 2)*100)
flaw_nor_age <- (round(prop.table(table(qnor$Q5.3, qnor$age_group)), 2)*100)
# NOTE(review): `t` shadows base::t(); harmless here but easy to trip on.
t <- cbind(flaw_nor_age, flaw_lib_age, flaw_cons_age) %>% data.frame()
stargazer(t,
          summary = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary_by_age.tex",
          #type = "html",
          #out = "tables/manipulation_check_commentary_by_age.html",
          align = TRUE)
## manipulation check by Digital Literacy (same joint-percentage caveat)
flaw_cons_dl <- (round(prop.table(table(qcons$Q5.3, qcons$dl_group)), 2)*100)
flaw_lib_dl <- (round(prop.table(table(qlib$Q5.3, qlib$dl_group)), 2)*100)
flaw_nor_dl <- (round(prop.table(table(qnor$Q5.3, qnor$dl_group)), 2)*100)
dl_t <- cbind(flaw_nor_dl, flaw_lib_dl, flaw_cons_dl) %>% data.frame()
stargazer(dl_t,
          summary = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary_by_dl.tex",
          #type = "html",
          #out = "tables/manipulation_check_commentary_by_dl.html",
          align = TRUE)
| /ac_experiment.R | no_license | kmunger/RAP_DL_replication_materials | R | false | false | 15,158 | r | ############# Anspach and Carlson ##############
# Replication script: does commentary attached to a shared article preview
# change trust in the news outlet, the author/poster, and the cited poll?
rm(list = ls())  # NOTE(review): wipes the session; acceptable in a one-off script
options(stringsAsFactors = FALSE)
library(dplyr)
library(stargazer)
library(ggplot2)
library(gridExtra)
# BUG FIX: plot_model() is called later in this script but its package was
# never attached, so the script failed at the first plot.
library(sjPlot)
# set working directory
setwd("C:/Users/kevin/Desktop/rap_replication")
# load data - (choose sample to analyze)
load("data/q.Rdata") # 10759 X 112
# # w/o mid attention check
# load("data/q_noMid.Rdata") # 11783 X 111
# q <- q_noMid
# # w/o mid attention check & only the 1st 9000
# load("data/q_9k.Rdata") # 9000 X 111
# q <- q_9k
# Create factor variable and labels for the randomized condition
# (Block3_DO records which question block the respondent was shown)
q$ac_condition <- factor(q$Block3_DO,
                         levels = c("Q4.1|Q59",
                                    "Q4.2|Q59",
                                    "Q4.3|Q59"),
                         labels = c("Preview + con. commentary",
                                    "Preview + lib. commentary",
                                    "Article preview"))
# relevel and make no commentary - 'Article preview' - the reference group
q$ac_condition <- relevel(q$ac_condition,
                          ref = "Article preview")
table(q$ac_condition, useNA = "always")
# party affiliation == q$leaning - this is a continuous variable
# Divide age into 4 age groups - based on quartiles
summary(q$age_self)
# Min=18, 1st Qu.=31, Median=45, 3rd Qu.=61, Max=99
# NOTE(review): the bin labels overlap at the boundaries ('18-31' actually
# covers 18-30, etc.); respondents with NA age remain NA.
q$age_group <- NA
q$age_group[q$age_self < 31] <- '18-31'
q$age_group[q$age_self >= 31 & q$age_self < 45] <- '31-45'
q$age_group[q$age_self >= 45 & q$age_self < 61] <- '45-61'
q$age_group[q$age_self >= 61] <- '61-99'
# Divide dl (digital literacy, harg_mean) into 4 groups - based on quartiles
summary(q$harg_mean)
# Min=1, 1st Qu.=3.1, Median=3.9, 3rd Qu.=4.5, Max = 5
q$dl_group <- NA
q$dl_group[q$harg_mean < 3.1] <- '1-3.1'
q$dl_group[q$harg_mean >= 3.1 & q$harg_mean < 3.9] <- '3.1-3.9'
q$dl_group[q$harg_mean >= 3.9 & q$harg_mean < 4.5] <- '3.9-4.5'
q$dl_group[q$harg_mean >= 4.5] <- '4.5-5'
table(q$dl_group, useNA = "always")
## trust outcome coding (used as regression DVs below)
# Trim all white space so the factor levels below match exactly
q$Q5.4 <- trimws(q$Q5.4)
q$Q5.5 <- trimws(q$Q5.5)
q$Q5.6 <- trimws(q$Q5.6)
# Code Q5.4 -- trust in person. Likert responses map to 1-5
# (1 = "Very untrustworthy" ... 5 = "Very trustworthy"); any other
# response becomes NA via the unmatched factor level.
q$trust_person<-as.numeric(factor(q$Q5.4, levels = c("Very untrustworthy",
                                                     "Somewhat untrustworthy",
                                                     "Not sure",
                                                     "Somewhat trustworthy",
                                                     "Very trustworthy")))
# Code Q5.5 -- trust in outlet (same 1-5 coding)
q$trust_outlet<-as.numeric(factor(q$Q5.5, levels = c("Very untrustworthy",
                                                     "Somewhat untrustworthy",
                                                     "Not sure",
                                                     "Somewhat trustworthy",
                                                     "Very trustworthy")))
# Q5.6 -- trust in poll (same 1-5 coding)
q$trust_poll<-as.numeric(factor(q$Q5.6, levels = c("Very untrustworthy",
                                                   "Somewhat untrustworthy",
                                                   "Not sure",
                                                   "Somewhat trustworthy",
                                                   "Very trustworthy")))
# reg trust - age: condition main effects only
fit.trust_outlet_noInteractions <- lm(trust_outlet ~ ac_condition + age_self, data = q)
fit.trust_poll_noInteractions <- lm(trust_poll ~ ac_condition + age_self, data = q)
fit.trust_person_noInteractions <- lm(trust_person ~ ac_condition + age_self, data = q)
# reg trust - age: condition x age interaction
fit.trust_outlet <- lm(trust_outlet ~ ac_condition * age_self, data = q)
fit.trust_poll <- lm(trust_poll ~ ac_condition * age_self, data = q)
fit.trust_person <- lm(trust_person ~ ac_condition * age_self, data = q)
# reg trust - age: interaction plus demographic controls
fit.trust_outlet_controls <- lm(trust_outlet ~ ac_condition * age_self +
                                  male + white + leaning,
                                data = q)
fit.trust_poll_controls <- lm(trust_poll ~ ac_condition * age_self +
                                male + white + leaning,
                              data = q)
fit.trust_person_controls <- lm(trust_person ~ ac_condition * age_self +
                                  male + white + leaning,
                                data = q)
# Regression table: three specifications per outcome.
# BUG FIX 1: the 4th model was fit.trust_outlet_noInteractions (copy-paste
# slip), so the middle column block repeated an outlet model instead of
# starting the poll models.
# BUG FIX 2: dep.var.labels follow the order in which distinct outcomes
# appear (trust_outlet, trust_poll, trust_person); given the Q5.4/Q5.5/Q5.6
# coding above, "Cited poll" belongs to trust_poll and "Author/poster" to
# trust_person, so the last two labels were swapped.
stargazer(fit.trust_outlet_noInteractions, fit.trust_outlet, fit.trust_outlet_controls,
          fit.trust_poll_noInteractions, fit.trust_poll, fit.trust_poll_controls,
          fit.trust_person_noInteractions, fit.trust_person, fit.trust_person_controls,
          covariate.labels = c("Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Age",
                               "Male",
                               "White",
                               "Party",
                               "Preview + con. commentary * Age",
                               "Preview + lib. commentary * Age",
                               "Constant"),
          dep.var.labels = c("News Outlet", "Cited poll", "Author/poster"),
          dep.var.caption = c(""),
          model.numbers = FALSE,
          single.row = TRUE,
          #type='html',
          #out = "tables/fit_age.html"
          type = 'latex',
          out = "tables/fit_age.tex"
          )
# reg trust - dl: condition main effects only
harg.trust_outlet_noInteractions <- lm(trust_outlet ~ ac_condition + harg_mean, data = q)
harg.trust_poll_noInteractions <- lm(trust_poll ~ ac_condition + harg_mean, data = q)
harg.trust_person_noInteractions <- lm(trust_person ~ ac_condition + harg_mean, data = q)
# reg trust - dl: condition x digital-literacy interaction
harg.trust_outlet <- lm(trust_outlet ~ ac_condition * harg_mean, data = q)
harg.trust_poll <- lm(trust_poll ~ ac_condition * harg_mean, data = q)
harg.trust_person <- lm(trust_person ~ ac_condition * harg_mean, data = q)
# reg trust - dl: interaction plus demographic controls
harg.trust_outlet_controls <- lm(trust_outlet ~ ac_condition * harg_mean +
                                   male + white + leaning,
                                 data = q)
harg.trust_poll_controls <- lm(trust_poll ~ ac_condition * harg_mean +
                                 male + white + leaning,
                               data = q)
harg.trust_person_controls <- lm(trust_person ~ ac_condition * harg_mean +
                                   male + white + leaning,
                                 data = q)
# Regression table for the digital-literacy models.
# FIX: "New Outlet" -> "News Outlet" (typo; now consistent with the age
# table), and labels reordered so "Cited poll" matches trust_poll and
# "Author/Poster" matches trust_person (see Q5.4/Q5.6 coding above).
stargazer(harg.trust_outlet_noInteractions, harg.trust_outlet, harg.trust_outlet_controls,
          harg.trust_poll_noInteractions, harg.trust_poll, harg.trust_poll_controls,
          harg.trust_person_noInteractions, harg.trust_person, harg.trust_person_controls,
          covariate.labels = c("Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Digital Literacy",
                               "Male",
                               "White",
                               "Party",
                               "Preview + con. commentary * Digital Literacy",
                               "Preview + lib. commentary * Digital Literacy",
                               "Constant"),
          dep.var.labels = c("News Outlet", "Cited poll", "Author/Poster"),
          dep.var.caption = c(""),
          model.numbers = FALSE,
          single.row = TRUE,
          type = 'latex',
          out = "tables/fit_dl.tex"
          #type='html',
          #out = "tables/fit_dl.html"
          )
# reg trust on power_mean
# NOTE(review): these three models are fitted but never reported anywhere
# below - either table them or drop them.
power.trust_outlet <- lm(trust_outlet ~ ac_condition * power_mean, data = q)
power.trust_poll <- lm(trust_poll ~ ac_condition * power_mean, data = q)
power.trust_person <- lm(trust_person ~ ac_condition * power_mean, data = q)
# Indicator variables for the poll-recall question (Q5.2).
# correct answer
q$poll_36<-0
q$poll_36[q$Q5.2 == "36%"]<-1
# wrong answer conservative commentary
q$poll_49<-0
q$poll_49[q$Q5.2 == "49%"]<-1
# wrong answer liberal commentary
q$poll_23<-0
q$poll_23[q$Q5.2 == "23%"]<-1
# What predicts choosing the correct answer - 36% - for the three treatments?
# Linear probability model: binary outcome regressed with lm().
choose36_age <- lm(poll_36 ~ age_self*ac_condition, data=q)
stargazer(choose36_age,
          covariate.labels = c("Age",
                               "Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Preview + con. commentary * Age",
                               "Preview + lib. commentary * Age",
                               "Constant"),
          dep.var.labels=c(""),
          dep.var.caption = c("Choose correct Answer - 36%"),
          model.numbers = FALSE,
          single.row = TRUE,
          type='latex',
          out = "tables/choose36_age.tex"
          #type='html',
          #out = "tables/choose36_age.html"
          )
# Interaction plot; plot_model() comes from sjPlot, which must be attached
# before this point (NOTE(review): the script header does not load it - confirm).
p36_age <- plot_model(choose36_age, type = 'int', colors = c("grey", "red", "blue")) +
  labs(title = "",
       x = "Age",
       y = "Choose 36% \n (0=did not choose 36%; 1=choose 36%)",
       color = "Treatment") + theme(legend.position = "none")
choose36_dl <- lm(poll_36 ~ harg_mean*ac_condition, data=q)
# NOTE(review): "Digit Literacy" in the labels below is likely a typo for
# "Digital Literacy" (these strings appear in the published table).
stargazer(choose36_dl,
          covariate.labels = c("Digit Literacy",
                               "Preview + con. commentary",
                               "Preview + lib. commentary",
                               "Preview + con. commentary * Digit Literacy",
                               "Preview + lib. commentary * Digit Literacy",
                               "Constant"),
          dep.var.labels=c(""),
          dep.var.caption = c("Choose correct Answer - 36%"),
          model.numbers = FALSE,
          single.row = TRUE,
          # type='html',
          #out = "tables/choose36_dl.html"
          type='latex',
          out = "tables/choose36_dl.tex"
          )
p36_dl <- plot_model(choose36_dl, type = 'int', colors = c("grey", "red", "blue")) +
  labs(title = "",
       x = "Digital Literacy",
       y = "Choose 36% \n (0=did not choose 36%; 1=choose 36%)",
       color = "Treatment") + theme(legend.position = "bottom")
# function to add all plots to a single page with one shared legend
# Extracts the legend grob ("guide-box") from a ggplot so it can be drawn
# once beneath a grid of plots.
g_legend <- function(a.gplot) {
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  # vapply (not sapply) so an unexpected grob structure fails loudly
  # instead of silently returning a list.
  leg <- which(vapply(tmp$grobs, function(x) x$name, character(1)) == "guide-box")
  tmp$grobs[[leg]]
}
mylegend <- g_legend(p36_dl)
# Two interaction plots side by side, shared legend underneath.
p36_combo <- grid.arrange(arrangeGrob(p36_age + theme(legend.position = "none"),
                                      p36_dl + theme(legend.position = "none"),
                                      nrow = 1),
                          mylegend,
                          nrow = 2,
                          heights = c(10, 1))
# Replicate the bar plot from Anspach and Carlson (2018)
# Create df for each type of stimuli
# fig_t: Q5.2 answer (rows) x condition (columns) counts.
fig_t <- table(q$Q5.2, q$ac_condition, useNA = "no")
# BUG FIX: the original `fig_t/colSums(fig_t)` recycled the 3 column sums
# column-major down the 4x3 table (12 cells / 3 sums divides evenly, so no
# warning), silently dividing most cells by the wrong condition's total.
# prop.table(..., margin = 2) computes true within-condition percentages.
fig_t <- round(prop.table(fig_t, margin = 2) * 100, 2)
# Drop the first row (presumably the blank/non-response category - confirm),
# transpose to condition x answer, then order answer columns 36%, 23%, 49%.
fig_t <- fig_t[2:4, ] %>% t()
fig_t <- fig_t[, c(2, 1, 3)]
# Conditions appear in factor order: Article preview, con. commentary (the
# 49% claim), lib. commentary (the 23% claim) - matching the axis labels.
d1 <- fig_t[, 1] %>% data.frame()
names(d1) <- "perct"
d1$name <- rownames(d1)
p1 <- d1 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Correctly Identified 36% as Correct Rating",
       y = "% of Participants Selecting Each Opinion")
d2 <- fig_t[, 2] %>% data.frame()
names(d2) <- "perct"
d2$name <- rownames(d2)
p2 <- d2 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Mistakenly Identified 23% as Correct Rating",
       y = "")
d3 <- fig_t[, 3] %>% data.frame()
names(d3) <- "perct"
d3$name <- rownames(d3)
p3 <- d3 %>%
  ggplot(aes(x = name, y = perct)) +
  geom_bar(stat = "identity") +
  ylim(0, 100) +
  scale_x_discrete(labels = c("Article Preview",
                              "Preview + \n 49% Comment",
                              "Preview + \n 23% Comment")) +
  labs(x = "\n Mistakenly Identified 49% as Correct Rating",
       y = "")
p4 <- grid.arrange(p1, p2, p3, nrow = 1)
# subset the dataset for manipulation checks (exact ac_condition labels)
# Commentary = conservative slant - "surveyed more dems"
qcons<-filter(q, ac_condition =="Preview + con. commentary")
# Commentary = liberal slant
qlib<-filter(q, ac_condition == "Preview + lib. commentary" )
# No Commentary
qnor<-filter(q, ac_condition =="Article preview")
## manipulation check - Overall
# cited flaw in the poll: share of each Q5.3 response within each condition
flaw_cons <- round(prop.table(table(qcons$Q5.3)), 2)*100
flaw_lib <- round(prop.table(table(qlib$Q5.3)), 2)*100
flaw_nor <- round(prop.table(table(qnor$Q5.3)), 2)*100
# Merge the three one-way tables on the response category (Var1) so every
# perceived flaw appears once per condition (NA where a condition lacks it).
l <- list(flaw_nor,flaw_lib, flaw_cons)
l <- lapply(l, as.data.frame)
f <- function(x, y) merge(x, y, by="Var1", all=TRUE)
l <- Reduce(f, l)
stargazer(l,
          summary = FALSE,
          dep.var.caption = "Type of commentary",
          covariate.labels = c("Perceived Flaw",
                               "Article Preview(%)",
                               "'Oversampled Republicans' comment (%)",
                               "'Oversampled Democrats' comment (%)"),
          rownames = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary.tex"
          #type = "html",
          #out = "tables/manipulation_check_commentary.html"
          )
## manipulation check by Age
# NOTE(review): prop.table() without a margin gives joint percentages over
# the whole two-way table, not percentages within each age group - confirm
# this is intended (margin = 2 would condition on age group).
flaw_cons_age <- (round(prop.table(table(qcons$Q5.3, qcons$age_group)), 2)*100)
flaw_lib_age <- (round(prop.table(table(qlib$Q5.3, qlib$age_group)), 2)*100)
flaw_nor_age <- (round(prop.table(table(qnor$Q5.3, qnor$age_group)), 2)*100)
# NOTE(review): `t` shadows base::t(); harmless here but easy to trip on.
t <- cbind(flaw_nor_age, flaw_lib_age, flaw_cons_age) %>% data.frame()
stargazer(t,
          summary = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary_by_age.tex",
          #type = "html",
          #out = "tables/manipulation_check_commentary_by_age.html",
          align = TRUE)
## manipulation check by Digital Literacy (same joint-percentage caveat)
flaw_cons_dl <- (round(prop.table(table(qcons$Q5.3, qcons$dl_group)), 2)*100)
flaw_lib_dl <- (round(prop.table(table(qlib$Q5.3, qlib$dl_group)), 2)*100)
flaw_nor_dl <- (round(prop.table(table(qnor$Q5.3, qnor$dl_group)), 2)*100)
dl_t <- cbind(flaw_nor_dl, flaw_lib_dl, flaw_cons_dl) %>% data.frame()
stargazer(dl_t,
          summary = FALSE,
          type = "latex",
          out = "tables/manipulation_check_commentary_by_dl.tex",
          #type = "html",
          #out = "tables/manipulation_check_commentary_by_dl.html",
          align = TRUE)
|
---
title: "White_Wines"
author: "Anuj Nimkar"
date: "Monday, April 27, 2015"
output: html_document
---
### Setting the work directory and loading in the train data
```{r}
setwd('F:/Anuj/Study & Work/Sem 2 Courses/INST 737/Final Project')
trainData <- read.csv('wineWhites_trainData.csv', sep = ',')
testData <- read.csv('wineWhites_testData.csv', sep = ',')
```
### Cleaning the data and ensuring data consistency
```{r}
# Drop incomplete rows, then keep only chemically plausible measurements.
trainData <- na.omit(trainData)
trainData <- subset(trainData,
                    (trainData$fixed.acidity >= 0.0 & trainData$fixed.acidity <= 10.0) &
                      (trainData$volatile.acidity >= 0.0 & trainData$volatile.acidity <= 1.0) &
                      (trainData$citric.acid >= 0.0 & trainData$citric.acid <=1.0) &
                      (trainData$residual.sugar >= 0.0 & trainData$residual.sugar <= 100.0) &
                      (trainData$chlorides >= 0.0 & trainData$chlorides <= 1.0) &
                      (trainData$free.sulfur.dioxide >= 0.0 & trainData$free.sulfur.dioxide <= 100.0 ))
```
### Decision tree
```{r}
# Install caret only when it is missing: an unconditional install.packages()
# re-downloads on every knit and fails without internet access.
if (!requireNamespace("caret", quietly = TRUE)) install.packages('caret')
library(tree)
library(rpart)
library(caret)
# Regression tree ("anova") on all 11 physico-chemical predictors.
rt <- rpart(quality ~ fixed.acidity+volatile.acidity+citric.acid+residual.sugar+chlorides+free.sulfur.dioxide+total.sulfur.dioxide+density+pH+sulphates+alcohol, minsplit=10, data=trainData, method="anova")
testData <- testData[complete.cases(testData),]
# Bug fix: quality.pred was used in confusionMatrix() below but never
# defined, which aborted this chunk with "object 'quality.pred' not found".
quality.pred <- predict(rt, testData)
testData$quality <- quality.pred
write.csv(testData,file = "white_Wines_test_Data_Predictions.csv", row.names = FALSE)
# NOTE(review): testData carries no held-out ground-truth quality at this
# point, so this compares the predictions against themselves; caret's
# confusionMatrix() also expects factors, not the numeric output of an
# "anova" tree.  Confirm the intended evaluation (e.g. score a labelled
# validation split instead).
confusionMatrix(quality.pred,testData$quality)
```
| /white_Wines_Decision_Tree.R | no_license | anujnimkar/WineQuality_Analysis_and_Prediction-using-R | R | false | false | 1,604 | r | ---
title: "White_Wines"
author: "Anuj Nimkar"
date: "Monday, April 27, 2015"
output: html_document
---
### Setting the work directory and loading in the train data
```{r}
setwd('F:/Anuj/Study & Work/Sem 2 Courses/INST 737/Final Project')
trainData <- read.csv('wineWhites_trainData.csv', sep = ',')
testData <- read.csv('wineWhites_testData.csv', sep = ',')
```
### Cleaning the data and ensuring data consistency
```{r}
trainData <- na.omit(trainData)
trainData <- subset(trainData,
(trainData$fixed.acidity >= 0.0 & trainData$fixed.acidity <= 10.0) &
(trainData$volatile.acidity >= 0.0 & trainData$volatile.acidity <= 1.0) &
(trainData$citric.acid >= 0.0 & trainData$citric.acid <=1.0) &
(trainData$residual.sugar >= 0.0 & trainData$residual.sugar <= 100.0) &
(trainData$chlorides >= 0.0 & trainData$chlorides <= 1.0) &
(trainData$free.sulfur.dioxide >= 0.0 & trainData$free.sulfur.dioxide <= 100.0 ))
```
### Decision tree
```{r}
install.packages('caret')
library(tree)
library(rpart)
library(caret)
rt <- rpart(quality ~ fixed.acidity+volatile.acidity+citric.acid+residual.sugar+chlorides+free.sulfur.dioxide+total.sulfur.dioxide+density+pH+sulphates+alcohol, minsplit=10, data=trainData, method="anova")
testData <- testData[complete.cases(testData),]
testData$quality <- predict(rt,testData)
write.csv(testData,file = "white_Wines_test_Data_Predictions.csv", row.names = FALSE)
confusionMatrix(quality.pred,testData$quality)
```
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gamma2gammasCon.R
\name{Gamma2gammasCon}
\alias{Gamma2gammasCon}
\title{Constrains non-diagonal matrix elements of transition probability matrix.}
\usage{
Gamma2gammasCon(Gamma)
}
\arguments{
\item{Gamma}{Transition probability matrix.}
}
\value{
Vector of constrained non-diagonal matrix elements (column-wise).
}
\description{
Constrains non-diagonal matrix elements of transition probability matrix.
}
\details{
Function shifts 0 and 1 non-diagonal elements by \code{1e-3}.
}
| /man/Gamma2gammasCon.Rd | no_license | Fratelino/fHMM | R | false | true | 557 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gamma2gammasCon.R
\name{Gamma2gammasCon}
\alias{Gamma2gammasCon}
\title{Constrains non-diagonal matrix elements of transition probability matrix.}
\usage{
Gamma2gammasCon(Gamma)
}
\arguments{
\item{Gamma}{Transition probability matrix.}
}
\value{
Vector of constrained non-diagonal matrix elements (column-wise).
}
\description{
Constrains non-diagonal matrix elements of transition probability matrix.
}
\details{
Function shifts 0 and 1 non-diagonal elements by \code{1e-3}.
}
|
#' @title test_edger
#'
#' @description Function \code{test_edger} computes the statistics for quasi-likelihood F-tests or likelihood ratio tests.
#'
#' @param data data frame containing the raw counts of sequencing reads. Columns correspond to genes, rows to samples.
#' @param condition vector of levels corresponding to order of samples in data.
#' @param type type of test "lrt" for likelihood ratio tests, "qlf" for quasi-likelihood F-tests.
#' @param ... other arguments passed on to \code{glmFit} or \code{glmQLFit}.
#'
#' @return A data frame with the following columns:
#' \item{id}{The ID of the observable, taken from the row names of the counts slots.}
#' \item{log2.fold}{The log2 of the fold change.}
#' \item{pval}{The p-values for rejecting the null hypothesis about the means equality.}
#' @importFrom edgeR DGEList
#' @importFrom edgeR calcNormFactors
#' @importFrom edgeR estimateDisp
#' @importFrom edgeR glmFit
#' @importFrom edgeR glmLRT
#' @importFrom edgeR glmQLFT
#' @importFrom edgeR glmQLFit
#' @importFrom edgeR glmQLFTest
#' @importFrom stats model.matrix
#' @keywords internal
test_edger <- function(data, condition, type="lrt", ...) {
  # Validate the test type up front; previously an unrecognised value fell
  # through both if-blocks and failed with "object 'result' not found".
  type <- match.arg(type, c("lrt", "qlf"))
  # Standard edgeR pipeline: build the DGEList, TMM-normalise and estimate
  # dispersions for the two-group design.
  y <- DGEList(counts = data, group = condition)
  y <- calcNormFactors(y)
  design <- model.matrix(~condition)
  y <- estimateDisp(y, design)
  if (type == "lrt") {
    fit <- glmFit(y, design, ...)
    lrt <- glmLRT(fit, coef = 2)
    # Use the documented `table` component instead of the positional slot
    # lookup lrt@.Data[[14]], which silently breaks if edgeR ever reorders
    # the object's components.
    result <- lrt$table
  } else {
    fit <- glmQLFit(y, design, ...)
    qlf <- glmQLFTest(fit, coef = 2)
    result <- qlf$table  # was the fragile qlf@.Data[[17]]
  }
  # For both test types the first table column is the log2 fold change and
  # the fourth the p-value (LRT: logFC, logCPM, LR, PValue; QLF: logFC,
  # logCPM, F, PValue).
  result$id <- rownames(result)
  colnames(result)[1] <- "log2.fold"
  colnames(result)[4] <- "pval"
  # Select output columns by name rather than by position c(5, 1, 4).
  result <- result[, c("id", "log2.fold", "pval")]
  rownames(result) <- seq_len(nrow(result))
  return(result)
}
| /MLExpResso/R/test_edger.R | no_license | geneticsMiNIng/MLGenSig | R | false | false | 1,684 | r | #' @title test_edger
#'
#' @description Function \code{test_edger} computes the statistics for quasi-likelihood F-tests or likelihood ratio tests .
#'
#' @param data data frame containing the raw counts of sequencing reads. Columns corresponds to genes, rows to samples.
#' @param condition vector of levels coresponding to order of samples in data.
#' @param type type of test "lrt" for likelihood ratio tests, "qlf" for quasi-likelihood F-tests.
#' @param ... other arguments
#'
#' @return A data frame with the following columns:
#' \item{id}{The ID of the observable, taken from the row names of the counts slots.}
#' \item{log2.fold}{The log2 of the fold change.}
#' \item{pval}{The p-values for rejecting the null hypothesis about the means equality.}
#' @importFrom edgeR DGEList
#' @importFrom edgeR calcNormFactors
#' @importFrom edgeR estimateDisp
#' @importFrom edgeR glmFit
#' @importFrom edgeR glmLRT
#' @importFrom edgeR glmQLFit
#' @importFrom edgeR glmQLFTest
#' @importFrom stats model.matrix
#' @keywords internal
test_edger <- function(data, condition, type="lrt", ...) {
y <- DGEList(counts = data, group = condition)
y <- calcNormFactors(y)
design <- model.matrix(~condition)
y <- estimateDisp(y, design)
if (type == "lrt") {
fit <- glmFit(y, design, ...)
lrt <- glmLRT(fit, coef = 2)
result <- lrt@.Data[[14]]
}
if (type == "qlf") {
fit <- glmQLFit(y, design, ...)
qlf <- glmQLFTest(fit, coef = 2)
result <- qlf@.Data[[17]]
}
result$id <- rownames(result)
colnames(result)[1] <- "log2.fold"
colnames(result)[4] <- "pval"
result <- result[, c(5, 1, 4)]
rownames(result) <- 1:nrow(result)
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StudentDistribution.R
\docType{class}
\name{Student-class}
\alias{Student-class}
\alias{Student}
\alias{quantile,Student-method}
\alias{simulate,Student,numeric-method}
\title{Student's t data distribution}
\usage{
Student(two_armed = TRUE)
\S4method{quantile}{Student}(x, probs, n, theta, ...)
\S4method{simulate}{Student,numeric}(object, nsim, n, theta, seed = NULL, ...)
}
\arguments{
\item{two_armed}{logical indicating if a two-armed trial is regarded}
\item{x}{outcome}
\item{probs}{vector of probabilities}
\item{n}{sample size}
\item{theta}{distribution parameter}
\item{...}{further optional arguments}
\item{object}{object of class \code{Student}}
\item{nsim}{number of simulation runs}
\item{seed}{random seed}
}
\description{
Implements exact t-distributions instead of a normal approximation
}
\examples{
datadist <- Student(two_armed = TRUE)
}
\seealso{
see \code{\link{probability_density_function}} and
\code{\link{cumulative_distribution_function}} to evaluate the pdf
and the cdf, respectively.
}
| /man/StudentDataDistribution-class.Rd | permissive | kkmann/adoptr | R | false | true | 1,110 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StudentDistribution.R
\docType{class}
\name{Student-class}
\alias{Student-class}
\alias{Student}
\alias{quantile,Student-method}
\alias{simulate,Student,numeric-method}
\title{Student's t data distribution}
\usage{
Student(two_armed = TRUE)
\S4method{quantile}{Student}(x, probs, n, theta, ...)
\S4method{simulate}{Student,numeric}(object, nsim, n, theta, seed = NULL, ...)
}
\arguments{
\item{two_armed}{logical indicating if a two-armed trial is regarded}
\item{x}{outcome}
\item{probs}{vector of probabilities}
\item{n}{sample size}
\item{theta}{distribution parameter}
\item{...}{further optional arguments}
\item{object}{object of class \code{Student}}
\item{nsim}{number of simulation runs}
\item{seed}{random seed}
}
\description{
Implements exact t-distributions instead of a normal approximation
}
\examples{
datadist <- Student(two_armed = TRUE)
}
\seealso{
see \code{\link{probability_density_function}} and
\code{\link{cumulative_distribution_function}} to evaluate the pdf
and the cdf, respectively.
}
|
##Quesion 1##
# Flag households (ACS microdata) that sold > $10k of agricultural products
# (AGS == 6) on a lot of 10+ acres (ACR == 3); which() lists their row ids.
v_data_q1 <- read.csv("getdata-data-ss06hid.csv")
agricultureLogical <- v_data_q1$AGS == 6 & v_data_q1$ACR == 3
which(agricultureLogical)
##Quesion 2##
# Download the quiz image and report the 30th and 80th quantiles of the
# native (integer) pixel representation.
library(jpeg)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
file_path <- paste(getwd(), "/", "jeff.jpg", sep = "")
download.file(url, file_path)
picture <- readJPEG(file_path, native = TRUE)
quantile(x = picture, probs = c(0.3, 0.8))
##Quesion 3##
##Quesion 4##
##Quesion 5## | /Week 3/answer.R | no_license | nortonle/GettingAndCleaningData | R | false | false | 464 | r | ##Quesion 1##
v_data_q1 <- read.csv("getdata-data-ss06hid.csv")
agricultureLogical <- v_data_q1$AGS == 6 & v_data_q1$ACR == 3
which(agricultureLogical)
##Quesion 2##
library(jpeg)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
file_path <- paste(getwd(), "/", "jeff.jpg", sep = "")
download.file(url, file_path)
picture <- readJPEG(file_path, native = TRUE)
quantile(x = picture, probs = c(0.3, 0.8))
##Quesion 3##
##Quesion 4##
##Quesion 5## |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fluoroscripts.R
\docType{data}
\name{protein.spectra}
\alias{protein.spectra}
\title{Spectral information of selected fluorescent proteins.}
\format{A data frame with 5600 rows and 4 variables.
\describe{
\item{protein}{Name of protein}
\item{wavelength}{Wavelength in nm}
\item{excitation}{Relative excitation curve scaled to range of 0-1}
\item{emission}{Relative emission curve scaled to range of 0-1}
}}
\usage{
protein.spectra
}
\description{
Information combines spectra available through
http://www.spectra.arizona.edu/ with manual spectra plot digitization from
original publications.
}
\keyword{datasets}
| /man/protein.spectra.Rd | permissive | ssokolen/fluoroscripts | R | false | true | 705 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fluoroscripts.R
\docType{data}
\name{protein.spectra}
\alias{protein.spectra}
\title{Spectral information of selected fluorescent proteins.}
\format{A data frame with 5600 rows and 4 variables.
\describe{
\item{protein}{Name of protein}
\item{wavelength}{Wavelength in nm}
\item{excitation}{Relative excitation curve scaled to range of 0-1}
\item{emission}{Relative emission curve scaled to range of 0-1}
}}
\usage{
protein.spectra
}
\description{
Information combines spectra available through
http://www.spectra.arizona.edu/ with manual spectra plot digitation from
original publications.
}
\keyword{datasets}
|
library(ape)
# Read the gene tree for locus 10329_1 and remove its root: downstream
# codeml/PAML analyses expect an unrooted topology.
testtree <- read.tree("10329_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10329_1_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10329_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("10329_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10329_1_unrooted.txt") |
## Build a special "matrix" object that can cache its inverse.  Returns a
## list of four accessor closures (set/get/setinverse/getinverse) sharing
## the matrix `x` and its cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## The inverse is computed with solve() only on the first call; subsequent
## calls reuse the cached value (announced with a message).
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute the inverse and store it for future calls.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | shadynassrat/ProgrammingAssignment2 | R | false | false | 760 | r | ## makeCasheMatrix is a function that make a matrix and cache the inverse of it.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve is a function that compute the inverse of the matrix of makeCasheMatrix.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
## Returing the matrix
m
}
|
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 4.01944536247129e+84, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853882-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 659 | r | testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 4.01944536247129e+84, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
#------------------------------------------------
# The following commands ensure that package dependencies are listed in the NAMESPACE file.
#' @useDynLib bobFunctionsEpi
#' @importFrom Rcpp evalCpp
#' @importFrom odin odin
NULL
# -----------------------------------
#' functionTimer
#'
#' Calls an Rcpp function that includes a timer around an editable block. Useful for comparing speed of different function implementations.
#'
#' @param reps number of times to repeat block
#'
#' @return whatever the compiled \code{functionTimer_cpp} routine returns
#'   (timing output; see the C++ source) -- TODO confirm and document
#'
#' @export
functionTimer <- function(reps=1) {
  # Thin wrapper: all timing work happens in the compiled routine.
  functionTimer_cpp(reps)
}
| /R/misc.R | no_license | bobverity/bobFunctionsEpi | R | false | false | 562 | r | #------------------------------------------------
# The following commands ensure that package dependencies are listed in the NAMESPACE file.
#' @useDynLib bobFunctionsEpi
#' @importFrom Rcpp evalCpp
#' @importFrom odin odin
NULL
# -----------------------------------
#' functionTimer
#'
#' Calls an Rcpp function that includes a timer around an editable block. Useful for comparing speed of different function implementations.
#'
#' @param reps number of times to repeat block
#'
#' @export
functionTimer <- function(reps=1) {
functionTimer_cpp(reps)
}
|
#' @title PrepareSimulationData
#' @description Prepare Data for WTP simulation
#' @param stan_est Stan fit model from FitMDCEV
#' @param policies list containing
#' price_p with additive price increases, and
#' dat_psi_p with new psi data
#' @param nsims Number of simulation draws to use for parameter uncertainty
#' @param price_change_only Choose if there is no change in dat_psi_p variables
#' @return A list with individual-specific data (df_indiv) and common data (df_common)
#' and n_classes for number of classes and model_num for model type
#' @export
PrepareSimulationData <- function(stan_est, policies, nsims = 30, price_change_only = FALSE){
  # Checks on simulation options: cap nsims at the number of available
  # parameter draws (MVN draws for MLE fits, posterior draws for Bayes fits)
  model_num <- stan_est$stan_data$model_num
  if (stan_est$algorithm == "MLE" && nsims > stan_est$n_draws){
    nsims <- stan_est$n_draws
    warning("Number of simulations > Number of mvn draws from stan_est. nsims has been set to: ", nsims)
  } else if (stan_est$algorithm == "Bayes" && nsims > max(stan_est$est_pars$sim_id)) {
    nsims <- max(stan_est$est_pars$sim_id)
    warning("Number of simulations > Number of posterior draws from stan_est. nsims has been set to: ", nsims)
  }
  # Sample from parameter estimate draws: pick nsims draw ids at random,
  # then pull every parameter row belonging to those draws
  est_sim <- stan_est$est_pars %>%
    distinct(sim_id) %>%
    sample_n(., nsims ) %>%
    left_join(stan_est$est_pars, by = "sim_id")
  if(stan_est$n_classes == 1){
    # Single class: one processing pass; everything except df_indiv is
    # treated as "common" data shared across individuals
    sim_welfare <- ProcessSimulationData(est_sim, stan_est, policies, nsims, price_change_only)
    df_common <- sim_welfare
    df_common$df_indiv <- NULL
    df_indiv <- sim_welfare$df_indiv
  } else if(stan_est$n_classes > 1){
    # Latent classes: split the draws by class (parameter names appear to
    # follow "<parm>.<class>.<good>" -- see separate() below), then run the
    # same processing once per class
    est_sim_lc <- suppressWarnings(est_sim %>% # suppress warnings about scale not having a class parameter
      filter(!stringr::str_detect(.data$parms, "beta")) %>%
      tidyr::separate(.data$parms, into = c("parms", "class", "good")) %>%
      mutate(good = ifelse(is.na(as.numeric(.data$good)), "0", .data$good )) %>%
      tidyr::unite(parms, parms, good))
    est_sim_lc <- split( est_sim_lc , f = est_sim_lc$class )
    names(est_sim_lc) <- rep("est_sim", stan_est$n_classes)
    est_sim_lc <- purrr::map(est_sim_lc, function(x){ x %>%
      select(-class)})
    sim_welfare <- purrr::map(est_sim_lc, ProcessSimulationData, stan_est, policies, nsims, price_change_only)
    df_common <- purrr::map(sim_welfare, `[`, c("price_p_list", "gamma_sim_list", "alpha_sim_list", "scale_sim"))
    names(df_common) <- rep("df_common", stan_est$n_classes)
    df_indiv <- purrr::flatten(purrr::map(sim_welfare, `[`, c("df_indiv")))
  }
  sim_options <- list(n_classes = stan_est$n_classes,
                      model_num = model_num,
                      price_change_only = price_change_only)
  df_wtp <- list(df_indiv = df_indiv,
                 df_common = df_common,
                 sim_options = sim_options)
  return(df_wtp)
}
#'@importFrom rlang .data
#' Internal workhorse for PrepareSimulationData: for one (latent-class) set
#' of parameter draws, extracts the simulated gamma/alpha/scale/psi values
#' and bundles them with the baseline individual-level data.  Relies on
#' package-internal helpers (GrabParms, CreateListsRow, CreateListsCol,
#' MultiplyMatrix, DoCbind) defined elsewhere in the package.
ProcessSimulationData <- function(est_sim, stan_est, policies, nsims, price_change_only){
  J <- stan_est$stan_data$J
  I <- stan_est$stan_data$I
  # gammas: model 2 fixes every gamma at 1; otherwise use the drawn values
  # (transposed so rows index simulation draws)
  if (stan_est$stan_data$model_num == 2)
    gamma_sim <- matrix(1, nsims, J)
  else if (stan_est$stan_data$model_num != 2)
    gamma_sim <- t(GrabParms(est_sim, "gamma"))
  gamma_sim_list <- CreateListsRow(gamma_sim) # Put in a list for each simulation
  # alphas: model 1 pads with zeros, model 3 recycles one alpha across all
  # J+1 columns, model 4 pins alphas at (numerically) zero
  if (stan_est$stan_data$model_num != 4){
    alpha_sim <- t(GrabParms(est_sim, "alpha"))
    if (stan_est$stan_data$model_num == 1)
      alpha_sim <- cbind(alpha_sim, matrix(0, nsims, J) )
    else if (stan_est$stan_data$model_num == 3)
      alpha_sim <- matrix(rep(alpha_sim,each=J+1), ncol=J+1, byrow=TRUE)
  } else if (stan_est$stan_data$model_num ==4)
    alpha_sim <- matrix(1e-6, nsims, J+1)
  alpha_sim_list <- CreateListsRow(alpha_sim)
  # scales: fixed-scale models use 1 for every draw
  if (stan_est$stan_data$fixed_scale == 0)
    scale_sim <- t(GrabParms(est_sim, "scale"))
  else if (stan_est$stan_data$fixed_scale == 1)
    scale_sim = matrix(1, nsims, 1)
  # psi: multiply each draw of psi coefficients into the baseline psi design
  # matrix, then reshape (via the CreateListsRow/matrix steps below) into
  # per-unit nsims x J matrices
  psi_temp <- GrabParms(est_sim, "psi")
  npols <- length(policies$price_p)
  psi_temp <- CreateListsCol(psi_temp)
  psi_sim <- purrr::map(psi_temp, MultiplyMatrix, mat_temp = stan_est$stan_data$dat_psi, n_rows = I)
  psi_sim <- DoCbind(psi_sim)
  psi_sim <- CreateListsRow(psi_sim)
  psi_sim <- purrr::map(psi_sim, function(x){matrix(x , nrow = nsims, byrow = TRUE)})
  psi_sim <- list(psi_sim)
  names(psi_sim) <- "psi_sim"
  if (price_change_only == FALSE) {
    # psi_p: repeat the psi construction under each policy's psi data
    psi_p_sim <- purrr::map(psi_temp, function(psi){ purrr::map(policies[["dat_psi_p"]], MultiplyMatrix, x = psi, n_rows = I)})
    psi_p_sim <- purrr::map(psi_p_sim, DoCbind)
    psi_p_sim <- DoCbind(psi_p_sim)
    psi_p_sim <- CreateListsRow(psi_p_sim)
    psi_p_sim <- purrr::map(psi_p_sim, function(x){aperm(array(x, dim = c(J, npols, nsims)), perm=c(2,1,3))})
    # Ensure psi_p_sim is a list of J lists each with nsims lists of npol X ngood matrices
    psi_p_sim <- purrr::map(psi_p_sim, function(x){lapply(seq_len(nsims), function(i) x[,,i])})
    psi_p_sim <- list(psi_p_sim)
    names(psi_p_sim) <- "psi_p_sim"
  } else if (price_change_only == TRUE){
    # Price-only policies reuse baseline psi; the NULL simply drops
    # psi_p_sim from the c() call below
    psi_p_sim <- NULL
  }
  # Set baseline individual data into lists
  inc <- list(as.list(stan_est$stan_data$inc))
  names(inc) <- "inc"
  quant_j <- list(CreateListsRow(stan_est$stan_data$j_quant))
  names(quant_j) <- "quant_j"
  price <- cbind(1, stan_est$stan_data$j_price) #add numeraire price to price matrix (<-1)
  price <- list(CreateListsRow(price))
  names(price) <- "price"
  # Pull individual level data into one list
  df_indiv <- c(inc, quant_j, price, psi_sim, psi_p_sim)
  out <- list(df_indiv = df_indiv,
              price_p_list = policies$price_p,
              gamma_sim_list = gamma_sim_list,
              alpha_sim_list = alpha_sim_list,
              scale_sim = scale_sim)
  return(out)
}
| /R/PrepareSimulationData.R | no_license | notabigdeal/rmdcev | R | false | false | 5,628 | r | #' @title PrepareSimulationData
#' @description Prepare Data for WTP simulation
#' @param stan_est Stan fit model from FitMDCEV
#' @param policies list containing
#' price_p with additive price increases, and
#' dat_psi_p with new psi data
#' @param nsims Number of simulation draws to use for parameter uncertainty
#' @param price_change_only Choose if there is no change in dat_psi_p variables
#' @return A list with individual-specific data (df_indiv) and common data (df_common)
#' and n_classes for number of classes and model_num for model type
#' @export
PrepareSimulationData <- function(stan_est, policies, nsims = 30, price_change_only = FALSE){
# Checks on simulation options
model_num <- stan_est$stan_data$model_num
if (stan_est$algorithm == "MLE" && nsims > stan_est$n_draws){
nsims <- stan_est$n_draws
warning("Number of simulations > Number of mvn draws from stan_est. nsims has been set to: ", nsims)
} else if (stan_est$algorithm == "Bayes" && nsims > max(stan_est$est_pars$sim_id)) {
nsims <- max(stan_est$est_pars$sim_id)
warning("Number of simulations > Number of posterior draws from stan_est. nsims has been set to: ", nsims)
}
# Sample from parameter estimate draws
est_sim <- stan_est$est_pars %>%
distinct(sim_id) %>%
sample_n(., nsims ) %>%
left_join(stan_est$est_pars, by = "sim_id")
if(stan_est$n_classes == 1){
sim_welfare <- ProcessSimulationData(est_sim, stan_est, policies, nsims, price_change_only)
df_common <- sim_welfare
df_common$df_indiv <- NULL
df_indiv <- sim_welfare$df_indiv
} else if(stan_est$n_classes > 1){
est_sim_lc <- suppressWarnings(est_sim %>% # suppress warnings about scale not having a class parameter
filter(!stringr::str_detect(.data$parms, "beta")) %>%
tidyr::separate(.data$parms, into = c("parms", "class", "good")) %>%
mutate(good = ifelse(is.na(as.numeric(.data$good)), "0", .data$good )) %>%
tidyr::unite(parms, parms, good))
est_sim_lc <- split( est_sim_lc , f = est_sim_lc$class )
names(est_sim_lc) <- rep("est_sim", stan_est$n_classes)
est_sim_lc <- purrr::map(est_sim_lc, function(x){ x %>%
select(-class)})
sim_welfare <- purrr::map(est_sim_lc, ProcessSimulationData, stan_est, policies, nsims, price_change_only)
df_common <- purrr::map(sim_welfare, `[`, c("price_p_list", "gamma_sim_list", "alpha_sim_list", "scale_sim"))
names(df_common) <- rep("df_common", stan_est$n_classes)
df_indiv <- purrr::flatten(purrr::map(sim_welfare, `[`, c("df_indiv")))
}
sim_options <- list(n_classes = stan_est$n_classes,
model_num = model_num,
price_change_only = price_change_only)
df_wtp <- list(df_indiv = df_indiv,
df_common = df_common,
sim_options = sim_options)
return(df_wtp)
}
#'@importFrom rlang .data
#'
#'
#'
ProcessSimulationData <- function(est_sim, stan_est, policies, nsims, price_change_only){
J <- stan_est$stan_data$J
I <- stan_est$stan_data$I
# gammas
if (stan_est$stan_data$model_num == 2)
gamma_sim <- matrix(1, nsims, J)
else if (stan_est$stan_data$model_num != 2)
gamma_sim <- t(GrabParms(est_sim, "gamma"))
gamma_sim_list <- CreateListsRow(gamma_sim) # Put in a list for each simulation
# alphas
if (stan_est$stan_data$model_num != 4){
alpha_sim <- t(GrabParms(est_sim, "alpha"))
if (stan_est$stan_data$model_num == 1)
alpha_sim <- cbind(alpha_sim, matrix(0, nsims, J) )
else if (stan_est$stan_data$model_num == 3)
alpha_sim <- matrix(rep(alpha_sim,each=J+1), ncol=J+1, byrow=TRUE)
} else if (stan_est$stan_data$model_num ==4)
alpha_sim <- matrix(1e-6, nsims, J+1)
alpha_sim_list <- CreateListsRow(alpha_sim)
# scales
if (stan_est$stan_data$fixed_scale == 0)
scale_sim <- t(GrabParms(est_sim, "scale"))
else if (stan_est$stan_data$fixed_scale == 1)
scale_sim = matrix(1, nsims, 1)
# psi
psi_temp <- GrabParms(est_sim, "psi")
npols <- length(policies$price_p)
psi_temp <- CreateListsCol(psi_temp)
psi_sim <- purrr::map(psi_temp, MultiplyMatrix, mat_temp = stan_est$stan_data$dat_psi, n_rows = I)
psi_sim <- DoCbind(psi_sim)
psi_sim <- CreateListsRow(psi_sim)
psi_sim <- purrr::map(psi_sim, function(x){matrix(x , nrow = nsims, byrow = TRUE)})
psi_sim <- list(psi_sim)
names(psi_sim) <- "psi_sim"
if (price_change_only == FALSE) {
# psi_p
psi_p_sim <- purrr::map(psi_temp, function(psi){ purrr::map(policies[["dat_psi_p"]], MultiplyMatrix, x = psi, n_rows = I)})
psi_p_sim <- purrr::map(psi_p_sim, DoCbind)
psi_p_sim <- DoCbind(psi_p_sim)
psi_p_sim <- CreateListsRow(psi_p_sim)
psi_p_sim <- purrr::map(psi_p_sim, function(x){aperm(array(x, dim = c(J, npols, nsims)), perm=c(2,1,3))})
# Ensure psi_p_sim is a list of J lists each with nsims lists of npol X ngood matrices
psi_p_sim <- purrr::map(psi_p_sim, function(x){lapply(seq_len(nsims), function(i) x[,,i])})
psi_p_sim <- list(psi_p_sim)
names(psi_p_sim) <- "psi_p_sim"
} else if (price_change_only == TRUE){
psi_p_sim <- NULL
}
# Set baseline individual data into lists
inc <- list(as.list(stan_est$stan_data$inc))
names(inc) <- "inc"
quant_j <- list(CreateListsRow(stan_est$stan_data$j_quant))
names(quant_j) <- "quant_j"
price <- cbind(1, stan_est$stan_data$j_price) #add numeraire price to price matrix (<-1)
price <- list(CreateListsRow(price))
names(price) <- "price"
# Pull individual level data into one list
df_indiv <- c(inc, quant_j, price, psi_sim, psi_p_sim)
out <- list(df_indiv = df_indiv,
price_p_list = policies$price_p,
gamma_sim_list = gamma_sim_list,
alpha_sim_list = alpha_sim_list,
scale_sim = scale_sim)
return(out)
}
|
# Grab the simulation results quickly! ####
# Written by Arianna Krinos, last edits by KJF on 25 May 2017
# NOTE(review): interactive script -- readline() and the hard-coded setwd()
# below assume a specific local machine; adjust before running elsewhere.
#install.packages('gplots') #if you are without gplots
#install.packages('plotrix') #if you are without plotrix
library(gplots) #if you just opened R
library(plotrix) #if you just opened R
# You need these numbers if you aren't iterating through *all* heatmaps
# You can go ahead and type in numbers regardless though, won't matter (otherwise comment out)
column <- as.numeric(readline(prompt = "Tell me what variable you're interested in (from namekey.csv): ")) + 3
column2 <- as.numeric(readline(prompt = "Tell me what variable you're interested in (from namekey2.csv): "))
#setwd("C:/Users/arian_000/Documents/Spring 2017/Carey Lab/MyResults_Sunapee10May/AIK_Sunapee_10May") # AIK working directory
setwd("C:/Users/farrellk/Documents/GRAPLER N & P/AIK_Sunapee_10May") # KF working directory
#setwd("./AIK_Sunapee_10May") # If I'm able to upload 10May results folder to GitHub, this will run wd through GLM-GRAPLEr project
# GENERAL SECTION - must be run no matter what to do anything else with this script ####
# section presently ends on line 104
# sim_summary.csv layout (inferred from the indexing below -- confirm
# against the actual file): column 1 = sim folder name, then one group of 4
# columns per driver variable, with the driver's name in column 2+4j and
# its condition values in column 5+4j.
sim_summary <- read.csv("sim_summary.csv", header = FALSE)
colnames(sim_summary) <- lapply(sim_summary[1,], as.character)
folders_to_find <- matrix(NA, ncol = (length(colnames(sim_summary)) - 1 ) / 4 + 1, nrow = length(sim_summary[,1])) # Expand this if you're looking for multiple factors
folders_to_find2 <- matrix(NA, ncol = (length(colnames(sim_summary)) - 1 ) / 4 + 1, nrow = length(sim_summary[,1])) # Expand this if you're looking for multiple factors
colname_folders <- c(rep(NA, ((length(colnames(sim_summary)) - 1) / 4) + 1))
conditions <- c(rep(NA, ((length(colnames(sim_summary)) - 1) / 4)))
colname_folders[1] <- "Sim Folder"
# For each driver j: record its name and assign() a "<name>_conditions"
# variable holding that driver's condition column; `conditions` keeps the
# generated variable names so they can be looked up with as.name()/eval().
for (j in 0:((length(colnames(sim_summary)) - 1)/4 - 1)) {
  colname_folders[j+2] <- colnames(sim_summary)[2 + j * 4]
  assign(paste0(colname_folders[j+2], "_conditions"), sim_summary[5 + j * 4])
  conditions[j + 1] <- paste0(colname_folders[j+2], "_conditions")
}
colnames(folders_to_find) <- colname_folders
print("Below are the conditions present in the sim folder: ", quote = FALSE)
print(conditions)
print("Type the name of the first analysis variable: ", quote = FALSE)
#var1 <- readline(prompt = "enter it here: ")
#var2 <- readline(prompt = "second please: ")
var2 <- "AirTemp_conditions" #listing as variable means you don't need eval(as.name()) or the deparse below
var1 <- "FLOW_conditions" #"Rain_conditions"
# alternatively, just set var1 and var2 (needed outside interactive mode)
# `exclude` collects every condition variable other than var1/var2 (those
# are held at baseline); `exclude2` is the same but keeps var2 as well.
exclude <- c(rep(NA, length(conditions) - 2))
tick <- 1
for (k in 1:length(conditions)) {
  if (as.name(conditions[k]) != as.name(var1) && as.name(conditions[k]) != as.name(var2)) {
    exclude[tick] <- conditions[k]
    tick = tick + 1
  }
}
exclude2 <- c(rep(NA, length(conditions) - 1))
tick <- 1
for (k in 1:length(conditions)) {
  if (as.name(conditions[k]) != as.name(var1)) {
    exclude2[tick] <- conditions[k]
    tick = tick + 1
  }
}
folders <- data.frame(lapply(sim_summary[,1], as.character), stringsAsFactors = FALSE)
# Keep only simulations where every excluded driver sits at a baseline
# value (0, or the unchanged multiplier 1); copy the folder name and each
# driver's condition value into folders_to_find.
counter <- 1
for (i in 1:length(sim_summary[,1])) {
  save = TRUE
  if (length(exclude) != 0) {
    for (y in 1:length(exclude)) {
      if (eval(as.name(exclude[y]))[i,1] != 0 && eval(as.name(exclude[y]))[i,1] != 1) {
        save = FALSE
      }
    }
  }
  if (save) {
    folders_to_find[counter, 1] = folders[1, i]
    for (z in 2:(length(folders_to_find[1,]))) {
      folders_to_find[counter, z] = eval(as.name(conditions[z - 1]))[i,1]
    }
    counter = counter + 1
  }
}
# NOTE(review): `1:counter-1` parses as `(1:counter) - 1`, i.e. 0:(counter-1);
# R silently drops the 0 index, so this keeps rows 1..counter-1 as intended,
# but seq_len(counter - 1) would say so explicitly.
folders_to_find <- folders_to_find[1:counter-1,] # Get rid of everything you don't need.
counter <- 1
for (i in 1:length(sim_summary[,1])) {
save = FALSE
basel = TRUE
if (length(exclude2) != 0) {
for (y in 1:length(exclude2)) {
if (eval(as.name(exclude2[y]))[i,1] == 0 || (eval(as.name(exclude2[y]))[i,1] == 1.0 && exclude2[y] == "FLOW_conditions")) {
save = TRUE
}
}
}
if (save) {
folders_to_find2[counter, 1] = folders[1, i]
for (z in 2:(length(folders_to_find2[1,]))) {
folders_to_find2[counter, z] = eval(as.name(conditions[z - 1]))[i,1]
if (((folders_to_find2[counter, z] != 0 && var1 != "FLOW_conditions" && z == 3) || (folders_to_find2[counter,z] != 1 && var1 == "FLOW_conditions" && z == 2))){ #&& folders_to_find2[counter,z] != 1) {
basel = FALSE
}
}
if (basel) {
baseline = folders[1, i]
}
counter = counter + 1
}
}
folders_to_find2 <- folders_to_find2[1:counter-1,] # Get rid of everything you don't need.
curr_directory <- getwd()
sim_folders <- list.files(paste0(curr_directory, '/Sims'))
# INITIALIZATION OF NEEDED STORAGE UNITS ####
new_counter <- 1
new_counter_2 <-1
example <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[1], "/Results/Sim__.csv"))
example2 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[1], "/Results/Data_By_Time_GRAPLEready.csv"))
example3 <- read.csv(paste0(curr_directory, "/Sims/", folders_to_find2[1,1], "/Results/timeseries_nutrient.csv"))
aggregate <- t(c(rep(0, length(example[1,]))))
aggregate2 <- t(c(rep(0, (length(example2[1,]) + length(folders_to_find2[1,]) - 1))))
aggregate3 <- c(rep(0, length(example3[,1])))
colnames(aggregate) <- colnames(example)
colnames(aggregate2) <- c((conditions), colnames(example2))
allconditions <- matrix(nrow = length(folders_to_find[,1]) + 1, ncol = length(conditions))
# PLOT HEAT MAPS AND SAVE TO OWN FOLDER ####
# COLLATE DATA FOR PERMUTATIONS OF VAR1 AND VAR2 (the ones you inputted)
counter2 <- 1
for (j in 1:length(sim_folders)) { # Iterate through sim folders
for (k in 1:length(folders_to_find[,1])) {
if (sim_folders[j] == folders_to_find[k,1]) {
curr_sim0 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[j], "/Results/Sim__.csv")) # Add/change as you desire
new_counter = new_counter + 1
aggregate <- rbind(aggregate, curr_sim0)
allconditions[counter2 + 1,] <- folders_to_find[k,2:length(folders_to_find[1,])]#eval(as.name(conditions[m]))[j,1]
counter2 = counter2 + 1
break
}
}
# THIS IS YEARLY DATA FOR VAR1
for (k in 1:length(folders_to_find2[,1])) {
if (sim_folders[j] == folders_to_find2[k,1]) {
curr_sim1 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[j], "/Results/Data_By_Time_GRAPLEready.csv"))
curr_sim1 <- curr_sim1[1:(length(curr_sim1[,1]) - 1),]
new_counter_2 <- new_counter_2 + 1
blankcol <- rep(0, length(curr_sim1[,1]))
blankcols <- rep(0, length(curr_sim1[,1]))
for (p in 1:(length(folders_to_find2[1,])-2)) {
blankcols <- cbind(blankcols, blankcol)
}
curr_sim1 <- cbind(blankcols, curr_sim1)
for (i in 1:length(curr_sim1[,1])) {
curr_sim1[i,1:(length(folders_to_find2[1,])-1)] <- folders_to_find2[k,2:length(folders_to_find2[1,])]
}
colnames(curr_sim1) <- c((conditions), colnames(example2))
aggregate2 <- rbind(aggregate2, curr_sim1)
}
}
}
allconditions[1,] <- c(rep(0, length(allconditions[1,])))
colnames(allconditions) <- conditions
allconditions <- allconditions[1:length(aggregate[,1]),]
aggregate <- cbind(allconditions, aggregate)
aggregate <- aggregate[2:length(aggregate[,1]),]
aggregate2 <- aggregate2[2:length(aggregate2[,1]),]
names <- colnames(aggregate)
# Units and names to be added!!
newdir <- dir.create(paste0('Compare_14April17__', as.name(var1), as.name(var2))) # change the directory as needed
setwd(paste0(getwd(), '/Compare_14April17__', as.name(var1), as.name(var2)))
for (column in 1:length(aggregate[1,])) {
units <- c(rep(0, length(conditions)), rep("deg C", 9), rep("mg/L", 22), rep("ug/L", 4), rep("ug/L", 26),
rep('WIP', 2), rep("mg/L", 6), rep("kg",4), rep("mg/L", 6), rep("kg", 4), rep("mg/L", 6),
rep("kg", 4), rep("WIP", 12))
all <- cbind(names, units)
for (c in 1:length(colnames(allconditions))) {
if (as.name(var1) == colnames(allconditions)[c]) {
togglevar <- allconditions[2:length(allconditions[,1]),c]
break
}
}
for (d in 1:length(colnames(allconditions))) {
if (as.name(var2) == colnames(allconditions)[d]) {
comparevar <- allconditions[2:length(allconditions[,1]),d]
break
}
}
uniquevars_toggle <- sort.int(as.numeric(unique(togglevar)), decreasing = FALSE)
uniquevars_compare <- sort.int(as.numeric(unique(comparevar)), decreasing = FALSE)
totalcomparedata <- matrix(nrow = length(folders_to_find[,1]), ncol = 3)
totalcomparedata <- data.frame(totalcomparedata)
#column <- 38 # Column of var of interest in aggregate - if you're looking at aggregate 2 subtract one: 38, 42, 50, 17, 18, 19
toggle_counter <- 1
toggle_curr <- togglevar[toggle_counter]
counter_new <- 1
comparedata <- matrix(0, nrow = length(uniquevars_toggle), ncol = length(uniquevars_compare))
row.names(comparedata) <- uniquevars_toggle
colnames(comparedata) <- uniquevars_compare
# Organize results for a particular set of factors into a matrix
for (toggle_counter in 1:length(uniquevars_toggle)) {
toggle_curr <- uniquevars_toggle[toggle_counter]
for (z in 1:length(togglevar)) {
if (togglevar[z] == toggle_curr) {
for (y in 1:length(uniquevars_compare)) {
if (comparevar[z] == uniquevars_compare[y]) {
curr2 = y
}
for (f in 1:length(aggregate[,1])) {
if (comparevar[z] == aggregate[f, d] && toggle_curr == aggregate[f, c]) {
find = f
}
}
}
comparedata[toggle_counter, curr2] <- aggregate[find, column]
totalcomparedata[counter_new,] <- c(toggle_curr, comparevar[z], aggregate[find,column])
counter_new = counter_new + 1
}
}
}
jet.colors <-
colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red"))(128)
par(cex.lab = 1)
par(cex.axis = 1)
par(mar = c(3,3,3,3))
png(file = paste0(as.character(all[column, 1]), '_', var1, '_', var2, '_8Apr17.png'))
try (heatmap.2(comparedata, density.info = 'none', dendrogram = 'none', Rowv = FALSE, Colv = FALSE,
col = jet.colors, key.title = "Gradient", key.xlab = as.character(all[column, 2]),
main = as.character(all[column, 1]), ylab = paste0(var1, ' Increment'), xlab = paste0(var2, ' Increment'),
tracecol = NA, cex.lab = 1, cex.axis = 1, margins = c(6,6), labRow = round(as.numeric(rownames(comparedata)),2),
labCol = round(as.numeric(colnames(comparedata)),2)), silent = TRUE)
#breaks = seq(1400,2000, length.out = 129)) #To set axis during standardization process
dev.off()
}
# ------------------ Sort data by date data
newdir <- dir.create(paste0('Compare_Yearly_13April17_', as.name(var1)))
setwd(paste0(getwd(), '/Compare_Yearly_13April17_', as.name(var1)))
for (column2 in (length(conditions) + 2):length(aggregate2[1,])) {
for (c in 1:length(colnames(allconditions))) {
if (as.name(var1) == colnames(allconditions)[c]) {
togglevar <- allconditions[length(allconditions[,1]),c]
break
}
}
d <- 3
togglevar <-aggregate2[,c]
comparevar <- aggregate2[,d]
uniquevars_toggle <- sort.int(as.numeric(unique(togglevar)), decreasing = FALSE)
uniquevars_compare <- sort.int(as.numeric(unique(aggregate2[,d])), decreasing = FALSE) # look at dates
totalcomparedata2 <- matrix(nrow = length(folders_to_find[,1]), ncol = 3)
totalcomparedata2 <- data.frame(totalcomparedata)
#column <- 38 # Column of var of interest in aggregate - if you're looking at aggregate 2 subtract one: 38, 42, 50, 17, 18, 19
toggle_counter <- 1
toggle_curr <- togglevar[toggle_counter]
counter_new <- 1
comparedata2 <- matrix(0, nrow = length(uniquevars_toggle), ncol = length(uniquevars_compare))
row.names(comparedata2) <- uniquevars_toggle
colnames(comparedata2) <- uniquevars_compare
# Organize results for a particular set of factors into a matrix
for (toggle_counter in 1:length(uniquevars_toggle)) {
toggle_curr <- uniquevars_toggle[toggle_counter]
for (z in 1:length(togglevar)) {
if (togglevar[z] == toggle_curr) {
for (y in 1:length(uniquevars_compare)) {
if (comparevar[z] == uniquevars_compare[y]) {
curr2 = y
}
for (f in 1:length(aggregate2[,1])) {
if (comparevar[z] == aggregate2[f, d] && toggle_curr == aggregate2[f, c]) {
find = f
}
}
}
comparedata2[toggle_counter, curr2] <- as.numeric(aggregate2[find, column2])
totalcomparedata2[counter_new,] <- c(toggle_curr, comparevar[z], as.numeric(aggregate2[find,column2]))
counter_new = counter_new + 1
}
}
}
#comparedata2 <- comparedata2[,2:length(comparedata2[1,])]
title <- colnames(aggregate2)[column2] #comment this out if you don't have aggregate2 already stored!!
par(cex.lab = 1)
par(cex.axis = 1)
par(mar = c(3,3,3,3))
png(file = paste0(title, '_', var1, '_year', '_14Apr17.png'))
try(heatmap.2((comparedata2), density.info = 'none', dendrogram = 'none', Rowv = FALSE, Colv = FALSE,
col = jet.colors, key.title = "Gradient", key.xlab = 'units',
main = title, ylab = paste0(var1, ' Increment'), xlab = paste0('Year'),
tracecol = NA, cex.lab = 1, cex.axis = 1, margins = c(6,6), labRow = round(as.numeric(rownames(comparedata2)),2),
labCol = round(as.numeric(colnames(comparedata2)),2)), silent = TRUE)
dev.off()
} | /Scripts/Sim_Output_HeatMaps_Flow.R | no_license | CareyLabVT/GLM-GRAPLEr | R | false | false | 13,659 | r | # Grab the simulation results quickly! ####
# Written by Arianna Krinos, last edits by KJF on 25 May 2017
#install.packages('gplots') #if you are without gplots
#install.packages('plotrix') #if you are without plotrix
library(gplots) #if you just opened R
library(plotrix) #if you just opened R
# You need these numbers if you aren't iterating through *all* heatmaps
# You can go ahead and type in numbers regardless though, won't matter (otherwise comment out)
column <- as.numeric(readline(prompt = "Tell me what variable you're interested in (from namekey.csv): ")) + 3
column2 <- as.numeric(readline(prompt = "Tell me what variable you're interested in (from namekey2.csv): "))
#setwd("C:/Users/arian_000/Documents/Spring 2017/Carey Lab/MyResults_Sunapee10May/AIK_Sunapee_10May") # AIK working directory
setwd("C:/Users/farrellk/Documents/GRAPLER N & P/AIK_Sunapee_10May") # KF working directory
#setwd("./AIK_Sunapee_10May") # If I'm able to upload 10May results folder to GitHub, this will run wd through GLM-GRAPLEr project
# GENERAL SECTION - must be run no matter what to do anything else with this script ####
# section presently ends on line 104
sim_summary <- read.csv("sim_summary.csv", header = FALSE)
colnames(sim_summary) <- lapply(sim_summary[1,], as.character)
folders_to_find <- matrix(NA, ncol = (length(colnames(sim_summary)) - 1 ) / 4 + 1, nrow = length(sim_summary[,1])) # Expand this if you're looking for multiple factors
folders_to_find2 <- matrix(NA, ncol = (length(colnames(sim_summary)) - 1 ) / 4 + 1, nrow = length(sim_summary[,1])) # Expand this if you're looking for multiple factors
colname_folders <- c(rep(NA, ((length(colnames(sim_summary)) - 1) / 4) + 1))
conditions <- c(rep(NA, ((length(colnames(sim_summary)) - 1) / 4)))
colname_folders[1] <- "Sim Folder"
for (j in 0:((length(colnames(sim_summary)) - 1)/4 - 1)) {
colname_folders[j+2] <- colnames(sim_summary)[2 + j * 4]
assign(paste0(colname_folders[j+2], "_conditions"), sim_summary[5 + j * 4])
conditions[j + 1] <- paste0(colname_folders[j+2], "_conditions")
}
colnames(folders_to_find) <- colname_folders
print("Below are the conditions present in the sim folder: ", quote = FALSE)
print(conditions)
print("Type the name of the first analysis variable: ", quote = FALSE)
#var1 <- readline(prompt = "enter it here: ")
#var2 <- readline(prompt = "second please: ")
var2 <- "AirTemp_conditions" #listing as variable means you don't need eval(as.name()) or the deparse below
var1 <- "FLOW_conditions" #"Rain_conditions"
# alternatively, just set var1 and var2 (needed outside interactive mode)
exclude <- c(rep(NA, length(conditions) - 2))
tick <- 1
for (k in 1:length(conditions)) {
if (as.name(conditions[k]) != as.name(var1) && as.name(conditions[k]) != as.name(var2)) {
exclude[tick] <- conditions[k]
tick = tick + 1
}
}
exclude2 <- c(rep(NA, length(conditions) - 1))
tick <- 1
for (k in 1:length(conditions)) {
if (as.name(conditions[k]) != as.name(var1)) {
exclude2[tick] <- conditions[k]
tick = tick + 1
}
}
folders <- data.frame(lapply(sim_summary[,1], as.character), stringsAsFactors = FALSE)
counter <- 1
for (i in 1:length(sim_summary[,1])) {
save = TRUE
if (length(exclude) != 0) {
for (y in 1:length(exclude)) {
if (eval(as.name(exclude[y]))[i,1] != 0 && eval(as.name(exclude[y]))[i,1] != 1) {
save = FALSE
}
}
}
if (save) {
folders_to_find[counter, 1] = folders[1, i]
for (z in 2:(length(folders_to_find[1,]))) {
folders_to_find[counter, z] = eval(as.name(conditions[z - 1]))[i,1]
}
counter = counter + 1
}
}
folders_to_find <- folders_to_find[1:counter-1,] # Get rid of everything you don't need.
counter <- 1
for (i in 1:length(sim_summary[,1])) {
save = FALSE
basel = TRUE
if (length(exclude2) != 0) {
for (y in 1:length(exclude2)) {
if (eval(as.name(exclude2[y]))[i,1] == 0 || (eval(as.name(exclude2[y]))[i,1] == 1.0 && exclude2[y] == "FLOW_conditions")) {
save = TRUE
}
}
}
if (save) {
folders_to_find2[counter, 1] = folders[1, i]
for (z in 2:(length(folders_to_find2[1,]))) {
folders_to_find2[counter, z] = eval(as.name(conditions[z - 1]))[i,1]
if (((folders_to_find2[counter, z] != 0 && var1 != "FLOW_conditions" && z == 3) || (folders_to_find2[counter,z] != 1 && var1 == "FLOW_conditions" && z == 2))){ #&& folders_to_find2[counter,z] != 1) {
basel = FALSE
}
}
if (basel) {
baseline = folders[1, i]
}
counter = counter + 1
}
}
folders_to_find2 <- folders_to_find2[1:counter-1,] # Get rid of everything you don't need.
curr_directory <- getwd()
sim_folders <- list.files(paste0(curr_directory, '/Sims'))
# INITIALIZATION OF NEEDED STORAGE UNITS ####
new_counter <- 1
new_counter_2 <-1
example <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[1], "/Results/Sim__.csv"))
example2 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[1], "/Results/Data_By_Time_GRAPLEready.csv"))
example3 <- read.csv(paste0(curr_directory, "/Sims/", folders_to_find2[1,1], "/Results/timeseries_nutrient.csv"))
aggregate <- t(c(rep(0, length(example[1,]))))
aggregate2 <- t(c(rep(0, (length(example2[1,]) + length(folders_to_find2[1,]) - 1))))
aggregate3 <- c(rep(0, length(example3[,1])))
colnames(aggregate) <- colnames(example)
colnames(aggregate2) <- c((conditions), colnames(example2))
allconditions <- matrix(nrow = length(folders_to_find[,1]) + 1, ncol = length(conditions))
# PLOT HEAT MAPS AND SAVE TO OWN FOLDER ####
# COLLATE DATA FOR PERMUTATIONS OF VAR1 AND VAR2 (the ones you inputted)
counter2 <- 1
for (j in 1:length(sim_folders)) { # Iterate through sim folders
for (k in 1:length(folders_to_find[,1])) {
if (sim_folders[j] == folders_to_find[k,1]) {
curr_sim0 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[j], "/Results/Sim__.csv")) # Add/change as you desire
new_counter = new_counter + 1
aggregate <- rbind(aggregate, curr_sim0)
allconditions[counter2 + 1,] <- folders_to_find[k,2:length(folders_to_find[1,])]#eval(as.name(conditions[m]))[j,1]
counter2 = counter2 + 1
break
}
}
# THIS IS YEARLY DATA FOR VAR1
for (k in 1:length(folders_to_find2[,1])) {
if (sim_folders[j] == folders_to_find2[k,1]) {
curr_sim1 <- read.csv(paste0(curr_directory, "/Sims/", sim_folders[j], "/Results/Data_By_Time_GRAPLEready.csv"))
curr_sim1 <- curr_sim1[1:(length(curr_sim1[,1]) - 1),]
new_counter_2 <- new_counter_2 + 1
blankcol <- rep(0, length(curr_sim1[,1]))
blankcols <- rep(0, length(curr_sim1[,1]))
for (p in 1:(length(folders_to_find2[1,])-2)) {
blankcols <- cbind(blankcols, blankcol)
}
curr_sim1 <- cbind(blankcols, curr_sim1)
for (i in 1:length(curr_sim1[,1])) {
curr_sim1[i,1:(length(folders_to_find2[1,])-1)] <- folders_to_find2[k,2:length(folders_to_find2[1,])]
}
colnames(curr_sim1) <- c((conditions), colnames(example2))
aggregate2 <- rbind(aggregate2, curr_sim1)
}
}
}
allconditions[1,] <- c(rep(0, length(allconditions[1,])))
colnames(allconditions) <- conditions
allconditions <- allconditions[1:length(aggregate[,1]),]
aggregate <- cbind(allconditions, aggregate)
aggregate <- aggregate[2:length(aggregate[,1]),]
aggregate2 <- aggregate2[2:length(aggregate2[,1]),]
names <- colnames(aggregate)
# Units and names to be added!!
newdir <- dir.create(paste0('Compare_14April17__', as.name(var1), as.name(var2))) # change the directory as needed
setwd(paste0(getwd(), '/Compare_14April17__', as.name(var1), as.name(var2)))
for (column in 1:length(aggregate[1,])) {
units <- c(rep(0, length(conditions)), rep("deg C", 9), rep("mg/L", 22), rep("ug/L", 4), rep("ug/L", 26),
rep('WIP', 2), rep("mg/L", 6), rep("kg",4), rep("mg/L", 6), rep("kg", 4), rep("mg/L", 6),
rep("kg", 4), rep("WIP", 12))
all <- cbind(names, units)
for (c in 1:length(colnames(allconditions))) {
if (as.name(var1) == colnames(allconditions)[c]) {
togglevar <- allconditions[2:length(allconditions[,1]),c]
break
}
}
for (d in 1:length(colnames(allconditions))) {
if (as.name(var2) == colnames(allconditions)[d]) {
comparevar <- allconditions[2:length(allconditions[,1]),d]
break
}
}
uniquevars_toggle <- sort.int(as.numeric(unique(togglevar)), decreasing = FALSE)
uniquevars_compare <- sort.int(as.numeric(unique(comparevar)), decreasing = FALSE)
totalcomparedata <- matrix(nrow = length(folders_to_find[,1]), ncol = 3)
totalcomparedata <- data.frame(totalcomparedata)
#column <- 38 # Column of var of interest in aggregate - if you're looking at aggregate 2 subtract one: 38, 42, 50, 17, 18, 19
toggle_counter <- 1
toggle_curr <- togglevar[toggle_counter]
counter_new <- 1
comparedata <- matrix(0, nrow = length(uniquevars_toggle), ncol = length(uniquevars_compare))
row.names(comparedata) <- uniquevars_toggle
colnames(comparedata) <- uniquevars_compare
# Organize results for a particular set of factors into a matrix
for (toggle_counter in 1:length(uniquevars_toggle)) {
toggle_curr <- uniquevars_toggle[toggle_counter]
for (z in 1:length(togglevar)) {
if (togglevar[z] == toggle_curr) {
for (y in 1:length(uniquevars_compare)) {
if (comparevar[z] == uniquevars_compare[y]) {
curr2 = y
}
for (f in 1:length(aggregate[,1])) {
if (comparevar[z] == aggregate[f, d] && toggle_curr == aggregate[f, c]) {
find = f
}
}
}
comparedata[toggle_counter, curr2] <- aggregate[find, column]
totalcomparedata[counter_new,] <- c(toggle_curr, comparevar[z], aggregate[find,column])
counter_new = counter_new + 1
}
}
}
jet.colors <-
colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red"))(128)
par(cex.lab = 1)
par(cex.axis = 1)
par(mar = c(3,3,3,3))
png(file = paste0(as.character(all[column, 1]), '_', var1, '_', var2, '_8Apr17.png'))
try (heatmap.2(comparedata, density.info = 'none', dendrogram = 'none', Rowv = FALSE, Colv = FALSE,
col = jet.colors, key.title = "Gradient", key.xlab = as.character(all[column, 2]),
main = as.character(all[column, 1]), ylab = paste0(var1, ' Increment'), xlab = paste0(var2, ' Increment'),
tracecol = NA, cex.lab = 1, cex.axis = 1, margins = c(6,6), labRow = round(as.numeric(rownames(comparedata)),2),
labCol = round(as.numeric(colnames(comparedata)),2)), silent = TRUE)
#breaks = seq(1400,2000, length.out = 129)) #To set axis during standardization process
dev.off()
}
# ------------------ Sort data by date data
newdir <- dir.create(paste0('Compare_Yearly_13April17_', as.name(var1)))
setwd(paste0(getwd(), '/Compare_Yearly_13April17_', as.name(var1)))
for (column2 in (length(conditions) + 2):length(aggregate2[1,])) {
for (c in 1:length(colnames(allconditions))) {
if (as.name(var1) == colnames(allconditions)[c]) {
togglevar <- allconditions[length(allconditions[,1]),c]
break
}
}
d <- 3
togglevar <-aggregate2[,c]
comparevar <- aggregate2[,d]
uniquevars_toggle <- sort.int(as.numeric(unique(togglevar)), decreasing = FALSE)
uniquevars_compare <- sort.int(as.numeric(unique(aggregate2[,d])), decreasing = FALSE) # look at dates
totalcomparedata2 <- matrix(nrow = length(folders_to_find[,1]), ncol = 3)
totalcomparedata2 <- data.frame(totalcomparedata)
#column <- 38 # Column of var of interest in aggregate - if you're looking at aggregate 2 subtract one: 38, 42, 50, 17, 18, 19
toggle_counter <- 1
toggle_curr <- togglevar[toggle_counter]
counter_new <- 1
comparedata2 <- matrix(0, nrow = length(uniquevars_toggle), ncol = length(uniquevars_compare))
row.names(comparedata2) <- uniquevars_toggle
colnames(comparedata2) <- uniquevars_compare
# Organize results for a particular set of factors into a matrix
for (toggle_counter in 1:length(uniquevars_toggle)) {
toggle_curr <- uniquevars_toggle[toggle_counter]
for (z in 1:length(togglevar)) {
if (togglevar[z] == toggle_curr) {
for (y in 1:length(uniquevars_compare)) {
if (comparevar[z] == uniquevars_compare[y]) {
curr2 = y
}
for (f in 1:length(aggregate2[,1])) {
if (comparevar[z] == aggregate2[f, d] && toggle_curr == aggregate2[f, c]) {
find = f
}
}
}
comparedata2[toggle_counter, curr2] <- as.numeric(aggregate2[find, column2])
totalcomparedata2[counter_new,] <- c(toggle_curr, comparevar[z], as.numeric(aggregate2[find,column2]))
counter_new = counter_new + 1
}
}
}
#comparedata2 <- comparedata2[,2:length(comparedata2[1,])]
title <- colnames(aggregate2)[column2] #comment this out if you don't have aggregate2 already stored!!
par(cex.lab = 1)
par(cex.axis = 1)
par(mar = c(3,3,3,3))
png(file = paste0(title, '_', var1, '_year', '_14Apr17.png'))
try(heatmap.2((comparedata2), density.info = 'none', dendrogram = 'none', Rowv = FALSE, Colv = FALSE,
col = jet.colors, key.title = "Gradient", key.xlab = 'units',
main = title, ylab = paste0(var1, ' Increment'), xlab = paste0('Year'),
tracecol = NA, cex.lab = 1, cex.axis = 1, margins = c(6,6), labRow = round(as.numeric(rownames(comparedata2)),2),
labCol = round(as.numeric(colnames(comparedata2)),2)), silent = TRUE)
dev.off()
} |
##
# The five essential tasks to complete the Course Project are as follows.
# Loading Activities
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
#
#
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
# 1. -- Merge training and test sets to create one data set.
#Set working directory on my computer
setwd('C:/HardDisk/Coursera/explotary Analysis/Exploratory-Data-Analysis/Assignment/VersionA/Data');
#features Loading and extraction for Column Naming
features =read.csv('./features.txt',header = FALSE, sep = ' ')
features <- as.character(features[,2])
# Read in the data, label set, and subject codes for the test data
xtestdata <- read.table("./test/x_test.txt")
ytestdata <- read.table("./test/y_test.txt")
testsubjects <- read.table("./test/subject_test.txt")
# Test Construction and renaming of the Column
Test<- data.frame(testsubjects ,ytestdata,xtestdata)
names(Test) <- c(c('subject', 'activity'), features)
# Read in the data,label set, and subject codes for the train data
xtraindata <- read.table("./train/x_train.txt")
ytraindata <- read.table("./train/y_train.txt")
trainsubjects <- read.table("./train/subject_train.txt")
#Train Construction and renaming of the Column
Train<- data.frame(trainsubjects , ytraindata, xtraindata)
names(Train) <- c(c('subject', 'activity'), features)
# merge Test and Train it is the objective of the step 1
all <- rbind(Test, Train)
#--------------------------------------------------------------------------------------------
# 2. -- Extracts only the measurements on the mean and standard deviation for each measurement.
# grab the mean or std
colselect<-grep("mean|sted",features)
# remove columns that are not means or std. deviation features
filter <- all[,c(1,2,colselect + 2)]
#--------------------------------------------------------------------------------------------
# 3. -- Uses descriptive activity names to name the activities in the data set.
# Read the set of activity labels from the txt file
Labels <- read.table("./activity_labels.txt")
# Adjustment for character
Labels <- as.character(Labels[,2])
# New name
filter$activity <- Labels[filter$activity]
#--------------------------------------------------------------------------------------------
# 4. -- Appropriately label the data set with descriptive variable names.
# Manual Correction using the gsub
temporary <- names(filter)
temporary<- gsub("[(][)]", "", temporary)
temporary <- gsub("^t", "TimeDomain_", temporary)
temporary <- gsub("^f", "FrequencyDomain_", temporary)
temporary <- gsub("Acc", "Accelerometer", temporary)
temporary <- gsub("Gyro", "Gyroscope", temporary)
temporary <- gsub("Mag", "Magnitude", temporary)
temporary <- gsub("-mean-", "_Mean_", temporary)
temporary <- gsub("-std-", "_StandardDeviation_", temporary)
temporary <- gsub("-", "_", temporary)
names(filter) <- temporary
#--------------------------------------------------------------------------------------------
# 5.-- Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# use the function aggregate
tidy <- aggregate(filter[,3:48], by = list(activity = filter$activity, subject = filter$subject),FUN = mean)
#write the table
write.table(x = tidy, file = "data_very_tidy.txt", row.names = FALSE)
| /run_analysis.R | no_license | didiervila76/Getting_Cleaning_Data_Assignement | R | false | false | 3,970 | r | ##
# The five essential tasks to complete the Course Project are as follows.
# Loading Activities
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
#
#
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
# 1. -- Merge training and test sets to create one data set.
#Set working directory on my computer
setwd('C:/HardDisk/Coursera/explotary Analysis/Exploratory-Data-Analysis/Assignment/VersionA/Data');
#features Loading and extraction for Column Naming
features =read.csv('./features.txt',header = FALSE, sep = ' ')
features <- as.character(features[,2])
# Read in the data, label set, and subject codes for the test data
xtestdata <- read.table("./test/x_test.txt")
ytestdata <- read.table("./test/y_test.txt")
testsubjects <- read.table("./test/subject_test.txt")
# Test Construction and renaming of the Column
Test<- data.frame(testsubjects ,ytestdata,xtestdata)
names(Test) <- c(c('subject', 'activity'), features)
# Read in the data,label set, and subject codes for the train data
xtraindata <- read.table("./train/x_train.txt")
ytraindata <- read.table("./train/y_train.txt")
trainsubjects <- read.table("./train/subject_train.txt")
#Train Construction and renaming of the Column
Train<- data.frame(trainsubjects , ytraindata, xtraindata)
names(Train) <- c(c('subject', 'activity'), features)
# merge Test and Train it is the objective of the step 1
all <- rbind(Test, Train)
#--------------------------------------------------------------------------------------------
# 2. -- Extracts only the measurements on the mean and standard deviation for each measurement.
# grab the mean or std
colselect<-grep("mean|sted",features)
# remove columns that are not means or std. deviation features
filter <- all[,c(1,2,colselect + 2)]
#--------------------------------------------------------------------------------------------
# 3. -- Uses descriptive activity names to name the activities in the data set.
# Read the set of activity labels from the txt file
Labels <- read.table("./activity_labels.txt")
# Adjustment for character
Labels <- as.character(Labels[,2])
# New name
filter$activity <- Labels[filter$activity]
#--------------------------------------------------------------------------------------------
# 4. -- Appropriately label the data set with descriptive variable names.
# Manual Correction using the gsub
temporary <- names(filter)
temporary<- gsub("[(][)]", "", temporary)
temporary <- gsub("^t", "TimeDomain_", temporary)
temporary <- gsub("^f", "FrequencyDomain_", temporary)
temporary <- gsub("Acc", "Accelerometer", temporary)
temporary <- gsub("Gyro", "Gyroscope", temporary)
temporary <- gsub("Mag", "Magnitude", temporary)
temporary <- gsub("-mean-", "_Mean_", temporary)
temporary <- gsub("-std-", "_StandardDeviation_", temporary)
temporary <- gsub("-", "_", temporary)
names(filter) <- temporary
#--------------------------------------------------------------------------------------------
# 5.-- Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# use the function aggregate
tidy <- aggregate(filter[,3:48], by = list(activity = filter$activity, subject = filter$subject),FUN = mean)
#write the table
write.table(x = tidy, file = "data_very_tidy.txt", row.names = FALSE)
|
### Examples: adding arms to trials in progress

## Required packages. Use library() rather than require() so a missing
## package fails immediately instead of returning FALSE and letting the
## script continue.
library(MAMS)
library(parallel)
library(mvtnorm)

## Source the helper functions (e.g. condtests) used below.
## NOTE(review): the original `funloc <- ()` was a syntax error left as a
## placeholder; set funloc to the path of the helper-function file before
## running.
funloc <- ""  # TODO: set to the path of the sourced functions file
source(funloc)
#####################################################################################################
# Defining parameters for the existing MAMS trial.
# Number of experimental treatment arms
K <- 2
# Index vector over the experimental treatments (1, ..., K)
X <- seq_len(K)
# Number of interim analyses (stages)
J <- 3
# Allocation ratios for each experimental treatment at each stage
r <- 1:3
# Allocation ratios on the control arm at each stage
r0 <- 1:3
# Target family-wise error rate (FWER)
alpha <- 0.05
# Target power
power <- 0.9
# Effect-size parameters for the trial on the MAMS probability scale
# (p0 = 0.5 corresponds to no treatment effect; see ?MAMS for the exact
# interpretation of p and p0 — TODO confirm against the package docs).
p <- 0.75
p0 <- 0.5
#set up the original MAMS trial
#maximum intersection to set the lower boundaries
trial = mamsc(K=K,
J=J,
alpha=alpha,
r=r,
r0=r0,
power=power,
p=p,
p0=p0,
ushape="triangular",
lshape="triangular")
lfix = trial$l[-J]
#remove the maximum an compute the boundaries for the remaining tests
X = X[-K]
#construct all other tests
#set up parallelisation cores
#Calculate the number of cores
#note the parallelisation isn't needed for low numbers of treatments
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
#what are all the required components of a closed MAMS trial
htests <- parLapply(cl=cl,
X = X,
fun = mamsc,
J=J,
alpha=alpha,
r=r,
r0=r0,
power=power,
p=p,
p0=p0,
ushape="triangular",
lshape="closed",
lfix=lfix,
sample.size=FALSE)
# Stop the cluster
stopCluster(cl)
#add in the full intersection test
htests[[(length(htests)+1)]] = trial
#####################################################################################################
#conditional adding
#the number of arms to be added
Kadd <- 2
#some example observations (observed test statistics for the two current arms)
z <- data.frame(tr1=c(2),tr2=c(1.5))
#defining all required hypothesis tests
#matrix of sample sizes
#each row of rtestsm is a 0/1 indicator vector selecting a subset of the
#K+Kadd arms (combn + tabulate enumerates every non-empty subset)
rtestsm <- t(combn(K+Kadd,1,FUN=tabulate,nbins=K+Kadd))
for(i in 2:(K+Kadd)){
  rtestsm <- rbind(rtestsm,t(combn(K+Kadd,i,FUN=tabulate,nbins=K+Kadd)))
}
#split the matrix into a list of rows so it can be iterated over
rtests <- lapply(seq_len(nrow(rtestsm)), function(i) rtestsm[i,])
#we will loop through rtests in parallel
#what test is the full intersection (last row: every arm included)
test <- rtests[[length(rtests)]]
#full intersection of the null hypotheses conditional test (from which to take the lower testing boundaries)
#(condtests() is presumably defined in the script sourced via funloc -- confirm)
ftestc <- condtests(test,htests,z,K,Kadd,J,alpha,r,r0,power,p,p0,ushape="triangular",lshape="triangular",lfix=NULL,funloc)
#what are the lower boundaries
lfix = ftestc$l
#drop the lower boundary at the final analysis
lfix = lfix[-length(lfix)]
#remove the full intersection from the set and compute all other necessary boundaries
comptests = rtests
comptests[[length(rtests)]] = NULL
# Initiate cluster
cl = makeCluster(no_cores)
#what are all the required components of a closed MAMS trial
ctests = parLapply(cl=cl,
                   X = comptests,
                   fun = condtests,
                   htests,z,K,Kadd,J,alpha,r,r0,power,p,p0,ushape="triangular",lshape="closed",lfix=lfix,funloc)
# Stop the cluster
stopCluster(cl)
#add the full intersection
ctests[[length(rtests)]] = ftestc
#####################################################################################################
##simulating operating characteristics
#configuration of treatments, c(0.5,0.5,0.5,0.5) is the global null
pv = c(0.5,0.5,0.5,0.5)
#number of simulations to use (note this is low due to computational intensity)
nsim = 1000
#simulating the expected behavior of the conditionally updated trial under a given configuration
#(simcond() and simfullpar() are presumably defined in the script sourced via
# funloc -- confirm)
simcond(nsim=nsim,rtestsm,ctests,htests[[length(htests)]],z,pv)
#simulating the full trial with a conditional update, note this uses parallel computation
cl = makeCluster(no_cores)
simresfull = simfullpar(pv,htests,Kadd,alpha,funloc,nsim=nsim,p=p,p0=p0,cl)
stopCluster(cl)
#gather the important results from the simulation of the full trial
#prepare to store the results
#exss: expected sample size per simulation; hmat: 4 result values per simulation
exss = numeric(nsim)
hmat = matrix(0,ncol=4,nrow=nsim)
#can be used to check if the trial continues or not
cont = numeric(nsim)
#this loop takes the relevant information from the simulation
for(k in 1:nsim)
{
  exss[k] = simresfull[[k]]$exss
  hmat[k,] = simresfull[[k]]$hmat
  cont[k] = simresfull[[k]]$exss > 30
}
#if the expected sample size is less than the possible minimum set it to be the minimum
#(30 is assumed to be the minimum possible sample size -- TODO confirm it
# matches the design)
exss[which(exss<30)] = 30
#output the results
message(paste("$(",pv[1],",",pv[2],",",pv[3],",",pv[4],")$"," & ",
round(colMeans(hmat)[1],digits=2)," & ",
round(colMeans(hmat)[2],digits=2)," & ",
round(colMeans(hmat)[3],digits=2)," & ",
round(colMeans(hmat)[4],digits=2)," & ",
round(mean(exss),digits=0)," \\\\")) | /MAMSaddrun.R | no_license | Thomas-Burnett/Adding-treatments-to-clinical-trials-in-progress | R | false | false | 5,296 | r | ###Examples adding arms to trials in progress
##required packages
## (library() instead of require() so a missing package fails loudly at load)
library(MAMS)
library(parallel)
library(mvtnorm)
##source functions
## FIX: the original line `funloc <- ()` is a parse error (the path string was
## evidently lost). Set funloc to the path of the script defining mamsc(),
## condtests(), simcond() and simfullpar() before running.
funloc <- ""
source(funloc)
#####################################################################################################
#Defining parameters
#The existing MAMS trial
#choose the parameters
#the number of treatments
K <- 2
#vector of the number of treatments in each of the tests
X <- c(1:K)
#number of interim analyses
J <- 3
#vectors of allocation ratios for each experimental treatment
r <- 1:3
#ratios on control
r0 <- 1:3
#target family-wise error rate (FWER)
alpha <- 0.05
#power
power <- 0.9
#effect size parameters for the trial
p <- 0.75
p0 <- 0.5
#####################################################################################################
#set up the original MAMS trial
#maximum intersection to set the lower boundaries
#(mamsc() is presumably defined in the script sourced via funloc -- confirm;
# it returns a design whose $l holds the lower stopping boundaries)
trial = mamsc(K=K,
              J=J,
              alpha=alpha,
              r=r,
              r0=r0,
              power=power,
              p=p,
              p0=p0,
              ushape="triangular",
              lshape="triangular")
#lower boundaries of the full design, dropping the final-analysis value
lfix = trial$l[-J]
#remove the maximum and compute the boundaries for the remaining tests
X = X[-K]
#construct all other tests
#set up parallelisation cores
#Calculate the number of cores (leave one free for the operating system)
#note the parallelisation isn't needed for low numbers of treatments
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
#what are all the required components of a closed MAMS trial
#(one design per element of X, all sharing the fixed lower boundaries lfix)
htests <- parLapply(cl=cl,
                    X = X,
                    fun = mamsc,
                    J=J,
                    alpha=alpha,
                    r=r,
                    r0=r0,
                    power=power,
                    p=p,
                    p0=p0,
                    ushape="triangular",
                    lshape="closed",
                    lfix=lfix,
                    sample.size=FALSE)
# Stop the cluster
stopCluster(cl)
#add in the full intersection test
htests[[(length(htests)+1)]] = trial
#####################################################################################################
#conditional adding
#the number of arms to be added
Kadd <- 2
#some example observations (observed test statistics for the two current arms)
z <- data.frame(tr1=c(2),tr2=c(1.5))
#defining all required hypothesis tests
#matrix of sample sizes
#each row of rtestsm is a 0/1 indicator vector selecting a subset of the
#K+Kadd arms (combn + tabulate enumerates every non-empty subset)
rtestsm <- t(combn(K+Kadd,1,FUN=tabulate,nbins=K+Kadd))
for(i in 2:(K+Kadd)){
  rtestsm <- rbind(rtestsm,t(combn(K+Kadd,i,FUN=tabulate,nbins=K+Kadd)))
}
#split the matrix into a list of rows so it can be iterated over
rtests <- lapply(seq_len(nrow(rtestsm)), function(i) rtestsm[i,])
#we will loop through rtests in parallel
#what test is the full intersection (last row: every arm included)
test <- rtests[[length(rtests)]]
#full intersection of the null hypotheses conditional test (from which to take the lower testing boundaries)
#(condtests() is presumably defined in the script sourced via funloc -- confirm)
ftestc <- condtests(test,htests,z,K,Kadd,J,alpha,r,r0,power,p,p0,ushape="triangular",lshape="triangular",lfix=NULL,funloc)
#what are the lower boundaries
lfix = ftestc$l
#drop the lower boundary at the final analysis
lfix = lfix[-length(lfix)]
#remove the full intersection from the set and compute all other necessary boundaries
comptests = rtests
comptests[[length(rtests)]] = NULL
# Initiate cluster
cl = makeCluster(no_cores)
#what are all the required components of a closed MAMS trial
ctests = parLapply(cl=cl,
                   X = comptests,
                   fun = condtests,
                   htests,z,K,Kadd,J,alpha,r,r0,power,p,p0,ushape="triangular",lshape="closed",lfix=lfix,funloc)
# Stop the cluster
stopCluster(cl)
#add the full intersection
ctests[[length(rtests)]] = ftestc
#####################################################################################################
##simulating operating characteristics
#configuration of treatments, c(0.5,0.5,0.5,0.5) is the global null
pv = c(0.5,0.5,0.5,0.5)
#number of simulations to use (note this is low due to computational intensity)
nsim = 1000
#simulating the expected behavior of the conditionally updated trial under a given configuration
#(simcond() and simfullpar() are presumably defined in the script sourced via
# funloc -- confirm)
simcond(nsim=nsim,rtestsm,ctests,htests[[length(htests)]],z,pv)
#simulating the full trial with a conditional update, note this uses parallel computation
cl = makeCluster(no_cores)
simresfull = simfullpar(pv,htests,Kadd,alpha,funloc,nsim=nsim,p=p,p0=p0,cl)
stopCluster(cl)
#gather the important results from the simulation of the full trial
#prepare to store the results
#exss: expected sample size per simulation; hmat: 4 result values per simulation
exss = numeric(nsim)
hmat = matrix(0,ncol=4,nrow=nsim)
#can be used to check if the trial continues or not
cont = numeric(nsim)
#this loop takes the relevant information from the simulation
for(k in 1:nsim)
{
  exss[k] = simresfull[[k]]$exss
  hmat[k,] = simresfull[[k]]$hmat
  cont[k] = simresfull[[k]]$exss > 30
}
#if the expected sample size is less than the possible minimum set it to be the minimum
#(30 is assumed to be the minimum possible sample size -- TODO confirm it
# matches the design)
exss[which(exss<30)] = 30
#output the results
message(paste("$(",pv[1],",",pv[2],",",pv[3],",",pv[4],")$"," & ",
round(colMeans(hmat)[1],digits=2)," & ",
round(colMeans(hmat)[2],digits=2)," & ",
round(colMeans(hmat)[3],digits=2)," & ",
round(colMeans(hmat)[4],digits=2)," & ",
round(mean(exss),digits=0)," \\\\")) |
## Attach Census block geometries to the cached tribe/block lookup, cache one
## sf file per county, then bind everything into a single micro-level dataset.
library(tidyverse)
library(data.table)
library(janitor)
library(sf)
library(mapview)
# NOTE(review): rm(list = ls()) wipes the calling session's workspace; fine for
# a standalone script, hostile when sourced interactively.
rm(list = ls())
# Lookup of tribe block-group file names; file_path encodes
# dir/dir/record-type/tribe/file and only the listed record types are kept.
tribe_df <- read_rds("01_data/cache/tribe_bg_file_names.rds") %>%
  tidyr::separate(file_path,into = c("d1","d2","rt","tribe","file"),sep = "/") %>%
  filter(rt %in% c("air_federal","air_state","otsa","sdtsa","tdsa")) %>%
  dplyr::select(tribe,rt,state,county,tract,block)
# County FIPS identifier = state code + county code
tribe_act <- tribe_df %>%
  mutate(GEOID_county = paste0(state,county))
# Counties not yet cached under 01_data/cache/blocks_sf (str_sub strips ".rds")
todo <- setdiff(unique(tribe_act$GEOID_county),
                str_sub(list.files("01_data/cache/blocks_sf"),1,-5))
tribe_do <- tribe_act %>%
  filter(GEOID_county %in% todo) %>%
  group_split(GEOID_county)
# NOTE(review): looks like an interactive-debug leftover -- `ta` is immediately
# shadowed by the map() function argument below.
ta <- tribe_do[[1]]
gc()
# For each remaining county: fetch its Census block geometries, build the
# block GEOID (county + zero-padded tract + block), join geometry onto the
# tribe rows, and cache the result to disk.
map(tribe_do,function(ta){
  blocks <- tigris::blocks(state = unique(ta$state),
                           county = unique(ta$county),
                           class = "sf")
  temp <- ta %>%
    mutate(GEOID10 = paste0(GEOID_county,
                            str_pad(str_remove(tract,"[:punct:]"),
                                    width = 6,
                                    pad = "0"),
                            block)) %>%
    right_join(blocks %>%
                 dplyr::select(geometry, GEOID10), . ,
               by = c("GEOID10"))
  write_rds(temp, paste0("01_data/cache/blocks_sf/",
                         unique(temp$GEOID_county),".rds"))
  print(unique(temp$GEOID_county))
})
# Re-read every cached county file and stack the rows into one data frame.
files <- list.files("01_data/cache/blocks_sf", full.names = T)
# NOTE(review): looks like an interactive-debug leftover -- `fl` is immediately
# shadowed by the map_dfr() function argument below.
fl <- files[10]
all <- map_dfr(files,function(fl){
  r <- read_rds(fl)
})
# t2 <- read_csv("01_data/tribe_bg_full_integrated_combined_FINAL.csv") %>%
#   filter(time == "time 2") %>%
#   dplyr::select(tribe,UID = census_tribe, rt) %>%
#   mutate(UID = str_extract(UID,"\\d+")) %>%
#   unique() %>%
#   filter(!is.na(UID),
#          !rt %in% c("tribalsub"))
#
# all_save <- all %>%
#   mutate(UID = str_extract(tribe,"\\d+")) %>%
#   inner_join(.,t2,by="UID")
class(all)
write_rds(all,"01_data/cache/tribe_shapefiles_micro.rds")
# write_rds(all,"/RSTOR/cache/tribe_shapefiles_micro.rds")
| /02_build/02_to_tribal_microsf.R | no_license | galsk223/tribalclimate | R | false | false | 2,086 | r | library(tidyverse)
## Attach Census block geometries to the cached tribe/block lookup, cache one
## sf file per county, then bind everything into a single micro-level dataset.
library(data.table)
library(janitor)
library(sf)
library(mapview)
# NOTE(review): rm(list = ls()) wipes the calling session's workspace; fine for
# a standalone script, hostile when sourced interactively.
rm(list = ls())
# Lookup of tribe block-group file names; file_path encodes
# dir/dir/record-type/tribe/file and only the listed record types are kept.
tribe_df <- read_rds("01_data/cache/tribe_bg_file_names.rds") %>%
  tidyr::separate(file_path,into = c("d1","d2","rt","tribe","file"),sep = "/") %>%
  filter(rt %in% c("air_federal","air_state","otsa","sdtsa","tdsa")) %>%
  dplyr::select(tribe,rt,state,county,tract,block)
# County FIPS identifier = state code + county code
tribe_act <- tribe_df %>%
  mutate(GEOID_county = paste0(state,county))
# Counties not yet cached under 01_data/cache/blocks_sf (str_sub strips ".rds")
todo <- setdiff(unique(tribe_act$GEOID_county),
                str_sub(list.files("01_data/cache/blocks_sf"),1,-5))
tribe_do <- tribe_act %>%
  filter(GEOID_county %in% todo) %>%
  group_split(GEOID_county)
# NOTE(review): looks like an interactive-debug leftover -- `ta` is immediately
# shadowed by the map() function argument below.
ta <- tribe_do[[1]]
gc()
# For each remaining county: fetch its Census block geometries, build the
# block GEOID (county + zero-padded tract + block), join geometry onto the
# tribe rows, and cache the result to disk.
map(tribe_do,function(ta){
  blocks <- tigris::blocks(state = unique(ta$state),
                           county = unique(ta$county),
                           class = "sf")
  temp <- ta %>%
    mutate(GEOID10 = paste0(GEOID_county,
                            str_pad(str_remove(tract,"[:punct:]"),
                                    width = 6,
                                    pad = "0"),
                            block)) %>%
    right_join(blocks %>%
                 dplyr::select(geometry, GEOID10), . ,
               by = c("GEOID10"))
  write_rds(temp, paste0("01_data/cache/blocks_sf/",
                         unique(temp$GEOID_county),".rds"))
  print(unique(temp$GEOID_county))
})
# Re-read every cached county file and stack the rows into one data frame.
files <- list.files("01_data/cache/blocks_sf", full.names = T)
# NOTE(review): looks like an interactive-debug leftover -- `fl` is immediately
# shadowed by the map_dfr() function argument below.
fl <- files[10]
all <- map_dfr(files,function(fl){
  r <- read_rds(fl)
})
# t2 <- read_csv("01_data/tribe_bg_full_integrated_combined_FINAL.csv") %>%
#   filter(time == "time 2") %>%
#   dplyr::select(tribe,UID = census_tribe, rt) %>%
#   mutate(UID = str_extract(UID,"\\d+")) %>%
#   unique() %>%
#   filter(!is.na(UID),
#          !rt %in% c("tribalsub"))
#
# all_save <- all %>%
#   mutate(UID = str_extract(tribe,"\\d+")) %>%
#   inner_join(.,t2,by="UID")
class(all)
write_rds(all,"01_data/cache/tribe_shapefiles_micro.rds")
# write_rds(all,"/RSTOR/cache/tribe_shapefiles_micro.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_test_data.R
\name{generate_test_data}
\alias{generate_test_data}
\title{Generate test data}
\usage{
generate_test_data(input_data, center_mean, center_stddev, grid,
weather_dataset, wt_f=NULL, scaled = T, soil_type = NULL, points=NULL)
}
\arguments{
\item{input_data}{Dataset in sprad format containing LSUs as columns and cell
numbers as rows}
\item{center_mean}{Center attributes from scaled training data of the model
used in the parameter model. Will be used if scale is set to TRUE. Example:
train_data <- scale(train_data) col_means_train_harvest <- attr(train_data,
"scaled:center")}
\item{center_stddev}{Scale attributes from scaled training data of the model.
Will be used if scale is set to TRUE.}
\item{grid}{dataset with grid cells information. Has to be the same size as the \code{input_data}}
\item{weather_dataset}{Data set containing weather information.}
\item{wt_f}{koppen geiger climate classification dataset created by the file:
koppen_geiger_pre_processing.R}
\item{scaled}{Boolean that defines whether the data will be scaled or not.}
\item{soil_type}{Dataset with information on soil type on cell level.}
\item{points}{A list of values that will substitute the original columns with
  the number of LSUs in case a finer LSU resolution is desired for
  finding the optimal LSU. Ex: points = as.list(seq(0,25,0.5))}
}
\description{
Function that utilizes the original dataset, the centers of the
model to be analysed and weather information to create a test dataset that
will be scaled and ready to use in posterior evaluations.
}
\details{
If wt_f is NULL, all cells will be scaled and output. If
wt_f is filled with the Koppen-Geiger climate classification generated by
the file koppen_geiger_pre_processing.R, the EF and ET climate zones will
be removed from the analysis.
}
\examples{
##Scaled test data
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, soil_type = soil)
##Unscaled test data
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, scaled = F, soil_type = soil)
##Unscaled test data with LSU points substitutes
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, scaled = F, soil_type = soil, points = as.list(seq(0,25,0.5)))
}
\author{
Marcos Alves \email{mppalves@gmail.com}
}
| /man/generate_test_data.Rd | no_license | mppalves/GSTools | R | false | true | 2,522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_test_data.R
\name{generate_test_data}
\alias{generate_test_data}
\title{Generate test data}
\usage{
generate_test_data(input_data, center_mean, center_stddev, grid,
weather_dataset, wt_f=NULL, scaled = T, soil_type = NULL, points=NULL)
}
\arguments{
\item{input_data}{Dataset in sprad format containing LSUs as columns and cell
numbers as rows}
\item{center_mean}{Center attributes from scaled training data of the model
used in the parameter model. Will be used if scale is set to TRUE. Example:
train_data <- scale(train_data) col_means_train_harvest <- attr(train_data,
"scaled:center")}
\item{center_stddev}{Scale attributes from scaled training data of the model.
Will be used if scale is set to TRUE.}
\item{grid}{dataset with grid cells information. Has to be the same size as the \code{input_data}}
\item{weather_dataset}{Data set containing weather information.}
\item{wt_f}{koppen geiger climate classification dataset created by the file:
koppen_geiger_pre_processing.R}
\item{scaled}{Boolean that defines whether the data will be scaled or not.}
\item{soil_type}{Dataset with information on soil type on cell level.}
\item{points}{A list of values that will substitute the original columns with
  the number of LSUs in case a finer LSU resolution is desired for
  finding the optimal LSU. Ex: points = as.list(seq(0,25,0.5))}
}
\description{
Function that utilizes the original dataset, the centers of the
model to be analysed and weather information to create a test dataset that
will be scaled and ready to use in posterior evaluations.
}
\details{
If wt_f is NULL, all cells will be scaled and output. If
wt_f is filled with the Koppen-Geiger climate classification generated by
the file koppen_geiger_pre_processing.R, the EF and ET climate zones will
be removed from the analysis.
}
\examples{
##Scaled test data
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, soil_type = soil)
##Unscaled test data
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, scaled = F, soil_type = soil)
##Unscaled test data with LSU points substitutes
generate_test_data(map_harvest_real, col_means_train_harvest, col_stddevs_train_harvest, grid, weather_dataset, scaled = F, soil_type = soil, points = as.list(seq(0,25,0.5)))
}
\author{
Marcos Alves \email{mppalves@gmail.com}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gtable.R
\name{gtable}
\alias{gtable}
\title{Create a new grob table.}
\usage{
gtable(
widths = list(),
heights = list(),
respect = FALSE,
name = "layout",
rownames = NULL,
colnames = NULL,
vp = NULL
)
}
\arguments{
\item{widths}{a unit vector giving the width of each column}
\item{heights}{a unit vector giving the height of each row}
\item{respect}{a logical vector of length 1: should the aspect ratio of
height and width specified in null units be respected. See
\code{\link[=grid.layout]{grid.layout()}} for more details}
\item{name}{a string giving the name of the table. This is used to name
the layout viewport}
\item{rownames, colnames}{character vectors of row and column names, used
for character subsetting, particularly for \code{gtable_align},
and \code{gtable_join}.}
\item{vp}{a grid viewport object (or NULL).}
}
\value{
A gtable object
}
\description{
A grob table captures all the information needed to layout grobs in a table
structure. It supports row and column spanning, offers some tools to
automatically figure out the correct dimensions, and makes it easy to
align and combine multiple tables.
}
\details{
Each grob is put in its own viewport - grobs in the same location are
not combined into one cell. Each grob takes up the entire cell viewport
so justification control is not available.
It constructs both the viewports and the gTree needed to display the table.
}
\section{Components}{
There are three basics components to a grob table: the specification of
table (cell heights and widths), the layout (for each grob, its position,
name and other settings), and global parameters.
It's easier to understand how \code{gtable} works if in your head you keep
the table separate from its contents. Each cell can have 0, 1, or many
grobs inside. Each grob must belong to at least one cell, but can span
across many cells.
}
\section{Layout}{
The layout details are stored in a data frame with one row for each grob,
and columns:
\itemize{
\item \code{t} top extent of grob
\item \code{r} right extent of grob
\item \code{b} bottom extent of grob
\item \code{l} left extent of grob
\item \code{z} the z-order of the grob - used to reorder the grobs
before they are rendered
\item \code{clip} a string, specifying how the grob should be clipped:
either \code{"on"}, \code{"off"} or \code{"inherit"}
\item \code{name}, a character vector used to name each grob and its
viewport
}
You should not need to modify this data frame directly - instead use
functions like \code{gtable_add_grob}.
}
\examples{
library(grid)
a <- gtable(unit(1:3, c("cm")), unit(5, "cm"))
a
gtable_show_layout(a)
# Add a grob:
rect <- rectGrob(gp = gpar(fill = "black"))
a <- gtable_add_grob(a, rect, 1, 1)
a
plot(a)
# gtables behave like matrices:
dim(a)
t(a)
plot(t(a))
# when subsetting, grobs are retained if their extents lie in the
# rows/columns that retained.
b <- gtable(unit(c(2, 2, 2), "cm"), unit(c(2, 2, 2), "cm"))
b <- gtable_add_grob(b, rect, 2, 2)
b[1, ]
b[, 1]
b[2, 2]
# gtable have row and column names
rownames(b) <- 1:3
rownames(b)[2] <- 200
colnames(b) <- letters[1:3]
dimnames(b)
}
\seealso{
Other gtable construction:
\code{\link{gtable_col}()},
\code{\link{gtable_matrix}()},
\code{\link{gtable_row}()},
\code{\link{gtable_spacer}}
}
\concept{gtable construction}
| /man/gtable.Rd | no_license | cran/gtable | R | false | true | 3,398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gtable.R
\name{gtable}
\alias{gtable}
\title{Create a new grob table.}
\usage{
gtable(
widths = list(),
heights = list(),
respect = FALSE,
name = "layout",
rownames = NULL,
colnames = NULL,
vp = NULL
)
}
\arguments{
\item{widths}{a unit vector giving the width of each column}
\item{heights}{a unit vector giving the height of each row}
\item{respect}{a logical vector of length 1: should the aspect ratio of
height and width specified in null units be respected. See
\code{\link[=grid.layout]{grid.layout()}} for more details}
\item{name}{a string giving the name of the table. This is used to name
the layout viewport}
\item{rownames, colnames}{character vectors of row and column names, used
for character subsetting, particularly for \code{gtable_align},
and \code{gtable_join}.}
\item{vp}{a grid viewport object (or NULL).}
}
\value{
A gtable object
}
\description{
A grob table captures all the information needed to layout grobs in a table
structure. It supports row and column spanning, offers some tools to
automatically figure out the correct dimensions, and makes it easy to
align and combine multiple tables.
}
\details{
Each grob is put in its own viewport - grobs in the same location are
not combined into one cell. Each grob takes up the entire cell viewport
so justification control is not available.
It constructs both the viewports and the gTree needed to display the table.
}
\section{Components}{
There are three basics components to a grob table: the specification of
table (cell heights and widths), the layout (for each grob, its position,
name and other settings), and global parameters.
It's easier to understand how \code{gtable} works if in your head you keep
the table separate from its contents. Each cell can have 0, 1, or many
grobs inside. Each grob must belong to at least one cell, but can span
across many cells.
}
\section{Layout}{
The layout details are stored in a data frame with one row for each grob,
and columns:
\itemize{
\item \code{t} top extent of grob
\item \code{r} right extent of grob
\item \code{b} bottom extent of grob
\item \code{l} left extent of grob
\item \code{z} the z-order of the grob - used to reorder the grobs
before they are rendered
\item \code{clip} a string, specifying how the grob should be clipped:
either \code{"on"}, \code{"off"} or \code{"inherit"}
\item \code{name}, a character vector used to name each grob and its
viewport
}
You should not need to modify this data frame directly - instead use
functions like \code{gtable_add_grob}.
}
\examples{
library(grid)
a <- gtable(unit(1:3, c("cm")), unit(5, "cm"))
a
gtable_show_layout(a)
# Add a grob:
rect <- rectGrob(gp = gpar(fill = "black"))
a <- gtable_add_grob(a, rect, 1, 1)
a
plot(a)
# gtables behave like matrices:
dim(a)
t(a)
plot(t(a))
# when subsetting, grobs are retained if their extents lie in the
# rows/columns that retained.
b <- gtable(unit(c(2, 2, 2), "cm"), unit(c(2, 2, 2), "cm"))
b <- gtable_add_grob(b, rect, 2, 2)
b[1, ]
b[, 1]
b[2, 2]
# gtable have row and column names
rownames(b) <- 1:3
rownames(b)[2] <- 200
colnames(b) <- letters[1:3]
dimnames(b)
}
\seealso{
Other gtable construction:
\code{\link{gtable_col}()},
\code{\link{gtable_matrix}()},
\code{\link{gtable_row}()},
\code{\link{gtable_spacer}}
}
\concept{gtable construction}
|
library(dplyr)
# Part 2
# Load the UN "Human Development" and "Gender Inequality" datasets.
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = FALSE)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = FALSE, na.strings = "..")
# Part 3
# Structure
str(hd)
str(gii)
# Dimensions
# FIX: dim(gii) was printed twice while dim(hd) was never shown.
dim(hd)
dim(gii)
# Summaries
summary(hd)
summary(gii)
colnames(hd)
colnames(gii)
# Part 4
# Rename the variables with shorter, descriptive names.
colnames(hd)[1] <- "rank"
colnames(hd)[2] <- "country"
colnames(hd)[3] <- "humanDev"
colnames(hd)[4] <- "expectancy"
colnames(hd)[5] <- "expectedEducation"
colnames(hd)[6] <- "meanEducation"
colnames(hd)[7] <- "gni"
colnames(hd)[8] <- "gniMinusRank"
colnames(gii)[1] <- "rank"
colnames(gii)[2] <- "country"
colnames(gii)[3] <- "genderInequality"
colnames(gii)[4] <- "maternalMortality"
colnames(gii)[5] <- "adolescentBirth"
colnames(gii)[6] <- "repInParliament"
colnames(gii)[7] <- "popWithSecondaryEduF"
colnames(gii)[8] <- "popWithSecondaryEduM"
colnames(gii)[9] <- "labourParticipationF"
colnames(gii)[10] <- "labourParticipationM"
# Part 5
# Derive female/male ratios for secondary education and labour participation.
gii <- mutate(gii, secondaryPopRatio = popWithSecondaryEduF / popWithSecondaryEduM)
gii <- mutate(gii, labourRatio = labourParticipationF / labourParticipationM)
colnames(gii)
# Part 6
# Join the two datasets on country.
human <- inner_join(hd, gii, by = c("country"))
# 195 observations and 19 variables
dim(human)
# Save to data folder
setwd("C:/Users/Anton/Documents/IODS-project/data/")
write.csv(human, file="human")
#Week 5 Data wrangling
human <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human1.txt", header = TRUE, sep = ",")
# Structure and dimensions
# We can see that there's 195 observations and 19 variables.
# The data is a dataset from the United Nations Development Programme. It's about human
# development in different areas.
str(human)
dim(human)
# human$gni to numeric
# NOTE(review): if GNI was read as character with thousands separators (e.g.
# "1,234") this yields NA, and if it was read as a factor it yields level
# codes -- confirm against the raw data (the usual fix strips commas first).
human$GNI <- as.numeric(human$GNI)
# Columns to keep
keep <- c("Country", "Edu2.FM", "Labo.FM", "Life.Exp", "Edu.Exp", "GNI", "Mat.Mor", "Ado.Birth", "Parli.F")
human <- dplyr::select(human, one_of(keep))
# Filter out rows with no values
human_ <- filter(human, complete.cases(human))
# Remove observations which relate to regions instead of countries
last <- nrow(human_) - 7
human_ <- human_[1:last, ]
# Add countries as rownames
rownames(human_) <- human_$Country
# Remove the Country variable
# FIX: the original selected from `human`, silently discarding the
# complete-cases filter, the region-row removal and the country rownames.
human_ <- select(human_, -Country)
# Save and replace old human file
setwd("C:/Users/Anton/Documents/IODS-project/data/")
write.csv(human_, file="human")
| /data/create_human.R | no_license | ahjyrkia/IODS-project | R | false | false | 2,626 | r | library(dplyr)
# Part 2
# Load the UN "Human Development" and "Gender Inequality" datasets.
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = FALSE)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = FALSE, na.strings = "..")
# Part 3
# Structure
str(hd)
str(gii)
# Dimensions
# FIX: dim(gii) was printed twice while dim(hd) was never shown.
dim(hd)
dim(gii)
# Summaries
summary(hd)
summary(gii)
colnames(hd)
colnames(gii)
# Part 4
# Rename the variables with shorter, descriptive names.
colnames(hd)[1] <- "rank"
colnames(hd)[2] <- "country"
colnames(hd)[3] <- "humanDev"
colnames(hd)[4] <- "expectancy"
colnames(hd)[5] <- "expectedEducation"
colnames(hd)[6] <- "meanEducation"
colnames(hd)[7] <- "gni"
colnames(hd)[8] <- "gniMinusRank"
colnames(gii)[1] <- "rank"
colnames(gii)[2] <- "country"
colnames(gii)[3] <- "genderInequality"
colnames(gii)[4] <- "maternalMortality"
colnames(gii)[5] <- "adolescentBirth"
colnames(gii)[6] <- "repInParliament"
colnames(gii)[7] <- "popWithSecondaryEduF"
colnames(gii)[8] <- "popWithSecondaryEduM"
colnames(gii)[9] <- "labourParticipationF"
colnames(gii)[10] <- "labourParticipationM"
# Part 5
# Derive female/male ratios for secondary education and labour participation.
gii <- mutate(gii, secondaryPopRatio = popWithSecondaryEduF / popWithSecondaryEduM)
gii <- mutate(gii, labourRatio = labourParticipationF / labourParticipationM)
colnames(gii)
# Part 6
# Join the two datasets on country.
human <- inner_join(hd, gii, by = c("country"))
# 195 observations and 19 variables
dim(human)
# Save to data folder
setwd("C:/Users/Anton/Documents/IODS-project/data/")
write.csv(human, file="human")
#Week 5 Data wrangling
human <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human1.txt", header = TRUE, sep = ",")
# Structure and dimensions
# We can see that there's 195 observations and 19 variables.
# The data is a dataset from the United Nations Development Programme. It's about human
# development in different areas.
str(human)
dim(human)
# human$gni to numeric
# NOTE(review): if GNI was read as character with thousands separators (e.g.
# "1,234") this yields NA, and if it was read as a factor it yields level
# codes -- confirm against the raw data (the usual fix strips commas first).
human$GNI <- as.numeric(human$GNI)
# Columns to keep
keep <- c("Country", "Edu2.FM", "Labo.FM", "Life.Exp", "Edu.Exp", "GNI", "Mat.Mor", "Ado.Birth", "Parli.F")
human <- dplyr::select(human, one_of(keep))
# Filter out rows with no values
human_ <- filter(human, complete.cases(human))
# Remove observations which relate to regions instead of countries
last <- nrow(human_) - 7
human_ <- human_[1:last, ]
# Add countries as rownames
rownames(human_) <- human_$Country
# Remove the Country variable
# FIX: the original selected from `human`, silently discarding the
# complete-cases filter, the region-row removal and the country rownames.
human_ <- select(human_, -Country)
# Save and replace old human file
setwd("C:/Users/Anton/Documents/IODS-project/data/")
write.csv(human_, file="human")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesisvideo_operations.R
\name{kinesisvideo_create_signaling_channel}
\alias{kinesisvideo_create_signaling_channel}
\title{Creates a signaling channel}
\usage{
kinesisvideo_create_signaling_channel(ChannelName, ChannelType,
SingleMasterConfiguration, Tags)
}
\arguments{
\item{ChannelName}{[required] A name for the signaling channel that you are creating. It must be
unique for each AWS account and AWS Region.}
\item{ChannelType}{A type of the signaling channel that you are creating. Currently,
\code{SINGLE_MASTER} is the only supported channel type.}
\item{SingleMasterConfiguration}{A structure containing the configuration for the \code{SINGLE_MASTER} channel
type.}
\item{Tags}{A set of tags (key-value pairs) that you want to associate with this
channel.}
}
\value{
A list with the following syntax:\preformatted{list(
ChannelARN = "string"
)
}
}
\description{
Creates a signaling channel.
\code{\link[=kinesisvideo_create_signaling_channel]{create_signaling_channel}} is
an asynchronous operation.
}
\section{Request syntax}{
\preformatted{svc$create_signaling_channel(
ChannelName = "string",
ChannelType = "SINGLE_MASTER",
SingleMasterConfiguration = list(
MessageTtlSeconds = 123
),
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
| /cran/paws.media.services/man/kinesisvideo_create_signaling_channel.Rd | permissive | paws-r/paws | R | false | true | 1,401 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesisvideo_operations.R
\name{kinesisvideo_create_signaling_channel}
\alias{kinesisvideo_create_signaling_channel}
\title{Creates a signaling channel}
\usage{
kinesisvideo_create_signaling_channel(ChannelName, ChannelType,
SingleMasterConfiguration, Tags)
}
\arguments{
\item{ChannelName}{[required] A name for the signaling channel that you are creating. It must be
unique for each AWS account and AWS Region.}
\item{ChannelType}{A type of the signaling channel that you are creating. Currently,
\code{SINGLE_MASTER} is the only supported channel type.}
\item{SingleMasterConfiguration}{A structure containing the configuration for the \code{SINGLE_MASTER} channel
type.}
\item{Tags}{A set of tags (key-value pairs) that you want to associate with this
channel.}
}
\value{
A list with the following syntax:\preformatted{list(
ChannelARN = "string"
)
}
}
\description{
Creates a signaling channel.
\code{\link[=kinesisvideo_create_signaling_channel]{create_signaling_channel}} is
an asynchronous operation.
}
\section{Request syntax}{
\preformatted{svc$create_signaling_channel(
ChannelName = "string",
ChannelType = "SINGLE_MASTER",
SingleMasterConfiguration = list(
MessageTtlSeconds = 123
),
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
# Auto-extracted example for IAPWS95::hTD (specific enthalpy as a function
# of temperature and density).
library(IAPWS95)
### Name: hTD
### Title: Specific Enthalpy, Function of Temperature and Density
### Aliases: hTD
### ** Examples
# NOTE(review): assigning to T masks the built-in alias for TRUE; harmless
# in this generated example but best avoided in hand-written code.
T <- 500.
D <- 838.025
# Specific enthalpy at this temperature/density state point
h <- hTD(T,D)
h
| /data/genthat_extracted_code/IAPWS95/examples/hTD.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 177 | r | library(IAPWS95)
### Name: hTD
### Title: Specific Enthalpy, Function of Temperature and Density
### Aliases: hTD
### ** Examples
# NOTE(review): assigning to T masks the built-in alias for TRUE; harmless
# in this generated example but best avoided in hand-written code.
T <- 500.
D <- 838.025
# Specific enthalpy at this temperature/density state point
h <- hTD(T,D)
h
|
# Simulation tab: parameter sliders, a simulate/download row, and collapsible
# result views for the simulated reaction-time data.
tabPanel("Simulation",
         h3("Reaction Times"),
         h4("1. Select the parameter values:"),
         # Parameter sliders: trial count, first-stage processing rates,
         # second-stage mean, and integration-window parameters.
         fluidRow(
           column(3,
                  uiOutput("soa_input"),
                  sliderInput("N","Number of trials:",
                              min = 1, max = 1000, value = 500)),
           column(3,
                  # NOTE(review): this label looks truncated -- compare the
                  # matching "Visual processing time" label below; confirm
                  # intended wording before changing the UI string.
                  sliderInput("proc.A","for auditory stimulus
                              (\\(\\frac{1}{\\lambda_A}\\))",
                              min = 20, max = 150, value = 100),
                  sliderInput("proc.V","Visual processing time
                              (\\(\\frac{1}{\\lambda_V}\\))",
                              min = 20, max = 150, value = 50)),
           column(3,
                  sliderInput("mu","\\(\\mu\\)",
                              min = 100, max = 500, value = 200),
                  p("The standard deviation of the second stage processing time is
                    fixed to \\(\\frac{\\mu}{5}\\).")),
           column(3,
                  sliderInput("sim.omega","Window width (\\(\\omega\\))",
                              min = 100, max = 300, value = 200),
                  sliderInput("sim.delta","Amount of integration (\\(\\Delta\\))",
                              min = 20, max = 100, value = 50))
         ),
         # Action row: trigger the simulation and download the result as CSV.
         fluidRow(
           column(3,
                  h4("2. Simulate data"),
                  actionButton("sim_button", "Simulate!")),
           column(9,
                  h4("3. (optional) To download your simulated data, press on the button
                     below"),
                  downloadButton('downloadData', 'Download (.csv)'))),
         # Collapsible results: boxplot and table views of the simulated RTs.
         # NOTE(review): tabPanel()s here are not wrapped in a tabsetPanel();
         # verify this renders as intended inside bsCollapsePanel.
         bsCollapse(id = "RT_collapse", open = "Visualization",
                    bsCollapsePanel("Plot and Table",
                                    tabPanel("Plot",
                                             h5("Boxplots of reaction times for each SOA"),
                                             plotOutput("simplot")),
                                    tabPanel("Table",
                                             h5("Table of reaction times for each SOA"),
                                             numericInput("nrowShow","Number of rows displayed",
                                                          min=1, max=60, value=10),
                                             tableOutput("simtable"))
                    )),
         tags$hr(),
         h3("Temporal Order Judgments"),
         fluidRow(
           column(12,
                  sliderInput("beta", "(\\(\\beta\\))",
                              min = 0, max = 1, value = 0.5))
         ))
h3("Reaction Times"),
h4("1. Select the parameter values:"),
fluidRow(
column(3,
uiOutput("soa_input"),
sliderInput("N","Number of trials:",
min = 1, max = 1000, value = 500)),
column(3,
sliderInput("proc.A","for auditory stimulus
(\\(\\frac{1}{\\lambda_A}\\))",
min = 20, max = 150, value = 100),
sliderInput("proc.V","Visual processing time
(\\(\\frac{1}{\\lambda_V}\\))",
min = 20, max = 150, value = 50)),
column(3,
sliderInput("mu","\\(\\mu\\)",
min = 100, max = 500, value = 200),
p("The standard deviation of the second stage processing time is
fixed to \\(\\frac{\\mu}{5}\\).")),
column(3,
sliderInput("sim.omega","Window width (\\(\\omega\\))",
min = 100, max = 300, value = 200),
sliderInput("sim.delta","Amount of integration (\\(\\Delta\\))",
min = 20, max = 100, value = 50))
),
fluidRow(
column(3,
h4("2. Simulate data"),
actionButton("sim_button", "Simulate!")),
column(9,
h4("3. (optional) To download your simulated data, press on the button
below"),
downloadButton('downloadData', 'Download (.csv)'))),
bsCollapse(id = "RT_collapse", open = "Visualization",
bsCollapsePanel("Plot and Table",
tabPanel("Plot",
h5("Boxplots of reaction times for each SOA"),
plotOutput("simplot")),
tabPanel("Table",
h5("Table of reaction times for each SOA"),
numericInput("nrowShow","Number of rows displayed",
min=1, max=60, value=10),
tableOutput("simtable"))
)),
tags$hr(),
h3("Temporal Order Judgments"),
fluidRow(
column(12,
sliderInput("beta", "(\\(\\beta\\))",
min = 0, max = 1, value = 0.5))
)) |
currentGMVersion = "2.5.12"
#' importGraspableJson
#'
#' This simple script just loads a json data file, merges the trial data and events, and renames a few columns
#' It also twiddles things down to a single version of GM, and a single context ("experiment"). It does this only if you
#' set 'compress' to true, but that seems like the vastly most useful format by default.
#'
#' @param fileName the name of a file locating a json project
#' @param versions A vector of versions of Graspable math to include. Data that are associated
#' with a different version are excluded by default. If any non-false value, data will be limited to versions of gm matching this string
#' @param contexts A vector of contexts to include. GM is used in many contexts, including experiments, the canvas, and so on. If set to F, include all contexts.
#' @param compress If set to true, execute logic to collapse each interaction into a single row, glossing
#' diferences between touch, tap, release, etc events.
#' @param debug If true, does a number of sanity checks, like looking at the number of unique old states when collapsing
#' Comments
#'
#' @return a tibble containing the data from the file, with the class "GM X", where X is the version
#' specified (or the first version number to appear).
#' @description Graspable data is stored in JSON [describe what the data format is like, and what compressing is.]
#' @seealso
#' @export
#' @examples
#' canvasSnippetFile <- system.file("extdata", "canvasSnippet.json", package = "GraspableDataAnalysis", mustWork = TRUE)
#' canvasSnippetData <- importGraspableJson(canvasSnippetFile, versions = c("2.5.12" ))
#'
importGraspableJson <- function(filename, versions = FALSE, contexts = FALSE, compress = TRUE, debug = FALSE){
  # Event subtypes that may label a collapsed interaction row.
  subtypeList <- c("font_size", "mode_change", "create", "create", "delete", "draw", "undo", "clear_all"
                   , "move", "inspect", "clone", "scrub", "redo", "auto-undo", "math")
  # head()/tail() wrappers that return a typed "missing" sentinel for empty
  # input: NA for integer/numeric vectors, the string "none" otherwise.
  safeHead <- function(ll){
    nilVal <- if(length(intersect(class(ll), c("integer", "numeric"))) > 0) NA else "none"
    if(length(ll) == 0) nilVal else head(ll, 1)
  }
  safeTail <- function(ll){
    nilVal <- if(length(intersect(class(ll), c("integer", "numeric"))) > 0) NA else "none"
    if(length(ll) == 0) nilVal else tail(ll, 1)
  }
  # Load the export and join event-level data onto its trial metadata.
  graspableJSON <- jsonlite::fromJSON(read_file(filename), simplifyDataFrame = TRUE, flatten = TRUE)
  gmData <- as_tibble(graspableJSON$data)    # as.tibble() is deprecated
  trials <- as_tibble(graspableJSON$trials)
  graspableJSON <- as_tibble(merge(gmData, trials, by.x = "trial_id", by.y = "id"))
  graspableJSON$uniqueID <- graspableJSON$`_id.x`
  graspableJSON <- rename(graspableJSON
                          , "trialTime" = "time.y"
                          , "trialTimeStamp" = "timestamp.y"
  )
  # Optionally filter by GM version, and tag the result's class with the
  # version(s) so S3 methods can dispatch on it.
  # BUG FIX: the original tested `versions != F`, which warns/errors when
  # `versions` is a vector (the documented usage); isFALSE() checks the
  # "no filtering" sentinel safely.
  if(!isFALSE(versions)){
    graspableJSON <- graspableJSON %>% filter(gm_version %in% versions)
    class(graspableJSON) <- append(paste("gm", versions)
                                   , class(graspableJSON))
  } else {
    class(graspableJSON) <- append(paste("gm", graspableJSON$gm_version[1])
                                   , class(graspableJSON))
  }
  if(!isFALSE(contexts)){
    graspableJSON <- graspableJSON %>% filter(experiment_id %in% contexts)
  }
  # Collapse each (trial, interaction) group of raw events into one row.
  if(compress){
    # Debug-only progress counter, local to this call (the original leaked
    # a global `i` via `<<-`).
    progress <- 0
    graspableJSON$marker <- paste(graspableJSON$trial_id, graspableJSON$interaction_id)
    if(debug){print("Building Compressed Table")}
    graspableJSON <- graspableJSON %>% split(.$marker) %>%
      map(function(d){
        if(debug){
          if(progress %% 50 == 0){print(progress)}
          progress <<- progress + 1   # updates the enclosing function's counter
        }
        # One summary row per interaction: first/last observed values of each
        # field, the summed duration, and the first recognized subtype.
        e <- with(d, tibble(
          context = safeHead(na.omit(experiment_id))
          , canvasID = safeHead((na.omit(canvas_id)))
          , elementID = safeHead(na.omit(el_id))
          , interactionID = mean(interaction_id)
          , uniqueID = safeHead(uniqueID)
          , type = ifelse("interaction" %in% type, 'interaction', "event")
          , action = safeTail(na.omit(action))
          , subtype = safeHead(subtype[subtype %in% subtypeList])
          , method = safeTail(na.omit(method))
          , oldState = safeTail(na.omit(old_state))
          , newState = safeTail(na.omit(new_state))
          , duration = sum(na.omit(dur))
          , elementType = safeHead(na.omit(el_type))
          , exprXInitial = safeHead(na.omit(expr_x))
          , exprYInitial = safeHead(na.omit(expr_y))
          , exprXFinal = safeTail(na.omit(expr_x))
          , exprYFinal = safeTail(na.omit(expr_y))
          , exprWidthInitial = safeHead(na.omit(expr_width))
          , exprHeightInitial = safeHead(na.omit(expr_height))
          , exprWidthFinal = safeTail(na.omit(expr_width))
          , exprHeightFinal = safeTail(na.omit(expr_height))
          , symXInitial = safeHead(na.omit(sym_x))
          , symYInitial = safeHead(na.omit(sym_y))
          , selXInitial = safeHead(na.omit(sel_x))
          , selYInitial = safeHead(na.omit(sel_y))
          , symXFinal = safeTail(na.omit(sym_x))
          , symYFinal = safeTail(na.omit(sym_y))
          , selXFinal = safeTail(na.omit(sel_x))
          , selYFinal = safeTail(na.omit(sel_y))
          # BUG FIX: the original passed `na.omit=T`, which min()/max() treat
          # as an extra data value (TRUE -> 1), corrupting timeStart; the
          # intended argument is na.rm.
          , timeStart = min(time.x, na.rm = TRUE)
          , timeFinal = max(time.x, na.rm = TRUE)
          , xInitial = NA
          , yInitial = NA
        ))
        if("move" %in% d$subtype){
          # For moves, absolute start/end coordinates come from the JSON
          # blobs stored in old_state/new_state.
          startingPlace <- jsonlite::fromJSON(safeHead(na.omit(d$old_state)))
          e$xInitial <- startingPlace$x
          e$yInitial <- startingPlace$y
          endingPlace <- jsonlite::fromJSON(safeHead(na.omit(d$new_state)))
          e$xFinal <- endingPlace$x
          e$yFinal <- endingPlace$y
        }
        if(debug){
          # Sanity-check counts of the distinct values collapsed away.
          e <- with(d, mutate(e
                              , oldStateDebugCount = length(na.omit(old_state))
                              , newStateDebugCount = length(na.omit(new_state))
                              , subtypeDebugCount = length(na.omit(subtype[subtype %in% subtypeList]))
                              , actionDebugCount = length(na.omit(action))
                              , exprXCount = length(unique(na.omit(expr_x)))
                              , exprYCount = length(unique(na.omit(expr_y)))
                              , exprWidthCount = length(unique(na.omit(expr_width)))
                              , exprHeightCount = length(unique(na.omit(expr_height)))
                              , symXCount = length(unique(na.omit(sym_x)))
                              , symYCount = length(unique(na.omit(sym_y)))
                              , selXCount = length(unique(na.omit(sel_x)))
                              , selYCount = length(unique(na.omit(sel_y)))
          ))
        }
        return(e)
      }) %>% bind_rows()
    graspableJSON <- graspableJSON %>% arrange(.$context, .$canvasID, .$interactionID)
    progress <- 0
    if(debug){print("Propogating absolute locations")}
    graspableJSON <- graspableJSON %>% split(.$elementID) %>%
      map(function(d){
        if(debug){
          print(progress)
          progress <<- progress + 1
        }
        currentX <- safeHead(d$xInitial)
        currentY <- safeHead(d$yInitial)
        # NOTE(review): length(d$xFinal) is a single number, so this loop body
        # executes exactly once; it was probably meant to be seq_len(nrow(d)).
        # Preserved as-is pending confirmation of the intended propagation.
        for(i in length(d$xFinal)){
          if(length(na.omit(d$xInitial))==0){
            print("yes")
            d$xInitial <- currentX
            d$yInitial <- currentY
          } else {
            print(d$xInitial)
            currentX <- na.omit(d$xInitial)
            currentY <- na.omit(d$yInitial)
          }
        }
        return(d)
      }) %>% bind_rows()
  }
  graspableJSON
}
| /R/dataLoading.R | no_license | dlandy/GraspableDataAnalysis | R | false | false | 8,636 | r | currentGMVersion = "2.5.12"
#' importGraspableJson
#'
#' This simple script just loads a json data file, merges the trial data and events, and renames a few columns
#' It also twiddles things down to a single version of GM, and a single context ("experiment"). It does this only if you
#' set 'compress' to true, but that seems like the vastly most useful format by default.
#'
#' @param fileName the name of a file locating a json project
#' @param versions A vector of versions of Graspable math to include. Data that are associated
#' with a different version are excluded by default. If any non-false value, data will be limited to versions of gm matching this string
#' @param contexts A vector of contexts to include. GM is used in many contexts, including experiments, the canvas, and so on. If set to F, include all contexts.
#' @param compress If set to true, execute logic to collapse each interaction into a single row, glossing
#' diferences between touch, tap, release, etc events.
#' @param debug If true, does a number of sanity checks, like looking at the number of unique old states when collapsing
#' Comments
#'
#' @return a tibble containing the data from the file, with the class "GM X", where X is the version
#' specified (or the first version number to appear).
#' @description Graspable data is stored in JSON [describe what the data format is like, and what compressing is.]
#' @seealso
#' @export
#' @examples
#' canvasSnippetFile <- system.file("extdata", "canvasSnippet.json", package = "GraspableDataAnalysis", mustWork = TRUE)
#' canvasSnippetData <- importGraspableJson(canvasSnippetFile, versions = c("2.5.12" ))
#'
importGraspableJson <- function(filename, versions = FALSE, contexts = FALSE, compress = TRUE, debug = FALSE){
  # Event subtypes that may label a collapsed interaction row.
  subtypeList <- c("font_size", "mode_change", "create", "create", "delete", "draw", "undo", "clear_all"
                   , "move", "inspect", "clone", "scrub", "redo", "auto-undo", "math")
  # head()/tail() wrappers that return a typed "missing" sentinel for empty
  # input: NA for integer/numeric vectors, the string "none" otherwise.
  safeHead <- function(ll){
    nilVal <- if(length(intersect(class(ll), c("integer", "numeric"))) > 0) NA else "none"
    if(length(ll) == 0) nilVal else head(ll, 1)
  }
  safeTail <- function(ll){
    nilVal <- if(length(intersect(class(ll), c("integer", "numeric"))) > 0) NA else "none"
    if(length(ll) == 0) nilVal else tail(ll, 1)
  }
  # Load the export and join event-level data onto its trial metadata.
  graspableJSON <- jsonlite::fromJSON(read_file(filename), simplifyDataFrame = TRUE, flatten = TRUE)
  gmData <- as_tibble(graspableJSON$data)    # as.tibble() is deprecated
  trials <- as_tibble(graspableJSON$trials)
  graspableJSON <- as_tibble(merge(gmData, trials, by.x = "trial_id", by.y = "id"))
  graspableJSON$uniqueID <- graspableJSON$`_id.x`
  graspableJSON <- rename(graspableJSON
                          , "trialTime" = "time.y"
                          , "trialTimeStamp" = "timestamp.y"
  )
  # Optionally filter by GM version, and tag the result's class with the
  # version(s) so S3 methods can dispatch on it.
  # BUG FIX: the original tested `versions != F`, which warns/errors when
  # `versions` is a vector (the documented usage); isFALSE() checks the
  # "no filtering" sentinel safely.
  if(!isFALSE(versions)){
    graspableJSON <- graspableJSON %>% filter(gm_version %in% versions)
    class(graspableJSON) <- append(paste("gm", versions)
                                   , class(graspableJSON))
  } else {
    class(graspableJSON) <- append(paste("gm", graspableJSON$gm_version[1])
                                   , class(graspableJSON))
  }
  if(!isFALSE(contexts)){
    graspableJSON <- graspableJSON %>% filter(experiment_id %in% contexts)
  }
  # Collapse each (trial, interaction) group of raw events into one row.
  if(compress){
    # Debug-only progress counter, local to this call (the original leaked
    # a global `i` via `<<-`).
    progress <- 0
    graspableJSON$marker <- paste(graspableJSON$trial_id, graspableJSON$interaction_id)
    if(debug){print("Building Compressed Table")}
    graspableJSON <- graspableJSON %>% split(.$marker) %>%
      map(function(d){
        if(debug){
          if(progress %% 50 == 0){print(progress)}
          progress <<- progress + 1   # updates the enclosing function's counter
        }
        # One summary row per interaction: first/last observed values of each
        # field, the summed duration, and the first recognized subtype.
        e <- with(d, tibble(
          context = safeHead(na.omit(experiment_id))
          , canvasID = safeHead((na.omit(canvas_id)))
          , elementID = safeHead(na.omit(el_id))
          , interactionID = mean(interaction_id)
          , uniqueID = safeHead(uniqueID)
          , type = ifelse("interaction" %in% type, 'interaction', "event")
          , action = safeTail(na.omit(action))
          , subtype = safeHead(subtype[subtype %in% subtypeList])
          , method = safeTail(na.omit(method))
          , oldState = safeTail(na.omit(old_state))
          , newState = safeTail(na.omit(new_state))
          , duration = sum(na.omit(dur))
          , elementType = safeHead(na.omit(el_type))
          , exprXInitial = safeHead(na.omit(expr_x))
          , exprYInitial = safeHead(na.omit(expr_y))
          , exprXFinal = safeTail(na.omit(expr_x))
          , exprYFinal = safeTail(na.omit(expr_y))
          , exprWidthInitial = safeHead(na.omit(expr_width))
          , exprHeightInitial = safeHead(na.omit(expr_height))
          , exprWidthFinal = safeTail(na.omit(expr_width))
          , exprHeightFinal = safeTail(na.omit(expr_height))
          , symXInitial = safeHead(na.omit(sym_x))
          , symYInitial = safeHead(na.omit(sym_y))
          , selXInitial = safeHead(na.omit(sel_x))
          , selYInitial = safeHead(na.omit(sel_y))
          , symXFinal = safeTail(na.omit(sym_x))
          , symYFinal = safeTail(na.omit(sym_y))
          , selXFinal = safeTail(na.omit(sel_x))
          , selYFinal = safeTail(na.omit(sel_y))
          # BUG FIX: the original passed `na.omit=T`, which min()/max() treat
          # as an extra data value (TRUE -> 1), corrupting timeStart; the
          # intended argument is na.rm.
          , timeStart = min(time.x, na.rm = TRUE)
          , timeFinal = max(time.x, na.rm = TRUE)
          , xInitial = NA
          , yInitial = NA
        ))
        if("move" %in% d$subtype){
          # For moves, absolute start/end coordinates come from the JSON
          # blobs stored in old_state/new_state.
          startingPlace <- jsonlite::fromJSON(safeHead(na.omit(d$old_state)))
          e$xInitial <- startingPlace$x
          e$yInitial <- startingPlace$y
          endingPlace <- jsonlite::fromJSON(safeHead(na.omit(d$new_state)))
          e$xFinal <- endingPlace$x
          e$yFinal <- endingPlace$y
        }
        if(debug){
          # Sanity-check counts of the distinct values collapsed away.
          e <- with(d, mutate(e
                              , oldStateDebugCount = length(na.omit(old_state))
                              , newStateDebugCount = length(na.omit(new_state))
                              , subtypeDebugCount = length(na.omit(subtype[subtype %in% subtypeList]))
                              , actionDebugCount = length(na.omit(action))
                              , exprXCount = length(unique(na.omit(expr_x)))
                              , exprYCount = length(unique(na.omit(expr_y)))
                              , exprWidthCount = length(unique(na.omit(expr_width)))
                              , exprHeightCount = length(unique(na.omit(expr_height)))
                              , symXCount = length(unique(na.omit(sym_x)))
                              , symYCount = length(unique(na.omit(sym_y)))
                              , selXCount = length(unique(na.omit(sel_x)))
                              , selYCount = length(unique(na.omit(sel_y)))
          ))
        }
        return(e)
      }) %>% bind_rows()
    graspableJSON <- graspableJSON %>% arrange(.$context, .$canvasID, .$interactionID)
    progress <- 0
    if(debug){print("Propogating absolute locations")}
    graspableJSON <- graspableJSON %>% split(.$elementID) %>%
      map(function(d){
        if(debug){
          print(progress)
          progress <<- progress + 1
        }
        currentX <- safeHead(d$xInitial)
        currentY <- safeHead(d$yInitial)
        # NOTE(review): length(d$xFinal) is a single number, so this loop body
        # executes exactly once; it was probably meant to be seq_len(nrow(d)).
        # Preserved as-is pending confirmation of the intended propagation.
        for(i in length(d$xFinal)){
          if(length(na.omit(d$xInitial))==0){
            print("yes")
            d$xInitial <- currentX
            d$yInitial <- currentY
          } else {
            print(d$xInitial)
            currentX <- na.omit(d$xInitial)
            currentY <- na.omit(d$yInitial)
          }
        }
        return(d)
      }) %>% bind_rows()
  }
  graspableJSON
}
|
\name{summary.mpprob}
\alias{summary.mpprob}
\title{Summary of mpprob object}
\usage{
\method{summary}{mpprob} (object, ...)
}
\arguments{
\item{object}{Object of class \code{mpprob}}
\item{...}{Additional arguments}
}
\value{
Output to screen of percentage of each chromosome
inherited from founders, average number of recombinations
per chromosome and genomewide, and number of
finals/founders/chromosomes/markers per chromosome.
}
\description{
Summarizes details about underlying mpcross object as
well as descriptive statistics about estimated founder
haplotypes
}
\examples{
sim.map <- sim.map(len=rep(100, 2), n.mar=11, include.x=FALSE, eq.spacing=TRUE)
sim.ped <- sim.mpped(4, 1, 500, 6, 1)
sim.dat <- sim.mpcross(map=sim.map, pedigree=sim.ped, qtl=matrix(data=c(1, 10, .4, 0, 0, 0, 1, 70, 0, .35, 0, 0), nrow=2, ncol=6, byrow=TRUE), seed=1)
mpp.dat <- mpprob(sim.dat, program="qtl")
summary(mpp.dat)
}
\seealso{
\code{\link[mpMap]{plot.mpprob}},
\code{\link[mpMap]{mpprob}}
}
| /man/summary.mpprob.Rd | no_license | cran/mpMap | R | false | false | 1,045 | rd | \name{summary.mpprob}
\alias{summary.mpprob}
\title{Summary of mpprob object}
\usage{
\method{summary}{mpprob} (object, ...)
}
\arguments{
\item{object}{Object of class \code{mpprob}}
\item{...}{Additional arguments}
}
\value{
Output to screen of percentage of each chromosome
inherited from founders, average number of recombinations
per chromosome and genomewide, and number of
finals/founders/chromosomes/markers per chromosome.
}
\description{
Summarizes details about underlying mpcross object as
well as descriptive statistics about estimated founder
haplotypes
}
\examples{
sim.map <- sim.map(len=rep(100, 2), n.mar=11, include.x=FALSE, eq.spacing=TRUE)
sim.ped <- sim.mpped(4, 1, 500, 6, 1)
sim.dat <- sim.mpcross(map=sim.map, pedigree=sim.ped, qtl=matrix(data=c(1, 10, .4, 0, 0, 0, 1, 70, 0, .35, 0, 0), nrow=2, ncol=6, byrow=TRUE), seed=1)
mpp.dat <- mpprob(sim.dat, program="qtl")
summary(mpp.dat)
}
\seealso{
\code{\link[mpMap]{plot.mpprob}},
\code{\link[mpMap]{mpprob}}
}
|
# Importing Dataset
dataset = read.csv('50_Startups.csv')
# Encoding Categorical Data
dataset$State = factor(dataset$State,
c('New York', 'California', 'Florida'),
c(1,2,3))
# Splitting The DataSet into Training Set and Test Set
library(caTools)
set.seed(123)
split = sample.split(dataset$Profit, SplitRatio = 0.8)
train_set = subset(dataset , split == TRUE)
test_set = subset(dataset , split == FALSE)
# Fitting the Simple Linear Regression to the training set
regressor = lm(formula = Profit ~ . ,
data = train_set)
# Predicting the Test Set Results
y_pred = predict(regressor, newdata = test_set)
# Backward Elimination
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State ,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend + Marketing.Spend ,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend ,
data = dataset)
y_pred = predict(regressor, newdata = test_set) | /Regression/Multiple Linear Regression/multiple_linear_regression.R | no_license | ritwik0706/ML-Playground | R | false | false | 1,148 | r |
# Importing Dataset
dataset = read.csv('50_Startups.csv')
# Encoding Categorical Data
dataset$State = factor(dataset$State,
c('New York', 'California', 'Florida'),
c(1,2,3))
# Splitting The DataSet into Training Set and Test Set
library(caTools)
set.seed(123)
split = sample.split(dataset$Profit, SplitRatio = 0.8)
train_set = subset(dataset , split == TRUE)
test_set = subset(dataset , split == FALSE)
# Fitting the Simple Linear Regression to the training set
regressor = lm(formula = Profit ~ . ,
data = train_set)
# Predicting the Test Set Results
y_pred = predict(regressor, newdata = test_set)
# Backward Elimination
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State ,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend + Marketing.Spend ,
data = dataset)
regressor = lm(formula = Profit ~ R.D.Spend ,
data = dataset)
y_pred = predict(regressor, newdata = test_set) |
#!/usr/bin/Rscript
# Radar interpretation
# Fetches the latest BOM polarimetric radar text product, decodes it into a
# 360 x nbins intensity matrix, rasterises it and publishes map tiles.
library(tidyverse)
library(stringr)
library(gtools)
library(raster)
library(geosphere)
library(RCurl)
setwd("/mnt/R")
source("../config.r")
#02 = Melbourne
#49 = Yarrawonda
# Radar IDs and their product update interval in minutes.
rad_tab=data.frame(rad=c("02","49"),time=c(6,10))
this_rad="02"
ss_rad = rad_tab[rad_tab$rad==this_rad,]
interval = ss_rad$time
# Most recent completed scan time (UTC), with minutes floored to the
# product's update interval to build the file timestamp.
date = as.POSIXct(format(Sys.time(),tz="UTC"),tz="UTC") - (interval) * 60
mins = formatC((as.numeric(format(date,"%M")) %/% interval) * interval,width=2,flag="0")
fdate = paste0(format(date,"%Y%m%d%H"),mins)
#2018 03 04 15 18
# Download the scan (credentials `bompwd` come from ../config.r) and split
# into one string per line.
url_root = sprintf("ftp://ftp-reg.cloud.bom.gov.au/radar/IDR%sPOL.%s.txt",this_rad,fdate)
dat=getURLContent(url_root,userpwd=bompwd,binary=FALSE)
dat=strsplit(dat,"\n")[[1]]
# 7x7 table of characters used by the product's delta encoding; each
# character encodes a pair of level changes (see delta_lookup below).
delta_table = c("!","[","a","b","c","]","@",
                "/","d","e","f","g","h","\\",
                "i","j","k","<","l","m","n",
                "o","p","-",".","+","q","r",
                "s","t","u",">","v","w","x",
                "(","y","S","T","U","V",")",
                "$","{","W","X","Y","}","&")
delta_table = asc(delta_table)
delta_matrix = matrix(delta_table,nrow=7,byrow=TRUE)
# Decode one delta-encoded symbol: given the ASCII code of a character from
# the 7x7 delta table, return c(first, second) level changes, each in -3..+3.
# Equivalent to the original ceiling/which arithmetic: the matched cell's
# column (minus 4) is the first delta, its row (minus 4) the second.
delta_lookup <- function(x){
  idx <- arrayInd(which(delta_matrix == x), dim(delta_matrix))
  c(idx[1, 2] - 4, idx[1, 1] - 4)
}
# Pull a numeric field out of the radar file header. Header lines have the
# form "KEY value", so the value is the second whitespace-separated token.
# (The original repeated this extraction verbatim for every field and even
# computed the LATITUDE block twice; a single helper removes the
# duplication.)
header_num <- function(key) {
  line <- dat[str_detect(dat, key)] %>% str_split(" ", simplify = TRUE)
  as.numeric(line[2])
}
STNID <- header_num("STNID")
LAT <- -header_num("LATITUDE")      # negated, matching the original script
LON <- header_num("LONGITUDE")
RNGRES <- header_num("RNGRES")      # range-bin resolution
ANGRES <- header_num("ANGRES")      # angular resolution
STARTRNG <- header_num("STARTRNG")  # first range
ENDRNG <- header_num("ENDRNG")      # last range
VIDRES <- header_num("VIDRES")      # intensity (video) resolution
# Keep only the scan lines between the copyright notice and the end marker.
start_dat <- which(str_detect(dat, "COPYRIGHT")) + 1
end_dat <- which(str_detect(dat, "END RADAR IMAGE")) - 1
dat <- dat[start_dat:end_dat]
# One row per azimuth degree, one column per range bin.
nbins <- (ENDRNG - STARTRNG) / RNGRES
out_mat <- matrix(0, nrow = 360, ncol = nbins)
# Decode each azimuth line. Each line carries a 3-digit angle (chars 2-4)
# followed by a mixed encoding: ABS characters set an absolute level,
# RLE digit runs repeat the cached level, and any other character is a
# delta pair decoded via delta_lookup().
for(i in dat){
  ang = as.numeric(substr(i,2,4))
  #print(ang)
  # Grab encoding vector
  avec = substr(i,5,nchar(i))
  # Convert to ascii
  avec = asc(avec)
  # Generate flagmode vector
  mvec = character(length=length(avec))
  # Mark ABS absolute encoding values
  mvec[avec >= strtoi(41,16) & avec <= strtoi(50,16)] = "ABS"
  # Mark RLE run-length encoding values
  mvec[avec >= strtoi(30,16) & avec <= strtoi(39,16)] = "RLE"
  buffer=rep(NA,nbins)
  output_position = 1
  input_position = 1
  RLE_cache = 0
  # NOTE(review): the `<` bound means the final input character is never
  # consumed, and the RLE lookahead below can index one or two positions
  # past the end of mvec -- confirm against the product specification.
  while(input_position < length(mvec)){
    # If we have an absolute value, just store it
    # ('A' is 65, so the level is the offset from 'A')
    if(mvec[input_position]=="ABS"){
      buffer[output_position] = abs(avec[input_position])-65
      RLE_cache = abs(avec[input_position])-65
      input_position = input_position+1
      output_position = output_position+1
      next
    }
    # If we have an RLE - count digits, then insert RLE_cache previous value
    # (runs of 1, 2 or 3 consecutive digit characters form the repeat count)
    if(mvec[input_position]=="RLE"){
      if(mvec[input_position+1]!="RLE"){
        RLE_count = as.numeric(chr(avec[input_position]))
        skip=0
      }
      if(mvec[input_position+1]=="RLE" & mvec[input_position+2]!="RLE"){
        RLE_count = as.numeric(paste0(chr(avec[input_position]),chr(avec[input_position+1])))
        skip=1
      }
      if(mvec[input_position+1]=="RLE" & mvec[input_position+2]=="RLE"){
        RLE_count = as.numeric(paste0(chr(avec[input_position]),chr(avec[input_position+1]),chr(avec[input_position+2])))
        skip=2
      }
      buffer[output_position:(output_position+(RLE_count-1))]=RLE_cache
      output_position = output_position + RLE_count
      input_position = input_position + skip + 1
      next
    }
    # Neither ABS nor RLE: a delta character encoding two successive level
    # changes relative to the running RLE_cache.
    if(mvec[input_position] == ""){
      deltas = delta_lookup(avec[input_position])
      RLE_cache = RLE_cache+ deltas[1]
      buffer[output_position]=RLE_cache
      RLE_cache = RLE_cache+ deltas[2]
      buffer[output_position+1]=RLE_cache
      output_position = output_position + 2
      input_position = input_position + 1
    }
  }
  out_mat[ang,] = buffer
}
# Bins never written (or lines absent) are treated as zero intensity.
out_mat[is.na(out_mat)]=0
## Make translation matrix (radar-dependent)
#d2r=function(x){x*pi/180}
#r2d=function(x){x*(180/pi)}
## Define grid
grid_extent=extent(LON-2,LON+2,LAT-2,LAT+2)
grid_layer=raster(grid_extent,nrows=800,ncols=800)
#dist_template=grid_layer
#ang_template=grid_layer
#for(this_cell in 1:(800*800)){
# cp=xyFromCell(dist_template,this_cell)
# cell_lon=cp[1]
# cell_lat=cp[2]
# d=distCosine(p1=c(cell_lon,cell_lat),p2=c(LON,LAT),r=6371000)
# ag=bearing(p1=c(LON,LAT),p2=c(cell_lon,cell_lat))
# dist_template[this_cell]=d
# ang_template[this_cell]=ag
#}
#ang_template2 =floor(ang_template %% 361)+1
#dist_template2 = (dist_template-STARTRNG)/RNGRES
#dist_template2 = dist_template2+1
#dist_template2[dist_template2<1]=1
#dist_template2 = floor(dist_template2)
#pos_vec = values(dist_template2-1) * 360 + values(ang_template2)
# Precomputed lookup mapping each raster cell to its (range, azimuth) index
# in out_mat (generated once per radar by the commented-out code above).
pos_vec = readRDS(paste0("radar/posvec_",this_rad,".RDS"))
values(grid_layer) = out_mat[pos_vec]
proj4string(grid_layer)="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 "
# Zero intensity becomes NA so it renders transparent via the colour table.
values(grid_layer)[values(grid_layer)==0]=NA
# 16-level blue->red->black intensity ramp; remaining indices transparent.
colortable(grid_layer)=c("#FFFFFF00", "#CCCCFFDD", "#9999FFEE", "#6565FFff", "#3232FFff", "#0000FFff", "#3300CBff" ,"#660098ff" ,"#990065ff" ,"#CC0032ff","#FF0000ff", "#CC0000ff" ,"#980000ff" ,"#650000ff", "#320000ff", "#000000ff",rep("#FFFFFF00",256-16))
writeRaster(grid_layer,sprintf("radar/rad%s_%s.tif",fdate,this_rad),overwrite=TRUE,options=c("TIFFTAG_GDAL_NODATA=0"))
# Expand indexed tif to a rgba vrt
# (`exeroot` is the GDAL binary prefix, supplied by ../config.r)
cmd=paste0(exeroot,"gdal_translate")
args=c("-of vrt","-expand rgba",sprintf("radar/rad%s_%s.tif",fdate,this_rad),sprintf("radar/rad%s_%s.vrt",fdate,this_rad))
system2(cmd,args)
# Cut the RGBA image into web map tiles for zoom levels 5-11.
cmd=paste0(exeroot,"gdal2tiles.py")
args=c(sprintf("radar/rad%s_%s.vrt",fdate,this_rad),"-a 0,0,0,0","-z 5-11",sprintf("tiles/rad_%s_%s",fdate,this_rad))
system2(cmd,args)
# Remove tifs
unlink(sprintf("radar/rad%s_%s.vrt",fdate,this_rad))
unlink(sprintf("radar/rad%s_%s.tif",fdate,this_rad))
# Remove old tiles
# (anything older than ~2 hours relative to the current scan window)
date = as.POSIXct(format(Sys.time(),tz="UTC"),tz="UTC") - (interval-1) * 60 - 120*60
flist = file.info(list.files("tiles",pattern="rad_",full.names=TRUE))
todel = rownames(flist)[flist$ctime < date]
unlink(todel,recursive=TRUE)
source("cat.r")
| /radar_1.r | permissive | ozjimbob/aaqfx-render | R | false | false | 6,917 | r | #!/usr/bin/Rscript
# Radar interpretation
library(tidyverse)
library(stringr)
library(gtools)
library(raster)
library(geosphere)
library(RCurl)
setwd("/mnt/R")
source("../config.r")
#02 = Melbourne
#49 = Yarrawonda
# rad = BOM radar ID, time = scan interval in minutes for that site
rad_tab=data.frame(rad=c("02","49"),time=c(6,10))
this_rad="02"
ss_rad = rad_tab[rad_tab$rad==this_rad,]
interval = ss_rad$time
# Step back one interval and snap the minutes down to the scan boundary to
# build the timestamp of the most recent completed scan (UTC).
date = as.POSIXct(format(Sys.time(),tz="UTC"),tz="UTC") - (interval) * 60
mins = formatC((as.numeric(format(date,"%M")) %/% interval) * interval,width=2,flag="0")
fdate = paste0(format(date,"%Y%m%d%H"),mins)
#2018 03 04 15 18
# Fetch the .pol radial file for this radar/scan from the BOM FTP server
url_root = sprintf("ftp://ftp-reg.cloud.bom.gov.au/radar/IDR%sPOL.%s.txt",this_rad,fdate)
dat=getURLContent(url_root,userpwd=bompwd,binary=FALSE)
dat=strsplit(dat,"\n")[[1]]
# 7x7 delta-encoding alphabet (row-major). Each character encodes a pair of
# level deltas; delta_lookup() below recovers them from the matrix position.
delta_table = c("!","[","a","b","c","]","@",
                "/","d","e","f","g","h","\\",
                "i","j","k","<","l","m","n",
                "o","p","-",".","+","q","r",
                "s","t","u",">","v","w","x",
                "(","y","S","T","U","V",")",
                "$","{","W","X","Y","}","&")
delta_table = asc(delta_table)
delta_matrix = matrix(delta_table,nrow=7,byrow=TRUE)
# Decode one delta-encoded ASCII byte into its pair of level deltas.
# The byte's position in the 7x7 delta_matrix determines the result: the
# column gives the first delta and the row the second, each shifted so the
# centre cell (4, 4) maps to (0, 0), i.e. deltas run from -3 to +3.
# Fix: derive the row arithmetically from the column-major linear index
# instead of a second which() scan (which also returned multiple values if a
# byte appeared more than once in the table); keep only the first match.
delta_lookup = function(x){
  pos = which(delta_matrix == x)[1]   # column-major linear index of x
  col = ceiling(pos / 7)              # 1-based column
  row = pos - (col - 1) * 7           # 1-based row within that column
  # Centre the grid on zero: indices 1..7 -> deltas -3..3
  c(col - 4, row - 4)
}
# Pull the numeric value that follows a keyword on a header line, e.g.
# "RNGRES 500" -> 500. Replaces eight near-identical copy/paste extractions
# and drops the accidentally duplicated LATITUDE block from the original.
header_value = function(lines, key) {
  fields = lines[str_detect(lines, key)] %>% str_split(" ", simplify = TRUE)
  as.numeric(fields[2])
}
STNID    = header_value(dat, "STNID")
LAT      = -header_value(dat, "LATITUDE")   # header stores southern latitude as positive
LON      = header_value(dat, "LONGITUDE")
RNGRES   = header_value(dat, "RNGRES")      # range-bin resolution
ANGRES   = header_value(dat, "ANGRES")      # angular resolution
STARTRNG = header_value(dat, "STARTRNG")    # first range bin
ENDRNG   = header_value(dat, "ENDRNG")      # last range bin
VIDRES   = header_value(dat, "VIDRES")      # number of video (intensity) levels
# The radial data sit between the COPYRIGHT line and the END RADAR IMAGE line
start_dat = which(str_detect(dat,"COPYRIGHT"))+1
end_dat = which(str_detect(dat,"END RADAR IMAGE"))-1
dat = dat[start_dat:end_dat]
# One row per degree of azimuth, one column per range bin; 0 = no echo
nbins = (ENDRNG - STARTRNG) / RNGRES
out_mat = matrix(0,nrow=360,ncol=nbins)
# Decode each radial line into one row of out_mat. Characters 2-4 hold the
# azimuth in degrees; the remainder is a mix of absolute levels ('A'-'P'),
# run-length counts (digits) and delta-encoded pairs (everything else).
for(i in dat){
  ang = as.numeric(substr(i,2,4))
  #print(ang)
  # Grab encoding vector
  avec = substr(i,5,nchar(i))
  # Convert to ascii
  avec = asc(avec)
  # Generate flagmode vector
  mvec = character(length=length(avec))
  # Mark ABS absolute encoding values
  # 0x41-0x50 ('A'-'P'): absolute intensity levels 0-15
  mvec[avec >= strtoi(41,16) & avec <= strtoi(50,16)] = "ABS"
  # Mark RLE run-length encoding values
  # 0x30-0x39 ('0'-'9'): digits forming a run-length count
  mvec[avec >= strtoi(30,16) & avec <= strtoi(39,16)] = "RLE"
  # One slot per range bin; NA = never written by the decoder
  buffer=rep(NA,nbins)
  output_position = 1
  input_position = 1
  # Last intensity level emitted; RLE runs repeat it, deltas adjust it
  RLE_cache = 0
  # NOTE(review): `<` means the final input character of each radial is never
  # consumed -- confirm whether `<=` was intended.
  while(input_position < length(mvec)){
    # If we have an absolute value, just store it
    if(mvec[input_position]=="ABS"){
      buffer[output_position] = abs(avec[input_position])-65
      RLE_cache = abs(avec[input_position])-65
      input_position = input_position+1
      output_position = output_position+1
      next
    }
    # If we have an RLE - count digits, then insert RLE_cache previous value
    # Counts may be 1-3 digits long; `skip` records the extra digits consumed.
    # NOTE(review): reading mvec two places past the end yields NA, so a
    # 2-digit count at the very end of a radial would make the second
    # condition NA and error -- verify against real feed data.
    if(mvec[input_position]=="RLE"){
      if(mvec[input_position+1]!="RLE"){
        RLE_count = as.numeric(chr(avec[input_position]))
        skip=0
      }
      if(mvec[input_position+1]=="RLE" & mvec[input_position+2]!="RLE"){
        RLE_count = as.numeric(paste0(chr(avec[input_position]),chr(avec[input_position+1])))
        skip=1
      }
      if(mvec[input_position+1]=="RLE" & mvec[input_position+2]=="RLE"){
        RLE_count = as.numeric(paste0(chr(avec[input_position]),chr(avec[input_position+1]),chr(avec[input_position+2])))
        skip=2
      }
      buffer[output_position:(output_position+(RLE_count-1))]=RLE_cache
      output_position = output_position + RLE_count
      input_position = input_position + skip + 1
      next
    }
    # Unflagged characters are delta pairs: two successive changes applied to
    # the running level, emitting two output bins per input character.
    if(mvec[input_position] == ""){
      deltas = delta_lookup(avec[input_position])
      RLE_cache = RLE_cache+ deltas[1]
      buffer[output_position]=RLE_cache
      RLE_cache = RLE_cache+ deltas[2]
      buffer[output_position+1]=RLE_cache
      output_position = output_position + 2
      input_position = input_position + 1
    }
  }
  out_mat[ang,] = buffer
}
# Bins the decoder never reached are "no echo"
out_mat[is.na(out_mat)]=0
## Make translation matrix (radar-dependent)
#d2r=function(x){x*pi/180}
#r2d=function(x){x*(180/pi)}
## Define grid
# 4x4 degree, 800x800 cell output grid centred on the radar site
grid_extent=extent(LON-2,LON+2,LAT-2,LAT+2)
grid_layer=raster(grid_extent,nrows=800,ncols=800)
# The block below generated the polar->grid index vector once per site; the
# result was saved to radar/posvec_<id>.RDS and is simply reloaded now.
#dist_template=grid_layer
#ang_template=grid_layer
#for(this_cell in 1:(800*800)){
#  cp=xyFromCell(dist_template,this_cell)
#  cell_lon=cp[1]
#  cell_lat=cp[2]
#  d=distCosine(p1=c(cell_lon,cell_lat),p2=c(LON,LAT),r=6371000)
#  ag=bearing(p1=c(LON,LAT),p2=c(cell_lon,cell_lat))
#  dist_template[this_cell]=d
#  ang_template[this_cell]=ag
#}
#ang_template2 =floor(ang_template %% 361)+1
#dist_template2 = (dist_template-STARTRNG)/RNGRES
#dist_template2 = dist_template2+1
#dist_template2[dist_template2<1]=1
#dist_template2 = floor(dist_template2)
#pos_vec = values(dist_template2-1) * 360 + values(ang_template2)
pos_vec = readRDS(paste0("radar/posvec_",this_rad,".RDS"))
values(grid_layer) = out_mat[pos_vec]
proj4string(grid_layer)="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 "
# Zero means "no echo"; mark it NA so it renders as transparent
values(grid_layer)[values(grid_layer)==0]=NA
colortable(grid_layer)=c("#FFFFFF00", "#CCCCFFDD", "#9999FFEE", "#6565FFff", "#3232FFff", "#0000FFff", "#3300CBff" ,"#660098ff" ,"#990065ff" ,"#CC0032ff","#FF0000ff", "#CC0000ff" ,"#980000ff" ,"#650000ff", "#320000ff", "#000000ff",rep("#FFFFFF00",256-16))
writeRaster(grid_layer,sprintf("radar/rad%s_%s.tif",fdate,this_rad),overwrite=TRUE,options=c("TIFFTAG_GDAL_NODATA=0"))
# Expand indexed tif to a rgba vrt
cmd=paste0(exeroot,"gdal_translate")
args=c("-of vrt","-expand rgba",sprintf("radar/rad%s_%s.tif",fdate,this_rad),sprintf("radar/rad%s_%s.vrt",fdate,this_rad))
system2(cmd,args)
# Build the zoom 5-11 web-tile pyramid
cmd=paste0(exeroot,"gdal2tiles.py")
args=c(sprintf("radar/rad%s_%s.vrt",fdate,this_rad),"-a 0,0,0,0","-z 5-11",sprintf("tiles/rad_%s_%s",fdate,this_rad))
system2(cmd,args)
# Remove tifs
unlink(sprintf("radar/rad%s_%s.vrt",fdate,this_rad))
unlink(sprintf("radar/rad%s_%s.tif",fdate,this_rad))
# Remove old tiles
date = as.POSIXct(format(Sys.time(),tz="UTC"),tz="UTC") - (interval-1) * 60 - 120*60
flist = file.info(list.files("tiles",pattern="rad_",full.names=TRUE))
todel = rownames(flist)[flist$ctime < date]
unlink(todel,recursive=TRUE)
source("cat.r")
|
context("o3 annual 4th highest")
# Shared fixtures: daily 8-hour-max ozone averages for several stations
# ("id" column), flagged so every day is usable by the statistic.
multi_id <- readRDS("daily_averages.rds")
multi_id$valid_max8hr <- TRUE
multi_id$flag_max8hr_incomplete <- FALSE
# Single-station subset plus the two results most tests below inspect
one_id <- multi_id[multi_id$id == "a", ]
test_one <- o3_ann_4th_highest(one_id, dt = "dates", val = "val")
test_mult <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id")
# POSIXct timestamps must be rejected; only Date columns are accepted
test_that("Only accepts date objects", {
  posix <- seq.POSIXt(as.POSIXct("2012-01-01"), as.POSIXct("2012-01-31"), by = "hour")
  test <- data.frame(posix, val = rnorm(length(posix), 20, 5))
  expect_error(o3_ann_4th_highest(test, "posix", "val"))
})
test_that("Is a data frame", {
  expect_is(test_one, "data.frame")
  expect_is(test_mult, "data.frame")
})
test_that("Has the right column names and dimensions", {
  expected_names <-c("year", "valid_in_year", "quarter_1", "quarter_2", "quarter_3",
                     "quarter_4", "max8hr", "valid_year", "exceed",
                     "flag_year_based_on_incomplete_data")
  expect_equal(names(test_one), expected_names)
  expect_equal(dim(test_one), c(4, 10))
  # For multiple years:
  expect_equal(names(test_mult), c("id", expected_names))
  expect_equal(dim(test_mult), c(8, 11))
})
test_that("Columns are the right class", {
  classes <- c("integer", "numeric", "numeric", "numeric", "numeric", "numeric",
               "numeric", "logical", "logical", "logical")
  expect_equal(unname(sapply(test_one, class)), classes)
  expect_equal(unname(sapply(test_mult, class)), c("character", classes))
})
# Inflating every value far above the standard must flag all valid years
test_that("Exceed works", {
  expect_false(any(test_mult$exceed))
  set.seed(42)
  multi_id$val <- rnorm(nrow(multi_id), 350, 1)
  res <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id")
  expect_true(all(res$exceed[res$valid_year]))
})
test_that("Number of valid days in year correct", {
  expect_equal(test_one$valid_in_year, c(1L, 361L, 358L, 357L))
  expect_equal(test_mult$valid_in_year, c(1L, 361L, 358L, 357L, 1L, 365L, 243L, 170L))
})
test_that("can exclude data rows", {
  # Take out values 9.0 or above to make sure it changes
  # Fix: use the real column name "dates"; the original "multi_id$date" only
  # worked via data.frame `$` partial matching (fragile, and fails on tibbles).
  high_dates <-
    multi_id$dates[multi_id$id == "a" & strftime(multi_id$dates,"%Y") == 2011 &
                    !is.na(multi_id$val) & multi_id$val >= 9.0]
  excl_df <-
    data.frame(id = "a",
               start = high_dates,
               stop = high_dates + 1,
               stringsAsFactors = FALSE)
  ret <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id",
                            exclude_df = excl_df, exclude_df_dt = c("start", "stop"))
  # With the high values excluded, the 2011 4th-highest drops to 8.6
  expect_equal(round(ret$max8hr[ret$id == "a" & ret$year == 2011],1), 8.6)
})
| /tests/testthat/test-o3_ann_4th_highest.R | permissive | paulroberts68/rcaaqs | R | false | false | 2,622 | r | context("o3 annual 4th highest")
# Shared fixtures: daily 8-hour-max ozone averages for several stations
multi_id <- readRDS("daily_averages.rds")
multi_id$valid_max8hr <- TRUE
multi_id$flag_max8hr_incomplete <- FALSE
# Single-station subset plus the two results most tests below inspect
one_id <- multi_id[multi_id$id == "a", ]
test_one <- o3_ann_4th_highest(one_id, dt = "dates", val = "val")
test_mult <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id")
# POSIXct timestamps must be rejected; only Date columns are accepted
test_that("Only accepts date objects", {
  posix <- seq.POSIXt(as.POSIXct("2012-01-01"), as.POSIXct("2012-01-31"), by = "hour")
  test <- data.frame(posix, val = rnorm(length(posix), 20, 5))
  expect_error(o3_ann_4th_highest(test, "posix", "val"))
})
test_that("Is a data frame", {
  expect_is(test_one, "data.frame")
  expect_is(test_mult, "data.frame")
})
test_that("Has the right column names and dimensions", {
  expected_names <-c("year", "valid_in_year", "quarter_1", "quarter_2", "quarter_3",
                     "quarter_4", "max8hr", "valid_year", "exceed",
                     "flag_year_based_on_incomplete_data")
  expect_equal(names(test_one), expected_names)
  expect_equal(dim(test_one), c(4, 10))
  # For multiple years:
  expect_equal(names(test_mult), c("id", expected_names))
  expect_equal(dim(test_mult), c(8, 11))
})
test_that("Columns are the right class", {
  classes <- c("integer", "numeric", "numeric", "numeric", "numeric", "numeric",
               "numeric", "logical", "logical", "logical")
  expect_equal(unname(sapply(test_one, class)), classes)
  expect_equal(unname(sapply(test_mult, class)), c("character", classes))
})
# Inflating every value far above the standard must flag all valid years
test_that("Exceed works", {
  expect_false(any(test_mult$exceed))
  set.seed(42)
  multi_id$val <- rnorm(nrow(multi_id), 350, 1)
  res <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id")
  expect_true(all(res$exceed[res$valid_year]))
})
test_that("Number of valid days in year correct", {
  expect_equal(test_one$valid_in_year, c(1L, 361L, 358L, 357L))
  expect_equal(test_mult$valid_in_year, c(1L, 361L, 358L, 357L, 1L, 365L, 243L, 170L))
})
test_that("can exclude data rows", {
  # Take out values 9.0 or above to make sure it changes
  # Fix: use the real column name "dates"; the original "multi_id$date" only
  # worked via data.frame `$` partial matching (fragile, and fails on tibbles).
  high_dates <-
    multi_id$dates[multi_id$id == "a" & strftime(multi_id$dates,"%Y") == 2011 &
                    !is.na(multi_id$val) & multi_id$val >= 9.0]
  excl_df <-
    data.frame(id = "a",
               start = high_dates,
               stop = high_dates + 1,
               stringsAsFactors = FALSE)
  ret <- o3_ann_4th_highest(multi_id, dt = "dates", val = "val", by = "id",
                            exclude_df = excl_df, exclude_df_dt = c("start", "stop"))
  # With the high values excluded, the 2011 4th-highest drops to 8.6
  expect_equal(round(ret$max8hr[ret$id == "a" & ret$year == 2011],1), 8.6)
})
|
%% File Name: personfit.stat.Rd
%% File Version: 0.24
\name{personfit.stat}
\alias{personfit.stat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Person Fit Statistics for the Rasch Model
}
\description{
This function computes a collection of person fit statistics
for the Rasch model (Karabatsos, 2003; Meijer & Sijtsma, 2001).
}
\usage{
personfit.stat(dat, abil, b)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
An \eqn{N \times I} data frame of dichotomous item
responses
}
\item{abil}{
An ability estimate, e.g. the WLE
}
\item{b}{
Estimated item difficulty
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A data frame with following columns (see Meijer & Sijtsma 2001
for a review of different person fit statistics):
\item{case}{Case index}
\item{abil}{Ability estimate \code{abil}}
\item{mean}{Person mean of correctly solved items}
\item{caution}{Caution index}
\item{depend}{Dependability index}
\item{ECI1}{\eqn{ECI1}}
\item{ECI2}{\eqn{ECI2}}
\item{ECI3}{\eqn{ECI3}}
\item{ECI4}{\eqn{ECI4}}
\item{ECI5}{\eqn{ECI5}}
\item{ECI6}{\eqn{ECI6}}
\item{l0}{Fit statistic \eqn{l_0}}
\item{lz}{Fit statistic \eqn{l_z}}
\item{outfit}{Person outfit statistic}
\item{infit}{Person infit statistic}
\item{rpbis}{Point biserial correlation of item responses
and item \eqn{p} values}
\item{rpbis.itemdiff}{Point biserial correlation of item responses
and item difficulties \code{b}}
\item{U3}{Fit statistic \eqn{U_3}}
}
\references{
Karabatsos, G. (2003). Comparing the aberrant response detection performance
of thirty-six person-fit statistics. \emph{Applied Measurement in Education,
16}, 277-298.
Meijer, R. R., & Sijtsma, K. (2001). Methodology
review: Evaluating person fit. \emph{Applied Psychological
Measurement, 25}, 107-135.
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See \code{\link{pcm.fit}} for person fit in the partial credit model.
See the \pkg{irtProb} and \pkg{PerFit} packages for person fit statistics
and person response curves and functions included in other packages:
\code{\link[mirt:personfit]{mirt::personfit}},
\code{eRm::personfit} and
\code{ltm::person.fit}.
}
% \code{\link[TAM:tam.fa]{tam.fa}} (\pkg{TAM})
\examples{
#############################################################################
# EXAMPLE 1: Person fit Reading Data
#############################################################################
data(data.read)
dat <- data.read
# estimate Rasch model
mod <- sirt::rasch.mml2( dat )
# WLE
wle1 <- sirt::wle.rasch( dat,b=mod$item$b )$theta
b <- mod$item$b # item difficulty
# evaluate person fit
pf1 <- sirt::personfit.stat( dat=dat, abil=wle1, b=b)
\dontrun{
# dimensional analysis of person fit statistics
x0 <- stats::na.omit(pf1[, -c(1:3) ] )
stats::factanal( x=x0, factors=2, rotation="promax" )
## Loadings:
## Factor1 Factor2
## caution 0.914
## depend 0.293 0.750
## ECI1 0.869 0.160
## ECI2 0.869 0.162
## ECI3 1.011
## ECI4 1.159 -0.269
## ECI5 1.012
## ECI6 0.879 0.130
## l0 0.409 -1.255
## lz -0.504 -0.529
## outfit 0.297 0.702
## infit 0.362 0.695
## rpbis -1.014
## rpbis.itemdiff 1.032
## U3 0.735 0.309
##
## Factor Correlations:
## Factor1 Factor2
## Factor1 1.000 -0.727
## Factor2 -0.727 1.000
##
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Person fit}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/personfit.stat.Rd | no_license | rosefu79/sirt | R | false | false | 3,787 | rd | %% File Name: personfit.stat.Rd
%% File Version: 0.24
\name{personfit.stat}
\alias{personfit.stat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Person Fit Statistics for the Rasch Model
}
\description{
This function collects some person fit statistics
for the Rasch model (Karabatsos, 2003; Meijer & Sijtsma, 2001).
}
\usage{
personfit.stat(dat, abil, b)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
An \eqn{N \times I} data frame of dichotomous item
responses
}
\item{abil}{
An ability estimate, e.g. the WLE
}
\item{b}{
Estimated item difficulty
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A data frame with following columns (see Meijer & Sijtsma 2001
for a review of different person fit statistics):
\item{case}{Case index}
\item{abil}{Ability estimate \code{abil}}
\item{mean}{Person mean of correctly solved items}
\item{caution}{Caution index}
\item{depend}{Dependability index}
\item{ECI1}{\eqn{ECI1}}
\item{ECI2}{\eqn{ECI2}}
\item{ECI3}{\eqn{ECI3}}
\item{ECI4}{\eqn{ECI4}}
\item{ECI5}{\eqn{ECI5}}
\item{ECI6}{\eqn{ECI6}}
\item{l0}{Fit statistic \eqn{l_0}}
\item{lz}{Fit statistic \eqn{l_z}}
\item{outfit}{Person outfit statistic}
\item{infit}{Person infit statistic}
\item{rpbis}{Point biserial correlation of item responses
and item \eqn{p} values}
\item{rpbis.itemdiff}{Point biserial correlation of item responses
and item difficulties \code{b}}
\item{U3}{Fit statistic \eqn{U_3}}
}
\references{
Karabatsos, G. (2003). Comparing the aberrant response detection performance
of thirty-six person-fit statistics. \emph{Applied Measurement in Education,
16}, 277-298.
Meijer, R. R., & Sijtsma, K. (2001). Methodology
review: Evaluating person fit. \emph{Applied Psychological
Measurement, 25}, 107-135.
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See \code{\link{pcm.fit}} for person fit in the partial credit model.
See the \pkg{irtProb} and \pkg{PerFit} packages for person fit statistics
and person response curves and functions included in other packages:
\code{\link[mirt:personfit]{mirt::personfit}},
\code{eRm::personfit} and
\code{ltm::person.fit}.
}
% \code{\link[TAM:tam.fa]{tam.fa}} (\pkg{TAM})
\examples{
#############################################################################
# EXAMPLE 1: Person fit Reading Data
#############################################################################
data(data.read)
dat <- data.read
# estimate Rasch model
mod <- sirt::rasch.mml2( dat )
# WLE
wle1 <- sirt::wle.rasch( dat,b=mod$item$b )$theta
b <- mod$item$b # item difficulty
# evaluate person fit
pf1 <- sirt::personfit.stat( dat=dat, abil=wle1, b=b)
\dontrun{
# dimensional analysis of person fit statistics
x0 <- stats::na.omit(pf1[, -c(1:3) ] )
stats::factanal( x=x0, factors=2, rotation="promax" )
## Loadings:
## Factor1 Factor2
## caution 0.914
## depend 0.293 0.750
## ECI1 0.869 0.160
## ECI2 0.869 0.162
## ECI3 1.011
## ECI4 1.159 -0.269
## ECI5 1.012
## ECI6 0.879 0.130
## l0 0.409 -1.255
## lz -0.504 -0.529
## outfit 0.297 0.702
## infit 0.362 0.695
## rpbis -1.014
## rpbis.itemdiff 1.032
## U3 0.735 0.309
##
## Factor Correlations:
## Factor1 Factor2
## Factor1 1.000 -0.727
## Factor2 -0.727 1.000
##
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Person fit}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# sign up:
# https://coinmarketcap.com/api/
# rate/call limits:
# https://pro.coinmarketcap.com/api/features
# sign in:
# https://pro.coinmarketcap.com/account/
# documentation
# https://coinmarketcap.com/api/documentation/v1/
# *******************************************************************************************************
# Attach dependencies with library() rather than require(): require() only
# warns (and returns FALSE) when a package is missing, which would surface
# later as cryptic "could not find function" errors instead of failing here.
library("pbapply");library("data.table");library("httr");library("rvest");library("dplyr")
library("lubridate");library("jsonlite")
# Keep the API key in its own environment rather than a bare global
PASS <- new.env()
assign("apikey","insert_api_key_here",envir = PASS)
# HELPER FUNCTION - Converts timestamp to local timestamp
# format TimeZone
# Convert a UTC timestamp to the machine's local time zone by applying the
# current local-vs-UTC offset, rounded to whole hours.
fixTZ = function(timeStamp){
  # How far (in whole hours) the local clock sits ahead of / behind UTC now
  utc_now <- lubridate::force_tz(with_tz(Sys.time(), tz = "UTC"))
  offset_hours <- round(as.numeric(difftime(Sys.time(), utc_now, units = "hours")), 0)
  # Shift the timestamp and restamp it with the local zone
  as.POSIXct(timeStamp + hours(offset_hours), tz = Sys.timezone())
}
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1FiatMap
# Fetch CoinMarketCap's fiat currency map and return it as a data frame.
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1FiatMap
getFiat = function()
{
  endpoint <- "https://pro-api.coinmarketcap.com/v1/fiat/map"
  # Authenticated GET request
  resp <- httr::GET(endpoint,
                    httr::add_headers(`Accepts` = 'application/json',
                                      `X-CMC_PRO_API_KEY` = PASS$apikey))
  # Parse the JSON payload and keep its "data" element
  parsed <- fromJSON(rawToChar(resp$content))
  as.data.frame(parsed[["data"]])
}
tmp <- getFiat()
# *********************************************************************************
# Turn it into a function - gets latest listings
# Pass in max number of listings (limit) & fiat currency to use (ex.USD)
# Latest cryptocurrency listings starting from rank 1.
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1CryptocurrencyListingsLatest
#
# Args:
#   limit: maximum number of listings to return
#   fiat:  currency the quote columns are expressed in, e.g. "USD"
# Returns a data frame of coin metadata cbind-ed with the quote columns.
getLatestListings = function(limit,fiat)
{
  # build URL
  url = paste0("https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest",
               "?start=1&limit=",limit,"&convert=",fiat)
  # authenticated GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # parse JSON payload
  dt<- fromJSON(rawToChar(pg$content))
  # metadata columns (note: cbind() of mixed vectors coerces them to character)
  bse <- cbind(dt$data$id,
               dt$data$name,
               dt$data$symbol,
               dt$data$slug,
               dt$data$date_added,
               dt$data$max_supply,
               dt$data$circulating_supply,
               dt$data$total_supply) %>% as.data.frame
  colnames(bse) = c("ID","Name","Symbol","Slug","DateAdded","MaxSupply","CirculatingSupply","TotalSupply")
  bse$DateAdded <- as.Date(bse$DateAdded)
  # quote block: round the price, then convert every percentage column to a
  # fraction in one loop (replaces seven near-identical statements)
  qte <- dt[["data"]][["quote"]] %>% as.data.frame
  qte[[1]]$price <- qte[[1]]$price %>% round(digits = 4)
  qte[[1]]$last_updated <- as.POSIXct(qte[[1]]$last_updated, format="%Y-%m-%dT%H:%M:%S.000Z")
  pct_cols <- c("percent_change_1h","percent_change_24h","percent_change_7d",
                "percent_change_30d","percent_change_60d","percent_change_90d",
                "market_cap_dominance")
  for (nm in pct_cols) qte[[1]][[nm]] <- round(qte[[1]][[nm]]/100, 4)
  # cbind data & quotes
  df <- cbind(bse,qte)
  # return table
  df
}
# TEST Function
tmp = getLatestListings(limit = 5000, fiat = "USD")
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1CryptocurrencyQuotesLatest
# Latest market quote for one crypto symbol in one fiat currency.
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1CryptocurrencyQuotesLatest
#
# Args:
#   symbol: crypto ticker, e.g. "BTC"
#   fiat:   currency to quote in, e.g. "USD"
# Returns a one-row data frame: metadata columns followed by quote columns.
getLatestQuote= function(symbol, fiat)
{
  # build URL
  url = paste0("https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest",
               "?convert=",fiat,"&symbol=",symbol)
  # authenticated GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # parse JSON payload
  dt<- fromJSON(rawToChar(pg$content))
  # extract quote block
  qte <- rbindlist(dt$data[[1]]$quote) %>% as.data.frame()
  # round the price, then convert every percentage column to a fraction in
  # one loop (replaces seven near-identical statements)
  qte$price <- round(qte$price, 5)
  pct_cols <- c("percent_change_1h","percent_change_24h","percent_change_7d",
                "percent_change_30d","percent_change_60d","percent_change_90d",
                "market_cap_dominance")
  for (nm in pct_cols) qte[[nm]] <- round(qte[[nm]]/100, 5)
  qte$last_updated <- fixTZ(as.POSIXct(qte$last_updated, format="%Y-%m-%dT%H:%M:%S.000Z"))
  # assemble metadata (cbind() coerces everything to character here);
  # max_supply can be absent from the payload, hence the NA fallback
  meta <- as.data.frame(cbind(dt$data[[1]]$id,
                              dt$data[[1]]$name,
                              dt$data[[1]]$symbol,
                              dt$data[[1]]$slug,
                              dt$data[[1]]$num_market_pairs,
                              dt$data[[1]]$date_added,
                              ifelse(is.null(dt$data[[1]]$max_supply), NA,dt$data[[1]]$max_supply),
                              dt$data[[1]]$circulating_supply,
                              dt$data[[1]]$total_supply,
                              dt$data[[1]]$is_active
  ))
  colnames(meta) <- c("id","name","symbol","slug","num_market_pairs",
                      "date_added","max_supply","circulating_supply",
                      "total_supply","is_active")
  meta$date_added <- fixTZ(as.POSIXct(meta$date_added, format="%Y-%m-%dT%H:%M:%S.000Z"))
  # combine meta & qte data
  all <- cbind(meta,qte)
  # return data
  all
}
# TEST Function
tmp1 = getLatestQuote(symbol = "BTC", fiat = "USD")
tmp2 = getLatestQuote(symbol = "BTC", fiat = "CAD")
# call multiple quotes:
symbols = c("BTC","ETH","DOGE","ADA","XTZ","USDC")
# Each failed lookup yields NULL (dropped by rbindlist below)
qte <- pblapply(as.list(symbols), function(x){
  tmp <- try(getLatestQuote(symbol=x, fiat="USD"))
  if(!inherits(tmp, 'try-error'))
    tmp
})
# row bind data
qte <- rbindlist(qte,use.names = TRUE,fill = TRUE)
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1GlobalmetricsQuotesLatest
# Fetch the latest global market metrics and return them as a single
# one-column data frame (metric name -> value), with the DeFi/stablecoin/
# derivatives duplicates of the quote dropped.
getLatestMetrics = function()
{
  # build url
  url = paste0("https://pro-api.coinmarketcap.com/v1/global-metrics/quotes/latest")
  # GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # read in content
  dt<- fromJSON(rawToChar(pg$content))
  # meta
  # First 22 payload fields are scalar metadata
  meta <- as.data.frame(cbind(dt$data[1:22]))
  # quote data
  qte <- rbindlist(dt[["data"]][["quote"]]) %>% t %>% as.data.frame()
  # Fields excluded from the returned table
  removed <- c("defi_volume_24h","defi_volume_24h_reported","defi_24h_percentage_change",
               "defi_market_cap","stablecoin_volume_24h","stablecoin_volume_24h_reported",
               "stablecoin_24h_percentage_change","stablecoin_market_cap","derivatives_volume_24h",
               "derivatives_volume_24h_reported","derivatives_24h_percentage_change")
  qte <- as.data.frame(qte)
  qte <- as.data.frame(qte[!(rownames(qte) %in% removed),],
                       row.names= rownames(qte)[!(rownames(qte) %in% removed)])
  # Localise the timestamp, then store it back as character (mixed-type column)
  qte["last_updated",] <- fixTZ(as.POSIXct(qte["last_updated",],format="%Y-%m-%dT%H:%M:%S.%OSZ")) %>%
    as.character()
  colnames(qte) <- "V1"
  ALL <- rbind(meta,qte)
  colnames(ALL) <- "Value"
  # return df
  ALL
}
tmp <- getLatestMetrics()
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1ToolsPriceconversion
# Convert an amount of one currency into one (or several) target currencies.
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1ToolsPriceconversion
#
# Args:
#   amount:     quantity of fromSymbol to convert
#   fromSymbol: source currency ticker (crypto or fiat)
#   toSymbol:   target currency ticker(s)
# Returns a one-row data frame: source details plus one (toSymbol, amount)
# column pair per target currency.
cryptoConversionRate = function(amount, fromSymbol, toSymbol)
{
  # url
  url = paste0("https://pro-api.coinmarketcap.com/v1/tools/price-conversion",
               "?amount=",amount,"&symbol=",fromSymbol,"&convert=",toSymbol)
  # GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # read in content
  dt<- fromJSON(rawToChar(pg$content))
  # Source-side details (cbind() coerces these to character)
  fromCrypto <- as.data.frame(cbind(dt$data$id,
                                    dt$data$symbol,
                                    dt$data$name,
                                    dt$data$amount,
                                    dt$data$last_updated))
  colnames(fromCrypto) <- c("id","fromSymbol","fromName","amount","last_updated")
  fromCrypto$last_updated <- fixTZ(as.POSIXct(fromCrypto$last_updated,
                                              format="%Y-%m-%dT%H:%M:%S.%OSZ")) %>% as.character()
  # in case multiple currency conversions
  # NOTE(review): if the quote list were empty, 1:nCurr would be 1:0 and the
  # lapply would misbehave -- seq_len(nCurr) would be safer; confirm the API
  # always returns at least one quote.
  nCurr<- length(dt[["data"]][["quote"]])
  tmp <- lapply(as.list(1:nCurr), function(ii){
    df <- rbind(dt$data$quote[[ii]]) %>% as.data.frame()
    toCurrName <- names(dt$data$quote)[[ii]]
    df$toSymbol <- toCurrName
    df <- as.data.frame(df[,c("toSymbol","price")])
    colnames(df)[2] <- "amount"
    df
  })
  tmp <- do.call(cbind,tmp)
  # return conversion(s)
  cbind(fromCrypto,tmp)
}
tmp <- cryptoConversionRate(amount=1, fromSymbol = "BTC", toSymbol = "USD")
tmp <- cryptoConversionRate(amount=100, fromSymbol = "USD", toSymbol = "ETH")
tmp <- cryptoConversionRate(amount=1, fromSymbol = "ETH", toSymbol = "ADA")
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1KeyInfo
# Return the account's CoinMarketCap plan details and current API usage.
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1KeyInfo
# Returns a two-element list: [[1]] one-column data frame of plan limits
# (column "value"), [[2]] data frame of usage statistics.
planInfo = function()
{
  endpoint <- "https://pro-api.coinmarketcap.com/v1/key/info"
  resp <- httr::GET(endpoint,
                    httr::add_headers(`Accepts` = 'application/json',
                                      `X-CMC_PRO_API_KEY` = PASS$apikey))
  payload <- fromJSON(rawToChar(resp$content))
  # Plan limits -> one-column data frame
  plan_df <- do.call(rbind, payload[["data"]][["plan"]]) %>% as.data.frame()
  colnames(plan_df) <- "value"
  # Current credit/usage statistics
  usage_df <- do.call(rbind, payload[["data"]][["usage"]]) %>% as.data.frame()
  list(plan_df, usage_df)
}
tmp <- planInfo()
| /coinmarketcapAPI.R | no_license | julia-tache/coinmarketcap | R | false | false | 10,187 | r | # sign up:
# https://coinmarketcap.com/api/
# rate/call limits:
# https://pro.coinmarketcap.com/api/features
# sign in:
# https://pro.coinmarketcap.com/account/
# documentation
# https://coinmarketcap.com/api/documentation/v1/
# *******************************************************************************************************
require("pbapply");require("data.table");require("httr");require("rvest");require("dplyr")
require("lubridate");require("jsonlite")
PASS <- new.env()
assign("apikey","insert_api_key_here",envir = PASS)
# HELPER FUNCTION - Converts timestamp to local timestamp
# format TimeZone
# Convert a UTC timestamp to local time by adding the current whole-hour
# offset between the local clock and UTC.
fixTZ = function(timeStamp){
  tmDIFF = round(as.numeric(difftime(Sys.time(),
                                     lubridate::force_tz(with_tz(Sys.time(),tz="UTC")),
                                     units = "hours")),0)
  as.POSIXct(timeStamp + hours(tmDIFF), tz= Sys.timezone())
}
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1FiatMap
# Fetch CoinMarketCap's fiat currency map as a data frame.
getFiat = function()
{
  # url
  url = paste0("https://pro-api.coinmarketcap.com/v1/fiat/map")
  # GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # read in content
  dt<- fromJSON(rawToChar(pg$content))
  fiat <- dt[["data"]] %>% as.data.frame()
  fiat
}
tmp <- getFiat()
# *********************************************************************************
# Turn it into a function - gets latest listings
# Pass in max number of listings (limit) & fiat currency to use (ex.USD)
# Latest cryptocurrency listings (rank 1..limit) quoted in the given fiat.
# Returns coin metadata cbind-ed with the quote columns; percentage columns
# are converted to fractions and rounded to 4 decimal places.
getLatestListings = function(limit,fiat)
{
  # build URL
  url = paste0("https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest",
               "?start=1&limit=",limit,"&convert=",fiat)
  # GET request
  pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
                                        `X-CMC_PRO_API_KEY` = PASS$apikey))
  # read in content
  dt<- fromJSON(rawToChar(pg$content))
  # convert to data frame
  # (cbind() of mixed vectors coerces all metadata columns to character)
  bse <- cbind(dt$data$id,
               dt$data$name,
               dt$data$symbol,
               dt$data$slug,
               dt$data$date_added,
               dt$data$max_supply,
               dt$data$circulating_supply,
               dt$data$total_supply) %>% as.data.frame
  # format column names
  colnames(bse) = c("ID","Name","Symbol","Slug","DateAdded","MaxSupply","CirculatingSupply","TotalSupply")
  # format DateAdded
  bse$DateAdded <- as.Date(bse$DateAdded)
  # quote
  qte <- dt[["data"]][["quote"]] %>% as.data.frame
  qte[[1]]$price <- qte[[1]]$price %>% round(digits = 4)
  qte[[1]]$last_updated <- as.POSIXct(qte[[1]]$last_updated, format="%Y-%m-%dT%H:%M:%S.000Z")
  qte[[1]]$percent_change_1h <- round(qte[[1]]$percent_change_1h/100,4)
  qte[[1]]$percent_change_24h <- round(qte[[1]]$percent_change_24h/100,4)
  qte[[1]]$percent_change_7d <- round(qte[[1]]$percent_change_7d/100,4)
  qte[[1]]$percent_change_30d <- round(qte[[1]]$percent_change_30d/100,4)
  qte[[1]]$percent_change_60d <- round(qte[[1]]$percent_change_60d/100,4)
  qte[[1]]$percent_change_90d <- round(qte[[1]]$percent_change_90d/100,4)
  qte[[1]]$market_cap_dominance<-round(qte[[1]]$market_cap_dominance/100,4)
  # cbind data & quotes
  df <- cbind(bse,qte)
  # return table
  df
}
# TEST Function
tmp = getLatestListings(limit = 5000, fiat = "USD")
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1CryptocurrencyQuotesLatest
getLatestQuote= function(symbol, fiat)
{
# build URL
url = paste0("https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest",
"?convert=",fiat,"&symbol=",symbol)
# GET request
pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
`X-CMC_PRO_API_KEY` = PASS$apikey))
# read in content
dt<- fromJSON(rawToChar(pg$content))
# extract quote
qte <- rbindlist(dt$data[[1]]$quote) %>% as.data.frame()
# format column types
qte$price <- round(qte$price, 5)
qte$percent_change_1h <- round(qte$percent_change_1h/100,5)
qte$percent_change_24h <- round(qte$percent_change_24h/100,5)
qte$percent_change_7d <- round(qte$percent_change_7d/100,5)
qte$percent_change_30d <- round(qte$percent_change_30d/100,5)
qte$percent_change_60d <- round(qte$percent_change_60d/100,5)
qte$percent_change_90d <- round(qte$percent_change_90d/100,5)
qte$market_cap_dominance<-round(qte$market_cap_dominance/100,5)
qte$last_updated <- fixTZ(as.POSIXct(qte$last_updated, format="%Y-%m-%dT%H:%M:%S.000Z"))
# add Meta
meta <- as.data.frame(cbind(dt$data[[1]]$id,
dt$data[[1]]$name,
dt$data[[1]]$symbol,
dt$data[[1]]$slug,
dt$data[[1]]$num_market_pairs,
dt$data[[1]]$date_added,
ifelse(is.null(dt$data[[1]]$max_supply), NA,dt$data[[1]]$max_supply),
dt$data[[1]]$circulating_supply,
dt$data[[1]]$total_supply,
dt$data[[1]]$is_active
))
colnames(meta) <- c("id","name","symbol","slug","num_market_pairs",
"date_added","max_supply","circulating_supply",
"total_supply","is_active")
meta$date_added <- fixTZ(as.POSIXct(meta$date_added, format="%Y-%m-%dT%H:%M:%S.000Z"))
# combine meta & qte data
all <- cbind(meta,qte)
# return data
all
}
# Smoke-test the function with single requests in two fiat currencies.
tmp1 = getLatestQuote(symbol = "BTC", fiat = "USD")
tmp2 = getLatestQuote(symbol = "BTC", fiat = "CAD")
# Fetch quotes for several symbols; try() keeps one failed request from
# aborting the whole loop, and failures are dropped (NULL) from the result.
symbols = c("BTC","ETH","DOGE","ADA","XTZ","USDC")
qte <- pblapply(as.list(symbols), function(x){
tmp <- try(getLatestQuote(symbol=x, fiat="USD"))
if(!inherits(tmp, 'try-error'))
tmp
})
# Stack the per-symbol rows into one table (fill=TRUE pads missing columns).
qte <- rbindlist(qte,use.names = TRUE,fill = TRUE)
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1GlobalmetricsQuotesLatest
# Fetch aggregate global market metrics (active cryptocurrencies, BTC/ETH
# dominance, total market cap, ...) from the "global-metrics/quotes/latest"
# endpoint. Takes no arguments; returns a one-column data.frame ("Value")
# with one row per metric, combining the top-level metadata fields with the
# quote fields (minus the defi/stablecoin/derivatives columns dropped below).
# NOTE(review): depends on globals PASS$apikey and fixTZ(); the hard-coded
# 1:22 metadata slice assumes a fixed API response layout — verify against
# the current API version.
getLatestMetrics = function()
{
# endpoint URL (no query parameters needed)
url = paste0("https://pro-api.coinmarketcap.com/v1/global-metrics/quotes/latest")
# authenticated GET request
pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
`X-CMC_PRO_API_KEY` = PASS$apikey))
# parse the raw JSON response body
dt<- fromJSON(rawToChar(pg$content))
# top-level metadata fields (first 22 entries of data)
meta <- as.data.frame(cbind(dt$data[1:22]))
# quote fields, transposed so metrics become row names
qte <- rbindlist(dt[["data"]][["quote"]]) %>% t %>% as.data.frame()
# metrics to exclude from the output
removed <- c("defi_volume_24h","defi_volume_24h_reported","defi_24h_percentage_change",
"defi_market_cap","stablecoin_volume_24h","stablecoin_volume_24h_reported",
"stablecoin_24h_percentage_change","stablecoin_market_cap","derivatives_volume_24h",
"derivatives_volume_24h_reported","derivatives_24h_percentage_change")
qte <- as.data.frame(qte)
# drop the excluded rows while preserving the remaining row names
qte <- as.data.frame(qte[!(rownames(qte) %in% removed),],
row.names= rownames(qte)[!(rownames(qte) %in% removed)])
# parse the timestamp row and store it back as character
qte["last_updated",] <- fixTZ(as.POSIXct(qte["last_updated",],format="%Y-%m-%dT%H:%M:%S.%OSZ")) %>%
as.character()
colnames(qte) <- "V1"
# stack metadata and quote metrics into one labelled column
ALL <- rbind(meta,qte)
colnames(ALL) <- "Value"
# return the combined data.frame
ALL
}
# Smoke-test the function.
tmp <- getLatestMetrics()
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1ToolsPriceconversion
# Convert an amount of one currency into one (or more) target currencies via
# the CoinMarketCap "tools/price-conversion" endpoint.
#   amount     - numeric amount of the source currency
#   fromSymbol - source currency/crypto ticker, e.g. "BTC"
#   toSymbol   - target currency ticker(s), e.g. "USD"
# Returns a one-row data.frame: source-currency metadata (id, symbol, name,
# amount, last_updated) followed by a (toSymbol, amount) column pair for each
# conversion currency returned by the API.
# NOTE(review): depends on globals PASS$apikey and fixTZ().
cryptoConversionRate = function(amount, fromSymbol, toSymbol)
{
# build the request URL with query parameters
url = paste0("https://pro-api.coinmarketcap.com/v1/tools/price-conversion",
"?amount=",amount,"&symbol=",fromSymbol,"&convert=",toSymbol)
# authenticated GET request
pg <- httr::GET(url,httr::add_headers(`Accepts` = 'application/json',
`X-CMC_PRO_API_KEY` = PASS$apikey))
# parse the raw JSON response body
dt<- fromJSON(rawToChar(pg$content))
# source-currency metadata
fromCrypto <- as.data.frame(cbind(dt$data$id,
dt$data$symbol,
dt$data$name,
dt$data$amount,
dt$data$last_updated))
colnames(fromCrypto) <- c("id","fromSymbol","fromName","amount","last_updated")
fromCrypto$last_updated <- fixTZ(as.POSIXct(fromCrypto$last_updated,
format="%Y-%m-%dT%H:%M:%S.%OSZ")) %>% as.character()
# one quote entry per requested conversion currency
nCurr<- length(dt[["data"]][["quote"]])
# seq_len() (not 1:nCurr) so an empty quote list yields zero iterations
# instead of the bogus c(1, 0) sequence
tmp <- lapply(as.list(seq_len(nCurr)), function(ii){
df <- rbind(dt$data$quote[[ii]]) %>% as.data.frame()
toCurrName <- names(dt$data$quote)[[ii]]
df$toSymbol <- toCurrName
# keep only the target symbol and its converted amount
df <- as.data.frame(df[,c("toSymbol","price")])
colnames(df)[2] <- "amount"
df
})
# place the conversion column pairs side by side
tmp <- do.call(cbind,tmp)
# return source metadata plus conversion(s)
cbind(fromCrypto,tmp)
}
# Smoke-tests: crypto->fiat, fiat->crypto, crypto->crypto.
tmp <- cryptoConversionRate(amount=1, fromSymbol = "BTC", toSymbol = "USD")
tmp <- cryptoConversionRate(amount=100, fromSymbol = "USD", toSymbol = "ETH")
tmp <- cryptoConversionRate(amount=1, fromSymbol = "ETH", toSymbol = "ADA")
# *******************************************************************************************************
# https://coinmarketcap.com/api/documentation/v1/#operation/getV1KeyInfo
# Query the CoinMarketCap "key/info" endpoint for details about the current
# API key. Takes no arguments; returns a two-element list:
#   [[1]] plan details as a one-column ("value") data.frame
#   [[2]] current credit-usage figures as a data.frame
# Depends on the global PASS$apikey for authentication.
planInfo = function()
{
# key-info endpoint URL
endpoint <- "https://pro-api.coinmarketcap.com/v1/key/info"
# issue the authenticated GET request
resp <- httr::GET(endpoint,
httr::add_headers(`Accepts` = 'application/json',
`X-CMC_PRO_API_KEY` = PASS$apikey))
# parse the raw JSON response body
parsed <- fromJSON(rawToChar(resp$content))
# plan details: one row per field, single "value" column
plan_df <- do.call(rbind, parsed[["data"]][["plan"]]) %>% as.data.frame()
colnames(plan_df) <- "value"
# current API-credit usage
usage_df <- do.call(rbind, parsed[["data"]][["usage"]]) %>% as.data.frame()
# return both tables
list(plan_df, usage_df)
}
# Smoke-test the function.
tmp <- planInfo()
|
# ROH GWAS. This script runs a modified GWAS which tests an
# effect of ROH status on annual survival at every SNP, controlling
# for a range of other variables.
# Needs to be run on a cluster, every model takes appr. 40 sec to run,
# 417K models overall.
library(lme4)
library(tidyverse)
library(broom.mixed)
library(snpStats)
library(data.table)
library(furrr)
# for running on server, this allows to run an array job for splitting
# the GWAS up in parts
part_inp <- commandArgs(trailingOnly=TRUE)
if (!(length(part_inp) == 0)) {
part <- as.numeric(part_inp[[1]])
} else {
# if no part selected, take first 1000
part <- 415
}
# fitness and pedigree data
load("data/survival_mods_data.RData")
load("data/sheep_ped.RData")
# GRM PCs from plink
pcs <- read_delim("data/ann_surv_pca.txt", " ", col_names = TRUE) %>%
mutate(id = as.character(id))
# roh data
file_path <- "data/roh.hom"
roh_lengths <- fread(file_path)
# plink name
sheep_plink_name <- "data/sheep_geno_imputed_oar_filt"
# read merged plink data
sheep_bed <- paste0(sheep_plink_name, ".bed")
sheep_bim <- paste0(sheep_plink_name, ".bim")
sheep_fam <- paste0(sheep_plink_name, ".fam")
full_sample <- read.plink(sheep_bed, sheep_bim, sheep_fam)
# make list with all parts
all_snps <- 1:nrow(full_sample$map)
all_parts <- split(all_snps, ceiling(seq_along(all_snps )/1000)) # every part runs 500 models
snp_indices <- all_parts[[part]]
# filter map data
snps_map_sub <- as_tibble(full_sample$map[snp_indices, ])
# additive genotypes
geno_sub <- as_tibble(as(full_sample$genotypes[, snps_map_sub$snp.name], Class = "numeric"),
rownames = "id")
# survival data
# survival data preprocessing
annual_survival <- fitness_data %>%
# filter na rows
filter_at(vars(survival, froh_all, birth_year, sheep_year), ~ !is.na(.)) %>%
mutate(age_cent = age - mean(age, na.rm = TRUE),
age_cent2 = age_cent^2,
age_std = as.numeric(scale(age)),
age_std2 = age_std^2,
# times 10 to estimate a 10% percent increase
froh_all10 = froh_all * 10,
froh_all10_cent = froh_all10 - mean(froh_all10, na.rm = TRUE),
lamb = ifelse(age == 0, 1, 0),
lamb_cent = lamb - mean(lamb, na.rm = TRUE),
lamb = as.factor(lamb)) %>%
as.data.frame()
#roh_lengths <- as.data.table(roh_lengths)
# check whether snp is in ROH for a given individual
setkey(roh_lengths, IID)
# Flag, for every individual, whether SNP i of the map table falls inside one
# of that individual's runs of homozygosity (ROH).
#   i - row index into the global `snps_map_sub` map table
# Returns a 0/1 numeric vector with one element per unique IID in the global
# `roh_lengths` data.table (ordered by its key, IID — set via setkey above).
# Side effect: adds/overwrites a temporary `roh` column in `roh_lengths`,
# since data.table's `:=` modifies the table by reference.
roh_id_per_snp <- function(i) {
position <- as.numeric(snps_map_sub[i, "position"])
chromosome <- as.numeric(snps_map_sub[i, "chromosome"])
# per ROH segment: 1 if the segment spans the focal SNP, 0 otherwise
roh_lengths[, roh := as.numeric((CHR == chromosome) & (POS1 <= position) & (POS2 >= position))]
# per individual: 1 if any of its ROH segments covers the SNP
# (explicit return value; the original ended on an assignment, which
# returns the same value but only invisibly)
roh_lengths[, .(roh = max(roh)), by = c("IID")]$roh
}
roh_ind <- map(1:nrow(snps_map_sub), roh_id_per_snp)
roh_df <- as.data.frame(do.call(cbind, roh_ind))
names(roh_df) <- paste0("roh_", snps_map_sub$snp.name)
roh_df$id <- as.character(unique(roh_lengths$IID))
# make some space
rm(full_sample)
# which chromosomes do the snps span?
chrs <- unique(snps_map_sub$chromosome)
froh_no_chr <- paste0("froh_no_chr", chrs)
# join additive and roh data to survival for gwas
annual_survival_gwas <- annual_survival %>%
#mutate_at(vars(starts_with("froh_no_chr")), scale) %>%
dplyr::select(id, survival, sex, twin, lamb, birth_year, sheep_year, mum_id, age_std, age_std2, {{ froh_no_chr }}) %>%
left_join(pcs, by = "id") %>%
left_join(geno_sub, by = "id") %>%
left_join(roh_df, by = "id") %>%
as_tibble()
snp_names <- snps_map_sub$snp.name
#snp_names <- top_snps %>% group_by(chromosome) %>% top_n(-2, p.value) %>% .$snp.name
for (i in snp_names) {
# dummy coding
annual_survival_gwas[[paste0("roh_0_", i)]] <- as.numeric((annual_survival_gwas[[i]] == 0) & (annual_survival_gwas[[paste0("roh_", i)]] == 1))
annual_survival_gwas[[paste0("roh_2_", i)]] <- as.numeric((annual_survival_gwas[[i]] == 2) & (annual_survival_gwas[[paste0("roh_", i)]] == 1))
annual_survival_gwas[[paste0("roh_", i)]] <- NULL
}
# Optimizer wrapper matching the calling convention lme4 expects of a custom
# optimizer: delegates to nloptr's BOBYQA with loose tolerances to speed up
# model fitting. Returns a list with the solution (par), objective value
# (fval), a convergence code (0 = positive nloptr status, i.e. success), and
# the nloptr message.
# Side effect: stashes the full nloptr result in the global `.nloptr` via <<-
# so the last fit can be inspected after the call.
# NOTE(review): glmer() below is configured with optimizer = "nloptwrap",
# so this wrapper appears unused in the current pipeline — confirm before
# removing.
nlopt <- function(par, fn, lower, upper, control) {
.nloptr <<- res <- nloptr(par, fn, lb = lower, ub = upper,
opts = list(algorithm = "NLOPT_LN_BOBYQA", print_level = 1,
maxeval = 1000, xtol_abs = 1e-6, ftol_abs = 1e-6))
list(par = res$solution,
fval = res$objective,
conv = if (res$status > 0) 0 else res$status,
message = res$message
)
}
# Fit one annual-survival GWAS model for a single focal SNP.
#   snp  - SNP name (column in `data` for the additive genotype; matching
#          roh_0_<snp> / roh_2_<snp> dummy columns must also exist)
#   data - data.frame with survival, covariates, PCs and the SNP columns
# The model is a binomial GLMM: survival ~ covariates + froh (excluding the
# focal SNP's chromosome) + 7 GRM PCs + additive SNP effect + the two ROH
# dummy terms, with random intercepts for birth_year, sheep_year and id.
# Returns the tidied coefficient table from broom.mixed::tidy().
# NOTE(review): reads the global `snps_map_sub` to look up the focal SNP's
# chromosome — `data` alone is not sufficient to call this function.
run_gwas <- function(snp, data) {
# froh covariate computed without the focal SNP's chromosome
chr <- as.numeric(snps_map_sub[snps_map_sub$snp.name == snp, "chromosome"])
froh_no_chr <- paste0("froh_no_chr", chr)
# build the model formula as a string, then convert
formula_snp <- as.formula(paste0("survival ~ 1 + sex + twin + age_std + age_std2 + ",
froh_no_chr, " + ",
"pc1 + pc2 + pc3 + pc4 + pc5 + pc6 + pc7 + ",
#"pc1 + pc2 + pc3 + pc4 +",
snp, "+ ", paste0("roh_0_", snp), "+", paste0("roh_2_", snp), "+ (1|birth_year) + (1|sheep_year) + (1|id)"))
#snp, "+ ", paste0("roh_", snp), " + (1|sheep_year) + (1|id)"))
# binomial GLMM; calc.derivs = FALSE skips the post-fit derivative check
# to save time across the ~417K models
mod <- glmer(formula = formula_snp,
data = data, family = "binomial",
control = glmerControl(optimizer = "nloptwrap", calc.derivs = FALSE))
out <- broom.mixed::tidy(mod)
out
}
# Wrapped version that captures errors instead of aborting the whole run.
safe_run_gwas <- purrr::safely(run_gwas)
#
snps_sub <- snps_map_sub$snp.name
# Split the SNP list into pieces of ~50 so each worker gets a small chunk
# of data rather than the full genotype table.
num_parts <- round(length(seq_along(snps_sub )) / 50)
snps_pieces <- split(snps_sub, cut(seq_along(snps_sub), num_parts, labels = FALSE))
# Matching dummy-column names for each piece (roh_0_* / roh_2_*).
roh_pieces_0 <- map(snps_pieces, function(x) paste0("roh_0_", x)) #### to change here
roh_pieces_2 <- map(snps_pieces, function(x) paste0("roh_2_", x))
# For each piece, keep only the covariate columns plus that piece's SNP and
# ROH-dummy columns — shrinks the data shipped to each future worker.
annual_survival_gwas_pieces <-
pmap(list(snps_pieces, roh_pieces_0, roh_pieces_2), function(snps_piece, roh_piece_0, roh_piece_2) {
annual_survival_gwas %>% dplyr::select(id:pc7, one_of(c(snps_piece, roh_piece_0, roh_piece_2 )))
})
# Free memory before forking workers.
rm(annual_survival, annual_survival_gwas, fitness_data, geno_sub,
roh_lengths, roh_pieces_0, roh_pieces_2, sheep_ped, roh_df)
# Parallel backend with 6 workers.
# NOTE(review): plan(multiprocess) is deprecated in recent versions of the
# future package — prefer multisession/multicore; verify the installed
# version still accepts it.
plan(multiprocess, workers = 6)
# Raise the per-future globals size limit to ~3 GB for the data pieces.
options(future.globals.maxSize = 3000 * 1024^2)
# Run the (error-wrapped) GWAS model for every SNP, piece by piece.
all_out <- future_map2(snps_pieces, annual_survival_gwas_pieces, function(snps, data) {
out <- purrr::map(snps, safe_run_gwas, data)
})
# Flatten the list-of-lists to one result per SNP and save this array part.
all_out_simple <- purrr::flatten(all_out)
saveRDS(all_out_simple, file = paste0("output/GWAS_roh_sep_part", "_", part, ".rds"))
### testing
# all_out <- future_map2(snps_pieces[1], annual_survival_gwas_pieces[1], function(snps, data) {
# out <- purrr::map(snps[1:3], safe_run_gwas, data)
# })
| /6_alt_gwas_annual_survival_bothA_sep.R | no_license | mastoffel/sheep_ID | R | false | false | 7,431 | r | # ROH GWAS. This script runs a modified GWAS which tests an
# effect of ROH status on annual survival at every SNP, controlling
# for a range of other variables.
# Needs to be run on a cluster, every model takes appr. 40 sec to run,
# 417K models overall.
library(lme4)
library(tidyverse)
library(broom.mixed)
library(snpStats)
library(data.table)
library(furrr)
# for running on server, this allows to run an array job for splitting
# the GWAS up in parts
part_inp <- commandArgs(trailingOnly=TRUE)
if (!(length(part_inp) == 0)) {
part <- as.numeric(part_inp[[1]])
} else {
# if no part selected, take first 1000
part <- 415
}
# fitness and pedigree data
load("data/survival_mods_data.RData")
load("data/sheep_ped.RData")
# GRM PCs from plink
pcs <- read_delim("data/ann_surv_pca.txt", " ", col_names = TRUE) %>%
mutate(id = as.character(id))
# roh data
file_path <- "data/roh.hom"
roh_lengths <- fread(file_path)
# plink name
sheep_plink_name <- "data/sheep_geno_imputed_oar_filt"
# read merged plink data
sheep_bed <- paste0(sheep_plink_name, ".bed")
sheep_bim <- paste0(sheep_plink_name, ".bim")
sheep_fam <- paste0(sheep_plink_name, ".fam")
full_sample <- read.plink(sheep_bed, sheep_bim, sheep_fam)
# make list with all parts
all_snps <- 1:nrow(full_sample$map)
all_parts <- split(all_snps, ceiling(seq_along(all_snps )/1000)) # every part runs 500 models
snp_indices <- all_parts[[part]]
# filter map data
snps_map_sub <- as_tibble(full_sample$map[snp_indices, ])
# additive genotypes
geno_sub <- as_tibble(as(full_sample$genotypes[, snps_map_sub$snp.name], Class = "numeric"),
rownames = "id")
# survival data
# survival data preprocessing
annual_survival <- fitness_data %>%
# filter na rows
filter_at(vars(survival, froh_all, birth_year, sheep_year), ~ !is.na(.)) %>%
mutate(age_cent = age - mean(age, na.rm = TRUE),
age_cent2 = age_cent^2,
age_std = as.numeric(scale(age)),
age_std2 = age_std^2,
# times 10 to estimate a 10% percent increase
froh_all10 = froh_all * 10,
froh_all10_cent = froh_all10 - mean(froh_all10, na.rm = TRUE),
lamb = ifelse(age == 0, 1, 0),
lamb_cent = lamb - mean(lamb, na.rm = TRUE),
lamb = as.factor(lamb)) %>%
as.data.frame()
#roh_lengths <- as.data.table(roh_lengths)
# check whether snp is in ROH for a given individual
setkey(roh_lengths, IID)
roh_id_per_snp <- function(i) {
position <- as.numeric(snps_map_sub[i, "position"])
chromosome <- as.numeric(snps_map_sub[i, "chromosome"])
# varname <- paste0("roh", i)
#roh <- as.numeric((roh_lengths$POS1 <= position) & (roh_lengths$POS2 >= position) & (roh_lengths$CHR == chromosome))
#roh_lengths$roh <- roh
roh_lengths[, roh := as.numeric((CHR == chromosome) & (POS1 <= position) & (POS2 >= position))]
#roh_lengths[, roh := fifelse((POS1 <= position)&(POS2 >= position)&(CHR == chromosome), 1, 0)]
roh_id <- roh_lengths[, .(roh = max(roh)), by = c("IID")]$roh
}
roh_ind <- map(1:nrow(snps_map_sub), roh_id_per_snp)
roh_df <- as.data.frame(do.call(cbind, roh_ind))
names(roh_df) <- paste0("roh_", snps_map_sub$snp.name)
roh_df$id <- as.character(unique(roh_lengths$IID))
# make some space
rm(full_sample)
# which chromosomes do the snps span?
chrs <- unique(snps_map_sub$chromosome)
froh_no_chr <- paste0("froh_no_chr", chrs)
# join additive and roh data to survival for gwas
annual_survival_gwas <- annual_survival %>%
#mutate_at(vars(starts_with("froh_no_chr")), scale) %>%
dplyr::select(id, survival, sex, twin, lamb, birth_year, sheep_year, mum_id, age_std, age_std2, {{ froh_no_chr }}) %>%
left_join(pcs, by = "id") %>%
left_join(geno_sub, by = "id") %>%
left_join(roh_df, by = "id") %>%
as_tibble()
snp_names <- snps_map_sub$snp.name
#snp_names <- top_snps %>% group_by(chromosome) %>% top_n(-2, p.value) %>% .$snp.name
for (i in snp_names) {
# dummy coding
annual_survival_gwas[[paste0("roh_0_", i)]] <- as.numeric((annual_survival_gwas[[i]] == 0) & (annual_survival_gwas[[paste0("roh_", i)]] == 1))
annual_survival_gwas[[paste0("roh_2_", i)]] <- as.numeric((annual_survival_gwas[[i]] == 2) & (annual_survival_gwas[[paste0("roh_", i)]] == 1))
annual_survival_gwas[[paste0("roh_", i)]] <- NULL
}
# time saver function for modeling
nlopt <- function(par, fn, lower, upper, control) {
.nloptr <<- res <- nloptr(par, fn, lb = lower, ub = upper,
opts = list(algorithm = "NLOPT_LN_BOBYQA", print_level = 1,
maxeval = 1000, xtol_abs = 1e-6, ftol_abs = 1e-6))
list(par = res$solution,
fval = res$objective,
conv = if (res$status > 0) 0 else res$status,
message = res$message
)
}
# focal SNP, chromosome of focal snp, data
run_gwas <- function(snp, data) {
# for mean froh without focal chr
chr <- as.numeric(snps_map_sub[snps_map_sub$snp.name == snp, "chromosome"])
froh_no_chr <- paste0("froh_no_chr", chr)
formula_snp <- as.formula(paste0("survival ~ 1 + sex + twin + age_std + age_std2 + ",
froh_no_chr, " + ",
"pc1 + pc2 + pc3 + pc4 + pc5 + pc6 + pc7 + ",
#"pc1 + pc2 + pc3 + pc4 +",
snp, "+ ", paste0("roh_0_", snp), "+", paste0("roh_2_", snp), "+ (1|birth_year) + (1|sheep_year) + (1|id)"))
#snp, "+ ", paste0("roh_", snp), " + (1|sheep_year) + (1|id)"))
mod <- glmer(formula = formula_snp,
data = data, family = "binomial",
control = glmerControl(optimizer = "nloptwrap", calc.derivs = FALSE))
out <- broom.mixed::tidy(mod)
out
}
safe_run_gwas <- purrr::safely(run_gwas)
#
snps_sub <- snps_map_sub$snp.name
# split into pieces of 50 SNPs
num_parts <- round(length(seq_along(snps_sub )) / 50)
snps_pieces <- split(snps_sub, cut(seq_along(snps_sub), num_parts, labels = FALSE))
roh_pieces_0 <- map(snps_pieces, function(x) paste0("roh_0_", x)) #### to change here
roh_pieces_2 <- map(snps_pieces, function(x) paste0("roh_2_", x))
annual_survival_gwas_pieces <-
pmap(list(snps_pieces, roh_pieces_0, roh_pieces_2), function(snps_piece, roh_piece_0, roh_piece_2) {
annual_survival_gwas %>% dplyr::select(id:pc7, one_of(c(snps_piece, roh_piece_0, roh_piece_2 )))
})
# clean up
rm(annual_survival, annual_survival_gwas, fitness_data, geno_sub,
roh_lengths, roh_pieces_0, roh_pieces_2, sheep_ped, roh_df)
# set up plan
plan(multiprocess, workers = 6)
# increase maxSize
options(future.globals.maxSize = 3000 * 1024^2)
all_out <- future_map2(snps_pieces, annual_survival_gwas_pieces, function(snps, data) {
out <- purrr::map(snps, safe_run_gwas, data)
})
all_out_simple <- purrr::flatten(all_out)
saveRDS(all_out_simple, file = paste0("output/GWAS_roh_sep_part", "_", part, ".rds"))
### testing
# all_out <- future_map2(snps_pieces[1], annual_survival_gwas_pieces[1], function(snps, data) {
# out <- purrr::map(snps[1:3], safe_run_gwas, data)
# })
|
# Preprocessing script: read the character gift-preference CSVs, relabel the
# columns, and save each table as an RDS for the Shiny app to load.
#install.packages("tidyverse")
library(tidyverse)
# Read the two source tables (marriage candidates and standard characters).
charGifts <- read.csv("./Gantt_CharGift_Char_Data.csv")
head(charGifts)
stdGifts <- read.csv("./Gantt_CharGift_Std_Data.csv")
head(stdGifts)
# Apply display-friendly column names to both tables.
cols <- c("Character", "Best Gifts", "Image")
colnames(charGifts) <- cols
head(charGifts)
cols <- c("Character", "Best Gifts", "Image")
colnames(stdGifts) <- cols
head(stdGifts)
# Save as RDS next to app.r so the Shiny app can readRDS() them.
# The placeholder paths below must be edited before running.
saveRDS(charGifts, file = "Folder where app.r is stored/charDetails.rds") ## UPDATE THIS
saveRDS(stdGifts, file = "Folder where app.r is stored/charDetailsStd.rds") ## UPDATE THIS
| /Gantt_CharGiftTable_Script.R | no_license | mnandrews24/stardew-valley-connections | R | false | false | 674 | r | # Import libraries
#install.packages("tidyverse")
library(tidyverse)
# Import data from CSV
charGifts <- read.csv("./Gantt_CharGift_Char_Data.csv")
head(charGifts)
stdGifts <- read.csv("./Gantt_CharGift_Std_Data.csv")
head(stdGifts)
# Change column names
cols <- c("Character", "Best Gifts", "Image")
colnames(charGifts) <- cols
head(charGifts)
cols <- c("Character", "Best Gifts", "Image")
colnames(stdGifts) <- cols
head(stdGifts)
# Save as RDS for use in Shiny
saveRDS(charGifts, file = "Folder where app.r is stored/charDetails.rds") ## UPDATE THIS
saveRDS(stdGifts, file = "Folder where app.r is stored/charDetailsStd.rds") ## UPDATE THIS
|
# Risk-management program for the stocks held in the US market.
# Goal: be able to track three things --
#   1) portfolio-wide risk and return,
#   2) per-holding risk and return,
#   3) performance analysis.
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("/Users/popopopo/myWork/stock_info/R_analysis")
library(fPortfolio)
library(fImport)
# Pull price data from Yahoo! Finance.
# Tickers to fetch / current holdings (shares):
# AAPL : 5
# AMZN : 2
# CSCO : 10
# FXI : 40
# Adj. Close : Close price adjusted for dividends and splits.
#
s1 <- yahooSeries("AAPL", from="2005-01-01")
s2 <- yahooSeries("AMZN", from="2005-01-01")
s3 <- yahooSeries("CSCO", from="2005-01-01")
s4 <- yahooSeries("FXI", from="2005-01-01")
# Column 6 of each series is the adjusted close price.
p1 <- s1[,6]
p2 <- s2[,6]
p3 <- s3[,6]
p4 <- s4[,6]
# Combine holdings into one multivariate price series.
mypf <- cbind(p1,p2,p3,p4)
colnames(mypf) <- c("Apple","Amazon","Cisco","FXI")
# Daily returns, summary statistics and correlation matrix (in %).
mypf.ret <- returns(mypf)
summary(mypf.ret)
basicStats(mypf.ret)
round(cor(mypf.ret)*100,digits=4)
# VaR and CVaR with weights proportional to shares held (5/2/10/40).
varRisk(mypf.ret*100,weights=c(5,2,10,40)/(5+2+10+40))
cvarRisk(mypf.ret*100,weights=c(5,2,10,40)/(5+2+10+40))
# Plot all four price series on one chart.
# NOTE(review): stray double comma (",,") leaves an empty argument — verify
# this call actually runs as intended.
plot(mypf, plot.type="single",,col=1:4,xlab="Date", ylab="Adjusted Close Price")
hgrid()
legend("topleft", colnames(mypf), lty=c(1,1,1,1), col=1:4)
# One panel per series, then the daily-return scatter.
plot(mypf,xlab="Date", ylab="Adjusted Close Price", col="steelblue")
plot(mypf.ret, pch=19, cex=0.4, col="brown", xlab="Date", ylab="Adjusted Close Price", main="Progress of daily returns")
grid()
#
seriesPlot(mypf[,1])
returnPlot(mypf[,1])
par(mfrow=c(2,2))
cumulatedPlot(mypf.ret)
histPlot(mypf.ret[,1])
######################################################################
# NOTE(review): IDX, EPI, TOK, VNM, VWO are not defined in this script —
# presumably loaded elsewhere; these lines fail if run standalone.
start(IDX);end(IDX)
start(EPI);start(IDX);start(TOK);start(VNM);start(VWO)
max(start(EPI),start(IDX),start(TOK),start(VNM),start(VWO))
| /R_analysis/us_portfolio_analysis.R | no_license | stubz/stock_info | R | false | false | 1,763 | r | # US市場で持っている銘柄のリスク管理プログラム
# ポートフォリオ全体でのリスクと、リターン、
# 銘柄ごとのリスクとリターン
# パフォーマンス分析
# の3つをトラックできるようにしたい
setwd("/Users/popopopo/myWork/stock_info/R_analysis")
library(fPortfolio)
library(fImport)
# Yahoo!からデータを取り込む
# 取得銘柄ティッカー
# 今持っている銘柄は
# AAPL : 5
# AMZN : 2
# CSCO : 10
# FXI : 40
# Adj. Close : Close price adjusted for dividends and splits.
#
s1 <- yahooSeries("AAPL", from="2005-01-01")
s2 <- yahooSeries("AMZN", from="2005-01-01")
s3 <- yahooSeries("CSCO", from="2005-01-01")
s4 <- yahooSeries("FXI", from="2005-01-01")
p1 <- s1[,6]
p2 <- s2[,6]
p3 <- s3[,6]
p4 <- s4[,6]
mypf <- cbind(p1,p2,p3,p4)
colnames(mypf) <- c("Apple","Amazon","Cisco","FXI")
mypf.ret <- returns(mypf)
summary(mypf.ret)
basicStats(mypf.ret)
round(cor(mypf.ret)*100,digits=4)
varRisk(mypf.ret*100,weights=c(5,2,10,40)/(5+2+10+40))
cvarRisk(mypf.ret*100,weights=c(5,2,10,40)/(5+2+10+40))
plot(mypf, plot.type="single",,col=1:4,xlab="Date", ylab="Adjusted Close Price")
hgrid()
legend("topleft", colnames(mypf), lty=c(1,1,1,1), col=1:4)
plot(mypf,xlab="Date", ylab="Adjusted Close Price", col="steelblue")
plot(mypf.ret, pch=19, cex=0.4, col="brown", xlab="Date", ylab="Adjusted Close Price", main="Progress of daily returns")
grid()
#
seriesPlot(mypf[,1])
returnPlot(mypf[,1])
par(mfrow=c(2,2))
cumulatedPlot(mypf.ret)
histPlot(mypf.ret[,1])
######################################################################
start(IDX);end(IDX)
start(EPI);start(IDX);start(TOK);start(VNM);start(VWO)
max(start(EPI),start(IDX),start(TOK),start(VNM),start(VWO))
|
\name{codev}
\alias{codev}
\title{ Calcola la codevianza }
\description{
Questa funzione calcola la codevianza come somma dei prodotti degli scarti dalla media
}
\usage{
codev(x,y)
}
\arguments{
\item{x}{ vettore numerico di dimensione \emph{n} }
\item{y}{ vettore numerico di dimensione \emph{n} }
}
\references{ Iacus, S., and Masarotto, G. (2003). \emph{ Laboratorio di Statistica con R }. Milano: McGraw-Hill }
\author{ Fabio Frascati <fabiofrascati@yahoo.it>}
\seealso{ \code{\link{popstderror}}, \code{\link{stderror}}, \code{\link{sigma}}, \code{\link{sigma2.test}}, \code{\link{sigma2m}}, \code{\link[stats:cor]{var}}, \code{\link[stats]{sd}}, \code{\link[stats:cor]{cov}}, \code{\link[stats:cor]{cov2cor}}, \code{\link{ssdev}} }
\examples{
codev(x=c(1.2,3.4,5.6,7.8,8.9),y=c(1,2.3,5.6,8.8,8.9))
}
\keyword{ univar }
| /man/codev.Rd | no_license | cran/sigma2tools | R | false | false | 877 | rd | \name{codev}
\alias{codev}
\title{ Calcola la codevianza }
\description{
Questa funzione calcola la codevianza come somma dei prodotti degli scarti dalla media
}
\usage{
codev(x,y)
}
\arguments{
\item{x}{ vettore numerico di dimensione \emph{n} }
\item{y}{ vettore numerico di dimensione \emph{n} }
}
\references{ Iacus, S., and Masarotto, G. (2003). \emph{ Laboratorio di Statistica con R }. Milano: McGraw-Hill }
\author{ Fabio Frascati <fabiofrascati@yahoo.it>}
\seealso{ \code{\link{popstderror}}, \code{\link{stderror}}, \code{\link{sigma}}, \code{\link{sigma2.test}}, \code{\link{sigma2m}}, \code{\link[stats:cor]{var}}, \code{\link[stats]{sd}}, \code{\link[stats:cor]{cov}}, \code{\link[stats:cor]{cov2cor}}, \code{\link{ssdev}} }
\examples{
codev(x=c(1.2,3.4,5.6,7.8,8.9),y=c(1,2.3,5.6,8.8,8.9))
}
\keyword{ univar }
|
## Basic R coding and usage
## 30 january 2018
## ALS
library(ggplot2)
#using the assignment operator
x <- 5 #preferred
print(x)
y = 4 #legal but not used except in function
y= y +1.1
y <- y+1
plantHeight <- 5.5
#--------------------------------------------------{end class}
# The combine function
z <- c(3,7,7,10) # simple atomic vector
print(z)
typeof(z) #get variable type (double is numbers)
str(z) # get structure of the variable
is.numeric(z) # logical test for variable type
is.character(z)
# c always "flattens" to an atomic vector
z <- c(c(3,4), c(5,6))
print(z)
# character strings with single or double quotes
z <- c("perch", "bass", "trout", "red snapper")
print(z)
#use both quote types for an internal quote
z <- c("htis is only 'one' character strong", 'a second string')
str(z)
#logical TRUE FALSE
z <- c(TRUE, TRUE, FALSE)
is.numeric(z)
is.logical(z)
#Three properties of ataomic vectors
#type of atomic vector
z <- c(1.2,2.2,3.2)
typeof(z)
is.numeric(z)
#length of vector
length(z)
#name of vector element (optional)
z <- runif(5) #random uniforn (0,1)
names(z)
#add names after variable is created
names(z) <- c("chow", "pug", "beagle", "greyhound", "akita")
print(z)
#add names when variable is built
z2 <- c(gold=3.3, silver=10, lead=2)
print(z2)
names(z2) <- NULL
print(z2)
names(z2) <- c("copper","Zinc")
print(z2)
#Special data values
#NA for missing values
z <- c(3.2, 3.3, NA)
length(z)
typeof(z[3])
# missing values can trip up basic functions
mean(z) # does not work
is.na(z) #checks for missid
!is.na(z) # ! is the NOT
mean(!is.na(z)) #WROOONG- mean of the true false values, 2/3 were true. be clear
mean(z[!is.na(z)]) #do it this way
#-------------------------------
#NaN, Inf -Inf
# bad results from nmumeric calc
z <- 0/0
print(z)
z <- 1/0
print(z)
z <- -1/0
print(z)
z <- 0/1
print(z)
z <- 1/0
typeof(z)
#------------------
#NULL is an object that is nothing
z <- NULL
typeof(z)
length(z)
is.null(z)
#Three properities of atomic vectors
#coercion
a <- c(2.1, 2.2)
typeof(a)
b <- c("purple", "green")
typeof(b)
d <- c(a,b)
print(d)
typeof(d)
#numbers have been coerced into character strings
# hierarchy of conversions
#logical -> integers-> double-> character
a <- runif(10)
print(a)
a > 0.5 #logical operation
temp <- a > 0.5 #hold thsese logical variable
sum(temp)
# what proportion of the values are >0.5
mean(a>0.5)
# qualify exam question: approx proportion of observation from a normal (0,1) random variable are >2.0
mean(rnorm(1000000)>2.0)
#------------ VEctorization
z <- c(10,20,30)
z + 1
y <- c(1,2,3)
z+y #element-by-element matching
short <- c(1,2)
z + short #what willl happen@
z^2
#creating vectors
#create an empty vector
z <- vector(mode="numeric", length=0)
print(z)
#add elements to empty vectori
z <- c(z,5) #dont do this in your code
print(z)
#instead create a vector of predefined length
z <- rep(NA,100)
z[1] <- 3.3
head(z)
z <- rep(NA, 100)
head(z)
typeof(z)
z[c(1:20)] <- c("Washington", 2.2)
typeof(z)
head(z)
z[1:30]
#generate a long list of names
myVector <- runif(13) #get 100 random uniform
myNames <- paste("File",seq(1:length(myVector)), ".csv", sep="")
head(myNames)
names(myVector) <- myNames
head(myVector)
#using rep to repeat elements
#-----------------------------------------------------------end feb 1 2018
#---------------------------------------------- Feb 6th 2018
#using rep to repeat elements and create vectors
rep(0.5,6)
rep("mystring", 3)
#more formal way of command
rep(x=0.5, times=6)
rep(times=6 , x=0.5)
myVec <- c(1,2,3)
rep(myVec, times=2)
rep(x=myVec, each=2)
rep(x=myVec, times=myVec)
rep(x=1:3, times=3:1)
#seq funciton for creating sequences
seq(from=2,to=4)
seq(from=2,to=4,by=0.5)
seq(from=2,to=4,length=7)
x <- seq(from=2,to=4,length=7)
1:length(x)
seq_along(x) #faster, better
1:5
seq(1,5)
seq_len(10)
#why are we using seq along
x <- vector(mode="numeric", length=0)
str(x)
#num(0) is empty vector
1:length(x) #interpreted one to zero
seq_along(x)
#seq and rep will generate values, give ordered values that are known
#using random numbers
runif(1)
set.seed(100)
runif(1)
runif(n=5, min=100,max=200)
library(ggplot2) #graphics library
z <- runif(n=1000,min=30,max=300)
qplot(x=z)
#random normal values
z <- rnorm(1000)
qplot(x=z)
z <- rnorm(n=1000, mean=30, sd=20)
qplot(x=z)
#use sample function to draw from existing vector
longVec <- seq_len(10)
longVec
sample(x=longVec)
sample(x=longVec, size=3) # Sample withoyt replacement
sample(x=longVec,size=3, replace=TRUE)
myWeights <- c(rep(20,5), rep(100,5))
myWeights
sample(x=longVec, replace=TRUE, prob=myWeights)
sample(x=longVec,replace=FALSE, prob=myWeights)
#subsetting of atomic vectors
z <- c(3.1,9.2, 1.3, 0.4, 7.5)
#Subsetting on positive index values
z[2]
z[c(2,3)]
#ssubset on negative index values
z[-c(2,3)]
#subset by creating a boolean vector to select elements to meet a condition
z<3
z[z<3]
#ABove, got only the individual elements that satisfed z<3
which(z<3)
#"which" gives us actual locations of the elements thar satisfy the command, unless with the bracket as three lines below gives us values
myCriteria <- z<3
z[myCriteria]
z[which(z<3)]
zx <- c(NA,z)
zx[zx<3] #missing value is retained
zx[which(zx<3)] #missing value is dropped
#keeps entire vector
z[]
z[-(length(z):(length(z)-2))] #- in front is get rid of what is in brackets and give me what is left. in bracket it "says"- give me elements from 5 to three, bu then get ride of it
#subset on names of vector elements
z
names(z) <- letters[seq_along(z)]
z
z[c("b","d","e")]
#arithmetic operators
10+3
10-3
10*3
10/3
10^3
log(10)
log10(10)
#modulus operator (remainder)
10%%3
#interger division
10%/%3
#generate the set of all numbers from 1 to 100 that are divisible by 9? the fuckkk- remainder value thing
q <- seq_len(100)
q[q%%9==0]
#-------------------------------------END CLASS FEB 6
#--------------------------------------- Begin class feb 8
#relational operators
#all return a boolean
3<4
3>5:7
3 >= 3
3<=3
3==4 #DOUBLE !!!!
3 = 4 #throws an error
3 != 4 # != not equal to
#set operator s
#compare two atomic vectors
#return one atomic vector
#always strip out duplicate elements
# Before the comparisons
i <- c(1,1:7)
print(i)
j <- 3:10
print(j)
union(i,j) #all elements
intersect(i,j) #common elements
setdiff(i,j) #unique elements of i not j
setdiff(j,i) #unique elements of j not in i
#set of operators that return a single boolean (T or F)
setequal(i,j) #asking if they are same
setequal(i,i)
is.element(i,j) #compare elements one at a time in i to j
is.element(j,i) #(or i %in% J)
# Logical operators
z <- 10:20
z<15
z < 20 & z >17 #when ran, goes thru each one #AND operator
z <20 | z>17 #OR operator
#atomic vector- all elements must be same type, and are one dimension
| /BasicCoding.R | no_license | anyasteinhart9898/BIOL-381 | R | false | false | 6,844 | r | ## Basic R coding and usage
## 30 january 2018
## ALS
library(ggplot2)
#using the assignment operator
x <- 5 #preferred
print(x)
y = 4 #legal but not used except in function
y= y +1.1
y <- y+1
plantHeight <- 5.5
#--------------------------------------------------{end class}
# The combine function
z <- c(3,7,7,10) # simple atomic vector
print(z)
typeof(z) #get variable type (double is numbers)
str(z) # get structure of the variable
is.numeric(z) # logical test for variable type
is.character(z)
# c always "flattens" to an atomic vector
z <- c(c(3,4), c(5,6))
print(z)
# character strings with single or double quotes
z <- c("perch", "bass", "trout", "red snapper")
print(z)
#use both quote types for an internal quote
z <- c("htis is only 'one' character strong", 'a second string')
str(z)
#logical TRUE FALSE
z <- c(TRUE, TRUE, FALSE)
is.numeric(z)
is.logical(z)
#Three properties of atomic vectors
#type of atomic vector
z <- c(1.2,2.2,3.2)
typeof(z)
is.numeric(z)
#length of vector
length(z)
#name of vector element (optional)
z <- runif(5) #random uniforn (0,1)
names(z)
#add names after variable is created
names(z) <- c("chow", "pug", "beagle", "greyhound", "akita")
print(z)
#add names when variable is built
z2 <- c(gold=3.3, silver=10, lead=2)
print(z2)
names(z2) <- NULL
print(z2)
names(z2) <- c("copper","Zinc")
print(z2)
#Special data values
#NA for missing values
z <- c(3.2, 3.3, NA)
length(z)
typeof(z[3])
# missing values can trip up basic functions
mean(z) # does not work
is.na(z) #checks for missid
!is.na(z) # ! is the NOT
mean(!is.na(z)) #WROOONG- mean of the true false values, 2/3 were true. be clear
mean(z[!is.na(z)]) #do it this way
#-------------------------------
#NaN, Inf -Inf
# bad results from nmumeric calc
z <- 0/0
print(z)
z <- 1/0
print(z)
z <- -1/0
print(z)
z <- 0/1
print(z)
z <- 1/0
typeof(z)
#------------------
#NULL is an object that is nothing
z <- NULL
typeof(z)
length(z)
is.null(z)
#Three properities of atomic vectors
#coercion
a <- c(2.1, 2.2)
typeof(a)
b <- c("purple", "green")
typeof(b)
d <- c(a,b)
print(d)
typeof(d)
#numbers have been coerced into character strings
# hierarchy of conversions
#logical -> integers-> double-> character
a <- runif(10)
print(a)
a > 0.5 #logical operation
temp <- a > 0.5 #hold thsese logical variable
sum(temp)
# what proportion of the values are >0.5
mean(a>0.5)
# qualify exam question: approx proportion of observation from a normal (0,1) random variable are >2.0
mean(rnorm(1000000)>2.0)
#------------ Vectorization
z <- c(10,20,30)
z + 1
y <- c(1,2,3)
z+y #element-by-element matching
short <- c(1,2)
z + short #recycling: the shorter vector is reused (with a warning when lengths don't divide evenly)
z^2
#creating vectors
#create an empty vector
z <- vector(mode="numeric", length=0)
print(z)
#add elements to an empty vector
z <- c(z,5) #don't do this in your code: growing a vector copies it on every append
print(z)
#instead create a vector of predefined length
z <- rep(NA,100)
z[1] <- 3.3
head(z)
z <- rep(NA, 100)
head(z)
typeof(z)
z[c(1:20)] <- c("Washington", 2.2) # mixing types coerces the whole vector to character
typeof(z)
head(z)
z[1:30]
#generate a long list of names
myVector <- runif(13) #get 13 random uniform values
myNames <- paste("File",seq(1:length(myVector)), ".csv", sep="")
head(myNames)
names(myVector) <- myNames
head(myVector)
#using rep to repeat elements
#-----------------------------------------------------------end feb 1 2018
#---------------------------------------------- Feb 6th 2018
#using rep to repeat elements and create vectors
rep(0.5,6)
rep("mystring", 3)
#more formal way of command
rep(x=0.5, times=6)
rep(times=6 , x=0.5)
myVec <- c(1,2,3)
rep(myVec, times=2)
rep(x=myVec, each=2)
rep(x=myVec, times=myVec)
rep(x=1:3, times=3:1)
#seq funciton for creating sequences
seq(from=2,to=4)
seq(from=2,to=4,by=0.5)
seq(from=2,to=4,length=7)
x <- seq(from=2,to=4,length=7)
1:length(x)
seq_along(x) #faster, better
1:5
seq(1,5)
seq_len(10)
#why are we using seq along
x <- vector(mode="numeric", length=0)
str(x)
#num(0) is empty vector
1:length(x) #interpreted one to zero
seq_along(x)
#seq and rep will generate values, give ordered values that are known
#using random numbers
runif(1)
set.seed(100)
runif(1)
runif(n=5, min=100,max=200)
library(ggplot2) #graphics library
z <- runif(n=1000,min=30,max=300)
qplot(x=z)
#random normal values
z <- rnorm(1000)
qplot(x=z)
z <- rnorm(n=1000, mean=30, sd=20)
qplot(x=z)
#use sample function to draw from existing vector
longVec <- seq_len(10)
longVec
sample(x=longVec)
sample(x=longVec, size=3) # Sample withoyt replacement
sample(x=longVec,size=3, replace=TRUE)
myWeights <- c(rep(20,5), rep(100,5))
myWeights
sample(x=longVec, replace=TRUE, prob=myWeights)
sample(x=longVec,replace=FALSE, prob=myWeights)
#subsetting of atomic vectors
z <- c(3.1,9.2, 1.3, 0.4, 7.5)
#Subsetting on positive index values
z[2]
z[c(2,3)]
#ssubset on negative index values
z[-c(2,3)]
#subset by creating a boolean vector to select elements to meet a condition
z<3
z[z<3]
#ABove, got only the individual elements that satisfed z<3
which(z<3)
#"which" gives us actual locations of the elements thar satisfy the command, unless with the bracket as three lines below gives us values
myCriteria <- z<3
z[myCriteria]
z[which(z<3)]
zx <- c(NA,z)
zx[zx<3] #missing value is retained
zx[which(zx<3)] #missing value is dropped
#keeps entire vector
z[]
z[-(length(z):(length(z)-2))] #- in front is get rid of what is in brackets and give me what is left. in bracket it "says"- give me elements from 5 to three, bu then get ride of it
#subset on names of vector elements
z
names(z) <- letters[seq_along(z)]
z
z[c("b","d","e")]
#arithmetic operators
10+3
10-3
10*3
10/3
10^3
log(10)
log10(10)
#modulus operator (remainder)
10%%3
#interger division
10%/%3
#generate the set of all numbers from 1 to 100 that are divisible by 9? the fuckkk- remainder value thing
q <- seq_len(100)
q[q%%9==0]
#-------------------------------------END CLASS FEB 6
#--------------------------------------- Begin class feb 8
#relational operators
#all return a boolean
3<4
3>5:7
3 >= 3
3<=3
3==4 #DOUBLE !!!!
3 = 4 #throws an error
3 != 4 # != not equal to
#set operator s
#compare two atomic vectors
#return one atomic vector
#always strip out duplicate elements
# Before the comparisons
i <- c(1,1:7)
print(i)
j <- 3:10
print(j)
union(i,j) #all elements
intersect(i,j) #common elements
setdiff(i,j) #unique elements of i not j
setdiff(j,i) #unique elements of j not in i
#set of operators that return a single boolean (T or F)
setequal(i,j) #asking if they are same
setequal(i,i)
is.element(i,j) #compare elements one at a time in i to j
is.element(j,i) #(or i %in% J)
# Logical operators
z <- 10:20
z<15
z < 20 & z >17 #when ran, goes thru each one #AND operator
z <20 | z>17 #OR operator
#atomic vector- all elements must be same type, and are one dimension
|
## Getting full dataset
## first placed unzipped data source file in current directory
## Produces a 2x2 panel of time-series plots for 2007-02-01..2007-02-02
## and saves it to plot4.png.
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
                      nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two target days, then freeing the big table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates: combine Date and Time into one POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
  plot(Global_active_power~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l",
       ylab="Voltage (volt)", xlab="")
  ## Fix: this panel shows the three sub-metering channels, so the axis
  ## label must not repeat "Global Active Power".
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l",
       ylab="Global Reactive Power (kilowatts)",xlab="")
})
## Saving to file: copy the screen device to a 480x480 PNG and close it
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
| /plot4.R | no_license | devendramehra/datasciencecoursera | R | false | false | 1,374 | r | ## Getting full dataset
## first placed unzipped data source file in current directory
## Produces a 2x2 panel of time-series plots for 2007-02-01..2007-02-02
## and saves it to plot4.png.
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
                      nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two target days, then freeing the big table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates: combine Date and Time into one POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
  plot(Global_active_power~Datetime, type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l",
       ylab="Voltage (volt)", xlab="")
  ## Fix: this panel shows the three sub-metering channels, so the axis
  ## label must not repeat "Global Active Power".
  plot(Sub_metering_1~Datetime, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime,col='Red')
  lines(Sub_metering_3~Datetime,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l",
       ylab="Global Reactive Power (kilowatts)",xlab="")
})
## Saving to file: copy the screen device to a 480x480 PNG and close it
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
#나이브 베이즈 확률 이론
#동전을 100회 던졌을때 앞면이 나오는 횟수는?
#전통적 확률에서는 일어난 횟수/전체시도횟수로 계산
# =>경험적 확률 = 일정한 확률로 반복 실행
#만일 A라는 도시에 수지가 태어날 경우
#이 아이가 노벨상을 받을 확률은 얼마나 될까?
#이것을 경험적 확률로 계싼하려면
#이 아이를 여러명 살게하고
#그 중 몇명이 노벨상을 받는지 평가해보면된다.
#문제는 동일한 유전자,환경에 자란 아이를 만들수 있는가?
#이러한 상황에서 베이즈 확률론을 이용한다.
#일어나지 않을 일에 대한 확률을 불확실성이라는 개념으로 이야기 한다.
#몬티홀 문제 - 미국 티비쇼에서 유래한 퍼즐
#아들딸 패러독스
#두 아이가 있는 어떤 집에서 [첫 아이]가 남자일때
#두 아이 모두가 남자일 확률은?
#두 아이가 있는 어떤 집에서 [한명이]남자일때
#두 아이 모두가 남자일 확률은?
#베이즈 정리
#이전의 경험과 현재의 증거를 토대로 어떤 사건의 확률을 추론하는 알고리즘
#따라서, 사건이 일어날 확률을 토대로 의사결정을 하는경우
#그와 관련된 사전정보를 얼마나 알고있나에 따라 크게 좌우한다.
#기하학 - 피타고라스 정리
#확률학 - 베이즈 정리
#베이즈 정리 예제
#삼키기 어려울 정도의 목에 통증 유발 - 인후염
#병원 방문후 검사(정확도 90%) 시행 -> 결과 양성(폐암)
#의사: 이 결과로 폐암일 확률은 10%도 안될 수 있다. 폐암에 걸린 남성은 성인 남성 1%
#환자: 그래도 걱정이 되니 추가 검사 시행 - 음성(!)
#베이즈 정리에 근거, 실제 검사에서 양성이 나왔을때 진짜 폐암에 걸릴 확률은???
베이즈정리 : P(A|B) = P(A)P(B|A)/P(B)
조건부 확률 : P(A n B) = P(A)P(B|A) = P(B)P(A|B)
P(A n B) = P(B)P(A|B)에서
P(A|B) =P(A n B)/P(B) 로 도출가능
P(A|B) =P(A)P(B|A)/P(B)에서
P(A|B) =P(B)P(A|B)/P(B)로도 도출가능
양성일때 -> 폐암일 확률
P(폐암|양성) = P(폐암 n 양성)/P(양성) = P(폐암)P(양성|폐암)/P(양성)
폐암일때 ->양성일 확률
P(양성|폐암) = P(양성) n 폐암)/P(폐암) = P(양성)P(폐암|양성)/P(폐암)
#정확도 90%검사로 양성일때 폐암일 확률
P(양성|폐암)=0.9
P(음성|폐암)=0.1
#성인남성이 폐암에 걸릴 확률
p(폐암) = 0.01
P(양성) : 폐암이고 진짜 양성일 확률과 폐암이 아닌고 양성일 확률을 더한 확률
P(양성|폐암)P(폐암)+P(양성|1-폐암)P(1-폐암)
0.9*0.01 + 0.1*0.99 = 0.108 = 약 11%
P(폐암)P(양성|폐암)/P(양성)
0.01*0.9/0.108 = 0.083 =>8.3%
#시간이 지나 다시 목이아프코 숨을 쉬기 어려워서 다시 병원에 감
#다시 검사(정확도99%)해보니 역시 양성
#예전 경험에 비춰 별거 아니라고 생각했지만 폐암 확률이 50%증가
#의사는 심각할수 있다 충고
0.99*0.01 +0.01*0.99 = 0.0198
0.01*0.99/0.0198 =>50%
#ex)1.동호회 회원수가 100명일때 여성은 40명남성ㅇ은 60명이다
#이중 기혼인 여성은 16명 남성은 30명이라할때 임의로 뽑은 회원이 기혼이라 할때 여성일 확률은?
P(g)= 0.4
p(m) = 0.6
P(여성|기혼) = P(여성 n 기혼)/P(기혼)
P(A n B) = P(B)P(A|B)=P(A)P(B|A)을 토대로
P(여성|기혼) = P(여성 n 기혼)/P(기혼)
P(여성n기혼) = P(기혼)P(여성|기혼) =P(여성)P(기혼|여성)
P(g) = 0.4
P(기혼|여성) = 16/40 = 0.4
P(기혼) =여성이 기혼일 확률 +남성이 기혼일 확률
P(여성)P(기혼|여성)+P(남성)P(기혼|남성)
0.4*0.4+0.6*0.5 = 0.46
P(A)P(B|A)/P(B)
P(여성|기혼)= P(여성)P(기혼|여성)/P(기혼)
0.4*0.4/0.46 = 0.347 =>약 35%
#2.2개의 조립라인을 가진 공장에서 각 라인에서 생산된 1000대의 휴대폰이 있다.
#1번/2번 조립라인에서 생산된 휴대폰의 불량품이 각각 10%, 15%일때,
#임의로 뽑은 휴대폰이 불량일때,이것이 1본 조립라인에서 생산되었을 확률은?
P(1|불량) = P(1)P(불량|1)/P(불량)
P(불량) = P(1)P(불량|1)+P(2)P(불량|2)
P(불량|1) = 0.1
P(불량|2) =0.15
P(1) = P(2) = 1/2
=0.25
P(1)P(불량|1)/P(1)P(불량|1)+P(2)P(불량|2)
0.5*0.1/(0.5*0.1+0.5*0.15) = 0.4 =>40%
#3.2개의 상자에 검은공과 흰공이 각각2,2/1,2 있다고 할때, 임의로 상자를 선택해서
#공을 1개 꺼냈더니 검은공이 나왔다. 그상자에 남은 공이 모두 흰공일 확률은?
P(A|B) = P(A n B) /P(B)
P(A n B) = P(B)P(A|B)=P(A)P(B|A)
P(b|검은공) = P(A n B)/P(B),P(A n B)/P(B) / P(A)=
P(b n 검은공)/P(검은공)= P(A)P(B|A)/P(검은공)=
P(b)P(검은공|b)/P(검은공)
P(B) = 1/2
P(검은공|B) = 1/3
P(A)P(B|A)/P(B)
P(검은공) = P(A)P(검은공|A) + P(B)P(검은공|B)
P(A)P(A|검은공) = 1/2*1/2 = 0.25
0.25 +(1/2 *1/3) = 0.416 =>42%
P(b|검은공) = =P(b)P(검은공|b)/P(검은공)
0.5*0.3/0.4= 0.375 =>38%
#베이즈 이론의 활용
#흉부회과 환자 303명 조사(나이,성별,검진정보)
#심장병 여부 판별
#개인의 나이, 성별, 직업등의 데이터를 토대로
#영화 장르 추천 시스템
#메일 제목으로 메일종류를 분석
#무료,빅데이터,상담,쇼핑,광고,모집,회원
| /R1811/R13-베이즈확률이론.R | no_license | SonDog0/bigdata | R | false | false | 5,507 | r | #나이브 베이즈 확률 이론
#동전을 100회 던졌을때 앞면이 나오는 횟수는?
#전통적 확률에서는 일어난 횟수/전체시도횟수로 계산
# =>경험적 확률 = 일정한 확률로 반복 실행
#만일 A라는 도시에 수지가 태어날 경우
#이 아이가 노벨상을 받을 확률은 얼마나 될까?
#이것을 경험적 확률로 계싼하려면
#이 아이를 여러명 살게하고
#그 중 몇명이 노벨상을 받는지 평가해보면된다.
#문제는 동일한 유전자,환경에 자란 아이를 만들수 있는가?
#이러한 상황에서 베이즈 확률론을 이용한다.
#일어나지 않을 일에 대한 확률을 불확실성이라는 개념으로 이야기 한다.
#몬티홀 문제 - 미국 티비쇼에서 유래한 퍼즐
#아들딸 패러독스
#두 아이가 있는 어떤 집에서 [첫 아이]가 남자일때
#두 아이 모두가 남자일 확률은?
#두 아이가 있는 어떤 집에서 [한명이]남자일때
#두 아이 모두가 남자일 확률은?
#베이즈 정리
#이전의 경험과 현재의 증거를 토대로 어떤 사건의 확률을 추론하는 알고리즘
#따라서, 사건이 일어날 확률을 토대로 의사결정을 하는경우
#그와 관련된 사전정보를 얼마나 알고있나에 따라 크게 좌우한다.
#기하학 - 피타고라스 정리
#확률학 - 베이즈 정리
#베이즈 정리 예제
#삼키기 어려울 정도의 목에 통증 유발 - 인후염
#병원 방문후 검사(정확도 90%) 시행 -> 결과 양성(폐암)
#의사: 이 결과로 폐암일 확률은 10%도 안될 수 있다. 폐암에 걸린 남성은 성인 남성 1%
#환자: 그래도 걱정이 되니 추가 검사 시행 - 음성(!)
#베이즈 정리에 근거, 실제 검사에서 양성이 나왔을때 진짜 폐암에 걸릴 확률은???
베이즈정리 : P(A|B) = P(A)P(B|A)/P(B)
조건부 확률 : P(A n B) = P(A)P(B|A) = P(B)P(A|B)
P(A n B) = P(B)P(A|B)에서
P(A|B) =P(A n B)/P(B) 로 도출가능
P(A|B) =P(A)P(B|A)/P(B)에서
P(A|B) =P(B)P(A|B)/P(B)로도 도출가능
양성일때 -> 폐암일 확률
P(폐암|양성) = P(폐암 n 양성)/P(양성) = P(폐암)P(양성|폐암)/P(양성)
폐암일때 ->양성일 확률
P(양성|폐암) = P(양성) n 폐암)/P(폐암) = P(양성)P(폐암|양성)/P(폐암)
#정확도 90%검사로 양성일때 폐암일 확률
P(양성|폐암)=0.9
P(음성|폐암)=0.1
#성인남성이 폐암에 걸릴 확률
p(폐암) = 0.01
P(양성) : 폐암이고 진짜 양성일 확률과 폐암이 아닌고 양성일 확률을 더한 확률
P(양성|폐암)P(폐암)+P(양성|1-폐암)P(1-폐암)
0.9*0.01 + 0.1*0.99 = 0.108 = 약 11%
P(폐암)P(양성|폐암)/P(양성)
0.01*0.9/0.108 = 0.083 =>8.3%
#시간이 지나 다시 목이아프코 숨을 쉬기 어려워서 다시 병원에 감
#다시 검사(정확도99%)해보니 역시 양성
#예전 경험에 비춰 별거 아니라고 생각했지만 폐암 확률이 50%증가
#의사는 심각할수 있다 충고
0.99*0.01 +0.01*0.99 = 0.0198
0.01*0.99/0.0198 =>50%
#ex)1.동호회 회원수가 100명일때 여성은 40명남성ㅇ은 60명이다
#이중 기혼인 여성은 16명 남성은 30명이라할때 임의로 뽑은 회원이 기혼이라 할때 여성일 확률은?
P(g)= 0.4
p(m) = 0.6
P(여성|기혼) = P(여성 n 기혼)/P(기혼)
P(A n B) = P(B)P(A|B)=P(A)P(B|A)을 토대로
P(여성|기혼) = P(여성 n 기혼)/P(기혼)
P(여성n기혼) = P(기혼)P(여성|기혼) =P(여성)P(기혼|여성)
P(g) = 0.4
P(기혼|여성) = 16/40 = 0.4
P(기혼) =여성이 기혼일 확률 +남성이 기혼일 확률
P(여성)P(기혼|여성)+P(남성)P(기혼|남성)
0.4*0.4+0.6*0.5 = 0.46
P(A)P(B|A)/P(B)
P(여성|기혼)= P(여성)P(기혼|여성)/P(기혼)
0.4*0.4/0.46 = 0.347 =>약 35%
#2.2개의 조립라인을 가진 공장에서 각 라인에서 생산된 1000대의 휴대폰이 있다.
#1번/2번 조립라인에서 생산된 휴대폰의 불량품이 각각 10%, 15%일때,
#임의로 뽑은 휴대폰이 불량일때,이것이 1본 조립라인에서 생산되었을 확률은?
P(1|불량) = P(1)P(불량|1)/P(불량)
P(불량) = P(1)P(불량|1)+P(2)P(불량|2)
P(불량|1) = 0.1
P(불량|2) =0.15
P(1) = P(2) = 1/2
=0.25
P(1)P(불량|1)/P(1)P(불량|1)+P(2)P(불량|2)
0.5*0.1/(0.5*0.1+0.5*0.15) = 0.4 =>40%
#3.2개의 상자에 검은공과 흰공이 각각2,2/1,2 있다고 할때, 임의로 상자를 선택해서
#공을 1개 꺼냈더니 검은공이 나왔다. 그상자에 남은 공이 모두 흰공일 확률은?
P(A|B) = P(A n B) /P(B)
P(A n B) = P(B)P(A|B)=P(A)P(B|A)
P(b|검은공) = P(A n B)/P(B),P(A n B)/P(B) / P(A)=
P(b n 검은공)/P(검은공)= P(A)P(B|A)/P(검은공)=
P(b)P(검은공|b)/P(검은공)
P(B) = 1/2
P(검은공|B) = 1/3
P(A)P(B|A)/P(B)
P(검은공) = P(A)P(검은공|A) + P(B)P(검은공|B)
P(A)P(A|검은공) = 1/2*1/2 = 0.25
0.25 +(1/2 *1/3) = 0.416 =>42%
P(b|검은공) = =P(b)P(검은공|b)/P(검은공)
0.5*0.3/0.4= 0.375 =>38%
#베이즈 이론의 활용
#흉부회과 환자 303명 조사(나이,성별,검진정보)
#심장병 여부 판별
#개인의 나이, 성별, 직업등의 데이터를 토대로
#영화 장르 추천 시스템
#메일 제목으로 메일종류를 분석
#무료,빅데이터,상담,쇼핑,광고,모집,회원
|
context("radioGroupButtons")
library("shiny")
test_that("Default", {
  # With no `selected` argument, the first choice must come back checked.
  opts <- c("A", "B", "C")
  tag <- radioGroupButtons(
    inputId = "Id029",
    label = "Label",
    choices = opts
  )
  # Drill down to the list of individual button tags.
  buttons <- tag$children[[3]]$children[[1]]$children[[1]]
  expect_length(buttons, length(opts))
  is_checked <- vapply(
    buttons,
    function(btn) grepl(pattern = "checked", x = as.character(btn)),
    logical(1)
  )
  expect_equal(which(is_checked), 1)
})
test_that("With choices", {
  # When `selected` names the second choice, only that button is checked.
  opts <- c("A", "B", "C", "D")
  tag <- radioGroupButtons(
    inputId = "Id030",
    label = "Label",
    choices = opts,
    selected = opts[2]
  )
  buttons <- tag$children[[3]]$children[[1]]$children[[1]]
  expect_length(buttons, length(opts))
  is_checked <- vapply(
    buttons,
    function(btn) grepl(pattern = "checked", x = as.character(btn)),
    logical(1)
  )
  expect_equal(which(is_checked), 2)
})
test_that("Danger status", {
rtag <- radioGroupButtons(
inputId = "Id031",
label = "Label",
choices = c("A", "B", "C", "D"),
status = "danger"
)
choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
danger <- lapply(choicestag, function(x) grepl(pattern = "danger", x = as.character(x)))
danger <- unlist(danger)
expect_true(all(danger))
})
test_that("Success status", {
  # Fix: the "Danger status" test above already uses "Id031"; the ID
  # sequence (Id029, Id030, Id031, Id033, ...) shows "Id032" was intended,
  # and duplicate input IDs are invalid in a Shiny UI.
  rtag <- radioGroupButtons(
    inputId = "Id032",
    label = "Label",
    choices = c("A", "B", "C", "D"),
    status = "success"
  )
  choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
  # Every button should carry the "success" status class.
  success <- lapply(choicestag, function(x) grepl(pattern = "success", x = as.character(x)))
  success <- unlist(success)
  expect_true(all(success))
})
test_that("Justified", {
rtag <- radioGroupButtons(
inputId = "Id033",
label = "Label",
choices = c("A", "B"),
justified = TRUE
)
justified <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(justified, "btn-group btn-group-justified")
})
test_that("Vertical", {
rtag <- radioGroupButtons(
inputId = "Id034",
label = "Label",
choices = c("A", "B", "C", "D"),
direction = "vertical"
)
vertical <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(vertical, "btn-group-vertical")
})
test_that("Size", {
rtag <- radioGroupButtons(
inputId = "Id035",
label = "Label",
choices = c("A", "B", "C", "D"),
size = "lg"
)
lg <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(lg, "btn-group btn-group-lg")
})
test_that("Icons button", {
rtag <- radioGroupButtons(
inputId = "Id036",
label = "Choose a graph :",
choiceNames = list(
shiny::icon("gear"),
shiny::icon("cogs")
),
choiceValues = c("A", "B"),
justified = TRUE
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = as.character(shiny::icon("gear")), x = rtag))
expect_true(grepl(pattern = as.character(shiny::icon("cogs")), x = rtag))
})
test_that("Icons check", {
rtag <- radioGroupButtons(
inputId = "Id037",
label = "Label",
choices = c("A", "B", "C", "D"),
justified = TRUE,
checkIcon = list(yes = shiny::icon("ok", lib = "glyphicon"))
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = "radio-btn-icon-yes", x = rtag))
expect_true(grepl(pattern = "radio-btn-icon-no", x = rtag))
})
test_that("Icons check / uncheck", {
rtag <- radioGroupButtons(
inputId = "Id038",
label = "Label", choices = c("A", "B", "C", "D"),
status = "primary",
checkIcon = list(yes = shiny::icon("ok", lib = "glyphicon"),
no = shiny::icon("remove", lib = "glyphicon"))
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = "radio-btn-icon-no", x = rtag))
})
test_that("Separated buttons", {
rtag <- radioGroupButtons(
inputId = "Id040",
label = "Label",
choices = c("Option 1",
"Option 2", "Option 3",
"Option 4"),
individual = TRUE
)
justified <- rtag$children[[3]]$children[[1]]$attribs
nm <- names(rtag$children[[3]]$children[[1]]$attribs)
justified <- justified[which(nm == "class")]
expect_true(any(grepl(pattern = "btn-group-container-sw", x = unlist(justified))))
})
| /tests/testthat/test-radioGroupButtons.R | permissive | jcheng5/shinyWidgets | R | false | false | 4,235 | r |
context("radioGroupButtons")
library("shiny")
test_that("Default", {
choices <- c("A", "B", "C")
rtag <- radioGroupButtons(
inputId = "Id029",
label = "Label",
choices = choices
)
choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
expect_length(choicestag, length(choices))
checked <- lapply(choicestag, function(x) grepl(pattern = "checked", x = as.character(x)))
checked <- unlist(checked)
expect_equal(which(checked), 1)
})
test_that("With choices", {
choices <- c("A", "B", "C", "D")
rtag <- radioGroupButtons(
inputId = "Id030",
label = "Label",
choices = choices,
selected = choices[2]
)
choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
expect_length(choicestag, length(choices))
checked <- lapply(choicestag, function(x) grepl(pattern = "checked", x = as.character(x)))
checked <- unlist(checked)
expect_equal(which(checked), 2)
})
test_that("Danger status", {
rtag <- radioGroupButtons(
inputId = "Id031",
label = "Label",
choices = c("A", "B", "C", "D"),
status = "danger"
)
choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
danger <- lapply(choicestag, function(x) grepl(pattern = "danger", x = as.character(x)))
danger <- unlist(danger)
expect_true(all(danger))
})
test_that("Success status", {
  # Fix: the "Danger status" test above already uses "Id031"; the ID
  # sequence (Id029, Id030, Id031, Id033, ...) shows "Id032" was intended,
  # and duplicate input IDs are invalid in a Shiny UI.
  rtag <- radioGroupButtons(
    inputId = "Id032",
    label = "Label",
    choices = c("A", "B", "C", "D"),
    status = "success"
  )
  choicestag <- rtag$children[[3]]$children[[1]]$children[[1]]
  # Every button should carry the "success" status class.
  success <- lapply(choicestag, function(x) grepl(pattern = "success", x = as.character(x)))
  success <- unlist(success)
  expect_true(all(success))
})
test_that("Justified", {
rtag <- radioGroupButtons(
inputId = "Id033",
label = "Label",
choices = c("A", "B"),
justified = TRUE
)
justified <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(justified, "btn-group btn-group-justified")
})
test_that("Vertical", {
rtag <- radioGroupButtons(
inputId = "Id034",
label = "Label",
choices = c("A", "B", "C", "D"),
direction = "vertical"
)
vertical <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(vertical, "btn-group-vertical")
})
test_that("Size", {
rtag <- radioGroupButtons(
inputId = "Id035",
label = "Label",
choices = c("A", "B", "C", "D"),
size = "lg"
)
lg <- rtag$children[[3]]$children[[1]]$attribs$class
expect_identical(lg, "btn-group btn-group-lg")
})
test_that("Icons button", {
rtag <- radioGroupButtons(
inputId = "Id036",
label = "Choose a graph :",
choiceNames = list(
shiny::icon("gear"),
shiny::icon("cogs")
),
choiceValues = c("A", "B"),
justified = TRUE
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = as.character(shiny::icon("gear")), x = rtag))
expect_true(grepl(pattern = as.character(shiny::icon("cogs")), x = rtag))
})
test_that("Icons check", {
rtag <- radioGroupButtons(
inputId = "Id037",
label = "Label",
choices = c("A", "B", "C", "D"),
justified = TRUE,
checkIcon = list(yes = shiny::icon("ok", lib = "glyphicon"))
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = "radio-btn-icon-yes", x = rtag))
expect_true(grepl(pattern = "radio-btn-icon-no", x = rtag))
})
test_that("Icons check / uncheck", {
rtag <- radioGroupButtons(
inputId = "Id038",
label = "Label", choices = c("A", "B", "C", "D"),
status = "primary",
checkIcon = list(yes = shiny::icon("ok", lib = "glyphicon"),
no = shiny::icon("remove", lib = "glyphicon"))
)
rtag <- as.character(rtag)
expect_true(grepl(pattern = "radio-btn-icon-no", x = rtag))
})
test_that("Separated buttons", {
rtag <- radioGroupButtons(
inputId = "Id040",
label = "Label",
choices = c("Option 1",
"Option 2", "Option 3",
"Option 4"),
individual = TRUE
)
justified <- rtag$children[[3]]$children[[1]]$attribs
nm <- names(rtag$children[[3]]$children[[1]]$attribs)
justified <- justified[which(nm == "class")]
expect_true(any(grepl(pattern = "btn-group-container-sw", x = unlist(justified))))
})
|
library(ggplot2)
library(tidyverse)
library(qmap)
library(hydroGOF)
library(EnvStats)

## Bias-correct RCM precipitation against station observations using
## empirical quantile mapping (smoothing-spline variant), train/test split
## at 2000-01-01, scored with mean absolute error.

## Station observations; force measurement columns to numeric ("?"-free CSV).
obs <- read_csv("./observed/CHIANG_RAI_TH_TH000048303_1951-2019.csv",
                col_types = cols(
                  TAVG = col_double(),
                  TMIN = col_double(),
                  TMAX = col_double(),
                  PRCP = col_double()
                )
)
mod <- read_csv("./RCM/RCM_hist_TH.csv")
# convert kelvin to celcius
mod <- mod %>% mutate(tas=tas-273.15)
mod <- mod %>% mutate(tasmin=tasmin-273.15)
mod <- mod %>% mutate(tasmax=tasmax-273.15)
# convert prcp unit to mm/day
mod <- mod %>% mutate(pr=pr*86400)
# find RCM grid cell whose centre is nearest the station
obs_lat <- unique(obs$LATITUDE)
obs_lon <- unique(obs$LONGITUDE)
mod_lats <- unique(mod$lat)
mod_lons <- unique(mod$lon)
grid_lat <- mod_lats[which.min(abs(mod_lats-obs_lat))]
grid_lon <- mod_lons[which.min(abs(mod_lons-obs_lon))]
# cleansing RCM model and observed data
obs_clean <- na.omit(obs)
obs_clean <- obs_clean %>% filter(DATE >= "1970-01-01", DATE <= "2005-12-01")
# filter grid and keep only dates present in the observations
mod_clean <- mod %>% filter(lat==grid_lat, lon==grid_lon)
mod_clean <- mod_clean %>% subset(date %in% obs_clean$DATE)
# train/test split
obs_train <- obs_clean %>% filter(DATE < "2000-01-01")
mod_train <- mod_clean %>% filter(date < "2000-01-01")
obs_test <- obs_clean %>% filter(DATE >= "2000-01-01")
mod_test <- mod_clean %>% filter(date >= "2000-01-01")
# Select index
mod_ind <- 'pr'
obs_ind <- 'PRCP'
# Qmap empirical quantile mapping.
# Fix: read_csv returns tibbles, for which `df[, col]` yields a one-column
# tibble rather than a numeric vector; fitQmap*/doQmap/mae expect vectors,
# so extract with `[[`.
qm <- fitQmapSSPLIN(obs_train[[obs_ind]], mod_train[[mod_ind]],
                    qstep=0.001)
mod_train_corrected <- doQmap(mod_train[[mod_ind]], qm)
mod_test_corrected <- doQmap(mod_test[[mod_ind]], qm)
print(sprintf("Train: %f", mae(mod_train[[mod_ind]], obs_train[[obs_ind]])))
print(sprintf("Train bias corrected: %f", mae(mod_train_corrected, obs_train[[obs_ind]])))
print(sprintf("Test: %f", mae(mod_test[[mod_ind]], obs_test[[obs_ind]])))
print(sprintf("Test bias corrected: %f", mae(mod_test_corrected, obs_test[[obs_ind]])))
summary(obs_test %>% dplyr::select(DATE:TMIN))
summary(mod_test %>% dplyr::select(tas:pr))
| /qmap_prcp.R | no_license | chuan-khuna/bias_correction | R | false | false | 2,221 | r | library(ggplot2)
library(tidyverse)
library(qmap)
library(hydroGOF)
library(EnvStats)

## Bias-correct RCM precipitation against station observations using
## empirical quantile mapping (smoothing-spline variant), train/test split
## at 2000-01-01, scored with mean absolute error.

## Station observations; force measurement columns to numeric.
obs <- read_csv("./observed/CHIANG_RAI_TH_TH000048303_1951-2019.csv",
                col_types = cols(
                  TAVG = col_double(),
                  TMIN = col_double(),
                  TMAX = col_double(),
                  PRCP = col_double()
                )
)
mod <- read_csv("./RCM/RCM_hist_TH.csv")
# convert kelvin to celcius
mod <- mod %>% mutate(tas=tas-273.15)
mod <- mod %>% mutate(tasmin=tasmin-273.15)
mod <- mod %>% mutate(tasmax=tasmax-273.15)
# convert prcp unit to mm/day
mod <- mod %>% mutate(pr=pr*86400)
# find RCM grid cell whose centre is nearest the station
obs_lat <- unique(obs$LATITUDE)
obs_lon <- unique(obs$LONGITUDE)
mod_lats <- unique(mod$lat)
mod_lons <- unique(mod$lon)
grid_lat <- mod_lats[which.min(abs(mod_lats-obs_lat))]
grid_lon <- mod_lons[which.min(abs(mod_lons-obs_lon))]
# cleansing RCM model and observed data
obs_clean <- na.omit(obs)
obs_clean <- obs_clean %>% filter(DATE >= "1970-01-01", DATE <= "2005-12-01")
# filter grid and keep only dates present in the observations
mod_clean <- mod %>% filter(lat==grid_lat, lon==grid_lon)
mod_clean <- mod_clean %>% subset(date %in% obs_clean$DATE)
# train/test split
obs_train <- obs_clean %>% filter(DATE < "2000-01-01")
mod_train <- mod_clean %>% filter(date < "2000-01-01")
obs_test <- obs_clean %>% filter(DATE >= "2000-01-01")
mod_test <- mod_clean %>% filter(date >= "2000-01-01")
# Select index
mod_ind <- 'pr'
obs_ind <- 'PRCP'
# Qmap empirical quantile mapping.
# Fix: read_csv returns tibbles, for which `df[, col]` yields a one-column
# tibble rather than a numeric vector; fitQmap*/doQmap/mae expect vectors,
# so extract with `[[`.
qm <- fitQmapSSPLIN(obs_train[[obs_ind]], mod_train[[mod_ind]],
                    qstep=0.001)
mod_train_corrected <- doQmap(mod_train[[mod_ind]], qm)
mod_test_corrected <- doQmap(mod_test[[mod_ind]], qm)
print(sprintf("Train: %f", mae(mod_train[[mod_ind]], obs_train[[obs_ind]])))
print(sprintf("Train bias corrected: %f", mae(mod_train_corrected, obs_train[[obs_ind]])))
print(sprintf("Test: %f", mae(mod_test[[mod_ind]], obs_test[[obs_ind]])))
print(sprintf("Test bias corrected: %f", mae(mod_test_corrected, obs_test[[obs_ind]])))
summary(obs_test %>% dplyr::select(DATE:TMIN))
summary(mod_test %>% dplyr::select(tas:pr))
|
#-------------------
# Principal Coordinate Analysis
#-------------------
library(vegan)
library(rgl)
library(ape)
setwd("~/Desktop/diseaseScript_Final/pcoa/")
## Expression data (VSD) with per-contrast p-values.
## Sample groups: AL = ahead-of-lesion, H = healthy, D = diseased.
dat=read.csv("VSDandPVALS_disease.csv")
names(dat)
head(dat)
data=dat[,2:25]
# Fix: gene IDs live in dat$X (column 1); data=dat[,2:25] has no X column,
# so the original row.names(data)=data$X assigned NULL and silently dropped
# the gene IDs (the subset section below correctly uses sdata$X).
row.names(data)=dat$X
head(data)
#-------------Set experimental conditions
names(data)
disease=c(rep("AL",8), rep("H",8), rep("D",8))
indiv= c("8", "7", "6", "5", "4", "3", "2", "1", "16", "15", "14", "13", "12", "11", "10", "9", "8", "7", "6", "5", "4", "3", "2", "1")
conditions=data.frame(cbind(disease,indiv))
conditions
#-------------Calculate principal coordinates
dd.veg=vegdist(t(data), "manhattan")
div.dd.veg=dd.veg/1000   # rescale distances for numerical convenience
head(div.dd.veg)
dd.pcoa=pcoa(div.dd.veg)
head(dd.pcoa)
scores=dd.pcoa$vectors
#-------------First and second axes
quartz()   # macOS-only graphics device
plot(scores[,1], scores[,2],col=as.numeric(conditions$disease), pch=16)
ordihull(scores,disease,label=T)
#-------------Second and third axes
quartz()
plot(scores[,2], scores[,3],col=as.numeric(conditions$disease),pch=16)
ordihull(scores[,2:3],disease,label=T)
#-------------PERMANOVA
adonis(t(data)~disease+indiv,data=conditions,method="manhattan")
adonis(t(data)~disease,data=conditions,method="manhattan")
pco1=scores[,1]
TukeyHSD(aov(pco1~disease))
##########-----Unadjustd pvalue < 0.05 Only------##############
#-------------Subset for genes significant in any pairwise contrast
head(dat)
DHdat=row.names(dat[dat$pvalDH<0.05 & !is.na(dat$pvalDH),])
AHdat=row.names(dat[dat$pvalAH<0.05 & !is.na(dat$pvalAH),])
DAdat=row.names(dat[dat$pvalDA<0.05 & !is.na(dat$pvalDA),])
sdat=union(DHdat,AHdat)
sdat=union(sdat,DAdat)
length(sdat)
sdata=dat[(row.names(dat) %in% sdat),]
head(sdata)
names(sdata)
data=sdata[,2:25]
row.names(data)=sdata$X
head(data)
#-------------Calculate principal coordinates
dd.veg=vegdist(t(data), "manhattan")
div.dd.veg=dd.veg/1000
head(div.dd.veg)
dd.pcoa=pcoa(div.dd.veg)
head(dd.pcoa)
scores=dd.pcoa$vectors
#-------------First and second axes
quartz()
plot(scores[,1], scores[,2],col=as.numeric(conditions$disease), pch=16)
ordihull(scores,disease,label=T)
#-------------Second and third axes
quartz()
plot(scores[,2], scores[,3],col=as.numeric(conditions$disease),pch=16)
ordihull(scores[,2:3],disease,label=T)
#-------------PERMANOVA
adonis(t(data)~disease+indiv,data=conditions,method="manhattan")
adonis(t(data)~disease,data=conditions,method="manhattan")
pco1=scores[,1]
TukeyHSD(aov(pco1~disease))
| /PCoA_Ahya.R | no_license | rachelwright8/Ahya-White-Syndromes | R | false | false | 2,426 | r | #-------------------
# Principal Coordinate Analysis
#-------------------
library(vegan)
library(rgl)
library(ape)
setwd("~/Desktop/diseaseScript_Final/pcoa/")
## Expression data (VSD) with per-contrast p-values.
## Sample groups: AL = ahead-of-lesion, H = healthy, D = diseased.
dat=read.csv("VSDandPVALS_disease.csv")
names(dat)
head(dat)
data=dat[,2:25]
# Fix: gene IDs live in dat$X (column 1); data=dat[,2:25] has no X column,
# so the original row.names(data)=data$X assigned NULL and silently dropped
# the gene IDs (the subset section below correctly uses sdata$X).
row.names(data)=dat$X
head(data)
#-------------Set experimental conditions
names(data)
disease=c(rep("AL",8), rep("H",8), rep("D",8))
indiv= c("8", "7", "6", "5", "4", "3", "2", "1", "16", "15", "14", "13", "12", "11", "10", "9", "8", "7", "6", "5", "4", "3", "2", "1")
conditions=data.frame(cbind(disease,indiv))
conditions
#-------------Calculate principal coordinates
dd.veg=vegdist(t(data), "manhattan")
div.dd.veg=dd.veg/1000   # rescale distances for numerical convenience
head(div.dd.veg)
dd.pcoa=pcoa(div.dd.veg)
head(dd.pcoa)
scores=dd.pcoa$vectors
#-------------First and second axes
quartz()   # macOS-only graphics device
plot(scores[,1], scores[,2],col=as.numeric(conditions$disease), pch=16)
ordihull(scores,disease,label=T)
#-------------Second and third axes
quartz()
plot(scores[,2], scores[,3],col=as.numeric(conditions$disease),pch=16)
ordihull(scores[,2:3],disease,label=T)
#-------------PERMANOVA
adonis(t(data)~disease+indiv,data=conditions,method="manhattan")
adonis(t(data)~disease,data=conditions,method="manhattan")
pco1=scores[,1]
TukeyHSD(aov(pco1~disease))
##########-----Unadjustd pvalue < 0.05 Only------##############
#-------------Subset for genes significant in any pairwise contrast
head(dat)
DHdat=row.names(dat[dat$pvalDH<0.05 & !is.na(dat$pvalDH),])
AHdat=row.names(dat[dat$pvalAH<0.05 & !is.na(dat$pvalAH),])
DAdat=row.names(dat[dat$pvalDA<0.05 & !is.na(dat$pvalDA),])
sdat=union(DHdat,AHdat)
sdat=union(sdat,DAdat)
length(sdat)
sdata=dat[(row.names(dat) %in% sdat),]
head(sdata)
names(sdata)
data=sdata[,2:25]
row.names(data)=sdata$X
head(data)
#-------------Calculate principal coordinates
dd.veg=vegdist(t(data), "manhattan")
div.dd.veg=dd.veg/1000
head(div.dd.veg)
dd.pcoa=pcoa(div.dd.veg)
head(dd.pcoa)
scores=dd.pcoa$vectors
#-------------First and second axes
quartz()
plot(scores[,1], scores[,2],col=as.numeric(conditions$disease), pch=16)
ordihull(scores,disease,label=T)
#-------------Second and third axes
quartz()
plot(scores[,2], scores[,3],col=as.numeric(conditions$disease),pch=16)
ordihull(scores[,2:3],disease,label=T)
#-------------PERMANOVA
adonis(t(data)~disease+indiv,data=conditions,method="manhattan")
adonis(t(data)~disease,data=conditions,method="manhattan")
pco1=scores[,1]
TukeyHSD(aov(pco1~disease))
|
# Package-level documentation: the roxygen block below attaches its tags to
# the trailing NULL placeholder (classic roxygen2 idiom for package docs;
# NOTE(review): newer roxygen2 prefers the "_PACKAGE" sentinel -- confirm
# before migrating).
#' JuliaCall: Seamless Integration Between R and Julia.
#'
#' JuliaCall provides you with functions to call Julia functions and
#' to use Julia packages as easy as possible.
#'
#' @examples
#'
#' if (identical(Sys.getenv("AUTO_JULIA_INSTALL"), "true")) { ## The examples are quite time consuming
#'
#' ## Do initiation for JuliaCall and automatic installation if necessary
#'
#' julia <- julia_setup(installJulia = TRUE)
#'
#' ## Different ways for calculating `sqrt(2)`
#'
#' # julia$command("a = sqrt(2)"); julia$eval("a")
#' julia_command("a = sqrt(2)"); julia_eval("a")
#'
#' # julia$eval("sqrt(2)")
#' julia_eval("sqrt(2)")
#'
#' # julia$call("sqrt", 2)
#' julia_call("sqrt", 2)
#'
#' # julia$eval("sqrt")(2)
#' julia_eval("sqrt")(2)
#'
#' ## You can use `julia_exists` as `exists` in R to test
#' ## whether a function or name exists in Julia or not
#'
#' # julia$exists("sqrt")
#' julia_exists("sqrt")
#'
#' ## You can use `julia$help` to get help for Julia functions
#'
#' # julia$help("sqrt")
#' julia_help("sqrt")
#'
#' ## You can install and use Julia packages through JuliaCall
#'
#' # julia$install_package("Optim")
#' julia_install_package("Optim")
#'
#' # julia$install_package_if_needed("Optim")
#' julia_install_package_if_needed("Optim")
#'
#' # julia$installed_package("Optim")
#' julia_installed_package("Optim")
#'
#' # julia$library("Optim")
#' julia_library("Optim")
#' }
#'
#' @docType package
#' @useDynLib JuliaCall
#' @import Rcpp
#' @name JuliaCall
NULL
| /R/JuliaCall.R | permissive | cran/JuliaCall | R | false | false | 1,594 | r | #' JuliaCall: Seamless Integration Between R and Julia.
#'
#' JuliaCall provides you with functions to call Julia functions and
#' to use Julia packages as easy as possible.
#'
#' @examples
#'
#' if (identical(Sys.getenv("AUTO_JULIA_INSTALL"), "true")) { ## The examples are quite time consuming
#'
#' ## Do initiation for JuliaCall and automatic installation if necessary
#'
#' julia <- julia_setup(installJulia = TRUE)
#'
#' ## Different ways for calculating `sqrt(2)`
#'
#' # julia$command("a = sqrt(2)"); julia$eval("a")
#' julia_command("a = sqrt(2)"); julia_eval("a")
#'
#' # julia$eval("sqrt(2)")
#' julia_eval("sqrt(2)")
#'
#' # julia$call("sqrt", 2)
#' julia_call("sqrt", 2)
#'
#' # julia$eval("sqrt")(2)
#' julia_eval("sqrt")(2)
#'
#' ## You can use `julia_exists` as `exists` in R to test
#' ## whether a function or name exists in Julia or not
#'
#' # julia$exists("sqrt")
#' julia_exists("sqrt")
#'
#' ## You can use `julia$help` to get help for Julia functions
#'
#' # julia$help("sqrt")
#' julia_help("sqrt")
#'
#' ## You can install and use Julia packages through JuliaCall
#'
#' # julia$install_package("Optim")
#' julia_install_package("Optim")
#'
#' # julia$install_package_if_needed("Optim")
#' julia_install_package_if_needed("Optim")
#'
#' # julia$installed_package("Optim")
#' julia_installed_package("Optim")
#'
#' # julia$library("Optim")
#' julia_library("Optim")
#' }
#'
#' @docType package
#' @useDynLib JuliaCall
#' @import Rcpp
#' @name JuliaCall
NULL
|
#' Vernacular name search
#'
#' Search for taxa using vernacular names
#'
#' @param name a vernacular name
#' @param exact approximate or exact match?
#' @export
#' @return a data frame of results or NA
#' @examples
#' \dontrun{
#' vernacular("pimenta", exact = TRUE)
#' vernacular("pimenta", exact = FALSE)
#' }
vernacular <- function(name, exact = FALSE) {
name <- trim(name)
if (exact) {
res <- vernacular.names[grep(paste("^", name, "$", sep = ""), vernacular.names$vernacular.name, ignore.case = TRUE), c("id", "vernacular.name", "locality")]
} else {
res <- vernacular.names[agrep(name, vernacular.names$vernacular.name, ignore.case = TRUE), c("id", "locality", "vernacular.name")]
}
if (nrow(res) == 0L) {
NA
} else {
merge(all.taxa[, c("id", "search.str", "family")], res, by = "id")
}
} | /flora/R/vernacular.R | no_license | ingted/R-Examples | R | false | false | 828 | r | #' Vernacular name search
#'
#' Search for taxa using vernacular names
#'
#' @param name a vernacular name
#' @param exact approximate or exact match?
#' @export
#' @return a data frame of results or NA
#' @examples
#' \dontrun{
#' vernacular("pimenta", exact = TRUE)
#' vernacular("pimenta", exact = FALSE)
#' }
vernacular <- function(name, exact = FALSE) {
name <- trim(name)
if (exact) {
res <- vernacular.names[grep(paste("^", name, "$", sep = ""), vernacular.names$vernacular.name, ignore.case = TRUE), c("id", "vernacular.name", "locality")]
} else {
res <- vernacular.names[agrep(name, vernacular.names$vernacular.name, ignore.case = TRUE), c("id", "locality", "vernacular.name")]
}
if (nrow(res) == 0L) {
NA
} else {
merge(all.taxa[, c("id", "search.str", "family")], res, by = "id")
}
} |
# libraries, global functions and global values are handled in "global.R"
# For the original (non-shiny) code see https://doi.org/10.5281/zenodo.3727255
# where it may be easier to follow without the reactive wrappers
# Define server logic
server <- function(input, output, session) {
# define the reset button
observeEvent(input$reset_input, {
shinyjs::reset("sidePanel")
})
#####################
# Infectiousness tab
#####################
### Get parameters
# Parameters start using our published values and then can be altered by the user
getDoublingTime <- reactive({
input$doublingTime
})
getEnvInfType <- reactive({
input$env.type
})
getEnvDecay <- reactive({
input$envDecayRate
})
getEnvConst <- reactive({
input$envConstant
})
getIncperMeanlog <- reactive({
log(input$incperMedian)
})
getIncperSdlog <- reactive({
input$incperSdlog
})
getSerIntShape <- reactive({
input$serIntShape
})
getSerIntScale <- reactive({
input$serIntScale
})
getXp <- reactive({
1 # relative infectiousness of presymptomatic to symptomatic individuals (fixed at 1)
})
getXa <- reactive({
input$xa # the relative infectiousness of asymptomatic c.f. symptomatic individuals
})
getP.a <- reactive({
input$P.a # the fraction of all infections that are asymptomatic
})
getFrac.Re <- reactive({
input$frac.Re # the fraction of all transmissions that are environmentally mediated
})
################
# Calculations
###############
  # Solve the transmission model for the current inputs (following Chris's
  # "model.gen.solve" in the original non-shiny code).  Returns a list:
  #   env.scale.constant  - scaling of the environmental route chosen so
  #                         that the fraction frac.Re of transmission is
  #                         environmentally mediated
  #   R0                  - basic reproduction number
  #   RSorP               - normalising constant shared by the symptomatic
  #                         and pre-symptomatic infectiousness terms
  #   RA / RS / RP / RE   - asymptomatic / symptomatic / pre-symptomatic /
  #                         environmental contributions to R0
  #   theta.obs.predicted - predicted fraction of transmission NOT from
  #                         direct contact with symptomatic individuals
  # The model.gen.* helper functions are defined in global.R.
  getDummy <- reactive({
    # get parameters
    frac.Re <- getFrac.Re()
    P.a <- getP.a()
    doubling.time <- getDoublingTime()
    xp <- getXp()
    xa <- getXa()
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    serint.shape <- getSerIntShape()
    serint.scale <- getSerIntScale()
    # NOTE(review): theta.obs is assigned but never used below; it looks
    # like a leftover from the original script -- confirm before removing.
    theta.obs <- 0.83
    env.decay.rate <- getEnvDecay()
    env.constant.duration <- getEnvConst()
    env.infectiousness.type <- getEnvInfType()
    # get reactive functions
    model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
    model.gen.full.beta.div.by.RSorP <- getModelGenFullBetaDivByRSorP()
    # use "model.gen.solve" in a reactive way:
    # exponential growth rate implied by the doubling time
    r <- log(2) / doubling.time # units of per day
    integral.of.model.gen.beta.s.div.by.RSorP <-
      integrate(model.gen.beta.s.div.by.RSorP,
                lower = 0,
                upper = Inf,
                incper.meanlog = incper.meanlog,
                incper.sdlog = incper.sdlog,
                serint.scale = serint.scale,
                serint.shape = serint.shape,
                P.a = P.a,
                xp = xp)$value
    # choose the environmental scaling so that RE / R0 equals frac.Re
    env.scale.constant <- (1 + P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP) /
      (((1 / frac.Re) - 1) * integrate(model.gen.beta.env.div.by.E.RSorP,
                                       lower = 0,
                                       upper = Inf,
                                       serint.scale = serint.scale,
                                       serint.shape = serint.shape,
                                       incper.meanlog = incper.meanlog,
                                       incper.sdlog = incper.sdlog,
                                       P.a = P.a,
                                       xp = xp,
                                       env.decay.rate = env.decay.rate,
                                       env.constant.duration = env.constant.duration,
                                       env.infectiousness.type = env.infectiousness.type)$value)
    # normalising constant fixed by the Euler-Lotka equation for growth rate r
    RSorP <- 1 / integrate(function(tau) {
      model.gen.full.beta.div.by.RSorP(tau = tau,
                                       serint.scale = serint.scale,
                                       serint.shape = serint.shape,
                                       P.a = P.a,
                                       xa = xa,
                                       incper.meanlog = incper.meanlog,
                                       incper.sdlog = incper.sdlog,
                                       xp = xp,
                                       env.scale.constant = env.scale.constant,
                                       env.decay.rate = env.decay.rate,
                                       env.constant.duration = env.constant.duration,
                                       env.infectiousness.type = env.infectiousness.type) *
        exp(-r * tau)}, lower = 0, upper = Inf)$value
    # R0 is the total integral of the (now fully scaled) infectiousness
    R0 <- RSorP * integrate(model.gen.full.beta.div.by.RSorP,
                            lower = 0,
                            upper = Inf,
                            serint.scale = serint.scale,
                            serint.shape = serint.shape,
                            P.a = P.a,
                            xa = xa,
                            incper.meanlog = incper.meanlog,
                            incper.sdlog = incper.sdlog,
                            xp = xp,
                            env.scale.constant = env.scale.constant,
                            env.decay.rate = env.decay.rate,
                            env.constant.duration = env.constant.duration,
                            env.infectiousness.type = env.infectiousness.type)$value
    # sanity check: the Euler-Lotka integral should evaluate to 1
    # (value is computed but not returned)
    should.equal.one <- integrate(function(tau) {exp(-r * tau) * RSorP *
        model.gen.full.beta.div.by.RSorP(tau = tau,
                                         serint.scale = serint.scale,
                                         serint.shape = serint.shape,
                                         P.a = P.a,
                                         xa = xa,
                                         incper.meanlog = incper.meanlog,
                                         incper.sdlog = incper.sdlog,
                                         xp = xp,
                                         env.scale.constant = env.scale.constant,
                                         env.decay.rate = env.decay.rate,
                                         env.constant.duration = env.constant.duration,
                                         env.infectiousness.type = env.infectiousness.type)},
        lower = 0, upper = Inf)$value
    # route-specific contributions to R0
    RS <- integrate(model.gen.beta.sym.tot,
                    lower = 0,
                    upper = Inf,
                    incper.meanlog = incper.meanlog,
                    incper.sdlog = incper.sdlog,
                    serint.scale = serint.scale,
                    serint.shape = serint.shape,
                    P.a = P.a,
                    xp = xp,
                    RSorP = RSorP)$value
    RP <- integrate(model.gen.beta.presym.tot,
                    lower = 0,
                    upper = Inf,
                    incper.meanlog = incper.meanlog,
                    incper.sdlog = incper.sdlog,
                    serint.scale = serint.scale,
                    serint.shape = serint.shape,
                    P.a = P.a,
                    xp = xp,
                    RSorP = RSorP)$value
    RA <- RSorP * P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP
    RE <- env.scale.constant *
      RSorP *
      integrate(function(tau) {
        model.gen.beta.env.div.by.E.RSorP(tau = tau,
                                          serint.scale = serint.scale,
                                          serint.shape = serint.shape,
                                          incper.meanlog = incper.meanlog,
                                          incper.sdlog = incper.sdlog,
                                          P.a = P.a,
                                          xp = xp,
                                          env.decay.rate = env.decay.rate,
                                          env.constant.duration = env.constant.duration,
                                          env.infectiousness.type = env.infectiousness.type)},
        lower = 0, upper = Inf)$value
    # predicted theta: share of transmission not attributable to direct
    # contact with symptomatic individuals
    theta.obs.predicted <- 1 - integrate(function(tau) {
      model.gen.beta.sym.tot(tau = tau,
                             incper.meanlog = incper.meanlog,
                             incper.sdlog = incper.sdlog,
                             serint.scale = serint.scale,
                             serint.shape = serint.shape,
                             P.a = P.a,
                             xp = xp,
                             RSorP = RSorP) *
        exp(-r * tau)},
      lower = 0, upper = Inf)$value
    # return
    list(env.scale.constant = env.scale.constant,
         R0 = R0,
         RSorP = RSorP,
         RA = RA,
         RS = RS,
         RP = RP,
         RE = RE,
         theta.obs.predicted = theta.obs.predicted)
  })
# extract each component from the list "dummy"
getR0 <- reactive({
dummy <- getDummy()
dummy$R0
})
getRSorP <- reactive({
dummy <- getDummy()
dummy$RSorP
})
getRe <- reactive({
dummy <- getDummy()
dummy$RE
})
getRa <- reactive({
dummy <- getDummy()
dummy$RA
})
getRs <- reactive({
dummy <- getDummy()
dummy$RS
})
getRp <- reactive({
dummy <- getDummy()
dummy$RP
})
getTheta <- reactive({
rp <- getRp()
R0 <- getR0()
1 - rp / R0
})
getEnvScaleConst <- reactive({
dummy <- getDummy()
dummy$env.scale.constant
})
  # Reactive builder for the environmental infectiousness kernel
  # beta_E(tau) / (E * RSorP) (following "model.gen.beta.env.div.by.E.RSorP"
  # in the original code): the symptomatic kernel convolved with the
  # environmental persistence profile p.el() from global.R.  Returns a
  # function vectorized over tau.
  getModelGenBetaEnvDivByERSorP <- reactive({
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    P.a <- getP.a()
    xp <- getXp()
    env.decay.rate <- getEnvDecay()
    env.constant.duration <- getEnvConst()
    env.infectiousness.type <- getEnvInfType()
    # because of the way that "env.decay.rate" and "env.constant.duration" depend on
    # "env.infectiousness.type", there is a delay when shiny is launched, when these are undefined
    # so we use "req" to wait until they are defined before continuing the calculation
    if (env.infectiousness.type == "constant") {
      req(env.constant.duration)
    } else {
      req(env.decay.rate)
    }
    # The returned function takes all model parameters explicitly; the
    # reactive fetches above chiefly establish reactive dependencies and
    # feed the req() guard.
    Vectorize(function(tau,
                       incper.meanlog,
                       incper.sdlog,
                       serint.scale,
                       serint.shape,
                       P.a,
                       xp,
                       env.decay.rate,
                       env.constant.duration,
                       env.infectiousness.type) {
      # convolution over the lag l between shedding and pick-up
      integrate(function(l) {
        model.gen.beta.s.div.by.RSorP(tau = tau - l,
                                      incper.meanlog = incper.meanlog,
                                      incper.sdlog = incper.sdlog,
                                      serint.scale = serint.scale,
                                      serint.shape = serint.shape,
                                      P.a = P.a,
                                      xp = xp) *
          p.el(l = l,
               env.decay.rate = env.decay.rate,
               env.constant.duration = env.constant.duration,
               env.infectiousness.type = env.infectiousness.type)
      },
      lower = 0, upper = tau)$value
    }, vectorize.args = "tau")
  })
  # Reactive builder for the full infectiousness kernel beta(tau) / RSorP
  # (following "model.gen.full.beta.div.by.RSorP" in the original code):
  # the generation-time density scaled for symptomatic + pre-symptomatic +
  # asymptomatic transmission, plus the environmental kernel weighted by
  # env.scale.constant (left as a free argument of the returned function).
  getModelGenFullBetaDivByRSorP <- reactive({
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    P.a <- getP.a()
    xa <- getXa()
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    xp <- getXp()
    env.decay.rate <- getEnvDecay()
    env.constant.duration <- getEnvConst()
    env.infectiousness.type <- getEnvInfType()
    model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
    # All parameters are taken explicitly by the returned function; the
    # fetches above establish reactive dependencies.
    function(tau, serint.scale, serint.shape, P.a, xa, incper.meanlog,
             incper.sdlog, xp, env.scale.constant, env.decay.rate,
             env.constant.duration, env.infectiousness.type) {
      serint(x = tau,
             serint.scale = serint.scale,
             serint.shape = serint.shape) *
        (1 + P.a * xa / model.gen.f(tau = tau,
                                    incper.meanlog = incper.meanlog,
                                    incper.sdlog = incper.sdlog,
                                    P.a = P.a,
                                    xp = xp)) +
        env.scale.constant * # this is a free variable of this function
        model.gen.beta.env.div.by.E.RSorP(tau = tau,
                                          serint.scale = serint.scale,
                                          serint.shape = serint.shape,
                                          incper.meanlog = incper.meanlog,
                                          incper.sdlog = incper.sdlog,
                                          P.a = P.a,
                                          xp = xp,
                                          env.decay.rate = env.decay.rate,
                                          env.constant.duration = env.constant.duration,
                                          env.infectiousness.type = env.infectiousness.type)
    }
  })
  ################################
  # Build data frames of results
  ################################
  # Pre-symptomatic transmission curve: beta_P(tau) evaluated on the
  # global grid tau.test (defined in global.R).  Columns: tau, label, beta.
  getDFBetaP <- reactive({
    # get parameters
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    P.a <- getP.a()
    xp <- getXp()
    RSorP <- getRSorP()
    data.frame(tau = tau.test,
               label = "pre-symptomatic",
               beta = vapply(tau.test,
                             model.gen.beta.presym.tot,
                             numeric(1),
                             incper.meanlog = incper.meanlog,
                             incper.sdlog = incper.sdlog,
                             serint.scale = serint.scale,
                             serint.shape = serint.shape,
                             P.a = P.a,
                             xp = xp,
                             RSorP = RSorP))
  })
  # Symptomatic transmission curve: beta_S(tau) on the grid tau.test.
  # Columns: tau, label, beta.
  getDFBetaS <- reactive({
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    P.a <- getP.a()
    xp <- getXp()
    RSorP <- getRSorP()
    data.frame(tau = tau.test,
               label = "symptomatic",
               beta = vapply(tau.test,
                             model.gen.beta.sym.tot,
                             numeric(1),
                             incper.meanlog = incper.meanlog,
                             incper.sdlog = incper.sdlog,
                             serint.scale = serint.scale,
                             serint.shape = serint.shape,
                             P.a = P.a,
                             xp = xp,
                             RSorP = RSorP))
  })
  # Asymptomatic transmission curve: the symptomatic kernel scaled by the
  # asymptomatic fraction P.a and relative infectiousness xa.
  # Columns: tau, label, beta.
  getDFBetaA <- reactive({
    xp <- getXp()
    xa <- getXa()
    P.a <- getP.a()
    RSorP <- getRSorP()
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    data.frame(tau = tau.test,
               label = "asymptomatic",
               beta = vapply(tau.test, function(tau) {
                 RSorP * P.a * xa * model.gen.beta.s.div.by.RSorP(tau = tau,
                                                                  incper.meanlog = incper.meanlog,
                                                                  incper.sdlog = incper.sdlog,
                                                                  serint.scale = serint.scale,
                                                                  serint.shape = serint.shape,
                                                                  P.a = P.a, xp = xp)} , numeric(1)))
  })
  # Environmental transmission curve: the environmental kernel evaluated
  # on tau.test, then rescaled by env.scale.constant * RSorP to absolute
  # units.  Columns: tau, label, beta.
  getDFBetaE <- reactive({
    model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
    xp <- getXp()
    P.a <- getP.a()
    RSorP <- getRSorP()
    serint.scale <- getSerIntScale()
    serint.shape <- getSerIntShape()
    incper.meanlog <- getIncperMeanlog()
    incper.sdlog <- getIncperSdlog()
    env.decay.rate <- getEnvDecay()
    env.constant.duration <- getEnvConst()
    env.infectiousness.type <- getEnvInfType()
    env.scale.constant <- getEnvScaleConst()
    df.beta.e <- data.frame(tau = tau.test,
                            label = "environmental",
                            beta = vapply(tau.test,
                                          model.gen.beta.env.div.by.E.RSorP,
                                          numeric(1),
                                          serint.scale = serint.scale,
                                          serint.shape = serint.shape,
                                          incper.meanlog = incper.meanlog,
                                          incper.sdlog = incper.sdlog,
                                          P.a = P.a,
                                          xp = xp,
                                          env.decay.rate = env.decay.rate,
                                          env.constant.duration = env.constant.duration,
                                          env.infectiousness.type = env.infectiousness.type)
    )
    # convert the normalised kernel to absolute infectiousness
    df.beta.e$beta <- df.beta.e$beta * env.scale.constant * RSorP
    df.beta.e
  })
# collect each df.beta into a data frame, for plotting
getDF <- reactive({
validate(
need(try(df.beta.p <- getDFBetaP()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.s <- getDFBetaS()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.e <- getDFBetaE()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.a <- getDFBetaA()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
rbind(df.beta.p, df.beta.s, df.beta.e, df.beta.a) # if you change the order of these you also need to change the order of the legend labels!
})
################
# For plotting
################
# get ymax for plotting
getYmax <- reactive({
df.plot <- getDF()
df.plot.wide <- spread(df.plot, label, beta)
df.plot.wide$max <- df.plot.wide$`pre-symptomatic` + df.plot.wide$symptomatic +
df.plot.wide$environmental + df.plot.wide$asymptomatic
ymax <- max(df.plot.wide$max * 1.05)
})
# an example of how to set the colours; this can be a user input if desired
getCols <- reactive({
brewer.pal(8, "Paired")[c(1,3,5,7,2)] # palette which is colour-blind friendly but doesn't risk implying any pairing. Four colours for main plot, final colour for mini plots
})
  # Main stacked-area plot of infectiousness beta(tau) by transmission
  # route, with R0 and the per-route R values rendered into the legend.
  # grid.arrange() draws the plot (side effect) and returns a gtable.
  getPlot <- reactive({
    df.plot <- getDF()
    ymax <- getYmax()
    cols <- getCols()
    RS <- getRs()
    RP <- getRp()
    RE <- getRe()
    RA <- getRa()
    R0 <- getR0()
    mainPlot <- ggplot(df.plot, aes(x=tau, y=beta)) + #, color=label)) +
      theme_bw(base_size = 18) +
      geom_area(aes(fill=label)) +
      labs(x = expression(paste("Days, ",tau)),
           y = expression(paste("New infections per day, ",beta, "(", tau,")")),
           fill = bquote(paste('R'['0'] * ' = ' * .(format(round(R0, 1), nsmall = 1)) * ":" ))
      ) +
      coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
      # legend labels must follow the row order produced by getDF()
      scale_fill_manual(values = cols,
                        labels = c(
                          bquote(paste('R'['p'] * ' = ' * .(format(round(RP, 1), nsmall = 1)) * " from pre-symptomatic")),
                          bquote(paste('R'['s'] * ' = ' * .(format(round(RS, 1), nsmall = 1)) * " from symptomatic")),
                          bquote(paste('R'['e'] * ' = ' * .(format(round(RE, 1), nsmall = 1)) * " from environmental")),
                          bquote(paste('R'['a'] * ' = ' * .(format(round(RA, 1), nsmall = 1)) * " from asymptomatic"))))
    #scale_x_continuous(minor_breaks = minor_breaks, breaks = breaks)
    grid.arrange(mainPlot, ncol=1)
  })
# create four mini plots, decomposing the main plot into each category
getDecompositionPlot <- reactive({
validate(
need(try(df.beta.p <- getDFBetaP()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.s <- getDFBetaS()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.e <- getDFBetaE()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.a <- getDFBetaA()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
ymax <- getYmax()
cols <- getCols()
RS <- getRs()
RP <- getRp()
RE <- getRe()
RA <- getRa()
R0 <- getR0()
p <- ggplot(df.beta.p, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = expression(paste("Contribution to ", beta, "(", tau,")"))
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[1]])
s <- ggplot(df.beta.s, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[2]])
e <- ggplot(df.beta.e, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[3]])
a <- ggplot(df.beta.a, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[4]])
grid.arrange(p,s,e,a, ncol=4)
})
  # HTML summary shown below the main plot: the predicted proportion of
  # transmission not from direct contact with symptomatic individuals.
  # Validation message is empty so nothing is shown while theta is
  # unavailable.
  getParameterSummary <- reactive({
    validate(
      need(try(theta <- getTheta()), "")
    )
    HTML(paste0("<h4>
                <br/>
                <br/> Proportion of infections which are not from direct contact with symptomatic individuals Θ = ", round(theta, 2),
                "</h4>")) # "Θ" is a literal Unicode capital theta character
  })
### Make mini plot to demonstrate the user-defined incubation period distribution with
# the mean and sd they have specified.
getIncperDistributionPlot <- reactive({
req(getIncperMeanlog()) # can't plot it until these values have been calculated
req(getIncperSdlog())
m <- getIncperMeanlog()
s <- getIncperSdlog()
cols <- getCols()
# Make sure we capture at least 99% of the distribution in the plot
xmax <- 20 #max(qlnorm(0.99, meanlog = m, sdlog = s), 20)
x <- seq(0,xmax,by=0.01)
y <- sapply(x, function(x) plnorm(x,m,s))
ggplot(cbind.data.frame(x,y), aes(x=x, y=y)) +
geom_line(col=cols[[5]]) +
theme_classic() +
coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
labs(x = "Incubation period (days)", y = "Probability density")
})
  # Mini plot of the user-defined Weibull generation-time density
  # (shape and scale chosen in the sidebar).
  getSerintDistributionPlot <- reactive({
    req(getSerIntShape()) # can't plot it until these values have been calculated
    req(getSerIntScale())
    m <- getSerIntShape()
    s <- getSerIntScale()
    cols <- getCols()
    xmax <- 20
    x <- seq(0,xmax,by=0.01)
    y <- sapply(x, function(x) dweibull(x,m,s))
    ggplot(cbind.data.frame(x,y), aes(x=x, y=y)) +
      geom_line(col=cols[[5]]) +
      theme_classic() +
      coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
      labs(x = "Generation time (days)", y = "Probability density")
  })
  ###############################################
  # Reactive sidebar input / outputs
  ###############################################
  # Build the environmental-infectiousness slider whose meaning depends on
  # the selected profile: decay rate for "exp.decay", duration otherwise.
  getEnvSliders <- reactive({
    type <- input$env.type
    if (type=="exp.decay") {
      sliderInput("envDecayRate",
                  h6("Environmental infectiousness exponential decay rate (per day):"),
                  min = 0.05,
                  max = 5,
                  step = 0.01,
                  value = 2.3)
    } else {
      sliderInput("envConstant",
                  h6("Environmental infectiousness duration (days):"),
                  min = 1,
                  max = 10,
                  step = 0.01,
                  value = 3)
    }
  })
  ###########
  # OUTPUTS
  ##########
  # Main plot.  getPlot() draws via grid.arrange() as a side effect, so
  # validate(need(...)) both guards against failures and triggers the draw.
  output$mainPlot <- renderPlot({
    validate(need(getPlot(), ""))
  })
  # Decomposed (per-route) version of the main plot.
  output$decompositionPlot <- renderPlot({
    validate(need(getDecompositionPlot(), ""))
  })
  # HTML parameter summary shown below the plot.
  output$parameterSummary <- renderUI({
    validate(need(getParameterSummary(), ""))
    getParameterSummary()
  })
  # SIDEBAR:
  # Mini plot of the incubation-period distribution chosen by the user.
  output$IncperDistribPlot <- renderPlot({
    getIncperDistributionPlot()
  })
  # Mini plot of the generation-time distribution chosen by the user.
  output$SerintDistribPlot <- renderPlot({
    getSerintDistributionPlot()
  })
  # Environmental slider whose meaning depends on the selected profile.
  output$environment_sliders <- renderUI({
    getEnvSliders()
  })
  #####################################################################################################
  #####################
  # Interventions tab
  #####################
  # Reset button: restore the interventions-tab sidebar and main panel
  # inputs to their initial values.
  observeEvent(input$reset_inputContour, {
    shinyjs::reset("sidePanel2")
    shinyjs::reset("mainPanel2")
  })
# Largely a repeat of everything above, with "Contour" appended! Plus delay slider:
getDelay <- reactive({
input$delay
})
getIncperMedianlogContour <- reactive({
log(input$incperMedianContour)
})
getIncperSdlogContour <- reactive({
input$incperSdlogContour
})
getSerIntShapeContour <- reactive({
input$serIntShapeContour
})
getSerIntScaleContour <- reactive({
input$serIntScaleContour
})
getDoublingTimeContour <- reactive({
input$doublingTimeContour
})
getXpContour <- reactive({
1
})
getXaContour <- reactive({
input$xaContour # the relative infectiousness of asymptomatic c.f. symptomatic individuals
})
getP.aContour <- reactive({
input$P.aContour # the fraction of all infections that are asymptomatic
})
getFrac.ReContour <- reactive({
input$frac.ReContour # the fraction of all transmissions that are environmentally mediated
})
### Make mini plot to demonstrate the user-defined incubation period distribution with
# the mean and sd they have specified.
getIncperDistributionPlotContour <- reactive({
req(getIncperMedianlogContour()) # can't plot it until these values have been calculated
req(getIncperSdlogContour())
m <- getIncperMedianlogContour()
s <- getIncperSdlogContour()
cols <- getCols()
xmax <- 20
x <- seq(0,xmax,by=0.01)
y <- sapply(x, function(x) plnorm(x,m,s))
ggplot(cbind.data.frame(x,y), aes(x=x, y=y)) +
geom_line(col=cols[[5]]) +
theme_classic() +
coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
labs(x = "Incubation period (days)", y = "Probability density")
})
  # Mini plot of the Weibull generation-time density for the
  # interventions tab.
  getSerintDistributionPlotContour <- reactive({
    req(getSerIntShapeContour()) # can't plot it until these values have been calculated
    req(getSerIntScaleContour())
    m <- getSerIntShapeContour()
    s <- getSerIntScaleContour()
    cols <- getCols()
    xmax <- 20
    x <- seq(0,xmax,by=0.01)
    y <- sapply(x, function(x) dweibull(x,m,s))
    ggplot(cbind.data.frame(x,y), aes(x=x, y=y)) +
      geom_line(col=cols[[5]]) +
      theme_classic() +
      coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
      labs(x = "Generation time (days)", y = "Probability density")
  })
getFa <- reactive({
P.a <- getP.aContour()
x.a <- getXaContour()
fa = P.a*x.a / (P.a*x.a + (1 - P.a) )
})
  # Build the environmental-infectiousness slider for the interventions
  # tab; its meaning depends on the selected profile (decay rate for
  # "exp.decay", duration otherwise).
  getEnvSlidersContour <- reactive({
    type <- input$env.typeContour
    if (type=="exp.decay") {
      sliderInput("envDecayRateContour",
                  h6("Environmental infectiousness exponential decay rate (per day):"),
                  min = 0.05,
                  max = 5,
                  step = 0.01,
                  value = 2.3)
    } else {
      sliderInput("envConstantContour",
                  h6("Environmental infectiousness duration (days):"),
                  min = 1,
                  max = 10,
                  step = 0.01,
                  value = 3)
    }
  })
getEnvInfTypeContour <- reactive({
input$env.typeContour
})
getEnvDecayContour <- reactive({
input$envDecayRateContour
})
getEnvConstContour <- reactive({
input$envConstantContour
})
  # Interventions-tab twin of getModelGenBetaEnvDivByERSorP(): builds the
  # environmental infectiousness kernel beta_E(tau) / (E * RSorP) from the
  # Contour inputs.  Returns a function vectorized over tau.
  getModelGenBetaEnvDivByERSorPContour <- reactive({
    incper.meanlog <- getIncperMedianlogContour()
    incper.sdlog <- getIncperSdlogContour()
    serint.scale <- getSerIntScaleContour()
    serint.shape <- getSerIntShapeContour()
    P.a <- getP.aContour()
    xp <- getXpContour()
    env.decay.rate <- getEnvDecayContour()
    env.constant.duration <- getEnvConstContour()
    env.infectiousness.type <- getEnvInfTypeContour()
    # because of the way that "env.decay.rate" and "env.constant.duration" depend on
    # "env.infectiousness.type", there is a delay when shiny is launched, when these are undefined
    # so we use "req" to wait until they are defined before continuing the calculation
    if (env.infectiousness.type == "constant") {
      req(env.constant.duration)
    } else {
      req(env.decay.rate)
    }
    # The returned function takes all parameters explicitly; the fetches
    # above chiefly establish reactive dependencies and feed req().
    Vectorize(function(tau,
                       incper.meanlog,
                       incper.sdlog,
                       serint.scale,
                       serint.shape,
                       P.a,
                       xp,
                       env.decay.rate,
                       env.constant.duration,
                       env.infectiousness.type) {
      # convolution over the lag l between shedding and pick-up
      integrate(function(l) {
        model.gen.beta.s.div.by.RSorP(tau = tau - l,
                                      incper.meanlog = incper.meanlog,
                                      incper.sdlog = incper.sdlog,
                                      serint.scale = serint.scale,
                                      serint.shape = serint.shape,
                                      P.a = P.a,
                                      xp = xp) *
          p.el(l = l,
               env.decay.rate = env.decay.rate,
               env.constant.duration = env.constant.duration,
               env.infectiousness.type = env.infectiousness.type)
      },
      lower = 0, upper = tau)$value
    }, vectorize.args = "tau")
  })
  # Interventions-tab twin of getModelGenFullBetaDivByRSorP(): the full
  # infectiousness kernel beta(tau) / RSorP built from the Contour inputs,
  # with env.scale.constant as a free argument of the returned function.
  getModelGenFullBetaDivByRSorPContour <- reactive({
    serint.scale <- getSerIntScaleContour()
    serint.shape <- getSerIntShapeContour()
    P.a <- getP.aContour()
    xa <- getXaContour()
    incper.meanlog <- getIncperMedianlogContour()
    incper.sdlog <- getIncperSdlogContour()
    xp <- getXpContour()
    env.decay.rate <- getEnvDecayContour()
    env.constant.duration <- getEnvConstContour()
    env.infectiousness.type <- getEnvInfTypeContour()
    model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorPContour()
    function(tau, serint.scale, serint.shape, P.a, xa, incper.meanlog,
             incper.sdlog, xp, env.scale.constant, env.decay.rate,
             env.constant.duration, env.infectiousness.type) {
      serint(x = tau,
             serint.scale = serint.scale,
             serint.shape = serint.shape) *
        (1 + P.a * xa / model.gen.f(tau = tau,
                                    incper.meanlog = incper.meanlog,
                                    incper.sdlog = incper.sdlog,
                                    P.a = P.a,
                                    xp = xp)) +
        env.scale.constant * # this is a free variable of this function
        model.gen.beta.env.div.by.E.RSorP(tau = tau,
                                          serint.scale = serint.scale,
                                          serint.shape = serint.shape,
                                          incper.meanlog = incper.meanlog,
                                          incper.sdlog = incper.sdlog,
                                          P.a = P.a,
                                          xp = xp,
                                          env.decay.rate = env.decay.rate,
                                          env.constant.duration = env.constant.duration,
                                          env.infectiousness.type = env.infectiousness.type)
    }
  })
  # following "getDummy()" from the infectiousness tab, but slimmed down
  # because we only need R0 -- the pre-intervention basic reproduction
  # number -- for the interventions tab.
  getR0Contour <- reactive({
    # get parameters
    frac.Re <- getFrac.ReContour()
    P.a <- getP.aContour()
    doubling.time <- getDoublingTimeContour()
    xp <- getXpContour()
    xa <- getXaContour()
    incper.meanlog <- getIncperMedianlogContour()
    incper.sdlog <- getIncperSdlogContour()
    serint.shape <- getSerIntShapeContour()
    serint.scale <- getSerIntScaleContour()
    # NOTE(review): theta.obs is assigned but never used below; it looks
    # like a leftover from the original script.
    theta.obs <- 0.83
    env.decay.rate <- getEnvDecayContour()
    env.constant.duration <- getEnvConstContour()
    env.infectiousness.type <- getEnvInfTypeContour()
    # get reactive functions
    model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorPContour()
    model.gen.full.beta.div.by.RSorP <- getModelGenFullBetaDivByRSorPContour()
    # use "model.gen.solve" in a reactive way:
    r <- log(2) / doubling.time # units of per day. This is r before interventions
    integral.of.model.gen.beta.s.div.by.RSorP <-
      integrate(model.gen.beta.s.div.by.RSorP,
                lower = 0,
                upper = Inf,
                incper.meanlog = incper.meanlog,
                incper.sdlog = incper.sdlog,
                serint.scale = serint.scale,
                serint.shape = serint.shape,
                P.a = P.a,
                xp = xp)$value
    # choose the environmental scaling so that RE / R0 equals frac.Re
    env.scale.constant <- (1 + P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP) /
      (((1 / frac.Re) - 1) * integrate(model.gen.beta.env.div.by.E.RSorP,
                                       lower = 0,
                                       upper = Inf,
                                       serint.scale = serint.scale,
                                       serint.shape = serint.shape,
                                       incper.meanlog = incper.meanlog,
                                       incper.sdlog = incper.sdlog,
                                       P.a = P.a,
                                       xp = xp,
                                       env.decay.rate = env.decay.rate,
                                       env.constant.duration = env.constant.duration,
                                       env.infectiousness.type = env.infectiousness.type)$value)
    # normalising constant fixed by the Euler-Lotka equation for growth rate r
    RSorP <- 1 / integrate(function(tau) {
      model.gen.full.beta.div.by.RSorP(tau = tau,
                                       serint.scale = serint.scale,
                                       serint.shape = serint.shape,
                                       P.a = P.a,
                                       xa = xa,
                                       incper.meanlog = incper.meanlog,
                                       incper.sdlog = incper.sdlog,
                                       xp = xp,
                                       env.scale.constant = env.scale.constant,
                                       env.decay.rate = env.decay.rate,
                                       env.constant.duration = env.constant.duration,
                                       env.infectiousness.type = env.infectiousness.type) *
        exp(-r * tau)}, lower = 0, upper = Inf)$value
    # R0 is the total integral of the fully scaled infectiousness
    R0 <- RSorP * integrate(model.gen.full.beta.div.by.RSorP,
                            lower = 0,
                            upper = Inf,
                            serint.scale = serint.scale,
                            serint.shape = serint.shape,
                            P.a = P.a,
                            xa = xa,
                            incper.meanlog = incper.meanlog,
                            incper.sdlog = incper.sdlog,
                            xp = xp,
                            env.scale.constant = env.scale.constant,
                            env.decay.rate = env.decay.rate,
                            env.constant.duration = env.constant.duration,
                            env.infectiousness.type = env.infectiousness.type)$value
    R0
  })
  # Compute the post-intervention epidemic growth rate r over a grid of
  # intervention success probabilities: ei = probability of isolating a
  # case, et = probability of quarantining a contact (discounted by the
  # environmental fraction fe, which interventions cannot stop).  For each
  # grid cell the growth rate is found as the root of a renewal-equation
  # eigenvalue problem solved by power iteration.  Returns a tibble with
  # columns x, y (success percentages) and z (growth rate; NA when
  # uniroot fails to bracket a root).  Grid/discretisation constants
  # (n, nmax, ndiscr, v, maxiter, miniter, Yerror) come from global.R.
  getM <- reactive({
    fa <- getFa()
    fe <- getFrac.ReContour()
    validate(need(try(R0 <- getR0Contour()), "")) # this is not immediately available because of the reactive "environment" sliders
    delay <- getDelay()
    # incubation from Lauer et al
    log_incubation_sd <- getIncperSdlogContour()
    log_incubation_median <- getIncperMedianlogContour()
    # S(x): fraction of transmission potential still unaffected at time x
    # (asymptomatic cases fa are never detected; the rest are detected
    # after the incubation period plus the delay, converted to days)
    S<-function(x){1-(1-fa)*plnorm(x-delay/24,meanlog = log_incubation_median, sdlog = log_incubation_sd)}
    # Weibull generation time
    beta<-function(x){
      dweibull(x, shape = getSerIntShapeContour(), scale = getSerIntScaleContour())
    }
    # auxiliary functions
    M1<-function(x){R0*beta(x)*(1-ei+ei*S(x))}
    M2<-function(x,y){(1-et+et*S(x+y)/S(y))}
    # nmax, n, ndiscr are defined in global.R
    # initialization
    Y<-rep(1,nmax)/nmax
    r<-0
    m<-matrix(0,nrow=n-1,ncol=n-1)
    for(ci in 1:(n-1)){
      for(cj in 1:(n-1)){
        ei<-ci/n ; et<-cj/n*(1-fe)
        # "eigen" is a local function here (shadows base::eigen inside
        # this loop body): returns the leading eigenvalue minus 1 of the
        # renewal operator at candidate growth rate my_r, estimated by
        # power iteration followed by a regression slope.
        eigen<-function(my_r){
          r<-my_r
          Y<-rep(1,nmax)/nmax;
          Yold<-Y
          # for(i in 1:maxiter){ # removed in favour of the three lines below, suggested by Chris for speed-up
          # Y<-M1(v)*exp(-v*r)*sapply(v,function(z){sum(M2(z,v)*Y)/ndiscr})
          temp.vec <- M1(v)*exp(-v*r)
          for(i in 1:maxiter){
            Y<-temp.vec * sapply(v,function(z){sum(M2(z,v)*Y)/ndiscr})
            Y<-Y/sum(Y)
            if(sum(abs(Y-Yold))<Yerror & i>=miniter){break}
            Yold<-Y
          }
          return(lm(I(M1(v)*exp(-v*r)*sapply(v,function(z){sum(M2(z,v)*Y)/ndiscr})) ~ Y)$coeff[2]-1)
        }
        m[ci,cj]<-tryCatch(uniroot(eigen,interval = c(-2,2))$root, error = function(e) {return(NA)} ) }
    }
    #colnames(m)<-c(1:(n-1))/n
    #rownames(m)<-c(1:(n-1))/n
    # flatten the matrix into long format for ggplot
    df <- tibble(x=numeric((n-1)^2), y=numeric((n-1)^2), z=numeric((n-1)^2))
    count <- 0
    for (i in 1:(n-1)) {
      for (j in 1:(n-1)) {
        count <- count + 1
        df[count,] <- list(x = ((1:(n-1))/n)[[i]],
                           y = ((1:(n-1))/n)[[j]],
                           z = m[i,j])
      }
    }
    # remove the "X" at the start of the y values, and convert to percentages
    # NOTE(review): df$y is numeric here, so substring() coerces to
    # character and strips the leading "0" (e.g. "0.1" -> ".1"); the
    # round-trip through as.numeric() only survives because all y values
    # are < 1.  Looks like a leftover from when y came from "X"-prefixed
    # column names -- confirm before simplifying.
    df$y <- vapply(df$y, function(value) substring(value, 2), character(1))
    df$x <- 100 * as.numeric(df$x)
    df$y <- 100 * as.numeric(df$y)
    df
  })
# Heat map of the post-intervention growth rate over the grid computed by
# getM(), with a black contour marking r = 0 (the epidemic-control threshold).
getContourPlot <- reactive({
  validate(need(try(grid.df <- getM()), ""))
  base <- ggplot(grid.df, aes(x, y, fill = z)) +
    theme_bw(base_size = 18) +
    coord_cartesian(expand = F) +
    geom_tile()
  styled <- base +
    scale_fill_gradient2(low = "#66C2A5",
                         mid = "white",
                         high = "#FC8D62",
                         midpoint = 0) +
    labs(x = "% success in isolating cases",
         y = "% success in quarantining contacts",
         fill = "growth rate r \nafter interventions")
  # r = 0 threshold line drawn on top of the tiles
  styled + stat_contour(aes(z = z), breaks = 0, color = "black", size = 1)
})
# Heading describing the assumed intervention delay, shown above the plot
getContourDescribeDelay <- reactive({
  validate(need(d <- getDelay(), ""))
  if (d == 0) {
    return(HTML(paste0("<h4>With instant interventions:</h4>")))
  }
  # singular "hour" when the delay is exactly one
  tail.text <- if (d == 1) " hour:</h4>" else " hours:</h4>"
  HTML(paste0("<h4>With interventions after a delay of ", d, tail.text))
})
# Heading summarising R0 and the pre-intervention growth rate r implied by the
# current inputs, plus guidance for reading the contour plot.
# (Previous comment wrongly said "delay assumption" — copy-paste leftover.)
getContourDescribeR0 <- reactive({
validate(need(R0 <- getR0Contour(), ""))
# exponential growth rate before interventions, from the doubling time
r <- log(2) / getDoublingTimeContour()
HTML(paste0("<h5>The current choice of input parameters describes an epidemic
where the reproduction number R<sub>0</sub> = ",round(R0,1)," and the
growth rate r = ",round(r,2)," <b>before</b> interventions are applied.
The colours in the plot indicate what the growth rate would be if interventions were applied
with a range of success rates: red corresponds to a growing epidemic, green to a declining epidemic.
The black line shows the combinations of success rates of interventions which are needed to achieve r = 0, the threshold for epidemic control.
</h5>"))
})
##########
# OUTPUTS
##########
# mini plot to demonstrate incper distribution as chosen by user
output$IncperDistribPlotContour <- renderPlot({
getIncperDistributionPlotContour()
})
# mini plot to demonstrate serint distribution as chosen by user
output$SerintDistribPlotContour <- renderPlot({
getSerintDistributionPlotContour()
})
# environment sliders, depending on type
output$environment_slidersContour <- renderUI({
getEnvSlidersContour()
})
# main contour heat map of post-intervention growth rate
output$contourPlot <- renderPlot({
getContourPlot()
})
# heading: assumed intervention delay
output$contourDescribeDelay <- renderUI({
getContourDescribeDelay()
})
# heading: R0 / r summary and how to read the plot
output$contourDescribeR0 <- renderUI({
getContourDescribeR0()
})
}
| /server.R | no_license | DrRoad/covid-19_shiny | R | false | false | 43,660 | r | # libraries, global functions and global values are handled in "global.R"
# For the original (non-shiny) code see https://doi.org/10.5281/zenodo.3727255
# where it may be easier to follow without the reactive wrappers
# Define server logic
server <- function(input, output, session) {
# Reset button for the Infectiousness tab: restore every control inside the
# "sidePanel" container to its initial value (via shinyjs::reset)
observeEvent(input$reset_input, {
shinyjs::reset("sidePanel")
})
#####################
# Infectiousness tab
#####################
### Get parameters
# Parameters start using our published values and then can be altered by the user
# epidemic doubling time in days (pre-intervention)
getDoublingTime <- reactive({
input$doublingTime
})
# environmental infectiousness model chosen by the user
getEnvInfType <- reactive({
input$env.type
})
# exponential decay rate (per day) of environmental infectiousness
getEnvDecay <- reactive({
input$envDecayRate
})
# fixed duration (days) of environmental infectiousness
getEnvConst <- reactive({
input$envConstant
})
# lognormal incubation period: meanlog equals log(median) for a lognormal
getIncperMeanlog <- reactive({
log(input$incperMedian)
})
# lognormal incubation period: sdlog
getIncperSdlog <- reactive({
input$incperSdlog
})
# Weibull generation-time shape parameter
getSerIntShape <- reactive({
input$serIntShape
})
# Weibull generation-time scale parameter
getSerIntScale <- reactive({
input$serIntScale
})
getXp <- reactive({
1 # relative infectiousness of presymptomatic to symptomatic individuals (fixed at 1)
})
getXa <- reactive({
input$xa # the relative infectiousness of asymptomatic c.f. symptomatic individuals
})
getP.a <- reactive({
input$P.a # the fraction of all infections that are asymptomatic
})
getFrac.Re <- reactive({
input$frac.Re # the fraction of all transmissions that are environmentally mediated
})
################
# Calculations
###############
# following Chris's method, create "dummy" which uses model.gen.solve and other functions to create a list of the values for
# env.scale.constant
# R0
# RSorP
# RA
# RS
# RP
# RE
# theta.obs.predicted
getDummy <- reactive({
# get parameters
frac.Re <- getFrac.Re()
P.a <- getP.a()
doubling.time <- getDoublingTime()
xp <- getXp()
xa <- getXa()
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
serint.shape <- getSerIntShape()
serint.scale <- getSerIntScale()
theta.obs <- 0.83
env.decay.rate <- getEnvDecay()
env.constant.duration <- getEnvConst()
env.infectiousness.type <- getEnvInfType()
# get reactive functions
model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
model.gen.full.beta.div.by.RSorP <- getModelGenFullBetaDivByRSorP()
# use "model.gen.solve" in a reactive way:
r <- log(2) / doubling.time # units of per day
integral.of.model.gen.beta.s.div.by.RSorP <-
integrate(model.gen.beta.s.div.by.RSorP,
lower = 0,
upper = Inf,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp)$value
env.scale.constant <- (1 + P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP) /
(((1 / frac.Re) - 1) * integrate(model.gen.beta.env.div.by.E.RSorP,
lower = 0,
upper = Inf,
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)$value)
RSorP <- 1 / integrate(function(tau) {
model.gen.full.beta.div.by.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xa = xa,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
xp = xp,
env.scale.constant = env.scale.constant,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type) *
exp(-r * tau)}, lower = 0, upper = Inf)$value
R0 <- RSorP * integrate(model.gen.full.beta.div.by.RSorP,
lower = 0,
upper = Inf,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xa = xa,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
xp = xp,
env.scale.constant = env.scale.constant,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)$value
should.equal.one <- integrate(function(tau) {exp(-r * tau) * RSorP *
model.gen.full.beta.div.by.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xa = xa,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
xp = xp,
env.scale.constant = env.scale.constant,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)},
lower = 0, upper = Inf)$value
RS <- integrate(model.gen.beta.sym.tot,
lower = 0,
upper = Inf,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp,
RSorP = RSorP)$value
RP <- integrate(model.gen.beta.presym.tot,
lower = 0,
upper = Inf,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp,
RSorP = RSorP)$value
RA <- RSorP * P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP
RE <- env.scale.constant *
RSorP *
integrate(function(tau) {
model.gen.beta.env.div.by.E.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)},
lower = 0, upper = Inf)$value
theta.obs.predicted <- 1 - integrate(function(tau) {
model.gen.beta.sym.tot(tau = tau,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp,
RSorP = RSorP) *
exp(-r * tau)},
lower = 0, upper = Inf)$value
# return
list(env.scale.constant = env.scale.constant,
R0 = R0,
RSorP = RSorP,
RA = RA,
RS = RS,
RP = RP,
RE = RE,
theta.obs.predicted = theta.obs.predicted)
})
# extract each component from the list "dummy"
getR0 <- reactive({
dummy <- getDummy()
dummy$R0
})
getRSorP <- reactive({
dummy <- getDummy()
dummy$RSorP
})
getRe <- reactive({
dummy <- getDummy()
dummy$RE
})
getRa <- reactive({
dummy <- getDummy()
dummy$RA
})
getRs <- reactive({
dummy <- getDummy()
dummy$RS
})
getRp <- reactive({
dummy <- getDummy()
dummy$RP
})
getTheta <- reactive({
rp <- getRp()
R0 <- getR0()
1 - rp / R0
})
getEnvScaleConst <- reactive({
dummy <- getDummy()
dummy$env.scale.constant
})
# following "model.gen.beta.env.div.by.E.RSorP"
getModelGenBetaEnvDivByERSorP <- reactive({
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
P.a <- getP.a()
xp <- getXp()
env.decay.rate <- getEnvDecay()
env.constant.duration <- getEnvConst()
env.infectiousness.type <- getEnvInfType()
# because of the way that "env.decay.rate" and "env.constant.duration" depend on
# "env.infectiousness.type", there is a delay when shiny is launched, when these are undefined
# so we use "req" to wait until they are defined before continuing the calculation
if (env.infectiousness.type == "constant") {
req(env.constant.duration)
} else {
req(env.decay.rate)
}
Vectorize(function(tau,
incper.meanlog,
incper.sdlog,
serint.scale,
serint.shape,
P.a,
xp,
env.decay.rate,
env.constant.duration,
env.infectiousness.type) {
integrate(function(l) {
model.gen.beta.s.div.by.RSorP(tau = tau - l,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp) *
p.el(l = l,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)
},
lower = 0, upper = tau)$value
}, vectorize.args = "tau")
})
# following model.gen.full.beta.div.by.RSorP
getModelGenFullBetaDivByRSorP <- reactive({
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
P.a <- getP.a()
xa <- getXa()
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
xp <- getXp()
env.decay.rate <- getEnvDecay()
env.constant.duration <- getEnvConst()
env.infectiousness.type <- getEnvInfType()
model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
function(tau, serint.scale, serint.shape, P.a, xa, incper.meanlog,
incper.sdlog, xp, env.scale.constant, env.decay.rate,
env.constant.duration, env.infectiousness.type) {
serint(x = tau,
serint.scale = serint.scale,
serint.shape = serint.shape) *
(1 + P.a * xa / model.gen.f(tau = tau,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp)) +
env.scale.constant * # this is a free variable of this function
model.gen.beta.env.div.by.E.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)
}
})
################################
# Build data frames of results
################################
# a reactive version of df.beta.p
getDFBetaP <- reactive({
# get parameters
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
P.a <- getP.a()
xp <- getXp()
RSorP <- getRSorP()
data.frame(tau = tau.test,
label = "pre-symptomatic",
beta = vapply(tau.test,
model.gen.beta.presym.tot,
numeric(1),
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp,
RSorP = RSorP))
})
# a reactive version of df.beta.s
getDFBetaS <- reactive({
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
P.a <- getP.a()
xp <- getXp()
RSorP <- getRSorP()
data.frame(tau = tau.test,
label = "symptomatic",
beta = vapply(tau.test,
model.gen.beta.sym.tot,
numeric(1),
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp,
RSorP = RSorP))
})
# a reactive version of df.beta.a
getDFBetaA <- reactive({
xp <- getXp()
xa <- getXa()
P.a <- getP.a()
RSorP <- getRSorP()
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
data.frame(tau = tau.test,
label = "asymptomatic",
beta = vapply(tau.test, function(tau) {
RSorP * P.a * xa * model.gen.beta.s.div.by.RSorP(tau = tau,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a, xp = xp)} , numeric(1)))
})
# a reactive version of df.beta.e
getDFBetaE <- reactive({
model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorP()
xp <- getXp()
P.a <- getP.a()
RSorP <- getRSorP()
serint.scale <- getSerIntScale()
serint.shape <- getSerIntShape()
incper.meanlog <- getIncperMeanlog()
incper.sdlog <- getIncperSdlog()
env.decay.rate <- getEnvDecay()
env.constant.duration <- getEnvConst()
env.infectiousness.type <- getEnvInfType()
env.scale.constant <- getEnvScaleConst()
df.beta.e <- data.frame(tau = tau.test,
label = "environmental",
beta = vapply(tau.test,
model.gen.beta.env.div.by.E.RSorP,
numeric(1),
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)
)
df.beta.e$beta <- df.beta.e$beta * env.scale.constant * RSorP
df.beta.e
})
# collect each df.beta into a data frame, for plotting
getDF <- reactive({
validate(
need(try(df.beta.p <- getDFBetaP()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.s <- getDFBetaS()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.e <- getDFBetaE()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.a <- getDFBetaA()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
rbind(df.beta.p, df.beta.s, df.beta.e, df.beta.a) # if you change the order of these you also need to change the order of the legend labels!
})
################
# For plotting
################
# Upper y-axis limit for the main plot: 105% of the peak of the four stacked
# infectiousness curves (pre-symptomatic + symptomatic + environmental +
# asymptomatic)
getYmax <- reactive({
  wide <- spread(getDF(), label, beta)
  stacked.total <- wide$`pre-symptomatic` + wide$symptomatic +
    wide$environmental + wide$asymptomatic
  max(1.05 * stacked.total)
})
# Shared colour palette: first four entries fill the main stacked plot, the
# fifth is used by the sidebar mini plots. Picks from "Paired" are
# colour-blind friendly without implying any pairing. Could become a user
# input if desired.
getCols <- reactive({
  paired <- brewer.pal(8, "Paired")
  paired[c(1, 3, 5, 7, 2)]
})
# get the main plot using the latest data frame
getPlot <- reactive({
df.plot <- getDF()
ymax <- getYmax()
cols <- getCols()
RS <- getRs()
RP <- getRp()
RE <- getRe()
RA <- getRa()
R0 <- getR0()
mainPlot <- ggplot(df.plot, aes(x=tau, y=beta)) + #, color=label)) +
theme_bw(base_size = 18) +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = expression(paste("New infections per day, ",beta, "(", tau,")")),
fill = bquote(paste('R'['0'] * ' = ' * .(format(round(R0, 1), nsmall = 1)) * ":" ))
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols,
labels = c(
bquote(paste('R'['p'] * ' = ' * .(format(round(RP, 1), nsmall = 1)) * " from pre-symptomatic")),
bquote(paste('R'['s'] * ' = ' * .(format(round(RS, 1), nsmall = 1)) * " from symptomatic")),
bquote(paste('R'['e'] * ' = ' * .(format(round(RE, 1), nsmall = 1)) * " from environmental")),
bquote(paste('R'['a'] * ' = ' * .(format(round(RA, 1), nsmall = 1)) * " from asymptomatic"))))
#scale_x_continuous(minor_breaks = minor_breaks, breaks = breaks)
grid.arrange(mainPlot, ncol=1)
})
# create four mini plots, decomposing the main plot into each category
getDecompositionPlot <- reactive({
validate(
need(try(df.beta.p <- getDFBetaP()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.s <- getDFBetaS()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.e <- getDFBetaE()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
validate(
need(try(df.beta.a <- getDFBetaA()), "Parameters are too extreme for the integral to converge, please adjust one or more of the sliders")
)
ymax <- getYmax()
cols <- getCols()
RS <- getRs()
RP <- getRp()
RE <- getRe()
RA <- getRa()
R0 <- getR0()
p <- ggplot(df.beta.p, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = expression(paste("Contribution to ", beta, "(", tau,")"))
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[1]])
s <- ggplot(df.beta.s, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[2]])
e <- ggplot(df.beta.e, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[3]])
a <- ggplot(df.beta.a, aes(x=tau, y=beta)) +
theme_bw(base_size = 18) +
theme(legend.position="none") +
geom_area(aes(fill=label)) +
labs(x = expression(paste("Days, ",tau)),
y = ""
) +
coord_cartesian(xlim = c(0, max(tau.test)), ylim = c(0, ymax), expand = F) +
scale_fill_manual(values = cols[[4]])
grid.arrange(p,s,e,a, ncol=4)
})
# get the parameter summary to be written below the plot
getParameterSummary <- reactive({
validate(
need(try(theta <- getTheta()), "")
)
HTML(paste0("<h4>
<br/>
<br/> Proportion of infections which are not from direct contact with symptomatic individuals Θ = ", round(theta, 2),
"</h4>")) # using html "θ" to get symbol theta
})
### Mini plot of the user-defined lognormal incubation-period distribution
# with the meanlog/sdlog they have specified.
# FIX: this previously plotted plnorm() (the cumulative distribution) while
# labelling the y-axis "Probability density"; dlnorm() gives the density,
# consistent with the companion generation-time plot, which uses dweibull().
getIncperDistributionPlot <- reactive({
  req(getIncperMeanlog()) # can't plot it until these values have been calculated
  req(getIncperSdlog())
  m <- getIncperMeanlog()
  s <- getIncperSdlog()
  cols <- getCols()
  xmax <- 20 # fixed axis limit (days); captures essentially all of the mass
  x <- seq(0, xmax, by = 0.01)
  y <- dlnorm(x, meanlog = m, sdlog = s) # vectorized; no sapply needed
  ggplot(cbind.data.frame(x, y), aes(x = x, y = y)) +
    geom_line(col = cols[[5]]) +
    theme_classic() +
    coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
    labs(x = "Incubation period (days)", y = "Probability density")
})
# Mini plot of the user-defined Weibull generation-time distribution
getSerintDistributionPlot <- reactive({
  req(getSerIntShape()) # wait until the slider values exist
  req(getSerIntScale())
  shape.par <- getSerIntShape()
  scale.par <- getSerIntScale()
  palette <- getCols()
  x.limit <- 20
  grid.x <- seq(0, x.limit, by = 0.01)
  dens <- dweibull(grid.x, shape.par, scale.par)
  ggplot(cbind.data.frame(x = grid.x, y = dens), aes(x = x, y = y)) +
    theme_classic() +
    geom_line(col = palette[[5]]) +
    coord_cartesian(expand = F, xlim = c(0, x.limit), ylim = c(0, 1.05 * max(dens))) +
    labs(x = "Generation time (days)", y = "Probability density")
})
###############################################
# Reactive sidebar input / outputs
###############################################
# Render the environmental-infectiousness slider matching the chosen decay
# model: exponential decay rate vs fixed duration
getEnvSliders <- reactive({
  if (input$env.type == "exp.decay") {
    sliderInput("envDecayRate",
                h6("Environmental infectiousness exponential decay rate (per day):"),
                min = 0.05, max = 5, step = 0.01, value = 2.3)
  } else {
    sliderInput("envConstant",
                h6("Environmental infectiousness duration (days):"),
                min = 1, max = 10, step = 0.01, value = 3)
  }
})
###########
# OUTPUTS
##########
# main plot
# NOTE: getPlot() ends in grid.arrange(), which draws to the active device as
# a side effect; validate() here both triggers that draw and blanks the
# output (instead of showing an error) if plot construction fails.
output$mainPlot <- renderPlot({
validate(need(getPlot(), ""))
})
# decomposed version of plot (same side-effect rendering via grid.arrange)
output$decompositionPlot <- renderPlot({
validate(need(getDecompositionPlot(), ""))
})
# parameter summary below plot
output$parameterSummary <- renderUI({
validate(need(getParameterSummary(), ""))
getParameterSummary()
})
# SIDEBAR:
# mini plot to demonstrate incper distribution as chosen by user
output$IncperDistribPlot <- renderPlot({
getIncperDistributionPlot()
})
# mini plot to demonstrate serint distribution as chosen by user
output$SerintDistribPlot <- renderPlot({
getSerintDistributionPlot()
})
# environment sliders, depending on type
output$environment_sliders <- renderUI({
getEnvSliders()
})
#####################################################################################################
#####################
# Interventions tab
#####################
# define the reset button
observeEvent(input$reset_inputContour, {
shinyjs::reset("sidePanel2")
shinyjs::reset("mainPanel2")
})
# Largely a repeat of everything above, with "Contour" appended! Plus delay slider:
getDelay <- reactive({
input$delay
})
getIncperMedianlogContour <- reactive({
log(input$incperMedianContour)
})
getIncperSdlogContour <- reactive({
input$incperSdlogContour
})
getSerIntShapeContour <- reactive({
input$serIntShapeContour
})
getSerIntScaleContour <- reactive({
input$serIntScaleContour
})
getDoublingTimeContour <- reactive({
input$doublingTimeContour
})
getXpContour <- reactive({
1
})
getXaContour <- reactive({
input$xaContour # the relative infectiousness of asymptomatic c.f. symptomatic individuals
})
getP.aContour <- reactive({
input$P.aContour # the fraction of all infections that are asymptomatic
})
getFrac.ReContour <- reactive({
input$frac.ReContour # the fraction of all transmissions that are environmentally mediated
})
### Mini plot of the user-defined lognormal incubation-period distribution
# (Interventions tab) with the meanlog/sdlog they have specified.
# FIX: this previously plotted plnorm() (the cumulative distribution) while
# labelling the y-axis "Probability density"; dlnorm() gives the density,
# consistent with the companion generation-time plot, which uses dweibull().
getIncperDistributionPlotContour <- reactive({
  req(getIncperMedianlogContour()) # can't plot it until these values have been calculated
  req(getIncperSdlogContour())
  m <- getIncperMedianlogContour() # log(median) == meanlog for a lognormal
  s <- getIncperSdlogContour()
  cols <- getCols()
  xmax <- 20 # fixed axis limit (days)
  x <- seq(0, xmax, by = 0.01)
  y <- dlnorm(x, meanlog = m, sdlog = s) # vectorized; no sapply needed
  ggplot(cbind.data.frame(x, y), aes(x = x, y = y)) +
    geom_line(col = cols[[5]]) +
    theme_classic() +
    coord_cartesian(expand = F, xlim = c(0, xmax), ylim = c(0, 1.05 * max(y))) +
    labs(x = "Incubation period (days)", y = "Probability density")
})
# Mini plot of the user-defined Weibull generation-time distribution
# (Interventions tab)
getSerintDistributionPlotContour <- reactive({
  req(getSerIntShapeContour()) # wait until the slider values exist
  req(getSerIntScaleContour())
  shape.par <- getSerIntShapeContour()
  scale.par <- getSerIntScaleContour()
  palette <- getCols()
  x.limit <- 20
  grid.x <- seq(0, x.limit, by = 0.01)
  dens <- dweibull(grid.x, shape.par, scale.par)
  ggplot(cbind.data.frame(x = grid.x, y = dens), aes(x = x, y = y)) +
    theme_classic() +
    geom_line(col = palette[[5]]) +
    coord_cartesian(expand = F, xlim = c(0, x.limit), ylim = c(0, 1.05 * max(dens))) +
    labs(x = "Generation time (days)", y = "Probability density")
})
# Fraction of all transmission attributable to asymptomatic individuals:
# the asymptomatic share P.a, weighted by their relative infectiousness x.a,
# over total weighted infectiousness.
getFa <- reactive({
  prop.asym <- getP.aContour()
  rel.inf <- getXaContour()
  weighted.asym <- prop.asym * rel.inf
  weighted.asym / (weighted.asym + (1 - prop.asym))
})
# Render the environmental-infectiousness slider (Interventions tab) matching
# the chosen decay model: exponential decay rate vs fixed duration
getEnvSlidersContour <- reactive({
  if (input$env.typeContour == "exp.decay") {
    sliderInput("envDecayRateContour",
                h6("Environmental infectiousness exponential decay rate (per day):"),
                min = 0.05, max = 5, step = 0.01, value = 2.3)
  } else {
    sliderInput("envConstantContour",
                h6("Environmental infectiousness duration (days):"),
                min = 1, max = 10, step = 0.01, value = 3)
  }
})
getEnvInfTypeContour <- reactive({
input$env.typeContour
})
getEnvDecayContour <- reactive({
input$envDecayRateContour
})
getEnvConstContour <- reactive({
input$envConstantContour
})
# following "model.gen.beta.env.div.by.E.RSorP"
getModelGenBetaEnvDivByERSorPContour <- reactive({
incper.meanlog <- getIncperMedianlogContour()
incper.sdlog <- getIncperSdlogContour()
serint.scale <- getSerIntScaleContour()
serint.shape <- getSerIntShapeContour()
P.a <- getP.aContour()
xp <- getXpContour()
env.decay.rate <- getEnvDecayContour()
env.constant.duration <- getEnvConstContour()
env.infectiousness.type <- getEnvInfTypeContour()
# because of the way that "env.decay.rate" and "env.constant.duration" depend on
# "env.infectiousness.type", there is a delay when shiny is launched, when these are undefined
# so we use "req" to wait until they are defined before continuing the calculation
if (env.infectiousness.type == "constant") {
req(env.constant.duration)
} else {
req(env.decay.rate)
}
Vectorize(function(tau,
incper.meanlog,
incper.sdlog,
serint.scale,
serint.shape,
P.a,
xp,
env.decay.rate,
env.constant.duration,
env.infectiousness.type) {
integrate(function(l) {
model.gen.beta.s.div.by.RSorP(tau = tau - l,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp) *
p.el(l = l,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)
},
lower = 0, upper = tau)$value
}, vectorize.args = "tau")
})
# following model.gen.full.beta.div.by.RSorP
getModelGenFullBetaDivByRSorPContour <- reactive({
serint.scale <- getSerIntScaleContour()
serint.shape <- getSerIntShapeContour()
P.a <- getP.aContour()
xa <- getXaContour()
incper.meanlog <- getIncperMedianlogContour()
incper.sdlog <- getIncperSdlogContour()
xp <- getXpContour()
env.decay.rate <- getEnvDecayContour()
env.constant.duration <- getEnvConstContour()
env.infectiousness.type <- getEnvInfTypeContour()
model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorPContour()
function(tau, serint.scale, serint.shape, P.a, xa, incper.meanlog,
incper.sdlog, xp, env.scale.constant, env.decay.rate,
env.constant.duration, env.infectiousness.type) {
serint(x = tau,
serint.scale = serint.scale,
serint.shape = serint.shape) *
(1 + P.a * xa / model.gen.f(tau = tau,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp)) +
env.scale.constant * # this is a free variable of this function
model.gen.beta.env.div.by.E.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)
}
})
# following "getDummy()" from the infectiousness tab, but slimmed down because we only need to get R0 out
getR0Contour <- reactive({
# get parameters
frac.Re <- getFrac.ReContour()
P.a <- getP.aContour()
doubling.time <- getDoublingTimeContour()
xp <- getXpContour()
xa <- getXaContour()
incper.meanlog <- getIncperMedianlogContour()
incper.sdlog <- getIncperSdlogContour()
serint.shape <- getSerIntShapeContour()
serint.scale <- getSerIntScaleContour()
theta.obs <- 0.83
env.decay.rate <- getEnvDecayContour()
env.constant.duration <- getEnvConstContour()
env.infectiousness.type <- getEnvInfTypeContour()
# get reactive functions
model.gen.beta.env.div.by.E.RSorP <- getModelGenBetaEnvDivByERSorPContour()
model.gen.full.beta.div.by.RSorP <- getModelGenFullBetaDivByRSorPContour()
# use "model.gen.solve" in a reactive way:
r <- log(2) / doubling.time # units of per day. This is r before interventions
integral.of.model.gen.beta.s.div.by.RSorP <-
integrate(model.gen.beta.s.div.by.RSorP,
lower = 0,
upper = Inf,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xp = xp)$value
env.scale.constant <- (1 + P.a * xa * integral.of.model.gen.beta.s.div.by.RSorP) /
(((1 / frac.Re) - 1) * integrate(model.gen.beta.env.div.by.E.RSorP,
lower = 0,
upper = Inf,
serint.scale = serint.scale,
serint.shape = serint.shape,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
P.a = P.a,
xp = xp,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)$value)
RSorP <- 1 / integrate(function(tau) {
model.gen.full.beta.div.by.RSorP(tau = tau,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xa = xa,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
xp = xp,
env.scale.constant = env.scale.constant,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type) *
exp(-r * tau)}, lower = 0, upper = Inf)$value
R0 <- RSorP * integrate(model.gen.full.beta.div.by.RSorP,
lower = 0,
upper = Inf,
serint.scale = serint.scale,
serint.shape = serint.shape,
P.a = P.a,
xa = xa,
incper.meanlog = incper.meanlog,
incper.sdlog = incper.sdlog,
xp = xp,
env.scale.constant = env.scale.constant,
env.decay.rate = env.decay.rate,
env.constant.duration = env.constant.duration,
env.infectiousness.type = env.infectiousness.type)$value
R0
})
getM <- reactive({
  # Post-intervention epidemic growth rate r over a grid of intervention
  # success rates: rows = fraction of cases isolated, columns = fraction of
  # contacts quarantined. Returns a long tibble (x, y in percent, z = r).
  fa <- getFa()               # fraction of asymptomatic cases
  fe <- getFrac.ReContour()   # fraction of transmission missed by tracing
  # Not immediately available because of the reactive "environment" sliders,
  # hence the validate() guard.
  validate(need(try(R0 <- getR0Contour()), ""))
  delay <- getDelay()         # intervention delay in hours
  # Incubation period: log-normal, parameters from Lauer et al.
  log_incubation_sd <- getIncperSdlogContour()
  log_incubation_median <- getIncperMedianlogContour()
  # S(x): probability that a case is still unrecognized x days after
  # infection (asymptomatic cases never develop symptoms; delay is hours).
  S <- function(x) {
    1 - (1 - fa) * plnorm(x - delay / 24,
                          meanlog = log_incubation_median,
                          sdlog = log_incubation_sd)
  }
  # Weibull generation-time density.
  beta <- function(x) {
    dweibull(x, shape = getSerIntShapeContour(), scale = getSerIntScaleContour())
  }
  # Kernels of the renewal equation; ei (isolation) and et (quarantine) are
  # rebound in the grid loop below and captured by these closures.
  M1 <- function(x) R0 * beta(x) * (1 - ei + ei * S(x))
  M2 <- function(x, y) 1 - et + et * S(x + y) / S(y)
  # nmax, n, ndiscr, v, maxiter, miniter, Yerror are defined in global.R
  m <- matrix(0, nrow = n - 1, ncol = n - 1)
  for (ci in seq_len(n - 1)) {
    for (cj in seq_len(n - 1)) {
      ei <- ci / n
      et <- cj / n * (1 - fe)
      # Gap between the leading eigenvalue of the intervention model at
      # growth rate my_r and 1; its root is the post-intervention r.
      # (Renamed from "eigen" to avoid shadowing base::eigen.)
      eigen_gap <- function(my_r) {
        r <- my_r
        Y <- rep(1, nmax) / nmax
        Yold <- Y
        # Hoist the Y-independent part of the kernel out of the power
        # iteration (speed-up suggested by Chris).
        temp.vec <- M1(v) * exp(-v * r)
        for (i in seq_len(maxiter)) {
          Y <- temp.vec * vapply(v, function(z) sum(M2(z, v) * Y) / ndiscr,
                                 numeric(1))
          Y <- Y / sum(Y)
          if (sum(abs(Y - Yold)) < Yerror && i >= miniter) break
          Yold <- Y
        }
        fit <- lm(I(M1(v) * exp(-v * r) *
                      vapply(v, function(z) sum(M2(z, v) * Y) / ndiscr,
                             numeric(1))) ~ Y)
        coef(fit)[2] - 1
      }
      # NA when no root exists in [-2, 2].
      m[ci, cj] <- tryCatch(uniroot(eigen_gap, interval = c(-2, 2))$root,
                            error = function(e) NA)
    }
  }
  # Long format, traversing m row by row (x varies slowest); as.vector(t(m))
  # reproduces that row-major order.
  grid <- seq_len(n - 1) / n
  df <- tibble(x = rep(grid, each = n - 1),
               y = rep(grid, times = n - 1),
               z = as.vector(t(m)))
  # Convert success fractions to percentages for plotting. (The previous
  # substring(value, 2) round-trip was a leftover from when y came from
  # "X"-prefixed column names; on numeric y it only worked because every
  # value printed with a leading "0", and it lost float precision.)
  df$x <- 100 * df$x
  df$y <- 100 * df$y
  df
})
getContourPlot <- reactive({
  # Heat map of the post-intervention growth rate over the intervention
  # grid, with a black r = 0 contour marking the epidemic-control threshold.
  validate(need(try(plot_data <- getM()), ""))
  # Geometry layers first: tiles below, threshold contour drawn on top.
  contour_plot <- ggplot(plot_data, aes(x, y, fill = z)) +
    geom_tile() +
    stat_contour(aes(z = z), breaks = 0, color = "black", size = 1)
  # Diverging fill around r = 0: green = declining, red = growing.
  contour_plot +
    scale_fill_gradient2(low = "#66C2A5",
                         mid = "white",
                         high = "#FC8D62",
                         midpoint = 0) +
    labs(x = "% success in isolating cases",
         y = "% success in quarantining contacts",
         fill = "growth rate r \nafter interventions") +
    theme_bw(base_size = 18) +
    coord_cartesian(expand = FALSE)
})
# describe the delay assumption above the plot
getContourDescribeDelay <- reactive({
  # One-line HTML header stating the assumed intervention delay.
  validate(need(d <- getDelay(), ""))
  heading <- if (d == 0) {
    "<h4>With instant interventions:</h4>"
  } else {
    # Singular "hour" for exactly one hour, plural otherwise.
    unit <- if (d == 1) "hour" else "hours"
    paste0("<h4>With interventions after a delay of ", d, " ", unit, ":</h4>")
  }
  HTML(heading)
})
# describe R0 and the pre-intervention growth rate above the plot
getContourDescribeR0 <- reactive({
# HTML paragraph summarising R0 and the growth rate r implied by the
# current inputs, shown below/above the contour plot.
validate(need(R0 <- getR0Contour(), ""))
# Exponential growth rate corresponding to the chosen doubling time.
r <- log(2) / getDoublingTimeContour()
HTML(paste0("<h5>The current choice of input parameters describes an epidemic
where the reproduction number R<sub>0</sub> = ",round(R0,1)," and the
growth rate r = ",round(r,2)," <b>before</b> interventions are applied.
The colours in the plot indicate what the growth rate would be if interventions were applied
with a range of success rates: red corresponds to a growing epidemic, green to a declining epidemic.
The black line shows the combinations of success rates of interventions which are needed to achieve r = 0, the threshold for epidemic control.
</h5>"))
})
##########
# OUTPUTS
##########
# mini plot to demonstrate incper distribution as chosen by user
output$IncperDistribPlotContour <- renderPlot({
getIncperDistributionPlotContour()
})
# mini plot to demonstrate serint distribution as chosen by user
output$SerintDistribPlotContour <- renderPlot({
getSerintDistributionPlotContour()
})
# environment sliders, depending on type
output$environment_slidersContour <- renderUI({
getEnvSlidersContour()
})
# main contour/heat-map plot of the post-intervention growth rate
output$contourPlot <- renderPlot({
getContourPlot()
})
# HTML header describing the assumed intervention delay
output$contourDescribeDelay <- renderUI({
getContourDescribeDelay()
})
# HTML paragraph describing R0 and the pre-intervention growth rate
output$contourDescribeR0 <- renderUI({
getContourDescribeR0()
})
}
|
setwd("../Downloads/")
d=read.csv("predOutcome.txt",header=F)
##########################viz#########################
tiss.lbl=names(table(d[,2]))
barplot(height=table(d[,2]),names.arg =tiss.lbl,ylim=c(0,1500)) # overview across all patients
##########################cohort##########################
allbarcode=NULL
allpatient=NULL
for (i in 1:nrow(d)){
tcga_barcode =unlist(strsplit(as.character(x=d[i,1]),split="/"))[10]
temp=unlist(strsplit(x=tcga_barcode,split="-")) # first three for a patient id
this_patient=paste(temp[1:3],collapse="-")
allpatient=c(allpatient,this_patient)
allbarcode=c(allbarcode,tcga_barcode)
}
unique(allpatient)
| /postprocessing/vgg/patchClassification.r | no_license | cshukai/tcga_colorectal_histology_images | R | false | false | 655 | r | setwd("../Downloads/")
d=read.csv("predOutcome.txt",header=F)
##########################viz#########################
tiss.lbl=names(table(d[,2]))
barplot(height=table(d[,2]),names.arg =tiss.lbl,ylim=c(0,1500)) # overview across all patients
##########################cohort##########################
allbarcode=NULL
allpatient=NULL
for (i in 1:nrow(d)){
tcga_barcode =unlist(strsplit(as.character(x=d[i,1]),split="/"))[10]
temp=unlist(strsplit(x=tcga_barcode,split="-")) # first three for a patient id
this_patient=paste(temp[1:3],collapse="-")
allpatient=c(allpatient,this_patient)
allbarcode=c(allbarcode,tcga_barcode)
}
unique(allpatient)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{week7_SleepDeprivationExpt}
\alias{week7_SleepDeprivationExpt}
\title{Sleep Deprivation Experiment}
\format{An object of class \code{data.frame} with 21 rows and 2 columns.}
\usage{
week7_SleepDeprivationExpt
}
\description{
Data from a study by Stickgold et al. (2000)
Nature Neuroscience 2 (1237-8)
given in Investigating Statistical Concepts Applications and
Methods (2006) by Rossman and Chance
}
\keyword{datasets}
| /man/week7_SleepDeprivationExpt.Rd | no_license | l-inda/FutureLearnData | R | false | true | 521 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{week7_SleepDeprivationExpt}
\alias{week7_SleepDeprivationExpt}
\title{Light at Night for Mice}
\format{An object of class \code{data.frame} with 21 rows and 2 columns.}
\usage{
week7_SleepDeprivationExpt
}
\description{
Data from a study by Stickgold et al. (2000)
Nature Neuroscience 2 (1237-8)
given in Investigating Statistical Concepts Applications and
Methods (2006) by Rossman and Chance
}
\keyword{datasets}
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Writing a named region to an Excel file
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
# Load the package: library() fails loudly if XLConnect is missing,
# unlike require(), which merely returns FALSE and lets the script
# continue to an unrelated error.
library(XLConnect)
# Excel workbook to write
demoExcelFile <- "mtcars.xlsx"
# Remove the file if it already exists so the demo starts from scratch
if (file.exists(demoExcelFile)) file.remove(demoExcelFile)
# Load workbook (create if not existing)
wb <- loadWorkbook(demoExcelFile, create = TRUE)
# Create a worksheet named 'mtcars'
createSheet(wb, name = "mtcars")
# Alternatively: wb$createSheet(name = "mtcars")
# Create a named region called 'mtcars' on the sheet called 'mtcars'
createName(wb, name = "mtcars", formula = "mtcars!$A$1")
# Alternatively: wb$createName(name = "mtcars", formula = "mtcars!$A$1")
# Write built-in data set 'mtcars' to the above defined named region
writeNamedRegion(wb, mtcars, name = "mtcars")
# Alternatively: wb$writeNamedRegion(mtcars, name = "mtcars")
# Save workbook (this actually writes the file to disk)
saveWorkbook(wb)
# Alternatively: wb$saveWorkbook()
# Optionally open the result when run interactively.
if (interactive()) {
  answer <- readline("Open the created Excel file (y/n)? ")
  if (answer == "y") browseURL(file.path(getwd(), demoExcelFile))
}
| /demo/writeNamedRegion.R | no_license | miraisolutions/xlconnect | R | false | false | 2,182 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2021 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Writing a named region to an Excel file
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
require(XLConnect)
# Excel workbook to write
demoExcelFile <- "mtcars.xlsx"
# Remove file if it already exists
if(file.exists(demoExcelFile)) file.remove(demoExcelFile)
# Load workbook (create if not existing)
wb <- loadWorkbook(demoExcelFile, create = TRUE)
# Create a worksheet named 'mtcars'
createSheet(wb, name = "mtcars")
# Alternatively: wb$createSheet(name = "mtcars")
# Create a named region called 'mtcars' on the sheet called 'mtcars'
createName(wb, name = "mtcars", formula = "mtcars!$A$1")
# Alternatively: wb$createName(name = "mtcars", formula = "mtcars!$A$1")
# Write built-in data set 'mtcars' to the above defined named region
writeNamedRegion(wb, mtcars, name = "mtcars")
# Alternatively: wb$writeNamedRegion(mtcars, name = "mtcars")
# Save workbook (this actually writes the file to disk)
saveWorkbook(wb)
# Alternatively: wb$saveWorkbook()
if(interactive()) {
answer <- readline("Open the created Excel file (y/n)? ")
if(answer == "y") browseURL(file.path(getwd(), demoExcelFile))
}
|
# viz_eigenangles_batch_vs_all<-function(...){
# list(...) %>% map(~.$batch_vs_all) %>% transpose -> angles
# angles.df<-data.frame(
# cosinus=angles %>% unlist %>% cospi,
# sinus=angles %>% unlist %>% sinpi,
# method=angles %>% map(imap %>% partial(...=,~rep(..2,each=length(..1)))) %>% unlist %>% factor,
# batch=angles %>% imap(~rep(..2,each=length(unlist(..1)))) %>% unlist
# )
# ggplot(angles.df,aes(x=cosinus,y=sinus,colour=method))+
# xlim(c(0,1.25))+ylim(c(0,1))+
# geom_segment(xend=0,yend=0)+
# geom_text(label=angles %>% unlist %>% round(3) %>% paste('$\\pi$') %>% TeX,parse=TRUE,nudge_x=.1)+
# geom_arc(aes(x0=0,y0=0,r=1,start=0,end=pi/2),colour='black',inherit.aes = FALSE)+coord_fixed()+
# facet_wrap(~batch)
# }
#
# viz_eigenangles_inter_batch<-function(...){
# list(...) %>% map(~.$inter_batch) -> angles
# angles.df<-data.frame(
# cosinus=angles %>% unlist %>% cospi,
# sinus=angles %>% unlist %>% sinpi,
# method=angles %>% imap(~rep(.y,each=length(unlist(.x)))) %>% unlist %>% factor,
# batch1=angles %>% map(imap %>% partial(...=,~rep(.y,each=length(unlist(.x))))) %>% unlist,
# batch2=angles %>% map(map %>% partial(...=,imap %>% partial(...=,~rep(.y,each=length(.x))))) %>% unlist
# )
# ggplot(angles.df,aes(x=cosinus,y=sinus,colour=method))+
# xlim(c(0,1.25))+ylim(c(0,1))+
# geom_segment(xend=0,yend=0)+
# geom_text(label=angles %>% unlist %>% round(3) %>% paste('$\\pi$') %>% TeX,parse=TRUE,nudge_x=.1)+
# geom_arc(aes(x0=0,y0=0,r=1,start=0,end=pi/2),colour='black',inherit.aes = FALSE)+coord_fixed()+
# facet_grid(batch2~batch1)
# }
| /R/viz_angles.R | no_license | gheger11/eigenangles | R | false | false | 1,652 | r | # viz_eigenangles_batch_vs_all<-function(...){
# list(...) %>% map(~.$batch_vs_all) %>% transpose -> angles
# angles.df<-data.frame(
# cosinus=angles %>% unlist %>% cospi,
# sinus=angles %>% unlist %>% sinpi,
# method=angles %>% map(imap %>% partial(...=,~rep(..2,each=length(..1)))) %>% unlist %>% factor,
# batch=angles %>% imap(~rep(..2,each=length(unlist(..1)))) %>% unlist
# )
# ggplot(angles.df,aes(x=cosinus,y=sinus,colour=method))+
# xlim(c(0,1.25))+ylim(c(0,1))+
# geom_segment(xend=0,yend=0)+
# geom_text(label=angles %>% unlist %>% round(3) %>% paste('$\\pi$') %>% TeX,parse=TRUE,nudge_x=.1)+
# geom_arc(aes(x0=0,y0=0,r=1,start=0,end=pi/2),colour='black',inherit.aes = FALSE)+coord_fixed()+
# facet_wrap(~batch)
# }
#
# viz_eigenangles_inter_batch<-function(...){
# list(...) %>% map(~.$inter_batch) -> angles
# angles.df<-data.frame(
# cosinus=angles %>% unlist %>% cospi,
# sinus=angles %>% unlist %>% sinpi,
# method=angles %>% imap(~rep(.y,each=length(unlist(.x)))) %>% unlist %>% factor,
# batch1=angles %>% map(imap %>% partial(...=,~rep(.y,each=length(unlist(.x))))) %>% unlist,
# batch2=angles %>% map(map %>% partial(...=,imap %>% partial(...=,~rep(.y,each=length(.x))))) %>% unlist
# )
# ggplot(angles.df,aes(x=cosinus,y=sinus,colour=method))+
# xlim(c(0,1.25))+ylim(c(0,1))+
# geom_segment(xend=0,yend=0)+
# geom_text(label=angles %>% unlist %>% round(3) %>% paste('$\\pi$') %>% TeX,parse=TRUE,nudge_x=.1)+
# geom_arc(aes(x0=0,y0=0,r=1,start=0,end=pi/2),colour='black',inherit.aes = FALSE)+coord_fixed()+
# facet_grid(batch2~batch1)
# }
|
/2. SECOND YEAR/Statistical Software/R/Scripts classes /Clase10/Rfunction_fourGraphs.R | no_license | laurajuliamelis/BachelorDegree_Statistics | R | false | false | 991 | r |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.