content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Internal helper: cross-tabulates x by y and builds a matching table of
# rounded percentages, optionally with row/column margins. Returns
# list(Counts=..., Percent=...), both as data frames whose first column holds
# the row labels under the header given by 'rowvar'.
fancytab2<-function(x,y=NULL,digits,sumby=2,rowvar="",rowNames=NULL,missings='ifany',margins=TRUE)
{
# Raw counts; 'missings' is passed through to table()'s useNA argument
tout=table(x,y,useNA=missings)
# Percentages along margin 'sumby' (1 = by row, 2 = by column), rounded to
# 'digits' decimal places via the package utility niceRound()
pout=niceRound(100*prop.table(tout,margin=sumby),digits)
if(margins)
{
tout=addmargins(tout)
# After addmargins(), the added Sum row/column makes each margin sum to
# twice its original total, so prop.table() cells equal count/(2*total);
# the factor 200 (rather than 100) restores ordinary percentages.
# NOTE(review): this assumes addmargins() added margins on both
# dimensions (its default) -- confirm if that call is ever changed.
pout=niceRound(200*prop.table(tout,margin=sumby),digits)
}
# Relabel NA dimnames (produced when useNA is 'ifany'/'always') as "missing"
rownames(tout)[is.na(rownames(tout))]="missing"
rownames(pout)[is.na(rownames(pout))]="missing"
colnames(tout)[is.na(colnames(tout))]="missing"
colnames(pout)[is.na(colnames(pout))]="missing"
# Convert to data frames with the row labels as an explicit first column,
# so downstream spreadsheet export keeps the row names
tout=as.data.frame(cbind(rownames(tout),as.data.frame.matrix(tout)))
names(tout)[1]=rowvar
# Caller-supplied row names override the automatic labels
if(!is.null(rowNames)) tout[,1]=rowNames
pout=as.data.frame(cbind(rownames(pout),as.data.frame.matrix(pout)))
names(pout)[1]=rowvar
if(!is.null(rowNames)) pout[,1]=rowNames
return(list(Counts=tout,Percent=pout))
}
##' Produces 2-way contingency tables, optionally with percentages, exports them to a spreadsheet, and saves the file.
##'
##' This function produces two-way cross-tabulated counts of unique values of \code{rowvar, colvar},
##' optionally with percentages, calculated either by row (\code{sumby=1}, default) or column (\code{sumby=2}).
##' Row and column margins are also produced. Tables are automatically saved to the file associated with the \code{wb} spreadsheet object.
##'
##' There is an underlying asymmetry between rows and columns, because the tables are converted to data frame in order for \code{\link{writeWorksheet}} to export them.
##' The percents can be in parentheses in the same cells as the counts (\code{combine=TRUE}, default), in an identically-sized table on the side (\code{combine=FALSE,percents=TRUE}), or absent (\code{combine=FALSE,percents=FALSE}). If you want no margins, just use the simpler function \code{\link{XLgeneric}}.
##'
##' @note The worksheet \code{sheet} does not have to pre-exist; the function will create it if it doesn't already exist.
##'
##' @note By default, if \code{sheet} exists, it will be written into - rather than completely cleared and rewritten de novo. Only existing data in individual cells that are part of the exported tables' target range will be overwritten. If you do want to clear an existing sheet while exporting the new tables, set \code{purge=TRUE}. This behavior, and the usage of \code{purge}, are the same across all \code{table1xls} export functions.
##'
##'
##' @title Two-way Contingency Tables exported to a spreadsheet
##'
##' @param wb an \code{\link[XLConnect]{workbook-class}} object
##' @param sheet numeric or character: a worksheet name (character) or position (numeric) within \code{wb}.
##' @param rowvar vector: categorical variable (logical, numeric, character, factor, etc.) for the table's rows
##' @param colvar vector: categorical variable (logical, numeric, character, factor, etc.) for the table's columns
##' @param table1mode logical: is the function called from \code{\link{XLtable1}}? If \code{TRUE}, some modifications will be made to the output. Default \code{FALSE}.
##' @param sumby whether percentages should be calculated across rows (1, default) or columns (2).
##' @param rowTitle character: the title to be placed above the row name column (default empty string)
##' @param rowNames,colNames character vector of row and column names. Default behavior (\code{NULL}): automatically determined from data
##' @param ord numeric vector specifying row-index order in the produced table. Default (\code{NULL}) is no re-ordering.
##' @param row1,col1 numeric: the first row and column occupied by the table (title included if relevant).
##' @param title character: an optional overall title to the table. Default (\code{NULL}) is no title.
##' @param header logical: should a header row with the captions "Counts:" and "Percentages:" be added right above the tables? Relevant only when \code{combine=FALSE,percents=TRUE})
##' @param purge logical: should \code{sheet} be created anew, by first removing the previous copy if it exists? (default \code{FALSE})
##' @param digits numeric: how many digits (after the decimal point) to show in the percents? Defaults to 1 if n>=500, 0 otherwise.
##' @param useNA How to handle missing values. Passed on to \code{\link{table}} (see help on that function for options).
##' @param percents logical: would you like only a count table (\code{FALSE}), or also a percents table side-by-side with the the count table (\code{TRUE}, default)?
##' @param combine logical: should counts and percents be combined to the popular \code{"Count(percent)"} format, or presented side-by-side in separate tables? (default: same value as \code{percents})
##' @param testname string, the *name* of a function to run a significance test on the table. Default `chisq.test`. If you want no test, set \code{testname=NULL}
##' @param pround number of significant digits in test p-value representation. Default 3.
##' @param testBelow logical, should test p-value be placed right below the table? Default \code{FALSE}, which places it next to the table's right edge, one row below the column headings.
##' @param margins logical: should margins with totals be returned? Default \code{TRUE}.
##' @param ... additional arguments as needed, to pass on to \code{get(textfun)}
##'
##' @return The function returns invisibly, after writing the data into \code{sheet}.
##' @example inst/examples/Ex2way.r
##' @author Assaf P. Oron \code{<assaf.oron.at.seattlechildrens.org>}
##' @seealso Uses \code{\link{writeWorksheet}} to access the spreadsheet. See \code{\link{setStyleAction}} to control the output style. If interested in one-way tables, see \code{\link{XLoneWay}}.
##' @note This function uses the internal function \code{fancytab2} which produces 2-way tables with counts, percentages and margins.
##' @export
XLtwoWay<-function(wb,sheet,rowvar,colvar,table1mode=FALSE,sumby=1,rowTitle="",rowNames=NULL,colNames=NULL,ord=NULL,row1=1,col1=1,title=NULL,header=FALSE,purge=FALSE,digits=ifelse(length(rowvar)>=500,1,0),useNA='ifany',percents=TRUE,combine=percents,testname='chisq.test',pround=3,testBelow=FALSE,margins=TRUE,...)
{
### Row and column variables must be paired observations of equal length
if(length(rowvar)!=length(colvar)) stop("x:y length mismatch.\n")
### When called from XLtable1, margins are suppressed (XLtable1 adds its own)
if(table1mode) margins<-FALSE
### Optionally clear the sheet, and create it if it does not yet exist
if(purge) removeSheet(wb,sheet)
if(!existsSheet(wb,sheet)) createSheet(wb,sheet)
### Producing counts and percents table via the internal function 'fancytab2'
tab=fancytab2(rowvar,colvar,sumby=sumby,rowvar=rowTitle,rowNames=rowNames,digits=digits,missings=useNA,margins=margins)
if(!is.null(title)) ### Adding a title
{
XLaddText(wb,sheet,text=title,row1=row1,col1=col1)
row1=row1+1
}
### Default row order: as produced by fancytab2
if (is.null(ord)) ord=1:dim(tab$Counts)[1]
### Caller-supplied column headers (applied to both counts and percents)
if (!is.null(colNames))
{
names(tab$Counts)[-1]=colNames
names(tab$Percent)[-1]=colNames
}
### Horizontal offset separating the counts table from the percents table
widt=dim(tab$Counts)[2]+1
if(combine) ### combining counts and percents to a single table (default)
{
### Fix: refer to the list element by its exact name 'Counts'; the original
### 'tab$Count' relied on fragile `$` partial matching
tabout=as.data.frame(mapply(paste0,tab$Counts[,-1],' (',tab$Percent[,-1],'%)'))
tabout=cbind(tab$Counts[,1],tabout)
names(tabout)[1]=rowTitle
writeWorksheet(wb,tabout[ord,],sheet,startRow=row1,startCol=col1)
} else {
if(percents && header) ### adding headers indicating 'counts' and 'percents'
{
XLaddText(wb,sheet,"Counts:",row1=row1,col1=col1)
XLaddText(wb,sheet,"Percent:",row1=row1,col1=col1+widt)
row1=row1+1
}
writeWorksheet(wb,tab$Counts[ord,],sheet,startRow=row1,startCol=col1)
if(percents) writeWorksheet(wb,tab$Percent[ord,],sheet,startRow=row1,startCol=col1+widt)
}
### Perform test and p-value on table
### Only run when a test is requested and both variables have >1 level
if(!is.null(testname) && length(unique(rowvar))>1 && length(unique(colvar))>1 )
{
### try() keeps export going if the test errors; a non-finite/failed result
### is reported as 'Error' in the output cell
pval=suppressWarnings(try(get(testname)(rowvar,colvar,...)$p.value))
ptext=paste(testname,'p:',ifelse(is.finite(pval),niceRound(pval,pround,plurb=TRUE),'Error'))
### Place the p-value either below the table or beside its right edge
prow=ifelse(testBelow,row1+dim(tab$Counts)[1]+1,row1+1)
pcol=ifelse(testBelow,col1,col1+widt-1)
XLaddText(wb,sheet,ptext,row1=prow,col1=pcol)
}
### Auto-fit the columns spanned by both tables, then save to disk
setColumnWidth(wb, sheet = sheet, column = col1:(col1+2*widt+1), width=-1)
saveWorkbook(wb)
} ### Function end
##' Univariate Statistics Exported to Excel
##'
##' Calculates univariate summary statistics (optionally stratified), exports the formatted output to a spreadsheet, and saves the file.
##'
##' This function evaluates up to 2 univariate functions on the input vector \code{calcvar}, either as a single sample, or grouped by strata defined via \code{colvar} (which is named this way for compatibility with \code{\link{XLtable1}}). It produces a single-column or single-row table (apart from row/column headers), with each interior cell containing the formatted results from the two functions. The table is exported to a spreadsheet and the file is saved.
##'
##' The cell can be formatted to show a combined result, e.g. "Mean (SD)" which is the default. Tne function is quite mutable: both \code{fun1$fun, fun2$fun} and the strings separating their formatted output can be user-defined. The functions can return either a string (i.e., a formatted output) or a number that will be interpreted as a string in subsequent formatting.
##' The default calls \code{\link{roundmean},\link{roundSD}} and prints the summaries in \code{"mean(SD)"} format.
##'
##' See the \code{\link{XLtwoWay}} help page, for behavior regarding new-sheet creation, overwriting, etc.
##' @return The function returns invisibly, after writing the data into \code{sheet} and saving the file.
##'
##' @author Assaf P. Oron \code{<assaf.oron.at.seattlechildrens.org>}
##' @seealso Uses \code{\link{writeWorksheet}} to access the spreadsheet, \code{\link{rangeString}} for some utilities that can be used as \code{fun1$fun,fun2$fun}. For one-way (univariate) contingency tables, \code{\link{XLoneWay}}.
##'
##'
##' @example inst/examples/ExUnivar.r
##' @param wb a \code{\link[XLConnect]{workbook-class}} object
##' @param sheet numeric or character: a worksheet name (character) or position (numeric) within \code{wb}.
##' @param calcvar vector: variable to calculate the statistics for (usually numeric, can be logical).
##' @param colvar vector: categorical variable to stratify \code{calcvar}'s summaries over. Will show as columns in output only if \code{sideBySide=TRUE}; otherwise as rows. Default behavior if left unspecified, is to calculate overall summaries for a single row/column output.
##' @param table1mode logical: is the function called from \code{\link{XLtable1}}? If \code{TRUE}, some modifications will be made to the output. Default \code{FALSE}.
##' @param fun1,fun2 two lists describing the utility functions that will calculate the statistics. Each list has a \code{fun} component for the function, and a \code{name} component for its name as it would appear in the column header.
##' @param seps character vector of length 3, specifying the formatted separators before the output of \code{fun1$fun}, between the two outputs, and after the output of \code{fun2$fun}. Default behavior encloses the second output in parentheses. See 'Examples'.
##' @param sideBySide logical: should output be arranged horizontally rather than vertically? Default \code{FALSE}.
##' @param title character: an optional overall title to the table. Default (\code{NULL}) is no title.
##' @param rowTitle character: the title to be placed above the row name column (default empty string)
##' @param rowNames character vector of row names. Default behavior (\code{NULL}): automatically determined from data
##' @param colNames column names for stratifying variable, used when \code{sideBySide=TRUE}. Default: equal to \code{rowNames}.
##' @param ord numeric vector specifying row-index order (i.e., a re-ordering of \code{rowvar}'s levels) in the produced table. Default (\code{NULL}) is no re-ordering.
##' @param row1,col1 numeric: the first row and column occupied by the table (title included if relevant).
##' @param purge logical: should \code{sheet} be created anew, by first removing the previous copy if it exists? (default \code{FALSE})
##' @param ... parameters passed on to \code{fun1$fun,fun2$fun}
##'
##' @export
XLunivariate<-function(wb,sheet,calcvar,colvar=rep("",length(calcvar)),table1mode=FALSE,fun1=list(fun=roundmean,name="Mean"),fun2=list(fun=roundSD,name="SD"),seps=c('',' (',')'),sideBySide=FALSE,title=NULL,rowTitle="",rowNames=NULL,colNames=rowNames,ord=NULL,row1=1,col1=1,purge=FALSE,...)
{
# When called from XLtable1, adjust layout conventions: multi-level strata
# go side-by-side, and the row title becomes the (single) row name
if(table1mode)
{
if(length(unique(colvar))>1) sideBySide<-TRUE
if(is.null(colvar)) colvar=rep("",length(calcvar))
rowNames=rowTitle
rowTitle=""
}
# Optionally clear the sheet, and create it if it does not yet exist
if(purge) removeSheet(wb,sheet)
if(!existsSheet(wb,sheet)) createSheet(wb,sheet)
# Evaluate both summary functions within each stratum of colvar
# (default colvar is a single empty-string stratum, i.e. overall summaries)
num1=tapply(calcvar,colvar,fun1$fun,...)
num2=tapply(calcvar,colvar,fun2$fun,...)
if (is.null(ord)) ord=1:length(num1)
if(length(ord)!=length(num1)) stop("Argument 'ord' in XLunivariate has wrong length.")
if (is.null(rowNames)) rowNames=names(num1)
# Column header combining both function names, e.g. "Mean (SD)"
statname=paste(seps[1],fun1$name,seps[2],fun2$name,seps[3],sep='')
if(sideBySide)
{
# Horizontal layout: one row, one column per stratum (in 'ord' order)
outdat=data.frame(statname)
if(table1mode) {rowTitle=statname;outdat[1]=rowNames}
for (a in ord) outdat=cbind(outdat,paste(seps[1],num1[a],seps[2],num2[a],seps[3],sep=''))
if(!is.null(colNames) && length(colNames)==length(ord)) names(num1)=colNames
names(outdat)=c(rowTitle,names(num1)[ord])
# Row ordering already applied while building columns above, so the
# final export below selects the single assembled row
ord=1
} else {
# Vertical layout (default): one row per stratum, one formatted column
outdat=data.frame(cbind(rowNames,paste(seps[1],num1,seps[2],num2,seps[3],sep='')))
names(outdat)=c(rowTitle,statname)
}
if(!is.null(title)) ### Adding a title
{
XLaddText(wb,sheet,text=title,row1=row1,col1=col1)
row1=row1+1
}
# Export, auto-fit the affected columns, and save the workbook to file
writeWorksheet(wb,outdat[ord,],sheet,startRow=row1,startCol=col1)
setColumnWidth(wb, sheet = sheet, column = col1:(col1+3), width=-1)
saveWorkbook(wb)
}
| /R/exploratoryUtils.r | no_license | cran/table1xls | R | false | false | 13,834 | r |
# Internal helper: cross-tabulates x by y and builds a matching table of
# rounded percentages, optionally with row/column margins. Returns
# list(Counts=..., Percent=...), both as data frames whose first column holds
# the row labels under the header given by 'rowvar'.
fancytab2<-function(x,y=NULL,digits,sumby=2,rowvar="",rowNames=NULL,missings='ifany',margins=TRUE)
{
# Raw counts; 'missings' is passed through to table()'s useNA argument
tout=table(x,y,useNA=missings)
# Percentages along margin 'sumby' (1 = by row, 2 = by column), rounded to
# 'digits' decimal places via the package utility niceRound()
pout=niceRound(100*prop.table(tout,margin=sumby),digits)
if(margins)
{
tout=addmargins(tout)
# After addmargins(), the added Sum row/column makes each margin sum to
# twice its original total, so prop.table() cells equal count/(2*total);
# the factor 200 (rather than 100) restores ordinary percentages.
# NOTE(review): this assumes addmargins() added margins on both
# dimensions (its default) -- confirm if that call is ever changed.
pout=niceRound(200*prop.table(tout,margin=sumby),digits)
}
# Relabel NA dimnames (produced when useNA is 'ifany'/'always') as "missing"
rownames(tout)[is.na(rownames(tout))]="missing"
rownames(pout)[is.na(rownames(pout))]="missing"
colnames(tout)[is.na(colnames(tout))]="missing"
colnames(pout)[is.na(colnames(pout))]="missing"
# Convert to data frames with the row labels as an explicit first column,
# so downstream spreadsheet export keeps the row names
tout=as.data.frame(cbind(rownames(tout),as.data.frame.matrix(tout)))
names(tout)[1]=rowvar
# Caller-supplied row names override the automatic labels
if(!is.null(rowNames)) tout[,1]=rowNames
pout=as.data.frame(cbind(rownames(pout),as.data.frame.matrix(pout)))
names(pout)[1]=rowvar
if(!is.null(rowNames)) pout[,1]=rowNames
return(list(Counts=tout,Percent=pout))
}
##' Produces 2-way contingency tables, optionally with percentages, exports them to a spreadsheet, and saves the file.
##'
##' This function produces two-way cross-tabulated counts of unique values of \code{rowvar, colvar},
##' optionally with percentages, calculated either by row (\code{sumby=1}, default) or column (\code{sumby=2}).
##' Row and column margins are also produced. Tables are automatically saved to the file associated with the \code{wb} spreadsheet object.
##'
##' There is an underlying asymmetry between rows and columns, because the tables are converted to data frame in order for \code{\link{writeWorksheet}} to export them.
##' The percents can be in parentheses in the same cells as the counts (\code{combine=TRUE}, default), in an identically-sized table on the side (\code{combine=FALSE,percents=TRUE}), or absent (\code{combine=FALSE,percents=FALSE}). If you want no margins, just use the simpler function \code{\link{XLgeneric}}.
##'
##' @note The worksheet \code{sheet} does not have to pre-exist; the function will create it if it doesn't already exist.
#'
##' @note By default, if \code{sheet} exists, it will be written into - rather than completely cleared and rewritten de novo. Only existing data in individual cells that are part of the exported tables' target range will be overwritten. If you do want to clear an existing sheet while exporting the new tables, set \code{purge=TRUE}. This behavior, and the usage of \code{purge}, are the same across all \code{table1xls} export functions.
##'
##'
##' @title Two-way Contingency Tables exported to a spreadsheet
##'
##' @param wb an \code{\link[XLConnect]{workbook-class}} object
##' @param sheet numeric or character: a worksheet name (character) or position (numeric) within \code{wb}.
##' @param rowvar vector: categorical variable (logical, numeric, character, factor, etc.) for the table's rows
##' @param colvar vector: categorical variable (logical, numeric, character factor, etc.) for the table's columns
##' @param table1mode logical: is the function called from \code{\link{XLtable1}}? If \code{TRUE}, some modifications will be made to the output. Default \code{FALSE}.
##' @param sumby whether percentages should be calculated across rows (1, default) or columns (2).
##' @param rowTitle character: the title to be placed above the row name column (default empty string)
##' @param rowNames,colNames character vector of row and column names. Default behavior (\code{NULL}): automatically determined from data
##' @param ord numeric vector specifying row-index order in the produced table. Default (\code{NULL}) is no re-ordering.
##' @param row1,col1 numeric: the first row and column occupied by the table (title included if relevant).
##' @param title character: an optional overall title to the table. Default (\code{NULL}) is no title.
##' @param header logical: should a header row with the captions "Counts:" and "Percentages:" be added right above the tables? Relevant only when \code{combine=FALSE,percents=TRUE})
##' @param purge logical: should \code{sheet} be created anew, by first removing the previous copy if it exists? (default \code{FALSE})
##' @param digits numeric: how many digits (after the decimal point) to show in the percents? Defaults to 1 if n>=500, 0 otherwise.
##' @param useNA How to handle missing values. Passed on to \code{\link{table}} (see help on that function for options).
##' @param percents logical: would you like only a count table (\code{FALSE}), or also a percents table side-by-side with the the count table (\code{TRUE}, default)?
##' @param combine logical: should counts and percents be combined to the popular \code{"Count(percent)"} format, or presented side-by-side in separate tables? (default: same value as \code{percents})
##' @param testname string, the *name* of a function to run a significance test on the table. Default `chisq.test`. If you want no test, set \code{testname=NULL}
##' @param pround number of significant digits in test p-value representation. Default 3.
##' @param testBelow logical, should test p-value be placed right below the table? Default \code{FALSE}, which places it next to the table's right edge, one row below the column headings.
##' @param margins logical: should margins with totals be returned? Default \code{TRUE}.
##' @param ... additional arguments as needed, to pass on to \code{get(textfun)}
##'
##' @return The function returns invisibly, after writing the data into \code{sheet}.
##' @example inst/examples/Ex2way.r
##' @author Assaf P. Oron \code{<assaf.oron.at.seattlechildrens.org>}
##' @seealso Uses \code{\link{writeWorksheet}} to access the spreadsheet. See \code{\link{setStyleAction}} to control the output style. If interested in one-way tables, see \code{\link{XLoneWay}}.
##' @note This function uses the internal function \code{fancytab2} which produces 2-way tables with counts, percentages and margins.
##' @export
XLtwoWay<-function(wb,sheet,rowvar,colvar,table1mode=FALSE,sumby=1,rowTitle="",rowNames=NULL,colNames=NULL,ord=NULL,row1=1,col1=1,title=NULL,header=FALSE,purge=FALSE,digits=ifelse(length(rowvar)>=500,1,0),useNA='ifany',percents=TRUE,combine=percents,testname='chisq.test',pround=3,testBelow=FALSE,margins=TRUE,...)
{
### Row and column variables must be paired observations of equal length
if(length(rowvar)!=length(colvar)) stop("x:y length mismatch.\n")
### When called from XLtable1, margins are suppressed (XLtable1 adds its own)
if(table1mode) margins<-FALSE
### Optionally clear the sheet, and create it if it does not yet exist
if(purge) removeSheet(wb,sheet)
if(!existsSheet(wb,sheet)) createSheet(wb,sheet)
### Producing counts and percents table via the internal function 'fancytab2'
tab=fancytab2(rowvar,colvar,sumby=sumby,rowvar=rowTitle,rowNames=rowNames,digits=digits,missings=useNA,margins=margins)
if(!is.null(title)) ### Adding a title
{
XLaddText(wb,sheet,text=title,row1=row1,col1=col1)
row1=row1+1
}
### Default row order: as produced by fancytab2
if (is.null(ord)) ord=1:dim(tab$Counts)[1]
### Caller-supplied column headers (applied to both counts and percents)
if (!is.null(colNames))
{
names(tab$Counts)[-1]=colNames
names(tab$Percent)[-1]=colNames
}
### Horizontal offset separating the counts table from the percents table
widt=dim(tab$Counts)[2]+1
if(combine) ### combining counts and percents to a single table (default)
{
### Fix: refer to the list element by its exact name 'Counts'; the original
### 'tab$Count' relied on fragile `$` partial matching
tabout=as.data.frame(mapply(paste0,tab$Counts[,-1],' (',tab$Percent[,-1],'%)'))
tabout=cbind(tab$Counts[,1],tabout)
names(tabout)[1]=rowTitle
writeWorksheet(wb,tabout[ord,],sheet,startRow=row1,startCol=col1)
} else {
if(percents && header) ### adding headers indicating 'counts' and 'percents'
{
XLaddText(wb,sheet,"Counts:",row1=row1,col1=col1)
XLaddText(wb,sheet,"Percent:",row1=row1,col1=col1+widt)
row1=row1+1
}
writeWorksheet(wb,tab$Counts[ord,],sheet,startRow=row1,startCol=col1)
if(percents) writeWorksheet(wb,tab$Percent[ord,],sheet,startRow=row1,startCol=col1+widt)
}
### Perform test and p-value on table
### Only run when a test is requested and both variables have >1 level
if(!is.null(testname) && length(unique(rowvar))>1 && length(unique(colvar))>1 )
{
### try() keeps export going if the test errors; a non-finite/failed result
### is reported as 'Error' in the output cell
pval=suppressWarnings(try(get(testname)(rowvar,colvar,...)$p.value))
ptext=paste(testname,'p:',ifelse(is.finite(pval),niceRound(pval,pround,plurb=TRUE),'Error'))
### Place the p-value either below the table or beside its right edge
prow=ifelse(testBelow,row1+dim(tab$Counts)[1]+1,row1+1)
pcol=ifelse(testBelow,col1,col1+widt-1)
XLaddText(wb,sheet,ptext,row1=prow,col1=pcol)
}
### Auto-fit the columns spanned by both tables, then save to disk
setColumnWidth(wb, sheet = sheet, column = col1:(col1+2*widt+1), width=-1)
saveWorkbook(wb)
} ### Function end
##' Univariate Statistics Exported to Excel
##'
##' Calculates univariate summary statistics (optionally stratified), exports the formatted output to a spreadsheet, and saves the file.
##'
##' This function evaluates up to 2 univariate functions on the input vector \code{calcvar}, either as a single sample, or grouped by strata defined via \code{colvar} (which is named this way for compatibility with \code{\link{XLtable1}}). It produces a single-column or single-row table (apart from row/column headers), with each interior cell containing the formatted results from the two functions. The table is exported to a spreadsheet and the file is saved.
##'
##' The cell can be formatted to show a combined result, e.g. "Mean (SD)" which is the default. Tne function is quite mutable: both \code{fun1$fun, fun2$fun} and the strings separating their formatted output can be user-defined. The functions can return either a string (i.e., a formatted output) or a number that will be interpreted as a string in subsequent formatting.
##' The default calls \code{\link{roundmean},\link{roundSD}} and prints the summaries in \code{"mean(SD)"} format.
##'
##' See the \code{\link{XLtwoWay}} help page, for behavior regarding new-sheet creation, overwriting, etc.
##' @return The function returns invisibly, after writing the data into \code{sheet} and saving the file.
##'
##' @author Assaf P. Oron \code{<assaf.oron.at.seattlechildrens.org>}
##' @seealso Uses \code{\link{writeWorksheet}} to access the spreadsheet, \code{\link{rangeString}} for some utilities that can be used as \code{fun1$fun,fun2$fun}. For one-way (univariate) contingency tables, \code{\link{XLoneWay}}.
##'
##'
##' @example inst/examples/ExUnivar.r
##' @param wb a \code{\link[XLConnect]{workbook-class}} object
##' @param sheet numeric or character: a worksheet name (character) or position (numeric) within \code{wb}.
##' @param calcvar vector: variable to calculate the statistics for (usually numeric, can be logical).
##' @param colvar vector: categorical variable to stratify \code{calcvar}'s summaries over. Will show as columns in output only if \code{sideBySide=TRUE}; otherwise as rows. Default behavior if left unspecified, is to calculate overall summaries for a single row/column output.
##' @param table1mode logical: is the function called from \code{\link{XLtable1}}? If \code{TRUE}, some modifications will be made to the output. Default \code{FALSE}.
##' @param fun1,fun2 two lists describing the utility functions that will calculate the statistics. Each list has a \code{fun} component for the function, and a \code{name} component for its name as it would appear in the column header.
##' @param seps character vector of length 3, specifying the formatted separators before the output of \code{fun1$fun}, between the two outputs, and after the output of \code{fun2$fun}. Default behavior encloses the second output in parentheses. See 'Examples'.
##' @param sideBySide logical: should output be arranged horizontally rather than vertically? Default \code{FALSE}.
##' @param title character: an optional overall title to the table. Default (\code{NULL}) is no title.
##' @param rowTitle character: the title to be placed above the row name column (default empty string)
##' @param rowNames character vector of row names. Default behavior (\code{NULL}): automatically determined from data
##' @param colNames column names for stratifying variable, used when \code{sideBySide=TRUE}. Default: equal to \code{rowNames}.
##' @param ord numeric vector specifying row-index order (i.e., a re-ordering of \code{rowvar}'s levels) in the produced table. Default (\code{NULL}) is no re-ordering.
##' @param row1,col1 numeric: the first row and column occupied by the table (title included if relevant).
##' @param purge logical: should \code{sheet} be created anew, by first removing the previous copy if it exists? (default \code{FALSE})
##' @param ... parameters passed on to \code{fun1$fun,fun2$fun}
##'
##' @export
XLunivariate<-function(wb,sheet,calcvar,colvar=rep("",length(calcvar)),table1mode=FALSE,fun1=list(fun=roundmean,name="Mean"),fun2=list(fun=roundSD,name="SD"),seps=c('',' (',')'),sideBySide=FALSE,title=NULL,rowTitle="",rowNames=NULL,colNames=rowNames,ord=NULL,row1=1,col1=1,purge=FALSE,...)
{
# When called from XLtable1, adjust layout conventions: multi-level strata
# go side-by-side, and the row title becomes the (single) row name
if(table1mode)
{
if(length(unique(colvar))>1) sideBySide<-TRUE
if(is.null(colvar)) colvar=rep("",length(calcvar))
rowNames=rowTitle
rowTitle=""
}
# Optionally clear the sheet, and create it if it does not yet exist
if(purge) removeSheet(wb,sheet)
if(!existsSheet(wb,sheet)) createSheet(wb,sheet)
# Evaluate both summary functions within each stratum of colvar
# (default colvar is a single empty-string stratum, i.e. overall summaries)
num1=tapply(calcvar,colvar,fun1$fun,...)
num2=tapply(calcvar,colvar,fun2$fun,...)
if (is.null(ord)) ord=1:length(num1)
if(length(ord)!=length(num1)) stop("Argument 'ord' in XLunivariate has wrong length.")
if (is.null(rowNames)) rowNames=names(num1)
# Column header combining both function names, e.g. "Mean (SD)"
statname=paste(seps[1],fun1$name,seps[2],fun2$name,seps[3],sep='')
if(sideBySide)
{
# Horizontal layout: one row, one column per stratum (in 'ord' order)
outdat=data.frame(statname)
if(table1mode) {rowTitle=statname;outdat[1]=rowNames}
for (a in ord) outdat=cbind(outdat,paste(seps[1],num1[a],seps[2],num2[a],seps[3],sep=''))
if(!is.null(colNames) && length(colNames)==length(ord)) names(num1)=colNames
names(outdat)=c(rowTitle,names(num1)[ord])
# Row ordering already applied while building columns above, so the
# final export below selects the single assembled row
ord=1
} else {
# Vertical layout (default): one row per stratum, one formatted column
outdat=data.frame(cbind(rowNames,paste(seps[1],num1,seps[2],num2,seps[3],sep='')))
names(outdat)=c(rowTitle,statname)
}
if(!is.null(title)) ### Adding a title
{
XLaddText(wb,sheet,text=title,row1=row1,col1=col1)
row1=row1+1
}
# Export, auto-fit the affected columns, and save the workbook to file
writeWorksheet(wb,outdat[ord,],sheet,startRow=row1,startCol=col1)
setColumnWidth(wb, sheet = sheet, column = col1:(col1+3), width=-1)
saveWorkbook(wb)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/antsrTransform_class.R
\name{readAntsrTransform}
\alias{readAntsrTransform}
\title{readAntsrTransform}
\usage{
readAntsrTransform(filename, dimension = 3, precision = "float")
}
\arguments{
\item{filename}{filename of transform}
\item{dimension}{spatial dimension of transform}
\item{precision}{numerical precision of transform}
}
\value{
antsrTransform
}
\description{
read a transform from file
}
\examples{
\dontrun{
tx = readAntsrTransform( "yourtx.mat")
}
}
| /man/readAntsrTransform.Rd | permissive | Pe6r0/ANTsRCoreTest | R | false | true | 543 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/antsrTransform_class.R
\name{readAntsrTransform}
\alias{readAntsrTransform}
\title{readAntsrTransform}
\usage{
readAntsrTransform(filename, dimension = 3, precision = "float")
}
\arguments{
\item{filename}{filename of transform}
\item{dimension}{spatial dimension of transform}
\item{precision}{numerical precision of transform}
}
\value{
antsrTransform
}
\description{
read a transform from file
}
\examples{
\dontrun{
tx = readAntsrTransform( "yourtx.mat")
}
}
|
####################
#### TITLE: Variance estimation at group level + CI coverage
#### Contents:
####
#### Source Files:
#### First Modified: 14/06/2018
#### Notes:
#################
##
###############
### Notes
###############
##
# Simulate fMRI time series per subject using a GLM with between-subject variability.
# I will simulate on grid, but only save middle voxel (no smoothing).
# Then estimate the variance using FSL mixed effects + R lmer.
# Then construct CI and calculate empirical coverage (EC) of the CI
# around the true parameter.
##
###############
### Preparation
###############
##
# Let us just run it locally for now
for(l in 1:200){
print(l)
# Take argument from master file
input <- commandArgs(TRUE)
# K'th simulation
hpcID <- try(as.numeric(as.character(input)[1]),silent=TRUE)
# Which machine
MACHINE <- try(as.character(input)[2],silent=TRUE)
# DataWrite directory: where all temporary files are written to
DataWrite <- try(as.character(input)[3],silent=TRUE)
# If no machine is specified, then it has to be this machine!
if(is.na(MACHINE)){
MACHINE <- 'MAC'
hpcID <- l
DataWrite <- '~/Desktop/VAR2LVL'
}
# Give path to FSL
if(MACHINE=='HPC'){
fslpath <- ''
}
if(MACHINE=='MAC'){
fslpath <- '/usr/local/fsl/bin/'
}
# Implement for loop over r iterations here: hpcID goes from 1 to 100 in master file
rIter <- 10
startIndex <- try(1 + rIter * (hpcID - 1), silent = TRUE)
endIndex <- try(startIndex + (rIter - 1), silent = TRUE)
# Set WD: this is location where results are written
if(MACHINE=='HPC'){
wd <- '/user/scratch/gent/gvo000/gvo00022/vsc40728/Variance_2lvl/'
}
if(MACHINE=='MAC'){
wd <- '/Volumes/2_TB_WD_Elements_10B8_Han/PhD/Simulation/Results/Variance_2lvl/'
}
# Load in libraries
library(AnalyzeFMRI)
library(lattice)
library(gridExtra)
library(oro.nifti)
library(ggplot2)
library(dplyr)
library(tibble)
library(tidyr)
library(reshape2)
library(lme4)
library(MASS)
library(RColorBrewer)
library(mvmeta)
library(metafor)
library(devtools)
library(neuRosim)
library(NeuRRoStat)
library(fMRIGI)
##
###############
### Functions
###############
##
##
###############
### Simulation parameters
###############
##
###################
#### Global variables
###################
# Number of subjects per simulated dataset
nsub <- 50
# Within-subject residual noise SD (used as sd= in rnorm below)
sigma_eps <- 100
# Between-subject variability of the random slope. NOTE: despite the original
# "variance" label this is a standard deviation -- it is squared further down
# when var_cov_D is built. The vector lists candidate conditions; [3] picks one.
sigma_b2 <- c(0, 5, 10)[3]
# Between-subject variability of the random intercept (also an SD; squared
# when var_cov_D is built)
sigma_b1 <- c(0, 1, 1)[3]
###################
#### Data characteristics
###################
# Signal characteristics: TR in seconds, number of scans, total run length (s)
TR <- 2
nscan <- 200
total <- TR*nscan
# Block onsets every 40 s, each block lasting 20 s
on1 <- seq(1,total,40)
onsets <- list(on1)
duration <- list(20)
###################
#### Generate a design: GROUND TRUTH DESIGN
###################
# true %BOLD change: the population-level slope the CIs should cover
BOLDC <- 3
# Base/intercept of signal
intcpt <- 100
#######################################
#### DESIGN AND SIGNAL TIME SERIES ####
#######################################
# Generating a design matrix: convolution of block design with double-gamma HRF
X <- neuRosim::simprepTemporal(total,1,onsets = onsets,
effectsize = 1, durations = duration,
TR = TR, acc = 0.1, hrf = "double-gamma")
# X vector for one subject = predicted (noise-free) signal
X_s <- neuRosim::simTSfmri(design=X, base=0, SNR=1, noise="none", verbose=FALSE)
# Now the model will be: (intcpt + b1) + (BOLDC + b2) * pred + epsilon
## Design parameters
# Extend the design matrix with the intercept
xIN <- cbind(intcpt, X_s)
# Contrast: not interested in intercept
CONTRAST <- matrix(c(0,1),nrow=1)
# Calculate c (X'X)^(-1) c' for the contrast.
# NOTE(review): design_factor does not appear to be used later in this
# script -- confirm whether it can be dropped.
design_factor <- CONTRAST %*% (solve(t(xIN) %*% xIN )) %*% t(CONTRAST)
##################
#### GENERATE DATA
##################
# Accumulators for per-iteration results (filled inside the loop below)
LMER_res <- FLAME_res <- comb_res <- data.frame() %>% as_tibble()
# Start some iterations (increases efficiency since iterations run very fast)
for(ID in startIndex:endIndex){
# Reproducible but iteration-specific seed
starting.seed <- pi*ID
set.seed(starting.seed)
# Generate D matrix: variance-covariance matrix of random intercept + slope.
# Off-diagonals are 0, i.e. intercept and slope are simulated as independent.
var_cov_D <- rbind(c(sigma_b1**2, 0), c(0, sigma_b2**2))
# Draw subject-specific deviations (intercept, slope) from N(0, D)
B_matrix <- MASS::mvrnorm(nsub, mu=c(0,0), Sigma = var_cov_D)
# Long-format data frame that will accumulate all subjects' time series
Y <- data.frame() %>% as_tibble()
# For loop over all subjects
for(i in 1:nsub){
# Generate nscan values, corresponding to time series of one subject:
# (fixed + random intercept) + (fixed + random slope) * design + noise
Y_s <- (intcpt + B_matrix[i,1]) + ((BOLDC + B_matrix[i,2]) * X_s) +
rnorm(n = nscan, mean = 0, sd = sigma_eps)
# Append this subject's rows (bind_rows in a loop is fine here: nsub is small)
Y <- data.frame(Y = Y_s, X = X_s, sub = as.integer(i)) %>% as_tibble() %>%
bind_rows(Y, .)
}
#############################################
#### LINEAR MIXED MODEL APPROACH USING R ####
#############################################
# Fit model with random intercept and random slope for subject.
# Get coefficients using tidy.
LMER_results <- broom::tidy(lmer(Y ~ 1 + X + (1 + X|sub), data = Y)) %>%
as_tibble() %>% mutate(sim = ID)
#####################################################
#### LINEAR MIXED MODEL APPROACH USING FSL FLAME ####
#####################################################
# For this, we need to first analyze each subject individually, save COPE and VARCOPE
# and then proceed.
# We call this object secLevel
secLevel <- Y %>%
group_by(sub) %>%
do(.,
# For each subject, fit linear model with an intercept and X as predictors
broom::tidy(
lm(Y ~ 1 + X, data = .))) %>%
# Filter on predictor
filter(term == 'X') %>%
# Now select the estimate and standard error
dplyr::select(sub, estimate, std.error) %>%
# Create variance (VARCOPE = squared standard error)
mutate(varCope = std.error^2)
# Create 4D images (all 8 voxels of the 2x2x2 volume hold the same value per
# subject), otherwise FSL crashes! Then convert estimates/variance to nifti.
COPE4D <- nifti(img=array(rep(as.numeric(secLevel$estimate), each = 8),
dim=c(2,2,2,nsub)),
dim=c(2,2,2,nsub), datatype = 16)
VARCOPE4D <- nifti(img=array(rep(as.numeric(secLevel$varCope), each = 8),
dim=c(2,2,2,nsub)),
dim=c(2,2,2,nsub), datatype = 16)
# Write them to DataWrite
writeNIfTI(COPE4D, filename = paste(DataWrite,'/COPE',sep=''), gzipped=FALSE)
writeNIfTI(VARCOPE4D, filename = paste(DataWrite,'/VARCOPE',sep=''), gzipped=FALSE)
# Write auxiliary files to DataWrite. We need:
# GRCOPE in nifti
# GRVARCOPE in nifti
# 4D mask
# design.mat file
# design.grp file
# design.con file
#----- 1 ----#
### Design.mat
fileCon <- paste(DataWrite,"/design.mat",sep="")
# Text to be written to the file. The embedded newlines inside the quoted
# strings are part of the FSL file format -- do not re-indent this call.
cat('/NumWaves\t1
/NumPoints\t',paste(nsub,sep=''),'
/PPheights\t\t1.000000e+00
/Matrix
',rep("1.000000e+00\n",nsub),file=fileCon)
#----- 2 ----#
### Design.con
fileCon <- file(paste(DataWrite,"/design.con", sep=""))
writeLines('/ContrastName1 Group Average
/NumWaves 1
/NumContrasts 1
/PPheights 1.000000e+00
/RequiredEffect 5.034
/Matrix
1.000000e+00
',fileCon)
close(fileCon)
#----- 3 ----#
### Design.grp
fileCon <- paste(DataWrite,"/design.grp",sep="")
# Text to be written to the file (one group, so nsub lines of "1")
cat('/NumWaves\t1
/NumPoints\t',paste(nsub,sep=''),'
/Matrix
',rep("1\n",nsub),file=fileCon)
#----- 4 ----#
### mask.nii (all ones: every voxel of the dummy volume is inside the mask)
mask <- nifti(img=array(1, dim=c(2,2,2,nsub)), dim=c(2,2,2,nsub), datatype=2)
writeNIfTI(mask, filename = paste(DataWrite,'/mask',sep=''), gzipped=FALSE)
# FSL TIME!
# NOTE(review): setwd() changes global state for the rest of the session;
# flameo is run from inside DataWrite so the relative file names resolve.
setwd(DataWrite)
command <- paste(fslpath, 'flameo --cope=COPE --vc=VARCOPE --mask=mask --ld=FSL_stats --dm=design.mat --cs=design.grp --tc=design.con --runmode=flame1', sep='')
# Make FSL write plain .nii output so readNIfTI below finds the files
Sys.setenv(FSLOUTPUTTYPE="NIFTI")
system(command)
# Read back results: only voxel [1,1,1] is needed (all voxels are identical)
FLAME_results <- data.frame(value = c(
readNIfTI(paste(DataWrite,"/FSL_stats/cope1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1],
readNIfTI(paste(DataWrite,"/FSL_stats/varcope1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1])) %>%
mutate(parameter = c('estimate', 'variance'))
# Degrees of freedom of the FLAME t-statistic:
tdof_t1 <- readNIfTI(paste(DataWrite,"/FSL_stats/tdof_t1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1]
# The estimated between-subject variability
var_bsub <- readNIfTI(paste(DataWrite,"/FSL_stats/mean_random_effects_var1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1]
############################################################
#### CONSTRUCT 95% CONFIDENCE INTERVALS AND CALCULATE EC ####
############################################################
# Tidy the lmer() fit: keep the slope term, build a 95% CI around the estimate
# (using FLAME's t degrees of freedom so both methods share the same reference
# distribution) and record empirical coverage (EC) of the true %BOLD change.
LMER_res <-
LMER_results %>% filter(term == 'X') %>%
dplyr::select(term, estimate, std.error) %>%
# CI around beta: using std.error of parameter!
mutate(CIlow = estimate - qt(0.975, df = tdof_t1) * std.error,
CIup = estimate + qt(0.975, df = tdof_t1) * std.error) %>%
# EC = 1 when the true effect (BOLDC) falls inside the CI
mutate(EC = ifelse(BOLDC >= CIlow & BOLDC <= CIup, 1, 0)) %>%
# Now select the estimate of between-subject variability (SD)
mutate(sd_X.sub = unlist(LMER_results %>% filter(term == 'sd_X.sub') %>%
dplyr::select(estimate))) %>%
# Add variance of parameter estimate (VARCOPE)
mutate(variance = std.error^2) %>%
# Re-arrange and rename in a single select; the former select + chain of
# identity renames (estimate = estimate, ...) was redundant.
dplyr::select(estimate, SE_beta = std.error, variance, SD_bsub = sd_X.sub,
CIlow, CIup, EC) %>%
mutate(type = 'LMER', simID = ID)
# FLAME results: spread estimate/variance into columns, then build the same
# 95% CI and empirical-coverage indicator as for LMER above.
FLAME_res <- FLAME_results %>%
tidyr::spread(key = parameter, value = value) %>%
mutate(CIlow = estimate - qt(0.975, df = tdof_t1) * sqrt(variance),
CIup = estimate + qt(0.975, df = tdof_t1) * sqrt(variance)) %>%
mutate(EC = ifelse(BOLDC >= CIlow & BOLDC <= CIup, 1, 0)) %>%
# Add info and rename data object
mutate(type = 'FLAME', simID = ID, SE_beta = sqrt(variance),
SD_bsub = sqrt(var_bsub)) %>%
# Re-order so the columns line up with LMER_res for bind_rows below
dplyr::select(estimate, SE_beta, variance, SD_bsub, CIlow, CIup,
EC, type, simID) %>% as_tibble()
#########################################
#### COMBINE DATA AND WRITE TO FILES ####
#########################################
comb_res <- bind_rows(comb_res, LMER_res, FLAME_res)
# Remove objects in DataWrite folder.
# CAUTION: this shells out to `rm -r DataWrite/*`; DataWrite must be a
# dedicated scratch directory.
command <- paste0('rm -r ', DataWrite, '/*')
system(command)
}
# Save all iterations of this job as one R object
saveRDS(comb_res, file = paste0(wd, 'Results_bsub_',sigma_b2,'/VAR2LVL_', hpcID, '.rda'))
# Reset the workspace for the next iteration of the outer l loop.
# NOTE(review): rm(list = ls()) also removes the loop counter l; the for loop
# still advances because R re-assigns it each iteration, but this is fragile.
rm(list = ls())
}
| /1_Scripts/CI_IBMAvsGLM/Simulations/Activation/Variance_2levels/var_2lvl.R | permissive | NeuroStat/IBMAvsGLM | R | false | false | 11,253 | r | ####################
#### TITLE: Variance estimation at group level + CI coverage
#### Contents:
####
#### Source Files:
#### First Modified: 14/06/2018
#### Notes:
#################
##
###############
### Notes
###############
##
# Simulate fMRI time series per subject using a GLM with between-subject variability.
# I will simulate on grid, but only save middle voxel (no smoothing).
# Then estimate the variance using FSL mixed effects + R lmer.
# Then construct CI and calculate empirical coverage (EC) of the CI
# around the true parameter.
##
###############
### Preparation
###############
##
# Let us just run it locally for now
for(l in 1:200){
print(l)
# Take argument from master file (empty when run interactively)
input <- commandArgs(TRUE)
# K'th simulation
hpcID <- try(as.numeric(as.character(input)[1]),silent=TRUE)
# Which machine
MACHINE <- try(as.character(input)[2],silent=TRUE)
# DataWrite directory: where all temporary files are written to
DataWrite <- try(as.character(input)[3],silent=TRUE)
# If no machine is specified, then it has to be this machine!
# NOTE(review): when real command-line args ARE supplied, every iteration of
# the outer l loop reuses the same hpcID -- confirm the loop is only meant
# for local (MAC) runs.
if(is.na(MACHINE)){
MACHINE <- 'MAC'
hpcID <- l
DataWrite <- '~/Desktop/VAR2LVL'
}
# Give path to FSL ('' on HPC: flameo is assumed to be on the PATH there)
if(MACHINE=='HPC'){
fslpath <- ''
}
if(MACHINE=='MAC'){
fslpath <- '/usr/local/fsl/bin/'
}
# Implement for loop over r iterations here: hpcID goes from 1 to 100 in master file
rIter <- 10
startIndex <- try(1 + rIter * (hpcID - 1), silent = TRUE)
endIndex <- try(startIndex + (rIter - 1), silent = TRUE)
# Set WD: this is location where results are written
if(MACHINE=='HPC'){
wd <- '/user/scratch/gent/gvo000/gvo00022/vsc40728/Variance_2lvl/'
}
if(MACHINE=='MAC'){
wd <- '/Volumes/2_TB_WD_Elements_10B8_Han/PhD/Simulation/Results/Variance_2lvl/'
}
# Load in libraries
library(AnalyzeFMRI)
library(lattice)
library(gridExtra)
library(oro.nifti)
library(ggplot2)
library(dplyr)
library(tibble)
library(tidyr)
library(reshape2)
library(lme4)
library(MASS)
library(RColorBrewer)
library(mvmeta)
library(metafor)
library(devtools)
library(neuRosim)
library(NeuRRoStat)
library(fMRIGI)
##
###############
### Functions
###############
##
##
###############
### Simulation parameters
###############
##
###################
#### Global variables
###################
# Number of subjects
nsub <- 50
# Value for sigma in the model
sigma_eps <- 100
# Between subject variability (variance of random slope)
sigma_b2 <- c(0, 5, 10)[3]
# Variance of random intercept
sigma_b1 <- c(0, 1, 1)[3]
###################
#### Data characteristics
###################
# Signal characteristics
TR <- 2
nscan <- 200
total <- TR*nscan
on1 <- seq(1,total,40)
onsets <- list(on1)
duration <- list(20)
###################
#### Generate a design: GROUND TRUTH DESIGN
###################
# true %BOLD change
BOLDC <- 3
# Base/intercept of signal
intcpt <- 100
#######################################
#### DESIGN AND SIGNAL TIME SERIES ####
#######################################
# Generating a design matrix: convolution of block design with double-gamma HRF
X <- neuRosim::simprepTemporal(total,1,onsets = onsets,
effectsize = 1, durations = duration,
TR = TR, acc = 0.1, hrf = "double-gamma")
# X vector for one subject = predicted signal
X_s <- neuRosim::simTSfmri(design=X, base=0, SNR=1, noise="none", verbose=FALSE)
# Now the model will be: (intcpt + b1) + (BOLDC + b2) * pred + epsilon
## Design parameters
# Extend the design matrix with the intercept
xIN <- cbind(intcpt, X_s)
# Contrast: not interested in intercept
CONTRAST <- matrix(c(0,1),nrow=1)
# Calculate (X'X)^(-1) with contrast
design_factor <- CONTRAST %*% (solve(t(xIN) %*% xIN )) %*% t(CONTRAST)
##################
#### GENERATE DATA
##################
# Empty lmer results data frame
LMER_res <- FLAME_res <- comb_res <- data.frame() %>% as_tibble()
# Start some iterations (increases efficiency since iterations run very fast)
for(ID in startIndex:endIndex){
# Set starting seed
starting.seed <- pi*ID
set.seed(starting.seed)
# Generate D matrix: variance-covariance matrix of random intercept + slope
# Variance of slope = sigma_b**2
var_cov_D <- rbind(c(sigma_b1**2, 0), c(0, sigma_b2**2))
# Generate the subject-specific values for intercept and slope using this D-matrix
B_matrix <- MASS::mvrnorm(nsub, mu=c(0,0), Sigma = var_cov_D)
# Empty vector
Y <- data.frame() %>% as_tibble()
# For loop over all subjects
for(i in 1:nsub){
# Generate nscan values, corresponding to time series of one subject
# note: random intercept and random slope generated earlier
Y_s <- (intcpt + B_matrix[i,1]) + ((BOLDC + B_matrix[i,2]) * X_s) +
rnorm(n = nscan, mean = 0, sd = sigma_eps)
# Add to data frame
Y <- data.frame(Y = Y_s, X = X_s, sub = as.integer(i)) %>% as_tibble() %>%
bind_rows(Y, .)
}
#############################################
#### LINEAR MIXED MODEL APPROACH USING R ####
#############################################
# Fit model with random intercept and random slope for subject.
# Get coefficients using tidy.
LMER_results <- broom::tidy(lmer(Y ~ 1 + X + (1 + X|sub), data = Y)) %>%
as_tibble() %>% mutate(sim = ID)
#################################################
#### LINEAR MIXED MODEL APPROACH USING FSL FLAME ####
#################################################
# For this, we need to first analyze each subject individually, save COPE and VARCOPE
# and then proceed.
# We call this object secLevel
secLevel <- Y %>%
group_by(sub) %>%
do(.,
# For each subject, fit linear model with an intercept and X as predictors
broom::tidy(
lm(Y ~ 1 + X, data = .))) %>%
# Filter on predictor
filter(term == 'X') %>%
# Now select the estimate and standard error
dplyr::select(sub, estimate, std.error) %>%
# Create variance
mutate(varCope = std.error^2)
# Create 4D images (all voxels in first 3 dimensions are the same), otherwise FSL crashes!
# Then convert the estimates and variance to nifti images
COPE4D <- nifti(img=array(rep(as.numeric(secLevel$estimate), each = 8),
dim=c(2,2,2,nsub)),
dim=c(2,2,2,nsub), datatype = 16)
VARCOPE4D <- nifti(img=array(rep(as.numeric(secLevel$varCope), each = 8),
dim=c(2,2,2,nsub)),
dim=c(2,2,2,nsub), datatype = 16)
# Write them to DataWrite
writeNIfTI(COPE4D, filename = paste(DataWrite,'/COPE',sep=''), gzipped=FALSE)
writeNIfTI(VARCOPE4D, filename = paste(DataWrite,'/VARCOPE',sep=''), gzipped=FALSE)
# Write auxiliary files to DataWrite. We need:
# GRCOPE in nifti
# GRVARCOPE in nifti
# 4D mask
# design.mat file
# design.grp file
# design.con file
#----- 1 ----#
### Design.mat
fileCon <- paste(DataWrite,"/design.mat",sep="")
# Text to be written to the file
cat('/NumWaves\t1
/NumPoints\t',paste(nsub,sep=''),'
/PPheights\t\t1.000000e+00
/Matrix
',rep("1.000000e+00\n",nsub),file=fileCon)
#----- 2 ----#
### Design.con
fileCon <- file(paste(DataWrite,"/design.con", sep=""))
writeLines('/ContrastName1 Group Average
/NumWaves 1
/NumContrasts 1
/PPheights 1.000000e+00
/RequiredEffect 5.034
/Matrix
1.000000e+00
',fileCon)
close(fileCon)
#----- 3 ----#
### Design.grp
fileCon <- paste(DataWrite,"/design.grp",sep="")
# Text to be written to the file
cat('/NumWaves\t1
/NumPoints\t',paste(nsub,sep=''),'
/Matrix
',rep("1\n",nsub),file=fileCon)
#----- 4 ----#
### mask.nii
mask <- nifti(img=array(1, dim=c(2,2,2,nsub)), dim=c(2,2,2,nsub), datatype=2)
writeNIfTI(mask, filename = paste(DataWrite,'/mask',sep=''), gzipped=FALSE)
# FSL TIME!
setwd(DataWrite)
command <- paste(fslpath, 'flameo --cope=COPE --vc=VARCOPE --mask=mask --ld=FSL_stats --dm=design.mat --cs=design.grp --tc=design.con --runmode=flame1', sep='')
Sys.setenv(FSLOUTPUTTYPE="NIFTI")
system(command)
# Read back results
FLAME_results <- data.frame(value = c(
readNIfTI(paste(DataWrite,"/FSL_stats/cope1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1],
readNIfTI(paste(DataWrite,"/FSL_stats/varcope1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1])) %>%
mutate(parameter = c('estimate', 'variance'))
# Degrees of freedom:
tdof_t1 <- readNIfTI(paste(DataWrite,"/FSL_stats/tdof_t1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1]
# The estimated between-subject variability
var_bsub <- readNIfTI(paste(DataWrite,"/FSL_stats/mean_random_effects_var1.nii",sep=""),
verbose=FALSE, warn=-1, reorient=TRUE,
call=NULL)[1,1,1]
############################################################
#### CONSTRUCT 95% CONFIDENCE INTERVALS AND CALCULATE EC ####
############################################################
# Tidy the lmer() fit: keep the slope term, build a 95% CI around the estimate
# (using FLAME's t degrees of freedom so both methods share the same reference
# distribution) and record empirical coverage (EC) of the true %BOLD change.
LMER_res <-
LMER_results %>% filter(term == 'X') %>%
dplyr::select(term, estimate, std.error) %>%
# CI around beta: using std.error of parameter!
mutate(CIlow = estimate - qt(0.975, df = tdof_t1) * std.error,
CIup = estimate + qt(0.975, df = tdof_t1) * std.error) %>%
# EC = 1 when the true effect (BOLDC) falls inside the CI
mutate(EC = ifelse(BOLDC >= CIlow & BOLDC <= CIup, 1, 0)) %>%
# Now select the estimate of between-subject variability (SD)
mutate(sd_X.sub = unlist(LMER_results %>% filter(term == 'sd_X.sub') %>%
dplyr::select(estimate))) %>%
# Add variance of parameter estimate (VARCOPE)
mutate(variance = std.error^2) %>%
# Re-arrange and rename in a single select; the former select + chain of
# identity renames (estimate = estimate, ...) was redundant.
dplyr::select(estimate, SE_beta = std.error, variance, SD_bsub = sd_X.sub,
CIlow, CIup, EC) %>%
mutate(type = 'LMER', simID = ID)
FLAME_res <- FLAME_results %>%
tidyr::spread(key = parameter, value = value) %>%
mutate(CIlow = estimate - qt(0.975, df = tdof_t1) * sqrt(variance),
CIup = estimate + qt(0.975, df = tdof_t1) * sqrt(variance)) %>%
mutate(EC = ifelse(BOLDC >= CIlow & BOLDC <= CIup, 1, 0)) %>%
# Add info and rename data object
mutate(type = 'FLAME', simID = ID, SE_beta = sqrt(variance),
SD_bsub = sqrt(var_bsub)) %>%
# Re-order
dplyr::select(estimate, SE_beta, variance, SD_bsub, CIlow, CIup,
EC, type, simID) %>% as_tibble()
#########################################
#### COMBINE DATA AND WRITE TO FILES ####
#########################################
comb_res <- bind_rows(comb_res, LMER_res, FLAME_res)
# Remove objects in DataWrite folder
command <- paste0('rm -r ', DataWrite, '/*')
system(command)
}
# Save R object
saveRDS(comb_res, file = paste0(wd, 'Results_bsub_',sigma_b2,'/VAR2LVL_', hpcID, '.rda'))
# Reset
rm(list = ls())
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/responses.R
\name{load_responses_all}
\alias{load_responses_all}
\title{Load all response datasets in a local directory}
\usage{
load_responses_all(params, contingency_run = FALSE)
}
\arguments{
\item{params}{a named list containing a value named "input", a vector of
paths to load by the function, and "input_dir", the directory where the
input files are found}
\item{contingency_run}{boolean indicating if currently running contingency
code}
}
\value{
A data frame of all loaded data files concatenated into one data
frame
}
\description{
Note that if some columns are not present in all files -- for example, if
survey questions changed and so newer data files have different columns --
the resulting data frame will contain all columns, with NAs in rows where
that column was not present.
}
| /facebook/delphiFacebook/man/load_responses_all.Rd | permissive | alexcoda/covidcast-indicators | R | false | true | 878 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/responses.R
\name{load_responses_all}
\alias{load_responses_all}
\title{Load all response datasets in a local directory}
\usage{
load_responses_all(params, contingency_run = FALSE)
}
\arguments{
\item{params}{a named list containing a value named "input", a vector of
paths to load by the function, and "input_dir", the directory where the
input files are found}
\item{contingency_run}{boolean indicating if currently running contingency
code}
}
\value{
A data frame of all loaded data files concatenated into one data
frame
}
\description{
Note that if some columns are not present in all files -- for example, if
survey questions changed and so newer data files have different columns --
the resulting data frame will contain all columns, with NAs in rows where
that column was not present.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{media_put_spot_orders_archive}
\alias{media_put_spot_orders_archive}
\title{Update the archive status of this object}
\usage{
media_put_spot_orders_archive(id, status)
}
\arguments{
\item{id}{integer required. The ID of the object.}
\item{status}{boolean required. The desired archived status of the object.}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID for the spot order.}
\item{archived}{string, The archival status of the requested object(s).}
\item{csvS3Uri}{string, S3 URI for the spot order CSV file.}
\item{jsonS3Uri}{string, S3 URI for the spot order JSON file.}
\item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.}
\item{lastTransformJobId}{integer, ID of the spot order transformation job.}
}
\description{
Update the archive status of this object
}
| /man/media_put_spot_orders_archive.Rd | no_license | JosiahParry/civis-r | R | false | true | 916 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{media_put_spot_orders_archive}
\alias{media_put_spot_orders_archive}
\title{Update the archive status of this object}
\usage{
media_put_spot_orders_archive(id, status)
}
\arguments{
\item{id}{integer required. The ID of the object.}
\item{status}{boolean required. The desired archived status of the object.}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID for the spot order.}
\item{archived}{string, The archival status of the requested object(s).}
\item{csvS3Uri}{string, S3 URI for the spot order CSV file.}
\item{jsonS3Uri}{string, S3 URI for the spot order JSON file.}
\item{xmlArchiveS3Uri}{string, S3 URI for the spot order XML archive.}
\item{lastTransformJobId}{integer, ID of the spot order transformation job.}
}
\description{
Update the archive status of this object
}
|
\name{as.symDMatrix.character}
\alias{as.symDMatrix.character}
\title{Coerce a Character Vector to a symDMatrix Object}
\description{
This function creates a \code{symDMatrix} object from a character vector of
path names to \code{RData} files, each containing exactly one
\code{ff_matrix} object that is used as a block, and is useful for
distributed computing where each block is processed on a different node.
}
\usage{
\method{as.symDMatrix}{character}(x, ...)
}
\arguments{
\item{x}{
A character vector with path names to \code{RData} files.
}
\item{...}{
Additional arguments (currently unused).
}
}
\details{
The \code{RData} files must be ordered by block: \code{G11, G12, G13, ...,
G1q, G22, G23, ..., G2q, ..., Gqq}. The matrix-like objects are
initialized similarly to \code{load.symDMatrix}.
}
\value{
A \code{symDMatrix} object.
}
\seealso{
\code{\link[base]{list.files}} to create a character vector of file paths
that match a certain pattern.
}
| /man/as.symDMatrix.character.Rd | no_license | QuantGen/symDMatrix | R | false | false | 1,031 | rd | \name{as.symDMatrix.character}
\alias{as.symDMatrix.character}
\title{Coerce a Character Vector to a symDMatrix Object}
\description{
This function creates a \code{symDMatrix} object from a character vector of
path names to \code{RData} files, each containing exactly one
\code{ff_matrix} object that is used as a block, and is useful for
distributed computing where each block is processed on a different node.
}
\usage{
\method{as.symDMatrix}{character}(x, ...)
}
\arguments{
\item{x}{
A character vector with path names to \code{RData} files.
}
\item{...}{
Additional arguments (currently unused).
}
}
\details{
The \code{RData} files must be ordered by block: \code{G11, G12, G13, ...,
G1q, G22, G23, ..., G2q, ..., Gqq}. The matrix-like objects are
initialized similarly to \code{load.symDMatrix}.
}
\value{
A \code{symDMatrix} object.
}
\seealso{
\code{\link[base]{list.files}} to create a character vector of file paths
that match a certain pattern.
}
|
# haohan
# Exploratory text-classification script: predicts ICEWS protest issue
# categories from event text using RTextTools classifiers.
library(tm)
library(RTextTools)
### ICEWS_NLP ###
# NOTE(review): rm(list=ls()) wipes the entire workspace, and the absolute
# Dropbox paths below only resolve on the author's machine.
rm(list=ls())
load("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews_nlp.Rdata")
icews <- read.csv("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/all_protest_from2001_2014_20151111.csv")
names(icews)
# Re-code issues according to Howard's new schemes
icews$issue2 <- icews$issue_human
icews$issue2[icews$issue2 %in% c(1,13)] <- 18
icews$issue2[icews$issue2 %in% c(2,7)] <- 19
icews$issue2[icews$issue2 %in% c(8,12,14)] <- 20
icews$issue2[icews$issue2 %in% c(16, 17)] <- 21
icews$issue2_f <- factor(icews$issue2, levels = unique(icews$issue2)[order( unique(icews$issue2) )])
issue_num <- unique(icews$issue2)
issue_num
issue_txt <- c( "Social policy", "Econ policy", "Pollution", "Religious",
"Anti-Japan", "Student", "Democracy", "Labor", "Land-Corruption", "Riot-Justice", "Ethnic")
icews$issue2_f2 <- NA
icews$issue2_f2[icews$issue2_f == 3] <- "Social policy"
icews$issue2_f2[icews$issue2_f == 4] <- "Econ policy"
icews$issue2_f2[icews$issue2_f == 5] <- "Pollution"
icews$issue2_f2[icews$issue2_f == 6] <- "Religious"
icews$issue2_f2[icews$issue2_f == 9] <- "Anti-Japan"
icews$issue2_f2[icews$issue2_f == 10] <- "Student"
icews$issue2_f2[icews$issue2_f == 11] <- "Democracy"
icews$issue2_f2[icews$issue2_f == 18] <- "Labor"
icews$issue2_f2[icews$issue2_f == 19] <- "Land-Corruption"
icews$issue2_f2[icews$issue2_f == 20] <- "Riot-Justice"
icews$issue2_f2[icews$issue2_f == 21] <- "Ethnic"
# 15 ismissing
icews$issue2_f2 <- as.factor(icews$issue2_f2)
data.frame(issue_num, issue_txt)
table(icews$issue)
# Recode issue types according to Howard's new coding
list.files("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/")
set.seed(10)
dim(icews)
# Train/test split: 80% of events for training. Use nrow(icews) instead of the
# hard-coded 642 so the split still works if the input file grows; floor()
# makes the previously implicit truncation of 642 * .8 explicit.
n_docs <- nrow(icews)
ntrain <- sample(seq_len(n_docs), floor(n_docs * .8), replace = FALSE); icews_train <- icews[ntrain, ]
ntest <- setdiff(seq_len(n_docs), ntrain); icews_test <- icews[ntest, ]
# Inspect one raw document
as.character(icews$text[3])
#doc_matrix <- create_matrix(icews_train$text, language = "english", removeNumbers = T, stemWords = T,
#                removePunctuation = T, weighting=weightTfIdf)
# Document-term matrix over ALL documents (train + test together), with
# stemming; terms sparser than 99.8% are dropped
doc_matrix <- create_matrix(icews$text, language = "english", removeNumbers = T, stemWords = T,
removeSparseTerms = .998)
#View(inspect(doc_matrix))
# Sanity check: compare raw tokens of document 3 with their stems
tmp_wordvec <- strsplit(as.character(icews$text[3]), " ")[[1]]
data.frame(tmp_wordvec, wordStem( tmp_wordvec ))
str(doc_matrix)
doc_matrix$ncol
doc_matrix$nrow
# Container holding the DTM plus labels, with the train/test split.
# NOTE(review): virgin = F uses the T/F shorthand; prefer FALSE.
container <- create_container(doc_matrix, icews$issue2_f, trainSize = ntrain, testSize = ntest, virgin = F)
# Train one model per algorithm (commented-out learners were dropped)
SVM <- train_model(container,"SVM")
#SVM2 <- train_model(container,"SVM", kernel = "polynomial")
#GLMNET <- train_model(container,"GLMNET")
#MAXENT <- train_model(container,"MAXENT")
#SLDA <- train_model(container,"SLDA")
BOOSTING <- train_model(container,"BOOSTING")
BAGGING <- train_model(container,"BAGGING")
RF <- train_model(container,"RF")
#NNET <- train_model(container,"NNET")
TREE <- train_model(container,"TREE")
# Predict the held-out test documents with each trained model
SVM_CLASSIFY <- classify_model(container, SVM)
#SVM_CLASSIFY2 <- classify_model(container, SVM2)
#GLMNET_CLASSIFY <- classify_model(container, GLMNET)
#MAXENT_CLASSIFY <- classify_model(container, MAXENT)
#SLDA_CLASSIFY <- classify_model(container, SLDA)
BOOSTING_CLASSIFY <- classify_model(container, BOOSTING)
BAGGING_CLASSIFY <- classify_model(container, BAGGING)
RF_CLASSIFY <- classify_model(container, RF)
#NNET_CLASSIFY <- classify_model(container, NNET)
TREE_CLASSIFY <- classify_model(container, TREE)
# Combine the held-out predictions from all trained classifiers.
# NOTE(review): a second create_analytics() call used to follow, referencing
# NNET_CLASSIFY and MAXENT_CLASSIFY; those models are commented out above, so
# that call would fail with "object not found". It has been removed.
analytics <- create_analytics(container,
cbind(SVM_CLASSIFY, #SLDA_CLASSIFY,
BOOSTING_CLASSIFY, BAGGING_CLASSIFY, RF_CLASSIFY,
#NNET_CLASSIFY,
TREE_CLASSIFY
#MAXENT_CLASSIFY
))
summary(analytics)
library(xtable)
# CREATE THE data.frame SUMMARIES
topic_summary <- analytics@label_summary
topic_summary
alg_summary <- analytics@algorithm_summary
alg_summary
ens_summary <-analytics@ensemble_summary
ens_summary
doc_summary <- analytics@document_summary
doc_summary
create_ensembleSummary(analytics@document_summary)
# 4-fold cross-validation per algorithm.
# NOTE(review): these assignments overwrite the trained model objects
# (SVM, BAGGING, ...) created above with cross-validation results.
SVM <- cross_validate(container, 4, "SVM")
#GLMNET <- cross_validate(container, 4, "GLMNET")
MAXENT <- cross_validate(container, 4, "MAXENT")
SLDA <- cross_validate(container, 4, "SLDA")
BAGGING <- cross_validate(container, 4, "BAGGING")
BOOSTING <- cross_validate(container, 4, "BOOSTING")
#RF <- cross_validate(container, 4, "RF")
NNET <- cross_validate(container, 4, "NNET")
TREE <- cross_validate(container, 4, "TREE")
# Code location (for Howard)
############################
load("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp.Rdata")
locs
# Reshape the per-event location list into a wide data frame (at most 20
# locations per event). Size the frame from the actual list length instead of
# the hard-coded 642 so this keeps working if the data change.
locs_d <- as.data.frame(matrix(ncol = 20, nrow = length(locs)))
for (i in seq_along(locs)) {
if (length(locs[[i]]) > 0) {
locs_d[i, 1:length(locs[[i]])] <- locs[[i]]
}
print(i)
}
names(locs_d) <- paste0("location_", 1:20)
names(locs_d)
head(icews)
dim(icews)
# Peek at rows beyond the 642 coded events.
# NOTE(review): assumes the data frame has more than 642 rows -- confirm.
icews[643:657, 2]
# Re-read the full CSV and attach the wide location columns
icews2 <- read.csv("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/all_protest_from2001_2014_20151111.csv")
dim(icews2)
icews_loc <- cbind(icews2, locs_d)
save(locs_d, file = "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp_recode.Rdata")
write.csv(locs_d, "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp_recode.csv")
names(icews_loc)
save(icews_loc, file = "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews+loc.Rdata")
# NOTE(review): the output path below lacks a .csv extension -- likely a typo.
write.csv(icews_loc, "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews+loc")
############################
# http://www.svm-tutorial.com/2014/11/svm-classify-text-r/
# Step 5: Create and train the SVM model
| /codes/haohan.R | no_license | sophielee1/NLP_byline_filter | R | false | false | 6,135 | r | # haohan
library(tm)
library(RTextTools)
### ICEWS_NLP ###
rm(list=ls())
load("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews_nlp.Rdata")
icews <- read.csv("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/all_protest_from2001_2014_20151111.csv")
names(icews)
icews$issue_f <- as.factor(icews$issue)
# Re-code issues according to Howard's new schemes
icews$issue2 <- icews$issue_human
icews$issue2[icews$issue2 %in% c(1,13)] <- 18
icews$issue2[icews$issue2 %in% c(2,7)] <- 19
icews$issue2[icews$issue2 %in% c(8,12,14)] <- 20
icews$issue2[icews$issue2 %in% c(16, 17)] <- 21
icews$issue2_f <- factor(icews$issue2, levels = unique(icews$issue2)[order( unique(icews$issue2) )])
issue_num <- unique(icews$issue2)
issue_num
issue_txt <- c( "Social policy", "Econ policy", "Pollution", "Religious",
"Anti-Japan", "Student", "Democracy", "Labor", "Land-Corruption", "Riot-Justice", "Ethnic")
icews$issue2_f2 <- NA
icews$issue2_f2[icews$issue2_f == 3] <- "Social policy"
icews$issue2_f2[icews$issue2_f == 4] <- "Econ policy"
icews$issue2_f2[icews$issue2_f == 5] <- "Pollution"
icews$issue2_f2[icews$issue2_f == 6] <- "Religious"
icews$issue2_f2[icews$issue2_f == 9] <- "Anti-Japan"
icews$issue2_f2[icews$issue2_f == 10] <- "Student"
icews$issue2_f2[icews$issue2_f == 11] <- "Democracy"
icews$issue2_f2[icews$issue2_f == 18] <- "Labor"
icews$issue2_f2[icews$issue2_f == 19] <- "Land-Corruption"
icews$issue2_f2[icews$issue2_f == 20] <- "Riot-Justice"
icews$issue2_f2[icews$issue2_f == 21] <- "Ethnic"
# 15 ismissing
icews$issue2_f2 <- as.factor(icews$issue2_f2)
data.frame(issue_num, issue_txt)
table(icews$issue)
# Recode issue types according to Howard's new coding
list.files("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/")
set.seed(10)
dim(icews)
# Train/test split: 80% of events for training. Use nrow(icews) instead of the
# hard-coded 642 so the split still works if the input file grows; floor()
# makes the previously implicit truncation of 642 * .8 explicit.
n_docs <- nrow(icews)
ntrain <- sample(seq_len(n_docs), floor(n_docs * .8), replace = FALSE); icews_train <- icews[ntrain, ]
ntest <- setdiff(seq_len(n_docs), ntrain); icews_test <- icews[ntest, ]
as.character(icews$text[3])
#doc_matrix <- create_matrix(icews_train$text, language = "english", removeNumbers = T, stemWords = T,
# removePunctuation = T, weighting=weightTfIdf)
doc_matrix <- create_matrix(icews$text, language = "english", removeNumbers = T, stemWords = T,
removeSparseTerms = .998)
#View(inspect(doc_matrix))
tmp_wordvec <- strsplit(as.character(icews$text[3]), " ")[[1]]
data.frame(tmp_wordvec, wordStem( tmp_wordvec ))
str(doc_matrix)
doc_matrix$ncol
doc_matrix$nrow
container <- create_container(doc_matrix, icews$issue2_f, trainSize = ntrain, testSize = ntest, virgin = F)
SVM <- train_model(container,"SVM")
#SVM2 <- train_model(container,"SVM", kernel = "polynomial")
#GLMNET <- train_model(container,"GLMNET")
#MAXENT <- train_model(container,"MAXENT")
#SLDA <- train_model(container,"SLDA")
BOOSTING <- train_model(container,"BOOSTING")
BAGGING <- train_model(container,"BAGGING")
RF <- train_model(container,"RF")
#NNET <- train_model(container,"NNET")
TREE <- train_model(container,"TREE")
SVM_CLASSIFY <- classify_model(container, SVM)
#SVM_CLASSIFY2 <- classify_model(container, SVM2)
#GLMNET_CLASSIFY <- classify_model(container, GLMNET)
#MAXENT_CLASSIFY <- classify_model(container, MAXENT)
#SLDA_CLASSIFY <- classify_model(container, SLDA)
BOOSTING_CLASSIFY <- classify_model(container, BOOSTING)
BAGGING_CLASSIFY <- classify_model(container, BAGGING)
RF_CLASSIFY <- classify_model(container, RF)
#NNET_CLASSIFY <- classify_model(container, NNET)
TREE_CLASSIFY <- classify_model(container, TREE)
# Combine the held-out classification results from every model that was
# actually trained above into one analytics object.
# BUG FIX: a second create_analytics() call followed here that referenced
# NNET_CLASSIFY and MAXENT_CLASSIFY -- those models are commented out in
# the training step, so the objects do not exist and the call stopped the
# script with an "object not found" error. Only the *_CLASSIFY objects
# that exist are combined now.
analytics <- create_analytics(container,
cbind(SVM_CLASSIFY, #SLDA_CLASSIFY,
BOOSTING_CLASSIFY, BAGGING_CLASSIFY, RF_CLASSIFY,
#NNET_CLASSIFY,
TREE_CLASSIFY
#MAXENT_CLASSIFY
))
summary(analytics)
library(xtable)
# CREATE THE data.frame SUMMARIES
# Per-label (topic), per-algorithm, ensemble, and per-document views of
# the classification results.
topic_summary <- analytics@label_summary
topic_summary
alg_summary <- analytics@algorithm_summary
alg_summary
ens_summary <-analytics@ensemble_summary
ens_summary
doc_summary <- analytics@document_summary
doc_summary
create_ensembleSummary(analytics@document_summary)
# 4-fold cross-validation. NOTE(review): cross_validate() appears to
# retrain each algorithm internally (MAXENT/SLDA/NNET run here even
# though train_model() was skipped for them above) -- confirm against the
# RTextTools documentation. Also note these assignments OVERWRITE the
# fitted model objects (SVM, BAGGING, BOOSTING, TREE) created earlier
# with CV results; rename to e.g. SVM_CV if the models are needed later.
SVM <- cross_validate(container, 4, "SVM")
#GLMNET <- cross_validate(container, 4, "GLMNET")
MAXENT <- cross_validate(container, 4, "MAXENT")
SLDA <- cross_validate(container, 4, "SLDA")
BAGGING <- cross_validate(container, 4, "BAGGING")
BOOSTING <- cross_validate(container, 4, "BOOSTING")
#RF <- cross_validate(container, 4, "RF")
NNET <- cross_validate(container, 4, "NNET")
TREE <- cross_validate(container, 4, "TREE")
# Code location (for Howard)
############################
# locs: list with one character vector of NLP-extracted location strings
# per event (may be empty when no location was found).
load("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp.Rdata")
locs
# Flatten the ragged list into a fixed-width frame: up to 20 location
# columns for each of the 642 events.
locs_d <- as.data.frame(matrix(ncol = 20, nrow = 642))
for (i in 1:642){
if (length(locs[[i]]) > 0){
locs_d[i, 1:length(locs[[i]])] <- locs[[i]]
}
print(i)  # progress indicator
}
names(locs_d) <- paste0("location_", 1:20)
names(locs_d)
head(icews)
dim(icews)
# NOTE(review): icews was built from 642 events above, so rows 643:657
# are out of range here and print as NA -- confirm which table this
# inspection was meant for (icews2 below is a different file).
icews[643:657, 2]
icews2 <- read.csv("~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/all_protest_from2001_2014_20151111.csv")
dim(icews2)
# Attach the location columns to the raw protest-event data.
# NOTE(review): cbind() assumes nrow(icews2) == nrow(locs_d) (642) --
# verify, since icews2 is read from a separate file.
icews_loc <- cbind(icews2, locs_d)
save(locs_d, file = "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp_recode.Rdata")
write.csv(locs_d, "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/locs_nlp_recode.csv")
names(icews_loc)
save(icews_loc, file = "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews+loc.Rdata")
# BUG FIX: the output file name was missing its .csv extension.
write.csv(icews_loc, "~/Dropbox/WORK/Research (now)/Chinese NPL project/Data/icews+loc.csv")
############################
# http://www.svm-tutorial.com/2014/11/svm-classify-text-r/
# Step 5: Create and train the SVM model
|
##################
# Landscape MSOM
##################
library(dplyr)
library(lubridate)
library(readr)
library(stringr)
library(devtools)
if(packageVersion("tidyr") < "0.8.99.9000") devtools::install_github("tidyverse/tidyr") # ensure tidyr version with pivot_wider
library(tidyr)
########################################
###### SALAMANDER OCCUPANCY DATA #######
########################################
# Read in data
canaan <- read.csv("Data/Landscape/CVNWR_transects.csv", header = TRUE, stringsAsFactors = FALSE)
capital <- read.csv("Data/Landscape/NCRlotic_all.csv", header = TRUE, stringsAsFactors = FALSE)
shenandoah <- read.csv("Data/Landscape/Shen_snp12.csv", header = TRUE, stringsAsFactors = FALSE)
wmaryland <- read.csv("Data/Date_Location_Transect_Visit_Data_Processed.csv", header = TRUE, stringsAsFactors = FALSE)
str(canaan)
str(capital)
str(shenandoah)
str(wmaryland)
# Format data: region - transect ID - species - age - pass/visit 1- pass/visit 2 - pass/visit - 3
# make all same format, column names
#----- Canaan Valley National Wildlife Refuge Dataset -----
# Canaan captures: make transect IDs unique (site Name + transect number,
# with NA transect numbers treated as 0) and parse survey dates.
can <- canaan %>%
mutate(Transect = ifelse(is.na(Transect), 0, Transect),
Transect = paste0(Name, Transect),
Date = mdy(Date)) %>%
group_by(Transect, Species, Age) %>%
select(Transect, Pass, Species, Age, Caught, Date)
# Highest pass number per transect-date (used later to distinguish
# "searched, nothing caught" = 0 from "pass not conducted" = NA);
# `visit` is a placeholder filled in by the numbering step that follows.
max_pass_can <- can %>%
ungroup() %>%
group_by(Transect, Date) %>%
summarize(max_pass = max(Pass),
visit = NA_integer_) %>%
arrange(Transect, Date) %>%
ungroup()
# Number the visits to each transect in date order. Rows are sorted by
# Transect then Date, so run-length numbering restarts at 1 whenever the
# transect changes -- the same result as the old element-wise loop, but
# without its failure mode: `for (i in 2:nrow(...))` iterates over 2:1
# when the table has a single row.
max_pass_can$visit <- sequence(rle(max_pass_can$Transect)$lengths)
just_pass <- max_pass_can %>%
filter(visit == 1)
combos_can <- can %>%
dplyr::ungroup() %>%
mutate(Species = ifelse(Species == "DOCR", "DOCH", Species)) %>%
tidyr::expand(nesting(Transect, Date), Species, Age, Pass) %>%
dplyr::filter(Species %in% c("GPOR", "DFUS", "EBIS", "DMON", "DOCH"),
Age %in% c("A", "L")) %>%
dplyr::arrange(Transect, Date, Species, Age, Pass) %>%
dplyr::left_join(max_pass_can)
can2 <- combos_can %>%
left_join(can) %>%
# group_by(Site) %>%
mutate(Caught = ifelse(Pass <= max_pass & is.na(Caught), 0, Caught)) %>%
arrange(Transect, Date, Species, Age, Pass)
# check the size of the combos_can vs resulting dataframe
length(unique(paste(can$Transect, can$Date))) * 5 * 2 * 4
# Convert counts to binary
can2$obs <- can2$Caught
can2[can2$obs > 1 & !is.na(can2$obs), "obs"] <- 1
summary(can2)
#--------- need to add date below and check if expanded for species-larvae-*age* combos for each transect -----------#
###### It did not spread for all species-age combos at all sites, something wrong with spread(), can't get pivot_wider() to load
can3 <- can2 %>%
ungroup() %>%
select(-visit, -Caught) %>%
group_by(Transect, Date, Species, Age) %>%
# select(-region) %>%
mutate(Pass = paste0("p", Pass)) %>%
tidyr::pivot_wider(names_from = Pass, values_from = obs) %>%
mutate(region = "Canaan") %>%
#spread(Pass, Caught) %>% #### This doesn't spread correctly, it leaves out some species that need to be at all sites (even if not found)
ungroup() %>%
mutate(year = year(Date)) %>%
select(region, Transect, Date, Species, Age, p1, p2, p3, p4) %>%
as.data.frame(. , stringsAsFactors = FALSE) %>%
arrange(region, Transect, Date, Species, Age)
# Redo the naming
colnames(can3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Save detailed occupancy data for canaan
if(!dir.exists("Data/Derived")) dir.create("Data/Derived", recursive = TRUE)
saveRDS(can3, "Data/Derived/canaan_detailed_occ.rds")
#----- National Capitals Region Dataset ------
cap <- capital %>%
mutate(#Transect = paste(PointName, Visit, sep = "_v"),
pass4 = NA_real_,
region = "Capital") %>% # added pass4 column to match canaan dataframe
group_by(PointName, SpeciesCode, SAgeID) %>%
select(region, PointName, SDate, Visit, SpeciesCode, SAgeID, PassCount1, PassCount2, PassCount3, pass4)
colnames(cap) <- c("region", "transect", "date", "visit", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Remove NULLs from capitals data
# Keep the NULL-species records for reference.
na <- cap[which(cap$species == "NULL"),]
# Use a logical filter instead of `-which(...)`: when no row matches,
# -which() is -integer(0), which silently selects ZERO rows and would
# empty the data frame. `%in%` treats NA species as non-matching, the
# same as which() did.
cap1 <- cap[!(cap$species %in% "NULL"),]
cap <- cap1
# Replace remaining "NULL" strings (e.g. in the pass-count columns) with NA.
cap[cap == "NULL"] <- NA_integer_
cap <- cap %>%
arrange(region, transect, date, species, age) %>%
mutate(pass1 = as.numeric(pass1),
pass2 = as.numeric(pass2),
pass3 = as.numeric(pass3),
pass4 = as.numeric(pass4),
age = ifelse(age == "juvenile" | age == "adult", "A", age), # add together
age = ifelse(age == "larva" | age == "metamorphosing", "L", age)) %>%
group_by(region, transect, date, visit, species, age) %>%
summarise_all(.funs = sum) %>%
ungroup() %>%
# select(-region) %>%
as.data.frame(. , stringsAsFactors = FALSE)
max_pass_cap <- cap %>%
ungroup() %>%
pivot_longer(cols = starts_with("pass"), names_to = "pass", values_to = "count") %>%
mutate(pass = gsub(pattern = "pass*", replacement = "", x = pass)) %>%
filter(!is.na(count)) %>%
select(transect, date, visit, pass) %>%
group_by(transect, date) %>%
mutate(max_pass = max(pass)) %>%
arrange(transect, date, visit) %>%
ungroup() %>%
mutate(date = mdy(date),
visit_old = as.integer(visit),
pass = as.integer(pass),
max_pass = as.integer(max_pass)) %>%
mutate(year = year(date)) %>%
group_by(transect, year, pass) %>%
mutate(visit_1 = ifelse(date == min(date), 1, 0)) %>%
distinct() %>%
arrange(transect, date) %>%
filter(visit_1 == 1) %>%
select(-visit_old) %>%
ungroup()
combos_cap <- cap %>%
dplyr::ungroup() %>%
mutate(species = ifelse(species == "ebis", "EBIS", species)) %>%
tidyr::expand(nesting(transect, date, visit), species, age) %>%
# nesting(Transect, Date, Species)
dplyr::filter(species %in% c("DFUS", "EBIS", "PRUB", "ELON", "EGUT"),
age %in% c("A", "L")) %>%
dplyr::arrange(transect, date, species, age)
length(unique(cap$transect))
length(unique(paste0(cap$transect, "_", cap$date)))
length(unique(cap$species))
length(unique(cap$age))
# desired rows (before filtering to first visit each year)
rows_cap <- length(unique(paste0(cap$transect, "_", cap$date))) * 5 * 2
cap2 <- combos_cap %>%
ungroup() %>%
left_join(ungroup(cap)) %>%
mutate(date = mdy(date))
rows_cap == nrow(cap2)
visit_passes <- max_pass_cap %>%
select(transect, date, max_pass) %>%
group_by(transect, date) %>%
summarise_all(max) %>%
ungroup()
cap3 <- cap2 %>%
ungroup() %>%
right_join(ungroup(visit_passes)) %>%
# filter(pass == 1 | is.na(pass)) %>%
mutate(pass1 = ifelse(1 <= max_pass & is.na(pass1), 0, pass1),
pass2 = ifelse(2 <= max_pass & is.na(pass2), 0, pass2),
pass3 = ifelse(3 <= max_pass & is.na(pass3), 0, pass3),
pass4 = ifelse(4 <= max_pass & is.na(pass4), 0, pass4),
region = "Capital") %>%
arrange(transect, date, species, age) %>%
distinct() %>%
select(region, transect, date, species, age, pass1, pass2, pass3, pass4)
# reduce from counts to occupancy
cap4 <- cap3 %>%
mutate(pass1 = ifelse(pass1 >= 1, 1, pass1),
pass2 = ifelse(pass2 >= 1, 1, pass2),
pass3 = ifelse(pass3 >= 1, 1, pass3),
pass4 = ifelse(pass4 >= 1, 1, pass4),
date = ymd(date))
# cap3 <- combos_cap %>%
# left_join(she) %>%
# # group_by(Site) %>%
# mutate(count = ifelse(Pass <= max_pass & is.na(count), 0, count),
# Year = 2012) %>%
# arrange(Site, Date, Species, Age, Pass, visit)
# Save detailed occupancy data for the national capitals region
saveRDS(cap3, "Data/Derived/ncr_detailed_occ.rds")
# ------------------------------- need max pass for each transect-date combo to separate 0 from NA ------------------------ #
#----- Shenandoah National Park Dataset ----
# list <- c(shenandoah$Site, shenandoah$Species, shenandoah$Age)
# add_count(shenandoah, name = "count")
she <- shenandoah %>%
mutate(Date = mdy(Date),
Age = ifelse(Age == "J", "A", Age)) %>%
filter(Pass %in% 1:5,
Age != "") %>%
group_by(Site, Date, Species, Age, Pass) %>%
select(Site, Date, Species, Age, Pass) %>%
summarise(count = n()) %>%
ungroup() %>%
mutate(Year = year(Date),
Age = ifelse(Age == "l", "L", Age))
max_pass <- she %>%
ungroup() %>%
group_by(Site, Date) %>%
summarize(max_pass = max(Pass),
visit = NA_integer_) %>%
arrange(Site, Date) %>%
ungroup()
# Number the visits to each site in date order (rows sorted by Site then
# Date). Run-length numbering restarts at 1 when the site changes,
# matching the old loop while also handling a single-row table (the old
# `for (i in 2:nrow(...))` iterated over 2:1 in that case).
max_pass$visit <- sequence(rle(max_pass$Site)$lengths)
just_pass <- max_pass %>%
filter(visit == 1) %>%
select(-Date)
# filter to just first visit to each site
# she <- she %>%
# filter(visit == 1) # filter combo site-date in just pass one filter(site-date %in% unique(max_pass$site-date))
#Pass = paste0("p", Pass)
# desired output length for combos_she
length(unique(paste(she$Site, she$Date))) * length(unique(she$Species)) * length(unique(she$Age)) * length(unique(she$Pass))
combos_she <- she %>%
tidyr::expand(nesting(Site, Date), Age, Species, Pass) %>%
left_join(just_pass)
# Join observed counts onto the full site-date-species-age-pass grid;
# passes that were conducted (Pass <= max_pass) but yielded no captures
# become explicit 0s, while un-conducted passes stay NA.
she2 <- combos_she %>%
left_join(she) %>%
# group_by(Site) %>%
mutate(count = ifelse(Pass <= max_pass & is.na(count), 0, count),
Year = 2012) %>%
arrange(Site, Date, Species, Age, Pass, visit)
# NOTE(review): hard-coded removal of row 2338 -- fragile if upstream
# data or sort order changes; document the offending record or derive
# the filter from its contents instead.
she2 <- she2[-2338,]
# Convert counts to binary (detection/nondetection)
she2$obs <- she2$count
she2[she2$obs > 1 & !is.na(she2$obs), "obs"] <- 1
summary(she2)
# spread canaan dataset
she3 <- she2 %>%
mutate(Pass = paste0("p", Pass)) %>%
select(-max_pass, -visit, -count, -Year) %>%
tidyr::pivot_wider(names_from = Pass, values_from = obs) %>%
mutate(region = "Shenandoah") %>%
filter(Species != "PCIN") %>%
select(region, Site, Date, Species, Age, p1, p2, p3, p4, p5) %>% # these pass names may cause problems
as.data.frame(. , stringsAsFactors = FALSE)
colnames(she3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4", "pass5")
# Save detailed occupancy data for the national capitals region
saveRDS(she3, "Data/Derived/shen_detailed_occ.rds")
#----- Western Maryland Dataset ----
# Rearrange data into long format
df <- wmaryland %>%
mutate(stream = ifelse(stream == "POPLICKTRIB", "PopLick", stream),
stream = ifelse(stream == "ALEX", "Alexander Run", stream),
stream = ifelse(stream == "ELKLICK", "ElkLick", stream),
stream = ifelse(stream == "MILL", "Mill", stream),
stream = ifelse(stream == "BLUELICK", "BlueLick", stream),
stream = ifelse(stream == "WSHALEN", "West Shale North", stream),
stream = ifelse(stream == "KOCH", "Koch", stream),
stream = ifelse(stream == "DUNGHILL", "Bowser-Dung Hill", stream),
stream = ifelse(stream == "BEARHILL", "Maynardier Ridge at Bear Hill", stream),
trans = paste0(stream, "_", transect)) %>%
group_by(trans, stream, transect, visit) %>%
tidyr::gather(sp_stage, count, -date, -trans, - stream, -transect, -type, -up_down, -dist, -visit, -time_min, -air, -water, -pH, -DO, -EC, -TDS, -observers) %>%
tidyr::separate(sp_stage, into = c("species", "stage"), sep = 4) %>%
filter(species != "tota",
!is.na(count)) %>%
# mutate(type = ifelse(type == "res", up_down, type)) %>%
select(date, stream, transect, visit, trans, species, stage, count) %>%
ungroup()
# Convert counts to binary (detection/nondetection)
df$obs <- df$count
df[df$obs > 1 & !is.na(df$obs), "obs"] <- 1
summary(df)
# Remove PRUB from df
# Set the PRUB records aside for reference.
prub <- df[which(df$species == "PRUB"),]
# Logical filter avoids the -which() footgun: with no matching rows,
# -which() is -integer(0) and would silently drop every row. `%in%`
# treats NA species as non-matching, the same as which() did.
df2 <- df[!(df$species %in% "PRUB"),]
df <- df2
max_visit_df <- df %>%
ungroup() %>%
group_by(stream, transect) %>%
summarize(max_pass = max(visit),
visit = NA_integer_) %>%
ungroup() %>%
mutate(trans = paste0(stream, "_", transect))
# Number repeat rows within each transect (rows grouped/sorted by trans).
# Run-length numbering reproduces the old element-wise loop and also
# handles a single-row table, where `for (i in 2:nrow(...))` would
# iterate over 2:1.
max_visit_df$visit <- sequence(rle(max_visit_df$trans)$lengths)
just_visit <- max_visit_df %>%
select(trans, max_pass)
colnames(just_visit) <- c("trans", "max_visit")
# desired output length for combos_df
length(unique(df$trans)) * length(unique(df$species)) * length(unique(df$stage)) * length(unique(df$visit))
combos_df <- df %>%
ungroup() %>%
select(date, trans, visit, species, stage, obs) %>%
tidyr::expand(nesting(trans), stage, species, visit) %>%
left_join(just_visit) %>%
select(trans, species, stage, visit, max_visit) %>%
arrange(trans, species, stage, visit)
df2 <- combos_df %>%
left_join(df) %>%
mutate(date = mdy(date)) %>%
arrange(trans, species, stage, visit)
# spread dataset
df3 <- df2 %>%
ungroup() %>%
mutate(visit = paste0("v", visit)) %>%
select(-max_visit, -stream, -transect, -count, -date) %>% # did not include date because it separates the counts into separate rows for each visit because each visit was done on a different day
tidyr::pivot_wider(names_from = visit, values_from = obs) %>%
mutate(region = "WMaryland") %>%
as.data.frame(. , stringsAsFactors = FALSE) %>%
mutate(date = NA) %>%
select(region, trans, date, species, stage, v1, v2, v3, v4) # these are VISITS NOT PASSES
colnames(df3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Save detailed occupancy data for western maryland
saveRDS(df3, "Data/Derived/westmd_detailed_occ.rds")
# array with matching dates and transect-visit, not sure if this is needed yet.....
date_df <- df %>%
select(date, trans, visit)
#----- Combine all salamander occ data -----
landscape_N <- bind_rows(can3, cap3, she3, df3)
##### Like Shen replace the NA if <= max pass with 0
spec <- c("DMON", "DOCH", "GPOR", "DFUS", "DOCR", "EBIS", "PRUB", "ELON", "EGUT")
landscape_occ <- landscape_N %>%
mutate(pass1 = ifelse(pass1 > 0, 1, pass1),
pass2 = ifelse(pass2 > 0, 1, pass2),
pass3 = ifelse(pass3 > 0, 1, pass3),
pass4 = ifelse(pass4 > 0, 1, pass4),
pass5 = ifelse(pass5 > 0, 1, pass5),
canaan = ifelse(region == "Canaan", 1, 0),
capital = ifelse(region == "Capital", 1, 0),
shenandoah = ifelse(region == "Shenandoah", 1, 0),
wmaryland = ifelse(region == "WMaryland", 1, 0),
age = ifelse(age == "juvenile" | age == "recently metamorphosed" | age == "adult" | age == "metamorphosing", "A", age),
age = ifelse(age == "" | age == " ", NA, age),
age = ifelse(age == "larva", "L", age)) %>%
filter(species %in% spec,
!transect %in% c("MRC2T1", "PR300", "MRC3TL", "PR")) %>%
mutate(#transect = ifelse(region == "Canaan", substr(transect, 1, nchar(transect) - 5), transect),
#transect = ifelse(transect == "Camp 70-Yellow Creek_NA", "Camp 70-Yellow Creek", transect),
#transect = ifelse(region == "Canaan", gsub(pattern = "*_", replacement = "", x = transect), transect),
#transect = ifelse(region == "Capital", substr(transect, 1, nchar(transect) - 3), transect),
transect = ifelse(region == "Capital", gsub(pattern = "_v.$", replacement = "", x = transect), transect),
transect = ifelse(region == "Capital", gsub(pattern = "_vNULL", replacement = "", x = transect), transect),
stream = transect) %>%
separate(col = "transect", into = c("transect", "transect_num"), sep = "_") %>%
select(region, stream, date, species, age, pass1, pass2, pass3, pass4, pass5, canaan, capital, shenandoah, wmaryland)
## WARNING: HARMLESS - just says that there are a lot of NAs filled into the stream column because it is conditional on the region = "wmaryland"
colnames(landscape_occ) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4", "pass5", "canaan", "capital", "shenandoah", "wmaryland")
# Remove "PR" transect from landscape_occ (wasn't working in line 441)
# landscape_occ_pr <- landscape_occ[-which(landscape_occ$transect == "PR"),]
# landscape_occ <- landscape_occ_pr
summary(landscape_occ)
unique(landscape_occ$age)
unique(landscape_occ$species)
# Save detailed occupancy data for western maryland
saveRDS(landscape_occ, "Data/Derived/combined_detailed_occ.rds")
#---------------cleaning---------------------
rm(list = ls())
gc()
# unload packages?
| /Code/combine_obs_data.R | permissive | jclbrooks/MD_Stream_Salamanders | R | false | false | 16,603 | r | ##################
# Landscape MSOM
##################
library(dplyr)
library(lubridate)
library(readr)
library(stringr)
library(devtools)
if(packageVersion("tidyr") < "0.8.99.9000") devtools::install_github("tidyverse/tidyr") # ensure tidyr version with pivot_wider
library(tidyr)
########################################
###### SALAMANDER OCCUPANCY DATA #######
########################################
# Read in data
canaan <- read.csv("Data/Landscape/CVNWR_transects.csv", header = TRUE, stringsAsFactors = FALSE)
capital <- read.csv("Data/Landscape/NCRlotic_all.csv", header = TRUE, stringsAsFactors = FALSE)
shenandoah <- read.csv("Data/Landscape/Shen_snp12.csv", header = TRUE, stringsAsFactors = FALSE)
wmaryland <- read.csv("Data/Date_Location_Transect_Visit_Data_Processed.csv", header = TRUE, stringsAsFactors = FALSE)
str(canaan)
str(capital)
str(shenandoah)
str(wmaryland)
# Format data: region - transect ID - species - age - pass/visit 1- pass/visit 2 - pass/visit - 3
# make all same format, column names
#----- Canaan Valley National Wildlife Refuge Dataset -----
can <- canaan %>%
mutate(Transect = ifelse(is.na(Transect), 0, Transect),
Transect = paste0(Name, Transect),
Date = mdy(Date)) %>%
group_by(Transect, Species, Age) %>%
select(Transect, Pass, Species, Age, Caught, Date)
max_pass_can <- can %>%
ungroup() %>%
group_by(Transect, Date) %>%
summarize(max_pass = max(Pass),
visit = NA_integer_) %>%
arrange(Transect, Date) %>%
ungroup()
# Number the visits to each transect in date order. Rows are sorted by
# Transect then Date, so run-length numbering restarts at 1 whenever the
# transect changes -- the same result as the old element-wise loop, but
# without its failure mode: `for (i in 2:nrow(...))` iterates over 2:1
# when the table has a single row.
max_pass_can$visit <- sequence(rle(max_pass_can$Transect)$lengths)
just_pass <- max_pass_can %>%
filter(visit == 1)
combos_can <- can %>%
dplyr::ungroup() %>%
mutate(Species = ifelse(Species == "DOCR", "DOCH", Species)) %>%
tidyr::expand(nesting(Transect, Date), Species, Age, Pass) %>%
dplyr::filter(Species %in% c("GPOR", "DFUS", "EBIS", "DMON", "DOCH"),
Age %in% c("A", "L")) %>%
dplyr::arrange(Transect, Date, Species, Age, Pass) %>%
dplyr::left_join(max_pass_can)
can2 <- combos_can %>%
left_join(can) %>%
# group_by(Site) %>%
mutate(Caught = ifelse(Pass <= max_pass & is.na(Caught), 0, Caught)) %>%
arrange(Transect, Date, Species, Age, Pass)
# check the size of the combos_can vs resulting dataframe
length(unique(paste(can$Transect, can$Date))) * 5 * 2 * 4
# Convert counts to binary
can2$obs <- can2$Caught
can2[can2$obs > 1 & !is.na(can2$obs), "obs"] <- 1
summary(can2)
#--------- need to add date below and check if expanded for species-larvae-*age* combos for each transect -----------#
###### It did not spread for all species-age combos at all sites, something wrong with spread(), can't get pivot_wider() to load
can3 <- can2 %>%
ungroup() %>%
select(-visit, -Caught) %>%
group_by(Transect, Date, Species, Age) %>%
# select(-region) %>%
mutate(Pass = paste0("p", Pass)) %>%
tidyr::pivot_wider(names_from = Pass, values_from = obs) %>%
mutate(region = "Canaan") %>%
#spread(Pass, Caught) %>% #### This doesn't spread correctly, it leaves out some species that need to be at all sites (even if not found)
ungroup() %>%
mutate(year = year(Date)) %>%
select(region, Transect, Date, Species, Age, p1, p2, p3, p4) %>%
as.data.frame(. , stringsAsFactors = FALSE) %>%
arrange(region, Transect, Date, Species, Age)
# Redo the naming
colnames(can3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Save detailed occupancy data for canaan
if(!dir.exists("Data/Derived")) dir.create("Data/Derived", recursive = TRUE)
saveRDS(can3, "Data/Derived/canaan_detailed_occ.rds")
#----- National Capitals Region Dataset ------
cap <- capital %>%
mutate(#Transect = paste(PointName, Visit, sep = "_v"),
pass4 = NA_real_,
region = "Capital") %>% # added pass4 column to match canaan dataframe
group_by(PointName, SpeciesCode, SAgeID) %>%
select(region, PointName, SDate, Visit, SpeciesCode, SAgeID, PassCount1, PassCount2, PassCount3, pass4)
colnames(cap) <- c("region", "transect", "date", "visit", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Remove NULLs from capitals data
# Keep the NULL-species records for reference.
na <- cap[which(cap$species == "NULL"),]
# Use a logical filter instead of `-which(...)`: when no row matches,
# -which() is -integer(0), which silently selects ZERO rows and would
# empty the data frame. `%in%` treats NA species as non-matching, the
# same as which() did.
cap1 <- cap[!(cap$species %in% "NULL"),]
cap <- cap1
# Replace remaining "NULL" strings (e.g. in the pass-count columns) with NA.
cap[cap == "NULL"] <- NA_integer_
cap <- cap %>%
arrange(region, transect, date, species, age) %>%
mutate(pass1 = as.numeric(pass1),
pass2 = as.numeric(pass2),
pass3 = as.numeric(pass3),
pass4 = as.numeric(pass4),
age = ifelse(age == "juvenile" | age == "adult", "A", age), # add together
age = ifelse(age == "larva" | age == "metamorphosing", "L", age)) %>%
group_by(region, transect, date, visit, species, age) %>%
summarise_all(.funs = sum) %>%
ungroup() %>%
# select(-region) %>%
as.data.frame(. , stringsAsFactors = FALSE)
max_pass_cap <- cap %>%
ungroup() %>%
pivot_longer(cols = starts_with("pass"), names_to = "pass", values_to = "count") %>%
mutate(pass = gsub(pattern = "pass*", replacement = "", x = pass)) %>%
filter(!is.na(count)) %>%
select(transect, date, visit, pass) %>%
group_by(transect, date) %>%
mutate(max_pass = max(pass)) %>%
arrange(transect, date, visit) %>%
ungroup() %>%
mutate(date = mdy(date),
visit_old = as.integer(visit),
pass = as.integer(pass),
max_pass = as.integer(max_pass)) %>%
mutate(year = year(date)) %>%
group_by(transect, year, pass) %>%
mutate(visit_1 = ifelse(date == min(date), 1, 0)) %>%
distinct() %>%
arrange(transect, date) %>%
filter(visit_1 == 1) %>%
select(-visit_old) %>%
ungroup()
combos_cap <- cap %>%
dplyr::ungroup() %>%
mutate(species = ifelse(species == "ebis", "EBIS", species)) %>%
tidyr::expand(nesting(transect, date, visit), species, age) %>%
# nesting(Transect, Date, Species)
dplyr::filter(species %in% c("DFUS", "EBIS", "PRUB", "ELON", "EGUT"),
age %in% c("A", "L")) %>%
dplyr::arrange(transect, date, species, age)
length(unique(cap$transect))
length(unique(paste0(cap$transect, "_", cap$date)))
length(unique(cap$species))
length(unique(cap$age))
# desired rows (before filtering to first visit each year)
rows_cap <- length(unique(paste0(cap$transect, "_", cap$date))) * 5 * 2
cap2 <- combos_cap %>%
ungroup() %>%
left_join(ungroup(cap)) %>%
mutate(date = mdy(date))
rows_cap == nrow(cap2)
visit_passes <- max_pass_cap %>%
select(transect, date, max_pass) %>%
group_by(transect, date) %>%
summarise_all(max) %>%
ungroup()
cap3 <- cap2 %>%
ungroup() %>%
right_join(ungroup(visit_passes)) %>%
# filter(pass == 1 | is.na(pass)) %>%
mutate(pass1 = ifelse(1 <= max_pass & is.na(pass1), 0, pass1),
pass2 = ifelse(2 <= max_pass & is.na(pass2), 0, pass2),
pass3 = ifelse(3 <= max_pass & is.na(pass3), 0, pass3),
pass4 = ifelse(4 <= max_pass & is.na(pass4), 0, pass4),
region = "Capital") %>%
arrange(transect, date, species, age) %>%
distinct() %>%
select(region, transect, date, species, age, pass1, pass2, pass3, pass4)
# reduce from counts to occupancy
cap4 <- cap3 %>%
mutate(pass1 = ifelse(pass1 >= 1, 1, pass1),
pass2 = ifelse(pass2 >= 1, 1, pass2),
pass3 = ifelse(pass3 >= 1, 1, pass3),
pass4 = ifelse(pass4 >= 1, 1, pass4),
date = ymd(date))
# cap3 <- combos_cap %>%
# left_join(she) %>%
# # group_by(Site) %>%
# mutate(count = ifelse(Pass <= max_pass & is.na(count), 0, count),
# Year = 2012) %>%
# arrange(Site, Date, Species, Age, Pass, visit)
# Save detailed occupancy data for the national capitals region
saveRDS(cap3, "Data/Derived/ncr_detailed_occ.rds")
# ------------------------------- need max pass for each transect-date combo to separate 0 from NA ------------------------ #
#----- Shenandoah National Park Dataset ----
# list <- c(shenandoah$Site, shenandoah$Species, shenandoah$Age)
# add_count(shenandoah, name = "count")
she <- shenandoah %>%
mutate(Date = mdy(Date),
Age = ifelse(Age == "J", "A", Age)) %>%
filter(Pass %in% 1:5,
Age != "") %>%
group_by(Site, Date, Species, Age, Pass) %>%
select(Site, Date, Species, Age, Pass) %>%
summarise(count = n()) %>%
ungroup() %>%
mutate(Year = year(Date),
Age = ifelse(Age == "l", "L", Age))
max_pass <- she %>%
ungroup() %>%
group_by(Site, Date) %>%
summarize(max_pass = max(Pass),
visit = NA_integer_) %>%
arrange(Site, Date) %>%
ungroup()
# Number the visits to each site in date order (rows sorted by Site then
# Date). Run-length numbering restarts at 1 when the site changes,
# matching the old loop while also handling a single-row table (the old
# `for (i in 2:nrow(...))` iterated over 2:1 in that case).
max_pass$visit <- sequence(rle(max_pass$Site)$lengths)
just_pass <- max_pass %>%
filter(visit == 1) %>%
select(-Date)
# filter to just first visit to each site
# she <- she %>%
# filter(visit == 1) # filter combo site-date in just pass one filter(site-date %in% unique(max_pass$site-date))
#Pass = paste0("p", Pass)
# desired output length for combos_she
length(unique(paste(she$Site, she$Date))) * length(unique(she$Species)) * length(unique(she$Age)) * length(unique(she$Pass))
combos_she <- she %>%
tidyr::expand(nesting(Site, Date), Age, Species, Pass) %>%
left_join(just_pass)
she2 <- combos_she %>%
left_join(she) %>%
# group_by(Site) %>%
mutate(count = ifelse(Pass <= max_pass & is.na(count), 0, count),
Year = 2012) %>%
arrange(Site, Date, Species, Age, Pass, visit)
she2 <- she2[-2338,]
# Convert counts to binary (detection/nondetection)
she2$obs <- she2$count
she2[she2$obs > 1 & !is.na(she2$obs), "obs"] <- 1
summary(she2)
# spread canaan dataset
she3 <- she2 %>%
mutate(Pass = paste0("p", Pass)) %>%
select(-max_pass, -visit, -count, -Year) %>%
tidyr::pivot_wider(names_from = Pass, values_from = obs) %>%
mutate(region = "Shenandoah") %>%
filter(Species != "PCIN") %>%
select(region, Site, Date, Species, Age, p1, p2, p3, p4, p5) %>% # these pass names may cause problems
as.data.frame(. , stringsAsFactors = FALSE)
colnames(she3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4", "pass5")
# Save detailed occupancy data for the national capitals region
saveRDS(she3, "Data/Derived/shen_detailed_occ.rds")
#----- Western Maryland Dataset ----
# Rearrange data into long format
df <- wmaryland %>%
mutate(stream = ifelse(stream == "POPLICKTRIB", "PopLick", stream),
stream = ifelse(stream == "ALEX", "Alexander Run", stream),
stream = ifelse(stream == "ELKLICK", "ElkLick", stream),
stream = ifelse(stream == "MILL", "Mill", stream),
stream = ifelse(stream == "BLUELICK", "BlueLick", stream),
stream = ifelse(stream == "WSHALEN", "West Shale North", stream),
stream = ifelse(stream == "KOCH", "Koch", stream),
stream = ifelse(stream == "DUNGHILL", "Bowser-Dung Hill", stream),
stream = ifelse(stream == "BEARHILL", "Maynardier Ridge at Bear Hill", stream),
trans = paste0(stream, "_", transect)) %>%
group_by(trans, stream, transect, visit) %>%
tidyr::gather(sp_stage, count, -date, -trans, - stream, -transect, -type, -up_down, -dist, -visit, -time_min, -air, -water, -pH, -DO, -EC, -TDS, -observers) %>%
tidyr::separate(sp_stage, into = c("species", "stage"), sep = 4) %>%
filter(species != "tota",
!is.na(count)) %>%
# mutate(type = ifelse(type == "res", up_down, type)) %>%
select(date, stream, transect, visit, trans, species, stage, count) %>%
ungroup()
# Convert counts to binary (detection/nondetection)
df$obs <- df$count
df[df$obs > 1 & !is.na(df$obs), "obs"] <- 1
summary(df)
# Remove PRUB from df
# Set the PRUB records aside for reference.
prub <- df[which(df$species == "PRUB"),]
# Logical filter avoids the -which() footgun: with no matching rows,
# -which() is -integer(0) and would silently drop every row. `%in%`
# treats NA species as non-matching, the same as which() did.
df2 <- df[!(df$species %in% "PRUB"),]
df <- df2
max_visit_df <- df %>%
ungroup() %>%
group_by(stream, transect) %>%
summarize(max_pass = max(visit),
visit = NA_integer_) %>%
ungroup() %>%
mutate(trans = paste0(stream, "_", transect))
# Number repeat rows within each transect (rows grouped/sorted by trans).
# Run-length numbering reproduces the old element-wise loop and also
# handles a single-row table, where `for (i in 2:nrow(...))` would
# iterate over 2:1.
max_visit_df$visit <- sequence(rle(max_visit_df$trans)$lengths)
just_visit <- max_visit_df %>%
select(trans, max_pass)
colnames(just_visit) <- c("trans", "max_visit")
# desired output length for combos_df
length(unique(df$trans)) * length(unique(df$species)) * length(unique(df$stage)) * length(unique(df$visit))
combos_df <- df %>%
ungroup() %>%
select(date, trans, visit, species, stage, obs) %>%
tidyr::expand(nesting(trans), stage, species, visit) %>%
left_join(just_visit) %>%
select(trans, species, stage, visit, max_visit) %>%
arrange(trans, species, stage, visit)
df2 <- combos_df %>%
left_join(df) %>%
mutate(date = mdy(date)) %>%
arrange(trans, species, stage, visit)
# spread dataset
df3 <- df2 %>%
ungroup() %>%
mutate(visit = paste0("v", visit)) %>%
select(-max_visit, -stream, -transect, -count, -date) %>% # did not include date because it separates the counts into separate rows for each visit because each visit was done on a different day
tidyr::pivot_wider(names_from = visit, values_from = obs) %>%
mutate(region = "WMaryland") %>%
as.data.frame(. , stringsAsFactors = FALSE) %>%
mutate(date = NA) %>%
select(region, trans, date, species, stage, v1, v2, v3, v4) # these are VISITS NOT PASSES
colnames(df3) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4")
# Save detailed occupancy data for western maryland
saveRDS(df3, "Data/Derived/westmd_detailed_occ.rds")
# array with matching dates and transect-visit, not sure if this is needed yet.....
date_df <- df %>%
select(date, trans, visit)
#----- Combine all salamander occ data -----
# Stack the four regional data frames (Canaan, Capital, Shenandoah, W. Maryland)
# into one long table; columns absent in a region are filled with NA.
landscape_N <- bind_rows(can3, cap3, she3, df3)
##### Per Shen's approach, NA at or below the max pass is meant to become 0
##### (NOTE(review): the pipeline below only binarizes counts -- confirm where
##### that NA fill actually happens).
spec <- c("DMON", "DOCH", "GPOR", "DFUS", "DOCR", "EBIS", "PRUB", "ELON", "EGUT")
# Build the detection/non-detection table:
#  - binarize counts per pass (any count > 0 becomes 1; 0 and NA stay as-is)
#  - add one 0/1 region indicator column per region
#  - collapse life stages to adult ("A") vs larva ("L"); blanks become NA
#  - keep only the focal species and drop known problem transects
landscape_occ <- landscape_N %>%
mutate(pass1 = ifelse(pass1 > 0, 1, pass1),
pass2 = ifelse(pass2 > 0, 1, pass2),
pass3 = ifelse(pass3 > 0, 1, pass3),
pass4 = ifelse(pass4 > 0, 1, pass4),
pass5 = ifelse(pass5 > 0, 1, pass5),
canaan = ifelse(region == "Canaan", 1, 0),
capital = ifelse(region == "Capital", 1, 0),
shenandoah = ifelse(region == "Shenandoah", 1, 0),
wmaryland = ifelse(region == "WMaryland", 1, 0),
age = ifelse(age == "juvenile" | age == "recently metamorphosed" | age == "adult" | age == "metamorphosing", "A", age),
age = ifelse(age == "" | age == " ", NA, age),
age = ifelse(age == "larva", "L", age)) %>%
filter(species %in% spec,
!transect %in% c("MRC2T1", "PR300", "MRC3TL", "PR")) %>%
# Strip the Capital region's visit suffix ("_v1", "_vNULL", ...) from transect
# names, then split "<transect>_<number>" into separate columns.
mutate(#transect = ifelse(region == "Canaan", substr(transect, 1, nchar(transect) - 5), transect),
#transect = ifelse(transect == "Camp 70-Yellow Creek_NA", "Camp 70-Yellow Creek", transect),
#transect = ifelse(region == "Canaan", gsub(pattern = "*_", replacement = "", x = transect), transect),
#transect = ifelse(region == "Capital", substr(transect, 1, nchar(transect) - 3), transect),
transect = ifelse(region == "Capital", gsub(pattern = "_v.$", replacement = "", x = transect), transect),
transect = ifelse(region == "Capital", gsub(pattern = "_vNULL", replacement = "", x = transect), transect),
stream = transect) %>%
separate(col = "transect", into = c("transect", "transect_num"), sep = "_") %>%
select(region, stream, date, species, age, pass1, pass2, pass3, pass4, pass5, canaan, capital, shenandoah, wmaryland)
## WARNING (harmless): separate() reports many NAs filled into the new columns,
## because the "<transect>_<number>" naming pattern only applies when
## region == "WMaryland".
# The select() above keeps `stream` in the first position; rename it back to
# `transect` so all regions share one schema.
colnames(landscape_occ) <- c("region", "transect", "date", "species", "age", "pass1", "pass2", "pass3", "pass4", "pass5", "canaan", "capital", "shenandoah", "wmaryland")
# Remove "PR" transect from landscape_occ (wasn't working in line 441)
# landscape_occ_pr <- landscape_occ[-which(landscape_occ$transect == "PR"),]
# landscape_occ <- landscape_occ_pr
# Quick sanity checks on the combined table
summary(landscape_occ)
unique(landscape_occ$age)
unique(landscape_occ$species)
# Save the combined detailed occupancy data (all regions)
saveRDS(landscape_occ, "Data/Derived/combined_detailed_occ.rds")
#---------------cleaning---------------------
# NOTE(review): rm(list = ls()) wipes the whole workspace; acceptable at the
# end of an interactive script, but avoid sourcing this file from other code.
rm(list = ls())
gc()
# unload packages?
|
library(tidyverse)
# Pivot and summarize
# Reshape iris to long form (one row per flower x measurement) and average
# every measurement within each Species x Measure cell.
iris_pivot <- iris %>%
  pivot_longer(-Species, names_to = "Measure") %>%
  group_by(Species, Measure) %>%
  summarise(value = mean(value)) %>%
  ungroup()
# Show results
print(iris_pivot) | /raw_branches/merge_b/iris.R | permissive | sjchiass/git_training | R | false | false | 204 | r | library(tidyverse)
# Pivot and summarize
iris_pivot <- iris %>%
pivot_longer(-Species, "Measure") %>%
group_by(Species, Measure) %>%
summarise_all(mean) %>%
ungroup()
# Show results
print(iris_pivot) |
# Naive Bayes classifier for the salary data set.
# Fixes over the original script:
#  - the data frames are no longer overwritten by their own summaries (the
#    original re-read the CSV and stored nrow()/colnames()/counts back into
#    train_sal/test_sal, breaking every later use of the data),
#  - the test file is read with the same ".csv" extension as the training file,
#  - the final accuracy and confusion-matrix calls are two statements (they
#    were fused into one unparseable line),
#  - the dplyr-only pull()/%>% usage is replaced with base R (dplyr was never loaded).
library(readr)   # kept from the original script (read.csv below is base R)
# Install e1071 only when it is missing instead of unconditionally on every run.
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(e1071)
library(caret)

# --- Training data ---
train_sal <- read.csv("SalaryData_Train.csv")
# Exploratory data analysis (results printed; data frame left untouched)
nrow(train_sal)
colnames(train_sal)
length(unique(train_sal$Salary))
str(train_sal)
train_sal$educationno <- as.factor(train_sal$educationno)

# --- Test data ---
# NOTE(review): the original read "SalaryData_Test" without an extension while
# the training file used ".csv"; assuming both are CSVs -- confirm the path.
test_sal <- read.csv("SalaryData_Test.csv")
nrow(test_sal)
colnames(test_sal)
length(unique(test_sal$Salary))
str(test_sal)
test_sal$educationno <- as.factor(test_sal$educationno)

# --- Model ---
# Naive Bayes: P(Salary | all other columns)
Model <- naiveBayes(train_sal$Salary ~ ., data = train_sal)
Model

# Predict on the held-out set; report accuracy, then the full confusion matrix.
Model_pred <- predict(Model, test_sal)
mean(Model_pred == test_sal$Salary)
confusionMatrix(Model_pred, test_sal$Salary)
| /naive_bayes.R | no_license | Aruna-y56/statistics | R | false | false | 973 | r | library(readr)
install.packages("e1071")
library(e1071)
library(caret)
train_sal <- read.csv("SalaryData_Train")
#Exploratory Data Analysis
train_sal<-read.csv("SalaryData_Train.csv") %>%
nrow()
train_sal<-read.csv("SalaryData_Train.csv") %>%
colnames()
train_sal<-read.csv("SalaryData_Train.csv") %>%
pull(Salary) %>%
unique() %>%
length()
str(train_sal)
train_sal$educationno <- as.factor(train_sal$educationno)
test_sal <- read.csv("SalaryData_Test")
#Exploratory Data Analysis
test_sal <- read.csv("SalaryData_Test") %>%
nrow()
test_sal <- read.csv("SalaryData_Test") %>%
colnames()
test_sal <- read.csv("SalaryData_Test") %>%
pull(Salary) %>%
unique() %>%
length()
str(test_sal)
test_sal$educationno <- as.factor(test_sal$educationno)
Model <- naiveBayes(train_sal$Salary ~ ., data = train_sal)
Model
Model_pred <- predict(Model,test_sal)
mean(Model_pred==test_sal$Salary)confusionMatrix(Model_pred,test_sal$Salary)
|
# Load the Happy Planet Index data (first row contains column headers)
HP <- read.csv('HappyIndex.csv', header=TRUE)
str(HP)
head(HP)
# Keep the variables of interest by column position.
# NOTE(review): assumes the published column order (1 = rank, 2 = country,
# 4 = life expectancy, 5 = wellbeing, 7 = footprint, 8 = inequality of
# outcomes, 11 = HPI) -- confirm against the CSV header.
HD <- data.frame(Rank=HP[,1],Country=HP[,2],LifeExpectancy=HP[,4],Wellbeing=HP[,5],
Footprint=HP[,7],InequalityOutcome=HP[,8],HPI=HP[,11])
head(HD)
# Bin wellbeing into three bands: [0,5) = 'C', [5,6) = 'B', [6,8] = 'A'
HD <- transform(HD, wel = cut(Wellbeing, breaks = c(0, 5, 6, 8), include.lowest = T, right = F, labels = c('C', 'B', 'A')))
head(HD)
# One-way ANOVA: does mean HPI differ across the wellbeing bands?
aov.out <- aov(HPI ~ wel, data=HD)
summary(aov.out)
# Tukey HSD post-hoc pairwise comparisons at the 95% confidence level
tkh <- TukeyHSD(aov.out, conf.level=0.95)
tkh
# Plot the pairwise confidence intervals (las=1: horizontal axis labels)
plot(tkh, las=1)
| /HW13/hw13.R | no_license | duddlf23/CS564 | R | false | false | 483 | r | HP <- read.csv('HappyIndex.csv', header=TRUE)
str(HP)
head(HP)
HD <- data.frame(Rank=HP[,1],Country=HP[,2],LifeExpectancy=HP[,4],Wellbeing=HP[,5],
Footprint=HP[,7],InequalityOutcome=HP[,8],HPI=HP[,11])
head(HD)
HD <- transform(HD, wel = cut(Wellbeing, breaks = c(0, 5, 6, 8), include.lowest = T, right = F, labels = c('C', 'B', 'A')))
head(HD)
aov.out <- aov(HPI ~ wel, data=HD)
summary(aov.out)
tkh <- TukeyHSD(aov.out, conf.level=0.95)
tkh
plot(tkh, las=1)
|
############################### TRANSITIONS VERS UN SYSTEME DE COMPTES NOTIONNELS ###################
# Equilibrage du régime à court terme (disparition de la bosse)
# A partir de la date AnneeDebCN, tous les droits sont calcules dans le nouveau systeme.
# Hypothese de depart en retraite: meme age que dans le scenario de reference
# Taux de cotisation 23 % + ANC
# Gestion de la bosse de début de période.
# Réduction des pensions : 90% des droits acquis seulement.
t0 <- Sys.time()
#### Chargement des programmes source ####
rm(list = ls())
# Déclaration du chemin pour les fichiers sources
cheminsource <- "/Users/simonrabate/Desktop/PENSIPP 0.1/"
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsMS.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsPensIPP.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsLeg.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsRetr.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsCN.R" )) )
graph_compar <- function (serie,t1,t2,titre)
{
  # Plot scenario series 1..4 (rows of `serie`) over the window of columns
  # t1..t2; the x axis shows calendar years (column index + 1900).
  annees <- seq(1900 + t1, 1900 + t2, by = 1)
  fenetre <- serie[, t1:t2]
  # Scenario 1 opens the frame (orange); the y range covers all four series.
  plot(annees, serie[1, t1:t2], xlab = "Annee", ylab = titre,
       ylim = c(min(fenetre, na.rm = TRUE), max(fenetre, na.rm = TRUE)),
       lwd = 2, col = "orange", type = "l")
  # Scenarios 2-4 are overlaid in the default color with their own line widths.
  largeurs <- c(3, 1, 2)
  for (k in 2:4) {
    points(annees, serie[k, t1:t2], lwd = largeurs[k - 1], type = "l")
  }
}
# Output variable declarations: one slot per simulated individual for the
# per-person vectors/matrices, and one row per scenario x one column per year
# for the aggregate series.
TRC <- numeric(taille_max) # Target replacement rate of new retirees
ageliq_ <- matrix(nrow=taille_max,ncol=3) # Liquidation age, one column per scenario
duree_liq <- numeric(taille_max) # Total career duration at liquidation
dar_ <- matrix(nrow=taille_max,ncol=3) # dar at liquidation, per scenario (see SimDir outputs)
pliq_rg <- matrix(nrow=taille_max,ncol=3) # Pension at liquidation, general regime (RG), per scenario
pliq_ <- matrix(nrow=taille_max,ncol=3) # Total pension at liquidation, per scenario
points_cn <- numeric(taille_max) # Notional-account (CN) points at liquidation
pension_cn <- numeric(taille_max) # Notional-account (CN) pension at liquidation
conv <- numeric(taille_max) # Conversion coefficient at liquidation (pension_cn / points_cn)
ageref <- numeric(taille_max) # Reference retirement age taken from the baseline scenario (sc == 1)
actifs <- numeric(taille_max) # Active population filter
retraites <- numeric(taille_max) # Retired population filter
liquidants <- numeric(taille_max) # Current-year retirees (liquidation) filter
MSAL <- matrix(nrow=4,ncol=200) # Wage bill by year
MPENS <- matrix(nrow=4,ncol=200) # Total pensions paid by year
PIBREF <- matrix(nrow=4,ncol=200) # Annual GDP
RATIOPENS <- matrix(nrow=4,ncol=200) # Pensions/GDP ratio by year
RATIOFIN <- matrix(nrow=4,ncol=200) # Pensions-to-wage-bill ratio by year
RATIODEM <- matrix(nrow=4,ncol=200) # Demographic ratio (60+ over under-60) by year
SALMOY <- matrix(nrow=4,ncol=200) # Average wage by year
PENMOY <- matrix(nrow=4,ncol=200) # Average pension by year
DUREE_LIQ <- matrix(nrow=4,ncol=200) # Average total duration at liquidation
PLIQ_TOT <- matrix(nrow=4,ncol=200) # Average pension of new retirees by year
PLIQ_RG <- matrix(nrow=4,ncol=200) # Average pension of new retirees, general regime (RG)
PLIQ_AR <- matrix(nrow=4,ncol=200) # Average pension of new retirees, ARRCO, by year
PLIQ_AG <- matrix(nrow=4,ncol=200) # Average pension of new retirees, AGIRC, by year
PLIQ_FP <- matrix(nrow=4,ncol=200) # Average pension of new retirees, civil service (FP), by year
PLIQ_IN <- matrix(nrow=4,ncol=200) # Average pension of new retirees, self-employed, by year
PLIQ_CN <- matrix(nrow=4,ncol=200) # Average pension of new retirees, notional accounts (CN)
POINTS_CN <- matrix(nrow=4,ncol=200) # Average CN points of new retirees by year
CONV_MOY <- matrix(nrow=4,ncol=200) # Average conversion coefficient of new retirees by year
PENREL <- matrix(nrow=4,ncol=200) # Pension-to-wage ratio
TRCMOY <- matrix(nrow=4,ncol=200) # Average target replacement rate of new retirees by year
FLUXLIQ <- matrix(nrow=4,ncol=200) # Number of new retirees (weighted by W below)
AGELIQ <- matrix(nrow=4,ncol=160) # Average liquidation age by year (note: 160 columns, not 200)
W <- 2047.501 # Scaling weight applied to sums (individuals represented per simulated person -- presumably; confirm against Destinie documentation)
#### Début de la simulation ####
for (sc in c(3))
{
# Reinitialisation variables
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/DefVarRetr_Destinie.R")) )
load ( (paste0(cheminsource,"Modele/Outils/OutilsBio/BiosDestinie2.RData" )) )
setwd ( (paste0(cheminsource,"Simulations/CN" )) )
duree_liq <- rep(0,taille_max)
points_cn <- rep(0,taille_max)
pension_cn <- rep(0,taille_max)
if (sc==3) {UseOptCN(c("discount"))}
plafond<-8
for (t in 60:160) # Début boucle temporelle
{
print (c(sc,t))
if (sc>1)
{
AnneeDepartCN <- 115
TauxCotCN[t] <- 0.23
if (t <110) {RendementCN[t] <- PIB[t]/PIB[t-1]-1}
else {RendementCN[t] <- log((MSAL[sc,t-1]*Prix[t-1])/(MSAL[sc,t-6]*Prix[t-6]))/5}
RendementCNPrev[t] <- RendementCN[t]
RevaloCN[t+1] <- Prix[t]/Prix[t-1]
UseConv(55,70,t)
# print (CoeffConv[60:80])
}
if (sc>1 && t==AnneeDepartCN)
{
for (i in 1:taille_max)
{
if (ageliq[i]==0)
{
statut[i,statut[i,1:160]>1]<- statut[i,statut[i,1:160]>1]+100
}
}
}
# Liquidations
for (i in 1:taille_max) # Début boucle individuelle
{
# Liquidation
if ((t-t_naiss[i]>=55) && (ageliq[i]==0))
{
if (sc>1 && t>=AnneeDepartCN)
{
Leg <- t
UseLeg(Leg,t_naiss[i])
SimDir(i,t,"exo",ageref)
}
else
# Cas ou CN n'ont pas démarré, liquidation taux plein et conservation age
{
Leg <- t
UseLeg(Leg,t_naiss[i])
SimDir(i,t,"TP")
}
if (t_liq[i]==t)
{
points_cn[i] <- points_cn_pri+points_cn_fp+points_cn_ind
pension_cn[i] <- pension_cn_pri[i]+pension_cn_fp[i]+pension_cn_ind[i]
pliq_rg[i,sc] <- pension_rg[i]
pliq_[i,sc] <- pension[i]
dar_[i,sc] <- dar[i]
duree_liq[i] <- duree_tot
if (points_cn[i]>0) {conv[i] <- pension_cn[i]/points_cn[i]}
if (sc==1) {ageref[i] <- t-t_naiss[i]}
}
}
else if (ageliq[i]>0)
{
Revalo(i,t,t+1)
}
} # Fin de la boucle individuelle
ageliq_[,sc] <- t_liq-t_naiss
actifs <- (salaire[,t]>0) & (statut[,t]>0)
retraites <- (pension>0) & (statut[,t]>0)
liquidants <- (pension>0) & (t_liq==t)
if (sc >0)
{
DUREE_LIQ[sc,t] <- mean(duree_liq[liquidants])
PLIQ_TOT[sc,t] <- mean(pension[liquidants])
PLIQ_RG[sc,t] <- mean(pension_rg[liquidants])
PLIQ_AR[sc,t] <- mean(pension_ar[liquidants])
PLIQ_AG[sc,t] <- mean(pension_ag[liquidants])
PLIQ_FP[sc,t] <- mean(pension_fp[liquidants])
PLIQ_IN[sc,t] <- mean(pension_in[liquidants])
PLIQ_CN[sc,t] <- mean(pension_cn[liquidants])
SALMOY[sc,t] <- mean (salaire[actifs,t]/Prix[t])
MPENS[sc,t] <- W*sum(pension[retraites])/Prix[t]
MSAL[sc,t] <- W*sum(salaire[actifs,t])/Prix[t]
PIBREF[sc,t] <- MSAL[sc,t]*(PIB[109]/Prix[109])/MSAL[sc,109]
RATIOPENS[sc,t] <- MPENS[sc,t]/PIBREF[sc,t]
TRCMOY[sc,t] <- mean (TRC[which(t_liq[]==t)])
RATIOFIN[sc,t] <- MPENS[sc,t]/MSAL[sc,t]
RATIODEM[sc,t] <- sum ((t-t_naiss>=60) & (statut[,t]>0))/sum((t-t_naiss<60) &(statut[,t]>0))
PENMOY[sc,t] <- mean (pension[retraites]/Prix[t])
POINTS_CN[sc,t] <- mean (points_cn[which( (pension>0)&t_liq==t)])
CONV_MOY[sc,t] <- mean (conv[ which( (pension>0)&t_liq==t)])
PENREL[sc,t] <- PENMOY[sc,t]/SALMOY[sc,t]
FLUXLIQ[sc,t] <- W*sum(t_liq==t)
AGELIQ[sc,t] <- mean ( ageliq[which(t_liq==t)])
}
} # Fin de de la boucle temporelle
} # Fin boucle scenarios
save.image(paste0(cheminsource,"Simulations/CN/CNeq2(95).RData"))
#### Sorties ####
par(mar=c(6.1, 3.1, 4.1, 2.1))
par(xpd=TRUE)
plot (seq(2010,2059,by=1),RATIOFIN[1,110:159],xlab="Annee", ylab="ratio retraite/PIB",ylim=c(0.20,0.28),col="grey0",lwd=4,type="l")
points (seq(2010,2059,by=1),RATIOFIN[2,110:159],lwd=4,col="grey80",type="l")
points (seq(2010,2059,by=1),RATIOFIN[3,110:159],lwd=4,col="grey40",type="l")
title("Graphe 6a : Evolution du ratio retraites/salaires \n(taux 23%, ANC)", cex.main = 0.9)
legend.text <- c("Scénario de référénce","CN","CN réduction droits acquis")
legend("bottom",inset=c(-0.2,-0.55),cex=0.8,legend.text, fill=c("grey0","grey80","grey40"))
| /Simulations/CN/CN_MACRO/DiscountDroits2.R | no_license | philippechataignon/pensipp | R | false | false | 8,636 | r | ############################### TRANSITIONS VERS UN SYSTEME DE COMPTES NOTIONNELS ###################
# Equilibrage du régime à court terme (disparition de la bosse)
# A partir de la date AnneeDebCN, tous les droits sont calcules dans le nouveau systeme.
# Hypothese de depart en retraite: meme age que dans le scenario de reference
# Taux de cotisation 23 % + ANC
# Gestion de la bosse de début de période.
# Réduction des pensions : 90% des droits acquis seulement.
t0 <- Sys.time()
#### Chargement des programmes source ####
rm(list = ls())
# Déclaration du chemin pour les fichiers sources
cheminsource <- "/Users/simonrabate/Desktop/PENSIPP 0.1/"
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsMS.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsPensIPP.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsLeg.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsRetr.R" )) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsCN.R" )) )
graph_compar <- function (serie,t1,t2,titre)
{
plot (seq(1900+t1,1900+t2,by=1),serie[1,t1:t2],xlab="Annee", ylab=titre,
ylim=c(min(serie[,t1:t2],na.rm=TRUE),max(serie[,t1:t2],na.rm=TRUE)),lwd=2,col="orange",type="l")
points (seq(1900+t1,1900+t2,by=1),serie[2,t1:t2],lwd=3,type="l")
points (seq(1900+t1,1900+t2,by=1),serie[3,t1:t2],lwd=1,type="l")
points (seq(1900+t1,1900+t2,by=1),serie[4,t1:t2],lwd=2,type="l")
}
# Declaration des variable d'outputs
TRC <- numeric(taille_max) # Taux de remplacemnt cible des liquidants
ageliq_ <- matrix(nrow=taille_max,ncol=3)
duree_liq <- numeric(taille_max)
dar_ <- matrix(nrow=taille_max,ncol=3)
pliq_rg <- matrix(nrow=taille_max,ncol=3)
pliq_ <- matrix(nrow=taille_max,ncol=3)
points_cn <- numeric(taille_max)
pension_cn <- numeric(taille_max)
conv <- numeric(taille_max)
ageref <- numeric(taille_max)
actifs <- numeric(taille_max) # Filtre population active
retraites <- numeric(taille_max) # Filtre population retraitée
liquidants <- numeric(taille_max)
MSAL <- matrix(nrow=4,ncol=200) # Masse salariale par année
MPENS <- matrix(nrow=4,ncol=200) # Masse des pensions année
PIBREF <- matrix(nrow=4,ncol=200) # PIB annuel
RATIOPENS <- matrix(nrow=4,ncol=200) # Ratio pension/PIB par année
RATIOFIN <- matrix(nrow=4,ncol=200) # Ratio masse des pensions/masse des salaires par année
RATIODEM <- matrix(nrow=4,ncol=200) # Ratio +60ans/-60ans par année
SALMOY <- matrix(nrow=4,ncol=200) # Salaire moyen par année
PENMOY <- matrix(nrow=4,ncol=200) # Pension moyenne par année
DUREE_LIQ <- matrix(nrow=4,ncol=200) # Duree totale a la liquidation
PLIQ_TOT <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants par année
PLIQ_RG <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants RG
PLIQ_AR <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants ARRCO par année
PLIQ_AG <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants AGIRC par année
PLIQ_FP <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants FP par année
PLIQ_IN <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants indep par année
PLIQ_CN <- matrix(nrow=4,ncol=200) # Pension moyenne liquidants CN
POINTS_CN <- matrix(nrow=4,ncol=200) # Points CN moyens des liquidants par année
CONV_MOY <- matrix(nrow=4,ncol=200) # Coeff Conv moyen des liquidants par année
PENREL <- matrix(nrow=4,ncol=200) # Ratio pension/salaire
TRCMOY <- matrix(nrow=4,ncol=200) # Taux de remplacement cible des liquidants par année
FLUXLIQ <- matrix(nrow=4,ncol=200) # Effectifs de liquidants
AGELIQ <- matrix(nrow=4,ncol=160) # Age de liquidation moyen par année
W <- 2047.501
#### Début de la simulation ####
for (sc in c(3))
{
# Reinitialisation variables
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/DefVarRetr_Destinie.R")) )
load ( (paste0(cheminsource,"Modele/Outils/OutilsBio/BiosDestinie2.RData" )) )
setwd ( (paste0(cheminsource,"Simulations/CN" )) )
duree_liq <- rep(0,taille_max)
points_cn <- rep(0,taille_max)
pension_cn <- rep(0,taille_max)
if (sc==3) {UseOptCN(c("discount"))}
plafond<-8
for (t in 60:160) # Début boucle temporelle
{
print (c(sc,t))
if (sc>1)
{
AnneeDepartCN <- 115
TauxCotCN[t] <- 0.23
if (t <110) {RendementCN[t] <- PIB[t]/PIB[t-1]-1}
else {RendementCN[t] <- log((MSAL[sc,t-1]*Prix[t-1])/(MSAL[sc,t-6]*Prix[t-6]))/5}
RendementCNPrev[t] <- RendementCN[t]
RevaloCN[t+1] <- Prix[t]/Prix[t-1]
UseConv(55,70,t)
# print (CoeffConv[60:80])
}
if (sc>1 && t==AnneeDepartCN)
{
for (i in 1:taille_max)
{
if (ageliq[i]==0)
{
statut[i,statut[i,1:160]>1]<- statut[i,statut[i,1:160]>1]+100
}
}
}
# Liquidations
for (i in 1:taille_max) # Début boucle individuelle
{
# Liquidation
if ((t-t_naiss[i]>=55) && (ageliq[i]==0))
{
if (sc>1 && t>=AnneeDepartCN)
{
Leg <- t
UseLeg(Leg,t_naiss[i])
SimDir(i,t,"exo",ageref)
}
else
# Cas ou CN n'ont pas démarré, liquidation taux plein et conservation age
{
Leg <- t
UseLeg(Leg,t_naiss[i])
SimDir(i,t,"TP")
}
if (t_liq[i]==t)
{
points_cn[i] <- points_cn_pri+points_cn_fp+points_cn_ind
pension_cn[i] <- pension_cn_pri[i]+pension_cn_fp[i]+pension_cn_ind[i]
pliq_rg[i,sc] <- pension_rg[i]
pliq_[i,sc] <- pension[i]
dar_[i,sc] <- dar[i]
duree_liq[i] <- duree_tot
if (points_cn[i]>0) {conv[i] <- pension_cn[i]/points_cn[i]}
if (sc==1) {ageref[i] <- t-t_naiss[i]}
}
}
else if (ageliq[i]>0)
{
Revalo(i,t,t+1)
}
} # Fin de la boucle individuelle
ageliq_[,sc] <- t_liq-t_naiss
actifs <- (salaire[,t]>0) & (statut[,t]>0)
retraites <- (pension>0) & (statut[,t]>0)
liquidants <- (pension>0) & (t_liq==t)
if (sc >0)
{
DUREE_LIQ[sc,t] <- mean(duree_liq[liquidants])
PLIQ_TOT[sc,t] <- mean(pension[liquidants])
PLIQ_RG[sc,t] <- mean(pension_rg[liquidants])
PLIQ_AR[sc,t] <- mean(pension_ar[liquidants])
PLIQ_AG[sc,t] <- mean(pension_ag[liquidants])
PLIQ_FP[sc,t] <- mean(pension_fp[liquidants])
PLIQ_IN[sc,t] <- mean(pension_in[liquidants])
PLIQ_CN[sc,t] <- mean(pension_cn[liquidants])
SALMOY[sc,t] <- mean (salaire[actifs,t]/Prix[t])
MPENS[sc,t] <- W*sum(pension[retraites])/Prix[t]
MSAL[sc,t] <- W*sum(salaire[actifs,t])/Prix[t]
PIBREF[sc,t] <- MSAL[sc,t]*(PIB[109]/Prix[109])/MSAL[sc,109]
RATIOPENS[sc,t] <- MPENS[sc,t]/PIBREF[sc,t]
TRCMOY[sc,t] <- mean (TRC[which(t_liq[]==t)])
RATIOFIN[sc,t] <- MPENS[sc,t]/MSAL[sc,t]
RATIODEM[sc,t] <- sum ((t-t_naiss>=60) & (statut[,t]>0))/sum((t-t_naiss<60) &(statut[,t]>0))
PENMOY[sc,t] <- mean (pension[retraites]/Prix[t])
POINTS_CN[sc,t] <- mean (points_cn[which( (pension>0)&t_liq==t)])
CONV_MOY[sc,t] <- mean (conv[ which( (pension>0)&t_liq==t)])
PENREL[sc,t] <- PENMOY[sc,t]/SALMOY[sc,t]
FLUXLIQ[sc,t] <- W*sum(t_liq==t)
AGELIQ[sc,t] <- mean ( ageliq[which(t_liq==t)])
}
} # Fin de de la boucle temporelle
} # Fin boucle scenarios
save.image(paste0(cheminsource,"Simulations/CN/CNeq2(95).RData"))
#### Sorties ####
par(mar=c(6.1, 3.1, 4.1, 2.1))
par(xpd=TRUE)
plot (seq(2010,2059,by=1),RATIOFIN[1,110:159],xlab="Annee", ylab="ratio retraite/PIB",ylim=c(0.20,0.28),col="grey0",lwd=4,type="l")
points (seq(2010,2059,by=1),RATIOFIN[2,110:159],lwd=4,col="grey80",type="l")
points (seq(2010,2059,by=1),RATIOFIN[3,110:159],lwd=4,col="grey40",type="l")
title("Graphe 6a : Evolution du ratio retraites/salaires \n(taux 23%, ANC)", cex.main = 0.9)
legend.text <- c("Scénario de référénce","CN","CN réduction droits acquis")
legend("bottom",inset=c(-0.2,-0.55),cex=0.8,legend.text, fill=c("grey0","grey80","grey40"))
|
#The first function, makeCacheMatrix creates a special "matrix" object that can cache its inverse.
# set the value of the vector
# get the value of the vector
# set the value of the mean
# get the value of the mean
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache for its inverse. The cache is
  # invalidated (reset to NULL) whenever the stored matrix is replaced.
  cached_inverse <- NULL

  replace_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  fetch_matrix <- function() x
  store_inverse <- function(value) cached_inverse <<- value
  fetch_inverse <- function() cached_inverse

  # The element names are the public contract relied on by cacheSolve().
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
# The function matrix.cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the
# inverse from the cache.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  ## object), computing it at most once and serving the cached copy on
  ## every subsequent call.
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Sample run:
# x = rbind(c(1, 2), c(3, 4))
# m = makeCacheMatrix(x)
# m$get()
# [,1] [,2]
# [1,] 1 2
# [2,] 3 4
## No cache in the first run
# > cacheSolve(m)
# [,1] [,2]
# [1,] -2.0 1.0
# [2,] 1.5 -0.5
# Retrieving from the cache in the second run
# > cacheSolve(m)
# getting cached data
# [,1] [,2]
# [1,] -2.0 1.0
# [2,] 1.5 -0.5
| /cachematrix.R | no_license | rprog-032/coursera-rprog-assignment2 | R | false | false | 1,424 | r |
#The first function, makeCacheMatrix creates a special "matrix" object that can cache its inverse.
# set the value of the vector
# get the value of the vector
# set the value of the mean
# get the value of the mean
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The function matrix.cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the
# inverse from the cache.
# This function assumes that the matrix is always invertible.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
## Sample run:
# x = rbind(c(1, 2), c(3, 4))
# m = makeCacheMatrix(x)
# m$get()
# [,1] [,2]
# [1,] 1 2
# [2,] 3 4
## No cache in the first run
# > cacheSolve(m)
# [,1] [,2]
# [1,] -2.0 1.0
# [2,] 1.5 -0.5
# Retrieving from the cache in the second run
# > cacheSolve(m)
# getting cached data
# [,1] [,2]
# [1,] -2.0 1.0
# [2,] 1.5 -0.5
|
# ---- Stage 0: Document information ----
# Title: Processing script for CEP survey databases
# Authors: Ojeda, P & Venegas, M
# Date: 27 - 9 - 2020
# ---- Stage 1: Load packages ----
library(pacman)
pacman::p_load(tidyverse, summarytools, ggplot2, sjmisc, stargazer, openxlsx, readxl, sjlabelled, car, haven)
# ---- Stage 2: Load databases ----
# One object per CEP survey wave in the 2006-2008 period. Wave 52 is
# distributed as an Excel file; the remaining waves are SPSS (.sav) files.
bd2006_2008_52 <- read_excel("input/data/2006-2008/cep52junjul2006.xls")
bd2006_2008_54 <- read_sav("input/data/2006-2008/cep54dic2006.sav")
bd2006_2008_55 <- read_sav("input/data/2006-2008/cep55jun2007.sav")
bd2006_2008_56 <- read_sav("input/data/2006-2008/cep56novdic2007.sav")
bd2006_2008_57 <- read_sav("input/data/2006-2008/cep57jun2008.sav")
bd2006_2008_58 <- read_sav("input/data/2006-2008/cep58novdic2008.sav")
# ---- Stage 3: Data processing ----
## Keep only the variables used downstream: socioeconomic status, schooling,
## age, sex, institutional-trust items, party identification and political
## position. Question codes differ across waves.
#template: dataset <- select(dataset, ses, schooling, age, sex, trust items, party id, political position)
bd2006_2008_52 <- select(bd2006_2008_52, dd28, ESCOLARIDAD, dd2, dd1, mb11) # mb16 (1-10 left-right scale) is not kept
bd2006_2008_54 <- select(bd2006_2008_54, DDP22, ESCOLARIDAD, DDP2, DDP1, TE_2P4a:TE_2P4h, MBP13, MBP17)
bd2006_2008_55 <- select(bd2006_2008_55, DDP30, ESCOLARIDAD, DDP2, DDP1, TE2P2_A:TE2P2_H, MBP13, MBP14) # MBP16 (1-10 scale question) is not kept
bd2006_2008_56 <- select(bd2006_2008_56, DDP21, ESCOLARIDAD, DDP2, DDP1, MBP14, MBP15)
bd2006_2008_57 <- select(bd2006_2008_57, ddp30, ESCOLARIDAD, ddp2, ddp1, te3p08_a:te3p08_e, mbp14, mbp16)
bd2006_2008_58 <- select(bd2006_2008_58, DDP23, ESCOLARIDAD, DDP2, DDP1, TE2P13_A:TE2P13_M, MBP14, MBP16)
## Renombrarlas
### 2006_2008: CEP 52
bd2006_2008_52 <- rename(bd2006_2008_52,
edad = dd2,
id_part = mb11,
nse = dd28,
sexo = dd1,
esc = ESCOLARIDAD)
### 2006_2008: CEP 54
bd2006_2008_54 <- rename(bd2006_2008_54,
edad = DDP2,
id_part = MBP13,
pos_pol = MBP17,
nse = DDP22,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_ffaa = TE_2P4a,
conf_tribunalesjust = TE_2P4b,
conf_partidos = TE_2P4d)
### 2006_2008: CEP 55
bd2006_2008_55 <- rename(bd2006_2008_55,
edad = DDP2,
id_part = MBP13,
pos_pol = MBP14,
nse = DDP30,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_partido1 = TE2P2_A,
conf_partido2 = TE2P2_B,
conf_partido3 = TE2P2_C,
conf_partido4 = TE2P2_D,
conf_partido5 = TE2P2_E,
conf_partido6 = TE2P2_F,
conf_partido7 = TE2P2_G,
conf_partido8 = TE2P2_H)
### 2006_2008: CEP 56
bd2006_2008_56 <- rename(bd2006_2008_56,
edad = DDP2,
id_part = MBP14,
pos_pol = MBP15,
nse = DDP21,
sexo = DDP1,
esc = ESCOLARIDAD)
### 2006_2008: CEP 57
bd2006_2008_57 <- rename(bd2006_2008_57,
edad = ddp2,
id_part = mbp14,
pos_pol = mbp16,
nse = ddp30,
sexo = ddp1,
esc = ESCOLARIDAD,
conf_congreso = te3p08_a,
conf_comercio = te3p08_b,
conf_iglesias = te3p08_c,
conf_sistjudicial = te3p08_d,
conf_sistemaedu = te3p08_e)
### 2006_2008: CEP 58
bd2006_2008_58 <- rename(bd2006_2008_58,
edad = DDP2,
id_part = MBP14,
pos_pol = MBP16,
nse = DDP23,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_iglesiacat = TE2P13_A,
conf_ffaa = TE2P13_B,
conf_iglesiaev = TE2P13_C,
conf_partidos = TE2P13_D,
conf_tribunalesjust = TE2P13_E,
conf_diarios = TE2P13_F,
conf_tele = TE2P13_G,
conf_radios = TE2P13_H,
conf_sindicatos = TE2P13_I,
conf_carabineros = TE2P13_J,
conf_gobierno = TE2P13_K,
conf_congreso = TE2P13_L,
conf_emppriv = TE2P13_M)
#---- 3.1 Tratamiento de sociodemográficas ----
#---- 3.1.1 Frecuencias ----
## 2006_2008: CEP 52
frq(bd2006_2008_52$nse) # NSE
frq(bd2006_2008_52$esc) # Escolaridad
frq(bd2006_2008_52$edad) # Edad
frq(bd2006_2008_52$sexo) # Sexo
## 2006_2008: CEP 54
frq(bd2006_2008_54$nse)
frq(bd2006_2008_54$esc)
frq(bd2006_2008_54$edad)
frq(bd2006_2008_54$sexo)
## 2006_2008: CEP 55
frq(bd2006_2008_55$nse)
frq(bd2006_2008_55$esc)
frq(bd2006_2008_55$edad)
frq(bd2006_2008_55$sexo)
## 2006_2008: CEP 56
frq(bd2006_2008_56$nse) # NSE
frq(bd2006_2008_56$esc) # Escolaridad
frq(bd2006_2008_56$edad) # Edad
frq(bd2006_2008_56$sexo) # Sexo
## 2006_2008: CEP 57
frq(bd2006_2008_57$nse)
frq(bd2006_2008_57$esc)
frq(bd2006_2008_57$edad)
frq(bd2006_2008_57$sexo)
## 2006_2008: CEP 58
frq(bd2006_2008_58$nse) # NSE
frq(bd2006_2008_58$esc) # Escolaridad
frq(bd2006_2008_58$edad) # Edad
frq(bd2006_2008_58$sexo) # Sexo
#---- 3.1.2 Recodificación ----
# Remover etiquetas
bd2006_2008_52 <- sjlabelled::remove_all_labels(bd2006_2008_52)
bd2006_2008_54 <- sjlabelled::remove_all_labels(bd2006_2008_54)
bd2006_2008_55 <- sjlabelled::remove_all_labels(bd2006_2008_55)
bd2006_2008_56 <- sjlabelled::remove_all_labels(bd2006_2008_56)
bd2006_2008_57 <- sjlabelled::remove_all_labels(bd2006_2008_57)
bd2006_2008_58 <- sjlabelled::remove_all_labels(bd2006_2008_58)
## 2006_2008: CEP 52
bd2006_2008_52$nse <- car::recode(bd2006_2008_52$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_52$esc <- car::recode(bd2006_2008_52$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_52$edad <- car::recode(bd2006_2008_52$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_52$sexo <- car::recode(bd2006_2008_52$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
## 2006_2008: CEP 54
bd2006_2008_54$nse <- car::recode(bd2006_2008_54$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_54$esc <- car::recode(bd2006_2008_54$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_54$edad <- car::recode(bd2006_2008_54$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_54$sexo <- car::recode(bd2006_2008_54$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
## 2006_2008: CEP 55
bd2006_2008_55$nse <- car::recode(bd2006_2008_55$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_55$esc <- car::recode(bd2006_2008_55$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_55$edad <- car::recode(bd2006_2008_55$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_55$sexo <- car::recode(bd2006_2008_55$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
## 2006_2008: CEP 56
bd2006_2008_56$nse <- car::recode(bd2006_2008_56$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_56$esc <- car::recode(bd2006_2008_56$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_56$edad <- car::recode(bd2006_2008_56$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_56$sexo <- car::recode(bd2006_2008_56$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
## 2006_2008: CEP 57
bd2006_2008_57$nse <- car::recode(bd2006_2008_57$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_57$esc <- car::recode(bd2006_2008_57$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_57$edad <- car::recode(bd2006_2008_57$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_57$sexo <- car::recode(bd2006_2008_57$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
## 2006_2008: CEP 58
bd2006_2008_58$nse <- car::recode(bd2006_2008_58$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = T)
bd2006_2008_58$esc <- car::recode(bd2006_2008_58$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = T)
bd2006_2008_58$edad <- car::recode(bd2006_2008_58$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = T)
bd2006_2008_58$sexo <- car::recode(bd2006_2008_58$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = T)
#---- 3.2 Tratamiento de variables de confianza ----
#---- 3.2.1 Frecuencias ----
## 2006_2008: CEP 54
frq(bd2006_2008_54$conf_ffaa)
frq(bd2006_2008_54$conf_tribunalesjust)
frq(bd2006_2008_54$conf_partidos)
## 2006_2008: CEP 55
frq(bd2006_2008_55$conf_partido1) # No me llama la atencion el usar esto.
frq(bd2006_2008_55$conf_partido2)
frq(bd2006_2008_55$conf_partido3)
frq(bd2006_2008_55$conf_partido4)
frq(bd2006_2008_55$conf_partido5)
frq(bd2006_2008_55$conf_partido6)
## 2006_2008: CEP 57
frq(bd2006_2008_57$conf_congreso)
frq(bd2006_2008_57$conf_comercio)
frq(bd2006_2008_57$conf_iglesias)
frq(bd2006_2008_57$conf_sistjudicial)
frq(bd2006_2008_57$conf_sistemaedu)
## 2006_2008: CEP 58
frq(bd2006_2008_58$conf_iglesiacat) # Combinar:iglesia
frq(bd2006_2008_58$conf_ffaa)
frq(bd2006_2008_58$conf_iglesiaev) # Combinar:iglesia
frq(bd2006_2008_58$conf_partidos)
frq(bd2006_2008_58$conf_tribunalesjust)
frq(bd2006_2008_58$conf_diarios) # Combinar: MMC
frq(bd2006_2008_58$conf_tele) # Combinar: MMC
frq(bd2006_2008_58$conf_radios) # Combinar: MMC
frq(bd2006_2008_58$conf_sindicatos) # No usar
frq(bd2006_2008_58$conf_carabineros) # Combinar: instituciones del orden
frq(bd2006_2008_58$conf_gobierno)
frq(bd2006_2008_58$conf_congreso)
frq(bd2006_2008_58$conf_emppriv)
#---- 3.2.2 Recodificacion ----
## 2006_2008: CEP 54
bd2006_2008_54$conf_ffaa <- car::recode(bd2006_2008_54$conf_ffaa,"3 = 'Alta o media confianza'; c(1,2) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_54$conf_tribunalesjust <- car::recode(bd2006_2008_54$conf_tribunalesjust,"3 = 'Alta o media confianza'; c(1,2) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_54$conf_partidos <- car::recode(bd2006_2008_54$conf_partidos,"3 = 'Alta o media confianza'; c(1,2) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
## 2006_2008: CEP 55
bd2006_2008_55$conf_partido1 <- car::recode(bd2006_2008_55$conf_partido1, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_55$conf_partido2 <- car::recode(bd2006_2008_55$conf_partido2, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_55$conf_partido3 <- car::recode(bd2006_2008_55$conf_partido3, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_55$conf_partido4 <- car::recode(bd2006_2008_55$conf_partido4, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_55$conf_partido5 <- car::recode(bd2006_2008_55$conf_partido5, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
bd2006_2008_55$conf_partido6 <- car::recode(bd2006_2008_55$conf_partido6, "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'", as.factor = T)
## 2006_2008: CEP 57
bd2006_2008_57$conf_congreso <- car::recode(bd2006_2008_57$conf_congreso, "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA", as.factor = T)
bd2006_2008_57$conf_comercio <- car::recode(bd2006_2008_57$conf_comercio, "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA", as.factor = T)
bd2006_2008_57$conf_iglesias <- car::recode(bd2006_2008_57$conf_iglesias, "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA", as.factor = T)
bd2006_2008_57$conf_sistjudicial <- car::recode(bd2006_2008_57$conf_sistjudicial, "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA", as.factor = T)
# CEP 57: collapse the confidence scale to 'Mucha confianza' vs 'Otra' (8 = DK -> NA)
bd2006_2008_57$conf_sistemaedu <- car::recode(bd2006_2008_57$conf_sistemaedu, "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA", as.factor = TRUE)
## 2006_2008: CEP 58
# All CEP 58 confidence items share the same recode specification, so apply it
# in a loop instead of repeating the same call 13 times.
# NOTE(review): the codebook comment below says 1 = 'Mucha confianza' and
# 4 = 'Ninguna confianza', yet this spec maps 4 -> 'Mucha confianza'. Confirm
# against the CEP 58 questionnaire that this wave really reverses the scale;
# otherwise the labels are inverted.
conf_vars_58 <- c("conf_iglesiacat", "conf_ffaa", "conf_iglesiaev",
                  "conf_partidos", "conf_tribunalesjust", "conf_diarios",
                  "conf_tele", "conf_radios", "conf_sindicatos",
                  "conf_carabineros", "conf_gobierno", "conf_congreso",
                  "conf_emppriv")
for (v in conf_vars_58) {
  bd2006_2008_58[[v]] <- car::recode(bd2006_2008_58[[v]],
                                     "c(1, 2, 3) = 'Otra'; 4 = 'Mucha confianza'; c(8, 9) = NA",
                                     as.factor = TRUE)
}
# Reminder: original coding
# 1. Mucha confianza (a lot of trust)
# 2. Bastante confianza (quite a lot of trust)
# 3. No mucha confianza (not much trust)
# 4. Ninguna confianza (no trust)
# 8. No sabe (don't know)
# 9. No contesta (no answer)
#---- 3.2.3 Other adjustments ----
## 2006_2008: CEP 58
### Build a combined church-as-institution variable: it is set only when the
### Catholic and Evangelical items agree; on disagreement it is left NA.
bd2006_2008_58$conf_iglesia[bd2006_2008_58$conf_iglesiacat == 'Mucha confianza' & bd2006_2008_58$conf_iglesiaev == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_iglesia[bd2006_2008_58$conf_iglesiacat == 'Otra' & bd2006_2008_58$conf_iglesiaev == 'Otra'] <- 'Otra'
### Build a mass-media (MMC) variable: any two of diarios/tele/radios agreeing
### decide the value (a 2-of-3 majority vote; with three items a tie cannot occur).
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Mucha confianza' & bd2006_2008_58$conf_tele == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Otra' & bd2006_2008_58$conf_radios == 'Otra'] <- 'Otra'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Otra' & bd2006_2008_58$conf_tele == 'Otra'] <- 'Otra'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Otra' & bd2006_2008_58$conf_radios == 'Otra'] <- 'Otra'
# NOTE(review): the two all-three-agree rules below are already implied by the
# pairwise rules above (when all three items match, some pair matches too), so
# they never change the result; presumably kept for clarity of intent.
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza' & bd2006_2008_58$conf_diarios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Otra' & bd2006_2008_58$conf_radios == 'Otra' & bd2006_2008_58$conf_diarios == 'Otra'] <- 'Otra'
# Frequency check of the new variable
frq(bd2006_2008_58$conf_mmc)
### Drop the confidence variables that will not be used further.
### 2006_2008-2008: CEP 58
bd2006_2008_58 <- select(bd2006_2008_58,-conf_gobierno, -conf_radios, -conf_tele, -conf_sindicatos, -conf_carabineros, -conf_diarios)
#---- 3.2.4 Save confidence databases ----
save(bd2006_2008_58, file = "input/data/bd2006_2008_58.RData")
#---- 3.3 Processing of party-identification and political-position variables ----
#---- 3.3.1 Frequencies ----
frq(bd2006_2008_52$id_part)
# The usual political-position battery is not present in CEP 52
frq(bd2006_2008_54$id_part)
frq(bd2006_2008_54$pos_pol)
frq(bd2006_2008_55$id_part)
frq(bd2006_2008_55$pos_pol)
frq(bd2006_2008_56$id_part)
frq(bd2006_2008_56$pos_pol)
frq(bd2006_2008_57$id_part)
frq(bd2006_2008_57$pos_pol)
frq(bd2006_2008_58$id_part)
frq(bd2006_2008_58$pos_pol)
# ---- 3.3.2 Recodification ----
# CEP 52-57 share one id_part coding and every wave with pos_pol shares one
# pos_pol coding, so each specification is defined once and reused
# (also fixes T -> TRUE: T is a reassignable alias, not a reserved word).
rec_id_part <- "c(2,4) = 'Derecha';
                c(1,5,6,7,8) = 'Centro-Izquierda concertación';
                3 = 'Izquierda extraconcertación';
                10 = 'Ninguno';
                c(9,88,99) = NA"
rec_pos_pol <- "1 = 'Derecha';
                2 = 'Centro Derecha';
                3 = 'Centro';
                4 = 'Centro Izquierda';
                5 = 'Izquierda';
                6 = 'Independiente';
                7 = 'Ninguna';
                c(8, 9) = NA"
# 2006 - 2008: CEP 52 (this wave has no pos_pol battery)
bd2006_2008_52$id_part <- car::recode(bd2006_2008_52$id_part, rec_id_part, as.factor = TRUE)
# 2006 - 2008: CEP 54
bd2006_2008_54$id_part <- car::recode(bd2006_2008_54$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_54$pos_pol <- car::recode(bd2006_2008_54$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 55
bd2006_2008_55$id_part <- car::recode(bd2006_2008_55$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_55$pos_pol <- car::recode(bd2006_2008_55$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 56
bd2006_2008_56$id_part <- car::recode(bd2006_2008_56$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_56$pos_pol <- car::recode(bd2006_2008_56$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 57
bd2006_2008_57$id_part <- car::recode(bd2006_2008_57$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_57$pos_pol <- car::recode(bd2006_2008_57$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 58 uses a different id_part coding (12 = none; 9, 10, 11 -> NA)
bd2006_2008_58$id_part <- car::recode(bd2006_2008_58$id_part, "c(2,4) = 'Derecha';
                                      c(1,5,6,7,8) = 'Centro-Izquierda concertación';
                                      3 = 'Izquierda extraconcertación';
                                      12 = 'Ninguno';
                                      c(9,10,11,88,99) = NA", as.factor = TRUE)
bd2006_2008_58$pos_pol <- car::recode(bd2006_2008_58$pos_pol, rec_pos_pol, as.factor = TRUE)
# ---- 3.4 Save final databases ----
save(bd2006_2008_52, file = "input/data/bd2006_2008_52.RData")
save(bd2006_2008_54, file = "input/data/bd2006_2008_54.RData")
save(bd2006_2008_55, file = "input/data/bd2006_2008_55.RData")
save(bd2006_2008_56, file = "input/data/bd2006_2008_56.RData")
save(bd2006_2008_57, file = "input/data/bd2006_2008_57.RData")
save(bd2006_2008_58, file = "input/data/bd2006_2008_58.RData")
| /processing/processing_cep_2006-2008.R | no_license | Martin-Venegas-M/CEP | R | false | false | 21,626 | r | # ---- Etapa 0: Información del documento ----
# Título: Documento de procesamiento para bases de datos CEP
# Autor: Ojeda, P & Venegas, M
# Fecha: 27 - 9 - 2020
# ---- Etapa 1: Cargar paquetes ----
library(pacman)
pacman::p_load(tidyverse, summarytools, ggplot2, sjmisc, stargazer, openxlsx, readxl, sjlabelled, car, haven)
# ---- Etapa 2: Cargar bases de datos ---
bd2006_2008_52 <- read_excel("input/data/2006-2008/cep52junjul2006.xls")
bd2006_2008_54 <- read_sav("input/data/2006-2008/cep54dic2006.sav")
bd2006_2008_55 <- read_sav("input/data/2006-2008/cep55jun2007.sav")
bd2006_2008_56 <- read_sav("input/data/2006-2008/cep56novdic2007.sav")
bd2006_2008_57 <- read_sav("input/data/2006-2008/cep57jun2008.sav")
bd2006_2008_58 <- read_sav("input/data/2006-2008/cep58novdic2008.sav")
# ---- Etapa 3: Procesamiento de datos ----
## Seleccionar variables a utilizar
#base de datos <- select(base de datos, nse, escolaridad, edad, sexo, variables confianza, identificacion partidaria, posicion politica)
bd2006_2008_52 <- select(bd2006_2008_52, dd28, ESCOLARIDAD, dd2, dd1, mb11) # mb16 variable sobre escala 1 a 10 en espectro politico
bd2006_2008_54 <- select(bd2006_2008_54, DDP22, ESCOLARIDAD, DDP2, DDP1, TE_2P4a:TE_2P4h, MBP13, MBP17)
bd2006_2008_55 <- select(bd2006_2008_55, DDP30, ESCOLARIDAD, DDP2, DDP1, TE2P2_A:TE2P2_H, MBP13, MBP14) #MBP16 preguta en escala de 1 a 10
bd2006_2008_56 <- select(bd2006_2008_56, DDP21, ESCOLARIDAD, DDP2, DDP1, MBP14, MBP15)
bd2006_2008_57 <- select(bd2006_2008_57, ddp30, ESCOLARIDAD, ddp2, ddp1, te3p08_a:te3p08_e, mbp14, mbp16)
bd2006_2008_58 <- select(bd2006_2008_58, DDP23, ESCOLARIDAD, DDP2, DDP1, TE2P13_A:TE2P13_M, MBP14, MBP16)
## Renombrarlas
### 2006_2008: CEP 52
bd2006_2008_52 <- rename(bd2006_2008_52,
edad = dd2,
id_part = mb11,
nse = dd28,
sexo = dd1,
esc = ESCOLARIDAD)
### 2006_2008: CEP 54
bd2006_2008_54 <- rename(bd2006_2008_54,
edad = DDP2,
id_part = MBP13,
pos_pol = MBP17,
nse = DDP22,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_ffaa = TE_2P4a,
conf_tribunalesjust = TE_2P4b,
conf_partidos = TE_2P4d)
### 2006_2008: CEP 55
bd2006_2008_55 <- rename(bd2006_2008_55,
edad = DDP2,
id_part = MBP13,
pos_pol = MBP14,
nse = DDP30,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_partido1 = TE2P2_A,
conf_partido2 = TE2P2_B,
conf_partido3 = TE2P2_C,
conf_partido4 = TE2P2_D,
conf_partido5 = TE2P2_E,
conf_partido6 = TE2P2_F,
conf_partido7 = TE2P2_G,
conf_partido8 = TE2P2_H)
### 2006_2008: CEP 56
bd2006_2008_56 <- rename(bd2006_2008_56,
edad = DDP2,
id_part = MBP14,
pos_pol = MBP15,
nse = DDP21,
sexo = DDP1,
esc = ESCOLARIDAD)
### 2006_2008: CEP 57
bd2006_2008_57 <- rename(bd2006_2008_57,
edad = ddp2,
id_part = mbp14,
pos_pol = mbp16,
nse = ddp30,
sexo = ddp1,
esc = ESCOLARIDAD,
conf_congreso = te3p08_a,
conf_comercio = te3p08_b,
conf_iglesias = te3p08_c,
conf_sistjudicial = te3p08_d,
conf_sistemaedu = te3p08_e)
### 2006_2008: CEP 58
bd2006_2008_58 <- rename(bd2006_2008_58,
edad = DDP2,
id_part = MBP14,
pos_pol = MBP16,
nse = DDP23,
sexo = DDP1,
esc = ESCOLARIDAD,
conf_iglesiacat = TE2P13_A,
conf_ffaa = TE2P13_B,
conf_iglesiaev = TE2P13_C,
conf_partidos = TE2P13_D,
conf_tribunalesjust = TE2P13_E,
conf_diarios = TE2P13_F,
conf_tele = TE2P13_G,
conf_radios = TE2P13_H,
conf_sindicatos = TE2P13_I,
conf_carabineros = TE2P13_J,
conf_gobierno = TE2P13_K,
conf_congreso = TE2P13_L,
conf_emppriv = TE2P13_M)
#---- 3.1 Processing of sociodemographic variables ----
#---- 3.1.1 Frequencies (sanity check of raw codings before recoding) ----
## 2006_2008: CEP 52
frq(bd2006_2008_52$nse)  # socioeconomic status
frq(bd2006_2008_52$esc)  # schooling
frq(bd2006_2008_52$edad) # age
frq(bd2006_2008_52$sexo) # sex
## 2006_2008: CEP 54
frq(bd2006_2008_54$nse)
frq(bd2006_2008_54$esc)
frq(bd2006_2008_54$edad)
frq(bd2006_2008_54$sexo)
## 2006_2008: CEP 55
frq(bd2006_2008_55$nse)
frq(bd2006_2008_55$esc)
frq(bd2006_2008_55$edad)
frq(bd2006_2008_55$sexo)
## 2006_2008: CEP 56
frq(bd2006_2008_56$nse)
frq(bd2006_2008_56$esc)
frq(bd2006_2008_56$edad)
frq(bd2006_2008_56$sexo)
## 2006_2008: CEP 57
frq(bd2006_2008_57$nse)
frq(bd2006_2008_57$esc)
frq(bd2006_2008_57$edad)
frq(bd2006_2008_57$sexo)
## 2006_2008: CEP 58
frq(bd2006_2008_58$nse)
frq(bd2006_2008_58$esc)
frq(bd2006_2008_58$edad)
frq(bd2006_2008_58$sexo)
#---- 3.1.2 Recodification ----
# Strip SPSS variable/value labels so the recodes work on plain vectors
bd2006_2008_52 <- sjlabelled::remove_all_labels(bd2006_2008_52)
bd2006_2008_54 <- sjlabelled::remove_all_labels(bd2006_2008_54)
bd2006_2008_55 <- sjlabelled::remove_all_labels(bd2006_2008_55)
bd2006_2008_56 <- sjlabelled::remove_all_labels(bd2006_2008_56)
bd2006_2008_57 <- sjlabelled::remove_all_labels(bd2006_2008_57)
bd2006_2008_58 <- sjlabelled::remove_all_labels(bd2006_2008_58)
# All six waves share the same sociodemographic codings, so recode each data
# frame with one helper instead of repeating four car::recode() calls per wave
# (also fixes T -> TRUE: T is a reassignable alias, not a reserved word).
recode_sociodem <- function(df) {
  df$nse  <- car::recode(df$nse, "1 = 'ABC1'; 2 = 'C2'; 3 = 'C3'; 4 = 'D'; 5 = 'E'", as.factor = TRUE)
  df$esc  <- car::recode(df$esc, "1 = '0-3'; 2 = '4-8'; 3 = '9-12'; 4 = '13 y mas'; 5 = NA", as.factor = TRUE)
  df$edad <- car::recode(df$edad, "18:24 = '18-24'; 25:34 = '25-34'; 35:54 = '35-54'; else = '55 y mas'", as.factor = TRUE)
  df$sexo <- car::recode(df$sexo, "1 = 'Hombre'; 2 = 'Mujer'", as.factor = TRUE)
  df
}
bd2006_2008_52 <- recode_sociodem(bd2006_2008_52)
bd2006_2008_54 <- recode_sociodem(bd2006_2008_54)
bd2006_2008_55 <- recode_sociodem(bd2006_2008_55)
bd2006_2008_56 <- recode_sociodem(bd2006_2008_56)
bd2006_2008_57 <- recode_sociodem(bd2006_2008_57)
bd2006_2008_58 <- recode_sociodem(bd2006_2008_58)
#---- 3.2 Processing of confidence variables ----
#---- 3.2.1 Frequencies (raw codings; recoded in 3.2.2) ----
## 2006_2008: CEP 54
frq(bd2006_2008_54$conf_ffaa)
frq(bd2006_2008_54$conf_tribunalesjust)
frq(bd2006_2008_54$conf_partidos)
## 2006_2008: CEP 55
frq(bd2006_2008_55$conf_partido1) # per-party trust items; their usefulness is doubtful
frq(bd2006_2008_55$conf_partido2)
frq(bd2006_2008_55$conf_partido3)
frq(bd2006_2008_55$conf_partido4)
frq(bd2006_2008_55$conf_partido5)
frq(bd2006_2008_55$conf_partido6)
## 2006_2008: CEP 57
frq(bd2006_2008_57$conf_congreso)
frq(bd2006_2008_57$conf_comercio)
frq(bd2006_2008_57$conf_iglesias)
frq(bd2006_2008_57$conf_sistjudicial)
frq(bd2006_2008_57$conf_sistemaedu)
## 2006_2008: CEP 58
frq(bd2006_2008_58$conf_iglesiacat) # Combine: church
frq(bd2006_2008_58$conf_ffaa)
frq(bd2006_2008_58$conf_iglesiaev) # Combine: church
frq(bd2006_2008_58$conf_partidos)
frq(bd2006_2008_58$conf_tribunalesjust)
frq(bd2006_2008_58$conf_diarios) # Combine: mass media (MMC)
frq(bd2006_2008_58$conf_tele) # Combine: mass media (MMC)
frq(bd2006_2008_58$conf_radios) # Combine: mass media (MMC)
frq(bd2006_2008_58$conf_sindicatos) # Not used
frq(bd2006_2008_58$conf_carabineros) # Combine: law-and-order institutions
frq(bd2006_2008_58$conf_gobierno)
frq(bd2006_2008_58$conf_congreso)
frq(bd2006_2008_58$conf_emppriv)
#---- 3.2.2 Recodification ----
# Each wave applies one recode specification to a set of confidence items, so
# loop over the variable names instead of repeating the same call per item
# (also fixes T -> TRUE: T is a reassignable alias, not a reserved word).
## 2006_2008: CEP 54 (3 = high/medium trust; 1-2 = low/none; 8-9 = DK/NA)
for (v in c("conf_ffaa", "conf_tribunalesjust", "conf_partidos")) {
  bd2006_2008_54[[v]] <- car::recode(bd2006_2008_54[[v]],
                                     "3 = 'Alta o media confianza'; c(1,2) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'",
                                     as.factor = TRUE)
}
## 2006_2008: CEP 55 (scale direction differs from CEP 54: 1-2 = high/medium)
for (v in paste0("conf_partido", 1:6)) {
  bd2006_2008_55[[v]] <- car::recode(bd2006_2008_55[[v]],
                                     "c(1,2) = 'Alta o media confianza'; c(3,4) = 'Baja o nula confianza'; c(8,9) = 'NS/NC'",
                                     as.factor = TRUE)
}
## 2006_2008: CEP 57 (1 = much trust; 2-5 collapsed to 'Otra'; 8 = DK -> NA)
for (v in c("conf_congreso", "conf_comercio", "conf_iglesias",
            "conf_sistjudicial", "conf_sistemaedu")) {
  bd2006_2008_57[[v]] <- car::recode(bd2006_2008_57[[v]],
                                     "1 = 'Mucha confianza'; c(2, 3, 4, 5) = 'Otra'; 8 = NA",
                                     as.factor = TRUE)
}
## 2006_2008: CEP 58
# NOTE(review): per the codebook comment that follows this section,
# 1 = 'Mucha confianza' and 4 = 'Ninguna confianza'; mapping
# 4 -> 'Mucha confianza' looks inverted. Confirm the CEP 58 scale direction
# before relying on these labels.
conf_vars_58 <- c("conf_iglesiacat", "conf_ffaa", "conf_iglesiaev",
                  "conf_partidos", "conf_tribunalesjust", "conf_diarios",
                  "conf_tele", "conf_radios", "conf_sindicatos",
                  "conf_carabineros", "conf_gobierno", "conf_congreso",
                  "conf_emppriv")
for (v in conf_vars_58) {
  bd2006_2008_58[[v]] <- car::recode(bd2006_2008_58[[v]],
                                     "c(1, 2, 3) = 'Otra'; 4 = 'Mucha confianza'; c(8, 9) = NA",
                                     as.factor = TRUE)
}
# Do not forget:
# Original coding
#1. Mucha confianza (a lot of trust)
#2. Bastante confianza (quite a lot of trust)
#3. No mucha confianza (not much trust)
#4. Ninguna confianza (no trust)
#8. No sabe (don't know)
#9. No contesta (no answer)
#---- 3.2.3 Other adjustments ----
## 2006_2008: CEP 58
### Build a combined church-as-institution variable: set only when the
### Catholic and Evangelical items agree; on disagreement it stays NA.
bd2006_2008_58$conf_iglesia[bd2006_2008_58$conf_iglesiacat == 'Mucha confianza' & bd2006_2008_58$conf_iglesiaev == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_iglesia[bd2006_2008_58$conf_iglesiacat == 'Otra' & bd2006_2008_58$conf_iglesiaev == 'Otra'] <- 'Otra'
### Build a mass-media (MMC) variable: any two of diarios/tele/radios agreeing
### decide the value (a 2-of-3 majority vote; with three items a tie cannot occur).
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Mucha confianza' & bd2006_2008_58$conf_tele == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Otra' & bd2006_2008_58$conf_radios == 'Otra'] <- 'Otra'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_diarios == 'Otra' & bd2006_2008_58$conf_tele == 'Otra'] <- 'Otra'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Otra' & bd2006_2008_58$conf_radios == 'Otra'] <- 'Otra'
# NOTE(review): the two all-three-agree rules below are already implied by the
# pairwise rules above and never change the result; presumably kept for intent.
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Mucha confianza' & bd2006_2008_58$conf_radios == 'Mucha confianza' & bd2006_2008_58$conf_diarios == 'Mucha confianza'] <- 'Mucha confianza'
bd2006_2008_58$conf_mmc[bd2006_2008_58$conf_tele == 'Otra' & bd2006_2008_58$conf_radios == 'Otra' & bd2006_2008_58$conf_diarios == 'Otra'] <- 'Otra'
# Frequency check of the new variable
frq(bd2006_2008_58$conf_mmc)
### Drop the confidence variables that will not be used further.
### 2006_2008-2008: CEP 58
bd2006_2008_58 <- select(bd2006_2008_58,-conf_gobierno, -conf_radios, -conf_tele, -conf_sindicatos, -conf_carabineros, -conf_diarios)
#---- 3.2.4 Save confidence databases ----
save(bd2006_2008_58, file = "input/data/bd2006_2008_58.RData")
#---- 3.3 Processing of party-identification and political-position variables ----
#---- 3.3.1 Frequencies ----
frq(bd2006_2008_52$id_part)
# The usual political-position battery is not present in CEP 52
frq(bd2006_2008_54$id_part)
frq(bd2006_2008_54$pos_pol)
frq(bd2006_2008_55$id_part)
frq(bd2006_2008_55$pos_pol)
frq(bd2006_2008_56$id_part)
frq(bd2006_2008_56$pos_pol)
frq(bd2006_2008_57$id_part)
frq(bd2006_2008_57$pos_pol)
frq(bd2006_2008_58$id_part)
frq(bd2006_2008_58$pos_pol)
# ---- 3.3.2 Recodification ----
# CEP 52-57 share one id_part coding and every wave with pos_pol shares one
# pos_pol coding, so each specification is defined once and reused
# (also fixes T -> TRUE: T is a reassignable alias, not a reserved word).
rec_id_part <- "c(2,4) = 'Derecha';
                c(1,5,6,7,8) = 'Centro-Izquierda concertación';
                3 = 'Izquierda extraconcertación';
                10 = 'Ninguno';
                c(9,88,99) = NA"
rec_pos_pol <- "1 = 'Derecha';
                2 = 'Centro Derecha';
                3 = 'Centro';
                4 = 'Centro Izquierda';
                5 = 'Izquierda';
                6 = 'Independiente';
                7 = 'Ninguna';
                c(8, 9) = NA"
# 2006 - 2008: CEP 52 (this wave has no pos_pol battery)
bd2006_2008_52$id_part <- car::recode(bd2006_2008_52$id_part, rec_id_part, as.factor = TRUE)
# 2006 - 2008: CEP 54
bd2006_2008_54$id_part <- car::recode(bd2006_2008_54$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_54$pos_pol <- car::recode(bd2006_2008_54$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 55
bd2006_2008_55$id_part <- car::recode(bd2006_2008_55$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_55$pos_pol <- car::recode(bd2006_2008_55$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 56
bd2006_2008_56$id_part <- car::recode(bd2006_2008_56$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_56$pos_pol <- car::recode(bd2006_2008_56$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 57
bd2006_2008_57$id_part <- car::recode(bd2006_2008_57$id_part, rec_id_part, as.factor = TRUE)
bd2006_2008_57$pos_pol <- car::recode(bd2006_2008_57$pos_pol, rec_pos_pol, as.factor = TRUE)
# 2006 - 2008: CEP 58 uses a different id_part coding (12 = none; 9, 10, 11 -> NA)
bd2006_2008_58$id_part <- car::recode(bd2006_2008_58$id_part, "c(2,4) = 'Derecha';
                                      c(1,5,6,7,8) = 'Centro-Izquierda concertación';
                                      3 = 'Izquierda extraconcertación';
                                      12 = 'Ninguno';
                                      c(9,10,11,88,99) = NA", as.factor = TRUE)
bd2006_2008_58$pos_pol <- car::recode(bd2006_2008_58$pos_pol, rec_pos_pol, as.factor = TRUE)
# ---- 3.4 Save final databases ----
save(bd2006_2008_52, file = "input/data/bd2006_2008_52.RData")
save(bd2006_2008_54, file = "input/data/bd2006_2008_54.RData")
save(bd2006_2008_55, file = "input/data/bd2006_2008_55.RData")
save(bd2006_2008_56, file = "input/data/bd2006_2008_56.RData")
save(bd2006_2008_57, file = "input/data/bd2006_2008_57.RData")
save(bd2006_2008_58, file = "input/data/bd2006_2008_58.RData")
|
#' Run fMRI quality assurance procedure on a NIfTI data file
#'
#' @param data_file input data in nifti format, a file chooser will open if not set
#' @param roi_width roi analysis region in pixels (default=21)
#' @param slice_num slice number for analysis (default=middle slice)
#' @param skip number of initial volumes to exclude from the analysis (default=2)
#' @param tr override the TR detected from data (seconds)
#' @param poly_det_ord polynomial order used for detrending (default=3)
#' @param spike_detect generate k-space spike-detection plot (default=FALSE)
#' @param x_pos x position of ROI (default=center of gravity)
#' @param y_pos y position of ROI (default=center of gravity)
#' @param plot_title add a title to the png and pdf plots
#' @param last_vol last volume number to use in the analysis
#' @param gen_png output png plot (default=TRUE)
#' @param gen_res_csv output csv results (default=TRUE)
#' @param gen_pdf output pdf plot (default=FALSE)
#' @param gen_spec_csv output csv of spectral points (default=FALSE)
#' @param png_fname png plot filename
#' @param res_fname csv results filename
#' @param pdf_fname pdf plot filename
#' @param spec_fname csv spectral data filename
#' @param verbose provide text output while running (default=TRUE)
#' @return dataframe of QA metrics
#' @examples
#' fname <- system.file("extdata", "qa_data.nii.gz", package = "fmriqa")
#' res <- run_fmriqa(data_file = fname, gen_png = FALSE, gen_res_csv = FALSE, tr = 3)
#'
#' @import viridisLite
#' @import RNifti
#' @import ggplot2
#' @import reshape2
#' @import gridExtra
#' @import grid
#' @import tidyr
#' @import optparse
#' @import tcltk
#' @import pracma
#' @importFrom grDevices graphics.off pdf png
#' @importFrom stats fft mad poly quantile sd median
#' @importFrom utils write.csv
#' @export
run_fmriqa <- function(data_file = NULL, roi_width = 21, slice_num = NULL,
skip = 2, tr = NULL, poly_det_ord = 3, spike_detect = FALSE,
x_pos = NULL, y_pos = NULL, plot_title = NULL,
last_vol = NULL, gen_png = TRUE, gen_res_csv = TRUE,
gen_pdf = FALSE, gen_spec_csv = FALSE, png_fname = NULL,
res_fname = NULL, pdf_fname = NULL, spec_fname = NULL,
verbose = TRUE) {
if (is.null(data_file)) {
filters <- matrix(c("NIfTI", ".nii.gz", "NIfTI", ".nii",
"All files", "*"),
3, 2, byrow = TRUE)
data_file <- tk_choose.files(caption = "Select nifti data file for analysis",
multi = FALSE, filters = filters)
if (length(data_file) == 0) {
stop("Error : input file not given.")
}
}
basename <- sub(".nii.gz$", "", data_file)
basename <- sub(".nii$", "", basename)
if (is.null(res_fname)) {
csv_file <- paste(basename, "_qa_results.csv", sep = "")
} else {
csv_file <- res_fname
}
if (is.null(png_fname)) {
png_file <- paste(basename, "_qa_plot.png", sep = "")
} else {
png_file <- png_fname
}
if (is.null(pdf_fname)) {
pdf_file <- paste(basename, "_qa_plot.pdf", sep = "")
} else {
pdf_file <- pdf_fname
}
if (is.null(spec_fname)) {
spec_file <- paste(basename, "_qa_spec.csv", sep = "")
} else {
spec_file <- spec_fname
}
#image_cols <- inferno(64)
image_cols <- viridis(64)
if (verbose) cat(paste("Reading data : ", data_file, "\n\n", sep = ""))
data <- readNifti(data_file)
x_dim <- dim(data)[1]
y_dim <- dim(data)[2]
z_dim <- dim(data)[3]
if (is.null(tr)) tr <- pixdim(data)[4]
if (is.null(slice_num)) slice_num <- ceiling(dim(data)[3] / 2)
if (is.null(last_vol)) {
N <- dim(data)[4]
} else {
N <- last_vol
}
dyns <- N - skip
t <- seq(from = 0, by = tr, length.out = dyns)
#t_full <- seq(from = 0, by = tr, length.out = N)
if (verbose) {
cat("Basic analysis parameters\n")
cat("-------------------------\n")
cat(paste("X,Y dims : ", x_dim, "x", y_dim, "\n", sep = ""))
cat(paste("Slices : ", z_dim, "\n", sep = ""))
cat(paste("TR : ", round(tr, 2), "s\n", sep = ""))
cat(paste("Slice # : ", slice_num, "\n", sep = ""))
cat(paste("ROI width : ", roi_width, "\n", sep = ""))
cat(paste("Total vols : ", dim(data)[4], "\n", sep = ""))
cat(paste("Analysis vols : ", dyns, "\n", sep = ""))
}
# scale data
# scl_slope <- dumpNifti(data)$scl_slope
# data <- data * scl_slope
# chop out the slice we will be working with
data_raw <- data[,,slice_num, (skip + 1):N]
# detrend data with polynomial
X <- poly(1:dyns, poly_det_ord)[,]
X <- cbind(1,X)
data_detrend <- apply(data_raw, c(1,2), detrend_fast, X)
data_detrend <- aperm(data_detrend, c(2,3,1))
# calculate temporal fluctuation noise (TFN)
TFN <- apply(data_detrend, c(1,2), sd)
av_image <- apply(data_raw, c(1,2), mean)
SFNR_full <- av_image / TFN
# calc diff image
odd_dynamics <- data_raw[,,c(TRUE, FALSE)]
even_dynamics <- data_raw[,,c(FALSE, TRUE)]
if (length(odd_dynamics) > length(even_dynamics)) {
odd_dynamics <- odd_dynamics[,,-(dim(odd_dynamics)[3])]
warning("Odd number of dynamic scans, removing last one for the odd even diff calculation.")
}
DIFF <- apply(odd_dynamics, c(1, 2), sum) - apply(even_dynamics, c(1, 2), sum)
# flip lr direction
# SFNR_full <- flipud(SFNR_full)
# av_image <- flipud(av_image)
# DIFF <- flipud(DIFF)
# TFN <- flipud(TFN)
# set na values to zero
SFNR_full[is.na(SFNR_full)] <- 0
# threshold the image to reduce inhomogenity for cog calc
cog_image <- av_image > quantile(av_image, .6)
#cog_image <- av_image
if (is.null(x_pos)) {
x_pos <- sum(array(1:x_dim, c(x_dim, y_dim)) * cog_image) / sum(cog_image)
x_pos <- round(x_pos)
}
if (is.null(y_pos)) {
y_pos <- sum(t(array(1:y_dim, c(y_dim, x_dim))) * cog_image) / sum(cog_image)
y_pos <- round(y_pos)
}
# get ROI indices
ROI_x <- get_pixel_range(x_pos, roi_width)
ROI_y <- get_pixel_range(y_pos, roi_width)
SFNR <- SFNR_full[ROI_x, ROI_y]
av_SFNR <- mean(SFNR)
DIFF_ROI <- DIFF[ROI_x, ROI_y]
signal_summary_value <- mean(av_image[ROI_x, ROI_y])
SNR <- signal_summary_value / sqrt((sd(DIFF_ROI) ^ 2) / dyns)
slice_data_ROI <- data_raw[ROI_x, ROI_y,]
mean_sig_intensity_t <- apply(slice_data_ROI, 3, mean)
mean_sig_intensity <- mean(mean_sig_intensity_t)
mean_sig_intensity_t_detrend <- detrend_fast(mean_sig_intensity_t, X)
y_fit <- mean_sig_intensity_t - mean_sig_intensity_t_detrend
residuals <- mean_sig_intensity_t - y_fit
sd_roi <- sd(residuals)
percent_fluc <- 100.0 * sd_roi / mean_sig_intensity
percent_drift_fit <- 100.0 * (max(y_fit) - min(y_fit)) / mean_sig_intensity
percent_drift <- 100.0 * (max(mean_sig_intensity_t) -
min(mean_sig_intensity_t)) / mean_sig_intensity
detrend_res <- mean_sig_intensity_t - y_fit
zp <- 4
spec <- Mod(fft(c(detrend_res, rep(0,(zp - 1) * dyns))))[1:(dyns * zp / 2)]
max_spec_outlier <- max(spec) / mad(spec)
# x <- 1:(zp * N / 2)
t <- seq(from = 0, by = tr, length.out = dyns)
vols <- seq(from = skip + 1, by = 1, length.out = dyns)
freq <- seq(from = 0, to = (1 - 1/(zp * dyns / 2))/(tr * 2),
length.out = zp * dyns / 2)
# get a mean time course for each slice
slice_tc <- apply(data[,,,(skip + 1):N, drop = FALSE], c(3, 4), mean)
# detrend
X <- poly(1:dyns, poly_det_ord)[,]
X <- cbind(1, X)
slice_tc_dt <- apply(slice_tc, 1, detrend_fast, X)
max_tc_outlier <- max(abs(slice_tc_dt)) / mad(slice_tc_dt)
# normalise
# slice_tc_dt <- scale(slice_tc_dt, center = F)
# calculate RDC
CV <- vector(length = roi_width)
CV_ideal <- vector(length = roi_width)
for (n in (1:roi_width)) {
x_range <- get_pixel_range(x_pos, n)
y_range <- get_pixel_range(y_pos, n)
slice_data_ROI <- data_raw[x_range, y_range,, drop = F]
mean_sig_intensity_t <- apply(slice_data_ROI, 3, mean)
mean_sig_intensity <- mean(mean_sig_intensity_t)
# detrend
X <- poly(1:dyns, poly_det_ord)[,]
X <- cbind(1,X)
mean_sig_intensity_t_dt <- detrend_fast(y = mean_sig_intensity_t, X = X)
sd_sig_intensity <- sd(mean_sig_intensity_t_dt)
CV[n] <- 100 * sd_sig_intensity / mean_sig_intensity
CV_ideal[n] <- CV[1] / n
}
RDC <- CV[1] / CV[length(CV)]
line1 <- paste("Mean signal : ", round(mean_sig_intensity, 1), "\n", sep = "")
line2 <- paste("STD : ", round(sd_roi, 2), "\n", sep = "")
line3 <- paste("Percent fluc : ", round(percent_fluc, 2), "\n", sep = "")
line4 <- paste("Drift : ", round(percent_drift, 2), "\n", sep = "")
line5 <- paste("Drift fit : ", round(percent_drift_fit, 2), "\n", sep = "")
line6 <- paste("SNR : ", round(SNR, 1), "\n", sep = "")
line7 <- paste("SFNR : ", round(av_SFNR, 1), "\n", sep = "")
line8 <- paste("RDC : ", round(RDC, 2), "\n", sep = "")
line9 <- paste("TC outlier : ", round(max_tc_outlier, 2), "\n", sep = "")
line10 <- paste("Spec outlier : ", round(max_spec_outlier, 2), "\n", sep = "")
if (verbose) {
cat("\nQA metrics\n")
cat("----------\n")
cat(line1)
cat(line2)
cat(line3)
cat(line4)
cat(line5)
cat(line6)
cat(line7)
cat(line8)
cat(line9)
cat(line10)
}
if (is.null(plot_title)) plot_title <- NA
results_tab <- data.frame(data_file, title = plot_title,
mean_signal = mean_sig_intensity, std = sd_roi,
percent_fluc = percent_fluc, drift = percent_drift,
drift_fit = percent_drift_fit, snr = SNR,
sfnr = av_SFNR, rdc = RDC, tc_outlier = max_tc_outlier,
spec_outlier = max_spec_outlier)
if (gen_res_csv) {
write.csv(results_tab, csv_file, row.names = FALSE)
}
if (gen_spec_csv) {
spec_out <- data.frame(t(spec))
colnames(spec_out) <- freq
spec_out <- cbind(data.frame(data_file, title = plot_title), spec_out)
write.csv(spec_out, spec_file, row.names = FALSE)
}
# plotting stuff below
if (gen_pdf | gen_png) {
# spike detection plot
if (spike_detect) {
cat("\nCalculating k-space spike detection map...\n")
# calc diff volumes
diff_vols <- apply(data[,,,(skip + 1):N, drop = FALSE], c(1,2,3), diff)
diff_vols <- aperm(diff_vols, c(2,3,4,1))
# transform all slices into k-space
diff_vols_fft <- apply(diff_vols, c(3,4), fft)
dim(diff_vols_fft) <- dim(diff_vols)
# calc the maximum slice projection in k-space
max_slice_proj <- apply(abs(diff_vols_fft), c(1,2), max)
max_slice_proj <- apply(apply(max_slice_proj, 1, fftshift), 1,
fftshift)
#max_z <- max(max_slice_proj) / 4
max_z <- mad(max_slice_proj) * 8 + median(max_slice_proj)
max_slice_proj <- ifelse(max_slice_proj > max_z, max_z, max_slice_proj)
max_slice_proj_plot <- ggplot(melt(max_slice_proj), aes(Var1, Var2, fill = value)) +
geom_raster(interpolate = TRUE) +
scale_fill_gradientn(colours = image_cols) +
coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
title = "Max. proj. of k-space slice differences") +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
}
theme_set(theme_bw())
marg <- theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), "cm"))
raw_text <- paste(line1, line2, line3, line4, line5, line6, line7, line8, line9, line10,
sep = "")
text <- textGrob(raw_text, x = 0.2, just = 0, gp = gpar(fontfamily = "mono", fontsize = 14))
# these are to appease R checks
Measured <- NULL
Theoretical <- NULL
group <- NULL
roi_width_vec <- NULL
fit <- NULL
tc <- NULL
Var1 <- NULL
Var2 <- NULL
value <- NULL
# RDC plot
rdc_df <- data.frame(roi_width_vec = 1:roi_width, Theoretical = CV_ideal, Measured = CV)
rdc_df <- gather(rdc_df, group, CV, c(Measured, Theoretical))
rdc_plot <- ggplot(rdc_df, aes(x = roi_width_vec, y = CV, colour = group)) + geom_line() +
geom_point() + scale_x_log10(limits = c(1,100)) +
scale_y_log10(limits = c(0.01,10), breaks = c(0.01,0.1,1,10)) +
labs(y = "100*CV", x = "ROI width (pixels)", title = "RDC plot") + marg +
theme(legend.position = c(0.8, 0.8)) + scale_color_manual(values = c("black","red"))
tc_fit <- data.frame(t = vols, tc = mean_sig_intensity_t, fit = y_fit)
tc_plot <- ggplot(tc_fit, aes(t)) + geom_line(aes(y = tc)) +
geom_line(aes(y = fit), color = "red") +
theme(legend.position = "none") +
labs(y = "Intensity (a.u.)", x = "Time (volumes)",
title = "Intensity drift plot") + marg
spec_plot <- qplot(freq, spec, xlab = "Frequency (Hz)",
ylab = "Intensity (a.u.)", geom = "line",
main = "Fluctuation spectrum") + marg
x_st = ROI_x[1]
x_end = ROI_x[length(ROI_x)]
y_st = ROI_y[1]
y_end = ROI_y[length(ROI_y)]
lcol <- "white"
roi_a <- geom_segment(aes(x = x_st, xend = x_st, y = y_st, yend = y_end),
colour = lcol)
roi_b <- geom_segment(aes(x = x_end, xend = x_end, y = y_st, yend = y_end),
colour = lcol)
roi_c <- geom_segment(aes(x = x_st, xend = x_end, y = y_st, yend = y_st),
colour = lcol)
roi_d <- geom_segment(aes(x = x_st, xend = x_end, y = y_end, yend = y_end),
colour = lcol)
top_val <- quantile(SFNR_full,0.999)
SFNR_full <- ifelse(SFNR_full > top_val, top_val, SFNR_full)
sfnr_plot <- ggplot(melt(SFNR_full), aes(Var1, Var2, fill = value)) +
geom_raster(interpolate = TRUE) +
scale_fill_gradientn(colours = image_cols) +
coord_fixed(ratio = 1) + labs(x = "", y = "", fill = "Intensity",
title = "SFNR image") +
marg + roi_a + roi_b + roi_c + roi_d +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
# useful for checking where the ROI really is
# av_image[ROI_x,ROI_y] = 0
av_plot <- ggplot(melt(av_image), aes(Var1, Var2, fill = value)) +
geom_raster(interpolate = TRUE) +
scale_fill_gradientn(colours = image_cols) +
coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
title = "Mean image") +
marg + roi_a + roi_b + roi_c + roi_d +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
diff_plot <- ggplot(melt(DIFF), aes(Var1, Var2, fill = value)) +
geom_raster(interpolate = TRUE) +
scale_fill_gradientn(colours = image_cols) +
coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
title = "Odd-even difference") +
marg + roi_a + roi_b + roi_c + roi_d +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
tfn_plot <- ggplot(melt(TFN), aes(Var1, Var2, fill = value)) +
geom_raster(interpolate = TRUE) +
scale_fill_gradientn(colours = image_cols) +
coord_fixed(ratio = 1) +
labs(x = "", y = "", fill = "Intensity",
title = "Temporal fluctuation noise") +
marg + roi_a + roi_b + roi_c + roi_d +
scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
slice_tc_plot <- ggplot(melt(slice_tc_dt), aes(x = Var1 + skip, y = value, group = Var2)) +
geom_line(alpha = 0.5) +
labs(x = "Time (volumes)", y = "Intensity (a.u.)",
title = "Mean slice TCs (detrended)")
if (is.na(plot_title)) {
title <- NULL
} else {
title <- textGrob(plot_title, gp = gpar(fontsize = 25))
}
if (gen_pdf) {
pdf(pdf_file, height = 10, width = 16)
if (spike_detect) {
lay <- rbind(c(1,5,6,9),
c(4,2,2,3),
c(7,8,8,10))
grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
slice_tc_plot, rdc_plot, max_slice_proj_plot, layout_matrix = lay,
top = title)
} else {
lay <- rbind(c(1,5,6,9),
c(4,2,2,3),
c(7,8,8,8))
grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
slice_tc_plot, rdc_plot, layout_matrix = lay,
top = title)
}
graphics.off()
}
if (gen_png) {
png(png_file, height = 800, width = 1200, type = "cairo")
if (spike_detect) {
lay <- rbind(c(1,5,6,9),
c(4,2,2,3),
c(7,8,8,10))
grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
slice_tc_plot, rdc_plot, max_slice_proj_plot, layout_matrix = lay,
top = title)
} else {
lay <- rbind(c(1,5,6,9),
c(4,2,2,3),
c(7,8,8,8))
grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
slice_tc_plot, rdc_plot, layout_matrix = lay,
top = title)
}
graphics.off()
}
}
# end of plotting
if (verbose) {
if (gen_pdf) cat(paste("\nPDF report : ", pdf_file, sep = ""))
if (gen_spec_csv) cat(paste("\nCSV spec file : ", spec_file, sep = ""))
if (gen_png) cat(paste("\nPNG report : ", png_file, sep = ""))
if (gen_res_csv) cat(paste("\nCSV results : ", csv_file, "\n\n", sep = ""))
}
results_tab
}
#' @import RcppEigen
detrend_fast <- function(y, X) {
  # Least-squares fit of y against design matrix X; keeping only the
  # residuals removes the modelled (polynomial) trend from y.
  fit <- fastLmPure(y = y, X = X)
  fit$residual
}
get_pixel_range <- function(center, width) {
  # Pixel indices of a 'width'-wide window around 'center'; for even
  # widths the extra pixel sits on the high side of the centre.
  lo <- floor(center - floor(width / 2))
  hi <- floor(center + ceiling(width / 2)) - 1
  lo:hi
}
| /R/run_fmriqa.R | no_license | muschellij2/fmriqa | R | false | false | 17,847 | r | #' Run fMRI quality assurance procedure on a NIfTI data file
#'
#' @param data_file input data in nifti format, a file chooser will open if not set
#' @param roi_width roi analysis region in pixels (default=21)
#' @param slice_num slice number for analysis (default=middle slice)
#' @param skip number of initial volumes to exclude from the analysis (default=2)
#' @param tr override the TR detected from data (seconds)
#' @param poly_det_ord polynomial order used for detrending (default=3)
#' @param spike_detect generate k-space spike-detection plot (default=FALSE)
#' @param x_pos x position of ROI (default=center of gravity)
#' @param y_pos y position of ROI (default=center of gravity)
#' @param plot_title add a title to the png and pdf plots
#' @param last_vol last volume number to use in the analysis
#' @param gen_png output png plot (default=TRUE)
#' @param gen_res_csv output csv results (default=TRUE)
#' @param gen_pdf output pdf plot (default=FALSE)
#' @param gen_spec_csv output csv of spectral points (default=FALSE)
#' @param png_fname png plot filename
#' @param res_fname csv results filename
#' @param pdf_fname pdf plot filename
#' @param spec_fname csv spectral data filename
#' @param verbose provide text output while running (default=TRUE)
#' @return dataframe of QA metrics
#' @examples
#' fname <- system.file("extdata", "qa_data.nii.gz", package = "fmriqa")
#' res <- run_fmriqa(data_file = fname, gen_png = FALSE, gen_res_csv = FALSE, tr = 3)
#'
#' @import viridisLite
#' @import RNifti
#' @import ggplot2
#' @import reshape2
#' @import gridExtra
#' @import grid
#' @import tidyr
#' @import optparse
#' @import tcltk
#' @import pracma
#' @importFrom grDevices graphics.off pdf png
#' @importFrom stats fft mad poly quantile sd median
#' @importFrom utils write.csv
#' @export
run_fmriqa <- function(data_file = NULL, roi_width = 21, slice_num = NULL,
                       skip = 2, tr = NULL, poly_det_ord = 3, spike_detect = FALSE,
                       x_pos = NULL, y_pos = NULL, plot_title = NULL,
                       last_vol = NULL, gen_png = TRUE, gen_res_csv = TRUE,
                       gen_pdf = FALSE, gen_spec_csv = FALSE, png_fname = NULL,
                       res_fname = NULL, pdf_fname = NULL, spec_fname = NULL,
                       verbose = TRUE) {
  # prompt interactively for an input file when one isn't supplied
  if (is.null(data_file)) {
    filters <- matrix(c("NIfTI", ".nii.gz", "NIfTI", ".nii",
                        "All files", "*"),
                      3, 2, byrow = TRUE)
    data_file <- tk_choose.files(caption = "Select nifti data file for analysis",
                                 multi = FALSE, filters = filters)
    if (length(data_file) == 0) {
      stop("Error : input file not given.")
    }
  }
  # default output paths are derived from the input file basename
  basename <- sub(".nii.gz$", "", data_file)
  basename <- sub(".nii$", "", basename)
  if (is.null(res_fname)) {
    csv_file <- paste(basename, "_qa_results.csv", sep = "")
  } else {
    csv_file <- res_fname
  }
  if (is.null(png_fname)) {
    png_file <- paste(basename, "_qa_plot.png", sep = "")
  } else {
    png_file <- png_fname
  }
  if (is.null(pdf_fname)) {
    pdf_file <- paste(basename, "_qa_plot.pdf", sep = "")
  } else {
    pdf_file <- pdf_fname
  }
  if (is.null(spec_fname)) {
    spec_file <- paste(basename, "_qa_spec.csv", sep = "")
  } else {
    spec_file <- spec_fname
  }
  #image_cols <- inferno(64)
  image_cols <- viridis(64)
  if (verbose) cat(paste("Reading data : ", data_file, "\n\n", sep = ""))
  data <- readNifti(data_file)
  x_dim <- dim(data)[1]
  y_dim <- dim(data)[2]
  z_dim <- dim(data)[3]
  # TR defaults to the value stored in the NIfTI header
  if (is.null(tr)) tr <- pixdim(data)[4]
  if (is.null(slice_num)) slice_num <- ceiling(dim(data)[3] / 2)
  if (is.null(last_vol)) {
    N <- dim(data)[4]
  } else {
    N <- last_vol
  }
  # number of volumes actually analysed after dropping the initial 'skip'
  dyns <- N - skip
  t <- seq(from = 0, by = tr, length.out = dyns)
  #t_full <- seq(from = 0, by = tr, length.out = N)
  if (verbose) {
    cat("Basic analysis parameters\n")
    cat("-------------------------\n")
    cat(paste("X,Y dims : ", x_dim, "x", y_dim, "\n", sep = ""))
    cat(paste("Slices : ", z_dim, "\n", sep = ""))
    cat(paste("TR : ", round(tr, 2), "s\n", sep = ""))
    cat(paste("Slice # : ", slice_num, "\n", sep = ""))
    cat(paste("ROI width : ", roi_width, "\n", sep = ""))
    cat(paste("Total vols : ", dim(data)[4], "\n", sep = ""))
    cat(paste("Analysis vols : ", dyns, "\n", sep = ""))
  }
  # scale data
  # scl_slope <- dumpNifti(data)$scl_slope
  # data <- data * scl_slope
  # chop out the slice we will be working with
  data_raw <- data[,,slice_num, (skip + 1):N]
  # detrend data with polynomial: design matrix X is an intercept plus
  # orthogonal polynomials; it is loop-invariant and reused for every
  # detrend_fast call in this function
  X <- poly(1:dyns, poly_det_ord)[,]
  X <- cbind(1,X)
  data_detrend <- apply(data_raw, c(1,2), detrend_fast, X)
  data_detrend <- aperm(data_detrend, c(2,3,1))
  # calculate temporal fluctuation noise (TFN)
  TFN <- apply(data_detrend, c(1,2), sd)
  av_image <- apply(data_raw, c(1,2), mean)
  SFNR_full <- av_image / TFN
  # calc diff image
  odd_dynamics <- data_raw[,,c(TRUE, FALSE)]
  even_dynamics <- data_raw[,,c(FALSE, TRUE)]
  if (length(odd_dynamics) > length(even_dynamics)) {
    odd_dynamics <- odd_dynamics[,,-(dim(odd_dynamics)[3])]
    warning("Odd number of dynamic scans, removing last one for the odd even diff calculation.")
  }
  DIFF <- apply(odd_dynamics, c(1, 2), sum) - apply(even_dynamics, c(1, 2), sum)
  # flip lr direction
  # SFNR_full <- flipud(SFNR_full)
  # av_image <- flipud(av_image)
  # DIFF <- flipud(DIFF)
  # TFN <- flipud(TFN)
  # set na values to zero
  SFNR_full[is.na(SFNR_full)] <- 0
  # threshold the image to reduce inhomogenity for cog calc
  cog_image <- av_image > quantile(av_image, .6)
  #cog_image <- av_image
  # centre-of-gravity of the thresholded mean image gives the default
  # ROI position; note base::t() is still found below even though the
  # local 't' holds a time vector (R skips non-function bindings when
  # resolving a call)
  if (is.null(x_pos)) {
    x_pos <- sum(array(1:x_dim, c(x_dim, y_dim)) * cog_image) / sum(cog_image)
    x_pos <- round(x_pos)
  }
  if (is.null(y_pos)) {
    y_pos <- sum(t(array(1:y_dim, c(y_dim, x_dim))) * cog_image) / sum(cog_image)
    y_pos <- round(y_pos)
  }
  # get ROI indices
  ROI_x <- get_pixel_range(x_pos, roi_width)
  ROI_y <- get_pixel_range(y_pos, roi_width)
  SFNR <- SFNR_full[ROI_x, ROI_y]
  av_SFNR <- mean(SFNR)
  DIFF_ROI <- DIFF[ROI_x, ROI_y]
  signal_summary_value <- mean(av_image[ROI_x, ROI_y])
  SNR <- signal_summary_value / sqrt((sd(DIFF_ROI) ^ 2) / dyns)
  slice_data_ROI <- data_raw[ROI_x, ROI_y,]
  mean_sig_intensity_t <- apply(slice_data_ROI, 3, mean)
  mean_sig_intensity <- mean(mean_sig_intensity_t)
  mean_sig_intensity_t_detrend <- detrend_fast(mean_sig_intensity_t, X)
  # y_fit is the fitted polynomial trend of the ROI mean time course
  y_fit <- mean_sig_intensity_t - mean_sig_intensity_t_detrend
  residuals <- mean_sig_intensity_t - y_fit
  sd_roi <- sd(residuals)
  percent_fluc <- 100.0 * sd_roi / mean_sig_intensity
  percent_drift_fit <- 100.0 * (max(y_fit) - min(y_fit)) / mean_sig_intensity
  percent_drift <- 100.0 * (max(mean_sig_intensity_t) -
                            min(mean_sig_intensity_t)) / mean_sig_intensity
  detrend_res <- mean_sig_intensity_t - y_fit
  # zero-pad by a factor of zp before the FFT to interpolate the spectrum
  zp <- 4
  spec <- Mod(fft(c(detrend_res, rep(0,(zp - 1) * dyns))))[1:(dyns * zp / 2)]
  max_spec_outlier <- max(spec) / mad(spec)
  # x <- 1:(zp * N / 2)
  t <- seq(from = 0, by = tr, length.out = dyns)
  vols <- seq(from = skip + 1, by = 1, length.out = dyns)
  freq <- seq(from = 0, to = (1 - 1/(zp * dyns / 2))/(tr * 2),
              length.out = zp * dyns / 2)
  # get a mean time course for each slice
  slice_tc <- apply(data[,,,(skip + 1):N, drop = FALSE], c(3, 4), mean)
  # detrend (X computed above is unchanged, so it is reused here)
  slice_tc_dt <- apply(slice_tc, 1, detrend_fast, X)
  max_tc_outlier <- max(abs(slice_tc_dt)) / mad(slice_tc_dt)
  # normalise
  # slice_tc_dt <- scale(slice_tc_dt, center = F)
  # calculate RDC from the CV of progressively larger ROIs; the final
  # iteration (n == roi_width) recomputes the same ROI as ROI_x/ROI_y,
  # so mean_sig_intensity(_t) hold the full-ROI values after the loop
  CV <- vector(length = roi_width)
  CV_ideal <- vector(length = roi_width)
  for (n in seq_len(roi_width)) {
    x_range <- get_pixel_range(x_pos, n)
    y_range <- get_pixel_range(y_pos, n)
    slice_data_ROI <- data_raw[x_range, y_range,, drop = FALSE]
    mean_sig_intensity_t <- apply(slice_data_ROI, 3, mean)
    mean_sig_intensity <- mean(mean_sig_intensity_t)
    # detrend (X is loop-invariant; computed once above)
    mean_sig_intensity_t_dt <- detrend_fast(y = mean_sig_intensity_t, X = X)
    sd_sig_intensity <- sd(mean_sig_intensity_t_dt)
    CV[n] <- 100 * sd_sig_intensity / mean_sig_intensity
    CV_ideal[n] <- CV[1] / n
  }
  RDC <- CV[1] / CV[length(CV)]
  line1 <- paste("Mean signal : ", round(mean_sig_intensity, 1), "\n", sep = "")
  line2 <- paste("STD : ", round(sd_roi, 2), "\n", sep = "")
  line3 <- paste("Percent fluc : ", round(percent_fluc, 2), "\n", sep = "")
  line4 <- paste("Drift : ", round(percent_drift, 2), "\n", sep = "")
  line5 <- paste("Drift fit : ", round(percent_drift_fit, 2), "\n", sep = "")
  line6 <- paste("SNR : ", round(SNR, 1), "\n", sep = "")
  line7 <- paste("SFNR : ", round(av_SFNR, 1), "\n", sep = "")
  line8 <- paste("RDC : ", round(RDC, 2), "\n", sep = "")
  line9 <- paste("TC outlier : ", round(max_tc_outlier, 2), "\n", sep = "")
  line10 <- paste("Spec outlier : ", round(max_spec_outlier, 2), "\n", sep = "")
  if (verbose) {
    cat("\nQA metrics\n")
    cat("----------\n")
    cat(line1)
    cat(line2)
    cat(line3)
    cat(line4)
    cat(line5)
    cat(line6)
    cat(line7)
    cat(line8)
    cat(line9)
    cat(line10)
  }
  if (is.null(plot_title)) plot_title <- NA
  results_tab <- data.frame(data_file, title = plot_title,
                            mean_signal = mean_sig_intensity, std = sd_roi,
                            percent_fluc = percent_fluc, drift = percent_drift,
                            drift_fit = percent_drift_fit, snr = SNR,
                            sfnr = av_SFNR, rdc = RDC, tc_outlier = max_tc_outlier,
                            spec_outlier = max_spec_outlier)
  if (gen_res_csv) {
    write.csv(results_tab, csv_file, row.names = FALSE)
  }
  if (gen_spec_csv) {
    spec_out <- data.frame(t(spec))
    colnames(spec_out) <- freq
    spec_out <- cbind(data.frame(data_file, title = plot_title), spec_out)
    write.csv(spec_out, spec_file, row.names = FALSE)
  }
  # plotting stuff below (scalar condition, so use short-circuit ||)
  if (gen_pdf || gen_png) {
    # spike detection plot
    if (spike_detect) {
      cat("\nCalculating k-space spike detection map...\n")
      # calc diff volumes
      diff_vols <- apply(data[,,,(skip + 1):N, drop = FALSE], c(1,2,3), diff)
      diff_vols <- aperm(diff_vols, c(2,3,4,1))
      # transform all slices into k-space
      diff_vols_fft <- apply(diff_vols, c(3,4), fft)
      dim(diff_vols_fft) <- dim(diff_vols)
      # calc the maximum slice projection in k-space
      max_slice_proj <- apply(abs(diff_vols_fft), c(1,2), max)
      max_slice_proj <- apply(apply(max_slice_proj, 1, fftshift), 1,
                              fftshift)
      #max_z <- max(max_slice_proj) / 4
      # clip extreme values at median + 8 * MAD to keep the colour scale useful
      max_z <- mad(max_slice_proj) * 8 + median(max_slice_proj)
      max_slice_proj <- ifelse(max_slice_proj > max_z, max_z, max_slice_proj)
      max_slice_proj_plot <- ggplot(melt(max_slice_proj), aes(Var1, Var2, fill = value)) +
        geom_raster(interpolate = TRUE) +
        scale_fill_gradientn(colours = image_cols) +
        coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
                                      title = "Max. proj. of k-space slice differences") +
        scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
    }
    theme_set(theme_bw())
    marg <- theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), "cm"))
    raw_text <- paste(line1, line2, line3, line4, line5, line6, line7, line8, line9, line10,
                      sep = "")
    text <- textGrob(raw_text, x = 0.2, just = 0, gp = gpar(fontfamily = "mono", fontsize = 14))
    # these are to appease R checks
    Measured <- NULL
    Theoretical <- NULL
    group <- NULL
    roi_width_vec <- NULL
    fit <- NULL
    tc <- NULL
    Var1 <- NULL
    Var2 <- NULL
    value <- NULL
    # RDC plot
    rdc_df <- data.frame(roi_width_vec = 1:roi_width, Theoretical = CV_ideal, Measured = CV)
    rdc_df <- gather(rdc_df, group, CV, c(Measured, Theoretical))
    rdc_plot <- ggplot(rdc_df, aes(x = roi_width_vec, y = CV, colour = group)) + geom_line() +
      geom_point() + scale_x_log10(limits = c(1,100)) +
      scale_y_log10(limits = c(0.01,10), breaks = c(0.01,0.1,1,10)) +
      labs(y = "100*CV", x = "ROI width (pixels)", title = "RDC plot") + marg +
      theme(legend.position = c(0.8, 0.8)) + scale_color_manual(values = c("black","red"))
    tc_fit <- data.frame(t = vols, tc = mean_sig_intensity_t, fit = y_fit)
    tc_plot <- ggplot(tc_fit, aes(t)) + geom_line(aes(y = tc)) +
      geom_line(aes(y = fit), color = "red") +
      theme(legend.position = "none") +
      labs(y = "Intensity (a.u.)", x = "Time (volumes)",
           title = "Intensity drift plot") + marg
    spec_plot <- qplot(freq, spec, xlab = "Frequency (Hz)",
                       ylab = "Intensity (a.u.)", geom = "line",
                       main = "Fluctuation spectrum") + marg
    x_st <- ROI_x[1]
    x_end <- ROI_x[length(ROI_x)]
    y_st <- ROI_y[1]
    y_end <- ROI_y[length(ROI_y)]
    lcol <- "white"
    # white line segments marking the four edges of the analysis ROI
    roi_a <- geom_segment(aes(x = x_st, xend = x_st, y = y_st, yend = y_end),
                          colour = lcol)
    roi_b <- geom_segment(aes(x = x_end, xend = x_end, y = y_st, yend = y_end),
                          colour = lcol)
    roi_c <- geom_segment(aes(x = x_st, xend = x_end, y = y_st, yend = y_st),
                          colour = lcol)
    roi_d <- geom_segment(aes(x = x_st, xend = x_end, y = y_end, yend = y_end),
                          colour = lcol)
    # clip the SFNR image at the 99.9th percentile for display
    top_val <- quantile(SFNR_full,0.999)
    SFNR_full <- ifelse(SFNR_full > top_val, top_val, SFNR_full)
    sfnr_plot <- ggplot(melt(SFNR_full), aes(Var1, Var2, fill = value)) +
      geom_raster(interpolate = TRUE) +
      scale_fill_gradientn(colours = image_cols) +
      coord_fixed(ratio = 1) + labs(x = "", y = "", fill = "Intensity",
                                    title = "SFNR image") +
      marg + roi_a + roi_b + roi_c + roi_d +
      scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
    # useful for checking where the ROI really is
    # av_image[ROI_x,ROI_y] = 0
    av_plot <- ggplot(melt(av_image), aes(Var1, Var2, fill = value)) +
      geom_raster(interpolate = TRUE) +
      scale_fill_gradientn(colours = image_cols) +
      coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
                                    title = "Mean image") +
      marg + roi_a + roi_b + roi_c + roi_d +
      scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
    diff_plot <- ggplot(melt(DIFF), aes(Var1, Var2, fill = value)) +
      geom_raster(interpolate = TRUE) +
      scale_fill_gradientn(colours = image_cols) +
      coord_fixed(ratio = 1) + labs(x = "",y = "", fill = "Intensity",
                                    title = "Odd-even difference") +
      marg + roi_a + roi_b + roi_c + roi_d +
      scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
    tfn_plot <- ggplot(melt(TFN), aes(Var1, Var2, fill = value)) +
      geom_raster(interpolate = TRUE) +
      scale_fill_gradientn(colours = image_cols) +
      coord_fixed(ratio = 1) +
      labs(x = "", y = "", fill = "Intensity",
           title = "Temporal fluctuation noise") +
      marg + roi_a + roi_b + roi_c + roi_d +
      scale_x_continuous(expand = c(0, 0)) + scale_y_continuous(expand = c(0, 0))
    slice_tc_plot <- ggplot(melt(slice_tc_dt), aes(x = Var1 + skip, y = value, group = Var2)) +
      geom_line(alpha = 0.5) +
      labs(x = "Time (volumes)", y = "Intensity (a.u.)",
           title = "Mean slice TCs (detrended)")
    if (is.na(plot_title)) {
      title <- NULL
    } else {
      title <- textGrob(plot_title, gp = gpar(fontsize = 25))
    }
    if (gen_pdf) {
      pdf(pdf_file, height = 10, width = 16)
      if (spike_detect) {
        lay <- rbind(c(1,5,6,9),
                     c(4,2,2,3),
                     c(7,8,8,10))
        grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
                     slice_tc_plot, rdc_plot, max_slice_proj_plot, layout_matrix = lay,
                     top = title)
      } else {
        lay <- rbind(c(1,5,6,9),
                     c(4,2,2,3),
                     c(7,8,8,8))
        grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
                     slice_tc_plot, rdc_plot, layout_matrix = lay,
                     top = title)
      }
      graphics.off()
    }
    if (gen_png) {
      png(png_file, height = 800, width = 1200, type = "cairo")
      if (spike_detect) {
        lay <- rbind(c(1,5,6,9),
                     c(4,2,2,3),
                     c(7,8,8,10))
        grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
                     slice_tc_plot, rdc_plot, max_slice_proj_plot, layout_matrix = lay,
                     top = title)
      } else {
        lay <- rbind(c(1,5,6,9),
                     c(4,2,2,3),
                     c(7,8,8,8))
        grid.arrange(text, tc_plot, spec_plot, av_plot, diff_plot, sfnr_plot, tfn_plot,
                     slice_tc_plot, rdc_plot, layout_matrix = lay,
                     top = title)
      }
      graphics.off()
    }
  }
  # end of plotting
  if (verbose) {
    if (gen_pdf) cat(paste("\nPDF report : ", pdf_file, sep = ""))
    if (gen_spec_csv) cat(paste("\nCSV spec file : ", spec_file, sep = ""))
    if (gen_png) cat(paste("\nPNG report : ", png_file, sep = ""))
    if (gen_res_csv) cat(paste("\nCSV results : ", csv_file, "\n\n", sep = ""))
  }
  results_tab
}
#' @import RcppEigen
# Remove the trend modelled by design matrix X from time course y: fit
# y ~ X by fast least squares (RcppEigen) and return the residuals.
# NOTE(review): '$residual' may rely on R's partial matching if the
# returned list element is actually named 'residuals' — confirm against
# the fastLmPure documentation before restructuring this extraction.
detrend_fast <- function(y, X) {
  fastLmPure(y = y, X = X)$residual
}
get_pixel_range <- function(center, width) {
  # Compute the first and last pixel index of a ROI that is 'width'
  # pixels wide and centred on 'center'; even widths extend one extra
  # pixel on the high side.
  first <- floor(center - floor(width / 2))
  last <- floor(center + ceiling(width / 2)) - 1
  first:last
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6Classes_H5R.R
\name{format.H5R}
\alias{format.H5R}
\title{Formatting of an H5R object}
\usage{
\method{format}{H5R}(x, ...)
}
\arguments{
\item{x}{The object to format}
\item{...}{ignored}
}
\value{
Character vector with the class names in angle-brackets
}
\description{
Formatting of an H5R object
}
\details{
Formatting of H5R objects
}
\author{
Holger Hoefling
}
| /man/format.H5R.Rd | permissive | Novartis/hdf5r | R | false | true | 448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6Classes_H5R.R
\name{format.H5R}
\alias{format.H5R}
\title{Formatting of an H5R object}
\usage{
\method{format}{H5R}(x, ...)
}
\arguments{
\item{x}{The object to format}
\item{...}{ignored}
}
\value{
Character vector with the class names in angle-brackets
}
\description{
Formatting of an H5R object
}
\details{
Formatting of H5R objects
}
\author{
Holger Hoefling
}
|
# Map-related observers
# Show or hide the flammability mask overlay when its checkbox changes
observeEvent(input$flammable, {
  map_proxy <- leafletProxy("Map")
  if (input$flammable) {
    # Hide then re-show every group so the layer stacking order needed
    # for map clicks is maintained when the mask is drawn
    map_proxy %>%
      hideGroup("selected") %>%
      hideGroup("not_selected") %>%
      showGroup("flammable") %>%
      showGroup("not_selected") %>%
      showGroup("selected")
  } else {
    map_proxy %>% hideGroup("flammable")
  }
})
# Keep the highlighted map polygons in sync with the region selectInput
observeEvent(input$regions, {
  chosen <- input$regions
  if (is.null(chosen) || chosen[1] != "AK") {
    map_proxy <- leafletProxy("Map")
    # drop highlight polygons for regions that are no longer selected
    for (region in setdiff(rv$regions, chosen)) {
      map_proxy %>% removeShape(layerId = paste0("selected_", region))
    }
    # (re-)add a highlight polygon for each currently selected region
    for (region in chosen) {
      map_proxy %>%
        addPolygons(data = subset(rv$shp, REGION == region),
                    stroke = TRUE, fillOpacity = 0.2, weight = 1,
                    group = "selected", layerId = paste0("selected_", region))
    }
  }
}, ignoreNULL = FALSE)
# observe map shape click and add or remove selected polygons and update region selectInput
observeEvent(input$Map_shape_click, {
  p <- input$Map_shape_click$id
  # A click can arrive from a shape without a layerId; the original code
  # only checked for NULL after strsplit()/substr(), both of which error
  # on NULL input. Guard up front instead (req halts the handler).
  req(p)
  x <- input$regions
  # "AK" (whole state) selection disables individual region toggling
  if(is.null(x) || x[1]!="AK"){
    # region name after the "selected_" prefix; NA for a base polygon id
    # (assumes region ids themselves contain no underscore — confirm)
    p1 <- strsplit(p, "_")[[1]][2]
    proxy <- leafletProxy("Map")
    if(substr(p, 1, 9)=="selected_"){
      # clicking an already highlighted region removes its highlight
      proxy %>% removeShape(layerId=p)
    } else {
      # clicking a base region draws a highlight polygon on top of it
      proxy %>% addPolygons(data=subset(rv$shp, REGION==p), stroke=TRUE, fillOpacity=0.2, weight=1,
                            group="selected", layerId=paste0("selected_", p))
    }
    # keep the region selectInput consistent with the map state
    if(is.na(p1) && (is.null(x) || !p %in% x)){
      updateSelectInput(session, "regions", selected=c(x, p))
    } else if(!is.na(p1) && p1 %in% x){
      updateSelectInput(session, "regions", selected=x[x!=p1])
    }
  }
})
| /jfsp-archive/other_example_apps/jfsp-dev-aws/observers.R | no_license | ua-snap/snap-r-tools | R | false | false | 1,791 | r | # Map-related observers
# Toggle visibility of the flammability mask layer on checkbox change
observeEvent(input$flammable, {
  proxy_map <- leafletProxy("Map")
  if (input$flammable) {
    # Hiding then re-showing all groups preserves the layer stacking
    # order that map-click handling depends on
    proxy_map %>%
      hideGroup("selected") %>%
      hideGroup("not_selected") %>%
      showGroup("flammable") %>%
      showGroup("not_selected") %>%
      showGroup("selected")
  } else {
    proxy_map %>% hideGroup("flammable")
  }
})
# Redraw map highlight polygons whenever the region selectInput changes
observeEvent(input$regions, {
  selection <- input$regions
  if (is.null(selection) || selection[1] != "AK") {
    proxy_map <- leafletProxy("Map")
    # remove highlights for deselected regions
    for (reg in setdiff(rv$regions, selection)) {
      proxy_map %>% removeShape(layerId = paste0("selected_", reg))
    }
    # draw a highlight polygon for each selected region
    for (reg in selection) {
      proxy_map %>%
        addPolygons(data = subset(rv$shp, REGION == reg),
                    stroke = TRUE, fillOpacity = 0.2, weight = 1,
                    group = "selected", layerId = paste0("selected_", reg))
    }
  }
}, ignoreNULL = FALSE)
# observe map shape click and add or remove selected polygons and update region selectInput
observeEvent(input$Map_shape_click, {
  p <- input$Map_shape_click$id
  # A click can arrive from a shape without a layerId; the original code
  # only checked for NULL after strsplit()/substr(), both of which error
  # on NULL input. Guard up front instead (req halts the handler).
  req(p)
  x <- input$regions
  # "AK" (whole state) selection disables individual region toggling
  if(is.null(x) || x[1]!="AK"){
    # region name after the "selected_" prefix; NA for a base polygon id
    # (assumes region ids themselves contain no underscore — confirm)
    p1 <- strsplit(p, "_")[[1]][2]
    proxy <- leafletProxy("Map")
    if(substr(p, 1, 9)=="selected_"){
      # clicking an already highlighted region removes its highlight
      proxy %>% removeShape(layerId=p)
    } else {
      # clicking a base region draws a highlight polygon on top of it
      proxy %>% addPolygons(data=subset(rv$shp, REGION==p), stroke=TRUE, fillOpacity=0.2, weight=1,
                            group="selected", layerId=paste0("selected_", p))
    }
    # keep the region selectInput consistent with the map state
    if(is.na(p1) && (is.null(x) || !p %in% x)){
      updateSelectInput(session, "regions", selected=c(x, p))
    } else if(!is.na(p1) && p1 %in% x){
      updateSelectInput(session, "regions", selected=x[x!=p1])
    }
  }
})
|
## makeCacheMatrix: build a caching wrapper around a matrix.
## Returns a list of four accessor functions (set, get, setinversa,
## getinversa) that store the matrix and, once computed, its inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # replacing the matrix invalidates any previously cached inverse
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinversa <- function(solve) cached_inv <<- solve
  getinversa <- function() cached_inv
  list(set = set, get = get,
       setinversa = setinversa, getinversa = getinversa)
}
## cacheSolve: return the inverse of the special matrix object made by
## makeCacheMatrix, computing it with solve() only when it is not
## already cached. A message is printed when the cached value is used.
cacheSolve <- function(x, ...) {
  cached <- x$getinversa()
  if (is.null(cached)) {
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinversa(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | montselopezcobo/ProgrammingAssignment2 | R | false | false | 1,358 | r | ## Put comments here that give an overall description of what your
## functions do
# The function 'makeCacheMatrix(x)' creates a 'special matrix' object that stores
# the matrix 'x' (a numeric matrix) and its inverse 'inv'
# The input is the matrix 'x' and the output is a list of
# 4 functions: set, get, setinversa and getinversa
# Cache-aware wrapper around a matrix: stores the matrix and a cached copy of
# its inverse. Returns a list of four accessor functions: set, get,
# setinversa, getinversa.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; invalidated whenever the underlying matrix changes.
  inv <- NULL

  # Replace the stored matrix and clear the stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() x

  # Store a computed inverse in the cache.
  setinversa <- function(solve) inv <<- solve

  # Return the cached inverse (NULL when not yet computed).
  getinversa <- function() inv

  list(set = set, get = get, setinversa = setinversa, getinversa = getinversa)
}
# The function 'cacheSolve' provides the inverse matrix of 'x'
# If the inverse has already been stored, the function retrieves it
# (from the output of 'makeCacheMatrix'), otherwise it is computed
# The input of cacheSolve is the output of makeCacheMatrix (a list of 4 functions)
# The output is the inverse matrix of 'x', providing it exists
# If the inverse exists, the function displays a message
# Return the inverse of the special matrix created by makeCacheMatrix.
# If the inverse is already cached it is returned directly (with a message);
# otherwise it is computed with solve(), cached, and returned.
# '...' is forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinversa()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinversa(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-cohen_exercise.R, R/data-grades.R
\docType{data}
\name{grades}
\alias{grades}
\alias{cohen_exercise}
\title{Data from Cohen, Cohen, West, and Aiken (2003) Chapter 7 (mcfanda GitHub)}
\format{
A data frame of fake data with 245 rows and 4 variables:
\describe{
\item{id}{Participant identification number}
\item{age}{Participant age in years}
  \item{exercise}{Number of years of exercise}
\item{endurance}{Physical endurance}
}
A data frame of fake data with 100 rows and 3 variables:
\describe{
\item{anxiety}{Anxiety rating}
\item{preparation}{Preparation rating}
\item{exam}{Exam grade}
}
}
\source{
\url{https://github.com/mcfanda/gamlj_docs/blob/master/data/exercise.csv}
}
\usage{
data(cohen_exercise)
data(grades)
}
\description{
A data set from Cohen, Cohen, West, and Aiken (2003), Chapter 7
}
\references{
Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). Applied multiple regression. Correlation Analysis for the Behavioral Sciences.
}
\keyword{datasets}
| /man/grades.Rd | no_license | dstanley4/fastInteraction | R | false | true | 1,072 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-cohen_exercise.R, R/data-grades.R
\docType{data}
\name{grades}
\alias{grades}
\alias{cohen_exercise}
\title{Data from Cohen, Cohen, West, and Aiken (2003) Chapter 7 (mcfanda GitHub)}
\format{
A data frame of fake data with 245 rows and 4 variables:
\describe{
\item{id}{Participant identification number}
\item{age}{Participant age in years}
\item{exercise}{Number of years of excercise}
\item{endurance}{Physical endurance}
}
A data frame of fake data with 100 rows and 3 variables:
\describe{
\item{anxiety}{Anxiety rating}
\item{preparation}{Preparation rating}
\item{exam}{Exam grade}
}
}
\source{
\url{https://github.com/mcfanda/gamlj_docs/blob/master/data/exercise.csv}
}
\usage{
data(cohen_exercise)
data(grades)
}
\description{
A data set from Cohen, Cohen, West, and Aiken (2003), Chapter 7
}
\references{
Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). Applied multiple regression. Correlation Analysis for the Behavioral Sciences.
}
\keyword{datasets}
|
# Forecast CA and NY unemployment rates with seasonal ARIMA and VAR models.
# Required libraries (duplicate library() calls removed)
library(lattice)
library(foreign)
library(MASS)
library(strucchange)
library(stats)
library(stats4)
library(KernSmooth)
library(fastICA)
library(cluster)
library(leaps)
library(mgcv)
library(rpart)
library(pan)
library(DAAG)
library(TTR)
library(tis)
library(xts)
library(forecast)
library(TSA)
library(timeSeries)
library(fUnitRoots)
library(fBasics)
library(tseries)
library(timsac)
library(fpp)
library(seasonal)
library(dplyr)
library(vars)

setwd('C:/Users/arra1/Desktop/unemployment_forecast')

# Unemployment-rate series (presumably monthly FRED series CAURN / NYURN —
# confirm frequency against the source files).
input1 <- read.csv('CAURN.csv')
input2 <- read.csv('NYURN.csv')

# Set up abstractions: one data frame per state with a date index and the rate.
time_series1 <- data.frame(index = input1$DATE, val = input1$CAURN)
time_series2 <- data.frame(index = input2$DATE, val = input2$NYURN)

# Plot time series, ACF, PACF
# California
plot(time_series1$val, type = "l", ylab = "CA Unemployment Rate (%)", xlab = "Date", col = "blue")
acf(time_series1$val, main = "CA Unemployment % ACF")
pacf(time_series1$val, main = "CA Unemployment % PACF")
# New York
plot(time_series2$val, type = "l", ylab = "NY Unemployment Rate (%)", xlab = "Date", col = "blue")
acf(time_series2$val, main = "NY Unemployment % ACF")
pacf(time_series2$val, main = "NY Unemployment % PACF")

unemployment_ca <- ts(time_series1$val)
unemployment_ny <- ts(time_series2$val)

# ARIMA (1,0,0), (0,0,12) models
# NOTE(review): seasonal = c(0,0,12) specifies 12 seasonal MA terms, not a
# seasonal period of 12. If a monthly period was intended, use
# seasonal = list(order = c(0,0,1), period = 12) — confirm intent.
model1_ca <- Arima(unemployment_ca, order = c(1,0,0), seasonal = c(0,0,12))
model1_ny <- Arima(unemployment_ny, order = c(1,0,0), seasonal = c(0,0,12))
summary(model1_ca)
summary(model1_ny)

# Model fits: observed series in green, fitted values in red
plot(unemployment_ca, type = "l", col = "green")
lines(model1_ca$fitted, col = "red")
plot(unemployment_ny, type = "l", col = "green")
lines(model1_ny$fitted, col = "red")

# Model residuals
plot(model1_ca$residuals)
plot(model1_ny$residuals)

# Residual ACF & PACF
acf(model1_ca$residuals, main = "CA Residuals ACF")
pacf(model1_ca$residuals, main = "CA Residuals PACF")

# Recursive residuals (fixed "Resursive" typo in the plot titles)
plot(recresid(model1_ca$residuals ~ 1), type = "l", main = "CA Recursive Residuals", ylab = "Residuals")
plot(recresid(model1_ny$residuals ~ 1), type = "l", main = "NY Recursive Residuals", ylab = "Residuals")

# CUSUM tests for parameter stability
plot(efp(model1_ca$residuals ~ 1, type = "Rec-CUSUM"), main = "CA Recursive CUSUM")
plot(efp(model1_ny$residuals ~ 1, type = "Rec-CUSUM"), main = "NY Recursive CUSUM")

# 12-step-ahead model forecasts
plot(forecast(model1_ca, h = 12), main = "CA Seasonal ARIMA Forecast")
plot(forecast(model1_ny, h = 12), main = "NY Seasonal ARIMA Forecast")

# Cross-correlation function between the two series
ccf(unemployment_ca, unemployment_ny, main = "CA & NY Unemployment Cross-Correlation")

# VAR model on both series (redundant nested cbind() removed)
unemployment_tot <- cbind(unemployment_ca, unemployment_ny)
var_model <- VAR(unemployment_tot, p = 6)
summary(var_model)
plot(var_model)
par(mfrow = c(2, 1))
acf(residuals(var_model)[, 1])
pacf(residuals(var_model)[, 1])

# Impulse Response Function
irf(var_model)
plot(irf(var_model, n.ahead = 36), main = "IRF")

# Recursive CUSUM of VAR model
plot(stability(var_model, type = "Rec-CUSUM"), plot.type = "single")

# Granger-causality tests in both directions
grangertest(unemployment_ca ~ unemployment_ny, order = 6)
grangertest(unemployment_ny ~ unemployment_ca, order = 6)

# 12-step-ahead VAR forecast
var.predict <- predict(object = var_model, n.ahead = 12)
plot(var.predict)
| /main.R | no_license | arra1997/unemployment_forecast | R | false | false | 3,451 | r | # Required libraries
# Forecast CA and NY unemployment rates with seasonal ARIMA and VAR models.
# Required libraries (duplicate library() calls removed)
library(lattice)
library(foreign)
library(MASS)
library(strucchange)
library(stats)
library(stats4)
library(KernSmooth)
library(fastICA)
library(cluster)
library(leaps)
library(mgcv)
library(rpart)
library(pan)
library(DAAG)
library(TTR)
library(tis)
library(xts)
library(forecast)
library(TSA)
library(timeSeries)
library(fUnitRoots)
library(fBasics)
library(tseries)
library(timsac)
library(fpp)
library(seasonal)
library(dplyr)
library(vars)

setwd('C:/Users/arra1/Desktop/unemployment_forecast')

# Unemployment-rate series (presumably monthly FRED series CAURN / NYURN —
# confirm frequency against the source files).
input1 <- read.csv('CAURN.csv')
input2 <- read.csv('NYURN.csv')

# Set up abstractions: one data frame per state with a date index and the rate.
time_series1 <- data.frame(index = input1$DATE, val = input1$CAURN)
time_series2 <- data.frame(index = input2$DATE, val = input2$NYURN)

# Plot time series, ACF, PACF
# California
plot(time_series1$val, type = "l", ylab = "CA Unemployment Rate (%)", xlab = "Date", col = "blue")
acf(time_series1$val, main = "CA Unemployment % ACF")
pacf(time_series1$val, main = "CA Unemployment % PACF")
# New York
plot(time_series2$val, type = "l", ylab = "NY Unemployment Rate (%)", xlab = "Date", col = "blue")
acf(time_series2$val, main = "NY Unemployment % ACF")
pacf(time_series2$val, main = "NY Unemployment % PACF")

unemployment_ca <- ts(time_series1$val)
unemployment_ny <- ts(time_series2$val)

# ARIMA (1,0,0), (0,0,12) models
# NOTE(review): seasonal = c(0,0,12) specifies 12 seasonal MA terms, not a
# seasonal period of 12. If a monthly period was intended, use
# seasonal = list(order = c(0,0,1), period = 12) — confirm intent.
model1_ca <- Arima(unemployment_ca, order = c(1,0,0), seasonal = c(0,0,12))
model1_ny <- Arima(unemployment_ny, order = c(1,0,0), seasonal = c(0,0,12))
summary(model1_ca)
summary(model1_ny)

# Model fits: observed series in green, fitted values in red
plot(unemployment_ca, type = "l", col = "green")
lines(model1_ca$fitted, col = "red")
plot(unemployment_ny, type = "l", col = "green")
lines(model1_ny$fitted, col = "red")

# Model residuals
plot(model1_ca$residuals)
plot(model1_ny$residuals)

# Residual ACF & PACF
acf(model1_ca$residuals, main = "CA Residuals ACF")
pacf(model1_ca$residuals, main = "CA Residuals PACF")

# Recursive residuals (fixed "Resursive" typo in the plot titles)
plot(recresid(model1_ca$residuals ~ 1), type = "l", main = "CA Recursive Residuals", ylab = "Residuals")
plot(recresid(model1_ny$residuals ~ 1), type = "l", main = "NY Recursive Residuals", ylab = "Residuals")

# CUSUM tests for parameter stability
plot(efp(model1_ca$residuals ~ 1, type = "Rec-CUSUM"), main = "CA Recursive CUSUM")
plot(efp(model1_ny$residuals ~ 1, type = "Rec-CUSUM"), main = "NY Recursive CUSUM")

# 12-step-ahead model forecasts
plot(forecast(model1_ca, h = 12), main = "CA Seasonal ARIMA Forecast")
plot(forecast(model1_ny, h = 12), main = "NY Seasonal ARIMA Forecast")

# Cross-correlation function between the two series
ccf(unemployment_ca, unemployment_ny, main = "CA & NY Unemployment Cross-Correlation")

# VAR model on both series (redundant nested cbind() removed)
unemployment_tot <- cbind(unemployment_ca, unemployment_ny)
var_model <- VAR(unemployment_tot, p = 6)
summary(var_model)
plot(var_model)
par(mfrow = c(2, 1))
acf(residuals(var_model)[, 1])
pacf(residuals(var_model)[, 1])

# Impulse Response Function
irf(var_model)
plot(irf(var_model, n.ahead = 36), main = "IRF")

# Recursive CUSUM of VAR model
plot(stability(var_model, type = "Rec-CUSUM"), plot.type = "single")

# Granger-causality tests in both directions
grangertest(unemployment_ca ~ unemployment_ny, order = 6)
grangertest(unemployment_ny ~ unemployment_ca, order = 6)

# 12-step-ahead VAR forecast
var.predict <- predict(object = var_model, n.ahead = 12)
plot(var.predict)
|
\name{gkmsvm_trainCV}
\alias{gkmsvm_trainCV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Training the SVM model, using repeated CV to tune parameter C and plot ROC curves}
\description{Using the kernel matrix created by 'gkmsvm_kernel', this function trains the SVM classifier.
It uses repeated CV to find optimum SVM parameter C. Also generates ROC and PRC curves.}
\usage{gkmsvm_trainCV(kernelfn, posfn, negfn, svmfnprfx=NA,
nCV=5, nrepeat=1, cv=NA, Type="C-svc", C=1, shrinking=FALSE,
showPlots=TRUE, outputPDFfn=NA, outputCVpredfn=NA, outputROCfn=NA, ...)}
\arguments{
\item{kernelfn}{kernel matrix file name}
\item{posfn}{positive sequences file name}
\item{negfn}{negative sequences file name}
\item{svmfnprfx}{(optional) output SVM model file name prefix }
\item{nCV}{(optional) number of CV folds}
\item{nrepeat}{(optional) number of repeated CVs}
  \item{cv}{(optional) CV group label. An array of length (npos+nneg), containing CV group number (between 1 and nCV) for each sequence}
\item{Type}{(optional) SVM type (default='C-svc'), see 'kernlab' documentation for more details.}
\item{C}{(optional)a vector of all values of C (SVM parameter) to be tested. (default=1), see 'kernlab' documentation for more details.}
\item{shrinking}{optional: shrinking parameter for kernlab (default=FALSE), see 'kernlab' documentation for more details.}
\item{showPlots}{generate plots (default==TRUE)}
\item{outputPDFfn}{filename for output PDF, default=NA (no PDF output)}
\item{outputCVpredfn}{filename for output cvpred (predicted CV values), default=NA (no output)}
\item{outputROCfn}{filename for output auROC (Area Under an ROC Curve) and auPRC (Area Under the Precision Recall Curve) values, default=NA (no output)}
\item{...}{optional: additional SVM parameters, see 'kernlab' documentation for more details.}
}
\details{Trains SVM classifier and generates two files: [svmfnprfx]_svalpha.out for SVM alphas and the other for the corresponding SV sequences ([svmfnprfx]_svseq.fa) }
\author{Mahmoud Ghandi}
\examples{
#Input file names:
posfn= 'test_positives.fa' #positive set (FASTA format)
negfn= 'test_negatives.fa' #negative set (FASTA format)
testfn= 'test_testset.fa' #test set (FASTA format)
#Output file names:
kernelfn= 'test_kernel.txt' #kernel matrix
svmfnprfx= 'test_svmtrain' #SVM files
outfn = 'output.txt' #output scores for sequences in the test set
# gkmsvm_kernel(posfn, negfn, kernelfn); #computes kernel
# cvres = gkmsvm_trainCV(kernelfn,posfn, negfn, svmfnprfx,
# outputPDFfn='ROC.pdf', outputCVpredfn='cvpred.out');
# #trains SVM, plots ROC and PRC curves, and outputs model predictions.
# gkmsvm_classify(testfn, svmfnprfx, outfn); #scores test sequences
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{gkmsvm_train}
| /fuzzedpackages/gkmSVM/man/gkmsvm_trainCV.Rd | no_license | akhikolla/testpackages | R | false | false | 2,961 | rd | \name{gkmsvm_trainCV}
\alias{gkmsvm_trainCV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Training the SVM model, using repeated CV to tune parameter C and plot ROC curves}
\description{Using the kernel matrix created by 'gkmsvm_kernel', this function trains the SVM classifier.
It uses repeated CV to find optimum SVM parameter C. Also generates ROC and PRC curves.}
\usage{gkmsvm_trainCV(kernelfn, posfn, negfn, svmfnprfx=NA,
nCV=5, nrepeat=1, cv=NA, Type="C-svc", C=1, shrinking=FALSE,
showPlots=TRUE, outputPDFfn=NA, outputCVpredfn=NA, outputROCfn=NA, ...)}
\arguments{
\item{kernelfn}{kernel matrix file name}
\item{posfn}{positive sequences file name}
\item{negfn}{negative sequences file name}
\item{svmfnprfx}{(optional) output SVM model file name prefix }
\item{nCV}{(optional) number of CV folds}
\item{nrepeat}{(optional) number of repeated CVs}
  \item{cv}{(optional) CV group label. An array of length (npos+nneg), containing CV group number (between 1 and nCV) for each sequence}
\item{Type}{(optional) SVM type (default='C-svc'), see 'kernlab' documentation for more details.}
\item{C}{(optional)a vector of all values of C (SVM parameter) to be tested. (default=1), see 'kernlab' documentation for more details.}
\item{shrinking}{optional: shrinking parameter for kernlab (default=FALSE), see 'kernlab' documentation for more details.}
\item{showPlots}{generate plots (default==TRUE)}
\item{outputPDFfn}{filename for output PDF, default=NA (no PDF output)}
\item{outputCVpredfn}{filename for output cvpred (predicted CV values), default=NA (no output)}
\item{outputROCfn}{filename for output auROC (Area Under an ROC Curve) and auPRC (Area Under the Precision Recall Curve) values, default=NA (no output)}
\item{...}{optional: additional SVM parameters, see 'kernlab' documentation for more details.}
}
\details{Trains SVM classifier and generates two files: [svmfnprfx]_svalpha.out for SVM alphas and the other for the corresponding SV sequences ([svmfnprfx]_svseq.fa) }
\author{Mahmoud Ghandi}
\examples{
#Input file names:
posfn= 'test_positives.fa' #positive set (FASTA format)
negfn= 'test_negatives.fa' #negative set (FASTA format)
testfn= 'test_testset.fa' #test set (FASTA format)
#Output file names:
kernelfn= 'test_kernel.txt' #kernel matrix
svmfnprfx= 'test_svmtrain' #SVM files
outfn = 'output.txt' #output scores for sequences in the test set
# gkmsvm_kernel(posfn, negfn, kernelfn); #computes kernel
# cvres = gkmsvm_trainCV(kernelfn,posfn, negfn, svmfnprfx,
# outputPDFfn='ROC.pdf', outputCVpredfn='cvpred.out');
# #trains SVM, plots ROC and PRC curves, and outputs model predictions.
# gkmsvm_classify(testfn, svmfnprfx, outfn); #scores test sequences
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{gkmsvm_train}
|
library(readr)
library(dplyr)
setwd("~/Dropbox/thesis/5_APST/julia_compute_distance/src")
rm(list = ls())
#CREATE A NEW CURRENT VALUE DF THAT GROUPS BY 3DIGIT OCC AND DECADE WITH COUNT, 1DIGIT OCC AND 2DIGIT OCC
#----------------------------------------------------------
df <- read_csv("../input/current_values.csv")[,-1]
#Calculate the decade of each row in the dataframe based on year
df$decade <- cut(df$Year,
breaks = c(1980,1985,1990,1995,2001), labels=c(1980,1985, 1990,1995))
tf <- df %>%
group_by(`3digit occupation`, decade) %>%
summarize(`number_employed`=n(), `1digit occ`=first(`1digit occupation`), `2digit occ`=first(`2digit occupation`))
#CREATE A LOOP FOR EACH TYPE OF PCA AND FOR EACH OF THE 3,4,7 PCS
tf$`1digit occ`[tf$`1digit occ` == 5] <- 6
types <- c("pca","spca")
for (x in types){
for (num in c(3,4,7)){
#READ THE PC
#_____________________________________________________________
filename <- paste("../input/", x, "_by_5yr/", x, "_by_5yr_", toString(num), sep="", collapse=NULL)
print(filename)
kf <- read_csv(filename)[,-1]
names(kf) <- sub("X1_1", "PC1", names(kf))
names(kf) <- sub("X", "PC", names(kf))
#MERGES THE TWO DATAFRAMES
#1. Merge N_jt = # of workers who work in occ j in decade t with PCA output.
#ROW is {occ j} x {decade}
#COL is {PCA 1} x {PCA 2} ..., N_jt
#-------------------------------------------------
df$decade <- as.character(df$decade)
kf$year <- as.character(kf$year)
df <- inner_join(tf, kf, by = c("3digit occupation"="occ1990dd", "decade"="year"))
#GET DISSIMILARITY
#-------------------------------------------------------------
if (num == 3){
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$total_distance <- df$distance1 + df$distance2 + df$distance3
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}else if (num == 4){
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`), `weighted_pc4`= weighted.mean(`PC4`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$distance4 <- abs(df$PC4 - df$weighted_pc4)
df$total_distance <- df$distance1 + df$distance2 + df$distance3 + df$distance4
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}else{
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`), `weighted_pc4`= weighted.mean(`PC4`, `number_employed`), `weighted_pc5`= weighted.mean(`PC5`, `number_employed`), `weighted_pc6`= weighted.mean(`PC6`, `number_employed`), `weighted_pc7`= weighted.mean(`PC7`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$distance4 <- abs(df$PC4 - df$weighted_pc4)
df$distance5 <- abs(df$PC5 - df$weighted_pc5)
df$distance6 <- abs(df$PC6 - df$weighted_pc6)
df$distance7 <- abs(df$PC7 - df$weighted_pc7)
df$total_distance <- df$distance1 + df$distance2 + df$distance3 + df$distance4 + df$distance5 + df$distance6 + df$distance7
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}
#MERGE WITH TWO DIGIT OCCUPATIONAL CODES
#-------------------------------------------------------------
lf <- read_csv("../input/1digit_occupations.csv")[,-1]
df <- inner_join(df, lf, by = c("1digit occ"="codes"))
df <- df %>%
select(occupation, everything())
#WRITE FILE
#-------------------------------------------
filename <- paste("../output/1digit_5yr/", x, "_distance_", toString(num), ".csv", sep="", collapse=NULL)
write.csv(df, filename)
}
}
| /Compute distance-dissimilarity/src/computational scripts/distance_1digit_5yr.R | no_license | Julia-Susser/Uchicago-Research-Work | R | false | false | 6,021 | r | library(readr)
library(dplyr)
setwd("~/Dropbox/thesis/5_APST/julia_compute_distance/src")
rm(list = ls())
#CREATE A NEW CURRENT VALUE DF THAT GROUPS BY 3DIGIT OCC AND DECADE WITH COUNT, 1DIGIT OCC AND 2DIGIT OCC
#----------------------------------------------------------
df <- read_csv("../input/current_values.csv")[,-1]
#Calculate the decade of each row in the dataframe based on year
df$decade <- cut(df$Year,
breaks = c(1980,1985,1990,1995,2001), labels=c(1980,1985, 1990,1995))
tf <- df %>%
group_by(`3digit occupation`, decade) %>%
summarize(`number_employed`=n(), `1digit occ`=first(`1digit occupation`), `2digit occ`=first(`2digit occupation`))
#CREATE A LOOP FOR EACH TYPE OF PCA AND FOR EACH OF THE 3,4,7 PCS
tf$`1digit occ`[tf$`1digit occ` == 5] <- 6
types <- c("pca","spca")
for (x in types){
for (num in c(3,4,7)){
#READ THE PC
#_____________________________________________________________
filename <- paste("../input/", x, "_by_5yr/", x, "_by_5yr_", toString(num), sep="", collapse=NULL)
print(filename)
kf <- read_csv(filename)[,-1]
names(kf) <- sub("X1_1", "PC1", names(kf))
names(kf) <- sub("X", "PC", names(kf))
#MERGES THE TWO DATAFRAMES
#1. Merge N_jt = # of workers who work in occ j in decade t with PCA output.
#ROW is {occ j} x {decade}
#COL is {PCA 1} x {PCA 2} ..., N_jt
#-------------------------------------------------
df$decade <- as.character(df$decade)
kf$year <- as.character(kf$year)
df <- inner_join(tf, kf, by = c("3digit occupation"="occ1990dd", "decade"="year"))
#GET DISSIMILARITY
#-------------------------------------------------------------
if (num == 3){
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$total_distance <- df$distance1 + df$distance2 + df$distance3
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}else if (num == 4){
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`), `weighted_pc4`= weighted.mean(`PC4`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$distance4 <- abs(df$PC4 - df$weighted_pc4)
df$total_distance <- df$distance1 + df$distance2 + df$distance3 + df$distance4
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}else{
#get the weighted mean of the pcs by two digit occ and decade
lf <- df %>%
group_by(`1digit occ`, decade)%>%
summarize(`weighted_pc1`= weighted.mean(`PC1`, `number_employed`), `weighted_pc2`= weighted.mean(`PC2`, `number_employed`), `weighted_pc3`= weighted.mean(`PC3`, `number_employed`), `weighted_pc4`= weighted.mean(`PC4`, `number_employed`), `weighted_pc5`= weighted.mean(`PC5`, `number_employed`), `weighted_pc6`= weighted.mean(`PC6`, `number_employed`), `weighted_pc7`= weighted.mean(`PC7`, `number_employed`))
#Merge the dataframe with the weighted mean pcs of each 2digit occ in each decade w the 3digit occs and pcs
df <- inner_join(df, lf, by = c("1digit occ", "decade"))
#calculate each 3digit occs pc distance from weighted mean pc for their 2 digit occ
df$distance1 <- abs(df$PC1 - df$weighted_pc1)
df$distance2 <- abs(df$PC2 - df$weighted_pc2)
df$distance3 <- abs(df$PC3 - df$weighted_pc3)
df$distance4 <- abs(df$PC4 - df$weighted_pc4)
df$distance5 <- abs(df$PC5 - df$weighted_pc5)
df$distance6 <- abs(df$PC6 - df$weighted_pc6)
df$distance7 <- abs(df$PC7 - df$weighted_pc7)
df$total_distance <- df$distance1 + df$distance2 + df$distance3 + df$distance4 + df$distance5 + df$distance6 + df$distance7
#Get the weighted mean distance for each two digit occ each decade
df <- df %>%
group_by(`1digit occ`, decade) %>%
summarize(mean_distance = weighted.mean(total_distance, number_employed))
}
#MERGE WITH TWO DIGIT OCCUPATIONAL CODES
#-------------------------------------------------------------
lf <- read_csv("../input/1digit_occupations.csv")[,-1]
df <- inner_join(df, lf, by = c("1digit occ"="codes"))
df <- df %>%
select(occupation, everything())
#WRITE FILE
#-------------------------------------------
filename <- paste("../output/1digit_5yr/", x, "_distance_", toString(num), ".csv", sep="", collapse=NULL)
write.csv(df, filename)
}
}
|
#' Get patterns for ambiguous taxa
#'
#' This function stores the regex patterns for ambiguous taxa.
#'
#' @param unknown If \code{TRUE}, include names that suggest they are
#' placeholders for unknown taxa (e.g. "unknown ...").
#' @param uncultured If \code{TRUE}, include names that suggest they are
#' assigned to uncultured organisms (e.g. "uncultured ...").
#' @param regex If \code{TRUE}, includes regex syntax to make matching things like spaces more robust.
#' @param case_variations If \code{TRUE}, include variations of letter case.
#'
#' @export
ambiguous_synonyms <- function(unknown = TRUE, uncultured = TRUE, regex = TRUE, case_variations = FALSE) {
  # Base synonym lists, kept lower case; case variants are added on demand.
  unknown_syns <- c(
    'unknown',
    'unidentified',
    'incertae sedis',
    'ambiguous',
    'ambiguous taxa',
    'unassigned',
    'possible',
    'putative'
  )
  uncultured_syns <- c(
    'uncultured',
    'candidatus',
    'metagenome'
  )
  # Collect the requested synonym sets. Start from character(0) (not c(),
  # which is NULL) so the return value is always a character vector.
  output <- character(0)
  if (unknown) {
    output <- c(output, unknown_syns)
  }
  if (uncultured) {
    output <- c(output, uncultured_syns)
  }
  if (case_variations) {
    # NOTE(review): `capitalize` is not base R; presumably an internal helper
    # or an import (e.g. Hmisc::capitalize) -- confirm it is in scope.
    output <- c(output,
                capitalize(output),
                toupper(output))
  }
  if (regex) {
    # Make separators robust: a literal space matches one or more of "_",
    # " ", or "-". Arguments are in gsub's conventional order
    # (pattern, replacement, x) rather than relying on named-arg skipping.
    output <- gsub(' ', '[_ -]+', output)
  }
  return(output)
}
#' Get patterns for ambiguous taxa
#'
#' This function stores the regex patterns for ambiguous taxa.
#'
#' @param unknown If \code{TRUE}, remove taxa with names that suggest they are
#'   placeholders for unknown taxa (e.g. "unknown ...").
#' @param uncultured If \code{TRUE}, remove taxa with names that suggest they are
#'   assigned to uncultured organisms (e.g. "uncultured ...").
#' @param case_variations If \code{TRUE}, include variations of letter case.
#' @param whole_match If \code{TRUE}, add "^" to front and "$" to the back of each
#' pattern to indicate they are to match whole words.
#' @param name_regex The regex code to match a valid character in a taxon name.
#' For example, "[a-z]" would mean taxon names can only be lower case letters.
#'
#' @keywords internal
ambiguous_patterns <- function(unknown = TRUE, uncultured = TRUE, case_variations = FALSE,
                               whole_match = FALSE, name_regex = ".") {
  # Look up the synonyms to match, then allow any run of valid name
  # characters on either side of each synonym.
  syns <- ambiguous_synonyms(unknown = unknown,
                             uncultured = uncultured,
                             case_variations = case_variations)
  padding <- paste0(name_regex, "*")
  patterns <- paste0(padding, syns, padding)
  # Optionally anchor each pattern so it must match an entire name.
  if (whole_match) {
    patterns <- paste0("^", patterns, "$")
  }
  patterns
}
#' Find ambiguous taxon names
#'
#' Find taxa with ambiguous names, such as "unknown" or "uncultured".
#'
#' If you encounter a taxon name that represents an ambiguous taxon that is not
#' filtered out by this function, let us know and we will add it.
#'
#' @param taxon_names A character vector of taxon names to check.
#' @inheritParams ambiguous_patterns
#' @param ignore_case If \code{TRUE}, don't consider the case of the text when
#'   determining a match.
#'
#' @return TRUE/FALSE vector corresponding to \code{taxon_names}
#'
#' @examples
#' is_ambiguous(c("unknown", "uncultured", "homo sapiens", "kfdsjfdljsdf"))
#'
#' @export
is_ambiguous <- function(taxon_names, unknown = TRUE, uncultured = TRUE,
                         name_regex = ".", ignore_case = TRUE) {
  # Get patterns to filter out
  patterns <- ambiguous_patterns(unknown = unknown, uncultured = uncultured,
                                 name_regex = name_regex)
  # Flag names matching any pattern. Seeding the reduction with an all-FALSE
  # vector guarantees a logical result of length(taxon_names) even when no
  # pattern categories are selected (an unseeded Reduce over an empty list
  # would return NULL).
  Reduce(`|`, lapply(patterns, function(pattern) {
    grepl(pattern, taxon_names, ignore.case = ignore_case)
  }), init = rep(FALSE, length(taxon_names)))
}
#' Filter ambiguous taxon names
#'
#' Filter out taxa with ambiguous names, such as "unknown" or "uncultured".
#' NOTE: some parameters of this function are passed to
#' \code{\link[taxa]{filter_taxa}} with the "invert" option set to \code{TRUE}.
#' Works the same way as \code{\link[taxa]{filter_taxa}} for the most part.
#'
#' If you encounter a taxon name that represents an ambiguous taxon that is not
#' filtered out by this function, let us know and we will add it.
#'
#' @param obj A \code{\link[taxa]{taxmap}} object
#' @inheritParams is_ambiguous
#' @inheritParams taxa::filter_taxa
#'
#' @return A \code{\link[taxa]{taxmap}} object
#'
#' @examples
#' obj <- parse_tax_data(c("Plantae;Solanaceae;Solanum;lycopersicum",
#' "Plantae;Solanaceae;Solanum;tuberosum",
#' "Plantae;Solanaceae;Solanum;unknown",
#' "Plantae;Solanaceae;Solanum;uncultured",
#' "Plantae;UNIDENTIFIED"))
#' filter_ambiguous_taxa(obj)
#'
#' @export
filter_ambiguous_taxa <- function(obj, unknown = TRUE, uncultured = TRUE,
                                  name_regex = ".", ignore_case = TRUE,
                                  subtaxa = FALSE, drop_obs = TRUE,
                                  reassign_obs = TRUE, reassign_taxa = TRUE) {
  # Flag the ambiguous taxon names, then keep everything else by filtering
  # with invert = TRUE.
  ambiguous <- is_ambiguous(obj$taxon_names(),
                            unknown = unknown,
                            uncultured = uncultured,
                            name_regex = name_regex,
                            ignore_case = ignore_case)
  taxa::filter_taxa(obj,
                    ambiguous,
                    invert = TRUE,
                    subtaxa = subtaxa,
                    drop_obs = drop_obs,
                    reassign_obs = reassign_obs,
                    reassign_taxa = reassign_taxa)
}
| /metacoder/R/remove_ambiguous.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 5,506 | r | #' Get patterns for ambiguous taxa
#'
#' This function stores the regex patterns for ambiguous taxa.
#'
#' @param unknown If \code{TRUE}, include names that suggest they are
#' placeholders for unknown taxa (e.g. "unknown ...").
#' @param uncultured If \code{TRUE}, include names that suggest they are
#' assigned to uncultured organisms (e.g. "uncultured ...").
#' @param regex If \code{TRUE}, includes regex syntax to make matching things like spaces more robust.
#' @param case_variations If \code{TRUE}, include variations of letter case.
#'
#' @export
ambiguous_synonyms <- function(unknown = TRUE, uncultured = TRUE, regex = TRUE, case_variations = FALSE) {
  # Base synonym lists, kept lower case; case variants are added on demand.
  unknown_syns <- c(
    'unknown',
    'unidentified',
    'incertae sedis',
    'ambiguous',
    'ambiguous taxa',
    'unassigned',
    'possible',
    'putative'
  )
  uncultured_syns <- c(
    'uncultured',
    'candidatus',
    'metagenome'
  )
  # Collect the requested synonym sets. Start from character(0) (not c(),
  # which is NULL) so the return value is always a character vector.
  output <- character(0)
  if (unknown) {
    output <- c(output, unknown_syns)
  }
  if (uncultured) {
    output <- c(output, uncultured_syns)
  }
  if (case_variations) {
    # NOTE(review): `capitalize` is not base R; presumably an internal helper
    # or an import (e.g. Hmisc::capitalize) -- confirm it is in scope.
    output <- c(output,
                capitalize(output),
                toupper(output))
  }
  if (regex) {
    # Make separators robust: a literal space matches one or more of "_",
    # " ", or "-". Arguments are in gsub's conventional order
    # (pattern, replacement, x) rather than relying on named-arg skipping.
    output <- gsub(' ', '[_ -]+', output)
  }
  return(output)
}
#' Get patterns for ambiguous taxa
#'
#' This function stores the regex patterns for ambiguous taxa.
#'
#' @param unknown If \code{TRUE}, remove taxa with names that suggest they are
#'   placeholders for unknown taxa (e.g. "unknown ...").
#' @param uncultured If \code{TRUE}, remove taxa with names that suggest they are
#'   assigned to uncultured organisms (e.g. "uncultured ...").
#' @param case_variations If \code{TRUE}, include variations of letter case.
#' @param whole_match If \code{TRUE}, add "^" to front and "$" to the back of each
#' pattern to indicate they are to match whole words.
#' @param name_regex The regex code to match a valid character in a taxon name.
#' For example, "[a-z]" would mean taxon names can only be lower case letters.
#'
#' @keywords internal
ambiguous_patterns <- function(unknown = TRUE, uncultured = TRUE, case_variations = FALSE,
                               whole_match = FALSE, name_regex = ".") {
  # Look up the synonyms to match, then allow any run of valid name
  # characters on either side of each synonym.
  syns <- ambiguous_synonyms(unknown = unknown,
                             uncultured = uncultured,
                             case_variations = case_variations)
  padding <- paste0(name_regex, "*")
  patterns <- paste0(padding, syns, padding)
  # Optionally anchor each pattern so it must match an entire name.
  if (whole_match) {
    patterns <- paste0("^", patterns, "$")
  }
  patterns
}
#' Find ambiguous taxon names
#'
#' Find taxa with ambiguous names, such as "unknown" or "uncultured".
#'
#' If you encounter a taxon name that represents an ambiguous taxon that is not
#' filtered out by this function, let us know and we will add it.
#'
#' @param taxon_names A character vector of taxon names to check.
#' @inheritParams ambiguous_patterns
#' @param ignore_case If \code{TRUE}, don't consider the case of the text when
#'   determining a match.
#'
#' @return TRUE/FALSE vector corresponding to \code{taxon_names}
#'
#' @examples
#' is_ambiguous(c("unknown", "uncultured", "homo sapiens", "kfdsjfdljsdf"))
#'
#' @export
is_ambiguous <- function(taxon_names, unknown = TRUE, uncultured = TRUE,
                         name_regex = ".", ignore_case = TRUE) {
  # Get patterns to filter out
  patterns <- ambiguous_patterns(unknown = unknown, uncultured = uncultured,
                                 name_regex = name_regex)
  # Flag names matching any pattern. Seeding the reduction with an all-FALSE
  # vector guarantees a logical result of length(taxon_names) even when no
  # pattern categories are selected (an unseeded Reduce over an empty list
  # would return NULL).
  Reduce(`|`, lapply(patterns, function(pattern) {
    grepl(pattern, taxon_names, ignore.case = ignore_case)
  }), init = rep(FALSE, length(taxon_names)))
}
#' Filter ambiguous taxon names
#'
#' Filter out taxa with ambiguous names, such as "unknown" or "uncultured".
#' NOTE: some parameters of this function are passed to
#' \code{\link[taxa]{filter_taxa}} with the "invert" option set to \code{TRUE}.
#' Works the same way as \code{\link[taxa]{filter_taxa}} for the most part.
#'
#' If you encounter a taxon name that represents an ambiguous taxon that is not
#' filtered out by this function, let us know and we will add it.
#'
#' @param obj A \code{\link[taxa]{taxmap}} object
#' @inheritParams is_ambiguous
#' @inheritParams taxa::filter_taxa
#'
#' @return A \code{\link[taxa]{taxmap}} object
#'
#' @examples
#' obj <- parse_tax_data(c("Plantae;Solanaceae;Solanum;lycopersicum",
#' "Plantae;Solanaceae;Solanum;tuberosum",
#' "Plantae;Solanaceae;Solanum;unknown",
#' "Plantae;Solanaceae;Solanum;uncultured",
#' "Plantae;UNIDENTIFIED"))
#' filter_ambiguous_taxa(obj)
#'
#' @export
filter_ambiguous_taxa <- function(obj, unknown = TRUE, uncultured = TRUE,
                                  name_regex = ".", ignore_case = TRUE,
                                  subtaxa = FALSE, drop_obs = TRUE,
                                  reassign_obs = TRUE, reassign_taxa = TRUE) {
  # Flag the ambiguous taxon names, then keep everything else by filtering
  # with invert = TRUE.
  ambiguous <- is_ambiguous(obj$taxon_names(),
                            unknown = unknown,
                            uncultured = uncultured,
                            name_regex = name_regex,
                            ignore_case = ignore_case)
  taxa::filter_taxa(obj,
                    ambiguous,
                    invert = TRUE,
                    subtaxa = subtaxa,
                    drop_obs = drop_obs,
                    reassign_obs = reassign_obs,
                    reassign_taxa = reassign_taxa)
}
|
source("block_functions.R")
# Define server logic required to draw a histogram
# Server logic for the double-wavelet fMRI simulation app. Two panels are
# served: a block-design error-rate simulation (type I / type II table) and a
# resting-state correlation simulation (MSE-vs-truth plot).
#
# NOTE(review): `X` and `con` are used below but never defined in this file;
# presumably they are created by "block_functions.R" when sourced -- confirm.
# They are not listed in clusterExport(), so the PSOCK workers rely on
# re-sourcing block_functions.R to obtain them.
shinyServer(function(input, output) {
  # Run one simulated block-design data set and analyze it with both the
  # mean-voxel method and the double-wavelet (DW) method.
  # Returns a 2-row matrix: c(roi1 result, roi2 result, method label).
  analysis <- function(input=input,X=X,con=con){
    shiny::isolate({
      source("block_functions.R")
      # Choose the error generator matching the requested spatial
      # correlation structure.
      if (input$spat_cor == "Independent" ){
        genfunc <- gen2roi_error_ind
      } else if (input$spat_cor == "Exponential" ){
        genfunc <- gen2roi_error_exp
      } else if (input$spat_cor == "Gaussian" ){
        genfunc <- gen2roi_error_gau
      } else if (input$spat_cor == "Identical" ){
        genfunc <- gen2roi_error_same
      }
      data <- genfunc(input,X)
      data <- normalseries(data)
      data_smooth <- spatial_smoothing(data=data, input)
      # Mean-voxel analysis uses the spatially smoothed data; DW uses the
      # unsmoothed series.
      result_mean <- block_mean(data_smooth, X=X, con=con)
      result_dw <- block_dw(data,input, X=X, con=con, level=1)
      result <- rbind( c(result_mean, "Mean") , c(result_dw, "DW") )
    }
    )
    return(result)
  }
  # Echo how many times the block simulation has been launched.
  output$value <- renderPrint({ print(paste0("Simulation ", input$go)) })
  # Run the full block-design simulation when the "go" button is pressed and
  # summarize type I / type II error rates for both methods.
  resulttable <- eventReactive(input$go, {
    # NOTE(review): worker count (8) is hard coded; consider
    # parallel::detectCores().
    cl <- makeCluster(8, 'PSOCK')
    clusterExport(
      cl, varlist=c("input","analysis"),
      envir=environment())
    mat <- parSapply(cl, 1:(input$N.sim*input$N.subj), function(i) {
      source("block_functions.R")
      analysis(input, X, con)
    },simplify = FALSE)
    stopCluster(cl)
    #mat <- replicate( input$N.sim *input$N.subj , analysis(input, X, con)
    #          ,simplify=FALSE )
    # Stack per-run results; rbind of mixed rows yields character columns,
    # so the ROI columns are converted back to numeric below.
    data <- as.data.frame(do.call("rbind", mat))
    colnames(data) <- c("roi1", "roi2", "method")
    data$roi1 <- as.numeric(as.character(data$roi1))
    data$roi2 <- as.numeric(as.character(data$roi2))
    meandata <- data[ data$method == "Mean" ,]
    dwdata <- data[ data$method == "DW" ,]
    # One-sample t-test p-value per simulation, pooling the N.subj
    # subject-level results belonging to that simulation.
    errorrate <- function(testdata, Nsimu = input$N.sim, Nsub = input$N.subj){
      resultP <- rep(NA,Nsimu)
      for (i in 1: Nsimu){
        test1 <- t.test( testdata[ (1:Nsub) + (i-1)*Nsub ] )
        resultP[i] <- test1$p.value
      }
      return(resultP)
    }
    # mean_roi1 <- errorrate(meandata$roi1)
    # mean_roi2 <- errorrate(meandata$roi2)
    # dw_roi1 <- errorrate(dwdata$roi1)
    # dw_roi2 <- errorrate(dwdata$roi2)
    # roi2 appears to be the null ROI (rejections there count as type I
    # errors) and roi1 the signal ROI (non-rejections count as type II).
    # NOTE(review): inferred from usage -- confirm against block_functions.R.
    meantypeI <- mean(errorrate(meandata$roi2) < 0.05)
    meantypeII <- mean(errorrate(meandata$roi1) > 0.05)
    dwtypeI <- mean(errorrate(dwdata$roi2) < 0.05)
    dwtypeII <- mean(errorrate(dwdata$roi1) > 0.05)
    result <- rbind( c(meantypeI , meantypeII ), c(dwtypeI, dwtypeII) )
    colnames(result) <- c("Type I Error", "Type II Error")
    rownames(result) <- c("Mean-Voxel", "Double-Wavelet")
    result
  })
  output$table <- renderTable({
    resulttable()
  }, rownames = TRUE)
  # Render the block.Rmd report and offer it for download.
  output$download <- downloadHandler(
    filename = function() {
      paste0("DW_Block_" ,Sys.Date(), '.html')
    },
    content = function(file) {
      out = render('block.Rmd', clean = TRUE)
      file.rename(out, file) # move pdf to file for downloading
    },
    contentType = 'application/html'
  )
  # Resting-state analogue of analysis(): estimate the ROI-ROI correlation
  # with both methods on one simulated data set (no block design).
  rest_analysis <- function(input=input,X=X){
    shiny::isolate({
      source("block_functions.R")
      if (input$spat_cor == "Independent" ){
        genfunc <- gen2roi_error_ind
      } else if (input$spat_cor == "Exponential" ){
        genfunc <- gen2roi_error_exp
      } else if (input$spat_cor == "Gaussian" ){
        genfunc <- gen2roi_error_gau
      } else if (input$spat_cor == "Identical" ){
        genfunc <- gen2roi_error_same
      }
      data <- genfunc(input,X, block=FALSE)
      data <- normalseries(data)
      data_smooth <- spatial_smoothing(data=data, input)
      result_mean <- cor_mean(data_smooth)
      result_dw <- cor_dw(data,input)
      result <- rbind( c(result_mean, "Mean") , c(result_dw, "DW") )
    }
    )
    return(result)
  }
  output$rest_value <- renderPrint({ print(paste0("Simulation ", input$rest_go)) })
  # Sweep the true correlation over the requested range, estimate bias /
  # variance / MSE for both methods at each value, and plot MSE vs truth.
  rest_resultplot <- eventReactive(input$rest_go, {
    allcor <- seq( input$rest_correlation[1], input$rest_correlation[2], by =0.1)
    # Two rows per correlation value: one for each method.
    result <- matrix(NA, ncol=5, nrow = length(allcor)*2 )
    restcount <- 0
    for (restcor in allcor){
      # Copy the rest_* inputs into a plain list shaped like the `input`
      # object that rest_analysis() expects.
      restinput <- list()
      restinput$N.sim <- input$rest_N.sim
      restinput$N.dim1 <- input$rest_N.dim1
      restinput$N.dim2 <- input$rest_N.dim2
      restinput$N.time <- input$rest_N.time
      restinput$waveP <- input$rest_waveP
      restinput$waveT <- input$rest_waveT
      restinput$phi <- input$rest_phi
      restinput$spat_cor <- input$rest_spat_cor
      restinput$spat_phi <- input$rest_spat_phi
      restinput$phi_sigma <- input$rest_phi_sigma
      restinput$GauSigma <- input$rest_GauSigma
      restinput$correlation <- restcor
      restinput$randomsigma <- input$rest_randomsigma
      # NOTE(review): a new 2-worker cluster is created and torn down for
      # every correlation value; hoisting it out of the loop would be faster.
      cl <- makeCluster(2, 'PSOCK')
      clusterExport(
        cl, varlist=c("restinput","rest_analysis"),
        envir=environment())
      mat <- parSapply(cl, 1:(restinput$N.sim), function(i) {
        source("block_functions.R")
        rest_analysis(restinput, X)
      },simplify = FALSE)
      stopCluster(cl)
      data <- as.data.frame(do.call("rbind", mat))
      colnames(data) <- c("correlation","method")
      data$truth <- restcor
      data$correlation <- as.numeric(as.character(data$correlation))
      meandata <- data[ data$method== "Mean" ,]
      dwdata <- data[ data$method== "DW" ,]
      # Bias, variance, and MSE (= bias^2 + variance) for the mean-voxel
      # method at this true correlation.
      restcount <- restcount + 1
      result[restcount, 1] <- "Mean"
      result[restcount, 2] <- mean(meandata$correlation - meandata$truth)
      result[restcount, 3] <- var(meandata$correlation - meandata$truth)
      result[restcount, 4] <- mean(meandata$correlation - meandata$truth)^2 +
        var(meandata$correlation - meandata$truth)
      result[restcount,5] <- restcor
      # Same three summaries for the double-wavelet method.
      restcount <- restcount + 1
      result[restcount, 1] <- "Double-Wavelet"
      result[restcount, 2] <- mean(dwdata$correlation - dwdata$truth)
      result[restcount, 3] <- var(dwdata$correlation - dwdata$truth)
      result[restcount, 4] <- mean(dwdata$correlation - dwdata$truth)^2 +
        var(dwdata$correlation - dwdata$truth)
      result[restcount,5] <- restcor
      # Drop the not-yet-filled rows before plotting the partial sweep.
      plotresult <- as.data.frame(result)
      if (sum(is.na(plotresult[,1])) > 0)
        plotresult <- plotresult[ - which(is.na(plotresult[,1])) ,]
      colnames(plotresult) <- c("Method", "Bias", "Variance", "MSE", "Truth")
      plotresult$MSE <- as.numeric(as.character(plotresult$MSE))
      plotresult$Truth <- as.numeric(as.character(plotresult$Truth))
      p1 <- ggplot(plotresult, aes(Truth, MSE, group=Method, color=Method))+ geom_line() +
        ylab("MSE") + geom_point() + xlim(input$rest_correlation)
      # NOTE(review): renderPlot() is the last statement of the loop BODY,
      # but the reactive's value is the `for` loop itself, which evaluates
      # to NULL in R -- so rest_resultplot() likely returns NULL and
      # output$rest_plot renders nothing. Consider returning renderPlot({p1})
      # after the loop instead. Confirm intended behavior.
      renderPlot({p1})
    }
  })
  output$rest_plot <- renderUI({
    rest_resultplot()
  })
  # NOTE(review): `filename <- function() ...` and `content <- function(file) ...`
  # use `<-` inside the call, which also assigns in the enclosing environment;
  # the arguments still match positionally, but `=` (as used in
  # output$download above) is the intended form.
  output$download_gui <- downloadHandler(
    filename <- function() {
      paste("dw_gui", "zip", sep=".")
    },
    content <- function(file) {
      file.copy("gui/dw_gui.zip", file)
    },
    contentType = "application/zip"
  )
  # Serve the bundled example NIfTI subject file.
  output$download_subj1 <- downloadHandler(
    filename <- function() {
      paste("subj1", "nii", sep=".")
    },
    content <- function(file) {
      file.copy("gui/subj1_run1.nii", file)
    }
  )
})
| /server.R | no_license | chocochun/R_Double_Wavelet | R | false | false | 7,459 | r |
source("block_functions.R")
# Define server logic required to draw a histogram
# Server logic for the double-wavelet fMRI simulation app. Two panels are
# served: a block-design error-rate simulation (type I / type II table) and a
# resting-state correlation simulation (MSE-vs-truth plot).
#
# NOTE(review): `X` and `con` are used below but never defined in this file;
# presumably they are created by "block_functions.R" when sourced -- confirm.
# They are not listed in clusterExport(), so the PSOCK workers rely on
# re-sourcing block_functions.R to obtain them.
shinyServer(function(input, output) {
  # Run one simulated block-design data set and analyze it with both the
  # mean-voxel method and the double-wavelet (DW) method.
  # Returns a 2-row matrix: c(roi1 result, roi2 result, method label).
  analysis <- function(input=input,X=X,con=con){
    shiny::isolate({
      source("block_functions.R")
      # Choose the error generator matching the requested spatial
      # correlation structure.
      if (input$spat_cor == "Independent" ){
        genfunc <- gen2roi_error_ind
      } else if (input$spat_cor == "Exponential" ){
        genfunc <- gen2roi_error_exp
      } else if (input$spat_cor == "Gaussian" ){
        genfunc <- gen2roi_error_gau
      } else if (input$spat_cor == "Identical" ){
        genfunc <- gen2roi_error_same
      }
      data <- genfunc(input,X)
      data <- normalseries(data)
      data_smooth <- spatial_smoothing(data=data, input)
      # Mean-voxel analysis uses the spatially smoothed data; DW uses the
      # unsmoothed series.
      result_mean <- block_mean(data_smooth, X=X, con=con)
      result_dw <- block_dw(data,input, X=X, con=con, level=1)
      result <- rbind( c(result_mean, "Mean") , c(result_dw, "DW") )
    }
    )
    return(result)
  }
  # Echo how many times the block simulation has been launched.
  output$value <- renderPrint({ print(paste0("Simulation ", input$go)) })
  # Run the full block-design simulation when the "go" button is pressed and
  # summarize type I / type II error rates for both methods.
  resulttable <- eventReactive(input$go, {
    # NOTE(review): worker count (8) is hard coded; consider
    # parallel::detectCores().
    cl <- makeCluster(8, 'PSOCK')
    clusterExport(
      cl, varlist=c("input","analysis"),
      envir=environment())
    mat <- parSapply(cl, 1:(input$N.sim*input$N.subj), function(i) {
      source("block_functions.R")
      analysis(input, X, con)
    },simplify = FALSE)
    stopCluster(cl)
    #mat <- replicate( input$N.sim *input$N.subj , analysis(input, X, con)
    #          ,simplify=FALSE )
    # Stack per-run results; rbind of mixed rows yields character columns,
    # so the ROI columns are converted back to numeric below.
    data <- as.data.frame(do.call("rbind", mat))
    colnames(data) <- c("roi1", "roi2", "method")
    data$roi1 <- as.numeric(as.character(data$roi1))
    data$roi2 <- as.numeric(as.character(data$roi2))
    meandata <- data[ data$method == "Mean" ,]
    dwdata <- data[ data$method == "DW" ,]
    # One-sample t-test p-value per simulation, pooling the N.subj
    # subject-level results belonging to that simulation.
    errorrate <- function(testdata, Nsimu = input$N.sim, Nsub = input$N.subj){
      resultP <- rep(NA,Nsimu)
      for (i in 1: Nsimu){
        test1 <- t.test( testdata[ (1:Nsub) + (i-1)*Nsub ] )
        resultP[i] <- test1$p.value
      }
      return(resultP)
    }
    # mean_roi1 <- errorrate(meandata$roi1)
    # mean_roi2 <- errorrate(meandata$roi2)
    # dw_roi1 <- errorrate(dwdata$roi1)
    # dw_roi2 <- errorrate(dwdata$roi2)
    # roi2 appears to be the null ROI (rejections there count as type I
    # errors) and roi1 the signal ROI (non-rejections count as type II).
    # NOTE(review): inferred from usage -- confirm against block_functions.R.
    meantypeI <- mean(errorrate(meandata$roi2) < 0.05)
    meantypeII <- mean(errorrate(meandata$roi1) > 0.05)
    dwtypeI <- mean(errorrate(dwdata$roi2) < 0.05)
    dwtypeII <- mean(errorrate(dwdata$roi1) > 0.05)
    result <- rbind( c(meantypeI , meantypeII ), c(dwtypeI, dwtypeII) )
    colnames(result) <- c("Type I Error", "Type II Error")
    rownames(result) <- c("Mean-Voxel", "Double-Wavelet")
    result
  })
  output$table <- renderTable({
    resulttable()
  }, rownames = TRUE)
  # Render the block.Rmd report and offer it for download.
  output$download <- downloadHandler(
    filename = function() {
      paste0("DW_Block_" ,Sys.Date(), '.html')
    },
    content = function(file) {
      out = render('block.Rmd', clean = TRUE)
      file.rename(out, file) # move pdf to file for downloading
    },
    contentType = 'application/html'
  )
  # Resting-state analogue of analysis(): estimate the ROI-ROI correlation
  # with both methods on one simulated data set (no block design).
  rest_analysis <- function(input=input,X=X){
    shiny::isolate({
      source("block_functions.R")
      if (input$spat_cor == "Independent" ){
        genfunc <- gen2roi_error_ind
      } else if (input$spat_cor == "Exponential" ){
        genfunc <- gen2roi_error_exp
      } else if (input$spat_cor == "Gaussian" ){
        genfunc <- gen2roi_error_gau
      } else if (input$spat_cor == "Identical" ){
        genfunc <- gen2roi_error_same
      }
      data <- genfunc(input,X, block=FALSE)
      data <- normalseries(data)
      data_smooth <- spatial_smoothing(data=data, input)
      result_mean <- cor_mean(data_smooth)
      result_dw <- cor_dw(data,input)
      result <- rbind( c(result_mean, "Mean") , c(result_dw, "DW") )
    }
    )
    return(result)
  }
  output$rest_value <- renderPrint({ print(paste0("Simulation ", input$rest_go)) })
  # Sweep the true correlation over the requested range, estimate bias /
  # variance / MSE for both methods at each value, and plot MSE vs truth.
  rest_resultplot <- eventReactive(input$rest_go, {
    allcor <- seq( input$rest_correlation[1], input$rest_correlation[2], by =0.1)
    # Two rows per correlation value: one for each method.
    result <- matrix(NA, ncol=5, nrow = length(allcor)*2 )
    restcount <- 0
    for (restcor in allcor){
      # Copy the rest_* inputs into a plain list shaped like the `input`
      # object that rest_analysis() expects.
      restinput <- list()
      restinput$N.sim <- input$rest_N.sim
      restinput$N.dim1 <- input$rest_N.dim1
      restinput$N.dim2 <- input$rest_N.dim2
      restinput$N.time <- input$rest_N.time
      restinput$waveP <- input$rest_waveP
      restinput$waveT <- input$rest_waveT
      restinput$phi <- input$rest_phi
      restinput$spat_cor <- input$rest_spat_cor
      restinput$spat_phi <- input$rest_spat_phi
      restinput$phi_sigma <- input$rest_phi_sigma
      restinput$GauSigma <- input$rest_GauSigma
      restinput$correlation <- restcor
      restinput$randomsigma <- input$rest_randomsigma
      # NOTE(review): a new 2-worker cluster is created and torn down for
      # every correlation value; hoisting it out of the loop would be faster.
      cl <- makeCluster(2, 'PSOCK')
      clusterExport(
        cl, varlist=c("restinput","rest_analysis"),
        envir=environment())
      mat <- parSapply(cl, 1:(restinput$N.sim), function(i) {
        source("block_functions.R")
        rest_analysis(restinput, X)
      },simplify = FALSE)
      stopCluster(cl)
      data <- as.data.frame(do.call("rbind", mat))
      colnames(data) <- c("correlation","method")
      data$truth <- restcor
      data$correlation <- as.numeric(as.character(data$correlation))
      meandata <- data[ data$method== "Mean" ,]
      dwdata <- data[ data$method== "DW" ,]
      # Bias, variance, and MSE (= bias^2 + variance) for the mean-voxel
      # method at this true correlation.
      restcount <- restcount + 1
      result[restcount, 1] <- "Mean"
      result[restcount, 2] <- mean(meandata$correlation - meandata$truth)
      result[restcount, 3] <- var(meandata$correlation - meandata$truth)
      result[restcount, 4] <- mean(meandata$correlation - meandata$truth)^2 +
        var(meandata$correlation - meandata$truth)
      result[restcount,5] <- restcor
      # Same three summaries for the double-wavelet method.
      restcount <- restcount + 1
      result[restcount, 1] <- "Double-Wavelet"
      result[restcount, 2] <- mean(dwdata$correlation - dwdata$truth)
      result[restcount, 3] <- var(dwdata$correlation - dwdata$truth)
      result[restcount, 4] <- mean(dwdata$correlation - dwdata$truth)^2 +
        var(dwdata$correlation - dwdata$truth)
      result[restcount,5] <- restcor
      # Drop the not-yet-filled rows before plotting the partial sweep.
      plotresult <- as.data.frame(result)
      if (sum(is.na(plotresult[,1])) > 0)
        plotresult <- plotresult[ - which(is.na(plotresult[,1])) ,]
      colnames(plotresult) <- c("Method", "Bias", "Variance", "MSE", "Truth")
      plotresult$MSE <- as.numeric(as.character(plotresult$MSE))
      plotresult$Truth <- as.numeric(as.character(plotresult$Truth))
      p1 <- ggplot(plotresult, aes(Truth, MSE, group=Method, color=Method))+ geom_line() +
        ylab("MSE") + geom_point() + xlim(input$rest_correlation)
      # NOTE(review): renderPlot() is the last statement of the loop BODY,
      # but the reactive's value is the `for` loop itself, which evaluates
      # to NULL in R -- so rest_resultplot() likely returns NULL and
      # output$rest_plot renders nothing. Consider returning renderPlot({p1})
      # after the loop instead. Confirm intended behavior.
      renderPlot({p1})
    }
  })
  output$rest_plot <- renderUI({
    rest_resultplot()
  })
  # NOTE(review): `filename <- function() ...` and `content <- function(file) ...`
  # use `<-` inside the call, which also assigns in the enclosing environment;
  # the arguments still match positionally, but `=` (as used in
  # output$download above) is the intended form.
  output$download_gui <- downloadHandler(
    filename <- function() {
      paste("dw_gui", "zip", sep=".")
    },
    content <- function(file) {
      file.copy("gui/dw_gui.zip", file)
    },
    contentType = "application/zip"
  )
  # Serve the bundled example NIfTI subject file.
  output$download_subj1 <- downloadHandler(
    filename <- function() {
      paste("subj1", "nii", sep=".")
    },
    content <- function(file) {
      file.copy("gui/subj1_run1.nii", file)
    }
  )
})
|
# Assignment 1.1: Test Scores
# Name: Harvey, Anna
# Date: 2020 - 06 - 07
# 1. What are the observational units in this study?
# 1. The observational units are course grades and total points earned.
# 2. Identify the variables mentioned in the narrative paragraph and
#    determine which are categorical and quantitative?
# 2. The variables are the students in the course and the content taught. The
#    students are categorical variables and the grades are quantitative variables.
# Confirm the current working directory and its contents before switching.
getwd()
dir()
# NOTE(review): absolute, user-specific path; a relative path or an RStudio
# project would make this script portable to other machines.
setwd("/users/Anna/Documents/GitHub/dsc520")
# Load the test-score data (expects columns Section, Score, Count).
scores_df <- read.csv("data/scores.csv")
# 3. Create one variable to hold a subset of your data set that contains
#    only the Regular Section and one variable for the Sports Section.
regular_sec <- scores_df[scores_df$Section == "Regular", ]
regular_sec
sports_sec <- scores_df[scores_df$Section == "Sports", ]
sports_sec
# 4. Use the Plot function to plot each Section's scores and the number of
#    students achieving that score. Use additional Plot Arguments to label the
#    graph and give each axis an appropriate label.
plot(Score~Count, data = regular_sec, col = "blue", main = "Regular Section Scores",
     xlab = "Number of Students")
plot(Score~Count, data = sports_sec, col = "red", main = "Sports Section Scores",
     xlab = "Number of Students")
# Once you have produced your Plots answer the following questions:
# a. Comparing and contrasting the point distributions between the two sections,
#    looking at both tendency and consistency: Can you say that one section
#    tended to score more points than the other? Justify and explain your answer.
# Answer: The sports section had slightly more variety in scores than the regular
#           section (sports had 19 different scores and regular had 17 different
#           scores). It also had a wider range of scores (sports range = 200 - 395;
#           regular range = 265 - 380).
#           However, the average score for students in the regular section is higher.
#           This can be seen in the fact that 260 students scored 300 or higher in
#           the regular section, while only 220 students scored 320 or higher in
#           the sports section. (Scoring 300-400 points would be the upper quartile
#           of the data if we assume the range is 0-400 possible points). (Note that
#           there seemed to be duplicate rows in the data for the regular section
#           which could cause errors in analysis.)
# b. Did every student in one section score more points than every student in the
#    other section? If not, explain what a statistical tendency means in this context.
# Answer: Neither section had every student score more points than every student
#           in the other section. In this context, the statistical tendency would most
#           likely be the mode (the most frequent score in each section). For the
#           sports section, the mode was 285 and 335 (both had 30 students with
#           those scores). The regular section mode was 350 (30 students).
# c. What could be one additional variable that was not mentioned in the narrative
#    that could be influencing the point distributions between the two sections?
# Answer: Another variable that could be influencing the point distribution may be
#           a demographic difference between the two sections. It is stated that the
#           sports-themed section was advertised as such to the students prior to
#           registration. It is possible that the students who were more likely to
#           pick a sports-themed class would score differently on average than
#           their counterparts, particularly when only given sports examples. It is
#           also possible that the students who specifically chose NOT to register
#           for a sports-themed class would be influenced differently by a more
# diverse teaching method. | /assignments/1.1_TestScores_HarveyAnna/1.1_TestScores_HarveyAnna.R | permissive | anhar421/dsc520 | R | false | false | 3,855 | r | # Assignment 1.1: Test Scores
# Name: Harvey, Anna
# Date: 2020 - 06 - 07
# 1. What are the observational units in this study?
# 1. The observational units are course grades and total points earned.
# 2. Identify the variables mentioned in the narrative paragraph and
#    determine which are categorical and quantitative?
# 2. The variables are the students in the course and the content taught. The
#    students are categorical variables and the grades are quantitative variables.
# Confirm the current working directory and its contents before switching.
getwd()
dir()
# NOTE(review): absolute, user-specific path; a relative path or an RStudio
# project would make this script portable to other machines.
setwd("/users/Anna/Documents/GitHub/dsc520")
# Load the test-score data (expects columns Section, Score, Count).
scores_df <- read.csv("data/scores.csv")
# 3. Create one variable to hold a subset of your data set that contains
#    only the Regular Section and one variable for the Sports Section.
regular_sec <- scores_df[scores_df$Section == "Regular", ]
regular_sec
sports_sec <- scores_df[scores_df$Section == "Sports", ]
sports_sec
# 4. Use the Plot function to plot each Section's scores and the number of
#    students achieving that score. Use additional Plot Arguments to label the
#    graph and give each axis an appropriate label.
plot(Score~Count, data = regular_sec, col = "blue", main = "Regular Section Scores",
     xlab = "Number of Students")
plot(Score~Count, data = sports_sec, col = "red", main = "Sports Section Scores",
     xlab = "Number of Students")
# Once you have produced your Plots answer the following questions:
# a. Comparing and contrasting the point distributions between the two sections,
#    looking at both tendency and consistency: Can you say that one section
#    tended to score more points than the other? Justify and explain your answer.
# Answer: The sports section had slightly more variety in scores than the regular
#           section (sports had 19 different scores and regular had 17 different
#           scores). It also had a wider range of scores (sports range = 200 - 395;
#           regular range = 265 - 380).
#           However, the average score for students in the regular section is higher.
#           This can be seen in the fact that 260 students scored 300 or higher in
#           the regular section, while only 220 students scored 320 or higher in
#           the sports section. (Scoring 300-400 points would be the upper quartile
#           of the data if we assume the range is 0-400 possible points). (Note that
#           there seemed to be duplicate rows in the data for the regular section
#           which could cause errors in analysis.)
# b. Did every student in one section score more points than every student in the
#    other section? If not, explain what a statistical tendency means in this context.
# Answer: Neither section had every student score more points than every student
#           in the other section. In this context, the statistical tendency would most
#           likely be the mode (the most frequent score in each section). For the
#           sports section, the mode was 285 and 335 (both had 30 students with
#           those scores). The regular section mode was 350 (30 students).
# c. What could be one additional variable that was not mentioned in the narrative
#    that could be influencing the point distributions between the two sections?
# Answer: Another variable that could be influencing the point distribution may be
#           a demographic difference between the two sections. It is stated that the
#           sports-themed section was advertised as such to the students prior to
#           registration. It is possible that the students who were more likely to
#           pick a sports-themed class would score differently on average than
#           their counterparts, particularly when only given sports examples. It is
#           also possible that the students who specifically chose NOT to register
#           for a sports-themed class would be influenced differently by a more
#           diverse teaching method.
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wordbankr.R
\name{get_administration_data}
\alias{get_administration_data}
\title{Get the Wordbank by-administration data}
\usage{
get_administration_data(language = NULL, form = NULL, filter_age = TRUE,
original_ids = FALSE, mode = "remote")
}
\arguments{
\item{language}{An optional string specifying which language's
administrations to retrieve.}
\item{form}{An optional string specifying which form's administrations to
retrieve.}
\item{filter_age}{A logical indicating whether to filter the administrations
to ones in the valid age range for their instrument}
\item{original_ids}{A logical indicating whether to include the original ids provided
by data contributors. Wordbank provides no guarantees about the structure or
uniqueness of these ids. Use at your own risk!}
\item{mode}{A string indicating connection mode: one of \code{"local"},
or \code{"remote"} (defaults to \code{"remote"})}
}
\value{
A data frame where each row is a CDI administration and each column
is a variable about the administration (\code{data_id}, \code{age},
\code{comprehension}, \code{production}), its instrument (\code{language},
\code{form}), its child (\code{birth_order}, \code{ethnicity}, \code{sex},
\code{mom_ed}), or its dataset source (\code{norming},
\code{longitudinal}). Also includes an \code{original_id} column if the
\code{original_ids} flag is \code{TRUE}.
}
\description{
Get the Wordbank by-administration data
}
\examples{
\dontrun{
english_ws_admins <- get_administration_data("English", "WS")
all_admins <- get_administration_data()
}
}
| /man/get_administration_data.Rd | no_license | muranava/wordbankr | R | false | true | 1,652 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wordbankr.R
\name{get_administration_data}
\alias{get_administration_data}
\title{Get the Wordbank by-administration data}
\usage{
get_administration_data(language = NULL, form = NULL, filter_age = TRUE,
original_ids = FALSE, mode = "remote")
}
\arguments{
\item{language}{An optional string specifying which language's
administrations to retrieve.}
\item{form}{An optional string specifying which form's administrations to
retrieve.}
\item{filter_age}{A logical indicating whether to filter the administrations
to ones in the valid age range for their instrument}
\item{original_ids}{A logical indicating whether to include the original ids provided
by data contributors. Wordbank provides no guarantees about the structure or
uniqueness of these ids. Use at your own risk!}
\item{mode}{A string indicating connection mode: one of \code{"local"}
or \code{"remote"} (defaults to \code{"remote"})}
}
\value{
A data frame where each row is a CDI administration and each column
is a variable about the administration (\code{data_id}, \code{age},
\code{comprehension}, \code{production}), its instrument (\code{language},
\code{form}), its child (\code{birth_order}, \code{ethnicity}, \code{sex},
\code{mom_ed}), or its dataset source (\code{norming},
\code{longitudinal}). Also includes an \code{original_id} column if the
\code{original_ids} flag is \code{TRUE}.
}
\description{
Get the Wordbank by-administration data
}
\examples{
\dontrun{
english_ws_admins <- get_administration_data("English", "WS")
all_admins <- get_administration_data()
}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/word_length.R
\name{print.word_length}
\alias{print.word_length}
\title{Prints a word_length object}
\usage{
\method{print}{word_length}(x, ...)
}
\arguments{
\item{x}{The word_length object}
\item{\ldots}{ignored}
}
\description{
Prints a word_length object
}
| /man/print.word_length.Rd | no_license | Maddocent/qdap | R | false | false | 349 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/word_length.R
\name{print.word_length}
\alias{print.word_length}
\title{Prints a word_length object}
\usage{
\method{print}{word_length}(x, ...)
}
\arguments{
\item{x}{The word_length object}
\item{\ldots}{ignored}
}
\description{
Prints a word_length object
}
|
#' @title read.nextstrain.json
#' @param x the json tree file of auspice from nextstrain.
#' @return treedata object
#' @export
#' @author Shuangbin Xu
#' @examples
#' file1 <- system.file("extdata/nextstrain.json", "minimal_v2.json", package="treeio")
#' tr <- read.nextstrain.json(file1)
#' tr
read.nextstrain.json <- function(x){
x <- jsonlite::read_json(x)
# auspice v2 files wrap the tree in a list with 'meta' and 'tree' keys;
# older exports are the tree object itself
if (all(c('meta', 'tree') %in% names(x))){
dt <- parser_children(x$tree)
}else{
dt <- parser_children(x)
}
# build the parent/child edge list; keep branch lengths when the JSON
# carried divergences (parser_children adds 'branch.length' from 'div')
if ('branch.length' %in% colnames(dt)){
rmclnm <- c("parentID", "NodeID", "branch.length")
edgedf <- dt[, rmclnm]
}else{
rmclnm <- c("parentID", "NodeID")
edgedf <- dt[, rmclnm]
}
dd <- as.phylo(edgedf, "branch.length")
# join the node attributes back onto the phylo structure by node id
dt$label <- as.character(dt$NodeID)
dt <- dt[, !colnames(dt) %in% rmclnm, drop=FALSE]
dd <- dd |> tidytree::as_tibble() |> dplyr::full_join(dt, by='label')
# restore the original tip/node names as labels when available
if ("name" %in% colnames(dd)){
dd$label <- dd$name
dd$name <- NULL
}
tr <- dd |> as.treedata()
return(tr)
}
# Recursively walks an auspice JSON tree, assigning sequential integer node
# ids and collecting one attribute row per node.  Mutable state (the id
# counter and the accumulated rows) is shared across the recursion through
# the environment `id`.
# NOTE(review): the parent id passed to children is read from the previous
# entry in the accumulator, so correctness relies on the traversal order.
parser_children <- function(x, id=list2env(list(id = 0L)), parent = 1){
id[["id"]] <- id[["id"]] + 1L
id[["data"]][[id[["id"]]]] <- extract_node_attrs(x, id=id[["id"]], isTip=FALSE, parent=parent)
# when divergences ('div') are present, the branch length is the
# child-minus-parent divergence difference
if ('div' %in% colnames(id[['data']][[id[['id']]]])){
parent.index <- id[['data']][[id[['id']]]][['parentID']]
id[['data']][[id[['id']]]][['branch.length']] <- as.numeric(id[['data']][[id[['id']]]][['div']]) -
as.numeric(id[['data']][[parent.index]][['div']])
}
if ('children' %in% names(x)){
lapply(x$children,
parser_children,
id = id,
parent = ifelse(id[['id']]>=2, id[["data"]][[id[["id"]]-1L]][["NodeID"]], 1)
)
}else{
# no children: mark this node as a tip
id[["data"]][[id[["id"]]]][["isTip"]] <- TRUE
}
dat <- dplyr::bind_rows(as.list(id[["data"]])) %>% dplyr::mutate_if(check_num, as.numeric)
return(dat)
}
# Tests whether `x` is a character vector whose values can all be parsed as
# numbers, i.e. a column that is safe to convert with `as.numeric`.
#
# Bug fix: the original body `is_numeric(x) && is.character(x)` referenced
# `is_numeric`, which does not exist in base R, and even with `is.numeric`
# the conjunction could never be TRUE (a vector cannot be both numeric and
# character).  The intended check -- per its use in
# `mutate_if(check_num, as.numeric)` -- is "character column holding
# numeric text".
check_num <- function(x){
is.character(x) && is.numeric(utils::type.convert(x, as.is = TRUE))
}
# Builds a one-row attribute table for a single tree node.
# Attribute payloads live under 'node_attrs' (auspice v2) or 'attr'
# (auspice v1); the node label comes from 'name' or, failing that,
# 'strain'.  Bookkeeping columns parentID/NodeID/isTip are appended for
# the edge-list reconstruction performed in parser_children().
extract_node_attrs <- function(x, id, isTip, parent){
keys <- names(x)
if ('node_attrs' %in% keys) {
res <- build_node_attrs(x[['node_attrs']])
} else if ('attr' %in% keys) {
res <- build_node_attrs(x[['attr']])
} else {
res <- data.frame()
}
if ('name' %in% keys) {
res$name <- x[['name']]
} else if ('strain' %in% keys) {
res$name <- x[['strain']]
}
res$parentID <- parent
res$NodeID <- id
res$isTip <- isTip
res
}
# Flattens a nested node-attribute list into a one-row tibble, stripping
# the trailing '.value' suffix that auspice JSON uses for scalar
# attribute entries.
build_node_attrs <- function(x){
flat <- unlist(x)
# sub() with the anchored pattern removes at most one trailing '.value',
# leaving names without the suffix untouched
names(flat) <- sub('\\.value$', '', names(flat))
tibble::as_tibble(t(flat))
}
| /R/nextstrain.json.R | no_license | YuLab-SMU/treeio | R | false | false | 2,760 | r | #' @title read.nextstrain.json
#' @param x the json tree file of auspice from nextstrain.
#' @return treedata object
#' @export
#' @author Shuangbin Xu
#' @examples
#' file1 <- system.file("extdata/nextstrain.json", "minimal_v2.json", package="treeio")
#' tr <- read.nextstrain.json(file1)
#' tr
read.nextstrain.json <- function(x){
x <- jsonlite::read_json(x)
if (all(c('meta', 'tree') %in% names(x))){
dt <- parser_children(x$tree)
}else{
dt <- parser_children(x)
}
if ('branch.length' %in% colnames(dt)){
rmclnm <- c("parentID", "NodeID", "branch.length")
edgedf <- dt[, rmclnm]
}else{
rmclnm <- c("parentID", "NodeID")
edgedf <- dt[, rmclnm]
}
dd <- as.phylo(edgedf, "branch.length")
dt$label <- as.character(dt$NodeID)
dt <- dt[, !colnames(dt) %in% rmclnm, drop=FALSE]
dd <- dd |> tidytree::as_tibble() |> dplyr::full_join(dt, by='label')
if ("name" %in% colnames(dd)){
dd$label <- dd$name
dd$name <- NULL
}
tr <- dd |> as.treedata()
return(tr)
}
parser_children <- function(x, id=list2env(list(id = 0L)), parent = 1){
id[["id"]] <- id[["id"]] + 1L
id[["data"]][[id[["id"]]]] <- extract_node_attrs(x, id=id[["id"]], isTip=FALSE, parent=parent)
if ('div' %in% colnames(id[['data']][[id[['id']]]])){
parent.index <- id[['data']][[id[['id']]]][['parentID']]
id[['data']][[id[['id']]]][['branch.length']] <- as.numeric(id[['data']][[id[['id']]]][['div']]) -
as.numeric(id[['data']][[parent.index]][['div']])
}
if ('children' %in% names(x)){
lapply(x$children,
parser_children,
id = id,
parent = ifelse(id[['id']]>=2, id[["data"]][[id[["id"]]-1L]][["NodeID"]], 1)
)
}else{
id[["data"]][[id[["id"]]]][["isTip"]] <- TRUE
}
dat <- dplyr::bind_rows(as.list(id[["data"]])) %>% dplyr::mutate_if(check_num, as.numeric)
return(dat)
}
# Tests whether `x` is a character vector whose values can all be parsed as
# numbers, i.e. a column that is safe to convert with `as.numeric`.
#
# Bug fix: the original body `is_numeric(x) && is.character(x)` referenced
# `is_numeric`, which does not exist in base R, and even with `is.numeric`
# the conjunction could never be TRUE (a vector cannot be both numeric and
# character).
check_num <- function(x){
is.character(x) && is.numeric(utils::type.convert(x, as.is = TRUE))
}
extract_node_attrs <- function(x, id, isTip, parent){
if ('node_attrs' %in% names(x)){
res <- build_node_attrs(x[['node_attrs']])
}else if('attr' %in% names(x)){
res <- build_node_attrs(x[['attr']])
}else{
res <- data.frame()
}
if ('name' %in% names(x)){
res$name <- x[['name']]
}else if('strain' %in% names(x)){
res$name <- x[['strain']]
}
res$parentID <- parent
res$NodeID <- id
res$isTip <- isTip
return(res)
}
build_node_attrs <- function(x){
x <- unlist(x)
index <- grepl('\\.value$', names(x))
names(x)[index] <- gsub('\\.value$', '', names(x)[index])
x <- tibble::as_tibble(t(x))
return(x)
}
|
### computation of the power under H_1, for an already known critical value of the test
### i.e. the power does not use the data any more
### here, the functions, which were introduced in CriticalValue.R, are used in a different context
### the power is evaluated for a specific H_1, while the critical value is obtained based on the distribution of the test statistic under H_0
rm(list=ls())
### init.
Delta=0.2;
K=1.1;
sigma=2;
lambda=0.2;
nr=10; #within VW
mystep=0.1;
### functions
# First Edgeworth-expansion weight: -z * phi(z), where phi is the standard
# normal density.  Returned as list(out = ...) to match the calling
# convention of the other expansion terms.
fd <- function(z) {
  list(out = -z * dnorm(z))
}
# Third-order Edgeworth-expansion weight: (-z^3 + 3z) * phi(z).
f3 <- function(z) {
  weight <- 3 * z - z^3
  list(out = weight * dnorm(z))
}
# Contaminated-normal counterpart of fd(): mixes the standard normal and
# the N(0, sigma^2) density with mixing weight `lambda` (both `lambda`
# and `sigma` are taken from the enclosing script's globals).
gd <- function(z) {
  clean <- (lambda - 1) * z * dnorm(z)
  contaminated <- (lambda * z / sigma^2) * dnorm(z, sd = sigma)
  list(out = clean - contaminated)
}
# Contaminated-normal counterpart of f3(); relies on the script-level
# globals `lambda` and `sigma`.
g3 <- function(z) {
  part_norm <- (3 * z + (1 - lambda) * z^3 - 3 * z * lambda) * dnorm(z)
  part_cont <- (3 * z * lambda / sigma^4 - z^3 * lambda / sigma^6) * dnorm(z, sd = sigma)
  list(out = part_norm + part_cont)
}
# Edgeworth-type expansion of the null CDF at `z`, with shift `delta` and
# second-order parameter `kappa`; built from fd() and f3().
# NOTE(review): the name masks the base shorthand `F` for FALSE within
# this script; kept for interface compatibility.
F <- function(z, delta, kappa) {
  value <- pnorm(z) +
    delta * fd(z)$out / 2 +
    kappa * delta^2 * f3(z)$out / 24
  list(out = value)
}
# Contaminated-normal CDF under the alternative: a (1 - lambda)/lambda
# mixture of N(0, 1) and N(0, sigma^2).  `lambda` and `sigma` are script
# globals.
G0 <- function(z) {
  mix <- (1 - lambda) * pnorm(z) + lambda * pnorm(z, mean = 0, sd = sigma)
  list(out = mix)
}
# Expansion of the alternative CDF, analogous to F() but assembled from
# the contaminated-normal pieces G0(), gd() and g3().
G <- function(z, delta, kappa) {
  value <- G0(z)$out +
    delta * gd(z)$out / 2 +
    kappa * delta^2 * g3(z)$out / 24
  # a CDF value above 1 signals the expansion broke down for these inputs
  if (value > 1) print(c("attention", value))
  list(out = value)
}
# Approximates sup over (delta, kappa) in [0, Delta] x [0, K] of the null
# CDF F(z, ., .) and the alternative CDF G(z, ., .): evaluates `nr`
# uniformly random parameter draws plus the four corners of the
# rectangle, and returns the maxima of both.
# Depends on the script globals nr, Delta, K and on F() and G(); uses
# runif(), so the result varies with the RNG state.
VW=function(z) ###### finding the supremum
{#print("VW");
pv=pw=rep(0,nr+4); #random choices + boundaries
for (i in 1:nr)
{delta=runif(1, min=0, max=Delta);
kappa=runif(1, min=0, max=K);
pv[i]=F(z,delta,kappa)$out;
pw[i]=G(z,delta,kappa)$out;
}#for
# the four corners of the (delta, kappa) rectangle
pv[nr+1]=F(z,delta=0,kappa=0)$out; pv[nr+2]=F(z,delta=0,kappa=K)$out;
pv[nr+3]=F(z,delta=Delta,kappa=0)$out; pv[nr+4]=F(z,delta=Delta,kappa=K)$out;
pw[nr+1]=G(z,delta=0,kappa=0)$out; pw[nr+2]=G(z,delta=0,kappa=K)$out;
pw[nr+3]=G(z,delta=Delta,kappa=0)$out; pw[nr+4]=G(z,delta=Delta,kappa=K)$out;
vout=max(pv); wout=max(pw); #print(c(z,vout,wout));
list(vout=vout, wout=wout);
}#VW
# Numerically differentiates the vectors `a` (null CDF values) and `b`
# (alternative CDF values) on the grid `myint` via central differences
# with step `mystep` (a script global), and returns the likelihood-ratio
# curve derw/derv along with the grid.
# The grid midpoint (`myzero = p/2 + 0.5`) is skipped when computing
# derw -- NOTE(review): this assumes `p` is odd so that `myzero` is a
# whole-number index; confirm the grid always has an odd length.
# NOTE(review): shadows base::deriv() within this script.
deriv=function(a,b,myint)
{#derivative of vectors a,b
p=length(a);
derv=derw=rep(0.5,p);
myzero=p/2+0.5; #print(c("zero", myzero));
for (i in 2:(p-1))
derv[i]=(a[i+1]-a[i-1])/(2*mystep);
for (i in 2:(myzero-1))
{#derv[i]=(a[i+1]-a[i-1])/(2*mystep);
derw[i]=(b[i+1]-b[i-1])/(2*mystep);
}
for (i in (myzero+1):(p-1))
{#derv[i]=(a[i+1]-a[i-1])/(2*mystep);
derw[i]=(b[i+1]-b[i-1])/(2*mystep);
}
# print(cbind(a,b,derv,derw,derw/derv));#values of the derivatives
# x11();
# jpeg("U:/obr11.jpg", height=5,width=5,units="in", res=600);
# plot(myint[2:(p-1)],derv[2:(p-1)], xlab=" ", ylab=" ", ylim=c(-0.1,0.5), xlim=c(-5,5)); lines(myint[2:(p-1)],derv[2:(p-1)]);
# plot(myint[2:(myzero-1)],derv[2:(myzero-1)], xlab=" ", ylab=" ", ylim=c(-0.05,1.2), xlim=c(-5,5)); lines(myint[2:(myzero-1)],derv[2:(myzero-1)]);
# points(myint[(myzero+1):(p-1)],derv[(myzero+1):(p-1)], xlab=" ", ylab=" "); lines(myint[(myzero+1):(p-1)],derv[(myzero+1):(p-1)]);
# points(myint[2:(myzero-1)],derw[2:(myzero-1)], col="red"); lines(myint[2:(myzero-1)],derw[2:(myzero-1)], col="red");
# points(myint[(myzero+1):(p-1)],derw[(myzero+1):(p-1)], col="red"); lines(myint[(myzero+1):(p-1)],derw[(myzero+1):(p-1)], col="red");
po=derw/derv;
# points(myint[2:(myzero-1)],po[2:(myzero-1)], col="brown"); lines(myint[2:(myzero-1)], po[2:(myzero-1)], col="brown");
# points(myint[(myzero+1):(p-1)], po[(myzero+1):(p-1)], col="brown"); lines(myint[(myzero+1):(p-1)], po[(myzero+1):(p-1)], col="brown");
# dev.off()
list(x=myint,podil=po)
}#deriv
# Step-function lookup: returns the value of `po` at the largest grid
# point of `x` that is strictly below `zz` (or the first grid value when
# `zz` does not exceed the start of the grid).  Assumes `x` is sorted
# ascending, as produced by deriv().
testh <- function(zz, x, po) {
  if (x[1] < zz) {
    # index of the largest grid point below zz (grid is ascending)
    idx <- which.max(x[x < zz])
  } else {
    idx <- 1
  }
  list(out = po[idx])
}
# Generates a synthetic sample (15 standard-normal draws followed by 5
# contaminated draws with sd 2.5), evaluates the likelihood-ratio curve
# `po` (defined on grid `x`) at each observation via testh(), and returns
# the product of those values as the test statistic.
# `p` is accepted for interface compatibility but unused.
mytest <- function(x, po, p) {
  # keep the RNG call order identical to the original implementation
  mydata <- c(rnorm(15), rnorm(5, mean = 0, sd = 2.5))
  myval <- vapply(mydata, function(z) testh(z, x, po)$out, numeric(1))
  list(krith = prod(myval))
}
# One full simulation run: evaluates the sup-CDFs V and W on the grid
# [-5, 5] (step `mystep`), differentiates them via deriv() to obtain the
# likelihood-ratio curve, and computes the test statistic on a fresh
# synthetic sample via mytest().  Returns mytest()'s list (component
# `krith`), which is the (invisible) value of the final assignment.
# NOTE(review): the local variable `rm` shadows base::rm() here.
main=function()
{myint=seq(-5,5,mystep); #=-5:5; #or enumeration
#index of zero: length(myint)/2+0.5;
vv=ww=rep(0,length(myint));
for (i in 1:length(myint))
{rm=VW(myint[i]);
vv[i]=rm$vout;
ww[i]=rm$wout;}
rm=ww[2:length(myint)]-ww[1:(length(myint)-1)]; #must be non-negative
# print(cbind(myint,vv,ww,rm));
# x11();
# jpeg("U:/obr11.jpg", height=5,width=5,units="in", res=600);
# plot(myint, vv, ylim=c(0,1.2), xlab=" ", ylab=" ");
# points(myint, ww, col="red", pch=3);
sm=deriv(vv,ww,myint);
podil=sm$podil; x=sm$x;
sm=mytest(x,podil,length(myint));
# dev.off()
}#main
# Runs the full simulation main() `nc` times and collects the critical
# value (test statistic) from each replication, echoing progress as it
# goes.
myrepeat <- function(nc) {
  print(c("i, critical value"))
  out <- vapply(seq_len(nc), function(i) {
    stat <- main()$krith
    print(c(i, stat))  # replication index and its critical value
    stat
  }, numeric(1))
  list(out = out)
}
### critical value
# Simulates `nc` replications of the test statistic via myrepeat(), plots
# the sorted statistics, and prints the empirical exceedance rate of the
# critical value `crit`.
#
# Bug fix: the original called `opakovat(nc)`, an undefined (Czech-named)
# leftover; the function defined above is `myrepeat()`.
# Generalized: `nc` and `crit` are now parameters with the original
# hard-coded values (100 and 1.29) as backward-compatible defaults.
myrun <- function(nc = 100, crit = 1.29) {
  my <- myrepeat(nc)$out
  plot(sort(my))
  print(sum(my > crit) / nc)
}# myrun()
### i.e. the power does not use the data any more
### here, the functions, which were introduced in CriticalValue.R, are used in a different context
### the power is evaluated for a specific H_1, while the critical value is obtained based on the distribution of the test statistic under H_0
rm(list=ls())
### init.
Delta=0.2;
K=1.1;
sigma=2;
lambda=0.2;
nr=10; #within VW
mystep=0.1;
### functions
fd=function(z)
{out=-z*dnorm(z);
list(out=out)}#
f3=function(z)
{out=(-z^3+3*z)*dnorm(z);
list(out=out)}#
gd=function(z)
{out=(lambda-1)*z*dnorm(z) - (lambda*z/sigma^2)*dnorm(z,sd=sigma);
list(out=out)}#
g3=function(z)
{out= (3*z+(1-lambda)*z^3-3*z*lambda)*dnorm(z) + (3*z*lambda/sigma^4 - z^3*lambda/sigma^6)*dnorm(z,sd=sigma);
list(out=out)}#
F=function(z,delta,kappa)
{out=pnorm(z) + delta *fd(z)$out/2 + kappa * delta^2 * f3(z)$out/24;
list(out=out)}#
G0=function(z)
{out = (1-lambda)*pnorm(z) + lambda*pnorm(z, mean=0, sd=sigma);
list(out=out)}#
G=function(z,delta,kappa)
{out = G0(z)$out + delta *gd(z)$out/2 + kappa * delta^2 * g3(z)$out/24;
if (out>1) print(c("attention",out));
list(out=out)}#
VW=function(z) ###### finding the supremum
{#print("VW");
pv=pw=rep(0,nr+4); #random choices + boundaries
for (i in 1:nr)
{delta=runif(1, min=0, max=Delta);
kappa=runif(1, min=0, max=K);
pv[i]=F(z,delta,kappa)$out;
pw[i]=G(z,delta,kappa)$out;
}#for
pv[nr+1]=F(z,delta=0,kappa=0)$out; pv[nr+2]=F(z,delta=0,kappa=K)$out;
pv[nr+3]=F(z,delta=Delta,kappa=0)$out; pv[nr+4]=F(z,delta=Delta,kappa=K)$out;
pw[nr+1]=G(z,delta=0,kappa=0)$out; pw[nr+2]=G(z,delta=0,kappa=K)$out;
pw[nr+3]=G(z,delta=Delta,kappa=0)$out; pw[nr+4]=G(z,delta=Delta,kappa=K)$out;
vout=max(pv); wout=max(pw); #print(c(z,vout,wout));
list(vout=vout, wout=wout);
}#VW
deriv=function(a,b,myint)
{#derivative of vectors a,b
p=length(a);
derv=derw=rep(0.5,p);
myzero=p/2+0.5; #print(c("zero", myzero));
for (i in 2:(p-1))
derv[i]=(a[i+1]-a[i-1])/(2*mystep);
for (i in 2:(myzero-1))
{#derv[i]=(a[i+1]-a[i-1])/(2*mystep);
derw[i]=(b[i+1]-b[i-1])/(2*mystep);
}
for (i in (myzero+1):(p-1))
{#derv[i]=(a[i+1]-a[i-1])/(2*mystep);
derw[i]=(b[i+1]-b[i-1])/(2*mystep);
}
# print(cbind(a,b,derv,derw,derw/derv));#values of the derivatives
# x11();
# jpeg("U:/obr11.jpg", height=5,width=5,units="in", res=600);
# plot(myint[2:(p-1)],derv[2:(p-1)], xlab=" ", ylab=" ", ylim=c(-0.1,0.5), xlim=c(-5,5)); lines(myint[2:(p-1)],derv[2:(p-1)]);
# plot(myint[2:(myzero-1)],derv[2:(myzero-1)], xlab=" ", ylab=" ", ylim=c(-0.05,1.2), xlim=c(-5,5)); lines(myint[2:(myzero-1)],derv[2:(myzero-1)]);
# points(myint[(myzero+1):(p-1)],derv[(myzero+1):(p-1)], xlab=" ", ylab=" "); lines(myint[(myzero+1):(p-1)],derv[(myzero+1):(p-1)]);
# points(myint[2:(myzero-1)],derw[2:(myzero-1)], col="red"); lines(myint[2:(myzero-1)],derw[2:(myzero-1)], col="red");
# points(myint[(myzero+1):(p-1)],derw[(myzero+1):(p-1)], col="red"); lines(myint[(myzero+1):(p-1)],derw[(myzero+1):(p-1)], col="red");
po=derw/derv;
# points(myint[2:(myzero-1)],po[2:(myzero-1)], col="brown"); lines(myint[2:(myzero-1)], po[2:(myzero-1)], col="brown");
# points(myint[(myzero+1):(p-1)], po[(myzero+1):(p-1)], col="brown"); lines(myint[(myzero+1):(p-1)], po[(myzero+1):(p-1)], col="brown");
# dev.off()
list(x=myint,podil=po)
}#deriv
testh=function(zz,x,po)#technical
{if (x[1]<zz) {lo=which.max(x[x<zz]);}
else {lo=1;}
#print(c(zz,lo,lo+1));
out=po[lo];
#if (abs(lo-zz)>abs((lo+1)-zz))
# out=po[up];#nedef.
#print("testh"); print(c(zz,out));
list(out=out)
}#testh
mytest=function(x,po,p) ### for real data
{#plot(x,po, ylim=c(-0.05,2)); #simplistic, not needed
mydata=myval=rep(0,20); mydata[1:15]=rnorm(15); mydata[16:20]=rnorm(5,mean=0,sd=2.5);
for (i in 1:length(mydata))
myval[i]=testh(mydata[i],x,po)$out;
krith=prod(myval);
#print("in mytest, generated data"); print(mydata);
#print("in mytest, individual likelihood values");
#print(myval);
#print(c("in mytest, critical value", krith));
list(krith=krith)
}#mytest
main=function()
{myint=seq(-5,5,mystep); #=-5:5; #or enumeration
#index of zero: length(myint)/2+0.5;
vv=ww=rep(0,length(myint));
for (i in 1:length(myint))
{rm=VW(myint[i]);
vv[i]=rm$vout;
ww[i]=rm$wout;}
rm=ww[2:length(myint)]-ww[1:(length(myint)-1)]; #must be non-negative
# print(cbind(myint,vv,ww,rm));
# x11();
# jpeg("U:/obr11.jpg", height=5,width=5,units="in", res=600);
# plot(myint, vv, ylim=c(0,1.2), xlab=" ", ylab=" ");
# points(myint, ww, col="red", pch=3);
sm=deriv(vv,ww,myint);
podil=sm$podil; x=sm$x;
sm=mytest(x,podil,length(myint));
# dev.off()
}#main
myrepeat=function(nc)
{out=rep(0,nc);
print(c("i, critical value"));
for (i in 1:nc)
{out[i]=main()$krith;
print(c(i,out[i]));#critical value
}
#plot(out);
list(out=out)
}#myrepeat
### critical value
# Simulates `nc` replications of the test statistic via myrepeat(), plots
# the sorted statistics, and prints the empirical exceedance rate of the
# critical value `crit`.
#
# Bug fix: the original called `opakovat(nc)`, an undefined (Czech-named)
# leftover; the function defined above is `myrepeat()`.
myrun <- function(nc = 100, crit = 1.29) {
  my <- myrepeat(nc)$out
  plot(sort(my))
  print(sum(my > crit) / nc)
}# myrun()
## Assigment 2 from Coursera R Programming July 2014
## Written by mvelasco (maria.velasco.c@gmail)
## Requierement: This function creates a special "matrix"
## object that can cache its inverse.
## Creates a special "matrix" wrapper: a list of closures that store a
## matrix together with a cached copy of its inverse.
##
## Returned functions:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix: drop any stale inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached_inverse <<- solve
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Requirement: this function computes the inverse of the
## special "matrix" returned by makeCacheMatrix above. If the
## inverse has already been calculated (and the matrix has not
## changed), then the cachesolve should retrieve the inverse
## from the cache.
## Computes the inverse of the special "matrix" returned by
## makeCacheMatrix.  If the inverse has already been calculated (and the
## matrix has not changed), the cached inverse is returned instead of
## being recomputed.
##
## Bug fixes:
##  * `return(m)` referenced an undefined object; it must return `ma`.
##  * the freshly computed inverse was never stored with x$setinverse(),
##    so the cache could never be hit on later calls.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        ma <- x$getinverse()
        if(!is.null(ma)) {
                message("getting cached data")
                return(ma)
        }
        data <- x$get()
        ma <- solve(data, ...)
        x$setinverse(ma)  # cache the result for subsequent calls
        ma
}
| /cachematrix.R | no_license | mvelascoc/ProgrammingAssignment2 | R | false | false | 1,060 | r | ## Assigment 2 from Coursera R Programming July 2014
## Written by mvelasco (maria.velasco.c@gmail)
## Requierement: This function creates a special "matrix"
## object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
ma <- NULL
set <- function(y) {
x <<- y
ma <<- NULL
}
get <- function() x
setinverse <- function(solve) ma <<- solve
getinverse <- function() ma
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Requirement: this function computes the inverse of the
## special "matrix" returned by makeCacheMatrix above. If the
## inverse has already been calculated (and the matrix has not
## changed), then the cachesolve should retrieve the inverse
## from the cache.
## Computes the inverse of the special "matrix" returned by
## makeCacheMatrix, retrieving a cached inverse when available.
##
## Bug fixes:
##  * `return(m)` referenced an undefined object; it must return `ma`.
##  * the freshly computed inverse was never stored with x$setinverse(),
##    so the cache could never be hit on later calls.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        ma <- x$getinverse()
        if(!is.null(ma)) {
                message("getting cached data")
                return(ma)
        }
        data <- x$get()
        ma <- solve(data, ...)
        x$setinverse(ma)  # cache the result for subsequent calls
        ma
}
|
#' @title att_gt_het2
#'
#'
#' @description \code{att_gt_het2} computes the difference of average treatment effects across two subpopulations
#' in DID setups where there are more than two periods of data and
#' allowing for treatment to occur at different points in time. Here, we assume the same trends
#' between the two subpopulations. See Marcus and Sant'Anna (2020) for a detailed description.
#'
#' @param outcome The outcome y (in quotations, always!)
#' @param data The name of the data.frame that contains the data
#' @param tname The name of the column containing the time periods
#' @param idname The individual (cross-sectional unit) id name
#' @param first.treat.name The name of the variable in \code{data} that contains the first
#' period when a particular observation is treated. This should be a positive
#' number for all observations in treated groups. It should be 0 for observations
#' in the untreated group.
#' @param nevertreated Boolean for using the group which is never treated in the sample as the comparison unit. Default is TRUE.
#' @param het The name of the column containing the (binary) categories for heterogeneity
#' @param aggte boolean for whether or not to compute aggregate treatment effect parameters, default TRUE
#' @param maxe maximum values of periods ahead to be computed in event study. Only used if aggte = T.
#' @param mine minimum values of periods ahead to be computed in event study. Only used if aggte = T.
#' @param w The name of the column containing the sampling weights. If not set, all observations have same weight.
#' @param alp the significance level, default is 0.05
#' @param bstrap Boolean for whether or not to compute standard errors using
#'  the multiplier bootstrap. If standard errors are clustered, then one
#'  must set \code{bstrap=TRUE}. Default is \code{TRUE}.
#' @param biters The number of bootstrap iterations to use. The default is 1000,
#'  and this is only applicable if \code{bstrap=TRUE}.
#' @param clustervars A vector of variables to cluster on. At most, there
#' can be two variables (otherwise will throw an error) and one of these
#' must be the same as idname which allows for clustering at the individual
#' level.
#' @param cband Boolean for whether or not to compute a uniform confidence
#' band that covers all of the group-time average treatment effects
#' with fixed probability \code{1-alp}. The default is \code{TRUE}
#' and the resulting standard errors will be pointwise.
#' @param printdetails Boolean for showing detailed results or not
#'
#' @param method The method for estimating the propensity score when covariates
#' are included (not implemented)
#' @param seedvec Optional value to set random seed; can possibly be used
#' in conjunction with bootstrapping standard errors#' (not implemented)
#' @param pl Boolean for whether or not to use parallel processing (not implemented) is TRUE.
#' @param cores The number of cores to use for parallel processing (not implemented)
#'
#' @references Callaway, Brantly and Sant'Anna, Pedro. "Difference-in-Differences with Multiple Time Periods and an Application on the Minimum Wage and Employment." Working Paper <https://ssrn.com/abstract=3148250> (2018).
#' @return \code{MP} object
#'
#' @export
att_gt_het2 <-function(outcome, data,
tname, idname=NULL,first.treat.name,
nevertreated = T,
het,
aggte=TRUE,
maxe = NULL,
mine = NULL,
w=NULL,
alp=0.05,
bstrap=T, biters=1000, clustervars=NULL,
cband=T,
printdetails=TRUE,
seedvec=NULL, pl=FALSE, cores=2,method="logit")
{
# NOTE(review): `cband`, `pl`, `cores`, `seedvec` and `method` are accepted
# here but not used in this visible body (some are forwarded to
# compute.att_gt_het2); `V` below is assigned and never used.
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Data pre-processing and error checking
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
## make sure that data is a data.frame
df <- data
## make sure that data is a data.frame
## this gets around RStudio's default of reading data as tibble
if (!all( class(df) == "data.frame")) {
#warning("class of data object was not data.frame; converting...")
df <- as.data.frame(df)
}
# weights if null
if(is.character(w)) w <- df[, as.character(w)]
if(is.null(w)) {
w <- as.vector(rep(1, nrow(df)))
} else if(min(w) < 0) stop("'w' must be non-negative")
# het if null
if(is.null(het)) {
stop("Please specifiy 'het'. If het=NULL, use 'att_gt' instead of 'att_gt_het'.")
}
if(is.character(het)) het <- df[, as.character(het)]
het.dim <- length(unique(het))
if(het.dim!=2) {
stop("'het' must be a binary variable.")
}
# build weight columns: overall, and restricted to each het subpopulation
df$w <- w
df$w1 <- w * (het==1)
df$w0 <- w * (het==0)
df$y <- df[, as.character(outcome)] ##df[,as.character(formula.tools::lhs(formla))]
##figure out the dates and make balanced panel
tlist <- unique(df[,tname])[order(unique(df[,tname]))] ## this is going to be from smallest to largest
flist <- unique(df[,first.treat.name])[order(unique(df[,first.treat.name]))]
# Check if there is a never treated grup
if ( length(flist[flist==0]) == 0) {
if(nevertreated){
stop("It seems you do not have a never-treated group in the data. If you do have a never-treated group in the data, make sure to set data[,first.treat.name] = 0 for the observation in this group. Otherwise, select nevertreated = F so you can use the not-yet treated units as a comparison group.")
} else {
warning("It seems like that there is not a never-treated group in the data. In this case, we cannot identity the ATT(g,t) for the group that is treated las, nor any ATT(g,t) for t higher than or equal to the largest g.\n \nIf you do have a never-treated group in the data, make sure to set data[,first.treat.name] = 0 for the observation in this group.")
# Drop all time periods with time periods >= latest treated
df <- base::subset(df,(df[,tname] < max(flist)))
# Replace last treated time with zero
lines.gmax = df[,first.treat.name]==max(flist)
df[lines.gmax,first.treat.name] <- 0
##figure out the dates
tlist <- unique(df[,tname])[order(unique(df[,tname]))] ## this is going to be from smallest to largest
# Figure out the groups
flist <- unique(df[,first.treat.name])[order(unique(df[,first.treat.name]))]
}
}
# First treated groups
flist <- flist[flist>0]
##################################
## do some error checking
if (!is.numeric(tlist)) {
warning("not guaranteed to order time periods correclty if they are not numeric")
}
## check that first.treat doesn't change across periods for particular individuals
if (!all(sapply( split(df, df[,idname]), function(df) {
length(unique(df[,first.treat.name]))==1
}))) {
stop("Error: the value of first.treat must be the same across all periods for each particular individual.")
}
####################################
# How many time periods
tlen <- length(tlist)
# How many treated groups
flen <- length(flist)
df <- BMisc::makeBalancedPanel(df, idname, tname)
#dta is used to get a matrix of size n (like in cross sectional data)
dta <- df[ df[,tname]==tlist[1], ] ## use this for the influence function
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Compute all ATT(g,t) for each het group
#-------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Results for het==1
#----------------------------------------------------------------------------
results_het1 <- compute.att_gt_het2(flen, tlen, flist, tlist, df, dta, first.treat.name,
outcome, tname, idname, method, seedvec,
pl, cores, printdetails, nevertreated, het=1)
fatt_het1 <- results_het1$fatt
inffunc_het1 <- results_het1$inffunc
#----------------------------------------------------------------------------
# Results for het==0
#----------------------------------------------------------------------------
results_het0 <- compute.att_gt_het2(flen, tlen, flist, tlist, df, dta, first.treat.name,
outcome, tname, idname, method, seedvec,
pl, cores, printdetails, nevertreated, het=0)
fatt_het0 <- results_het0$fatt
inffunc_het0 <- results_het0$inffunc
#----------------------------------------------------------------------------
## process the results from computing the spatt
# Flatten the (group, time) results into parallel vectors and collect the
# per-observation influence functions column-by-column.
group <- c()
tt <- c()
att_het1 <- c()
att_het0 <- c()
i <- 1
inffunc1_het1 <- matrix(0, ncol=flen*(tlen), nrow=nrow(dta)) ## note, this might not work in unbalanced case
inffunc1_het0 <- matrix(0, ncol=flen*(tlen), nrow=nrow(dta))
for (f in 1:length(flist)) {
for (s in 1:(length(tlist))) {
group[i] <- fatt_het1[[i]]$group
tt[i] <- fatt_het1[[i]]$year
att_het1[i] <- fatt_het1[[i]]$att
att_het0[i] <- fatt_het0[[i]]$att
inffunc1_het1[,i] <- inffunc_het1[f,s,]
inffunc1_het0[,i] <- inffunc_het0[f,s,]
i <- i+1
}
}
# THIS IS ANALOGOUS TO CLUSTER ROBUST STD ERRORS (in our specific setup)
n <- nrow(dta)
V <- NULL
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Compute all summaries of the ATT(g,t)
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
aggeffects <- NULL
aggeffects_het1 <- NULL
aggeffects_het0 <- NULL
if (aggte) {
# aggregate within each het subpopulation, then take differences of the
# aggregated parameters and their influence functions
aggeffects_het1 <- compute.aggte_het(flist, tlist, group, tt, att_het1, first.treat.name, inffunc1_het1,
n, clustervars, dta, idname, bstrap, biters, alp, maxe, mine, het=1)
aggeffects_het0 <- compute.aggte_het(flist, tlist, group, tt, att_het0, first.treat.name, inffunc1_het0,
n, clustervars, dta, idname, bstrap, biters, alp, maxe, mine,het=0)
aggeffects <- list(simple.att = aggeffects_het1$simple.att - aggeffects_het0$simple.att,
simple.att.inf.func = aggeffects_het1$simple.att.inf.func - aggeffects_het0$simple.att.inf.func,
dynamic.att = aggeffects_het1$dynamic.att - aggeffects_het0$dynamic.att,
dynamic.att.inf.func = aggeffects_het1$dynamic.att.inf.func - aggeffects_het0$dynamic.att.inf.func,
dynamic.att.e = aggeffects_het1$dynamic.att.e - aggeffects_het0$dynamic.att.e,
dyn.inf.func.e = aggeffects_het1$dyn.inf.func.e - aggeffects_het0$dyn.inf.func.e,
e = aggeffects_het1$e
)
# Standard error of a single aggregated influence function: either a
# multiplier bootstrap (sign weights, IQR-based robust sigma) when
# `bstrap`, or the analytic plug-in formula otherwise.
getSE_inf <- function(thisinffunc) {
if (bstrap) {
if (idname %in% clustervars) {
clustervars <- clustervars[-which(clustervars==idname)]
}
if (length(clustervars) > 1) {
stop("can't handle that many cluster variables")
}
bout <- lapply(1:biters, FUN=function(b) {
if (length(clustervars) > 0) {
# one sign weight per cluster, mapped back to observations
n1 <- length(unique(dta[,clustervars]))
Vb <- matrix(sample(c(-1,1), n1, replace=T))
Vb <- cbind.data.frame(unique(dta[,clustervars]), Vb)
Ub <- data.frame(dta[,clustervars])
Ub <- Vb[match(Ub[,1], Vb[,1]),]
Ub <- Ub[,-1]
} else {
Ub <- sample(c(-1,1), n, replace=T)
}
Rb <- base::mean(Ub*(thisinffunc), na.rm = T)
Rb
})
bres <- as.vector(simplify2array(bout))
# robust scale estimate from the bootstrap draws (IQR / normal IQR)
bSigma <- (quantile(bres, .75, type=1, na.rm = T) - quantile(bres, .25, type=1, na.rm = T)) /
(qnorm(.75) - qnorm(.25))
return(as.numeric(bSigma))
} else {
return(sqrt( mean( (thisinffunc)^2 ) /n ))
}
}
aggeffects$simple.se <- getSE_inf(as.matrix(aggeffects$simple.att.inf.func))
aggeffects$dynamic.se <- getSE_inf(as.matrix(aggeffects$dynamic.att.inf.func))
aggeffects$dynamic.se.e <- sqrt(colMeans((aggeffects$dyn.inf.func.e)^2)/n)
aggeffects$c.dynamic <- qnorm(1 - alp/2)
# Bootstrap for simulatanerous Conf. Int for the event study
if (bstrap) {
if (idname %in% clustervars) {
clustervars <- clustervars[-which(clustervars==idname)]
}
if (length(clustervars) > 1) {
stop("can't handle that many cluster variables")
}
## new version
bout <- lapply(1:biters, FUN=function(b) {
if (length(clustervars) > 0) {
n1 <- length(unique(dta[,clustervars]))
Vb <- matrix(sample(c(-1,1), n1, replace=T))
Vb <- cbind.data.frame(unique(dta[,clustervars]), Vb)
Ub <- data.frame(dta[,clustervars])
Ub <- Vb[match(Ub[,1], Vb[,1]),]
Ub <- Ub[,-1]
} else {
Ub <- sample(c(-1,1), n, replace=T)
}
##Ub <- sample(c(-1,1), n, replace=T)
Rb <- (base::colMeans(Ub*(aggeffects$dyn.inf.func.e), na.rm = T))
Rb
})
bres <- t(simplify2array(bout))
# Non-degenerate dimensions
ndg.dim <- base::colSums(bres)!=0
#V.dynamic <- cov(bres)
bres <- bres[,ndg.dim]
#V.dynamic <- cov(bres)
# per-event-time robust sigma, then sup-t critical value for the
# uniform confidence band over event times
bSigma <- apply(bres, 2, function(b) (quantile(b, .75, type=1,na.rm = T) - quantile(b, .25, type=1,na.rm = T))/(qnorm(.75) - qnorm(.25)))
bT <- apply(bres, 1, function(b) max( abs(b/bSigma)))
aggeffects$c.dynamic <- quantile(bT, 1-alp, type=1,na.rm = T)
aggeffects$dynamic.se.e <- rep(0,length(ndg.dim))
aggeffects$dynamic.se.e[ndg.dim] <- as.numeric(bSigma)
}
}
# assemble the MP-style return object
out <- list(group=group,
t=tt,
att_het1=att_het1, att_het0=att_het0,
inffunc_het1=inffunc_het1, inffunc_het0=inffunc_het0,
n=n,
aggte_het1=aggeffects_het1, aggte_het0=aggeffects_het0,
aggte = aggeffects,
alp = alp)
return(out)
}
| /R/att_gt_het2.R | no_license | JiaziChen111/did2 | R | false | false | 14,814 | r | #' @title att_gt_het2
#'
#'
#' @description \code{att_gt_het2} computes the difference of average treatment effects across two subpopulations
#' in DID setups where there are more than two periods of data and
#' allowing for treatment to occur at different points in time. Here, we assume same trends
#' between the two subpopulations. See Marcus and Sant'Anna (2020) for a detailed description.
#'
#' @param outcome The outcome y (in quotations, always!)
#' @param data The name of the data.frame that contains the data
#' @param tname The name of the column containing the time periods
#' @param idname The individual (cross-sectional unit) id name
#' @param first.treat.name The name of the variable in \code{data} that contains the first
#' period when a particular observation is treated. This should be a positive
#' number for all observations in treated groups. It should be 0 for observations
#' in the untreated group.
#' @param nevertreated Boolean for using the group which is never treated in the sample as the comparison unit. Default is TRUE.
#' @param het The name of the column containing the (binary) categories for heterogeneity
#' @param aggte boolean for whether or not to compute aggregate treatment effect parameters, default TRUE
#' @param maxe maximum values of periods ahead to be computed in event study. Only used if aggte = T.
#' @param mine minimum values of periods ahead to be computed in event study. Only used if aggte = T.
#' @param w The name of the column containing the sampling weights. If not set, all observations have same weight.
#' @param alp the significance level, default is 0.05
#' @param bstrap Boolean for whether or not to compute standard errors using
#' the multiplier boostrap. If standard errors are clustered, then one
#' must set \code{bstrap=TRUE}. Default is \code{TRUE}.
#' @param biters The number of bootstrap iterations to use. The default is 1000,
#' and this is only applicable if \code{bstrap=TRUE}.
#' @param clustervars A vector of variables to cluster on. At most, there
#' can be two variables (otherwise will throw an error) and one of these
#' must be the same as idname which allows for clustering at the individual
#' level.
#' @param cband Boolean for whether or not to compute a uniform confidence
#' band that covers all of the group-time average treatment effects
#' with fixed probability \code{1-alp}. The default is \code{TRUE}
#' and the resulting standard errors will be pointwise.
#' @param printdetails Boolean for showing detailed results or not
#'
#' @param method The method for estimating the propensity score when covariates
#' are included (not implemented)
#' @param seedvec Optional value to set random seed; can possibly be used
#' in conjunction with bootstrapping standard errors#' (not implemented)
#' @param pl Boolean for whether or not to use parallel processing (not implemented) is TRUE.
#' @param cores The number of cores to use for parallel processing (not implemented)
#'
#' @references Callaway, Brantly and Sant'Anna, Pedro. "Difference-in-Differences with Multiple Time Periods and an Application on the Minimum Wage and Employment." Working Paper <https://ssrn.com/abstract=3148250> (2018).
#' @return \code{MP} object
#'
#' @export
att_gt_het2 <-function(outcome, data,
tname, idname=NULL,first.treat.name,
nevertreated = T,
het,
aggte=TRUE,
maxe = NULL,
mine = NULL,
w=NULL,
alp=0.05,
bstrap=T, biters=1000, clustervars=NULL,
cband=T,
printdetails=TRUE,
seedvec=NULL, pl=FALSE, cores=2,method="logit")
{
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Data pre-processing and error checking
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# NOTE(review): 'cband' is accepted but never referenced in this body.
## make sure that data is a data.frame
df <- data
## make sure that data is a data.frame
## this gets around RStudio's default of reading data as tibble
if (!all( class(df) == "data.frame")) {
#warning("class of data object was not data.frame; converting...")
df <- as.data.frame(df)
}
# weights if null
# 'w' may be a column name (string) or a numeric vector; default is equal weights
if(is.character(w)) w <- df[, as.character(w)]
if(is.null(w)) {
w <- as.vector(rep(1, nrow(df)))
} else if(min(w) < 0) stop("'w' must be non-negative")
# het if null
if(is.null(het)) {
stop("Please specifiy 'het'. If het=NULL, use 'att_gt' instead of 'att_gt_het'.")
}
# 'het' may also be a column name; it must split the sample into exactly 2 groups
if(is.character(het)) het <- df[, as.character(het)]
het.dim <- length(unique(het))
if(het.dim!=2) {
stop("'het' must be a binary variable.")
}
# Sampling weights overall (w) and within each heterogeneity subpopulation (w1, w0)
df$w <- w
df$w1 <- w * (het==1)
df$w0 <- w * (het==0)
df$y <- df[, as.character(outcome)] ##df[,as.character(formula.tools::lhs(formla))]
##figure out the dates and make balanced panel
tlist <- unique(df[,tname])[order(unique(df[,tname]))] ## this is going to be from smallest to largest
flist <- unique(df[,first.treat.name])[order(unique(df[,first.treat.name]))]
# Check if there is a never treated group
if ( length(flist[flist==0]) == 0) {
if(nevertreated){
stop("It seems you do not have a never-treated group in the data. If you do have a never-treated group in the data, make sure to set data[,first.treat.name] = 0 for the observation in this group. Otherwise, select nevertreated = F so you can use the not-yet treated units as a comparison group.")
} else {
warning("It seems like that there is not a never-treated group in the data. In this case, we cannot identity the ATT(g,t) for the group that is treated las, nor any ATT(g,t) for t higher than or equal to the largest g.\n \nIf you do have a never-treated group in the data, make sure to set data[,first.treat.name] = 0 for the observation in this group.")
# Drop all time periods with time periods >= latest treated
df <- base::subset(df,(df[,tname] < max(flist)))
# Replace last treated time with zero
lines.gmax = df[,first.treat.name]==max(flist)
df[lines.gmax,first.treat.name] <- 0
##figure out the dates
tlist <- unique(df[,tname])[order(unique(df[,tname]))] ## this is going to be from smallest to largest
# Figure out the groups
flist <- unique(df[,first.treat.name])[order(unique(df[,first.treat.name]))]
}
}
# First treated groups
flist <- flist[flist>0]
##################################
## do some error checking
if (!is.numeric(tlist)) {
warning("not guaranteed to order time periods correclty if they are not numeric")
}
## check that first.treat doesn't change across periods for particular individuals
if (!all(sapply( split(df, df[,idname]), function(df) {
length(unique(df[,first.treat.name]))==1
}))) {
stop("Error: the value of first.treat must be the same across all periods for each particular individual.")
}
####################################
# How many time periods
tlen <- length(tlist)
# How many treated groups
flen <- length(flist)
df <- BMisc::makeBalancedPanel(df, idname, tname)
#dta is used to get a matrix of size n (like in cross sectional data)
dta <- df[ df[,tname]==tlist[1], ] ## use this for the influence function
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Compute all ATT(g,t) for each het group
#-------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Results for het==1
#----------------------------------------------------------------------------
results_het1 <- compute.att_gt_het2(flen, tlen, flist, tlist, df, dta, first.treat.name,
outcome, tname, idname, method, seedvec,
pl, cores, printdetails, nevertreated, het=1)
fatt_het1 <- results_het1$fatt
inffunc_het1 <- results_het1$inffunc
#----------------------------------------------------------------------------
# Results for het==0
#----------------------------------------------------------------------------
results_het0 <- compute.att_gt_het2(flen, tlen, flist, tlist, df, dta, first.treat.name,
outcome, tname, idname, method, seedvec,
pl, cores, printdetails, nevertreated, het=0)
fatt_het0 <- results_het0$fatt
inffunc_het0 <- results_het0$inffunc
#----------------------------------------------------------------------------
## process the results from computing the spatt
# Flatten the (group, time) results into vectors and the 3-d influence
# function arrays into n x (flen*tlen) matrices, in the same (f, s) order.
group <- c()
tt <- c()
att_het1 <- c()
att_het0 <- c()
i <- 1
inffunc1_het1 <- matrix(0, ncol=flen*(tlen), nrow=nrow(dta)) ## note, this might not work in unbalanced case
inffunc1_het0 <- matrix(0, ncol=flen*(tlen), nrow=nrow(dta))
for (f in 1:length(flist)) {
for (s in 1:(length(tlist))) {
group[i] <- fatt_het1[[i]]$group
tt[i] <- fatt_het1[[i]]$year
att_het1[i] <- fatt_het1[[i]]$att
att_het0[i] <- fatt_het0[[i]]$att
inffunc1_het1[,i] <- inffunc_het1[f,s,]
inffunc1_het0[,i] <- inffunc_het0[f,s,]
i <- i+1
}
}
# THIS IS ANALOGOUS TO CLUSTER ROBUST STD ERRORS (in our specific setup)
n <- nrow(dta)
# NOTE(review): V is assigned here but never used afterwards.
V <- NULL
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Compute all summaries of the ATT(g,t)
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
aggeffects <- NULL
aggeffects_het1 <- NULL
aggeffects_het0 <- NULL
if (aggte) {
# Aggregate within each subpopulation, then take differences (het1 - het0)
# of the point estimates and of their influence functions.
aggeffects_het1 <- compute.aggte_het(flist, tlist, group, tt, att_het1, first.treat.name, inffunc1_het1,
n, clustervars, dta, idname, bstrap, biters, alp, maxe, mine, het=1)
aggeffects_het0 <- compute.aggte_het(flist, tlist, group, tt, att_het0, first.treat.name, inffunc1_het0,
n, clustervars, dta, idname, bstrap, biters, alp, maxe, mine,het=0)
aggeffects <- list(simple.att = aggeffects_het1$simple.att - aggeffects_het0$simple.att,
simple.att.inf.func = aggeffects_het1$simple.att.inf.func - aggeffects_het0$simple.att.inf.func,
dynamic.att = aggeffects_het1$dynamic.att - aggeffects_het0$dynamic.att,
dynamic.att.inf.func = aggeffects_het1$dynamic.att.inf.func - aggeffects_het0$dynamic.att.inf.func,
dynamic.att.e = aggeffects_het1$dynamic.att.e - aggeffects_het0$dynamic.att.e,
dyn.inf.func.e = aggeffects_het1$dyn.inf.func.e - aggeffects_het0$dyn.inf.func.e,
e = aggeffects_het1$e
)
# Standard error of a scalar aggregated estimand from its influence function:
# multiplier (Rademacher) bootstrap if bstrap, otherwise sqrt(mean(IF^2)/n).
getSE_inf <- function(thisinffunc) {
if (bstrap) {
if (idname %in% clustervars) {
clustervars <- clustervars[-which(clustervars==idname)]
}
if (length(clustervars) > 1) {
stop("can't handle that many cluster variables")
}
bout <- lapply(1:biters, FUN=function(b) {
if (length(clustervars) > 0) {
# One Rademacher weight per cluster, expanded back to observations
n1 <- length(unique(dta[,clustervars]))
Vb <- matrix(sample(c(-1,1), n1, replace=T))
Vb <- cbind.data.frame(unique(dta[,clustervars]), Vb)
Ub <- data.frame(dta[,clustervars])
Ub <- Vb[match(Ub[,1], Vb[,1]),]
Ub <- Ub[,-1]
} else {
Ub <- sample(c(-1,1), n, replace=T)
}
Rb <- base::mean(Ub*(thisinffunc), na.rm = T)
Rb
})
bres <- as.vector(simplify2array(bout))
# Robust scale estimate: bootstrap IQR rescaled by the normal IQR
bSigma <- (quantile(bres, .75, type=1, na.rm = T) - quantile(bres, .25, type=1, na.rm = T)) /
(qnorm(.75) - qnorm(.25))
return(as.numeric(bSigma))
} else {
return(sqrt( mean( (thisinffunc)^2 ) /n ))
}
}
aggeffects$simple.se <- getSE_inf(as.matrix(aggeffects$simple.att.inf.func))
aggeffects$dynamic.se <- getSE_inf(as.matrix(aggeffects$dynamic.att.inf.func))
aggeffects$dynamic.se.e <- sqrt(colMeans((aggeffects$dyn.inf.func.e)^2)/n)
# Pointwise critical value; replaced by a uniform one below when bstrap=TRUE
aggeffects$c.dynamic <- qnorm(1 - alp/2)
# Bootstrap for simultaneous Conf. Int for the event study
if (bstrap) {
if (idname %in% clustervars) {
clustervars <- clustervars[-which(clustervars==idname)]
}
if (length(clustervars) > 1) {
stop("can't handle that many cluster variables")
}
## new version
bout <- lapply(1:biters, FUN=function(b) {
if (length(clustervars) > 0) {
n1 <- length(unique(dta[,clustervars]))
Vb <- matrix(sample(c(-1,1), n1, replace=T))
Vb <- cbind.data.frame(unique(dta[,clustervars]), Vb)
Ub <- data.frame(dta[,clustervars])
Ub <- Vb[match(Ub[,1], Vb[,1]),]
Ub <- Ub[,-1]
} else {
Ub <- sample(c(-1,1), n, replace=T)
}
##Ub <- sample(c(-1,1), n, replace=T)
Rb <- (base::colMeans(Ub*(aggeffects$dyn.inf.func.e), na.rm = T))
Rb
})
bres <- t(simplify2array(bout))
# Non-degenerate dimensions
ndg.dim <- base::colSums(bres)!=0
#V.dynamic <- cov(bres)
bres <- bres[,ndg.dim]
#V.dynamic <- cov(bres)
# Per-event-time robust sigma, then sup-t statistic across event times
bSigma <- apply(bres, 2, function(b) (quantile(b, .75, type=1,na.rm = T) - quantile(b, .25, type=1,na.rm = T))/(qnorm(.75) - qnorm(.25)))
bT <- apply(bres, 1, function(b) max( abs(b/bSigma)))
aggeffects$c.dynamic <- quantile(bT, 1-alp, type=1,na.rm = T)
aggeffects$dynamic.se.e <- rep(0,length(ndg.dim))
aggeffects$dynamic.se.e[ndg.dim] <- as.numeric(bSigma)
}
}
# Return group/time labels, per-subpopulation ATT(g,t)s and influence
# functions, and the aggregated (difference) summaries.
out <- list(group=group,
t=tt,
att_het1=att_het1, att_het0=att_het0,
inffunc_het1=inffunc_het1, inffunc_het0=inffunc_het0,
n=n,
aggte_het1=aggeffects_het1, aggte_het0=aggeffects_het0,
aggte = aggeffects,
alp = alp)
return(out)
}
|
# #
# Fast Campus Online #
# Financial Engineering / Quant all-in-one package #
# R programming - Instructor: Chanyub Park #
# #
# Tidy data with tidyr
## Prepare the practice data
library(dplyr)
library(tqk)
### grepl(), used together with filter(), returns TRUE/FALSE for whether
### each value contains the target string, e.g.:
### grepl("현대자동차", code_get()$name)
code_get() %>%
filter(grepl("현대자동차", name)) %>%
select(code) %>%
tqk_get(from = "2019-01-01", to = "2019-02-28") %>%
mutate(comp = "현대자동차") ->
hdcm
hdcm
## Practice with tidyr's gather()
### Reshape hdcm to long form, excluding trading volume.
### open, high, low, close and adjusted become the values.
library(tidyr)
hdcm %>%
gather(key = "type", value = "price")
hdcm %>%
gather(key = "type", value = "price", -date, -comp) ->
hdcm_v
hdcm %>%
select(-volume) %>%
gather(key = "type", value = "price", -date, -comp) ->
hdcm_long
hdcm_long
hdcm %>%
gather(key = "type", value = "price", -date, -comp) %>%
filter(type != "volume") ->
hdcm_vv
identical(hdcm_long, hdcm_vv)
## Practice with tidyr's spread()
hdcm_long %>%
spread(type, price)
### Create month and day columns, then build a wide-form closing-price
### table with one column per day.
library(lubridate)
hdcm %>%
mutate(month = month(date)) %>%
mutate(day = day(date)) %>%
select(comp, month, day, close) %>%
spread(day, close)
### Build the stks18 practice data set
library(purrr)
code_get() %>%
slice(11:20) ->
code_info
code_info %>%
select(code) %>%
map_dfr(
~ tqk_get(.x, from = "2018-01-01", to = "2018-12-31") %>%
mutate(code = .x)
) %>%
left_join(code_info %>% select(code, name), by = "code") %>%
select(-code) ->
stks18
### Print each company's monthly average closing price.
### Wide form is easier to read at a glance.
stks18 %>%
mutate(month = month(date)) %>%
group_by(name, month) %>%
summarise(mclose = mean(close)) %>%
spread(month, mclose)
## Practice with tidyr's separate()
### Prepare the data
library(readr)
url <- "https://github.com/mrchypark/sejongFinData/raw/master/dataAll.csv"
download.file(url,destfile = "./dataAll.csv")
findata <-
read_csv("./dataAll.csv", locale = locale(encoding = "cp949")) %>%
rename(company = country)
findata %>%
select(company, year) ->
findata
### Split the year column into separate columns with separate().
### The default sep is the regex [^[:alnum:]]+, so it splits on
### any run of non-alphanumeric characters.
findata %>%
separate(year, into = c("year","month","standard"))
### The convert option coerces the resulting column types.
findata %>%
separate(year, into = c("year","month","standard"), convert = T)
### You can also pass sep the literal characters to split on.
### In a regular expression "(" has a special meaning, so it must
### be escaped with \\ to be treated as a literal character.
findata %>%
separate(year, into = c("year","standard"), sep = "\\(")
### sep can also be a number, which splits at that character position.
findata %>%
separate(year, into = c("year","month","standard")) %>%
separate(standard, into = c("standard","Consolidated"), sep = 4)
## Practice with tidyr's unite()
library(tqk)
code_info <- code_get()
code_info
### code and name carry the same meaning, so they can be merged into one.
### In practice code acts as the key and as the input to tqk_get(),
### so assume merging is only a clean-up step for the final output.
### unite() combines several columns into a single column:
### write the name of the new column first,
### then list the columns to combine.
code_info %>%
unite("company", name, code)
### The sep option chooses the joining character.
### The default is _ (underscore).
code_info %>%
unite("company", name, code, sep = "-")
code_info %>%
unite("company", name, code, sep = "(") %>%
mutate(company = paste0(company,")"))
### Personally I prefer mutate() together with paste0().
### paste0() concatenates strings.
code_info %>%
mutate(company = paste0(name, "(",code,")"))
### transmute() returns only the listed columns.
code_info %>%
transmute(company = paste0(name, "(",code,")"), market)
| /R/06_tidyr.R | no_license | mrchypark/fcf-R-basic | R | false | false | 4,751 | r | # #
# 패스트 캠퍼스 온라인 #
# 금융 공학 / 퀀트 올인원 패키지 #
# R 프로그래밍 - 강사. 박찬엽 #
# #
# 단정한 데이터 tidyr
## 실습 데이터 준비
library(dplyr)
library(tqk)
### filter()와 같이 사용한 grepl() 함수는 데이터에 목표로하는 글자를
### 포함하는지를 TRUE/FALSE로 결과를 제공함.
### grepl("현대자동차", code_get()$name)
code_get() %>%
filter(grepl("현대자동차", name)) %>%
select(code) %>%
tqk_get(from = "2019-01-01", to = "2019-02-28") %>%
mutate(comp = "현대자동차") ->
hdcm
hdcm
## tidyr 패키지의 gather() 함수 실습
### hdcm 데이터를 거래량을 제외하고 long form으로 변경하세요.
### open, high, low, close, adjusted가 값으로 들어가면 됩니다.
library(tidyr)
hdcm %>%
gather(key = "type", value = "price")
hdcm %>%
gather(key = "type", value = "price", -date, -comp) ->
hdcm_v
hdcm %>%
select(-volume) %>%
gather(key = "type", value = "price", -date, -comp) ->
hdcm_long
hdcm_long
hdcm %>%
gather(key = "type", value = "price", -date, -comp) %>%
filter(type != "volume") ->
hdcm_vv
identical(hdcm_long, hdcm_vv)
## tidyr 패키지의 spread() 함수 실습
hdcm_long %>%
spread(type, price)
### 월, 일 컬럼을 만들고 개별 날을 컬럼으로 하는 wide form 종가 데이터를 만드세요.
library(lubridate)
hdcm %>%
mutate(month = month(date)) %>%
mutate(day = day(date)) %>%
select(comp, month, day, close) %>%
spread(day, close)
### stks18 실습 데이터 만들기
library(purrr)
code_get() %>%
slice(11:20) ->
code_info
code_info %>%
select(code) %>%
map_dfr(
~ tqk_get(.x, from = "2018-01-01", to = "2018-12-31") %>%
mutate(code = .x)
) %>%
left_join(code_info %>% select(code, name), by = "code") %>%
select(-code) ->
stks18
### 각 회사의 월별 평균 종가를 출력하세요.
### wide form으로 출력하는 것이 한눈에 보기 좋습니다.
stks18 %>%
mutate(month = month(date)) %>%
group_by(name, month) %>%
summarise(mclose = mean(close)) %>%
spread(month, mclose)
## tidyr 패키지의 separate() 함수 실습
### 데이터 준비
library(readr)
url <- "https://github.com/mrchypark/sejongFinData/raw/master/dataAll.csv"
download.file(url,destfile = "./dataAll.csv")
findata <-
read_csv("./dataAll.csv", locale = locale(encoding = "cp949")) %>%
rename(company = country)
findata %>%
select(company, year) ->
findata
### year 컬럼을 separate() 함수로 별도의 컬럼들로 나눔
### sep 에 [^[:alnum:]]+ 정규표현식이 기본값으로 있어서
### 글자, 숫자가 아닌 값으로 나누기를 제공
findata %>%
separate(year, into = c("year","month","standard"))
### convert 옵션으로 자료형을 처리할 수 있음
findata %>%
separate(year, into = c("year","month","standard"), convert = T)
### 직접 sep에 나누기를 할 글자를 지정할 수 있음
### 정규표현식에서 "(" 괄호는 특별한 의미를 지니기 때문에
### \\ 이후에 작성해야 글자로 인식함.
findata %>%
separate(year, into = c("year","standard"), sep = "\\(")
### sep에 숫자를 넣을 수도 있는데, 글자 갯수를 기준으로 나누어 줌
findata %>%
separate(year, into = c("year","month","standard")) %>%
separate(standard, into = c("standard","Consolidated"), sep = 4)
## tidyr 패키지의 unite() 함수 실습
library(tqk)
code_info <- code_get()
code_info
### code와 name이 같은 의미를 지니므로 하나로 합칠 수 있음.
### 물론 실제로는 code가 key 역할이나 tqk_get() 함수의 입력 역할을 하기 때문에
### 최종 결과물에서 정리의 의미로 하나로 합치거나 하는 것이라고 가정.
### 여러 컬럼의 데이터를 합쳐서 하나의 컬럼으로 만드는 동작
### 새롭게 만들어지는 컬럼 이름을 먼저 작성
### 이후 대상이 되는 컬럼 이름을 나열.
code_info %>%
unite("company", name, code)
### sep 옵션으로 어떤 글자를 이용하여 연결할지 결정.
### 기본값은 _(언더바)
code_info %>%
unite("company", name, code, sep = "-")
code_info %>%
unite("company", name, code, sep = "(") %>%
mutate(company = paste0(company,")"))
### 개인적으로는 mutate() 함수와 paste0() 함수를 함께 사용하는 편.
### paste0() 함수는 글자를 합치는 기능을 제공.
code_info %>%
mutate(company = paste0(name, "(",code,")"))
### transmute() 함수로 필요한 컬럼만 출력
code_info %>%
transmute(company = paste0(name, "(",code,")"), market)
|
# Packages for descriptive statistics and plotting
library(moments)
library(mnormt)
library(psych)
library(sp)
library(raster)
library(corrplot)
install.packages("mnormt")
# Load the data set (one row per company; columns 2:9 are numeric indicators)
a<-read.table(file="BasicMaterials-IC.csv",header=TRUE,sep=',')
a
fix(a)
attach(a)
View(a)
str(a)
head(a)
tail(a)
# I. Descriptive statistics
summary(a)
hist(a$Price, col="coral", main="PRICE")
plot(density(a$Price))
table(a$Industry)
barplot(table(a$Industry))
pie(table(a$Industry))
# Industry frequencies entered by hand for labelled charts
industries<-c("Petroleum","Oil & Gas","Mining","Metallurgy","Manufacturing","Chemicals","Steel")
f<-c(20,9,5,2,2,1,1)
pie(f,labels=industries,col=rainbow(7),main="PIE")
df<-data.frame(industries,f)
df
df$proportie<-df$f
df$proportie
procente<-100*(f/sum(f))
procente
barplot(f,names.arg=industries,col=rainbow(7),xlab="Industries",ylab="Frequencies",cex.names=0.8,main="Distributie")
# Covariance and correlation of the numeric columns (2:9)
cov<-cov(a[,2:9])
cov
cor<-cor(a[,2:9])
cor
# NOTE(review): this applies sd over ALL columns, including the non-numeric
# Symbol/Industry ones -- probably a[,2:9] was intended; verify.
sd<-apply(a, 2, sd)
sd
skewness(a[,2:9])
kurtosis(a[,2:9])
# Check for outliers
par(mfrow=c(4,4))
boxplot(Price,main="Price",col="red")
boxplot(Change,main="Change",col="green")
boxplot(Price.Sales,main="Price/Sales",col="blue")
boxplot(Price.Book,main="Price/Book",col="blue4")
hist(Price,main="Price",col="red")
hist(Change,main="Change",col="green")
hist(Price.Sales,main="Price/Sales",col="blue")
hist(Price.Book,main="Price/Book",col="blue4")
par(mfrow=c(4,4))
boxplot(Revenue,main="Revenue",col="pink")
boxplot(MkCap,main="MkCap",col="brown")
boxplot(ROA,main="ROA",col="cyan")
boxplot(ROE,main="ROE",col="chocolate")
hist(Revenue,main="Revenue",col="pink")
hist(MkCap,main="MkCap",col="brown")
hist(ROA,main="ROA",col="cyan")
hist(ROE,main="ROE",col="chocolate")
# Simple linear regression and scatter plot with fitted line
model<-lm(Revenue~Price)
model
plot(Revenue, Price, col="coral2")
abline(lm(Price~Revenue), col="purple")
# II. PRINCIPAL COMPONENT ANALYSIS
# Standardize the data: mean=0, sd=1
acp<-a[,2:9]
acp
date_std=scale(acp,scale=TRUE)
date_std
head(date_std)
# Principal components
pc=princomp(date_std,cor=TRUE)
sd=pc$sd
valpr=sd*sd
# Percentage of variance explained (8 = number of variables)
procentA= valpr*100/8
procentC=cumsum(procentA)
v=zapsmall(data.frame(valpr,procentA,procentC))
v
summary(pc)
scree_plot=prcomp(date_std)
plot(scree_plot,type="l",main="Scree plot")
plot(pc,type="barplot")
biplot(pc)
# Eigenvectors and eigenvalues
ev<-eigen(cov(date_std))
ev
loadings(pc)
#vectpr=zapsmall(pc$loadings)
#vectpr
scoruriprinc=zapsmall(pc$scores)
scoruriprinc
# Correlation matrix between the original variables and the component scores
c=zapsmall(pc$scores)
corFact=zapsmall(cor(date_std,c))
corFact
corrplot(cor(date_std,c),method="circle")
#############################################
# Clustering algorithms
View(a)
require (cluster)
require(factoextra)
# Create a data frame with the numeric variables
b <- a[,2:9]
b
rownames(b, do.NULL = TRUE, prefix = "row")
rownames(b)<- a$Symbol # label the rows with the ticker symbols
View(b)
# standardize the observations before running the cluster analysis
standardizare <- function(x) {(x - mean(x))/sd(x)} # z-score standardization
datestd <-apply(b,2,standardizare)
datestd
# compute the distances between objects
distance <- dist(as.matrix(datestd))
# hierarchical cluster analysis, Ward's method
hc.w <- hclust(distance, method="ward.D2")
plot(hc.w, labels = b$Symbol, hang=-1, col="coral2")
rect.hclust(hc.w, k = 3, border = 2:5)
member.w <- cutree(hc.w,k=3)
member.w
install.packages("dbscan")
library(dbscan)
install.packages("fpc")
library(fpc)
#K-MEANS
rezultat.kmeans<-kmeans(datestd,3)
rezultat.kmeans
table(a$Industry, rezultat.kmeans$cluster)
# DBSCAN: choose eps from the elbow of the kNN distance plot
kNNdistplot(datestd,k=3) #kNN-k nearest neighbours
abline(h=1.8,col="red")
db<-dbscan(datestd,eps=1.8,MinPts=3)
db
fviz_cluster(db,datestd,ellipse=TRUE,geom="points")
table(a$Industry,db$cluster)
plotcluster(datestd,db$cluster)
db_vector<-db[['cluster']]
db_vector
# NOTE(review): 'dist' shadows the base function dist() from here on
dist<-dist(datestd)
dist
silueta<-silhouette(db_vector,dist)
silueta
fviz_silhouette(silueta)
#Fuzzy C-MEANS
library(factoextra)
library(cluster)
library(dbscan)
library(e1071)
rezultat<-cmeans(datestd, 3, 100, 2, method="cmeans")
rezultat
# 3 = number of clusters, 100 = number of iterations, 2 = fuzzification parameter
rezultat$centers
rezultat$membership
rezultat$cluster
# Plot the points colored by cluster, with the cluster centers marked
plot(datestd, col=rezultat$cluster)
points(rezultat$centers[,c(1,2)], col=1:3, pch="*", cex=3)
# Decision trees (conditional inference trees; ctree() requires the 'party'
# or 'partykit' package to be attached)
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
# FIX: R comments start with '#', not '//' -- the original '//' comment was a
# syntax error that aborted this line.
ind<-sample(2,nrow(df),replace=TRUE,prob=c(0.7,0.3)) # draw 2 samples (train/test) from the data set
ind # sampling with replacement - membership in the 2 samples
traindata<-df[ind==1,]
traindata
testdata<-df[ind==2,]
testdata
formula<-Industry~.
formula
ctree<-ctree(formula, data=traindata)
ctree
table(predict(ctree),traindata$Industry)
plot(ctree)
# FIX: 'ictree' did not exist anywhere in the script -- the fitted object is 'ctree'
print(ctree)
plot(ctree, type="simple")
predictie<-predict(ctree,traindata,type="response")
predictie # predicted labels
confuzie<-table(traindata$Industry,predictie)
confuzie # shows what was predicted correctly
classAgreement(confuzie)
# diag=0.97 -> 97% of the data correctly labeled
# kappa=0.95 -> 95%, very good agreement between true and predicted labels
mean(predictie !=traindata$Industry)
predictie1<-predict(ctree,testdata,type="response")
predictie1
confuzie1<-table(testdata$Industry,predictie1)
confuzie1
classAgreement(confuzie1)
mean(predictie1 !=testdata$Industry)
library(tree)
library(ISLR)
#Pruning the tree
set.seed(3)
# NOTE(review): this assignment shadows the cv.tree() function from 'tree'
cv.tree<-cv.tree(ctree,FUN=prune.misclass)
cv.tree
names(cv.tree) # size = tree size; dev = node-purity indicator
plot(cv.tree$size,cv.tree$dev,type="b")
install.packages("pROC")
library(pROC)
install.packages("rpart")
library(rpart)
# ROC curve
# rebuild the modeling frame: standardized predictors + the Industry label
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
fix(df)
attach(df)
# binary target: "high revenue" flag
VenituriMari<-ifelse(Revenue>=0.1,"Yes","No")
VenituriMari
df=data.frame(df, VenituriMari)
df=df[ ,-5]
df
names(df)
set.seed(123)
# 50/50 train/test split by row index
antrenare<-sample(1:nrow(df),nrow(df)/2)
antrenare
testare=-antrenare
setantrenare<-df[antrenare,]
setantrenare
settestare<-df[testare,]
settestare
arbore<-rpart(as.factor(VenituriMari)~.,data=setantrenare,method="class")
arbore
plot(arbore,uniform=TRUE)# uniform - vertical spacing of the nodes
text(arbore,use.n=TRUE,all=TRUE,cex=0.8)
print(arbore)
# loss - incorrectly classified objects
# yval - majority class of that node
# yprob - vector of class probabilities
#root 200 79 no (0.6050000 0.3950000)
predictie<-predict(arbore,settestare,type="class")
predictie
matriceconfuzie<-table(settestare$VenituriMari,predictie)
matriceconfuzie
# 94 and 59 are the correctly predicted observations
(94+59)/(94+21+26+59)
# 0.76 -> 76% of the data predicted correctly
prob<-predict(arbore,settestare,type="prob")
head(prob)
curbaROC<-roc(settestare$VenituriMari,prob[,"Yes"])
curbaROC
plot(curbaROC)
auc(curbaROC) #area under curve
printcp(arbore) # complexity parameter - how much each split improves the relative classification error
# nsplit = number of terminal nodes
# rel error = relative error
# x error = cross-validation error
# xstd = standard deviation
# selection criterion: minimize xerror
plotcp(arbore,col="red")
# prune at the cp value with the smallest cross-validation error
arborecuratat<-prune(arbore,cp=arbore$cptable[which.min(arbore$cptable[ ,"xerror"]),"CP"])
arborecuratat
plot(arborecuratat,uniform=TRUE)
text(arborecuratat,use.n=TRUE,all=TRUE,cex=0.8)
predictie1<-predict(arborecuratat,settestare,type="class")
predictie1
matriceconfuzie1<-table(settestare$VenituriMari,predictie1)
matriceconfuzie1
# Regression trees
install.packages("tree")
library(tree)
install.packages("MASS")
library(MASS)
set.seed(234)
antrenare<-sample(1:nrow(df),nrow(df)/2)
antrenare
arbore<-tree(ROE~.,df,subset=antrenare)
arbore
plot(arbore)
text(arbore,pretty=0)
# NOTE(review): prune.misclass is meant for classification trees, but
# 'arbore' here is a regression tree -- confirm this call is intended.
cv.tree<-cv.tree(arbore, FUN=prune.misclass)
cv.tree
#SVM
install.packages('e1071',dependencies=TRUE)
install.packages("dplyr")
library(dplyr)
library(e1071)
library(MASS)
df
# keep only ROA, ROE and Industry (columns 7:9)
df<-df %>% select(7,8,9)
df
index <- 1:nrow(df)
index
# hold out one third of the rows for testing
testindex<- sample(index, trunc(length(index)/3))
testindex
settestare<- df[testindex,]
settestare
setantrenare<- df[-testindex,]
setantrenare
model<-svm(Industry~.,data = setantrenare)
model
plot(model,df)
prediction <- predict(model, settestare[,-3])
prediction
tab <- table(pred = prediction, true = settestare[,3])
tab
classAgreement(tab)
# score two new observations
datenoi<-data.frame(ROA=c(-0.235665,0.120007),ROE=c(0.735665,-0.140607))
datenoi
predict(model,datenoi)
predict(model,datenoi,prob=TRUE)
predict
######################
# Neural networks
install.packages("neuralnet")
library(neuralnet)
setantrenare<- df[sample(1:40, 20),]
setantrenare
# one-hot indicator columns for each industry class
setantrenare$petroleum <- c(setantrenare$Industry == "petroleum")
setantrenare$oilgas <- c(setantrenare$Industry == "oil&gas")
setantrenare$mining <- c(setantrenare$Industry == "mining")
setantrenare$manufacturing <- c(setantrenare$Industry == "manufacturing")
setantrenare$metallurgy <- c(setantrenare$Industry == "metallurgy")
setantrenare$chemicals <- c(setantrenare$Industry == "chemicals")
setantrenare$steel <- c(setantrenare$Industry == "steel")
setantrenare
# NOTE(review): this drops Industry from the TEST set; the later copy of this
# section drops it from setantrenare instead -- confirm which was intended.
settestare$Industry <- NULL
# Train the neural network with 7 nodes in the hidden layer (hidden = 7).
retea<-neuralnet(petroleum+oilgas+mining+manufacturing+metallurgy+chemicals+steel~Price+Change+Price.Sales+Price.Book+Revenue+MkCap+ROA+ROE, setantrenare, hidden=7, lifesign="full")
retea
plot(retea, rep="best", intercept=FALSE)
# Load the data for the analysis
a<-read.table(file="BasicMaterials-IC.csv",header=TRUE,sep=',')
a
b <- a[,2:9]
b
rownames(b, do.NULL = TRUE, prefix = "row")
rownames(b)<- a$Symbol # label the rows with the ticker symbols
View(b)
# standardize the observations before the analysis
standardizare <- function(x) {(x - mean(x))/sd(x)} # z-score standardization
datestd <-apply(b,2,standardizare)
datestd
# modeling frame: standardized predictors + the Industry label
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
################################
# Multinomial logistic regression
install.packages("MASS")
library(MASS)
install.packages("nnet")
library(nnet)
df$Industry.f<-factor(df$Industry)
df$Industry.f
# petroleum is the reference category
df$ref<-relevel(df$Industry.f, ref="petroleum")
df$ref
# FIX: the argument was misspelled 'traice'; nnet silently swallows unknown
# arguments via '...', so the fitting trace was still printed. 'trace' is
# the correct argument name.
model<-multinom(ref~Revenue+ROA+ROE, data=df, trace=FALSE)
model
summary(model)
predict(model, df)
predict(model, df, type="prob")
predict(model, df[c(3,7,17),], type="prob")
confuzie<-table(df$Industry[1:40], predict(model, df[1:40, ]))
confuzie
mean(df$Industry[1:40]==predict(model, df[1:40,]))
######################
# Neural networks
#install.packages("neuralnet")
library(neuralnet)
setantrenare<- df[sample(1:40, 20),]
setantrenare
# one-hot indicator columns for each industry class
setantrenare$petroleum <- c(setantrenare$Industry == "petroleum")
setantrenare$oilgas <- c(setantrenare$Industry == "oil&gas")
setantrenare$mining <- c(setantrenare$Industry == "mining")
setantrenare$manufacturing <- c(setantrenare$Industry == "manufacturing")
setantrenare$metallurgy <- c(setantrenare$Industry == "metallurgy")
setantrenare$chemicals <- c(setantrenare$Industry == "chemicals")
setantrenare$steel <- c(setantrenare$Industry == "steel")
setantrenare
setantrenare$Industry <- NULL
# Train the neural network with 7 nodes in the hidden layer (hidden = 7).
retea<-neuralnet(petroleum+oilgas+mining+manufacturing+metallurgy+chemicals+steel~Price+Change+Price.Sales+Price.Book+Revenue+MkCap+ROA+ROE, setantrenare, hidden=7, lifesign="full")
retea
plot(retea, rep="best", intercept=FALSE)
predictie<-compute(retea,df[-8])$net.result
predictie | /InteligentaComputationala.R | no_license | MarilenaGrosu/Computational-Intelligence | R | false | false | 11,919 | r | library(moments)
library(mnormt)
library(psych)
library(sp)
library(raster)
library(corrplot)
install.packages("mnormt")
a<-read.table(file="BasicMaterials-IC.csv",header=TRUE,sep=',')
a
fix(a)
attach(a)
View(a)
str(a)
head(a)
tail(a)
#I.Statistici descriptive
summary(a)
hist(a$Price, col="coral", main="PRICE")
plot(density(a$Price))
table(a$Industry)
barplot(table(a$Industry))
pie(table(a$Industry))
industries<-c("Petroleum","Oil & Gas","Mining","Metallurgy","Manufacturing","Chemicals","Steel")
f<-c(20,9,5,2,2,1,1)
pie(f,labels=industries,col=rainbow(7),main="PIE")
df<-data.frame(industries,f)
df
df$proportie<-df$f
df$proportie
procente<-100*(f/sum(f))
procente
barplot(f,names.arg=industries,col=rainbow(7),xlab="Industries",ylab="Frequencies",cex.names=0.8,main="Distributie")
cov<-cov(a[,2:9])
cov
cor<-cor(a[,2:9])
cor
sd<-apply(a, 2, sd)
sd
skewness(a[,2:9])
kurtosis(a[,2:9])
#Verificarea existentei outlierilor
par(mfrow=c(4,4))
boxplot(Price,main="Price",col="red")
boxplot(Change,main="Change",col="green")
boxplot(Price.Sales,main="Price/Sales",col="blue")
boxplot(Price.Book,main="Price/Book",col="blue4")
hist(Price,main="Price",col="red")
hist(Change,main="Change",col="green")
hist(Price.Sales,main="Price/Sales",col="blue")
hist(Price.Book,main="Price/Book",col="blue4")
par(mfrow=c(4,4))
boxplot(Revenue,main="Revenue",col="pink")
boxplot(MkCap,main="MkCap",col="brown")
boxplot(ROA,main="ROA",col="cyan")
boxplot(ROE,main="ROE",col="chocolate")
hist(Revenue,main="Revenue",col="pink")
hist(MkCap,main="MkCap",col="brown")
hist(ROA,main="ROA",col="cyan")
hist(ROE,main="ROE",col="chocolate")
model<-lm(Revenue~Price)
model
plot(Revenue, Price, col="coral2")
abline(lm(Price~Revenue), col="purple")
# II. ANALIZA COMPONENTELOR PRINCIPALE
# Standardizarea datelor: mean=0, sd=1
acp<-a[,2:9]
acp
date_std=scale(acp,scale=TRUE)
date_std
head(date_std)
#Componente principale
pc=princomp(date_std,cor=TRUE)
sd=pc$sd
valpr=sd*sd
procentA= valpr*100/8
procentC=cumsum(procentA)
v=zapsmall(data.frame(valpr,procentA,procentC))
v
summary(pc)
scree_plot=prcomp(date_std)
plot(scree_plot,type="l",main="Scree plot")
plot(pc,type="barplot")
biplot(pc)
#Vectori proprii si valori proprii
ev<-eigen(cov(date_std))
ev
loadings(pc)
#vectpr=zapsmall(pc$loadings)
#vectpr
scoruriprinc=zapsmall(pc$scores)
scoruriprinc
#Matricea corelatiilor
c=zapsmall(pc$scores)
corFact=zapsmall(cor(date_std,c))
corFact
corrplot(cor(date_std,c),method="circle")
#############################################
#Algoritmi de clusterizare
View(a)
require (cluster)
require(factoextra)
#Creare dateframe cu variabile numerice
b <- a[,2:9]
b
rownames(b, do.NULL = TRUE, prefix = "row")
rownames(b)<- a$Symbol #etichetarea randurilor cu numele tarilor
View(b)
#standardizarea observatiilor in vederea aplicarii analizei cluster
# Convert a numeric vector to z-scores: subtract the mean, divide by the sd.
standardizare <- function(x) {
  (x - mean(x)) / sd(x)
}
datestd <-apply(b,2,standardizare)
datestd
#calcularea distantelor dintre obiecte
distance <- dist(as.matrix(datestd))
# analiza cluster metoda Ward
hc.w <- hclust(distance, method="ward.D2")
plot(hc.w, labels = b$Symbol, hang=-1, col="coral2")
rect.hclust(hc.w, k = 3, border = 2:5)
member.w <- cutree(hc.w,k=3)
member.w
install.packages("dbscan")
library(dbscan)
install.packages("fpc")
library(fpc)
#K-MEANS
rezultat.kmeans<-kmeans(datestd,3)
rezultat.kmeans
table(a$Industry, rezultat.kmeans$cluster)
kNNdistplot(datestd,k=3) #kNN-k nearest neighbours
abline(h=1.8,col="red")
db<-dbscan(datestd,eps=1.8,MinPts=3)
db
fviz_cluster(db,datestd,ellipse=TRUE,geom="points")
table(a$Industry,db$cluster)
plotcluster(datestd,db$cluster)
db_vector<-db[['cluster']]
db_vector
dist<-dist(datestd)
dist
silueta<-silhouette(db_vector,dist)
silueta
fviz_silhouette(silueta)
#Fuzzy C-MEANS
library(factoextra)
library(cluster)
library(dbscan)
library(e1071)
rezultat<-cmeans(datestd, 3, 100, 2, method="cmeans")
rezultat
# 3=nr de clustere, 100= nr de iteratii, 2=parametrii de fuzzificare
rezultat$centers
rezultat$membership
rezultat$cluster
#Reprezentarea grafica a punctelor
plot(datestd, col=rezultat$cluster)
points(rezultat$centers[,c(1,2)], col=1:3, pch="*", cex=3)
##########################################
#Arbori de decizie
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
# Draw a membership label (1 = training, 2 = test) for every row of df,
# sampling with replacement with probabilities 0.7 / 0.3.
# Fix: the original used a C-style '//' comment, which is a syntax error in R.
ind<-sample(2,nrow(df),replace=TRUE,prob=c(0.7,0.3))
ind # membership of each row in one of the two samples
traindata<-df[ind==1,]
traindata
testdata<-df[ind==2,]
testdata
formula<-Industry~.
formula
ctree<-ctree(formula, data=traindata)
ctree
table(predict(ctree),traindata$Industry)
plot(ctree)
print(ctree)  # fix: 'ictree' was undefined; the fitted tree object is 'ctree'
plot(ctree, type="simple")
predictie<-predict(ctree,traindata,type="response")
predictie #predictie etichete
confuzie<-table(traindata$Industry,predictie)
confuzie #arata ce s-a previzionat corect
classAgreement(confuzie)
#diag=0.97->97% de date corect etichetate
#kappa=0.95->95% acord f bun intre etichetele reale si cele previzionate
mean(predictie !=traindata$Industry)
predictie1<-predict(ctree,testdata,type="response")
predictie1
confuzie1<-table(testdata$Industry,predictie1)
confuzie1
classAgreement(confuzie1)
mean(predictie1 !=testdata$Industry)
library(tree)
library(ISLR)
#Pruning the tree
set.seed(3)
cv.tree<-cv.tree(ctree,FUN=prune.misclass)
cv.tree
names(cv.tree) #size-marime arbore si dev-indicator pt puritatea nodului
plot(cv.tree$size,cv.tree$dev,type="b")
install.packages("pROC")
library(pROC)
install.packages("rpart")
library(rpart)
#Curba ROC
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
fix(df)
attach(df)
VenituriMari<-ifelse(Revenue>=0.1,"Yes","No")
VenituriMari
df=data.frame(df, VenituriMari)
df=df[ ,-5]
df
names(df)
set.seed(123)
antrenare<-sample(1:nrow(df),nrow(df)/2)
antrenare
testare=-antrenare
setantrenare<-df[antrenare,]
setantrenare
settestare<-df[testare,]
settestare
arbore<-rpart(as.factor(VenituriMari)~.,data=setantrenare,method="class")
arbore
plot(arbore,uniform=TRUE)#uniform -spatiere verticala a nodurilor
text(arbore,use.n=TRUE,all=TRUE,cex=0.8)
print(arbore)
#loss-obiecte incorect clasificate
#yval-clasa majoritara a acelui nod
#yprob-vectorul de probabilitati
#root 200 79 no (0.6050000 0.3950000)
predictie<-predict(arbore,settestare,type="class")
predictie
matriceconfuzie<-table(settestare$VenituriMari,predictie)
matriceconfuzie
#94 si 59 sunt obs corect previzionate
(94+59)/(94+21+26+59)
#0.76 76% din date sunt corect previzionate
prob<-predict(arbore,settestare,type="prob")
head(prob)
curbaROC<-roc(settestare$VenituriMari,prob[,"Yes"])
curbaROC
plot(curbaROC)
auc(curbaROC) #area under curve
printcp(arbore) #complex parameter-cant cu care divizarea nodului imbunatateste eroarea relativa de clasificare
#nsplit=nr de noduri terminale
#rel error=eroare relativa
#x error=eroare de validare incrucisata
#xstd=abaterea standard
#criteriul de alegere: xerror sa fie minim
plotcp(arbore,col="red")
arborecuratat<-prune(arbore,cp=arbore$cptable[which.min(arbore$cptable[ ,"xerror"]),"CP"])
arborecuratat
plot(arborecuratat,uniform=TRUE)
text(arborecuratat,use.n=TRUE,all=TRUE,cex=0.8)
predictie1<-predict(arborecuratat,settestare,type="class")
predictie1
matriceconfuzie1<-table(settestare$VenituriMari,predictie1)
matriceconfuzie1
#Arbori de regresie
install.packages("tree")
library(tree)
install.packages("MASS")
library(MASS)
set.seed(234)
antrenare<-sample(1:nrow(df),nrow(df)/2)
antrenare
arbore<-tree(ROE~.,df,subset=antrenare)
arbore
plot(arbore)
text(arbore,pretty=0)
cv.tree<-cv.tree(arbore, FUN=prune.misclass)
cv.tree
#SVM
install.packages('e1071',dependencies=TRUE)
install.packages("dplyr")
library(dplyr)
library(e1071)
library(MASS)
df
df<-df %>% select(7,8,9)
df
index <- 1:nrow(df)
index
testindex<- sample(index, trunc(length(index)/3))
testindex
settestare<- df[testindex,]
settestare
setantrenare<- df[-testindex,]
setantrenare
model<-svm(Industry~.,data = setantrenare)
model
plot(model,df)
prediction <- predict(model, settestare[,-3])
prediction
tab <- table(pred = prediction, true = settestare[,3])
tab
classAgreement(tab)
datenoi<-data.frame(ROA=c(-0.235665,0.120007),ROE=c(0.735665,-0.140607))
datenoi
predict(model,datenoi)
predict(model,datenoi,prob=TRUE)
predict
######################
#Retele neuronale
install.packages("neuralnet")
library(neuralnet)
setantrenare<- df[sample(1:40, 20),]
setantrenare
setantrenare$petroleum <- c(setantrenare$Industry == "petroleum")
setantrenare$oilgas <- c(setantrenare$Industry == "oil&gas")
setantrenare$mining <- c(setantrenare$Industry == "mining")
setantrenare$manufacturing <- c(setantrenare$Industry == "manufacturing")
setantrenare$metallurgy <- c(setantrenare$Industry == "metallurgy")
setantrenare$chemicals <- c(setantrenare$Industry == "chemicals")
setantrenare$steel <- c(setantrenare$Industry == "steel")
setantrenare
# Drop the class label from the TRAINING set before fitting the network
# (the original mistakenly dropped it from 'settestare'; the duplicated
# version of this section later in the file uses 'setantrenare').
setantrenare$Industry <- NULL
#Se antrenează reţeaua neuronală care conţine 3 noduri în stratul ascuns.
retea<-neuralnet(petroleum+oilgas+mining+manufacturing+metallurgy+chemicals+steel~Price+Change+Price.Sales+Price.Book+Revenue+MkCap+ROA+ROE, setantrenare, hidden=7, lifesign="full")
retea
plot(retea, rep="best", intercept=FALSE)
#Incarcare date analiza
a<-read.table(file="BasicMaterials-IC.csv",header=TRUE,sep=',')
a
b <- a[,2:9]
b
rownames(b, do.NULL = TRUE, prefix = "row")
rownames(b)<- a$Symbol #etichetarea randurilor cu numele tarilor
View(b)
#standardizarea observatiilor in vederea aplicarii analizei cluster
standardizare <- function(x) {(x - mean(x))/sd(x)} #standardizarea observatiilor
datestd <-apply(b,2,standardizare)
datestd
df1<-data.frame(datestd)
df1
df2<-data.frame(a[,10])
df2
df<-cbind(df1, df2)
df
colnames(df)[colnames(df)=="a...10."] <- "Industry"
df
################################
#Regresia logistica multinomiala
install.packages("MASS")
library(MASS)
install.packages("nnet")
library(nnet)
df$Industry.f<-factor(df$Industry)
df$Industry.f
df$ref<-relevel(df$Industry.f, ref="petroleum")
df$ref
# Multinomial logistic regression with "petroleum" as the reference class;
# trace=FALSE suppresses the fitting log.
# Fix: the original spelled the argument 'traice', which is not an argument
# of multinom()/nnet() and was therefore not honored.
model<-multinom(ref~Revenue+ROA+ROE, data=df, trace=FALSE)
model
summary(model)
predict(model, df)
predict(model, df, type="prob")
predict(model, df[c(3,7,17),], type="prob")
confuzie<-table(df$Industry[1:40], predict(model, df[1:40, ]))
confuzie
mean(df$Industry[1:40]==predict(model, df[1:40,]))
######################
#Retele neuronale
#install.packages("neuralnet")
library(neuralnet)
# Training set: 20 rows sampled (without replacement) from the first 40 of df.
setantrenare<- df[sample(1:40, 20),]
setantrenare
# One-hot encode the Industry label into seven logical indicator columns,
# one per industry class, as required by neuralnet's multi-output formula.
setantrenare$petroleum <- c(setantrenare$Industry == "petroleum")
setantrenare$oilgas <- c(setantrenare$Industry == "oil&gas")
setantrenare$mining <- c(setantrenare$Industry == "mining")
setantrenare$manufacturing <- c(setantrenare$Industry == "manufacturing")
setantrenare$metallurgy <- c(setantrenare$Industry == "metallurgy")
setantrenare$chemicals <- c(setantrenare$Industry == "chemicals")
setantrenare$steel <- c(setantrenare$Industry == "steel")
setantrenare
# The original character label is no longer needed once it is one-hot encoded.
setantrenare$Industry <- NULL
# Train the neural network. NOTE: hidden=7 gives 7 nodes in the hidden layer
# (the original comment said 3, which did not match the code).
retea<-neuralnet(petroleum+oilgas+mining+manufacturing+metallurgy+chemicals+steel~Price+Change+Price.Sales+Price.Book+Revenue+MkCap+ROA+ROE, setantrenare, hidden=7, lifesign="full")
retea
plot(retea, rep="best", intercept=FALSE)
# Predict on df with its 8th column excluded; compute() returns the raw
# network outputs in $net.result.
# NOTE(review): df[-8] drops the 8th column by position -- confirm this is
# the intended column (the label) and not one of the eight predictors.
predictie<-compute(retea,df[-8])$net.result
predictie
\name{kdrobust}
\alias{kdrobust}
\title{Kernel Density Methods with Robust Bias-Corrected Inference}
\description{
\code{\link{kdrobust}} implements kernel density point estimators, with robust bias-corrected confidence intervals and inference procedures developed in Calonico, Cattaneo and Farrell (2018). See also Calonico, Cattaneo and Farrell (2020) for related optimality results.
It also implements other estimation and inference procedures available in the literature. See Wand and Jones (1995) for background references.
Companion commands: \code{\link{kdbwselect}} for kernel density data-driven bandwidth selection, and \code{\link{nprobust.plot}} for plotting results.
A detailed introduction to this command is given in Calonico, Cattaneo and Farrell (2019). For more details, and related Stata and R packages useful for empirical analysis, visit \url{https://nppackages.github.io/}.
}
\usage{
kdrobust(x, eval = NULL, neval = NULL, h = NULL, b = NULL, rho = 1,
kernel = "epa", bwselect = NULL, bwcheck = 21, imsegrid=30, level = 95, subset = NULL)
}
\arguments{
\item{x}{independent variable.}
\item{eval}{vector of evaluation point(s). By default it uses 30 equally spaced points over the support of \code{x}.}
\item{neval}{number of quantile-spaced evaluation points on support of \code{x}. Default is \code{neval=30}.}
\item{h}{main bandwidth used to construct the kernel density point estimator. Can be either scalar (same bandwidth for all evaluation points), or vector of same dimension as \code{eval}. If not specified, bandwidth \code{h} is computed by the companion command \code{\link{kdbwselect}}.}
\item{b}{bias bandwidth used to construct the bias-correction estimator. Can be either scalar (same bandwidth for all evaluation points), or vector of same dimension as \code{eval}. By default it is set equal to \code{h}. If \code{rho} is set to zero, \code{b} is computed by the companion command \code{\link{kdbwselect}}.}
\item{rho}{Sets \code{b=h/rho}. Default is \code{rho = 1}.}
\item{kernel}{kernel function used to construct local polynomial estimators. Options are \code{epa} for the epanechnikov kernel, \code{tri} for the triangular kernel and \code{uni} for the uniform kernel. Default is \code{kernel = epa}.}
\item{bwselect}{bandwidth selection procedure to be used via \code{\link{lpbwselect}}. By default it computes \code{h} and sets \code{b=h/rho} (with \code{rho=1} by default). It computes both \code{h} and \code{b} if \code{rho} is set equal to zero. Options are:
\code{mse-dpi} second-generation DPI implementation of MSE-optimal bandwidth. Default option if only one evaluation point is chosen.
\code{imse-dpi} second-generation DPI implementation of IMSE-optimal bandwidth (computed using a grid of evaluation points). Default option if more than one evaluation point is chosen.
\code{imse-rot} ROT implementation of IMSE-optimal bandwidth (computed using a grid of evaluation points).
\code{ce-dpi} second generation DPI implementation of CE-optimal bandwidth.
\code{ce-rot} ROT implementation of CE-optimal bandwidth.
\code{all} reports all available bandwidth selection procedures.
Note: MSE = Mean Square Error; IMSE = Integrated Mean Squared Error; CE = Coverage Error; DPI = Direct Plug-in; ROT = Rule-of-Thumb. For details on implementation see Calonico, Cattaneo and Farrell (2019).}
\item{bwcheck}{if a positive integer is provided, then the selected bandwidth is enlarged so that at least \code{bwcheck} effective observations are available at each evaluation point. Default is \code{bwcheck = 21}.}
\item{imsegrid}{number of evaluations points used to compute the IMSE bandwidth selector. Default is \code{imsegrid = 30}.}
\item{level}{confidence level used for confidence intervals; default is \code{level = 95}.}
\item{subset}{optional rule specifying a subset of observations to be used.}
}
\value{
\item{Estimate}{A matrix containing \code{eval} (grid points), \code{h}, \code{b} (bandwidths), \code{N}
(effective sample sizes), \code{tau.us} (point estimates with p-th order kernel function),
\code{tau.bc} (bias-corrected point estimates), \code{se.us} (standard error corresponding to \code{tau.us}), and \code{se.rb} (robust standard error).}
\item{opt}{A list containing options passed to the function.}
}
\references{
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2018. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf}{On the Effect of Bias Estimation on Coverage Accuracy in Nonparametric Inference}. Journal of the American Statistical Association, 113(522): 767-779. \doi{doi:10.1080/01621459.2017.1285776}.
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2019. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2019_JSS.pdf}{nprobust: Nonparametric Kernel-Based Estimation and Robust Bias-Corrected Inference}. Journal of Statistical Software, 91(8): 1-33. \doi{http://dx.doi.org/10.18637/jss.v091.i08}.
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2020. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2020_CEopt.pdf}{Coverage Error Optimal Confidence Intervals for Local Polynomial Regression}. Working Paper.
Fan, J., and Gijbels, I. 1996. Local polynomial modelling and its applications, London: Chapman and Hall.
Wand, M., and Jones, M. 1995. Kernel Smoothing, Florida: Chapman & Hall/CRC.
}
\author{
Sebastian Calonico, Columbia University, New York, NY. \email{sebastian.calonico@columbia.edu}.
Matias D. Cattaneo, Princeton University, Princeton, NJ. \email{cattaneo@princeton.edu}.
Max H. Farrell, University of Chicago, Chicago, IL. \email{max.farrell@chicagobooth.edu}.
}
\examples{
x <- rnorm(500)
est <- kdrobust(x)
summary(est)
}
\keyword{ LPR }
\keyword{ Robust Estimation }
\seealso{
\code{\link{kdbwselect}}
} | /man/kdrobust.Rd | no_license | cran/nprobust | R | false | false | 5,979 | rd | \name{kdrobust}
\alias{kdrobust}
\title{Kernel Density Methods with Robust Bias-Corrected Inference}
\description{
\code{\link{kdrobust}} implements kernel density point estimators, with robust bias-corrected confidence intervals and inference procedures developed in Calonico, Cattaneo and Farrell (2018). See also Calonico, Cattaneo and Farrell (2020) for related optimality results.
It also implements other estimation and inference procedures available in the literature. See Wand and Jones (1995) for background references.
Companion commands: \code{\link{kdbwselect}} for kernel density data-driven bandwidth selection, and \code{\link{nprobust.plot}} for plotting results.
A detailed introduction to this command is given in Calonico, Cattaneo and Farrell (2019). For more details, and related Stata and R packages useful for empirical analysis, visit \url{https://nppackages.github.io/}.
}
\usage{
kdrobust(x, eval = NULL, neval = NULL, h = NULL, b = NULL, rho = 1,
kernel = "epa", bwselect = NULL, bwcheck = 21, imsegrid=30, level = 95, subset = NULL)
}
\arguments{
\item{x}{independent variable.}
\item{eval}{vector of evaluation point(s). By default it uses 30 equally spaced points over the support of \code{x}.}
\item{neval}{number of quantile-spaced evaluation points on support of \code{x}. Default is \code{neval=30}.}
\item{h}{main bandwidth used to construct the kernel density point estimator. Can be either scalar (same bandwidth for all evaluation points), or vector of same dimension as \code{eval}. If not specified, bandwidth \code{h} is computed by the companion command \code{\link{kdbwselect}}.}
\item{b}{bias bandwidth used to construct the bias-correction estimator. Can be either scalar (same bandwidth for all evaluation points), or vector of same dimension as \code{eval}. By default it is set equal to \code{h}. If \code{rho} is set to zero, \code{b} is computed by the companion command \code{\link{kdbwselect}}.}
\item{rho}{Sets \code{b=h/rho}. Default is \code{rho = 1}.}
\item{kernel}{kernel function used to construct local polynomial estimators. Options are \code{epa} for the epanechnikov kernel, \code{tri} for the triangular kernel and \code{uni} for the uniform kernel. Default is \code{kernel = epa}.}
\item{bwselect}{bandwidth selection procedure to be used via \code{\link{lpbwselect}}. By default it computes \code{h} and sets \code{b=h/rho} (with \code{rho=1} by default). It computes both \code{h} and \code{b} if \code{rho} is set equal to zero. Options are:
\code{mse-dpi} second-generation DPI implementation of MSE-optimal bandwidth. Default option if only one evaluation point is chosen.
\code{imse-dpi} second-generation DPI implementation of IMSE-optimal bandwidth (computed using a grid of evaluation points). Default option if more than one evaluation point is chosen.
\code{imse-rot} ROT implementation of IMSE-optimal bandwidth (computed using a grid of evaluation points).
\code{ce-dpi} second generation DPI implementation of CE-optimal bandwidth.
\code{ce-rot} ROT implementation of CE-optimal bandwidth.
\code{all} reports all available bandwidth selection procedures.
Note: MSE = Mean Square Error; IMSE = Integrated Mean Squared Error; CE = Coverage Error; DPI = Direct Plug-in; ROT = Rule-of-Thumb. For details on implementation see Calonico, Cattaneo and Farrell (2019).}
\item{bwcheck}{if a positive integer is provided, then the selected bandwidth is enlarged so that at least \code{bwcheck} effective observations are available at each evaluation point. Default is \code{bwcheck = 21}.}
\item{imsegrid}{number of evaluations points used to compute the IMSE bandwidth selector. Default is \code{imsegrid = 30}.}
\item{level}{confidence level used for confidence intervals; default is \code{level = 95}.}
\item{subset}{optional rule specifying a subset of observations to be used.}
}
\value{
\item{Estimate}{A matrix containing \code{eval} (grid points), \code{h}, \code{b} (bandwidths), \code{N}
(effective sample sizes), \code{tau.us} (point estimates with p-th order kernel function),
\code{tau.bc} (bias-corrected point estimates), \code{se.us} (standard error corresponding to \code{tau.us}), and \code{se.rb} (robust standard error).}
\item{opt}{A list containing options passed to the function.}
}
\references{
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2018. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2018_JASA.pdf}{On the Effect of Bias Estimation on Coverage Accuracy in Nonparametric Inference}. Journal of the American Statistical Association, 113(522): 767-779. \doi{doi:10.1080/01621459.2017.1285776}.
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2019. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2019_JSS.pdf}{nprobust: Nonparametric Kernel-Based Estimation and Robust Bias-Corrected Inference}. Journal of Statistical Software, 91(8): 1-33. \doi{http://dx.doi.org/10.18637/jss.v091.i08}.
Calonico, S., M. D. Cattaneo, and M. H. Farrell. 2020. \href{https://nppackages.github.io/references/Calonico-Cattaneo-Farrell_2020_CEopt.pdf}{Coverage Error Optimal Confidence Intervals for Local Polynomial Regression}. Working Paper.
Fan, J., and Gijbels, I. 1996. Local polynomial modelling and its applications, London: Chapman and Hall.
Wand, M., and Jones, M. 1995. Kernel Smoothing, Florida: Chapman & Hall/CRC.
}
\author{
Sebastian Calonico, Columbia University, New York, NY. \email{sebastian.calonico@columbia.edu}.
Matias D. Cattaneo, Princeton University, Princeton, NJ. \email{cattaneo@princeton.edu}.
Max H. Farrell, University of Chicago, Chicago, IL. \email{max.farrell@chicagobooth.edu}.
}
\examples{
x <- rnorm(500)
est <- kdrobust(x)
summary(est)
}
\keyword{ LPR }
\keyword{ Robust Estimation }
\seealso{
\code{\link{kdbwselect}}
} |
library(RCurl)
library(dplyr)
library(ape)
library(reticulate)
Sys.setenv(RETICULATE_PYTHON = "/usr/local/bin/python3")
use_python("/usr/local/bin/python3") #define your python version
#py_run_string("
#import re
#import urllib
#")
# Despite you have defined modules in you python script, regrettably
# you will still need to call them via import when using reticulate package:
urllib <- reticulate::import("urllib", convert = F)
#futhermore, if there is a module inside a directory, you also must to define it
# owing to it seems reticulate packages can deal with it directly
urllib$request
re <- reticulate::import("re", convert = F)
source_python("python/worms.py", convert = F)
#' Audit the public BOLD records available for each queried species and grade
#' barcode quality from the BIN structure.
#'
#' @param species character vector of species names to audit.
#' @param matches optional count of record matches, echoed in the
#'   Observations text when supplied.
#' @param include_ncbi keep records mined from GenBank/NCBI? Default FALSE.
#' @return a data.frame with one row per species and the columns Grades
#'   (A, B, C, D, E*, E** or F), Observations, and BIN_structure (a
#'   JSON-like summary of species counts per BIN).
#' NOTE(review): relies on SpecimenData() and Worms() defined elsewhere in
#' this project, and on network access to the BOLD API.
AuditionBarcodes <- function(species, matches = NULL, include_ncbi = FALSE){ ## for use with public data only
  # A compact way to show each BIN's composition is a JSON-like string of
  # 'BIN':{'species':n,...} fragments joined with commas.
  bin_information_json <- function(bin_input){
    lapply(bin_input, function(x){
      paste("'",
            unique(x$bin),
            "':{",
            paste(
              paste("'",x$species_name,"':", x$n, sep = ""),
              collapse = ","),
            "}",
            sep = "")}) %>%
      unlist(.) %>%
      paste(., collapse = ", ") %>%
      return(.)
  }
  frames = lapply(species, function(x){
    # Specimen metadata restricted to records that belong to a BOLD BIN and
    # are vouchered; GenBank-mined records are kept only when include_ncbi.
    # Fix: the pattern "unvouchered" replaces the original "*unvouchered" --
    # a leading '*' has no operand in a regular expression, so the old
    # pattern matched a literal "*unvouchered" and filtered nothing.
    if(include_ncbi){
      meta.by.barcodes1 = SpecimenData(taxon = x) %>%
        dplyr::select(processid, bin_uri, species_name, institution_storing) %>%
        dplyr::mutate_if(is.factor, as.character) %>%
        dplyr::filter(
          grepl("BOLD", bin_uri),
          !grepl("unvouchered", institution_storing)
        )
    }else{
      meta.by.barcodes1 = SpecimenData(taxon = x) %>%
        dplyr::select(processid, bin_uri, species_name, institution_storing) %>%
        dplyr::mutate_if(is.factor, as.character) %>%
        dplyr::filter(
          grepl("BOLD", bin_uri),
          !grepl("Mined from GenBank, NCBI", institution_storing),
          !grepl("unvouchered", institution_storing)
        )
    }
    ## Total number of records and institutions storing barcodes of the query.
    ## This includes either public or private data.
    js0 = getURL(
      paste("http://www.boldsystems.org/index.php/API_Tax/TaxonSearch?taxName=",
            gsub(" ","%20", x), sep = "")) %>%
      gsub('.*\"taxid\":', "", x = .) %>%
      gsub(',\"taxon\".*', "", x = .) %>%
      paste("http://www.boldsystems.org/index.php/API_Tax/TaxonData?taxId=", . ,
            "&dataTypes=all", sep = "") %>% getURL(url = .) %>%
      gsub('.*\"depositry\":\\{', "", x = .) %>%
      gsub('\\}.*', "", x = .) %>% gsub('\"', "", x = .) %>%
      strsplit(x = ., split = ",") %>% .[[1]] %>%
      strsplit(x = ., split = "\\:") %>%
      lapply(., function(x){
        if(include_ncbi){
          tmp = x[!grepl("unvouchered", x[1])]
        }else{
          tmp = x[!grepl("Mined from GenBank", x[1]) &
                    !grepl(" NCBI", x[1]) &
                    !grepl("unvouchered", x[1])]
        }
        data.frame(institutions = tmp[1], records = as.numeric(tmp[2]))
      }) %>%
      do.call("rbind", .) %>%
      .[!is.na(.$records),]
    if(nrow(meta.by.barcodes1) == 0 && sum(js0$records, na.rm = TRUE) == 0){
      # Grade F: only mined/unvouchered barcodes exist for this species.
      if ( is.null( matches ) )
        obs = "Barcodes mined from GenBank or unvouchered"
      else
        obs = paste("There were ", matches, " matches. Barcodes mined from GenBank, NCBI.", sep = "")
      data.frame(Grades = "F",
                 Observations = obs,
                 BIN_structure = "")
    }
    else if(nrow(meta.by.barcodes1) <= 3 && sum(js0$records, na.rm = TRUE) != 0){
      # Grade D: three or fewer usable records -- insufficient evidence.
      if ( is.null( matches ) )
        obs = paste("Insufficient data. Institution storing: ",
                    length(js0$institutions),
                    ". Total specimen records: ",
                    sum(js0$records, na.rm = TRUE),
                    sep = "")
      else
        obs = paste("There were ",
                    matches ,
                    " matches. Insufficient data. Institution storing: ",
                    length(js0$institutions),
                    ". Specimen records: ",
                    sum(js0$records, na.rm = TRUE),
                    sep = "")
      data.frame(Grades = "D",
                 Observations = obs,
                 BIN_structure = "")
    }
    else{
      ## Species and their number of records per BIN:
      bin = lapply(unique(meta.by.barcodes1$bin_uri), function(x){
        # e.g. x = "BOLD:ACE4593"
        if(include_ncbi){
          SpecimenData(bin = x) %>%
            dplyr::select(species_name, institution_storing) %>%
            dplyr::filter(
              grepl("[A-Z][a-z]+ [a-z]+$",species_name), # species level only
              !grepl("unvouchered", institution_storing),
              !grepl("[A-Z][a-z]+ sp[p|\\.]{0,2}$",species_name) # drop sp./spp.
            ) %>%
            dplyr::group_by(species_name) %>%
            dplyr::summarise(institutes = length(unique(institution_storing)),
                             n = length(species_name))%>%
            mutate(bin = x)
        }else{
          SpecimenData(bin = x) %>%
            dplyr::select(species_name, institution_storing) %>%
            dplyr::filter(
              grepl("[A-Z][a-z]+ [a-z]+$",species_name), # species level only
              !grepl("Mined from GenBank, NCBI", institution_storing),
              !grepl("unvouchered", institution_storing),
              !grepl("[A-Z][a-z]+ sp[p|\\.]{0,2}$",species_name) # drop sp./spp.
            ) %>%
            dplyr::group_by(species_name) %>%
            dplyr::summarise(institutes = length(unique(institution_storing)),
                             n = length(species_name))%>%
            mutate(bin = x)
        }
      })
      names(bin) = unique(meta.by.barcodes1$bin_uri)
      # Currently accepted name (via Worms) for every species found in the
      # BINs. NOTE: this local 'table' shadows base::table inside this scope.
      table = sapply(unique(do.call('rbind', bin)$species_name),
                     function(x){
                       # look up the currently accepted name
                       Worms(x)$get_accepted_name() %>%
                         as.character(.)
                     })
      # With accepted names in 'table', collapse possible synonyms within
      # each element of the list 'bin'.
      bin = lapply(bin, function(x){
        # Assumes BOLD spells species names correctly. validated_names maps
        # each species name of this BIN to its accepted name, in order.
        validated_names = as.character(table[match(x$species_name, names(table))])
        data.frame(species_name = validated_names,
                   x[,2:4]) %>%
          dplyr::group_by(species_name, bin) %>%
          dplyr::summarise(n = sum(n),
                           institutes = sum(institutes)) %>%
          dplyr::ungroup() %>%
          dplyr::mutate_if(is.factor, as.character)
      })
      # Reorder 'bin' by the number of species each BIN contains (descending)
      # so a merged BIN (several species in one BIN) appears first.
      bin = sapply(bin, function(x){length(x$species_name)}) %>%
        sort(., decreasing = TRUE) %>%
        names(.) %>%
        bin[.]
      if(length(unique(meta.by.barcodes1$bin_uri)) > 1){
        if(length(unique(do.call('rbind', bin)$species_name)) > 1){
          # Grade E**: several BINs AND several species mixed together.
          if ( is.null( matches ) )
            obs = "Mixtured BIN"
          else
            obs = paste("There were ", matches ," matches. Mixtured BIN and it's composed by species such as: ",
                        paste(unique(do.call('rbind', bin)$species_name), collapse = ", "), sep = "")
          data.frame(Grades = "E**",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else{
          # Grade C: one species split across several BINs.
          if ( is.null( matches ) )
            obs = "Splitted BIN"
          else
            obs = paste("There were ", matches, " matches. Assessment of intraspecific divergences is still needed.",
                        sep = "")
          data.frame(Grades = "C",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }
      }else{
        if(length(unique(bin[[1]]$species_name)) == 1 &&
           sum(bin[[1]]$institutes) > 1 ){
          # Grade A: one species, one BIN, vouchered by >1 institution.
          if ( is.null( matches ) )
            obs = "Matched BIN with external congruence"
          else
            obs = paste("There were ", matches , " matches. External congruence.", sep = "")
          data.frame(Grades = "A",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else if(length(unique(bin[[1]]$species_name)) == 1 &&
                 sum(bin[[1]]$institutes) == 1 ){
          # Grade B: one species, one BIN, but a single institution only.
          if ( is.null( matches ) )
            obs = "Matched BIN with internal congruence only"
          else
            obs = paste("There were ", matches , " matches. Internal congruence.", sep = "")
          data.frame(Grades = "B",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else{
          # Grade E*: several species share this single BIN (merged BIN).
          # Fix: the original referenced an undefined 'unique.bin'; the
          # species sharing the BIN are listed in bin[[1]].
          if ( is.null( matches ) )
            obs = "Merged BIN"
          else
            obs = paste("There were ", matches,
                        " matches. ", paste(unique(bin[[1]]$species_name), collapse = ","),
                        " shared the same BIN.",
                        sep = "")
          data.frame(Grades = "E*",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }
      }
    }
  })
  return(do.call('rbind', frames))
}
| /r/AuditionBarcode.v.2.R | permissive | Ulises-Rosas/BOLD-mineR | R | false | false | 12,135 | r | library(RCurl)
library(dplyr)
library(ape)
library(reticulate)
# Pin reticulate to a specific Python interpreter (edit for your system).
Sys.setenv(RETICULATE_PYTHON = "/usr/local/bin/python3")
use_python("/usr/local/bin/python3") #define your python version
#py_run_string("
#import re
#import urllib
#")
# Even though the Python script imports its own modules, they still
# need to be imported on the R side when using the reticulate package:
urllib <- reticulate::import("urllib", convert = F)
# Submodules inside a package directory must also be referenced explicitly,
# since reticulate does not always resolve them directly.
# NOTE(review): this bare `urllib$request` discards its value — presumably it
# only forces submodule resolution; confirm it is intentional.
urllib$request
re <- reticulate::import("re", convert = F)
source_python("python/worms.py", convert = F)
## Audit barcode records for each species using public BOLD data and
## assign a BAGS-style grade:
##   A  - one BIN, one species, held by more than one institution
##   B  - one BIN, one species, a single institution
##   C  - species split across several BINs (all single-species)
##   D  - insufficient data (3 or fewer usable records)
##   E* - several species merged into one BIN
##   E**- several BINs, at least one mixing species
##   F  - only GenBank-mined / unvouchered records available
##
## Args:
##   species:      character vector of species names to audit.
##   matches:      optional match count to mention in Observations;
##                 when NULL a short static observation is used.
##   include_ncbi: keep records mined from GenBank/NCBI when TRUE.
##
## Returns a data.frame with one row per species: Grades, Observations
## and BIN_structure (a JSON-like summary of each BIN's composition).
## Depends on SpecimenData(), Worms() (python/worms.py) and the BOLD web API.
AuditionBarcodes <- function(species, matches = NULL, include_ncbi = F){ ##function for only using with public data
  # Compress the per-BIN composition into a JSON-like string:
  # 'BIN_uri':{'species':count,...}, entries joined by ", ".
  bin_information_json <- function(bin_input){
    lapply(bin_input, function(x){
      paste("'",
            unique(x$bin),
            "':{",
            paste(
              paste("'", x$species_name, "':", x$n, sep = ""),
              collapse = ","),
            "}",
            sep = "")}) %>%
      unlist(.) %>%
      paste(., collapse = ", ") %>%
      return(.)
  }
  frames = lapply(species, function(x){
    # Specimen metadata for the species. GenBank-mined and unvouchered
    # records are dropped unless include_ncbi is TRUE; only records with
    # a BOLD BIN assignment are kept.
    if(include_ncbi){
      meta.by.barcodes1 = SpecimenData(taxon = x) %>%
        dplyr::select(processid, bin_uri, species_name, institution_storing) %>%
        dplyr::mutate_if(is.factor, as.character) %>%
        dplyr::filter(
          grepl("BOLD", bin_uri),
          !grepl("*unvouchered", institution_storing)
        )
    }else{
      meta.by.barcodes1 = SpecimenData(taxon = x) %>%
        dplyr::select(processid, bin_uri, species_name, institution_storing) %>%
        dplyr::mutate_if(is.factor, as.character) %>%
        dplyr::filter(
          grepl("BOLD", bin_uri),
          !grepl("Mined from GenBank, NCBI", institution_storing),
          !grepl("*unvouchered", institution_storing)
        )
    }
    ## Total number of records and institutions storing barcodes of the query.
    ## This includes either public or private data (scraped from the BOLD
    ## TaxonSearch/TaxonData JSON endpoints via regex).
    js0 = getURL(
      paste("http://www.boldsystems.org/index.php/API_Tax/TaxonSearch?taxName=",
            gsub(" ","%20", x), sep = "")) %>%
      gsub('.*\"taxid\":', "", x = .) %>%
      gsub(',\"taxon\".*', "", x = .) %>%
      paste("http://www.boldsystems.org/index.php/API_Tax/TaxonData?taxId=", . ,
            "&dataTypes=all", sep = "") %>% getURL(url = .) %>%
      gsub('.*\"depositry\":\\{', "", x = .) %>%
      gsub('\\}.*', "", x = .) %>% gsub('\"', "", x = .) %>%
      strsplit(x = ., split = ",") %>% .[[1]] %>%
      strsplit(x = ., split = "\\:") %>%
      lapply(., function(x){
        if(include_ncbi){
          tmp = x[!grepl("*unvouchered", x[1])]
        }else{
          tmp = x[!grepl("Mined from GenBank", x[1]) &
                    !grepl(" NCBI", x[1]) &
                    !grepl("*unvouchered", x[1])]
        }
        data.frame(institutions = tmp[1], records = as.numeric(tmp[2]))
      }) %>%
      do.call("rbind", .) %>%
      .[!is.na(.$records),]
    # Grade F: nothing usable at all.
    if(nrow(meta.by.barcodes1) == 0 && sum(js0$records, na.rm = T) == 0){
      if ( is.null( matches ) )
        obs = "Barcodes mined from GenBank or unvouchered"
      else
        obs = paste("There were ", matches, " matches. Barcodes mined from GenBank, NCBI.", sep = "")
      data.frame(Grades = "F",
                 Observations = obs,
                 BIN_structure = "")
    }
    # Grade D: too few usable records to assess BIN structure.
    else if(nrow(meta.by.barcodes1) <= 3 && sum(js0$records, na.rm = T) != 0){
      if ( is.null( matches ) )
        obs = paste("Insufficient data. Institution storing: ",
                    length(js0$institutions),
                    ". Total specimen records: ",
                    sum(js0$records, na.rm = T),
                    sep = "")
      else
        obs = paste("There were ",
                    matches ,
                    " matches. Insufficient data. Institution storing: ",
                    length(js0$institutions),
                    ". Specimen records: ",
                    sum(js0$records, na.rm = T),
                    sep = "")
      data.frame(Grades = "D",
                 Observations = obs,
                 BIN_structure = "")
    }
    else{
      ## Species and their number of records by BIN: one summary table
      ## (species_name, institutes, n, bin) per BIN URI.
      bin = lapply(unique(meta.by.barcodes1$bin_uri), function(x){
        #x = "BOLD:ACE4593"
        if(include_ncbi){
          SpecimenData(bin = x) %>%
            dplyr::select(species_name, institution_storing) %>%
            dplyr::filter(
              grepl("[A-Z][a-z]+ [a-z]+$",species_name), #just considering species level
              !grepl("*unvouchered", institution_storing),
              !grepl("[A-Z][a-z]+ sp[p|\\.]{0,2}$",species_name) #just considering species level
            ) %>%
            dplyr::group_by(species_name) %>%
            dplyr::summarise(institutes = length(unique(institution_storing)),
                             n = length(species_name))%>%
            mutate(bin = x)
        }else{
          SpecimenData(bin = x) %>%
            dplyr::select(species_name, institution_storing) %>%
            dplyr::filter(
              grepl("[A-Z][a-z]+ [a-z]+$",species_name), #just considering species level
              !grepl("Mined from GenBank, NCBI", institution_storing),
              !grepl("*unvouchered", institution_storing),
              !grepl("[A-Z][a-z]+ sp[p|\\.]{0,2}$",species_name) #just considering species level
            ) %>%
            dplyr::group_by(species_name) %>%
            dplyr::summarise(institutes = length(unique(institution_storing)),
                             n = length(species_name))%>%
            mutate(bin = x)
        }
      })
      names(bin) = unique(meta.by.barcodes1$bin_uri)
      # Table mapping each observed species name to its currently accepted
      # name according to WoRMS (via the python Worms helper).
      table = sapply(unique(do.call('rbind', bin)$species_name),
                     function(x){
                       #it gets currently accepted names
                       Worms(x)$get_accepted_name() %>%
                         as.character(.)
                     })
      # With accepted names available, collapse possible synonyms within
      # each element of the list `bin`.
      bin = lapply(bin, function(x){
        #it assumes that bold has species names correctly written
        #validated_names contains the match between species names of each element
        # of the list bin and 'table'. It is ordered according to position of
        # species name on each element of the list.
        validated_names = as.character(table[match(x$species_name, names(table))])
        data.frame(species_name = validated_names,
                   x[,2:4]) %>%
          dplyr::group_by(species_name, bin) %>%
          dplyr::summarise(n = sum(n),
                           institutes = sum(institutes)) %>%
          dplyr::ungroup() %>%
          dplyr::mutate_if(is.factor, as.character)
      })
      # Reorder the list by the number of species per BIN (descending),
      # so a merged/mixed BIN appears first.
      bin = sapply(bin, function(x){length(x$species_name)}) %>%
        sort(., decreasing = T) %>%
        names(.) %>%
        bin[.]
      if(length(unique(meta.by.barcodes1$bin_uri)) > 1){
        # Several BINs: either at least one BIN mixes species (E**) or the
        # species is split across single-species BINs (C).
        if(length(unique(do.call('rbind', bin)$species_name)) > 1){
          if ( is.null( matches ) )
            obs = "Mixtured BIN"
          else
            obs = paste("There were ", matches ," matches. Mixtured BIN and it's composed by species such as: ",
                        paste(unique(do.call('rbind', bin)$species_name), collapse = ", "), sep = "")
          data.frame(Grades = "E**",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else{
          if ( is.null( matches ) )
            obs = "Splitted BIN"
          else
            obs = paste("There were ", matches, " matches. Assessment of intraspecific divergences is still needed.",
                        sep = "")
          data.frame(Grades = "C",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }
      }else{
        # A single BIN: grade depends on how many species it holds and how
        # many institutions contributed records.
        if(length(unique(bin[[1]]$species_name)) == 1 &&
           sum(bin[[1]]$institutes) > 1 ){
          if ( is.null( matches ) )
            obs = "Matched BIN with external congruence"
          else
            obs = paste("There were ", matches , " matches. External congruence.", sep = "")
          data.frame(Grades = "A",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else if(length(unique(bin[[1]]$species_name)) == 1 &&
                 sum(bin[[1]]$institutes) == 1 ){
          if ( is.null( matches ) )
            obs = "Matched BIN with internal congruence only"
          else
            obs = paste("There were ", matches , " matches. Internal congruence.", sep = "")
          data.frame(Grades = "B",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }else{
          if ( is.null( matches ) )
            obs = "Merged BIN"
          else
            # BUG FIX: this branch referenced `unique.bin`, an object never
            # defined anywhere in the file, which made the "Merged BIN with
            # matches" path error at runtime. Use the combined `bin` table,
            # mirroring the E** branch above.
            obs = paste("There were ", matches,
                        " matches. ", paste(unique(do.call('rbind', bin)$species_name), collapse = ","),
                        " shared the same BIN.",
                        sep = "")
          data.frame(Grades = "E*",
                     Observations = obs,
                     BIN_structure = bin_information_json(bin_input = bin))
        }
      }
    }
  })
  return(do.call('rbind', frames))
}
|
#comments are introduced by a '#' symbol
#to run the current line in RStudio: Ctrl + Enter
1+1
2^8
log10(1000) #function call syntax: function_name(argument1, argument2, ...)
pi
cos(60) # in radians!
cos(60*pi/180) #converting degrees to radians first
#assignment with = or <- (the <- operator is the R convention)
x = 3
y = 2.4916
#printing
x
x+y
round(y, 2)
round(x+y)
name = "University of Agrculture"
name
#Basic data types:
class(name) #character
class(x) #numeric
z = 3L
class(z) #integer (the L suffix marks an integer literal)
#Vectors - ordered collections of elements of a single type
v1 = c(4, 8, 10)
v2 = c(1:3)
v3 = vector() #empty vector
v4 = vector("numeric")
v5 = c("cat", "dog", "parrot")
#TRUE or FALSE - logical; comparisons are vectorized (element-wise)
v1 == 1
v1 == 8
v5 == "cat"
1 == 1
1 == 2
TRUE == TRUE
v1 + v2
class(v5)
#Matrices - filled column-wise by default
m1 = matrix(1:9, nrow = 3, ncol = 3)
m1
m2 = matrix(c("cat","dog", "parrot", "cow", "elephant", "bee", "owl", "giraffe", "mouse"), nrow = 3, ncol = 3)
m2
class(m1)
class(m2)
typeof(m1)
typeof(m2)
#often in R we read external objects, such as databases; they are usually in .csv format
tab = read.csv("D:/11_Git/zajeciaR/DL_dane_cw1.csv", sep = ";", dec = ",") #Excel "provides" csv data separated with semicolons, not commas,
#so in read.csv I have to specify that; also, in Polish the character used for decimal points is a comma while in R it's a dot,
#so I also have to specify that dec = ","
#But it of course depends on the format of data that you use!
#read.csv2 uses exactly those defaults (sep = ";", dec = ","), so this is equivalent:
tab = read.csv2("D:/09_Dydaktyka/kurs_R/DL_dane_cw1.csv")
#what class of object is tab?
class(tab) #a data frame
#let's check what's inside
str(tab)
summary(tab)
#subsetting a dataframe: df$column, df[row, column]
tab$Slope
tab[,4]
tab[7,19]
tab[7,]
tab[,c(3:4, 9:13)]
unique(tab$District)
#simple scatterplots using plot()
plot(tab$Age, tab$HL)
plot(tab$Elevation, tab$SI)
plot(tab$HL, tab$Dg)
#other plot types
scatter.smooth(tab$HL, tab$Dg)
boxplot(tab$HL)
plot(density(tab$HL))
hist(tab$HL)
#missing values - NA (Not Available) propagate through computations
mean(tab$Age)
min(tab$TPI200)
is.na(tab$TPI200)
min(tab$TPI200, na.rm = TRUE)
sd(tab$TPI200, na.rm = TRUE)
mean(tab$TPI200, na.rm = TRUE)
#subsetting the dataframe and assigning it to a new object
tab2 = tab[,c(3:4, 9:13, 18)]
pairs(tab2)
#correlation coefficients and matrices
?cor #to check help for some function use ?
cor(tab2)
cor(tab2, use = "complete.obs")
cor(tab2, method = "spearman")
#package installation and loading - install only once, load in every new session
#install.packages("corrplot")
library(corrplot)
?corrplot
m.cor = cor(tab2, use = "complete.obs")
corrplot(m.cor, method = "color", type = "upper")
corrplot.mixed(m.cor, lower.col = "black", upper = "circle")
corrplot(m.cor, type = "upper", method = "color")
#linear regression - lm()
?lm
reg_lin = lm(HL ~ Age, tab)
reg_lin
summary(reg_lin)
plot(reg_lin)
scatter.smooth(tab$Age, tab$HL)
#predicting "new" values based on the regression model
pred_HL = predict(reg_lin, tab)
pred_HL
plot(tab$HL, pred_HL)
#multiple linear regression
reg_mul = lm(HL ~ Age + Elevation, tab)
summary(reg_mul)
reg_mul2 = lm(SI ~ Age + HL, tab)
summary(reg_mul2)
scatter.smooth(tab$SI, tab$HL)
#polynomial regression - SI as a function of elevation
scatter.smooth(tab$Elevation, tab$SI)
reg_poly = lm(tab$SI ~ poly(tab$Elevation,2))
summary(reg_poly)
#comparison with linear regression
reg_lin = lm(tab$SI ~ tab$Elevation)
summary(reg_lin)
#there are many other methods for regression, such as GAM or machine learning techniques...
#GGPLOT2 package - for visualization
library(ggplot2)
ggplot(tab, aes(Elevation, SI))
ggplot(tab, aes(Elevation, SI))+
geom_point()
#the same as above:
ggplot(tab)+
geom_point(aes(Elevation, SI))
ggplot(tab, aes(Elevation, SI))+
geom_point(color = "steelblue", size = 5, alpha = 0.6)+
geom_smooth(se = 0, color = "black", size = 1.2)+
xlim(500,1300)+
ylim(15, 40)+
labs(title = "Elevations vs Site Index", x = "Site Index", y = "Elevation")
p = ggplot(tab, aes(Elevation, SI, color = Aspect, size = Age)) + #colors and sizes mapped to other variables!
geom_point(alpha = 0.6)+
#geom_hline(yintercept = 40, size = 1.2, alpha = 0.6)+
#geom_smooth(size =2, se = 0)+
xlim(500, 1400)+
theme_bw()
p + geom_hline(yintercept = 40, size = 1.2, alpha = 0.6)
#ggplot - adding a regression line - different ways
reg1 = lm(tab$SI ~ tab$Elevation)
coefficients(reg1)
ggplot(tab, aes(Elevation, SI))+
geom_point( color = "black")+
geom_abline(intercept = 51.650001, slope = -0.02083231, color = "blue")
#geom_point(aes(Elevation, predict(reg1, tab)), color = "orange", size = 1.4)
#stat_smooth(method = "lm", formula = y ~ poly(x,2), color = "red", se= 0)+
#stat_smooth(method = "lm", formula = y ~ x, color = "darkgreen", se = 0)
#other types of plots in ggplot2:
ggplot(tab, aes(x = HL))+
geom_histogram()
ggplot(tab, aes(x = Age, fill = District))+
geom_density(alpha= 0.5)
#geom_vline(aes(xintercept = mean(Age)), linetype = "dashed", size = 1)+
#theme(legend.position = "bottom")
ggplot(tab, aes(x = Age))+
geom_density(alpha= 0.5)+
geom_vline(aes(xintercept = mean(Age)), linetype = "dashed", size = 1)+
theme(legend.position = "bottom")+
facet_grid(.~District)
ggplot(tab, aes(y = HL, x = geology, color = geology))+
geom_boxplot()+
geom_jitter()
##-----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Homework:
#1 - create two regression models - simple linear and polynomial, which explain the relationship between HL (response variable) and Dg (predictor variable)
#2 - using ggplot create plot with real observations as points
#3 - add two regression lines (in different colors)
#4 - set size of points according to the age and set alpha to 0.4
#5 - describe x lab as "diameter", y lab as "Height" and the title as "Linear vs polynomial regression"
| /script1.R | no_license | egrabska/zajeciaR | R | false | false | 5,922 | r | #comments drafted by a '#' symbol
#to run the command-line > ctrl + enter
1+1
2^8
log10(1000) #function: function_name(argument1, argument2, ...)
pi
cos(60) # in radians!
cos(60*pi/180) #changing to degrees
#assignment with = or <-
x = 3
y = 2.4916
#printing
x
x+y
round(y, 2)
round(x+y)
name = "University of Agrculture"
name
#Basic data types:
class(name) #character
class(x) #numeric
z = 3L
class(z) #integer
#Vectors - collection of elements
v1 = c(4, 8, 10)
v2 = c(1:3)
v3 = vector() #empty vector
v4 = vector("numeric")
v5 = c("cat", "dog", "parrot")
#TRUE or FALSE - logical
v1 == 1
v1 == 8
v5 == "cat"
1 == 1
1 == 2
TRUE == TRUE
v1 + v2
class(v5)
#Matrices
m1 = matrix(1:9, nrow = 3, ncol = 3)
m1
m2 = matrix(c("cat","dog", "parrot", "cow", "elephant", "bee", "owl", "giraffe", "mouse"), nrow = 3, ncol = 3)
m2
class(m1)
class(m2)
typeof(m1)
typeof(m2)
#often in R, we will read some objects, such as databases, they are ususally in .csv format
tab = read.csv("D:/11_Git/zajeciaR/DL_dane_cw1.csv", sep = ";", dec = ",") #excel "provides" csv data seperated with semicolon not commas,
#so in read.csv I have to specify that, plus - in polish language the character used for decimal points is comma while in R it's a dot
#so I also have to specify that dec = ","
#But it of course depends on the format of data that you use!
#the same as above is when using function read.csv2:
tab = read.csv2("D:/09_Dydaktyka/kurs_R/DL_dane_cw1.csv")
#what class object is tab?
class(tab) #a data frame
#let's check what's inside
str(tab)
summary(tab)
#subsetting a dataframe
tab$Slope
tab[,4]
tab[7,19]
tab[7,]
tab[,c(3:4, 9:13)]
unique(tab$District)
#simple scatterplots using plot()
plot(tab$Age, tab$HL)
plot(tab$Elevation, tab$SI)
plot(tab$HL, tab$Dg)
#other plot types
scatter.smooth(tab$HL, tab$Dg)
boxplot(tab$HL)
plot(density(tab$HL))
hist(tab$HL)
#missing values - NA (Not Available)
mean(tab$Age)
min(tab$TPI200)
is.na(tab$TPI200)
min(tab$TPI200, na.rm = TRUE)
sd(tab$TPI200, na.rm = TRUE)
mean(tab$TPI200, na.rm = TRUE)
#subsetting dataframe and assigning it to a new object
tab2 = tab[,c(3:4, 9:13, 18)]
pairs(tab2)
#correlation coeeficients and matrices
?cor #to check help for some function use ?
cor(tab2)
cor(tab2, use = "complete.obs")
cor(tab2, method = "spearman")
#packages installaltion and loading - install only once, loading in every new project
#install.packages("corrplot")
library(corrplot)
?corrplot
m.cor = cor(tab2, use = "complete.obs")
corrplot(m.cor, method = "color", type = "upper")
corrplot.mixed(m.cor, lower.col = "black", upper = "circle")
corrplot(m.cor, type = "upper", method = "color")
#linear regression - lm()
?lm
reg_lin = lm(HL ~ Age, tab)
reg_lin
summary(reg_lin)
plot(reg_lin)
scatter.smooth(tab$Age, tab$HL)
#predicting "new" values based on regression model
pred_HL = predict(reg_lin, tab)
pred_HL
plot(tab$HL, pred_HL)
#multiple linear regression
reg_mul = lm(HL ~ Age + Elevation, tab)
summary(reg_mul)
reg_mul2 = lm(SI ~ Age + HL, tab)
summary(reg_mul2)
scatter.smooth(tab$SI, tab$HL)
#polynomial regression - SI as a function of elevation
scatter.smooth(tab$Elevation, tab$SI)
reg_poly = lm(tab$SI ~ poly(tab$Elevation,2))
summary(reg_poly)
#comparison with linear regression
reg_lin = lm(tab$SI ~ tab$Elevation)
summary(reg_lin)
#ithere are many other methods for regressions such as GAM or machine learning techniques...
#GGPLOT2 package - for visualization
library(ggplot2)
ggplot(tab, aes(Elevation, SI))
ggplot(tab, aes(Elevation, SI))+
geom_point()
#the same as above:
ggplot(tab)+
geom_point(aes(Elevation, SI))
ggplot(tab, aes(Elevation, SI))+
geom_point(color = "steelblue", size = 5, alpha = 0.6)+
geom_smooth(se = 0, color = "black", size = 1.2)+
xlim(500,1300)+
ylim(15, 40)+
labs(title = "Elevations vs Site Index", x = "Site Index", y = "Elevation")
p = ggplot(tab, aes(Elevation, SI, color = Aspect, size = Age)) + #colors and sizes related to other variables!
geom_point(alpha = 0.6)+
#geom_hline(yintercept = 40, size = 1.2, alpha = 0.6)+
#geom_smooth(size =2, se = 0)+
xlim(500, 1400)+
theme_bw()
p + geom_hline(yintercept = 40, size = 1.2, alpha = 0.6)
#ggplot - adding regression line - different ways
reg1 = lm(tab$SI ~ tab$Elevation)
coefficients(reg1)
ggplot(tab, aes(Elevation, SI))+
geom_point( color = "black")+
geom_abline(intercept = 51.650001, slope = -0.02083231, color = "blue")
#geom_point(aes(Elevation, predict(reg1, tab)), color = "orange", size = 1.4)
#stat_smooth(method = "lm", formula = y ~ poly(x,2), color = "red", se= 0)+
#stat_smooth(method = "lm", formula = y ~ x, color = "darkgreen", se = 0)
#other types of plots in ggplot2:
ggplot(tab, aes(x = HL))+
geom_histogram()
ggplot(tab, aes(x = Age, fill = District))+
geom_density(alpha= 0.5)
#geom_vline(aes(xintercept = mean(Age)), linetype = "dashed", size = 1)+
#theme(legend.position = "bottom")
ggplot(tab, aes(x = Age))+
geom_density(alpha= 0.5)+
geom_vline(aes(xintercept = mean(Age)), linetype = "dashed", size = 1)+
theme(legend.position = "bottom")+
facet_grid(.~District)
ggplot(tab, aes(y = HL, x = geology, color = geology))+
geom_boxplot()+
geom_jitter()
##-----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Homework:
#1 - create two regression models - simple linear and polynomial, which explain the relationship between HL (response variable) and Dg (predictor variable)
#2 - using ggplot create plot with real observations as points
#3 - add two regression lines (in different colors)
#4 - set size of points according to the age and set alpha to 0.4
#5 - describe x lab as "diameter", y lab as "Height" and the title as "Linear vs polynomial regression"
|
# for Plot1, https://www.coursera.org/learn/exploratory-data-analysis/peer/ylVFo/course-project-1
# Create the data directory if it does not exist yet.
if (!file.exists("Data")) { dir.create("Data")}
# Download and unzip the dataset only when it is not already present locally.
if (!file.exists("Data/household_power_consumption.txt")) {
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zfile <- "./Data/exdata_data_household_power_consumption.zip"
message("***** downloading file, this can take up to a few minutes *****")
# NOTE(review): method="curl" requires the curl binary on PATH; confirm portability on Windows.
download.file(fileURL, destfile=zfile, method="curl")
unzip(zfile, exdir="./Data")}
# Read the full semicolon-separated file; "?" marks missing values.
message("***** reading the full file - this can take a while *****")
data <- read.table(file="./Data/household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
# Keep only the observations for 1-2 February 2007 (Date is d/m/yyyy).
data <- subset(data, as.character(Date) %in% c("1/2/2007", "2/2/2007"))
data$Global_active_power <- as.numeric(data$Global_active_power)
# Draw the histogram of global active power into plot1.png (480x480 px).
png(filename = "plot1.png",
width = 480, height = 480)
par( mar = c(5, 6.5, 4, 2))
hist(data$Global_active_power,col="red" ,
xlab="Global Active Power (kilowatts)",
ylab= "Frequency",
main="Global Active Power")
dev.off()
| /plot1.R | no_license | bkiesewe/ExData_Plotting1 | R | false | false | 1,348 | r | # for Plot1, https://www.coursera.org/learn/exploratory-data-analysis/peer/ylVFo/course-project-1
# checking whether directory exists otherwise create the directory
if (!file.exists("Data")) { dir.create("Data")}
# checking whether the file has already been downloaded otherwise download the same
if (!file.exists("Data/household_power_consumption.txt")) {
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zfile <- "./Data/exdata_data_household_power_consumption.zip"
message("***** downloading file, this can take up to a few minutes *****")
download.file(fileURL, destfile=zfile, method="curl")
unzip(zfile, exdir="./Data")}
# loading entire data
message("***** reading the full file - this can take a while *****")
data <- read.table(file="./Data/household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
# subsetting to 01 and 02 February 2007 data
data <- subset(data, as.character(Date) %in% c("1/2/2007", "2/2/2007"))
data$Global_active_power <- as.numeric(data$Global_active_power)
# ploting data
png(filename = "plot1.png",
width = 480, height = 480)
par( mar = c(5, 6.5, 4, 2))
hist(data$Global_active_power,col="red" ,
xlab="Global Active Power (kilowatts)",
ylab= "Frequency",
main="Global Active Power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_frame_functions.R
\name{as_num_matrix}
\alias{as_num_matrix}
\title{Convert a data.frame to a numeric matrix, including factors.}
\usage{
as_num_matrix(df, skip_chr = T)
}
\arguments{
\item{df}{(data.frame) A data.frame with variables.}
\item{skip_chr}{(lgl scalar) Whether to skip character columns (default). If false, they are converted to non-ordered factors.}
}
\description{
Returns a numeric matrix. Ordered factors are converted to numbers, while non-ordered factors are split into dummy variables using the first level as the reference.
}
\details{
Factors with only two levels are kept as they are.
}
\examples{
head(as_num_matrix(iris)) #Convert iris to purely numerics. Two variables are created because the original had 3 levels.
}
| /man/as_num_matrix.Rd | permissive | sbibauw/kirkegaard | R | false | true | 833 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_frame_functions.R
\name{as_num_matrix}
\alias{as_num_matrix}
\title{Convert a data.frame to a numeric matrix, including factors.}
\usage{
as_num_matrix(df, skip_chr = T)
}
\arguments{
\item{df}{(data.frame) A data.frame with variables.}
\item{skip_chr}{(lgl scalar) Whether to skip character columns (default). If false, they are converted to non-ordered factors.}
}
\description{
Returns a numeric matrix. Ordered factors are converted to numbers, while non-ordered factors are split into dummy variables using the first level as the the reference.
}
\details{
Factors with only two levels are kept as they are.
}
\examples{
head(as_num_matrix(iris)) #Convert iris to purely numerics. Two variables are created because the original had 3 levels.
}
|
#' Build zero-padded observation file paths.
#'
#' @param directory Directory holding the observation files (no trailing slash).
#' @param obs Vector of positive whole-number observation IDs (typically 1-332).
#' @return Character vector of paths such as "directory/001.csv", one per ID.
obsFileName <- function(directory, obs) {
        # sprintf() is vectorized, so the original per-element loop and the
        # three-way padding branches collapse to one call: "%03d" left-pads
        # each ID with zeros to a minimum width of three digits (IDs >= 1000
        # are printed unpadded, matching the old behaviour).
        # Unlike the old `for(i in 1:length(obs))` loop, this also handles an
        # empty `obs` correctly (returns character(0) instead of erroring).
        sprintf("%s/%03d.csv", directory, obs)
}
| /ObsPath.R | no_license | kumarpawan0522/AirPollution | R | false | false | 449 | r |
obsFileName <- function(directory, obs) {
filename <- character(length(obs))
for(i in 1:length(obs))
{
if (obs[i]<10) {
filename[i] = paste(directory, "/","00", obs[i], ".csv", sep="")
} else if (obs[i] >= 10 && obs[i] < 100) {
filename[i] = paste(directory, "/", "0", obs[i], ".csv", sep="")
} else {
filename[i] = paste(directory, "/", obs[i], ".csv", sep="")
}
}
return(filename)
}
|
#' Evaluate an expression in an environment.
#'
#' \code{expr_eval()} is a lightweight version of the base function
#' \code{\link[base]{eval}()}. It does not accept supplementary data,
#' but it is more efficient and does not clutter the evaluation stack.
#' Technically, \code{expr_eval()} is a simple wrapper around the C
#' function \code{Rf_eval()}.
#'
#' \code{base::eval()} inserts two call frames in the stack, the
#' second of which features the \code{envir} parameter as frame
#' environment. This may unnecessarily clutter the evaluation stack
#' and it can change evaluation semantics with stack sensitive
#' functions in the case where \code{env} is an evaluation environment
#' of a stack frame (see \code{\link{eval_stack}()}). Since the base
#' function \code{eval()} creates a new evaluation context with
#' \code{env} as frame environment there are actually two contexts
#' with the same evaluation environment on the stack when \code{expr}
#' is evaluated. Thus, any command that looks up frames on the stack
#' (stack sensitive functions) may find the parasite frame set up by
#' \code{eval()} rather than the original frame targetted by
#' \code{env}. As a result, code evaluated with \code{base::eval()}
#' does not have the property of stack consistency, and stack
#' sensitive functions like \code{\link[base]{return}()},
#' \code{\link[base]{parent.frame}()} may return misleading results.
#'
#' @param expr An expression to evaluate.
#' @param env The environment in which to evaluate the expression.
#' @useDynLib rlang rlang_eval
#' @seealso with_env
#' @export
#' @examples
#' # expr_eval() works just like base::eval():
#' env <- new_env(data = list(foo = "bar"))
#' expr <- quote(foo)
#' expr_eval(expr, env)
#'
#' # To explore the consequences of stack inconsistent semantics, let's
#' # create a function that evaluates `parent.frame()` deep in the call
#' # stack, in an environment corresponding to a frame in the middle of
#' # the stack. For consistency we R's lazy evaluation semantics, we'd
#' # expect to get the caller of that frame as result:
#' fn <- function(eval_fn) {
#' list(
#' returned_env = middle(eval_fn),
#' actual_env = env()
#' )
#' }
#' middle <- function(eval_fn) {
#' deep(eval_fn, env())
#' }
#' deep <- function(eval_fn, eval_env) {
#' expr <- quote(parent.frame())
#' eval_fn(expr, eval_env)
#' }
#'
#' # With expr_eval(), we do get the expected environment:
#' fn(rlang::expr_eval)
#'
#' # But that's not the case with base::eval():
#' fn(base::eval)
#'
#' # Another difference of expr_eval() compared to base::eval() is
#' # that it does not insert parasite frames in the evaluation stack:
#' get_stack <- quote(identity(eval_stack()))
#' expr_eval(get_stack)
#' eval(get_stack)
expr_eval <- function(expr, env = parent.frame()) {
  # Thin wrapper over the C-level evaluator: evaluates `expr` in `env`
  # without pushing extra call frames onto the evaluation stack
  # (see the roxygen block above for why this matters).
  .Call(rlang_eval, expr, env)
}
#' Turn an expression to a label.
#'
#' \code{expr_text()} turns the expression into a single string;
#' \code{expr_label()} formats it nicely for use in messages.
#'
#' @param expr An expression to labellise.
#' @export
#' @examples
#' # To labellise a function argument, first capture it with
#' # substitute():
#' fn <- function(x) expr_label(substitute(x))
#' fn(x:y)
#'
#' # Strings are encoded
#' expr_label("a\nb")
#'
#' # Names and expressions are quoted with ``
#' expr_label(quote(x))
#' expr_label(quote(a + b + c))
#'
#' # Long expressions are collapsed
#' expr_label(quote(foo({
#' 1 + 2
#' print(x)
#' })))
expr_label <- function(expr) {
  # Strings are encoded so escape sequences stay visible in the label.
  if (is.character(expr)) {
    return(encodeString(expr, quote = '"'))
  }
  # Other atomics (numbers, logicals, ...) are formatted as-is.
  if (is.atomic(expr)) {
    return(format(expr))
  }
  # Symbols are wrapped in backticks.
  if (is.name(expr)) {
    return(paste0("`", as.character(expr), "`"))
  }
  # Calls: deparse, collapsing multi-line output to `fn(...)`.
  lines <- deparse(expr)
  if (length(lines) > 1) {
    collapsed_call <- lang(expr[[1]], quote(...))
    lines <- paste(deparse(collapsed_call), collapse = "\n")
  }
  paste0("`", lines, "`")
}
#' @export
#' @rdname expr_label
#' @param width Width of each line.
#' @param nlines Maximum number of lines to extract.
expr_text <- function(expr, width = 60L, nlines = Inf) {
  # Deparse into one string per line, wrapping around `width` characters.
  out_lines <- deparse(expr, width.cutoff = width)
  # Truncate overly long output, marking the cut with an ellipsis line.
  if (length(out_lines) > nlines) {
    kept <- out_lines[seq_len(nlines - 1)]
    out_lines <- c(kept, "...")
  }
  paste0(out_lines, collapse = "\n")
}
#' Set and get an expression.
#'
#' These helpers are useful to make your function work generically
#' with tidy quotes and raw expressions. First call `get_expr()` to
#' extract an expression. Once you're done processing the expression,
#' call `set_expr()` on the original object to update the expression.
#' You can return the result of `set_expr()`, either a formula or an
#' expression depending on the input type. Note that `set_expr()` does
#' not change its input, it creates a new object.
#'
#' `as_generic_expr()` is helpful when your function accepts frames as
#' input but should be able to call `set_expr()` at the
#' end. `set_expr()` does not work on frames because it does not make
#' sense to modify this kind of object. In this case, first call
#' `as_generic_expr()` to transform the input to an object that
#' supports `set_expr()`. It transforms frame objects to a raw
#' expression, and return formula quotes and raw expressions without
#' changes.
#'
#' @param x An expression or one-sided formula. In addition,
#' `set_expr()` and `as_generic_expr()` accept frames.
#' @param value An updated expression.
#' @return The updated original input for `set_expr()`. A raw
#' expression for `get_expr()`. `as_generic_expr()` returns an
#' expression or formula quote.
#' @export
#' @examples
#' f <- ~foo(bar)
#' e <- quote(foo(bar))
#' frame <- identity(identity(eval_frame()))
#'
#' get_expr(f)
#' get_expr(e)
#' get_expr(frame)
#'
#' as_generic_expr(f)
#' as_generic_expr(e)
#' as_generic_expr(frame)
#'
#' set_expr(f, quote(baz))
#' set_expr(e, quote(baz))
#' @md
set_expr <- function(x, value) {
  # Raw expressions have no wrapper to preserve: the new expression
  # simply replaces the old one.
  if (!is_fquote(x)) {
    return(value)
  }
  # Formula quotes keep their environment; only the RHS is swapped.
  f_rhs(x) <- value
  x
}
#' @rdname set_expr
#' @export
get_expr <- function(x) {
  # Formula quote: the expression lives on the right-hand side.
  if (is_fquote(x)) {
    return(f_rhs(x))
  }
  # Frame objects carry their expression in the `expr` field.
  if (inherits(x, "frame")) {
    return(x$expr)
  }
  # Anything else is treated as a raw expression and returned unchanged.
  x
}
#' @rdname set_expr
#' @export
as_generic_expr <- function(x) {
  # Frames cannot be modified in place, so expose their raw expression;
  # formula quotes and raw expressions already support set_expr() and
  # pass through unchanged.
  if (is_frame(x)) x$expr else x
}
# More permissive than is_tidy_quote()
is_fquote <- function(x) {
  # A formula quote is a two-element call whose head is the `~` symbol,
  # i.e. a one-sided formula (class attributes are deliberately ignored).
  if (typeof(x) != "language") {
    return(FALSE)
  }
  identical(node_car(x), quote(`~`)) && length(x) == 2L
}
| /R/lang-expr.R | no_license | jmpasmoi/rlang | R | false | false | 6,374 | r | #' Evaluate an expression in an environment.
#'
#' \code{expr_eval()} is a lightweight version of the base function
#' \code{\link[base]{eval}()}. It does not accept supplementary data,
#' but it is more efficient and does not clutter the evaluation stack.
#' Technically, \code{expr_eval()} is a simple wrapper around the C
#' function \code{Rf_eval()}.
#'
#' \code{base::eval()} inserts two call frames in the stack, the
#' second of which features the \code{envir} parameter as frame
#' environment. This may unnecessarily clutter the evaluation stack
#' and it can change evaluation semantics with stack sensitive
#' functions in the case where \code{env} is an evaluation environment
#' of a stack frame (see \code{\link{eval_stack}()}). Since the base
#' function \code{eval()} creates a new evaluation context with
#' \code{env} as frame environment there are actually two contexts
#' with the same evaluation environment on the stack when \code{expr}
#' is evaluated. Thus, any command that looks up frames on the stack
#' (stack sensitive functions) may find the parasite frame set up by
#' \code{eval()} rather than the original frame targetted by
#' \code{env}. As a result, code evaluated with \code{base::eval()}
#' does not have the property of stack consistency, and stack
#' sensitive functions like \code{\link[base]{return}()},
#' \code{\link[base]{parent.frame}()} may return misleading results.
#'
#' @param expr An expression to evaluate.
#' @param env The environment in which to evaluate the expression.
#' @useDynLib rlang rlang_eval
#' @seealso with_env
#' @export
#' @examples
#' # expr_eval() works just like base::eval():
#' env <- new_env(data = list(foo = "bar"))
#' expr <- quote(foo)
#' expr_eval(expr, env)
#'
#' # To explore the consequences of stack inconsistent semantics, let's
#' # create a function that evaluates `parent.frame()` deep in the call
#' # stack, in an environment corresponding to a frame in the middle of
#' # the stack. For consistency with R's lazy evaluation semantics, we'd
#' # expect to get the caller of that frame as result:
#' fn <- function(eval_fn) {
#' list(
#' returned_env = middle(eval_fn),
#' actual_env = env()
#' )
#' }
#' middle <- function(eval_fn) {
#' deep(eval_fn, env())
#' }
#' deep <- function(eval_fn, eval_env) {
#' expr <- quote(parent.frame())
#' eval_fn(expr, eval_env)
#' }
#'
#' # With expr_eval(), we do get the expected environment:
#' fn(rlang::expr_eval)
#'
#' # But that's not the case with base::eval():
#' fn(base::eval)
#'
#' # Another difference of expr_eval() compared to base::eval() is
#' # that it does not insert parasite frames in the evaluation stack:
#' get_stack <- quote(identity(eval_stack()))
#' expr_eval(get_stack)
#' eval(get_stack)
# Direct bridge to the C-level evaluator (Rf_eval, via the registered
# rlang_eval routine): evaluates `expr` in `env` without pushing the extra
# R-level call frames that base::eval() adds (see roxygen details above).
expr_eval <- function(expr, env = parent.frame()) {
  .Call(rlang_eval, expr, env)
}
#' Turn an expression to a label.
#'
#' \code{expr_text()} turns the expression into a single string;
#' \code{expr_label()} formats it nicely for use in messages.
#'
#' @param expr An expression to labellise.
#' @export
#' @examples
#' # To labellise a function argument, first capture it with
#' # substitute():
#' fn <- function(x) expr_label(substitute(x))
#' fn(x:y)
#'
#' # Strings are encoded
#' expr_label("a\nb")
#'
#' # Names and expressions are quoted with ``
#' expr_label(quote(x))
#' expr_label(quote(a + b + c))
#'
#' # Long expressions are collapsed
#' expr_label(quote(foo({
#' 1 + 2
#' print(x)
#' })))
expr_label <- function(expr) {
  # Format an expression for display in messages: strings are encoded with
  # escaped quotes, other atomics are formatted as-is, and names/calls are
  # wrapped in backticks.  Multi-line calls are collapsed to `fn(...)`.
  if (is.character(expr)) {
    return(encodeString(expr, quote = '"'))
  }
  if (is.atomic(expr)) {
    return(format(expr))
  }
  if (is.name(expr)) {
    return(paste0("`", as.character(expr), "`"))
  }
  lines <- deparse(expr)
  if (length(lines) > 1) {
    # Long expressions: keep only the call head with a "..." placeholder.
    collapsed_call <- lang(expr[[1]], quote(...))
    lines <- paste(deparse(collapsed_call), collapse = "\n")
  }
  paste0("`", lines, "`")
}
#' @export
#' @rdname expr_label
#' @param width Width of each line.
#' @param nlines Maximum number of lines to extract.
expr_text <- function(expr, width = 60L, nlines = Inf) {
  # Deparse `expr` into source lines of at most `width` characters, then
  # truncate to `nlines` lines (the final kept line becomes "..." when
  # truncation occurs) and join with newlines.
  lines <- deparse(expr, width.cutoff = width)
  if (length(lines) > nlines) {
    kept <- seq_len(nlines - 1)
    lines <- c(lines[kept], "...")
  }
  paste0(lines, collapse = "\n")
}
#' Set and get an expression.
#'
#' These helpers are useful to make your function work generically
#' with tidy quotes and raw expressions. First call `get_expr()` to
#' extract an expression. Once you're done processing the expression,
#' call `set_expr()` on the original object to update the expression.
#' You can return the result of `set_expr()`, either a formula or an
#' expression depending on the input type. Note that `set_expr()` does
#' not change its input, it creates a new object.
#'
#' `as_generic_expr()` is helpful when your function accepts frames as
#' input but should be able to call `set_expr()` at the
#' end. `set_expr()` does not work on frames because it does not make
#' sense to modify this kind of object. In this case, first call
#' `as_generic_expr()` to transform the input to an object that
#' supports `set_expr()`. It transforms frame objects to a raw
#' expression, and return formula quotes and raw expressions without
#' changes.
#'
#' @param x An expression or one-sided formula. In addition,
#' `set_expr()` and `as_generic_expr()` accept frames.
#' @param value An updated expression.
#' @return The updated original input for `set_expr()`. A raw
#' expression for `get_expr()`. `as_generic_expr()` returns an
#' expression or formula quote.
#' @export
#' @examples
#' f <- ~foo(bar)
#' e <- quote(foo(bar))
#' frame <- identity(identity(eval_frame()))
#'
#' get_expr(f)
#' get_expr(e)
#' get_expr(frame)
#'
#' as_generic_expr(f)
#' as_generic_expr(e)
#' as_generic_expr(frame)
#'
#' set_expr(f, quote(baz))
#' set_expr(e, quote(baz))
#' @md
set_expr <- function(x, value) {
  # Formula quotes get their RHS replaced in place (a modified copy is
  # returned); any other input is simply superseded by the new expression.
  if (!is_fquote(x)) {
    return(value)
  }
  f_rhs(x) <- value
  x
}
#' @rdname set_expr
#' @export
get_expr <- function(x) {
  # Formula quote -> its right-hand side; frame object -> its recorded
  # `expr`; anything else is already a raw expression and passes through.
  if (is_fquote(x)) {
    return(f_rhs(x))
  }
  if (inherits(x, "frame")) {
    return(x$expr)
  }
  x
}
#' @rdname set_expr
#' @export
as_generic_expr <- function(x) {
  # Frames are reduced to their recorded expression; formula quotes and raw
  # expressions are returned unchanged (both already support set_expr()).
  if (is_frame(x)) x$expr else x
}
# More permissive than is_tidy_quote()
is_fquote <- function(x) {
  # A one-sided formula quote: a call whose head (CAR) is `~` and which has
  # exactly one argument (the RHS), i.e. total length 2.
  is_language <- typeof(x) == "language"
  is_language && length(x) == 2L && identical(node_car(x), quote(`~`))
}
|
#'
#' @title Read distributions as a csv.
#'
#' @description Read distributions as a csv with two columns species and area
#'
#' @param data.File Path to the input file (space-delimited text with a header).
#'
Read.Data <-
  function (data.File) {
    # Read the space-delimited distribution file (columns: species, area).
    # FIX: header = TRUE replaces the reassignable alias `T`.  The
    # sep = " " is intentional even though read.csv() is the reader.
    initial.Distribution <- read.csv(data.File, header = TRUE, sep = " ")
    # Cross-tabulate species (rows) by area (columns) into a count table.
    final.Distribution1 <- table(initial.Distribution$species,
                                 initial.Distribution$area)
    final.Distribution2 <- as.data.frame.array(final.Distribution1)
    # Append the species labels as an explicit column; levels() yields them
    # in the same sorted order as the table's rows.
    final.Distribution2$species <- levels(as.factor(initial.Distribution$species))
    return(final.Distribution2)
  }
| /R/Read.data.R | no_license | vivianaayus/jrich | R | false | false | 591 | r | #'
#' @title Read distributions as a csv.
#'
#' @description Read distributions as a csv with two columns species and area
#'
#' @param data.File Path to the input file (space-delimited text with a header).
#'
Read.Data <-
  function (data.File) {
    # Read the space-delimited distribution file (columns: species, area).
    # FIX: header = TRUE replaces the reassignable alias `T`.  The
    # sep = " " is intentional even though read.csv() is the reader.
    initial.Distribution <- read.csv(data.File, header = TRUE, sep = " ")
    # Cross-tabulate species (rows) by area (columns) into a count table.
    final.Distribution1 <- table(initial.Distribution$species,
                                 initial.Distribution$area)
    final.Distribution2 <- as.data.frame.array(final.Distribution1)
    # Append the species labels as an explicit column; levels() yields them
    # in the same sorted order as the table's rows.
    final.Distribution2$species <- levels(as.factor(initial.Distribution$species))
    return(final.Distribution2)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/robomaker_operations.R
\name{robomaker_create_simulation_job}
\alias{robomaker_create_simulation_job}
\title{Creates a simulation job}
\usage{
robomaker_create_simulation_job(clientRequestToken, outputLocation,
loggingConfig, maxJobDurationInSeconds, iamRole, failureBehavior,
robotApplications, simulationApplications, dataSources, tags, vpcConfig,
compute)
}
\arguments{
\item{clientRequestToken}{Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request.}
\item{outputLocation}{Location for output files generated by the simulation job.}
\item{loggingConfig}{The logging configuration.}
\item{maxJobDurationInSeconds}{[required] The maximum simulation job duration in seconds (up to 14 days or
1,209,600 seconds). When \code{maxJobDurationInSeconds} is reached, the
simulation job status will transition to \code{Completed}.}
\item{iamRole}{[required] The IAM role name that allows the simulation instance to call the AWS
APIs that are specified in its associated policies on your behalf. This
is how credentials are passed in to your simulation job.}
\item{failureBehavior}{The failure behavior of the simulation job.
\subsection{Continue}{
Restart the simulation job in the same host instance.
}
\subsection{Fail}{
Stop the simulation job and terminate the instance.
}}
\item{robotApplications}{The robot application to use in the simulation job.}
\item{simulationApplications}{The simulation application to use in the simulation job.}
\item{dataSources}{Specify data sources to mount read-only files from S3 into your
simulation. These files are available under
\verb{/opt/robomaker/datasources/data_source_name}.
There is a limit of 100 files and a combined size of 25GB for all
\code{DataSourceConfig} objects.}
\item{tags}{A map that contains tag keys and tag values that are attached to the
simulation job.}
\item{vpcConfig}{If your simulation job accesses resources in a VPC, you provide this
parameter identifying the list of security group IDs and subnet IDs.
These must belong to the same VPC. You must provide at least one
security group and one subnet ID.}
\item{compute}{Compute information for the simulation job.}
}
\value{
A list with the following syntax:\preformatted{list(
arn = "string",
status = "Pending"|"Preparing"|"Running"|"Restarting"|"Completed"|"Failed"|"RunningFailed"|"Terminating"|"Terminated"|"Canceled",
lastStartedAt = as.POSIXct(
"2015-01-01"
),
lastUpdatedAt = as.POSIXct(
"2015-01-01"
),
failureBehavior = "Fail"|"Continue",
failureCode = "InternalServiceError"|"RobotApplicationCrash"|"SimulationApplicationCrash"|"BadPermissionsRobotApplication"|"BadPermissionsSimulationApplication"|"BadPermissionsS3Object"|"BadPermissionsS3Output"|"BadPermissionsCloudwatchLogs"|"SubnetIpLimitExceeded"|"ENILimitExceeded"|"BadPermissionsUserCredentials"|"InvalidBundleRobotApplication"|"InvalidBundleSimulationApplication"|"InvalidS3Resource"|"LimitExceeded"|"MismatchedEtag"|"RobotApplicationVersionMismatchedEtag"|"SimulationApplicationVersionMismatchedEtag"|"ResourceNotFound"|"RequestThrottled"|"BatchTimedOut"|"BatchCanceled"|"InvalidInput"|"WrongRegionS3Bucket"|"WrongRegionS3Output"|"WrongRegionRobotApplication"|"WrongRegionSimulationApplication",
clientRequestToken = "string",
outputLocation = list(
s3Bucket = "string",
s3Prefix = "string"
),
loggingConfig = list(
recordAllRosTopics = TRUE|FALSE
),
maxJobDurationInSeconds = 123,
simulationTimeMillis = 123,
iamRole = "string",
robotApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
)
)
),
simulationApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
),
worldConfigs = list(
list(
world = "string"
)
)
)
),
dataSources = list(
list(
name = "string",
s3Bucket = "string",
s3Keys = list(
list(
s3Key = "string",
etag = "string"
)
)
)
),
tags = list(
"string"
),
vpcConfig = list(
subnets = list(
"string"
),
securityGroups = list(
"string"
),
vpcId = "string",
assignPublicIp = TRUE|FALSE
),
compute = list(
simulationUnitLimit = 123
)
)
}
}
\description{
Creates a simulation job.
After 90 days, simulation jobs expire and will be deleted. They will no
longer be accessible.
}
\section{Request syntax}{
\preformatted{svc$create_simulation_job(
clientRequestToken = "string",
outputLocation = list(
s3Bucket = "string",
s3Prefix = "string"
),
loggingConfig = list(
recordAllRosTopics = TRUE|FALSE
),
maxJobDurationInSeconds = 123,
iamRole = "string",
failureBehavior = "Fail"|"Continue",
robotApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
)
)
),
simulationApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
),
worldConfigs = list(
list(
world = "string"
)
)
)
),
dataSources = list(
list(
name = "string",
s3Bucket = "string",
s3Keys = list(
"string"
)
)
),
tags = list(
"string"
),
vpcConfig = list(
subnets = list(
"string"
),
securityGroups = list(
"string"
),
assignPublicIp = TRUE|FALSE
),
compute = list(
simulationUnitLimit = 123
)
)
}
}
\keyword{internal}
| /cran/paws.robotics/man/robomaker_create_simulation_job.Rd | permissive | paws-r/paws | R | false | true | 7,300 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/robomaker_operations.R
\name{robomaker_create_simulation_job}
\alias{robomaker_create_simulation_job}
\title{Creates a simulation job}
\usage{
robomaker_create_simulation_job(clientRequestToken, outputLocation,
loggingConfig, maxJobDurationInSeconds, iamRole, failureBehavior,
robotApplications, simulationApplications, dataSources, tags, vpcConfig,
compute)
}
\arguments{
\item{clientRequestToken}{Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request.}
\item{outputLocation}{Location for output files generated by the simulation job.}
\item{loggingConfig}{The logging configuration.}
\item{maxJobDurationInSeconds}{[required] The maximum simulation job duration in seconds (up to 14 days or
1,209,600 seconds). When \code{maxJobDurationInSeconds} is reached, the
simulation job status will transition to \code{Completed}.}
\item{iamRole}{[required] The IAM role name that allows the simulation instance to call the AWS
APIs that are specified in its associated policies on your behalf. This
is how credentials are passed in to your simulation job.}
\item{failureBehavior}{The failure behavior of the simulation job.
\subsection{Continue}{
Restart the simulation job in the same host instance.
}
\subsection{Fail}{
Stop the simulation job and terminate the instance.
}}
\item{robotApplications}{The robot application to use in the simulation job.}
\item{simulationApplications}{The simulation application to use in the simulation job.}
\item{dataSources}{Specify data sources to mount read-only files from S3 into your
simulation. These files are available under
\verb{/opt/robomaker/datasources/data_source_name}.
There is a limit of 100 files and a combined size of 25GB for all
\code{DataSourceConfig} objects.}
\item{tags}{A map that contains tag keys and tag values that are attached to the
simulation job.}
\item{vpcConfig}{If your simulation job accesses resources in a VPC, you provide this
parameter identifying the list of security group IDs and subnet IDs.
These must belong to the same VPC. You must provide at least one
security group and one subnet ID.}
\item{compute}{Compute information for the simulation job.}
}
\value{
A list with the following syntax:\preformatted{list(
arn = "string",
status = "Pending"|"Preparing"|"Running"|"Restarting"|"Completed"|"Failed"|"RunningFailed"|"Terminating"|"Terminated"|"Canceled",
lastStartedAt = as.POSIXct(
"2015-01-01"
),
lastUpdatedAt = as.POSIXct(
"2015-01-01"
),
failureBehavior = "Fail"|"Continue",
failureCode = "InternalServiceError"|"RobotApplicationCrash"|"SimulationApplicationCrash"|"BadPermissionsRobotApplication"|"BadPermissionsSimulationApplication"|"BadPermissionsS3Object"|"BadPermissionsS3Output"|"BadPermissionsCloudwatchLogs"|"SubnetIpLimitExceeded"|"ENILimitExceeded"|"BadPermissionsUserCredentials"|"InvalidBundleRobotApplication"|"InvalidBundleSimulationApplication"|"InvalidS3Resource"|"LimitExceeded"|"MismatchedEtag"|"RobotApplicationVersionMismatchedEtag"|"SimulationApplicationVersionMismatchedEtag"|"ResourceNotFound"|"RequestThrottled"|"BatchTimedOut"|"BatchCanceled"|"InvalidInput"|"WrongRegionS3Bucket"|"WrongRegionS3Output"|"WrongRegionRobotApplication"|"WrongRegionSimulationApplication",
clientRequestToken = "string",
outputLocation = list(
s3Bucket = "string",
s3Prefix = "string"
),
loggingConfig = list(
recordAllRosTopics = TRUE|FALSE
),
maxJobDurationInSeconds = 123,
simulationTimeMillis = 123,
iamRole = "string",
robotApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
)
)
),
simulationApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
),
worldConfigs = list(
list(
world = "string"
)
)
)
),
dataSources = list(
list(
name = "string",
s3Bucket = "string",
s3Keys = list(
list(
s3Key = "string",
etag = "string"
)
)
)
),
tags = list(
"string"
),
vpcConfig = list(
subnets = list(
"string"
),
securityGroups = list(
"string"
),
vpcId = "string",
assignPublicIp = TRUE|FALSE
),
compute = list(
simulationUnitLimit = 123
)
)
}
}
\description{
Creates a simulation job.
After 90 days, simulation jobs expire and will be deleted. They will no
longer be accessible.
}
\section{Request syntax}{
\preformatted{svc$create_simulation_job(
clientRequestToken = "string",
outputLocation = list(
s3Bucket = "string",
s3Prefix = "string"
),
loggingConfig = list(
recordAllRosTopics = TRUE|FALSE
),
maxJobDurationInSeconds = 123,
iamRole = "string",
failureBehavior = "Fail"|"Continue",
robotApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
)
)
),
simulationApplications = list(
list(
application = "string",
applicationVersion = "string",
launchConfig = list(
packageName = "string",
launchFile = "string",
environmentVariables = list(
"string"
),
portForwardingConfig = list(
portMappings = list(
list(
jobPort = 123,
applicationPort = 123,
enableOnPublicIp = TRUE|FALSE
)
)
),
streamUI = TRUE|FALSE
),
worldConfigs = list(
list(
world = "string"
)
)
)
),
dataSources = list(
list(
name = "string",
s3Bucket = "string",
s3Keys = list(
"string"
)
)
),
tags = list(
"string"
),
vpcConfig = list(
subnets = list(
"string"
),
securityGroups = list(
"string"
),
assignPublicIp = TRUE|FALSE
),
compute = list(
simulationUnitLimit = 123
)
)
}
}
\keyword{internal}
|
# EM Variable Selection (EMVS) for logistic regression.
#
# For each spike variance nu_0 in `v0s`, runs an EM loop that alternates:
#   E-step: per-coefficient inclusion probabilities p.Star (slab vs spike
#           normal densities, each raised to the power `temp`) and the
#           resulting prior precision weights d.Star;
#   M-step: beta update via CSDCD.logistic() (defined elsewhere in this
#           package), then closed-form sigma and theta updates.
# Iterates until max |beta.new - beta.Current| <= epsilon, capped at 20
# EM iterations per spike value.
#
# NOTE(review): `theta.inital` (sic) is the published argument name and is
# kept for backward compatibility.  Returns a list of MAP betas, spike/slab
# intersection points, sigma/theta estimates, inclusion probabilities,
# iteration counts and the scanned v0s.
EMVS.logit=function(y,x,epsilon=.0005,v0s=5,nu.1=1000,nu.gam=1,lambda.var=.001,a=1,b=ncol(x),
beta.initial=rep(1,p),sigma.initial=1,theta.inital=.5,temp=1,p=ncol(x),n=nrow(x),SDCD.length=50){
# Guard against an empty starting vector.
if(length(beta.initial)==0){
beta.initial=rep(1,p)
}
L=length(v0s)
# Progress banner: one "|" per spike value is printed as work completes.
cat("\n")
cat("\n","Running Logit across v0's","\n")
cat(rep("",times=(L+1)),sep="|")
cat("\n")
# Per-spike result holders.
intersects=numeric(L) # intersection points between posterior weighted spike and slab
log_post=numeric(L) # logarithm of the g-function models associated with v0s
sigma.Vec=numeric(L)
theta.Vec=numeric(L)
log_post=numeric(L)
index.Vec=numeric(L)
beta.Vec=matrix(0,L,p) # L x p matrix of MAP beta estimates for each spike
p.Star.Vec=matrix(0,L,p) # L x p matrix of conditional posterior inclusion probabilities
for (i in (1:L)){
nu.0=v0s[i]
beta.Current=beta.initial
beta.new=beta.initial
sigma.EM=sigma.initial
theta.EM=theta.inital
# Force at least one EM iteration.
eps=epsilon+1
iter.index=1
while(eps>epsilon && iter.index<20){
# ---- E step: inclusion probabilities and precision weights ----
d.Star=rep(NA,p)
p.Star=rep(NA,p)
for(j in 1:p){
# Tempered slab (nu.1) vs spike (nu.0) densities at the current beta_j.
gam.one=dnorm(beta.Current[j],0,sigma.EM*sqrt(nu.1))**temp*theta.EM**temp
gam.zero=dnorm(beta.Current[j],0,sigma.EM*sqrt(nu.0))**temp*(1-theta.EM)**temp
p.Star[j]=gam.one/(gam.one+gam.zero)
d.Star[j]=((1-p.Star[j])/nu.0)+(p.Star[j]/nu.1)
}
#cat("max p.Star", max(p.Star),"\n")
#cat("d.Star.EM: ", d.Star[1:5],"\n")
############### M STEP #######################
d.Star.Mat=diag(d.Star,p)
beta.Current=rep(NA,p)
count.while=0
# Retry the coordinate-descent solver until it returns an NA-free
# coefficient vector.
while(is.na(min(beta.Current))){
beta.Current=CSDCD.logistic(p,n,x,y,d.Star,SDCD.length)
count.while=count.while+1
#cat("This is count.while:",count.while,"\n")
}
######## VARIANCE FORUMULA IS DIFFERENT FROM CONTINUOUS AND PROBIT CASE ###########
#sigma.EM[i]=sqrt((sum(log(1+exp(-y*x%*%beta.EM[i,])))+sum((sqrt(d.Star.Mat)%*%beta.EM[i,])**2)+lambda.var*nu.gam)/(n+p+nu.gam))
sigma.EM=sqrt((sum((sqrt(d.Star.Mat)%*%beta.Current)**2)+lambda.var*nu.gam)/(n+p+nu.gam+2))
# Closed-form theta update using the a, b hyperparameters.
theta.EM=(sum(p.Star)+a-1)/(a+b+p-2)
# Convergence measured on the max absolute change in beta.
eps=max(abs(beta.new-beta.Current))
#print(eps)
beta.new=beta.Current
iter.index=iter.index+1
}
# Record converged quantities for this spike value.
p.Star.Vec[i,]=p.Star
beta.Vec[i,]=beta.new
sigma.Vec[i]=sigma.EM
theta.Vec[i]=theta.EM
index.Vec[i]=iter.index
index=p.Star>0.5
# Spike/slab weighted-density intersection point for this v0 (only defined
# when the prior odds w are positive).
c=sqrt(nu.1/v0s[i])
w=(1-theta.Vec[i])/theta.Vec[i]
if (w>0){
intersects[i]=sigma.Vec[i]*sqrt(v0s[i])*sqrt(2*log(w*c)*c^2/(c^2-1))}else{
intersects[i]=0}
cat("|",sep="")
}
list=list(betas=beta.Vec,intersects=intersects,sigmas=sigma.Vec,
niters=index.Vec,posts=p.Star.Vec,thetas=theta.Vec,v0s=v0s)
return(list)
} | /BinaryEMVS/R/EMVS.logit.R | no_license | ingted/R-Examples | R | false | false | 3,033 | r | EMVS.logit=function(y,x,epsilon=.0005,v0s=5,nu.1=1000,nu.gam=1,lambda.var=.001,a=1,b=ncol(x),
beta.initial=rep(1,p),sigma.initial=1,theta.inital=.5,temp=1,p=ncol(x),n=nrow(x),SDCD.length=50){
if(length(beta.initial)==0){
beta.initial=rep(1,p)
}
L=length(v0s)
cat("\n")
cat("\n","Running Logit across v0's","\n")
cat(rep("",times=(L+1)),sep="|")
cat("\n")
intersects=numeric(L) # intersection points between posterior weighted spike and slab
log_post=numeric(L) # logarithm of the g-function models associated with v0s
sigma.Vec=numeric(L)
theta.Vec=numeric(L)
log_post=numeric(L)
index.Vec=numeric(L)
beta.Vec=matrix(0,L,p) # L x p matrix of MAP beta estimates for each spike
p.Star.Vec=matrix(0,L,p) # L x p matrix of conditional posterior inclusion probabilities
for (i in (1:L)){
nu.0=v0s[i]
beta.Current=beta.initial
beta.new=beta.initial
sigma.EM=sigma.initial
theta.EM=theta.inital
eps=epsilon+1
iter.index=1
while(eps>epsilon && iter.index<20){
d.Star=rep(NA,p)
p.Star=rep(NA,p)
for(j in 1:p){
gam.one=dnorm(beta.Current[j],0,sigma.EM*sqrt(nu.1))**temp*theta.EM**temp
gam.zero=dnorm(beta.Current[j],0,sigma.EM*sqrt(nu.0))**temp*(1-theta.EM)**temp
p.Star[j]=gam.one/(gam.one+gam.zero)
d.Star[j]=((1-p.Star[j])/nu.0)+(p.Star[j]/nu.1)
}
#cat("max p.Star", max(p.Star),"\n")
#cat("d.Star.EM: ", d.Star[1:5],"\n")
############### M STEP #######################
d.Star.Mat=diag(d.Star,p)
beta.Current=rep(NA,p)
count.while=0
while(is.na(min(beta.Current))){
beta.Current=CSDCD.logistic(p,n,x,y,d.Star,SDCD.length)
count.while=count.while+1
#cat("This is count.while:",count.while,"\n")
}
######## VARIANCE FORUMULA IS DIFFERENT FROM CONTINUOUS AND PROBIT CASE ###########
#sigma.EM[i]=sqrt((sum(log(1+exp(-y*x%*%beta.EM[i,])))+sum((sqrt(d.Star.Mat)%*%beta.EM[i,])**2)+lambda.var*nu.gam)/(n+p+nu.gam))
sigma.EM=sqrt((sum((sqrt(d.Star.Mat)%*%beta.Current)**2)+lambda.var*nu.gam)/(n+p+nu.gam+2))
theta.EM=(sum(p.Star)+a-1)/(a+b+p-2)
eps=max(abs(beta.new-beta.Current))
#print(eps)
beta.new=beta.Current
iter.index=iter.index+1
}
p.Star.Vec[i,]=p.Star
beta.Vec[i,]=beta.new
sigma.Vec[i]=sigma.EM
theta.Vec[i]=theta.EM
index.Vec[i]=iter.index
index=p.Star>0.5
c=sqrt(nu.1/v0s[i])
w=(1-theta.Vec[i])/theta.Vec[i]
if (w>0){
intersects[i]=sigma.Vec[i]*sqrt(v0s[i])*sqrt(2*log(w*c)*c^2/(c^2-1))}else{
intersects[i]=0}
cat("|",sep="")
}
list=list(betas=beta.Vec,intersects=intersects,sigmas=sigma.Vec,
niters=index.Vec,posts=p.Star.Vec,thetas=theta.Vec,v0s=v0s)
return(list)
} |
# Compares mRNA vs protein age-slope estimates for isoform (duplicated)
# genes in the JAC DO kidney data, and lists proteins lacking mRNA
# annotation.  NOTE(review): the hard-coded setwd()/paths tie this script
# to one cluster layout -- confirm before reuse.
library(dplyr)
setwd("/projects/korstanje-lab/ytakemon/JAC_DO_Kidney")
load("./RNAseq_data/DO188b_kidney_noprobs.RData")
# Counts noted by the original author (provenance unclear):
# Protein: 6716
# Pairs: 6667
# Diff : 49
# annot.mrna: 22312
# expr.mrna : 22243
# diff : 69
# protein
# Split protein annotations by whether a matching mRNA id exists.
pairs <- annot.protein[annot.protein$gene_id %in% annot.mrna$id,]
nopairs <- annot.protein[!(annot.protein$gene_id %in% annot.mrna$id),]
# What are the proteins that do not have mRNA information?
write.csv(nopairs, file = "./AnnotProt_notin_pair.csv", row.names = FALSE, quote = FALSE)
# What are the slopes of the proteins that are isoforms? do they switch quadrants?
dup <- annot.mrna[annot.mrna$duplicated == TRUE,]
dup_gene <- unique(dup$id)
df <- read.csv("./Anova_output/kidney_anova_slope_output.csv")
df <- select(df, id, gene_id, symbol, m.mRNA_Age.Sex, m.Prot_Age.Sex, p.mRNA_Age.Sex, p.Prot_Age.Sex)
iso <- df[df$gene_id %in% dup_gene,]
iso <- arrange(iso, gene_id)
# Flag which quadrant of the (mRNA slope, protein slope) plane each isoform
# falls in; slopes exactly 0 leave all four flags FALSE.
iso$quadI <- ((iso$m.mRNA_Age.Sex > 0) & (iso$m.Prot_Age.Sex > 0))
iso$quadII <- ((iso$m.mRNA_Age.Sex < 0) & (iso$m.Prot_Age.Sex > 0))
iso$quadIII <- ((iso$m.mRNA_Age.Sex < 0) & (iso$m.Prot_Age.Sex < 0))
iso$quadIV <- ((iso$m.mRNA_Age.Sex > 0) & (iso$m.Prot_Age.Sex < 0))
write.csv(iso, file = "./Isoforms_slopes.csv", row.names = FALSE, quote = FALSE)
| /SpecificQueries/IsoformSlopeVariance.R | permissive | ytakemon/JAC_DO_Kidney | R | false | false | 1,265 | r | library(dplyr)
setwd("/projects/korstanje-lab/ytakemon/JAC_DO_Kidney")
load("./RNAseq_data/DO188b_kidney_noprobs.RData")
# Protein: 6716
# Pairs: 6667
# Diff : 49
# annot.mrna: 22312
# expr.mrna : 22243
# diff : 69
# protein
pairs <- annot.protein[annot.protein$gene_id %in% annot.mrna$id,]
nopairs <- annot.protein[!(annot.protein$gene_id %in% annot.mrna$id),]
# What are the proteins that do not have mRNA information?
write.csv(nopairs, file = "./AnnotProt_notin_pair.csv", row.names = FALSE, quote = FALSE)
# What are the slopes of the proteins that are isoforms? do they switch quadrants?
dup <- annot.mrna[annot.mrna$duplicated == TRUE,]
dup_gene <- unique(dup$id)
df <- read.csv("./Anova_output/kidney_anova_slope_output.csv")
df <- select(df, id, gene_id, symbol, m.mRNA_Age.Sex, m.Prot_Age.Sex, p.mRNA_Age.Sex, p.Prot_Age.Sex)
iso <- df[df$gene_id %in% dup_gene,]
iso <- arrange(iso, gene_id)
iso$quadI <- ((iso$m.mRNA_Age.Sex > 0) & (iso$m.Prot_Age.Sex > 0))
iso$quadII <- ((iso$m.mRNA_Age.Sex < 0) & (iso$m.Prot_Age.Sex > 0))
iso$quadIII <- ((iso$m.mRNA_Age.Sex < 0) & (iso$m.Prot_Age.Sex < 0))
iso$quadIV <- ((iso$m.mRNA_Age.Sex > 0) & (iso$m.Prot_Age.Sex < 0))
write.csv(iso, file = "./Isoforms_slopes.csv", row.names = FALSE, quote = FALSE)
|
# Auto-extracted examples from spatstat's quantess() help page.
# NOTE(review): relies on spatstat example datasets (letterR, bronzefilter,
# bei, bei.extra) being attached by library(spatstat).
library(spatstat)
### Name: quantess
### Title: Quantile Tessellation
### Aliases: quantess quantess.owin quantess.ppp quantess.im
### Keywords: spatial manip
### ** Examples
# Quantile tessellations of a window and of a point pattern, split on x.
plot(quantess(letterR, "x", 5))
plot(quantess(bronzefilter, "x", 6))
points(unmark(bronzefilter))
# Widen the right margin for the colour ribbon; restored at the end.
opa <- par(mar=c(0,0,2,5))
# Tessellate by elevation quartiles, for the window and for the pattern.
A <- quantess(Window(bei), bei.extra$elev, 4)
plot(A, ribargs=list(las=1))
B <- quantess(bei, bei.extra$elev, 4)
tilenames(B) <- paste(spatstat.utils::ordinal(1:4), "quartile")
plot(B, ribargs=list(las=1))
points(bei, pch=".", cex=2, col="white")
par(opa)
| /data/genthat_extracted_code/spatstat/examples/quantess.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 590 | r | library(spatstat)
### Name: quantess
### Title: Quantile Tessellation
### Aliases: quantess quantess.owin quantess.ppp quantess.im
### Keywords: spatial manip
### ** Examples
plot(quantess(letterR, "x", 5))
plot(quantess(bronzefilter, "x", 6))
points(unmark(bronzefilter))
opa <- par(mar=c(0,0,2,5))
A <- quantess(Window(bei), bei.extra$elev, 4)
plot(A, ribargs=list(las=1))
B <- quantess(bei, bei.extra$elev, 4)
tilenames(B) <- paste(spatstat.utils::ordinal(1:4), "quartile")
plot(B, ribargs=list(las=1))
points(bei, pch=".", cex=2, col="white")
par(opa)
|
# Exploratory ML exercise: fits glm/knn with caret on simulated noise data,
# then on t-test-selected predictors, then on dslabs tissue_gene_expression.
library(tidyverse)
library(caret)
# set.seed(1996) #if you are using R 3.5 or earlier
set.seed(1996, sample.kind="Rounding") #if you are using R 3.6 or later
# Pure-noise data: y is drawn independently of x.
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
# Baseline: fit on 100 randomly chosen columns.
x_subset <- x[ ,sample(p, 100)]
head(x_subset)
fit <- train(x_subset, y, method = "glm")
fit$results
# did these steps already... WOOw.
# install.packages("BiocManager")
# BiocManager::install("genefilter")
library(genefilter)
# Column-wise t-tests of every predictor against y; keep p <= 0.01.
tt <- colttests(x, y)
tt
head(tt)
pvals <- tt$p.value
ind <- which(pvals<=0.01)
ind
# where the idk??? begins
# set.seed(1996) #if you are using R 3.5 or earlier
set.seed(1996, sample.kind="Rounding") #if you are using R 3.6 or later
# Same seed as above, so x and y are regenerated identically; subset to the
# t-test-selected columns.
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
x_subset <- x[ ,ind]
zed <- x[,ind]
head(zed)
zed_fit <- train(zed, y, method = "glm")
zed_fit$results$Accuracy
# Tune knn over large neighbourhood sizes on the selected columns.
fit <- train(x_subset, y, method = "knn", tuneGrid = data.frame(k = seq(101, 301, 25)))
ggplot(fit)
kxk <- seq(1,7,2)  # NOTE(review): assigned but not used below
library(dslabs)
data("tissue_gene_expression")
head(tissue_gene_expression)
train(tissue_gene_expression$x, tissue_gene_expression$y, method = "knn", tuneGrid = data.frame(k = seq(1,7,2)))
| /ML/ML4-2a1.R | no_license | sboersma91/whatQQ | R | false | false | 1,319 | r | library(tidyverse)
library(caret)
# set.seed(1996) #if you are using R 3.5 or earlier
set.seed(1996, sample.kind="Rounding") #if you are using R 3.6 or later
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
x_subset <- x[ ,sample(p, 100)]
head(x_subset)
fit <- train(x_subset, y, method = "glm")
fit$results
# did these steps already... WOOw.
# install.packages("BiocManager")
# BiocManager::install("genefilter")
library(genefilter)
tt <- colttests(x, y)
tt
head(tt)
pvals <- tt$p.value
ind <- which(pvals<=0.01)
ind
# where the idk??? begins
# set.seed(1996) #if you are using R 3.5 or earlier
set.seed(1996, sample.kind="Rounding") #if you are using R 3.6 or later
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
x_subset <- x[ ,ind]
zed <- x[,ind]
head(zed)
zed_fit <- train(zed, y, method = "glm")
zed_fit$results$Accuracy
fit <- train(x_subset, y, method = "knn", tuneGrid = data.frame(k = seq(101, 301, 25)))
ggplot(fit)
kxk <- seq(1,7,2)
library(dslabs)
data("tissue_gene_expression")
head(tissue_gene_expression)
train(tissue_gene_expression$x, tissue_gene_expression$y, method = "knn", tuneGrid = data.frame(k = seq(1,7,2)))
|
#' Calendar API Objects
#' Manipulates events and other calendar data.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2016-09-04 00:00:52
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlecalendarv3.auto/R/calendar_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' Acl Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the collection
#' @param items List of rules on the access control list
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return Acl object
#'
#' @family Acl functions
#' @export
Acl <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # Build a Calendar API Acl collection object (see roxygen above).
    # BUG FIX: `calendar#acl` was a backtick-quoted *symbol*, so calling the
    # constructor looked up a variable named "calendar#acl" and failed with
    # "object 'calendar#acl' not found".  The API's `kind` discriminator is
    # a fixed string and must be a character literal.
    structure(list(etag = etag, items = items, kind = "calendar#acl",
                   nextPageToken = nextPageToken, nextSyncToken = nextSyncToken),
              class = "gar_Acl")
}
#' AclRule Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param AclRule.scope The \link{AclRule.scope} object or list of objects
#' @param etag ETag of the resource
#' @param id Identifier of the ACL rule
#' @param role The role assigned to the scope
#' @param scope The scope of the rule
#'
#' @return AclRule object
#'
#' @family AclRule functions
#' @export
AclRule <- function(AclRule.scope = NULL, etag = NULL, id = NULL, role = NULL, scope = NULL) {
    # Build a Calendar API AclRule object (see roxygen above).
    # BUG FIX: `calendar#aclRule` was a backtick-quoted *symbol* (a runtime
    # variable lookup that fails with "object 'calendar#aclRule' not
    # found"); the `kind` discriminator must be the literal API string.
    structure(list(AclRule.scope = AclRule.scope, etag = etag, id = id,
                   kind = "calendar#aclRule", role = role, scope = scope),
              class = "gar_AclRule")
}
#' AclRule.scope Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The scope of the rule.
#'
#' @param type The type of the scope
#' @param value The email address of a user or group, or the name of a domain, depending on the scope type
#'
#' @return AclRule.scope object
#'
#' @family AclRule functions
#' @export
AclRule.scope <- function(type = NULL, value = NULL) {
    # Assemble the scope payload first, then tag it with the gar_-prefixed
    # class used by these generated API object constructors.
    fields <- list(type = type, value = value)
    structure(fields, class = "gar_AclRule.scope")
}
#' Calendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param description Description of the calendar
#' @param etag ETag of the resource
#' @param id Identifier of the calendar
#' @param location Geographic location of the calendar as free-form text
#' @param summary Title of the calendar
#' @param timeZone The time zone of the calendar
#'
#' @return Calendar object
#'
#' @family Calendar functions
#' @export
Calendar <- function(description = NULL, etag = NULL, id = NULL, location = NULL,
    summary = NULL, timeZone = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#calendar` (undefined object);
    # replaced with the literal string required by the API.
    structure(list(description = description, etag = etag, id = id, kind = "calendar#calendar",
        location = location, summary = summary, timeZone = timeZone), class = "gar_Calendar")
}
#' CalendarList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the collection
#' @param items Calendars that are present on the user's calendar list
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return CalendarList object
#'
#' @family CalendarList functions
#' @export
CalendarList <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#calendarList` (undefined
    # object); replaced with the literal string required by the API.
    structure(list(etag = etag, items = items, kind = "calendar#calendarList", nextPageToken = nextPageToken,
        nextSyncToken = nextSyncToken), class = "gar_CalendarList")
}
#' CalendarListEntry Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param CalendarListEntry.notificationSettings The \link{CalendarListEntry.notificationSettings} object or list of objects
#' @param accessRole The effective access role that the authenticated user has on the calendar
#' @param backgroundColor The main color of the calendar in the hexadecimal format '#0088aa'
#' @param colorId The color of the calendar
#' @param defaultReminders The default reminders that the authenticated user has for this calendar
#' @param deleted Whether this calendar list entry has been deleted from the calendar list
#' @param description Description of the calendar
#' @param etag ETag of the resource
#' @param foregroundColor The foreground color of the calendar in the hexadecimal format '#ffffff'
#' @param hidden Whether the calendar has been hidden from the list
#' @param id Identifier of the calendar
#' @param location Geographic location of the calendar as free-form text
#' @param notificationSettings The notifications that the authenticated user is receiving for this calendar
#' @param primary Whether the calendar is the primary calendar of the authenticated user
#' @param selected Whether the calendar content shows up in the calendar UI
#' @param summary Title of the calendar
#' @param summaryOverride The summary that the authenticated user has set for this calendar
#' @param timeZone The time zone of the calendar
#'
#' @return CalendarListEntry object
#'
#' @family CalendarListEntry functions
#' @export
CalendarListEntry <- function(CalendarListEntry.notificationSettings = NULL, accessRole = NULL,
    backgroundColor = NULL, colorId = NULL, defaultReminders = NULL, deleted = NULL,
    description = NULL, etag = NULL, foregroundColor = NULL, hidden = NULL, id = NULL,
    location = NULL, notificationSettings = NULL, primary = NULL, selected = NULL,
    summary = NULL, summaryOverride = NULL, timeZone = NULL) {
    # Bug fixes: the generated body discarded the `deleted`, `hidden`, `primary`
    # and `selected` arguments, substituting the undefined symbol `false`, and
    # `kind` was the bare symbol `calendar#calendarListEntry`. All five now use
    # the caller-supplied values / the literal kind string.
    structure(list(CalendarListEntry.notificationSettings = CalendarListEntry.notificationSettings,
        accessRole = accessRole, backgroundColor = backgroundColor, colorId = colorId,
        defaultReminders = defaultReminders, deleted = deleted, description = description,
        etag = etag, foregroundColor = foregroundColor, hidden = hidden, id = id,
        kind = "calendar#calendarListEntry", location = location, notificationSettings = notificationSettings,
        primary = primary, selected = selected, summary = summary, summaryOverride = summaryOverride,
        timeZone = timeZone), class = "gar_CalendarListEntry")
}
#' CalendarListEntry.notificationSettings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The notifications that the authenticated user is receiving for this calendar.
#'
#' @param notifications The list of notifications set for this calendar
#'
#' @return CalendarListEntry.notificationSettings object
#'
#' @family CalendarListEntry functions
#' @export
CalendarListEntry.notificationSettings <- function(notifications = NULL) {
  # Wrap the notification list and attach the googleAuthR S3 class.
  out <- list(notifications = notifications)
  class(out) <- "gar_CalendarListEntry.notificationSettings"
  out
}
#' CalendarNotification Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param method The method used to deliver the notification
#' @param type The type of notification
#'
#' @return CalendarNotification object
#'
#' @family CalendarNotification functions
#' @export
CalendarNotification <- function(method = NULL, type = NULL) {
  # Pair the delivery method with the notification type under the S3 class.
  out <- list(method = method, type = type)
  class(out) <- "gar_CalendarNotification"
  out
}
#' Channel Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Channel.params The \link{Channel.params} object or list of objects
#' @param address The address where notifications are delivered for this channel
#' @param expiration Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds
#' @param id A UUID or similar unique string that identifies this channel
#' @param params Additional parameters controlling delivery channel behavior
#' @param payload A Boolean value to indicate whether payload is wanted
#' @param resourceId An opaque ID that identifies the resource being watched on this channel
#' @param resourceUri A version-specific identifier for the watched resource
#' @param token An arbitrary string delivered to the target address with each notification delivered over this channel
#' @param type The type of delivery mechanism used for this channel
#'
#' @return Channel object
#'
#' @family Channel functions
#' @export
Channel <- function(Channel.params = NULL, address = NULL, expiration = NULL, id = NULL,
    params = NULL, payload = NULL, resourceId = NULL, resourceUri = NULL, token = NULL,
    type = NULL) {
    # Bug fix: `kind` was the bare symbol `api#channel` (undefined object);
    # replaced with the literal string required by the API.
    structure(list(Channel.params = Channel.params, address = address, expiration = expiration,
        id = id, kind = "api#channel", params = params, payload = payload, resourceId = resourceId,
        resourceUri = resourceUri, token = token, type = type), class = "gar_Channel")
}
#' Channel.params Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Additional parameters controlling delivery channel behavior. Optional.
#'
#'
#'
#' @return Channel.params object
#'
#' @family Channel functions
#' @export
Channel.params <- function() {
  # The API defines no fixed fields here; return an empty list for the caller
  # to populate with free-form key/value parameters.
  list()
}
#' ColorDefinition Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param background The background color associated with this color definition
#' @param foreground The foreground color that can be used to write on top of a background with 'background' color
#'
#' @return ColorDefinition object
#'
#' @family ColorDefinition functions
#' @export
ColorDefinition <- function(background = NULL, foreground = NULL) {
  # Bundle the background/foreground pair and tag it with the S3 class.
  out <- list(background = background, foreground = foreground)
  class(out) <- "gar_ColorDefinition"
  out
}
#' Colors Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Colors.calendar The \link{Colors.calendar} object or list of objects
#' @param Colors.event The \link{Colors.event} object or list of objects
#' @param calendar A global palette of calendar colors, mapping from the color ID to its definition
#' @param event A global palette of event colors, mapping from the color ID to its definition
#' @param updated Last modification time of the color palette (as a RFC3339 timestamp)
#'
#' @return Colors object
#'
#' @family Colors functions
#' @export
Colors <- function(Colors.calendar = NULL, Colors.event = NULL, calendar = NULL,
    event = NULL, updated = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#colors` (undefined object);
    # replaced with the literal string required by the API.
    structure(list(Colors.calendar = Colors.calendar, Colors.event = Colors.event,
        calendar = calendar, event = event, kind = "calendar#colors", updated = updated),
        class = "gar_Colors")
}
#' Colors.calendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A global palette of calendar colors, mapping from the color ID to its definition. A calendarListEntry resource refers to one of these color IDs in its color field. Read-only.
#'
#'
#'
#' @return Colors.calendar object
#'
#' @family Colors functions
#' @export
Colors.calendar <- function() {
  # Read-only color-ID palette; no fixed fields are defined, so an empty
  # list is returned.
  list()
}
#' Colors.event Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A global palette of event colors, mapping from the color ID to its definition. An event resource may refer to one of these color IDs in its color field. Read-only.
#'
#'
#'
#' @return Colors.event object
#'
#' @family Colors functions
#' @export
Colors.event <- function() {
  # Read-only event color palette; the schema has no fixed fields, so the
  # constructor simply yields an empty list.
  list()
}
#' Error Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param domain Domain, or broad category, of the error
#' @param reason Specific reason for the error
#'
#' @return Error object
#'
#' @family Error functions
#' @export
Error <- function(domain = NULL, reason = NULL) {
  # Compose the error descriptor (broad category + specific reason) and tag
  # it with the googleAuthR S3 class.
  out <- list(domain = domain, reason = reason)
  class(out) <- "gar_Error"
  out
}
#' Event Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Event.creator The \link{Event.creator} object or list of objects
#' @param Event.extendedProperties The \link{Event.extendedProperties} object or list of objects
#' @param Event.extendedProperties.private The \link{Event.extendedProperties.private} object or list of objects
#' @param Event.extendedProperties.shared The \link{Event.extendedProperties.shared} object or list of objects
#' @param Event.gadget The \link{Event.gadget} object or list of objects
#' @param Event.gadget.preferences The \link{Event.gadget.preferences} object or list of objects
#' @param Event.organizer The \link{Event.organizer} object or list of objects
#' @param Event.reminders The \link{Event.reminders} object or list of objects
#' @param Event.source The \link{Event.source} object or list of objects
#' @param anyoneCanAddSelf Whether anyone can invite themselves to the event (currently works for Google+ events only)
#' @param attachments File attachments for the event
#' @param attendees The attendees of the event
#' @param attendeesOmitted Whether attendees may have been omitted from the event's representation
#' @param colorId The color of the event
#' @param created Creation time of the event (as a RFC3339 timestamp)
#' @param creator The creator of the event
#' @param description Description of the event
#' @param end The (exclusive) end time of the event
#' @param endTimeUnspecified Whether the end time is actually unspecified
#' @param etag ETag of the resource
#' @param extendedProperties Extended properties of the event
#' @param gadget A gadget that extends this event
#' @param guestsCanInviteOthers Whether attendees other than the organizer can invite others to the event
#' @param guestsCanModify Whether attendees other than the organizer can modify the event
#' @param guestsCanSeeOtherGuests Whether attendees other than the organizer can see who the event's attendees are
#' @param hangoutLink An absolute link to the Google+ hangout associated with this event
#' @param htmlLink An absolute link to this event in the Google Calendar Web UI
#' @param iCalUID Event unique identifier as defined in RFC5545
#' @param id Opaque identifier of the event
#' @param location Geographic location of the event as free-form text
#' @param locked Whether this is a locked event copy where no changes can be made to the main event fields 'summary', 'description', 'location', 'start', 'end' or 'recurrence'
#' @param organizer The organizer of the event
#' @param originalStartTime For an instance of a recurring event, this is the time at which this event would start according to the recurrence data in the recurring event identified by recurringEventId
#' @param privateCopy Whether this is a private event copy where changes are not shared with other copies on other calendars
#' @param recurrence List of RRULE, EXRULE, RDATE and EXDATE lines for a recurring event, as specified in RFC5545
#' @param recurringEventId For an instance of a recurring event, this is the id of the recurring event to which this instance belongs
#' @param reminders Information about the event's reminders for the authenticated user
#' @param sequence Sequence number as per iCalendar
#' @param source Source from which the event was created
#' @param start The (inclusive) start time of the event
#' @param status Status of the event
#' @param summary Title of the event
#' @param transparency Whether the event blocks time on the calendar
#' @param updated Last modification time of the event (as a RFC3339 timestamp)
#' @param visibility Visibility of the event
#'
#' @return Event object
#'
#' @family Event functions
#' @export
Event <- function(Event.creator = NULL, Event.extendedProperties = NULL, Event.extendedProperties.private = NULL,
    Event.extendedProperties.shared = NULL, Event.gadget = NULL, Event.gadget.preferences = NULL,
    Event.organizer = NULL, Event.reminders = NULL, Event.source = NULL, anyoneCanAddSelf = NULL,
    attachments = NULL, attendees = NULL, attendeesOmitted = NULL, colorId = NULL,
    created = NULL, creator = NULL, description = NULL, end = NULL, endTimeUnspecified = NULL,
    etag = NULL, extendedProperties = NULL, gadget = NULL, guestsCanInviteOthers = NULL,
    guestsCanModify = NULL, guestsCanSeeOtherGuests = NULL, hangoutLink = NULL, htmlLink = NULL,
    iCalUID = NULL, id = NULL, location = NULL, locked = NULL, organizer = NULL,
    originalStartTime = NULL, privateCopy = NULL, recurrence = NULL, recurringEventId = NULL,
    reminders = NULL, sequence = NULL, source = NULL, start = NULL, status = NULL,
    summary = NULL, transparency = NULL, updated = NULL, visibility = NULL) {
    # Bug fixes: the generated body discarded nine caller-supplied arguments,
    # substituting the undefined symbols `false`, `true`, `opaque` and `default`
    # (the discovery document's textual defaults, emitted as bare names), and
    # `kind` was the bare symbol `calendar#event`. All fields now carry the
    # caller-supplied values; `kind` is the literal string required by the API.
    structure(list(Event.creator = Event.creator, Event.extendedProperties = Event.extendedProperties,
        Event.extendedProperties.private = Event.extendedProperties.private, Event.extendedProperties.shared = Event.extendedProperties.shared,
        Event.gadget = Event.gadget, Event.gadget.preferences = Event.gadget.preferences,
        Event.organizer = Event.organizer, Event.reminders = Event.reminders, Event.source = Event.source,
        anyoneCanAddSelf = anyoneCanAddSelf, attachments = attachments, attendees = attendees,
        attendeesOmitted = attendeesOmitted, colorId = colorId, created = created, creator = creator,
        description = description, end = end, endTimeUnspecified = endTimeUnspecified, etag = etag,
        extendedProperties = extendedProperties, gadget = gadget, guestsCanInviteOthers = guestsCanInviteOthers,
        guestsCanModify = guestsCanModify, guestsCanSeeOtherGuests = guestsCanSeeOtherGuests, hangoutLink = hangoutLink,
        htmlLink = htmlLink, iCalUID = iCalUID, id = id, kind = "calendar#event",
        location = location, locked = locked, organizer = organizer, originalStartTime = originalStartTime,
        privateCopy = privateCopy, recurrence = recurrence, recurringEventId = recurringEventId,
        reminders = reminders, sequence = sequence, source = source, start = start,
        status = status, summary = summary, transparency = transparency, updated = updated,
        visibility = visibility), class = "gar_Event")
}
#' Event.creator Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The creator of the event. Read-only.
#'
#' @param displayName The creator's name, if available
#' @param email The creator's email address, if available
#' @param id The creator's Profile ID, if available
#' @param self Whether the creator corresponds to the calendar on which this copy of the event appears
#'
#' @return Event.creator object
#'
#' @family Event functions
#' @export
Event.creator <- function(displayName = NULL, email = NULL, id = NULL, self = NULL) {
    # Bug fix: the `self` argument was discarded in favor of the undefined
    # symbol `false`; the caller-supplied value is now stored.
    structure(list(displayName = displayName, email = email, id = id, self = self),
        class = "gar_Event.creator")
}
#' Event.extendedProperties Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Extended properties of the event.
#'
#' @param Event.extendedProperties.private The \link{Event.extendedProperties.private} object or list of objects
#' @param Event.extendedProperties.shared The \link{Event.extendedProperties.shared} object or list of objects
#' @param private Properties that are private to the copy of the event that appears on this calendar
#' @param shared Properties that are shared between copies of the event on other attendees' calendars
#'
#' @return Event.extendedProperties object
#'
#' @family Event functions
#' @export
Event.extendedProperties <- function(Event.extendedProperties.private = NULL, Event.extendedProperties.shared = NULL,
  private = NULL, shared = NULL) {
  # Collect the private/shared property containers and tag with the S3 class.
  out <- list(
    Event.extendedProperties.private = Event.extendedProperties.private,
    Event.extendedProperties.shared = Event.extendedProperties.shared,
    private = private,
    shared = shared
  )
  class(out) <- "gar_Event.extendedProperties"
  out
}
#' Event.extendedProperties.private Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Properties that are private to the copy of the event that appears on this calendar.
#'
#'
#'
#' @return Event.extendedProperties.private object
#'
#' @family Event functions
#' @export
Event.extendedProperties.private <- function() {
  # Free-form key/value container private to this calendar's copy of the
  # event; no fixed fields, so an empty list is returned.
  list()
}
#' Event.extendedProperties.shared Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Properties that are shared between copies of the event on other attendees' calendars.
#'
#'
#'
#' @return Event.extendedProperties.shared object
#'
#' @family Event functions
#' @export
Event.extendedProperties.shared <- function() {
  # Free-form key/value container shared across attendees' event copies;
  # no fixed fields, so an empty list is returned.
  list()
}
#' Event.gadget Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A gadget that extends this event.
#'
#' @param Event.gadget.preferences The \link{Event.gadget.preferences} object or list of objects
#' @param display The gadget's display mode
#' @param height The gadget's height in pixels
#' @param iconLink The gadget's icon URL
#' @param link The gadget's URL
#' @param preferences Preferences
#' @param title The gadget's title
#' @param type The gadget's type
#' @param width The gadget's width in pixels
#'
#' @return Event.gadget object
#'
#' @family Event functions
#' @export
Event.gadget <- function(Event.gadget.preferences = NULL, display = NULL, height = NULL,
  iconLink = NULL, link = NULL, preferences = NULL, title = NULL, type = NULL,
  width = NULL) {
  # Gather the gadget attributes into a named list, then attach the class.
  out <- list(
    Event.gadget.preferences = Event.gadget.preferences,
    display = display,
    height = height,
    iconLink = iconLink,
    link = link,
    preferences = preferences,
    title = title,
    type = type,
    width = width
  )
  class(out) <- "gar_Event.gadget"
  out
}
#' Event.gadget.preferences Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Preferences.
#'
#'
#'
#' @return Event.gadget.preferences object
#'
#' @family Event functions
#' @export
Event.gadget.preferences <- function() {
  # Gadget preferences are free-form; the schema defines no fixed fields,
  # so an empty list is returned for the caller to fill.
  list()
}
#' Event.organizer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The organizer of the event. If the organizer is also an attendee, this is indicated with a separate entry in attendees with the organizer field set to True. To change the organizer, use the move operation. Read-only, except when importing an event.
#'
#' @param displayName The organizer's name, if available
#' @param email The organizer's email address, if available
#' @param id The organizer's Profile ID, if available
#' @param self Whether the organizer corresponds to the calendar on which this copy of the event appears
#'
#' @return Event.organizer object
#'
#' @family Event functions
#' @export
Event.organizer <- function(displayName = NULL, email = NULL, id = NULL, self = NULL) {
    # Bug fix: the `self` argument was discarded in favor of the undefined
    # symbol `false`; the caller-supplied value is now stored.
    structure(list(displayName = displayName, email = email, id = id, self = self),
        class = "gar_Event.organizer")
}
#' Event.reminders Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Information about the event's reminders for the authenticated user.
#'
#' @param overrides If the event doesn't use the default reminders, this lists the reminders specific to the event, or, if not set, indicates that no reminders are set for this event
#' @param useDefault Whether the default reminders of the calendar apply to the event
#'
#' @return Event.reminders object
#'
#' @family Event functions
#' @export
Event.reminders <- function(overrides = NULL, useDefault = NULL) {
  # Pair the reminder overrides with the default-reminders flag and tag
  # the result with the googleAuthR S3 class.
  out <- list(overrides = overrides, useDefault = useDefault)
  class(out) <- "gar_Event.reminders"
  out
}
#' Event.source Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Source from which the event was created. For example, a web page, an email message or any document identifiable by an URL with HTTP or HTTPS scheme. Can only be seen or modified by the creator of the event.
#'
#' @param title Title of the source; for example a title of a web page or an email subject
#' @param url URL of the source pointing to a resource
#'
#' @return Event.source object
#'
#' @family Event functions
#' @export
Event.source <- function(title = NULL, url = NULL) {
  # Describe where the event originated (title + URL) under the S3 class.
  out <- list(title = title, url = url)
  class(out) <- "gar_Event.source"
  out
}
#' EventAttachment Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param fileId ID of the attached file
#' @param fileUrl URL link to the attachment
#' @param iconLink URL link to the attachment's icon
#' @param mimeType Internet media type (MIME type) of the attachment
#' @param title Attachment title
#'
#' @return EventAttachment object
#'
#' @family EventAttachment functions
#' @export
EventAttachment <- function(fileId = NULL, fileUrl = NULL, iconLink = NULL, mimeType = NULL,
  title = NULL) {
  # Collect the attachment metadata and tag it with the S3 class.
  out <- list(
    fileId = fileId,
    fileUrl = fileUrl,
    iconLink = iconLink,
    mimeType = mimeType,
    title = title
  )
  class(out) <- "gar_EventAttachment"
  out
}
#' EventAttendee Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param additionalGuests Number of additional guests
#' @param comment The attendee's response comment
#' @param displayName The attendee's name, if available
#' @param email The attendee's email address, if available
#' @param id The attendee's Profile ID, if available
#' @param optional Whether this is an optional attendee
#' @param organizer Whether the attendee is the organizer of the event
#' @param resource Whether the attendee is a resource
#' @param responseStatus The attendee's response status
#' @param self Whether this entry represents the calendar on which this copy of the event appears
#'
#' @return EventAttendee object
#'
#' @family EventAttendee functions
#' @export
EventAttendee <- function(additionalGuests = NULL, comment = NULL, displayName = NULL,
    email = NULL, id = NULL, optional = NULL, organizer = NULL, resource = NULL,
    responseStatus = NULL, self = NULL) {
    # Bug fixes: the generated body discarded `additionalGuests`, `optional`,
    # `resource` and `self`, substituting the undefined symbols `` `0` `` and
    # `false` (the discovery document's textual defaults emitted as bare
    # names). All fields now carry the caller-supplied values.
    structure(list(additionalGuests = additionalGuests, comment = comment, displayName = displayName,
        email = email, id = id, optional = optional, organizer = organizer, resource = resource,
        responseStatus = responseStatus, self = self), class = "gar_EventAttendee")
}
#' EventDateTime Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param date The date, in the format 'yyyy-mm-dd', if this is an all-day event
#' @param dateTime The time, as a combined date-time value (formatted according to RFC3339)
#' @param timeZone The time zone in which the time is specified
#'
#' @return EventDateTime object
#'
#' @family EventDateTime functions
#' @export
EventDateTime <- function(date = NULL, dateTime = NULL, timeZone = NULL) {
  # Hold either an all-day date or a timestamped date-time plus its zone.
  out <- list(date = date, dateTime = dateTime, timeZone = timeZone)
  class(out) <- "gar_EventDateTime"
  out
}
#' EventReminder Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param method The method used by this reminder
#' @param minutes Number of minutes before the start of the event when the reminder should trigger
#'
#' @return EventReminder object
#'
#' @family EventReminder functions
#' @export
EventReminder <- function(method = NULL, minutes = NULL) {
  # Pair the delivery method with the minutes-before-start offset.
  out <- list(method = method, minutes = minutes)
  class(out) <- "gar_EventReminder"
  out
}
#' Events Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accessRole The user's access role for this calendar
#' @param defaultReminders The default reminders on the calendar for the authenticated user
#' @param description Description of the calendar
#' @param etag ETag of the collection
#' @param items List of events on the calendar
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#' @param summary Title of the calendar
#' @param timeZone The time zone of the calendar
#' @param updated Last modification time of the calendar (as a RFC3339 timestamp)
#'
#' @return Events object
#'
#' @family Events functions
#' @export
Events <- function(accessRole = NULL, defaultReminders = NULL, description = NULL,
    etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL, summary = NULL,
    timeZone = NULL, updated = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#events` (undefined object);
    # replaced with the literal string required by the API.
    structure(list(accessRole = accessRole, defaultReminders = defaultReminders,
        description = description, etag = etag, items = items, kind = "calendar#events",
        nextPageToken = nextPageToken, nextSyncToken = nextSyncToken, summary = summary,
        timeZone = timeZone, updated = updated), class = "gar_Events")
}
#' FreeBusyCalendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param busy List of time ranges during which this calendar should be regarded as busy
#' @param errors Optional error(s) (if computation for the calendar failed)
#'
#' @return FreeBusyCalendar object
#'
#' @family FreeBusyCalendar functions
#' @export
FreeBusyCalendar <- function(busy = NULL, errors = NULL) {
  # Pair the busy time ranges with any computation errors for one calendar.
  out <- list(busy = busy, errors = errors)
  class(out) <- "gar_FreeBusyCalendar"
  out
}
#' FreeBusyGroup Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param calendars List of calendars' identifiers within a group
#' @param errors Optional error(s) (if computation for the group failed)
#'
#' @return FreeBusyGroup object
#'
#' @family FreeBusyGroup functions
#' @export
FreeBusyGroup <- function(calendars = NULL, errors = NULL) {
  # Pair the group's calendar identifiers with any computation errors.
  out <- list(calendars = calendars, errors = errors)
  class(out) <- "gar_FreeBusyGroup"
  out
}
#' FreeBusyRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param calendarExpansionMax Maximal number of calendars for which FreeBusy information is to be provided
#' @param groupExpansionMax Maximal number of calendar identifiers to be provided for a single group
#' @param items List of calendars and/or groups to query
#' @param timeMax The end of the interval for the query
#' @param timeMin The start of the interval for the query
#' @param timeZone Time zone used in the response
#'
#' @return FreeBusyRequest object
#'
#' @family FreeBusyRequest functions
#' @export
FreeBusyRequest <- function(calendarExpansionMax = NULL, groupExpansionMax = NULL,
    items = NULL, timeMax = NULL, timeMin = NULL, timeZone = NULL) {
    # Bug fix: the `timeZone` argument was discarded in favor of the undefined
    # symbol `UTC` (the discovery document's default, "UTC", emitted as a bare
    # name); the caller-supplied value is now stored.
    structure(list(calendarExpansionMax = calendarExpansionMax, groupExpansionMax = groupExpansionMax,
        items = items, timeMax = timeMax, timeMin = timeMin, timeZone = timeZone), class = "gar_FreeBusyRequest")
}
#' FreeBusyRequestItem Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param id The identifier of a calendar or a group
#'
#' @return FreeBusyRequestItem object
#'
#' @family FreeBusyRequestItem functions
#' @export
FreeBusyRequestItem <- function(id = NULL) {
  # Wrap a single calendar/group identifier under the S3 class.
  out <- list(id = id)
  class(out) <- "gar_FreeBusyRequestItem"
  out
}
#' FreeBusyResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param FreeBusyResponse.calendars The \link{FreeBusyResponse.calendars} object or list of objects
#' @param FreeBusyResponse.groups The \link{FreeBusyResponse.groups} object or list of objects
#' @param calendars List of free/busy information for calendars
#' @param groups Expansion of groups
#' @param timeMax The end of the interval
#' @param timeMin The start of the interval
#'
#' @return FreeBusyResponse object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse <- function(FreeBusyResponse.calendars = NULL, FreeBusyResponse.groups = NULL,
    calendars = NULL, groups = NULL, timeMax = NULL, timeMin = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#freeBusy` (undefined
    # object); replaced with the literal string required by the API.
    structure(list(FreeBusyResponse.calendars = FreeBusyResponse.calendars, FreeBusyResponse.groups = FreeBusyResponse.groups,
        calendars = calendars, groups = groups, kind = "calendar#freeBusy", timeMax = timeMax,
        timeMin = timeMin), class = "gar_FreeBusyResponse")
}
#' FreeBusyResponse.calendars Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' List of free/busy information for calendars.
#'
#'
#'
#' @return FreeBusyResponse.calendars object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse.calendars <- function() {
  # Per-calendar free/busy map; no fixed fields, so an empty list is returned.
  list()
}
#' FreeBusyResponse.groups Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Expansion of groups.
#'
#'
#'
#' @return FreeBusyResponse.groups object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse.groups <- function() {
  # Group-expansion map; no fixed fields, so an empty list is returned.
  list()
}
#' Setting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the resource
#' @param id The id of the user setting
#' @param value Value of the user setting
#'
#' @return Setting object
#'
#' @family Setting functions
#' @export
Setting <- function(etag = NULL, id = NULL, value = NULL) {
    # Bug fix: `kind` was the bare symbol `calendar#setting` (undefined
    # object); replaced with the literal string required by the API.
    structure(list(etag = etag, id = id, kind = "calendar#setting", value = value),
        class = "gar_Setting")
}
#' Settings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag Etag of the collection
#' @param items List of user settings
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return Settings object
#'
#' @family Settings functions
#' @export
Settings <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#settings` was an undefined-variable lookup.
    structure(list(etag = etag, items = items, kind = "calendar#settings", nextPageToken = nextPageToken,
        nextSyncToken = nextSyncToken), class = "gar_Settings")
}
#' TimePeriod Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param end The (exclusive) end of the time period
#' @param start The (inclusive) start of the time period
#'
#' @return TimePeriod object
#'
#' @family TimePeriod functions
#' @export
TimePeriod <- function(end = NULL, start = NULL) {
    period <- list(end = end, start = start)
    class(period) <- "gar_TimePeriod"
    period
}
#' Calendar API Objects
#' Manipulates events and other calendar data.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2016-09-04 00:00:52
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlecalendarv3.auto/R/calendar_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' Acl Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the collection
#' @param items List of rules on the access control list
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return Acl object
#'
#' @family Acl functions
#' @export
Acl <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#acl` was an undefined-variable lookup.
    structure(list(etag = etag, items = items, kind = "calendar#acl", nextPageToken = nextPageToken,
        nextSyncToken = nextSyncToken), class = "gar_Acl")
}
#' AclRule Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param AclRule.scope The \link{AclRule.scope} object or list of objects
#' @param etag ETag of the resource
#' @param id Identifier of the ACL rule
#' @param role The role assigned to the scope
#' @param scope The scope of the rule
#'
#' @return AclRule object
#'
#' @family AclRule functions
#' @export
AclRule <- function(AclRule.scope = NULL, etag = NULL, id = NULL, role = NULL, scope = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#aclRule` was an undefined-variable lookup.
    structure(list(AclRule.scope = AclRule.scope, etag = etag, id = id, kind = "calendar#aclRule",
        role = role, scope = scope), class = "gar_AclRule")
}
#' AclRule.scope Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The scope of the rule.
#'
#' @param type The type of the scope
#' @param value The email address of a user or group, or the name of a domain, depending on the scope type
#'
#' @return AclRule.scope object
#'
#' @family AclRule functions
#' @export
AclRule.scope <- function(type = NULL, value = NULL) {
    scope <- list(type = type, value = value)
    class(scope) <- "gar_AclRule.scope"
    scope
}
#' Calendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param description Description of the calendar
#' @param etag ETag of the resource
#' @param id Identifier of the calendar
#' @param location Geographic location of the calendar as free-form text
#' @param summary Title of the calendar
#' @param timeZone The time zone of the calendar
#'
#' @return Calendar object
#'
#' @family Calendar functions
#' @export
Calendar <- function(description = NULL, etag = NULL, id = NULL, location = NULL,
    summary = NULL, timeZone = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#calendar` was an undefined-variable lookup.
    structure(list(description = description, etag = etag, id = id, kind = "calendar#calendar",
        location = location, summary = summary, timeZone = timeZone), class = "gar_Calendar")
}
#' CalendarList Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the collection
#' @param items Calendars that are present on the user's calendar list
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return CalendarList object
#'
#' @family CalendarList functions
#' @export
CalendarList <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#calendarList` was an undefined-variable lookup.
    structure(list(etag = etag, items = items, kind = "calendar#calendarList", nextPageToken = nextPageToken,
        nextSyncToken = nextSyncToken), class = "gar_CalendarList")
}
#' CalendarListEntry Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param CalendarListEntry.notificationSettings The \link{CalendarListEntry.notificationSettings} object or list of objects
#' @param accessRole The effective access role that the authenticated user has on the calendar
#' @param backgroundColor The main color of the calendar in the hexadecimal format '#0088aa'
#' @param colorId The color of the calendar
#' @param defaultReminders The default reminders that the authenticated user has for this calendar
#' @param deleted Whether this calendar list entry has been deleted from the calendar list
#' @param description Description of the calendar
#' @param etag ETag of the resource
#' @param foregroundColor The foreground color of the calendar in the hexadecimal format '#ffffff'
#' @param hidden Whether the calendar has been hidden from the list
#' @param id Identifier of the calendar
#' @param location Geographic location of the calendar as free-form text
#' @param notificationSettings The notifications that the authenticated user is receiving for this calendar
#' @param primary Whether the calendar is the primary calendar of the authenticated user
#' @param selected Whether the calendar content shows up in the calendar UI
#' @param summary Title of the calendar
#' @param summaryOverride The summary that the authenticated user has set for this calendar
#' @param timeZone The time zone of the calendar
#'
#' @return CalendarListEntry object
#'
#' @family CalendarListEntry functions
#' @export
CalendarListEntry <- function(CalendarListEntry.notificationSettings = NULL, accessRole = NULL,
    backgroundColor = NULL, colorId = NULL, defaultReminders = NULL, deleted = NULL,
    description = NULL, etag = NULL, foregroundColor = NULL, hidden = NULL, id = NULL,
    location = NULL, notificationSettings = NULL, primary = NULL, selected = NULL,
    summary = NULL, summaryOverride = NULL, timeZone = NULL) {
    # The generated body hard-coded the JSON default `false` (undefined in R,
    # so every call errored) for deleted/hidden/primary/selected, silently
    # discarding those arguments; they now pass through.  kind is the fixed
    # resource-type string and must be a character literal, not a backtick
    # symbol.
    structure(list(CalendarListEntry.notificationSettings = CalendarListEntry.notificationSettings,
        accessRole = accessRole, backgroundColor = backgroundColor, colorId = colorId,
        defaultReminders = defaultReminders, deleted = deleted, description = description,
        etag = etag, foregroundColor = foregroundColor, hidden = hidden, id = id,
        kind = "calendar#calendarListEntry", location = location, notificationSettings = notificationSettings,
        primary = primary, selected = selected, summary = summary, summaryOverride = summaryOverride,
        timeZone = timeZone), class = "gar_CalendarListEntry")
}
#' CalendarListEntry.notificationSettings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The notifications that the authenticated user is receiving for this calendar.
#'
#' @param notifications The list of notifications set for this calendar
#'
#' @return CalendarListEntry.notificationSettings object
#'
#' @family CalendarListEntry functions
#' @export
CalendarListEntry.notificationSettings <- function(notifications = NULL) {
    settings <- list(notifications = notifications)
    class(settings) <- "gar_CalendarListEntry.notificationSettings"
    settings
}
#' CalendarNotification Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param method The method used to deliver the notification
#' @param type The type of notification
#'
#' @return CalendarNotification object
#'
#' @family CalendarNotification functions
#' @export
CalendarNotification <- function(method = NULL, type = NULL) {
    notification <- list(method = method, type = type)
    class(notification) <- "gar_CalendarNotification"
    notification
}
#' Channel Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Channel.params The \link{Channel.params} object or list of objects
#' @param address The address where notifications are delivered for this channel
#' @param expiration Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds
#' @param id A UUID or similar unique string that identifies this channel
#' @param params Additional parameters controlling delivery channel behavior
#' @param payload A Boolean value to indicate whether payload is wanted
#' @param resourceId An opaque ID that identifies the resource being watched on this channel
#' @param resourceUri A version-specific identifier for the watched resource
#' @param token An arbitrary string delivered to the target address with each notification delivered over this channel
#' @param type The type of delivery mechanism used for this channel
#'
#' @return Channel object
#'
#' @family Channel functions
#' @export
Channel <- function(Channel.params = NULL, address = NULL, expiration = NULL, id = NULL,
    params = NULL, payload = NULL, resourceId = NULL, resourceUri = NULL, token = NULL,
    type = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `api#channel` was an undefined-variable lookup.
    structure(list(Channel.params = Channel.params, address = address, expiration = expiration,
        id = id, kind = "api#channel", params = params, payload = payload, resourceId = resourceId,
        resourceUri = resourceUri, token = token, type = type), class = "gar_Channel")
}
#' Channel.params Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Additional parameters controlling delivery channel behavior. Optional.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Channel.params object
#'
#' @family Channel functions
#' @export
Channel.params <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' ColorDefinition Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param background The background color associated with this color definition
#' @param foreground The foreground color that can be used to write on top of a background with 'background' color
#'
#' @return ColorDefinition object
#'
#' @family ColorDefinition functions
#' @export
ColorDefinition <- function(background = NULL, foreground = NULL) {
    colour <- list(background = background, foreground = foreground)
    class(colour) <- "gar_ColorDefinition"
    colour
}
#' Colors Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Colors.calendar The \link{Colors.calendar} object or list of objects
#' @param Colors.event The \link{Colors.event} object or list of objects
#' @param calendar A global palette of calendar colors, mapping from the color ID to its definition
#' @param event A global palette of event colors, mapping from the color ID to its definition
#' @param updated Last modification time of the color palette (as a RFC3339 timestamp)
#'
#' @return Colors object
#'
#' @family Colors functions
#' @export
Colors <- function(Colors.calendar = NULL, Colors.event = NULL, calendar = NULL,
    event = NULL, updated = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#colors` was an undefined-variable lookup.
    structure(list(Colors.calendar = Colors.calendar, Colors.event = Colors.event,
        calendar = calendar, event = event, kind = "calendar#colors", updated = updated),
        class = "gar_Colors")
}
#' Colors.calendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A global palette of calendar colors, mapping from the color ID to its definition. A calendarListEntry resource refers to one of these color IDs in its color field. Read-only.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Colors.calendar object
#'
#' @family Colors functions
#' @export
Colors.calendar <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Colors.event Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A global palette of event colors, mapping from the color ID to its definition. An event resource may refer to one of these color IDs in its color field. Read-only.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Colors.event object
#'
#' @family Colors functions
#' @export
Colors.event <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Error Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param domain Domain, or broad category, of the error
#' @param reason Specific reason for the error
#'
#' @return Error object
#'
#' @family Error functions
#' @export
Error <- function(domain = NULL, reason = NULL) {
    err <- list(domain = domain, reason = reason)
    class(err) <- "gar_Error"
    err
}
#' Event Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param Event.creator The \link{Event.creator} object or list of objects
#' @param Event.extendedProperties The \link{Event.extendedProperties} object or list of objects
#' @param Event.extendedProperties.private The \link{Event.extendedProperties.private} object or list of objects
#' @param Event.extendedProperties.shared The \link{Event.extendedProperties.shared} object or list of objects
#' @param Event.gadget The \link{Event.gadget} object or list of objects
#' @param Event.gadget.preferences The \link{Event.gadget.preferences} object or list of objects
#' @param Event.organizer The \link{Event.organizer} object or list of objects
#' @param Event.reminders The \link{Event.reminders} object or list of objects
#' @param Event.source The \link{Event.source} object or list of objects
#' @param anyoneCanAddSelf Whether anyone can invite themselves to the event (currently works for Google+ events only)
#' @param attachments File attachments for the event
#' @param attendees The attendees of the event
#' @param attendeesOmitted Whether attendees may have been omitted from the event's representation
#' @param colorId The color of the event
#' @param created Creation time of the event (as a RFC3339 timestamp)
#' @param creator The creator of the event
#' @param description Description of the event
#' @param end The (exclusive) end time of the event
#' @param endTimeUnspecified Whether the end time is actually unspecified
#' @param etag ETag of the resource
#' @param extendedProperties Extended properties of the event
#' @param gadget A gadget that extends this event
#' @param guestsCanInviteOthers Whether attendees other than the organizer can invite others to the event
#' @param guestsCanModify Whether attendees other than the organizer can modify the event
#' @param guestsCanSeeOtherGuests Whether attendees other than the organizer can see who the event's attendees are
#' @param hangoutLink An absolute link to the Google+ hangout associated with this event
#' @param htmlLink An absolute link to this event in the Google Calendar Web UI
#' @param iCalUID Event unique identifier as defined in RFC5545
#' @param id Opaque identifier of the event
#' @param location Geographic location of the event as free-form text
#' @param locked Whether this is a locked event copy where no changes can be made to the main event fields 'summary', 'description', 'location', 'start', 'end' or 'recurrence'
#' @param organizer The organizer of the event
#' @param originalStartTime For an instance of a recurring event, this is the time at which this event would start according to the recurrence data in the recurring event identified by recurringEventId
#' @param privateCopy Whether this is a private event copy where changes are not shared with other copies on other calendars
#' @param recurrence List of RRULE, EXRULE, RDATE and EXDATE lines for a recurring event, as specified in RFC5545
#' @param recurringEventId For an instance of a recurring event, this is the id of the recurring event to which this instance belongs
#' @param reminders Information about the event's reminders for the authenticated user
#' @param sequence Sequence number as per iCalendar
#' @param source Source from which the event was created
#' @param start The (inclusive) start time of the event
#' @param status Status of the event
#' @param summary Title of the event
#' @param transparency Whether the event blocks time on the calendar
#' @param updated Last modification time of the event (as a RFC3339 timestamp)
#' @param visibility Visibility of the event
#'
#' @return Event object
#'
#' @family Event functions
#' @export
Event <- function(Event.creator = NULL, Event.extendedProperties = NULL, Event.extendedProperties.private = NULL,
    Event.extendedProperties.shared = NULL, Event.gadget = NULL, Event.gadget.preferences = NULL,
    Event.organizer = NULL, Event.reminders = NULL, Event.source = NULL, anyoneCanAddSelf = NULL,
    attachments = NULL, attendees = NULL, attendeesOmitted = NULL, colorId = NULL,
    created = NULL, creator = NULL, description = NULL, end = NULL, endTimeUnspecified = NULL,
    etag = NULL, extendedProperties = NULL, gadget = NULL, guestsCanInviteOthers = NULL,
    guestsCanModify = NULL, guestsCanSeeOtherGuests = NULL, hangoutLink = NULL, htmlLink = NULL,
    iCalUID = NULL, id = NULL, location = NULL, locked = NULL, organizer = NULL,
    originalStartTime = NULL, privateCopy = NULL, recurrence = NULL, recurringEventId = NULL,
    reminders = NULL, sequence = NULL, source = NULL, start = NULL, status = NULL,
    summary = NULL, transparency = NULL, updated = NULL, visibility = NULL) {
    # The generated body referenced bare symbols that do not exist in R
    # (false, true, opaque, default and the backtick form `calendar#event`),
    # so every call errored, and it silently discarded the corresponding
    # arguments (anyoneCanAddSelf, attendeesOmitted, endTimeUnspecified,
    # guestsCan*, locked, privateCopy, transparency, visibility).  All
    # fields now pass the caller's values through; kind is the fixed
    # API resource-type string.
    structure(list(Event.creator = Event.creator, Event.extendedProperties = Event.extendedProperties,
        Event.extendedProperties.private = Event.extendedProperties.private, Event.extendedProperties.shared = Event.extendedProperties.shared,
        Event.gadget = Event.gadget, Event.gadget.preferences = Event.gadget.preferences,
        Event.organizer = Event.organizer, Event.reminders = Event.reminders, Event.source = Event.source,
        anyoneCanAddSelf = anyoneCanAddSelf, attachments = attachments, attendees = attendees,
        attendeesOmitted = attendeesOmitted, colorId = colorId, created = created, creator = creator,
        description = description, end = end, endTimeUnspecified = endTimeUnspecified, etag = etag,
        extendedProperties = extendedProperties, gadget = gadget, guestsCanInviteOthers = guestsCanInviteOthers,
        guestsCanModify = guestsCanModify, guestsCanSeeOtherGuests = guestsCanSeeOtherGuests, hangoutLink = hangoutLink,
        htmlLink = htmlLink, iCalUID = iCalUID, id = id, kind = "calendar#event",
        location = location, locked = locked, organizer = organizer, originalStartTime = originalStartTime,
        privateCopy = privateCopy, recurrence = recurrence, recurringEventId = recurringEventId,
        reminders = reminders, sequence = sequence, source = source, start = start,
        status = status, summary = summary, transparency = transparency, updated = updated,
        visibility = visibility), class = "gar_Event")
}
#' Event.creator Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The creator of the event. Read-only.
#'
#' @param displayName The creator's name, if available
#' @param email The creator's email address, if available
#' @param id The creator's Profile ID, if available
#' @param self Whether the creator corresponds to the calendar on which this copy of the event appears
#'
#' @return Event.creator object
#'
#' @family Event functions
#' @export
Event.creator <- function(displayName = NULL, email = NULL, id = NULL, self = NULL) {
    # The generated body hard-coded `self = false` (undefined in R, so every
    # call errored) and discarded the caller's self argument; pass it through.
    structure(list(displayName = displayName, email = email, id = id, self = self),
        class = "gar_Event.creator")
}
#' Event.extendedProperties Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Extended properties of the event.
#'
#' @param Event.extendedProperties.private The \link{Event.extendedProperties.private} object or list of objects
#' @param Event.extendedProperties.shared The \link{Event.extendedProperties.shared} object or list of objects
#' @param private Properties that are private to the copy of the event that appears on this calendar
#' @param shared Properties that are shared between copies of the event on other attendees' calendars
#'
#' @return Event.extendedProperties object
#'
#' @family Event functions
#' @export
Event.extendedProperties <- function(Event.extendedProperties.private = NULL, Event.extendedProperties.shared = NULL,
    private = NULL, shared = NULL) {
    props <- list(Event.extendedProperties.private = Event.extendedProperties.private,
        Event.extendedProperties.shared = Event.extendedProperties.shared,
        private = private, shared = shared)
    class(props) <- "gar_Event.extendedProperties"
    props
}
#' Event.extendedProperties.private Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Properties that are private to the copy of the event that appears on this calendar.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Event.extendedProperties.private object
#'
#' @family Event functions
#' @export
Event.extendedProperties.private <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Event.extendedProperties.shared Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Properties that are shared between copies of the event on other attendees' calendars.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Event.extendedProperties.shared object
#'
#' @family Event functions
#' @export
Event.extendedProperties.shared <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Event.gadget Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A gadget that extends this event.
#'
#' @param Event.gadget.preferences The \link{Event.gadget.preferences} object or list of objects
#' @param display The gadget's display mode
#' @param height The gadget's height in pixels
#' @param iconLink The gadget's icon URL
#' @param link The gadget's URL
#' @param preferences Preferences
#' @param title The gadget's title
#' @param type The gadget's type
#' @param width The gadget's width in pixels
#'
#' @return Event.gadget object
#'
#' @family Event functions
#' @export
Event.gadget <- function(Event.gadget.preferences = NULL, display = NULL, height = NULL,
    iconLink = NULL, link = NULL, preferences = NULL, title = NULL, type = NULL,
    width = NULL) {
    gadget <- list(Event.gadget.preferences = Event.gadget.preferences,
        display = display, height = height, iconLink = iconLink, link = link,
        preferences = preferences, title = title, type = type, width = width)
    class(gadget) <- "gar_Event.gadget"
    gadget
}
#' Event.gadget.preferences Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Preferences.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return Event.gadget.preferences object
#'
#' @family Event functions
#' @export
Event.gadget.preferences <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Event.organizer Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The organizer of the event. If the organizer is also an attendee, this is indicated with a separate entry in attendees with the organizer field set to True. To change the organizer, use the move operation. Read-only, except when importing an event.
#'
#' @param displayName The organizer's name, if available
#' @param email The organizer's email address, if available
#' @param id The organizer's Profile ID, if available
#' @param self Whether the organizer corresponds to the calendar on which this copy of the event appears
#'
#' @return Event.organizer object
#'
#' @family Event functions
#' @export
Event.organizer <- function(displayName = NULL, email = NULL, id = NULL, self = NULL) {
    # The generated body hard-coded `self = false` (undefined in R, so every
    # call errored) and discarded the caller's self argument; pass it through.
    structure(list(displayName = displayName, email = email, id = id, self = self),
        class = "gar_Event.organizer")
}
#' Event.reminders Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Information about the event's reminders for the authenticated user.
#'
#' @param overrides If the event doesn't use the default reminders, this lists the reminders specific to the event, or, if not set, indicates that no reminders are set for this event
#' @param useDefault Whether the default reminders of the calendar apply to the event
#'
#' @return Event.reminders object
#'
#' @family Event functions
#' @export
Event.reminders <- function(overrides = NULL, useDefault = NULL) {
    reminders <- list(overrides = overrides, useDefault = useDefault)
    class(reminders) <- "gar_Event.reminders"
    reminders
}
#' Event.source Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Source from which the event was created. For example, a web page, an email message or any document identifiable by an URL with HTTP or HTTPS scheme. Can only be seen or modified by the creator of the event.
#'
#' @param title Title of the source; for example a title of a web page or an email subject
#' @param url URL of the source pointing to a resource
#'
#' @return Event.source object
#'
#' @family Event functions
#' @export
Event.source <- function(title = NULL, url = NULL) {
    src <- list(title = title, url = url)
    class(src) <- "gar_Event.source"
    src
}
#' EventAttachment Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param fileId ID of the attached file
#' @param fileUrl URL link to the attachment
#' @param iconLink URL link to the attachment's icon
#' @param mimeType Internet media type (MIME type) of the attachment
#' @param title Attachment title
#'
#' @return EventAttachment object
#'
#' @family EventAttachment functions
#' @export
EventAttachment <- function(fileId = NULL, fileUrl = NULL, iconLink = NULL, mimeType = NULL,
    title = NULL) {
    attachment <- list(fileId = fileId, fileUrl = fileUrl, iconLink = iconLink,
        mimeType = mimeType, title = title)
    class(attachment) <- "gar_EventAttachment"
    attachment
}
#' EventAttendee Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param additionalGuests Number of additional guests
#' @param comment The attendee's response comment
#' @param displayName The attendee's name, if available
#' @param email The attendee's email address, if available
#' @param id The attendee's Profile ID, if available
#' @param optional Whether this is an optional attendee
#' @param organizer Whether the attendee is the organizer of the event
#' @param resource Whether the attendee is a resource
#' @param responseStatus The attendee's response status
#' @param self Whether this entry represents the calendar on which this copy of the event appears
#'
#' @return EventAttendee object
#'
#' @family EventAttendee functions
#' @export
EventAttendee <- function(additionalGuests = NULL, comment = NULL, displayName = NULL,
    email = NULL, id = NULL, optional = NULL, organizer = NULL, resource = NULL,
    responseStatus = NULL, self = NULL) {
    # The generated body hard-coded JSON defaults as bare symbols (`0`,
    # false), which are undefined-variable lookups in R and errored on every
    # call, and silently discarded the additionalGuests/optional/resource/self
    # arguments; all fields now pass the caller's values through.
    structure(list(additionalGuests = additionalGuests, comment = comment, displayName = displayName,
        email = email, id = id, optional = optional, organizer = organizer, resource = resource,
        responseStatus = responseStatus, self = self), class = "gar_EventAttendee")
}
#' EventDateTime Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param date The date, in the format 'yyyy-mm-dd', if this is an all-day event
#' @param dateTime The time, as a combined date-time value (formatted according to RFC3339)
#' @param timeZone The time zone in which the time is specified
#'
#' @return EventDateTime object
#'
#' @family EventDateTime functions
#' @export
EventDateTime <- function(date = NULL, dateTime = NULL, timeZone = NULL) {
    dt <- list(date = date, dateTime = dateTime, timeZone = timeZone)
    class(dt) <- "gar_EventDateTime"
    dt
}
#' EventReminder Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param method The method used by this reminder
#' @param minutes Number of minutes before the start of the event when the reminder should trigger
#'
#' @return EventReminder object
#'
#' @family EventReminder functions
#' @export
EventReminder <- function(method = NULL, minutes = NULL) {
    reminder <- list(method = method, minutes = minutes)
    class(reminder) <- "gar_EventReminder"
    reminder
}
#' Events Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param accessRole The user's access role for this calendar
#' @param defaultReminders The default reminders on the calendar for the authenticated user
#' @param description Description of the calendar
#' @param etag ETag of the collection
#' @param items List of events on the calendar
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#' @param summary Title of the calendar
#' @param timeZone The time zone of the calendar
#' @param updated Last modification time of the calendar (as a RFC3339 timestamp)
#'
#' @return Events object
#'
#' @family Events functions
#' @export
Events <- function(accessRole = NULL, defaultReminders = NULL, description = NULL,
    etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL, summary = NULL,
    timeZone = NULL, updated = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#events` was an undefined-variable lookup.
    structure(list(accessRole = accessRole, defaultReminders = defaultReminders,
        description = description, etag = etag, items = items, kind = "calendar#events",
        nextPageToken = nextPageToken, nextSyncToken = nextSyncToken, summary = summary,
        timeZone = timeZone, updated = updated), class = "gar_Events")
}
#' FreeBusyCalendar Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param busy List of time ranges during which this calendar should be regarded as busy
#' @param errors Optional error(s) (if computation for the calendar failed)
#'
#' @return FreeBusyCalendar object
#'
#' @family FreeBusyCalendar functions
#' @export
FreeBusyCalendar <- function(busy = NULL, errors = NULL) {
    fb <- list(busy = busy, errors = errors)
    class(fb) <- "gar_FreeBusyCalendar"
    fb
}
#' FreeBusyGroup Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param calendars List of calendars' identifiers within a group
#' @param errors Optional error(s) (if computation for the group failed)
#'
#' @return FreeBusyGroup object
#'
#' @family FreeBusyGroup functions
#' @export
FreeBusyGroup <- function(calendars = NULL, errors = NULL) {
    grp <- list(calendars = calendars, errors = errors)
    class(grp) <- "gar_FreeBusyGroup"
    grp
}
#' FreeBusyRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param calendarExpansionMax Maximal number of calendars for which FreeBusy information is to be provided
#' @param groupExpansionMax Maximal number of calendar identifiers to be provided for a single group
#' @param items List of calendars and/or groups to query
#' @param timeMax The end of the interval for the query
#' @param timeMin The start of the interval for the query
#' @param timeZone Time zone used in the response
#'
#' @return FreeBusyRequest object
#'
#' @family FreeBusyRequest functions
#' @export
FreeBusyRequest <- function(calendarExpansionMax = NULL, groupExpansionMax = NULL,
    items = NULL, timeMax = NULL, timeMin = NULL, timeZone = NULL) {
    # The generated body hard-coded the JSON default as the bare symbol UTC
    # (an undefined-variable lookup, so every call errored) and discarded
    # the caller's timeZone argument; it now passes through.
    structure(list(calendarExpansionMax = calendarExpansionMax, groupExpansionMax = groupExpansionMax,
        items = items, timeMax = timeMax, timeMin = timeMin, timeZone = timeZone), class = "gar_FreeBusyRequest")
}
#' FreeBusyRequestItem Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param id The identifier of a calendar or a group
#'
#' @return FreeBusyRequestItem object
#'
#' @family FreeBusyRequestItem functions
#' @export
FreeBusyRequestItem <- function(id = NULL) {
    item <- list(id = id)
    class(item) <- "gar_FreeBusyRequestItem"
    item
}
#' FreeBusyResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param FreeBusyResponse.calendars The \link{FreeBusyResponse.calendars} object or list of objects
#' @param FreeBusyResponse.groups The \link{FreeBusyResponse.groups} object or list of objects
#' @param calendars List of free/busy information for calendars
#' @param groups Expansion of groups
#' @param timeMax The end of the interval
#' @param timeMin The start of the interval
#'
#' @return FreeBusyResponse object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse <- function(FreeBusyResponse.calendars = NULL, FreeBusyResponse.groups = NULL,
    calendars = NULL, groups = NULL, timeMax = NULL, timeMin = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#freeBusy` was an undefined-variable lookup.
    structure(list(FreeBusyResponse.calendars = FreeBusyResponse.calendars, FreeBusyResponse.groups = FreeBusyResponse.groups,
        calendars = calendars, groups = groups, kind = "calendar#freeBusy", timeMax = timeMax,
        timeMin = timeMin), class = "gar_FreeBusyResponse")
}
#' FreeBusyResponse.calendars Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' List of free/busy information for calendars.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return FreeBusyResponse.calendars object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse.calendars <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' FreeBusyResponse.groups Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Expansion of groups.
#'
#' The API models this as an open key/value object, so the constructor
#' returns an empty list for the caller to populate.
#'
#' @return FreeBusyResponse.groups object
#'
#' @family FreeBusyResponse functions
#' @export
FreeBusyResponse.groups <- function() {
    # Empty, unnamed list; identical to list().
    vector(mode = "list", length = 0L)
}
#' Setting Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' No description
#'
#' @param etag ETag of the resource
#' @param id The id of the user setting
#' @param value Value of the user setting
#'
#' @return Setting object
#'
#' @family Setting functions
#' @export
Setting <- function(etag = NULL, id = NULL, value = NULL) {
    # kind must be the literal resource-type string; the generated backtick
    # form `calendar#setting` was an undefined-variable lookup.
    structure(list(etag = etag, id = id, kind = "calendar#setting", value = value),
        class = "gar_Setting")
}
#' Settings Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Represents a collection of user settings; \code{kind} is fixed to the API
#' type tag \code{"calendar#settings"}.
#'
#' @param etag Etag of the collection
#' @param items List of user settings
#' @param nextPageToken Token used to access the next page of this result
#' @param nextSyncToken Token used at a later point in time to retrieve only the entries that have changed since this result was returned
#'
#' @return Settings object
#'
#' @family Settings functions
#' @export
Settings <- function(etag = NULL, items = NULL, nextPageToken = NULL, nextSyncToken = NULL) {
    # BUG FIX: `calendar#settings` (backticks) referenced a non-existent object
    # and errored at run time; the kind tag must be a string literal.
    structure(list(etag = etag, items = items, kind = "calendar#settings", nextPageToken = nextPageToken,
        nextSyncToken = nextSyncToken), class = "gar_Settings")
}
#' TimePeriod Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' A half-open time interval: the start is inclusive, the end exclusive.
#'
#' @param end The (exclusive) end of the time period
#' @param start The (inclusive) start of the time period
#'
#' @return TimePeriod object
#'
#' @family TimePeriod functions
#' @export
TimePeriod <- function(end = NULL, start = NULL) {
    out <- list(end = end, start = start)
    class(out) <- "gar_TimePeriod"
    out
}
|
#' Generate PSA dataset of CEA parameters
#'
#' \code{generate_psa_params} generates PSA input dataset by sampling decision
#' model parameters from their distributions. The sample of the calibrated
#' parameters is a draw from their posterior distribution obtained with the
#' IMIS algorithm.
#' @param n_sim Number of PSA samples. Note: the effective number of samples
#'   is currently tied to the number of rows of the calibrated posterior
#'   sample \code{m_calib_post}, which overrides this argument.
#' @param seed Seed for reproducibility of Monte Carlo sampling.
#' @return
#' A data frame with one row per PSA sample and 15 columns of parameters for
#' PSA. Each row is a parameter set sampled from distributions that
#' characterize their uncertainty
#' @examples
#' generate_psa_params()
#' @export
generate_psa_params <- function(n_sim = 1000, seed = 20190220){ # User defined
  ## Load calibrated parameters into the function's own environment rather
  ## than polluting the caller's global environment
  data("m_calib_post", envir = environment())
  ## The sampled PSA draws must pair 1:1 with the posterior draws, so n_sim is
  ## forced to the posterior sample size
  n_sim <- nrow(m_calib_post)
  ## BUG FIX: the original did `set_seed <- seed`, which only created an
  ## unused variable -- the RNG seed was never actually set and the results
  ## were not reproducible
  set.seed(seed)
  df_psa_params <- data.frame(
    ### Calibrated parameters
    m_calib_post,
    ### Transition probabilities (per cycle)
    p_HS1 = rbeta(n_sim, 30, 170), # probability to become sick when healthy
    p_S1H = rbeta(n_sim, 60, 60) , # probability to become healthy when sick
    ### State rewards
    ## Costs
    c_H = rgamma(n_sim, shape = 100, scale = 20) , # cost of remaining one cycle in state H
    c_S1 = rgamma(n_sim, shape = 177.8, scale = 22.5), # cost of remaining one cycle in state S1
    c_S2 = rgamma(n_sim, shape = 225, scale = 66.7) , # cost of remaining one cycle in state S2
    c_Trt = rgamma(n_sim, shape = 73.5, scale = 163.3), # cost of treatment (per cycle)
    c_D = 0 , # cost of being in the death state
    ## Utilities (truncated at 1, the maximum attainable utility)
    u_H = truncnorm::rtruncnorm(n_sim, mean = 1, sd = 0.01, b = 1), # utility when healthy
    u_S1 = truncnorm::rtruncnorm(n_sim, mean = 0.75, sd = 0.02, b = 1), # utility when sick
    u_S2 = truncnorm::rtruncnorm(n_sim, mean = 0.50, sd = 0.03, b = 1), # utility when sicker
    u_D = 0 , # utility when dead
    u_Trt = truncnorm::rtruncnorm(n_sim, mean = 0.95, sd = 0.02, b = 1) # utility when being treated
  )
  return(df_psa_params)
}
#' Generate PSA dataset of CEA parameters
#' \code{generate_psa_params} generates PSA input dataset by sampling decision
#' model parameters from their distributions. The sample of the calibrated
#' parameters is a draw from their posterior distribution obtained with the
#' IMIS algorithm.
#' @param n_sim Number of PSA samples. Note: the effective number of samples
#'   is currently tied to the number of rows of the calibrated posterior
#'   sample \code{m_calib_post}, which overrides this argument.
#' @param seed Seed for reproducibility of Monte Carlo sampling.
#' @return
#' A data frame with one row per PSA sample and 15 columns of parameters for
#' PSA. Each row is a parameter set sampled from distributions that
#' characterize their uncertainty
#' @examples
#' generate_psa_params()
#' @export
generate_psa_params <- function(n_sim = 1000, seed = 20190220){ # User defined
  ## Load calibrated parameters into the function's own environment rather
  ## than polluting the caller's global environment
  data("m_calib_post", envir = environment())
  ## The sampled PSA draws must pair 1:1 with the posterior draws, so n_sim is
  ## forced to the posterior sample size
  n_sim <- nrow(m_calib_post)
  ## BUG FIX: the original did `set_seed <- seed`, which only created an
  ## unused variable -- the RNG seed was never actually set and the results
  ## were not reproducible
  set.seed(seed)
  df_psa_params <- data.frame(
    ### Calibrated parameters
    m_calib_post,
    ### Transition probabilities (per cycle)
    p_HS1 = rbeta(n_sim, 30, 170), # probability to become sick when healthy
    p_S1H = rbeta(n_sim, 60, 60) , # probability to become healthy when sick
    ### State rewards
    ## Costs
    c_H = rgamma(n_sim, shape = 100, scale = 20) , # cost of remaining one cycle in state H
    c_S1 = rgamma(n_sim, shape = 177.8, scale = 22.5), # cost of remaining one cycle in state S1
    c_S2 = rgamma(n_sim, shape = 225, scale = 66.7) , # cost of remaining one cycle in state S2
    c_Trt = rgamma(n_sim, shape = 73.5, scale = 163.3), # cost of treatment (per cycle)
    c_D = 0 , # cost of being in the death state
    ## Utilities (truncated at 1, the maximum attainable utility)
    u_H = truncnorm::rtruncnorm(n_sim, mean = 1, sd = 0.01, b = 1), # utility when healthy
    u_S1 = truncnorm::rtruncnorm(n_sim, mean = 0.75, sd = 0.02, b = 1), # utility when sick
    u_S2 = truncnorm::rtruncnorm(n_sim, mean = 0.50, sd = 0.03, b = 1), # utility when sicker
    u_D = 0 , # utility when dead
    u_Trt = truncnorm::rtruncnorm(n_sim, mean = 0.95, sd = 0.02, b = 1) # utility when being treated
  )
  return(df_psa_params)
}
|
# PLS_glm_wvc: fits a PLS regression (or a PLS-GLM / PLS-POLR model) with up
# to nt components and returns predictions for dataPredictY, optionally with
# the raw and/or standardized coefficients.
#
# Arguments:
#   dataY, dataX     response and predictor data (data frame / matrix)
#   nt               maximum number of components to extract
#   dataPredictY     new data to predict (defaults to dataX)
#   modele           "pls", "pls-glm-<family>" or "pls-glm-polr"
#   family           glm family, only with modele = "pls-glm-family"
#   scaleX, scaleY   center/scale predictors and response; scaleY = NULL means
#                    TRUE for plain PLS and FALSE for GLM/polr models
#   keepcoeffs, keepstd.coeffs  also return the (standardized) coefficients
#   tol_Xi           tolerance used to detect degenerate residual X columns
#   weights          optional observation weights (plain PLS only)
#   method           link used by MASS::polr for "pls-glm-polr"
#   verbose          print progress information
#
# Returns a list with valsPredict and, on request, coeffs / std.coeffs.
PLS_glm_wvc <- function(dataY,dataX,nt=2,dataPredictY=dataX,modele="pls",family=NULL,scaleX=TRUE,scaleY=NULL,keepcoeffs=FALSE,keepstd.coeffs=FALSE,tol_Xi=10^(-12),weights,method="logistic",verbose=TRUE) {
##################################################
#                                                #
#    Initialization and formatting the inputs    #
#                                                #
##################################################
if(verbose){cat("____************************************************____\n")}
# FIX: the original guards did `return(...); cat(...); stop()`, so the
# diagnostic message and the stop() were unreachable; print the message
# before returning the empty list.
if(any(apply(is.na(dataX),MARGIN=2,"all"))){cat("One of the columns of dataX is completely filled with missing data"); return(vector("list",0))}
if(any(apply(is.na(dataX),MARGIN=1,"all"))){cat("One of the rows of dataX is completely filled with missing data"); return(vector("list",0))}
if(identical(dataPredictY,dataX)){PredYisdataX <- TRUE} else {PredYisdataX <- FALSE}
if(!PredYisdataX){
if(any(apply(is.na(dataPredictY),MARGIN=2,"all"))){cat("One of the columns of dataPredictY is completely filled with missing data"); return(vector("list",0))}
if(any(apply(is.na(dataPredictY),MARGIN=1,"all"))){cat("One of the rows of dataPredictY is completely filled with missing data"); return(vector("list",0))}
}
if(missing(weights)){NoWeights=TRUE} else {if(all(weights==rep(1,length(dataY)))){NoWeights=TRUE} else {NoWeights=FALSE}}
if(any(is.na(dataX))) {na.miss.X <- TRUE} else na.miss.X <- FALSE
if(any(is.na(dataY))) {na.miss.Y <- TRUE} else na.miss.Y <- FALSE
if(any(is.na(dataPredictY))) {na.miss.PredictY <- TRUE} else {na.miss.PredictY <- FALSE}
if(na.miss.X|na.miss.Y){naive=TRUE; if(verbose){cat(paste("Only naive DoF can be used with missing data\n",sep=""))}; if(!NoWeights){if(verbose){cat(paste("Weights cannot be used with missing data\n",sep=""))}}}
if(!NoWeights){naive=TRUE; if(verbose){cat(paste("Only naive DoF can be used with weighted PLS\n",sep=""))}}
if (!is.data.frame(dataX)) {dataX <- data.frame(dataX)}
if (is.null(modele) & !is.null(family)) {modele<-"pls-glm-family"}
if (!(modele %in% c("pls","pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr"))) {print(modele);stop("'modele' not recognized")}
if (!(modele %in% "pls-glm-family") & !is.null(family)) {stop("Set 'modele=pls-glm-family' to use the family option")}
if (modele=="pls") {family<-NULL}
if (modele=="pls-glm-Gamma") {family<-Gamma(link = "inverse")}
if (modele=="pls-glm-gaussian") {family<-gaussian(link = "identity")}
if (modele=="pls-glm-inverse.gaussian") {family<-inverse.gaussian(link = "1/mu^2")}
if (modele=="pls-glm-logistic") {family<-binomial(link = "logit")}
if (modele=="pls-glm-poisson") {family<-poisson(link = "log")}
if (modele=="pls-glm-polr") {family<-NULL}
if (!is.null(family)) {
if (is.character(family)) {family <- get(family, mode = "function", envir = parent.frame(n=sys.nframe()))}
if (is.function(family)) {family <- family()}
if (is.language(family)) {family <- eval(family)}
}
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {if(verbose){print(family)}}
if (modele %in% c("pls-glm-polr")) {if(verbose){cat("\nModel:", modele, "\n");cat("Method:", method, "\n\n")}}
if (modele=="pls") {if(verbose){cat("\nModel:", modele, "\n\n")}}
# FIX: removed the unconditional `scaleY <- NULL` that silently discarded the
# user-supplied scaleY argument; the NULL default still resolves to TRUE for
# plain PLS and FALSE for GLM/polr models, as before.
if (is.null(scaleY)) {
if (!(modele %in% c("pls"))) {scaleY <- FALSE} else {scaleY <- TRUE}
}
if (scaleY) {if(NoWeights){RepY <- scale(dataY)} else {meanY <- weighted.mean(dataY,weights); stdevY <- sqrt((length(dataY)-1)/length(dataY)*weighted.mean((dataY-meanY)^2,weights)); RepY <- (dataY-meanY)/stdevY; attr(RepY,"scaled:center") <- meanY ; attr(RepY,"scaled:scale") <- stdevY}}
else {
RepY <- dataY
attr(RepY,"scaled:center") <- 0
attr(RepY,"scaled:scale") <- 1
}
if (scaleX) {if(NoWeights){ExpliX <- scale(dataX)} else {meanX <- apply(dataX,2,weighted.mean,weights); stdevX <- sqrt((length(dataY)-1)/length(dataY)*apply((sweep(dataX,2,meanX))^2,2,weighted.mean,weights)); ExpliX <- sweep(sweep(dataX, 2, meanX), 2 ,stdevX, "/"); attr(ExpliX,"scaled:center") <- meanX ; attr(ExpliX,"scaled:scale") <- stdevX}
if(PredYisdataX){PredictY <- ExpliX} else {PredictY <- sweep(sweep(dataPredictY, 2, attr(ExpliX,"scaled:center")), 2 ,attr(ExpliX,"scaled:scale"), "/")}
}
else {
ExpliX <- dataX
attr(ExpliX,"scaled:center") <- rep(0,ncol(dataX))
attr(ExpliX,"scaled:scale") <- rep(1,ncol(dataX))
PredictY <- (dataPredictY)
}
if(is.null(colnames(ExpliX))){colnames(ExpliX)<-paste("X",1:ncol(ExpliX),sep=".")}
if(is.null(rownames(ExpliX))){rownames(ExpliX)<-1:nrow(ExpliX)}
# Missingness masks: TRUE where a value is observed
XXNA <- !(is.na(ExpliX))
YNA <- !(is.na(RepY))
if(PredYisdataX){PredictYNA <- XXNA} else {PredictYNA <- !is.na(PredictY)}
# Zero-filled copies so matrix products over missing entries contribute 0
ExpliXwotNA <- as.matrix(ExpliX)
ExpliXwotNA[!XXNA] <- 0
XXwotNA <- as.matrix(ExpliX)
XXwotNA[!XXNA] <- 0
dataXwotNA <- as.matrix(dataX)
dataXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(RepY)
YwotNA[!YNA] <- 0
dataYwotNA <- as.matrix(dataY)
dataYwotNA[!YNA] <- 0
if(PredYisdataX){PredictYwotNA <- XXwotNA} else {
PredictYwotNA <- as.matrix(PredictY)
PredictYwotNA [is.na(PredictY)] <- 0
}
if (modele %in% "pls-glm-polr") {
dataY <- as.factor(dataY)
YwotNA <- as.factor(YwotNA)}
res <- list(nr=nrow(ExpliX),nc=ncol(ExpliX),ww=NULL,wwnorm=NULL,wwetoile=NULL,tt=NULL,pp=NULL,CoeffC=NULL,uscores=NULL,YChapeau=NULL,residYChapeau=NULL,RepY=RepY,na.miss.Y=na.miss.Y,YNA=YNA,residY=RepY,ExpliX=ExpliX,na.miss.X=na.miss.X,XXNA=XXNA,residXX=ExpliX,PredictY=PredictYwotNA,RSS=rep(NA,nt),RSSresidY=rep(NA,nt),R2=rep(NA,nt),R2residY=rep(NA,nt),press.ind=NULL,press.tot=NULL,Q2cum=rep(NA, nt),family=family,ttPredictY = NULL,typeVC="none",listValsPredictY=NULL)
if(NoWeights){res$weights<-rep(1L,res$nr)} else {res$weights<-weights}
res$temppred <- NULL
##############################################
######              PLS              ######
##############################################
if (modele %in% "pls") {
if (scaleY) {res$YChapeau=rep(attr(RepY,"scaled:center"),nrow(ExpliX))
res$residYChapeau=rep(0,nrow(ExpliX))}
else
{res$YChapeau=rep(mean(RepY),nrow(ExpliX))
res$residYChapeau=rep(mean(RepY),nrow(ExpliX))}
}
################################################
################################################
##                                            ##
##  Beginning of the loop for the components  ##
##                                            ##
################################################
################################################
res$computed_nt <- 0
break_nt <- FALSE
break_nt_vc <- FALSE
for (kk in 1:nt) {
temptest <- sqrt(colSums(res$residXX^2, na.rm=TRUE))
if(any(temptest<tol_Xi)) {
break_nt <- TRUE
# FIX: the is.null() test was inverted -- report the offending column names
# when they exist and fall back to the column indices otherwise.
if (!is.null(names(which(temptest<tol_Xi)))) {
if(verbose){cat(paste("Warning : ",paste(names(which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))}
} else {
if(verbose){cat(paste("Warning : ",paste((which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))}
}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
res$computed_nt <- kk
XXwotNA <- as.matrix(res$residXX)
XXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(res$residY)
YwotNA[!YNA] <- 0
tempww <- rep(0,res$nc)
##############################################
#                                            #
#     Weight computation for each model      #
#                                            #
##############################################
##############################################
######              PLS              ######
##############################################
if (modele %in% "pls") {
if(NoWeights){
tempww <- t(XXwotNA)%*%YwotNA/(t(XXNA)%*%YwotNA^2)
}
if(!NoWeights){
tempww <- t(XXwotNA*weights)%*%YwotNA/(t(XXNA*weights)%*%YwotNA^2)
}
}
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
XXwotNA[!XXNA] <- NA
for (jj in 1:(res$nc)) {
tempww[jj] <- coef(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))[kk+1]
}
XXwotNA[!XXNA] <- 0
rm(jj)}
##############################################
######           PLS-GLM-POLR        ######
##############################################
if (modele %in% c("pls-glm-polr")) {
YwotNA <- as.factor(YwotNA)
XXwotNA[!XXNA] <- NA
# (removed `library(MASS)`: MASS::polr is already called with an explicit
# namespace, so attaching the whole package was an unnecessary side effect)
tts <- res$tt
for (jj in 1:(res$nc)) {
tempww[jj] <- -1*MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,method=method)$coef[kk]
}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
##############################################
#                                            #
# Computation of the components (model free) #
#                                            #
##############################################
tempwwnorm <- tempww/sqrt(drop(crossprod(tempww)))
temptt <- XXwotNA%*%tempwwnorm/(XXNA%*%(tempwwnorm^2))
temppp <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
temppp[jj] <- crossprod(temptt,XXwotNA[,jj])/drop(crossprod(XXNA[,jj],temptt^2))
}
res$residXX <- XXwotNA-temptt%*%temppp
if (na.miss.X & !na.miss.Y) {
for (ii in 1:res$nr) {
if(rcond(t(cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
if(verbose){cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE] < 10^{-12}\n",sep=""))}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
}
rm(ii)
if(break_nt) {break}
}
if(!PredYisdataX){
if (na.miss.PredictY & !na.miss.Y) {
for (ii in 1:nrow(PredictYwotNA)) {
if(rcond(t(cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
if(verbose){cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],])%*%cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],] < 10^{-12}\n",sep=""))}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
}
rm(ii)
if(break_nt) {break}
}
}
res$ww <- cbind(res$ww,tempww)
res$wwnorm <- cbind(res$wwnorm,tempwwnorm)
res$tt <- cbind(res$tt,temptt)
res$pp <- cbind(res$pp,temppp)
##############################################
#                                            #
#      Computation of the coefficients       #
#      of the model with kk components       #
#                                            #
##############################################
##############################################
######              PLS              ######
##############################################
if (modele == "pls") {
if (kk==1) {
tempCoeffC <- solve(t(res$tt[YNA])%*%res$tt[YNA])%*%t(res$tt[YNA])%*%YwotNA[YNA]
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- 0
} else {
if (!(na.miss.X | na.miss.Y)) {
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
else
{
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- diag(res$CoeffCFull)
res$CoeffConstante <- tempCoeffConstante
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
if (kk==1) {
tempconstglm <- glm(YwotNA~1,family=family)
res$Coeffsmodel_vals <- rbind(summary(tempconstglm)$coefficients,matrix(rep(NA,4*nt),ncol=4))
rm(tempconstglm)
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
######           PLS-GLM-POLR        ######
##############################################
if (modele %in% c("pls-glm-polr")) {
if (kk==1) {
tempconstpolr <- MASS::polr(YwotNA~1,na.action=na.exclude,Hess=TRUE,method=method)
res$Coeffsmodel_vals <- rbind(summary(tempconstpolr)$coefficients,matrix(rep(NA,3*nt),ncol=3))
rm(tempconstpolr)
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- matrix(c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)),ncol=1)
res$CoeffConstante <- tempCoeffConstante
} else {
if (!(na.miss.X | na.miss.Y)) {
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
else
{
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- as.matrix(rbind(as.matrix(tempCoeffConstante),res$wwetoile%*%res$CoeffC))
rownames(res$Std.Coeffs) <- c(names(tempregpolr$zeta),colnames(ExpliX))
}
##############################################
#                                            #
#       Prediction of the components         #
#     as if missing values (model free)      #
#       For cross-validating the GLM         #
#                                            #
##############################################
if (!(na.miss.X | na.miss.Y)) {
##############################################
#                                            #
#             Cross validation               #
#           without missing value            #
#                                            #
##############################################
##############################################
######              PLS              ######
##############################################
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
}
##############################################
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
}
##############################################
######           PLS-POLR            ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
}
else {
if (na.miss.X & !na.miss.Y) {
##############################################
#                                            #
#             Cross validation               #
#           with missing value(s)            #
#                                            #
##############################################
if (kk==1) {
if(verbose){cat("____There are some NAs in X but not in Y____\n")}
}
##############################################
######              PLS              ######
##############################################
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
}
##############################################
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
}
##############################################
######           PLS-POLR            ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
}
else {
if (kk==1) {
if(verbose){cat("____There are some NAs both in X and Y____\n")}
}
}
}
##############################################
#                                            #
#      Update and end of loop cleaning       #
#        (Especially useful for PLS)         #
#                                            #
##############################################
##############################################
######              PLS              ######
##############################################
if (modele == "pls") {
res$uscores <- cbind(res$uscores,res$residY/res$CoeffC[kk])
res$residY <- res$residY - res$tt%*%tempCoeffC
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
######           PLS-GLM-POLR        ######
##############################################
if (modele %in% c("pls-glm-polr")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
}
if(res$computed_nt==0){
if(verbose){cat("No component could be extracted please check the data for NA only lines or columns\n")}; stop()
}
##############################################
#                                            #
#           Predicting components            #
#                                            #
##############################################
if (!(na.miss.PredictY | na.miss.Y)) {
if(kk==1){
if(verbose){cat("____Predicting X without NA neither in X nor in Y____\n")}
}
res$ttPredictY <- PredictYwotNA%*%res$wwetoile
colnames(res$ttPredictY) <- paste("tt",1:kk,sep="")
}
else {
if (na.miss.PredictY & !na.miss.Y) {
if(kk==1){
if(verbose){cat("____Predicting X with NA in X and not in Y____\n")}
}
res$ttPredictY <- NULL
for (ii in 1:nrow(PredictYwotNA)) {
res$ttPredictY <- rbind(res$ttPredictY,t(solve(t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%res$pp[PredictYNA[ii,],,drop=FALSE])%*%t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%(PredictYwotNA[ii,])[PredictYNA[ii,]]))
}
colnames(res$ttPredictY) <- paste("tt",1:kk,sep="")
}
else {
if(kk==1){
if(verbose){cat("____There are some NAs both in X and Y____\n")}
}
}
}
##############################################
#                                            #
#          Computing RSS, PRESS,             #
#           Chi2, Q2 and Q2cum               #
#                                            #
##############################################
##############################################
######              PLS              ######
##############################################
##############################################
######           PLS-GLM             ######
##############################################
##############################################
######           PLS-GLM-POLR        ######
##############################################
##########################################
#                                        #
#          Predicting responses          #
#                                        #
##########################################
##############################################
######              PLS              ######
##############################################
if (modele == "pls") {
res$listValsPredictY <- cbind(res$listValsPredictY,attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$ttPredictY%*%res$CoeffC)
}
##############################################
######           PLS-GLM             ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
tt <- res$ttPredictY
res$listValsPredictY <- cbind(res$listValsPredictY,predict(object=tempregglm,newdata=data.frame(tt),type = "response"))
}
##############################################
######           PLS-GLM-POLR        ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tts <- res$ttPredictY
if(kk==1){
res$listValsPredictY <- list(predict(tempregpolr,predict(tempregpolr, data.frame(tts=I(tts))),type="probs"))
} else {
res$listValsPredictY <- c(res$listValsPredictY,list(predict(tempregpolr,predict(tempregpolr, data.frame(tts=I(tts))),type="probs")))
}
attr(res$listValsPredictY,"numberlevels") <- nlevels(dataY)
attr(res$listValsPredictY,"modele") <- modele
}
if(verbose){cat("____Component____",kk,"____\n")}
}
##############################################
##############################################
##                                          ##
##    End of the loop on the components     ##
##                                          ##
##############################################
##############################################
if(verbose){cat("****________________________________________________****\n")}
if(verbose){cat("\n")}
if (!keepcoeffs) {
if (!keepstd.coeffs) {return(list(valsPredict=res$listValsPredictY))} else {return(list(valsPredict=res$listValsPredictY, std.coeffs=res$Std.Coeffs))}}
else {
if (!keepstd.coeffs) {return(list(valsPredict=res$listValsPredictY, coeffs=res$Coeffs))} else {return(list(valsPredict=res$listValsPredictY, coeffs=res$Coeffs, std.coeffs=res$Std.Coeffs))}
}
}
| /plsRglm/R/PLS_glm_wvc.R | no_license | ingted/R-Examples | R | false | false | 26,798 | r | PLS_glm_wvc <- function(dataY,dataX,nt=2,dataPredictY=dataX,modele="pls",family=NULL,scaleX=TRUE,scaleY=NULL,keepcoeffs=FALSE,keepstd.coeffs=FALSE,tol_Xi=10^(-12),weights,method="logistic",verbose=TRUE) {
##################################################
# #
# Initialization and formatting the inputs #
# #
##################################################
if(verbose){cat("____************************************************____\n")}
if(any(apply(is.na(dataX),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataX is completely filled with missing data"); stop()}
if(any(apply(is.na(dataX),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataX is completely filled with missing data"); stop()}
if(identical(dataPredictY,dataX)){PredYisdataX <- TRUE} else {PredYisdataX <- FALSE}
if(!PredYisdataX){
if(any(apply(is.na(dataPredictY),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataPredictY is completely filled with missing data"); stop()}
if(any(apply(is.na(dataPredictY),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataPredictY is completely filled with missing data"); stop()}
}
if(missing(weights)){NoWeights=TRUE} else {if(all(weights==rep(1,length(dataY)))){NoWeights=TRUE} else {NoWeights=FALSE}}
if(any(is.na(dataX))) {na.miss.X <- TRUE} else na.miss.X <- FALSE
if(any(is.na(dataY))) {na.miss.Y <- TRUE} else na.miss.Y <- FALSE
if(any(is.na(dataPredictY))) {na.miss.PredictY <- TRUE} else {na.miss.PredictY <- FALSE}
if(na.miss.X|na.miss.Y){naive=TRUE; if(verbose){cat(paste("Only naive DoF can be used with missing data\n",sep=""))}; if(!NoWeights){if(verbose){cat(paste("Weights cannot be used with missing data\n",sep=""))}}}
if(!NoWeights){naive=TRUE; if(verbose){cat(paste("Only naive DoF can be used with weighted PLS\n",sep=""))}}
if (!is.data.frame(dataX)) {dataX <- data.frame(dataX)}
if (is.null(modele) & !is.null(family)) {modele<-"pls-glm-family"}
if (!(modele %in% c("pls","pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr"))) {print(modele);stop("'modele' not recognized")}
if (!(modele %in% "pls-glm-family") & !is.null(family)) {stop("Set 'modele=pls-glm-family' to use the family option")}
if (modele=="pls") {family<-NULL}
if (modele=="pls-glm-Gamma") {family<-Gamma(link = "inverse")}
if (modele=="pls-glm-gaussian") {family<-gaussian(link = "identity")}
if (modele=="pls-glm-inverse.gaussian") {family<-inverse.gaussian(link = "1/mu^2")}
if (modele=="pls-glm-logistic") {family<-binomial(link = "logit")}
if (modele=="pls-glm-poisson") {family<-poisson(link = "log")}
if (modele=="pls-glm-polr") {family<-NULL}
if (!is.null(family)) {
if (is.character(family)) {family <- get(family, mode = "function", envir = parent.frame(n=sys.nframe()))}
if (is.function(family)) {family <- family()}
if (is.language(family)) {family <- eval(family)}
}
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {if(verbose){print(family)}}
if (modele %in% c("pls-glm-polr")) {if(verbose){cat("\nModel:", modele, "\n");cat("Method:", method, "\n\n")}}
if (modele=="pls") {if(verbose){cat("\nModel:", modele, "\n\n")}}
scaleY <- NULL
if (is.null(scaleY)) {
if (!(modele %in% c("pls"))) {scaleY <- FALSE} else {scaleY <- TRUE}
}
if (scaleY) {if(NoWeights){RepY <- scale(dataY)} else {meanY <- weighted.mean(dataY,weights); stdevY <- sqrt((length(dataY)-1)/length(dataY)*weighted.mean((dataY-meanY)^2,weights)); RepY <- (dataY-meanY)/stdevY; attr(RepY,"scaled:center") <- meanY ; attr(RepY,"scaled:scale") <- stdevY}}
else {
RepY <- dataY
attr(RepY,"scaled:center") <- 0
attr(RepY,"scaled:scale") <- 1
}
if (scaleX) {if(NoWeights){ExpliX <- scale(dataX)} else {meanX <- apply(dataX,2,weighted.mean,weights); stdevX <- sqrt((length(dataY)-1)/length(dataY)*apply((sweep(dataX,2,meanX))^2,2,weighted.mean,weights)); ExpliX <- sweep(sweep(dataX, 2, meanX), 2 ,stdevX, "/"); attr(ExpliX,"scaled:center") <- meanX ; attr(ExpliX,"scaled:scale") <- stdevX}
if(PredYisdataX){PredictY <- ExpliX} else {PredictY <- sweep(sweep(dataPredictY, 2, attr(ExpliX,"scaled:center")), 2 ,attr(ExpliX,"scaled:scale"), "/")}
}
else {
ExpliX <- dataX
attr(ExpliX,"scaled:center") <- rep(0,ncol(dataX))
attr(ExpliX,"scaled:scale") <- rep(1,ncol(dataX))
PredictY <- (dataPredictY)
}
if(is.null(colnames(ExpliX))){colnames(ExpliX)<-paste("X",1:ncol(ExpliX),sep=".")}
if(is.null(rownames(ExpliX))){rownames(ExpliX)<-1:nrow(ExpliX)}
XXNA <- !(is.na(ExpliX))
YNA <- !(is.na(RepY))
if(PredYisdataX){PredictYNA <- XXNA} else {PredictYNA <- !is.na(PredictY)}
ExpliXwotNA <- as.matrix(ExpliX)
ExpliXwotNA[!XXNA] <- 0
XXwotNA <- as.matrix(ExpliX)
XXwotNA[!XXNA] <- 0
dataXwotNA <- as.matrix(dataX)
dataXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(RepY)
YwotNA[!YNA] <- 0
dataYwotNA <- as.matrix(dataY)
dataYwotNA[!YNA] <- 0
if(PredYisdataX){PredictYwotNA <- XXwotNA} else {
PredictYwotNA <- as.matrix(PredictY)
PredictYwotNA [is.na(PredictY)] <- 0
}
if (modele %in% "pls-glm-polr") {
dataY <- as.factor(dataY)
YwotNA <- as.factor(YwotNA)}
res <- list(nr=nrow(ExpliX),nc=ncol(ExpliX),ww=NULL,wwnorm=NULL,wwetoile=NULL,tt=NULL,pp=NULL,CoeffC=NULL,uscores=NULL,YChapeau=NULL,residYChapeau=NULL,RepY=RepY,na.miss.Y=na.miss.Y,YNA=YNA,residY=RepY,ExpliX=ExpliX,na.miss.X=na.miss.X,XXNA=XXNA,residXX=ExpliX,PredictY=PredictYwotNA,RSS=rep(NA,nt),RSSresidY=rep(NA,nt),R2=rep(NA,nt),R2residY=rep(NA,nt),press.ind=NULL,press.tot=NULL,Q2cum=rep(NA, nt),family=family,ttPredictY = NULL,typeVC="none",listValsPredictY=NULL)
if(NoWeights){res$weights<-rep(1L,res$nr)} else {res$weights<-weights}
res$temppred <- NULL
##############################################
###### PLS ######
##############################################
if (modele %in% "pls") {
if (scaleY) {res$YChapeau=rep(attr(RepY,"scaled:center"),nrow(ExpliX))
res$residYChapeau=rep(0,nrow(ExpliX))}
else
{res$YChapeau=rep(mean(RepY),nrow(ExpliX))
res$residYChapeau=rep(mean(RepY),nrow(ExpliX))}
}
################################################
################################################
## ##
## Beginning of the loop for the components ##
## ##
################################################
################################################
res$computed_nt <- 0
break_nt <- FALSE
break_nt_vc <- FALSE
for (kk in 1:nt) {
temptest <- sqrt(colSums(res$residXX^2, na.rm=TRUE))
if(any(temptest<tol_Xi)) {
break_nt <- TRUE
if (is.null(names(which(temptest<tol_Xi)))) {
if(verbose){cat(paste("Warning : ",paste(names(which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))}
} else {
if(verbose){cat(paste("Warning : ",paste((which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))}
}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
res$computed_nt <- kk
XXwotNA <- as.matrix(res$residXX)
XXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(res$residY)
YwotNA[!YNA] <- 0
tempww <- rep(0,res$nc)
##############################################
# #
# Weight computation for each model #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele %in% "pls") {
if(NoWeights){
tempww <- t(XXwotNA)%*%YwotNA/(t(XXNA)%*%YwotNA^2)
}
if(!NoWeights){
tempww <- t(XXwotNA*weights)%*%YwotNA/(t(XXNA*weights)%*%YwotNA^2)
}
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
XXwotNA[!XXNA] <- NA
for (jj in 1:(res$nc)) {
tempww[jj] <- coef(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))[kk+1]
}
XXwotNA[!XXNA] <- 0
rm(jj)}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
YwotNA <- as.factor(YwotNA)
XXwotNA[!XXNA] <- NA
library(MASS)
tts <- res$tt
for (jj in 1:(res$nc)) {
tempww[jj] <- -1*MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,method=method)$coef[kk]
}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
##############################################
# #
# Computation of the components (model free) #
# #
##############################################
tempwwnorm <- tempww/sqrt(drop(crossprod(tempww)))
temptt <- XXwotNA%*%tempwwnorm/(XXNA%*%(tempwwnorm^2))
temppp <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
temppp[jj] <- crossprod(temptt,XXwotNA[,jj])/drop(crossprod(XXNA[,jj],temptt^2))
}
res$residXX <- XXwotNA-temptt%*%temppp
if (na.miss.X & !na.miss.Y) {
for (ii in 1:res$nr) {
if(rcond(t(cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
if(verbose){cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE] < 10^{-12}\n",sep=""))}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
}
rm(ii)
if(break_nt) {break}
}
if(!PredYisdataX){
if (na.miss.PredictY & !na.miss.Y) {
for (ii in 1:nrow(PredictYwotNA)) {
if(rcond(t(cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
if(verbose){cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],])%*%cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],] < 10^{-12}\n",sep=""))}
if(verbose){cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))}
break
}
}
rm(ii)
if(break_nt) {break}
}
}
res$ww <- cbind(res$ww,tempww)
res$wwnorm <- cbind(res$wwnorm,tempwwnorm)
res$tt <- cbind(res$tt,temptt)
res$pp <- cbind(res$pp,temppp)
##############################################
# #
# Computation of the coefficients #
# of the model with kk components #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if (kk==1) {
tempCoeffC <- solve(t(res$tt[YNA])%*%res$tt[YNA])%*%t(res$tt[YNA])%*%YwotNA[YNA]
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- 0
} else {
if (!(na.miss.X | na.miss.Y)) {
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
else
{
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- diag(res$CoeffCFull)
res$CoeffConstante <- tempCoeffConstante
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
if (kk==1) {
tempconstglm <- glm(YwotNA~1,family=family)
res$Coeffsmodel_vals <- rbind(summary(tempconstglm)$coefficients,matrix(rep(NA,4*nt),ncol=4))
rm(tempconstglm)
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
if (kk==1) {
tempconstpolr <- MASS::polr(YwotNA~1,na.action=na.exclude,Hess=TRUE,method=method)
res$Coeffsmodel_vals <- rbind(summary(tempconstpolr)$coefficients,matrix(rep(NA,3*nt),ncol=3))
rm(tempconstpolr)
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- matrix(c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)),ncol=1)
res$CoeffConstante <- tempCoeffConstante
} else {
if (!(na.miss.X | na.miss.Y)) {
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
else
{
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- as.matrix(rbind(as.matrix(tempCoeffConstante),res$wwetoile%*%res$CoeffC))
rownames(res$Std.Coeffs) <- c(names(tempregpolr$zeta),colnames(ExpliX))
}
##############################################
# #
# Prediction of the components #
# as if missing values (model free) #
# For cross-validating the GLM #
# #
##############################################
if (!(na.miss.X | na.miss.Y)) {
##############################################
# #
# Cross validation #
# without missing value #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
}
##############################################
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
}
##############################################
###### PLS-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
}
else {
if (na.miss.X & !na.miss.Y) {
##############################################
# #
# Cross validation #
# with missing value(s) #
# #
##############################################
if (kk==1) {
if(verbose){cat("____There are some NAs in X but not in Y____\n")}
}
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
}
##############################################
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
}
##############################################
###### PLS-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
}
else {
if (kk==1) {
if(verbose){cat("____There are some NAs both in X and Y____\n")}
}
}
}
##############################################
# #
# Update and end of loop cleaning #
# (Especially useful for PLS) #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$uscores <- cbind(res$uscores,res$residY/res$CoeffC[kk])
res$residY <- res$residY - res$tt%*%tempCoeffC
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
}
if(res$computed_nt==0){
if(verbose){cat("No component could be extracted please check the data for NA only lines or columns\n")}; stop()
}
##############################################
# #
# Predicting components #
# #
##############################################
if (!(na.miss.PredictY | na.miss.Y)) {
if(kk==1){
if(verbose){cat("____Predicting X without NA neither in X nor in Y____\n")}
}
res$ttPredictY <- PredictYwotNA%*%res$wwetoile
colnames(res$ttPredictY) <- paste("tt",1:kk,sep="")
}
else {
if (na.miss.PredictY & !na.miss.Y) {
if(kk==1){
if(verbose){cat("____Predicting X with NA in X and not in Y____\n")}
}
res$ttPredictY <- NULL
for (ii in 1:nrow(PredictYwotNA)) {
res$ttPredictY <- rbind(res$ttPredictY,t(solve(t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%res$pp[PredictYNA[ii,],,drop=FALSE])%*%t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%(PredictYwotNA[ii,])[PredictYNA[ii,]]))
}
colnames(res$ttPredictY) <- paste("tt",1:kk,sep="")
}
else {
if(kk==1){
if(verbose){cat("____There are some NAs both in X and Y____\n")}
}
}
}
##############################################
# #
# Computing RSS, PRESS, #
# Chi2, Q2 and Q2cum #
# #
##############################################
##############################################
###### PLS ######
##############################################
##############################################
###### PLS-GLM ######
##############################################
##############################################
###### PLS-GLM-POLR ######
##############################################
##########################################
# #
# Predicting responses #
# #
##########################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$listValsPredictY <- cbind(res$listValsPredictY,attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$ttPredictY%*%res$CoeffC)
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
tt <- res$ttPredictY
res$listValsPredictY <- cbind(res$listValsPredictY,predict(object=tempregglm,newdata=data.frame(tt),type = "response"))
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tts <- res$ttPredictY
if(kk==1){
res$listValsPredictY <- list(predict(tempregpolr,predict(tempregpolr, data.frame(tts=I(tts))),type="probs"))
} else {
res$listValsPredictY <- c(res$listValsPredictY,list(predict(tempregpolr,predict(tempregpolr, data.frame(tts=I(tts))),type="probs")))
}
attr(res$listValsPredictY,"numberlevels") <- nlevels(dataY)
attr(res$listValsPredictY,"modele") <- modele
}
if(verbose){cat("____Component____",kk,"____\n")}
}
##############################################
##############################################
## ##
## End of the loop on the components ##
## ##
##############################################
##############################################
if(verbose){cat("****________________________________________________****\n")}
if(verbose){cat("\n")}
if (!keepcoeffs) {
if (!keepstd.coeffs) {return(list(valsPredict=res$listValsPredictY))} else {return(list(valsPredict=res$listValsPredictY, std.coeffs=res$Std.Coeffs))}}
else {
if (!keepstd.coeffs) {return(list(valsPredict=res$listValsPredictY, coeffs=res$Coeffs))} else {return(list(valsPredict=res$listValsPredictY, coeffs=res$Coeffs, std.coeffs=res$Std.Coeffs))}
}
}
|
library(kerasformula)

### Name: plot_confusion
### Title: plot_confusion
### Aliases: plot_confusion
### ** Examples

# Fit three small keras models on iris that are identical except for the
# hidden-layer activation function, then compare their confusion matrices.
# The whole example is guarded because a working keras/TensorFlow backend
# may not be available on the test machine.
if (is_keras_available()) {
  model_tanh <- kms(Species ~ ., iris,
                    seed = 1, units = 4, Nepochs = 5,
                    activation = "tanh", verbose = 0)
  model_softmax <- kms(Species ~ ., iris,
                       seed = 1, units = 4, Nepochs = 5,
                       activation = "softmax", verbose = 0)
  model_relu <- kms(Species ~ ., iris,
                    seed = 1, units = 4, Nepochs = 5,
                    activation = "relu", verbose = 0)

  # Side-by-side confusion-matrix comparison of the three fits.
  plot_confusion(model_tanh, model_softmax, model_relu,
                 title = "Species",
                 subtitle = "Activation Function Comparison")
}
| /data/genthat_extracted_code/kerasformula/examples/plot_confusion.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 779 | r | library(kerasformula)
### Name: plot_confusion
### Title: plot_confusion
### Aliases: plot_confusion
### ** Examples

# Fit three small keras models on iris that differ only in the hidden-layer
# activation, then plot their confusion matrices side by side.
# Guarded because keras/TensorFlow may not be installed or configured here.
if (is_keras_available()) {
  model_tanh <- kms(Species ~ ., iris,
                    seed = 1, units = 4, Nepochs = 5,
                    activation = "tanh", verbose = 0)
  model_softmax <- kms(Species ~ ., iris,
                       seed = 1, units = 4, Nepochs = 5,
                       activation = "softmax", verbose = 0)
  model_relu <- kms(Species ~ ., iris,
                    seed = 1, units = 4, Nepochs = 5,
                    activation = "relu", verbose = 0)

  # Compare classification performance across the three activations.
  plot_confusion(model_tanh, model_softmax, model_relu,
                 title = "Species",
                 subtitle = "Activation Function Comparison")
}
|
library(SECP)

### Name: fds2s
### Title: Mass fractal dimension of sampling 2D clusters
### Aliases: fds2s
### ** Examples

# # # # # # # # # # # # # # # # #
# Example 1: Isotropic set cover
# # # # # # # # # # # # # # # # #
# pc = 0.592746 is used as a reference occupation probability; the two
# probabilities below bracket it from below (p1) and above (p2).
pc <- .592746
p1 <- pc - .03
p2 <- pc + .03
# Lattice size and its center coordinate (marked on the images below).
lx <- 33; ss <- (lx+1)/2
# Cluster frequency fields over n=100 realizations at each probability
# (fssi20: presumably a (1,0)-neighborhood site model -- see SECP docs).
rf1 <- fssi20(n=100, x=lx, p=p1)
rf2 <- fssi20(n=100, x=lx, p=p2)
# Isotropic set cover with k=9 nested subsets over the lattice.
bnd <- isc2s(k=9, x=dim(rf1))
# fds2s returns a fitted regression object (supports $model, predict(),
# confint() like lm); its slope estimates the mass fractal dimension.
fd1 <- fds2s(rfq=rf1, bnd=bnd)
fd2 <- fds2s(rfq=rf2, bnd=bnd)
# Extract the model frame columns: w (response) and r (regressor).
# Presumably log-mass vs log-radius of the cover sets -- confirm in SECP docs.
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
# Slightly padded grid of r values for drawing fitted lines and bands.
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
# 0.95 confidence interval of the slope (row 2 of confint), as title text.
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- y <- seq(lx)
# 2x2 layout: frequency images on top, regression diagnostics below.
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, y, rf1, zlim=c(0, .7), cex.main=1,
main=paste("Isotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p1, digits=3), sep=""))
# Overlay the cover-set rectangles and the lattice center lines.
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
image(x, y, rf2, zlim=c(0, .7), cex.main=1,
main=paste("Isotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
# Scatter of (r, w) with the fitted line and 0.95 confidence band.
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
# # # # # # # # # # # # # # # # # #
# Example 2: Anisotropic set cover, dir=2
# # # # # # # # # # # # # # # # # #
# Same probabilities bracketing pc as in Example 1.
pc <- .592746
p1 <- pc - .03
p2 <- pc + .03
lx <- 33; ss <- (lx+1)/2
# Target set of seed sites along one edge region (drives the anisotropy).
ssy <- seq(lx+2, 2*lx-1)
rf1 <- fssi20(n=100, x=lx, p=p1, set=ssy, all=FALSE)
rf2 <- fssi20(n=100, x=lx, p=p2, set=ssy, all=FALSE)
# Anisotropic set cover oriented along direction 2.
bnd <- asc2s(k=9, x=dim(rf1), dir=2)
fd1 <- fds2s(rfq=rf1, bnd=bnd)
fd2 <- fds2s(rfq=rf2, bnd=bnd)
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- y <- seq(lx)
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, y, rf1, zlim=c(0, .7), cex.main=1,
main=paste("Anisotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
# Only a vertical center line here: the cover is anisotropic along dir=2.
abline(v=ss, lty=2)
image(x, y, rf2, zlim=c(0, .7), cex.main=1,
main=paste("Anisotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
| /data/genthat_extracted_code/SECP/examples/fds2s.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,958 | r | library(SECP)
### Name: fds2s
### Title: Mass fractal dimension of sampling 2D clusters
### Aliases: fds2s
### ** Examples

# # # # # # # # # # # # # # # # #
# Example 1: Isotropic set cover
# # # # # # # # # # # # # # # # #
# pc = 0.592746 is the reference occupation probability; p1 and p2
# bracket it from below and above.
pc <- .592746
p1 <- pc - .03
p2 <- pc + .03
# Lattice size and its center coordinate (marked on the images below).
lx <- 33; ss <- (lx+1)/2
# Cluster frequency fields over n=100 realizations at each probability
# (fssi20: presumably a (1,0)-neighborhood site model -- see SECP docs).
rf1 <- fssi20(n=100, x=lx, p=p1)
rf2 <- fssi20(n=100, x=lx, p=p2)
# Isotropic set cover with k=9 nested subsets over the lattice.
bnd <- isc2s(k=9, x=dim(rf1))
# fds2s returns a fitted regression object (supports $model, predict(),
# confint() like lm); its slope estimates the mass fractal dimension.
fd1 <- fds2s(rfq=rf1, bnd=bnd)
fd2 <- fds2s(rfq=rf2, bnd=bnd)
# Model-frame columns: w (response) and r (regressor); presumably
# log-mass vs log-radius of the cover sets -- confirm in SECP docs.
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
# Slightly padded grid of r values for the fitted lines and bands.
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
# 0.95 confidence interval of the slope (row 2 of confint), as title text.
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- y <- seq(lx)
# 2x2 layout: frequency images on top, regression diagnostics below.
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, y, rf1, zlim=c(0, .7), cex.main=1,
main=paste("Isotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p1, digits=3), sep=""))
# Overlay the cover-set rectangles and the lattice center lines.
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
image(x, y, rf2, zlim=c(0, .7), cex.main=1,
main=paste("Isotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
# Scatter of (r, w) with the fitted line and 0.95 confidence band.
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
# # # # # # # # # # # # # # # # # #
# Example 2: Anisotropic set cover, dir=2
# # # # # # # # # # # # # # # # # #
# Same probabilities bracketing pc as in Example 1.
pc <- .592746
p1 <- pc - .03
p2 <- pc + .03
lx <- 33; ss <- (lx+1)/2
# Target set of seed sites along one edge region (drives the anisotropy).
ssy <- seq(lx+2, 2*lx-1)
rf1 <- fssi20(n=100, x=lx, p=p1, set=ssy, all=FALSE)
rf2 <- fssi20(n=100, x=lx, p=p2, set=ssy, all=FALSE)
# Anisotropic set cover oriented along direction 2.
bnd <- asc2s(k=9, x=dim(rf1), dir=2)
fd1 <- fds2s(rfq=rf1, bnd=bnd)
fd2 <- fds2s(rfq=rf2, bnd=bnd)
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- y <- seq(lx)
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, y, rf1, zlim=c(0, .7), cex.main=1,
main=paste("Anisotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
# Only a vertical center line here: the cover is anisotropic along dir=2.
abline(v=ss, lty=2)
image(x, y, rf2, zlim=c(0, .7), cex.main=1,
main=paste("Anisotropic set cover and\n",
"a 2D clusters frequency with\n",
"(1,0)-neighborhood and p=",
round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["y1",], bnd["x2",], bnd["y2",])
abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
main=paste("0.95 confidence interval for the mass\n",
"fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
############################################################################
##### Figure 6: Effect of Riot Destruction on Prosocial Behavior (IV) ######
############################################################################
rm(list=ls())
# load required libraries
library(AER)
library(ggplot2)
library(readstata13)
library(spdep) # conflict with older version of dplyr if error message is shown update dplyr to 0.8.0
library(tseries)
# read data
data <- read.dta13("./kyrgyzstan.dta")
##### Cleaning
# recode variables
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
data$pd_in <- as.integer(data$pd_in)
data$pd_out <- as.integer(data$pd_out)
# generate new variables
data$distance <- data$apc_min_distance
# rename variable
data$social_cap_retro <- data$leadership
# subset data set according to ethnic groups
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]
# scale variables
data_uzbek$pd_in_scale <- scale(data_uzbek$pd_in)
data_uzbek$dg_in_scale <- scale(data_uzbek$dg_in)
data_uzbek$pd_out_scale <- scale(data_uzbek$pd_out)
data_uzbek$dg_out_scale <- scale(data_uzbek$dg_out)
data_uzbek$cooperation_index <- rowSums(cbind(data_uzbek$pd_in_scale,
data_uzbek$dg_in_scale,
data_uzbek$pd_out_scale,
data_uzbek$dg_out_scale), na.rm=T)/4
##### Figure
# First stage #
data_uzbek$distance <- 1-data_uzbek$apc_min_distance
dataAgg <- aggregate(data_uzbek[,c("affected", "distance")],
list(data_uzbek$id_psu),
mean)
# run first stage regressions for individual and aggregate data
first_stage_ind <- lm(affected ~ distance, data=data_uzbek)
first_stage_psu <- lm(affected ~ distance, data=dataAgg)
# IV models
# run iv regressions
model11 <- lm(pd_in_scale ~ distance , data=data_uzbek)
model12 <- ivreg(pd_in_scale ~ affected | apc_min_distance, data = data_uzbek)
model21 <- lm(dg_in_scale ~ distance , data=data_uzbek)
model22 <- ivreg(dg_in_scale ~ affected | apc_min_distance , data = data_uzbek)
model31 <- lm(pd_out_scale ~ distance , data=data_uzbek)
model32 <- ivreg(pd_out_scale ~ affected | apc_min_distance , data = data_uzbek)
model41 <- lm(dg_out_scale ~ distance , data=data_uzbek)
model42 <- ivreg(dg_out_scale ~ affected | apc_min_distance , data = data_uzbek)
model51 <- lm(cooperation_index ~ distance , data=data_uzbek)
model52 <- ivreg(cooperation_index ~ affected | apc_min_distance , data = data_uzbek)
# aggregate data
dataAgg <- aggregate(data_uzbek[,c("apc_min_distance", "distance", "pd_in_scale", "dg_in_scale",
"pd_out_scale", "dg_out_scale", "cooperation_index", "affected",
"economy_index", "state_index", "social_cap_retro")],
list(data_uzbek$id_psu),
mean)
names(dataAgg)[1] <- "psu"
dataAgg <- dataAgg[!is.na(dataAgg$social_cap_retro),]
# load and edit the travel time matrix
ttmat <- read.matrix("z.travel_time.csv", header = T, sep = ";", skip = 0)
row.names(ttmat) <- ttmat[,1]
ttmat <- ttmat[,2:ncol(ttmat)]
ttmat <- ttmat[row.names(ttmat) %in% dataAgg$psu, colnames(ttmat) %in% dataAgg$psu]
ttmat_sort <- ttmat[order(as.numeric(row.names(ttmat))),]
ttmat_sort <- ttmat_sort[,order(as.numeric(colnames(ttmat_sort)))]
ttlistw <- mat2listw(ttmat_sort)
# formulas
f1 <- pd_in_scale ~ distance
f2 <- dg_in_scale ~ distance
f3 <- pd_out_scale ~ distance
f4 <- dg_out_scale ~ distance
f5 <- cooperation_index ~ distance
#basic OLS models
model1 <- lm(pd_in_scale ~ distance , data=dataAgg)
model2 <- lm(dg_in_scale ~ distance , data=dataAgg)
model3 <- lm(pd_out_scale ~ distance , data=dataAgg)
model4 <- lm(dg_out_scale ~ distance , data=dataAgg)
model5 <- lm(cooperation_index ~ distance , data=dataAgg)
# spatial models
dataAgg <- dataAgg[order(dataAgg$psu),]
model13 <- errorsarlm(f1, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model23 <- errorsarlm(f2, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model33 <- errorsarlm(f3, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model43 <- errorsarlm(f4, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model53 <- errorsarlm(f5, data=dataAgg, ttlistw, tol.solve=1.0e-30)
# extract coefficients and standard errors
model11Frame <- data.frame(Variable = rownames(summary(model11)$coef),
Coefficient = summary(model11)$coef[, 1],
SE = summary(model11)$coef[, 2],
modelName = "PD ingroup")[2,]
model21Frame <- data.frame(Variable = rownames(summary(model21)$coef),
Coefficient = summary(model21)$coef[, 1],
SE = summary(model21)$coef[, 2],
modelName = "DG ingroup")[2,]
model31Frame <- data.frame(Variable = rownames(summary(model31)$coef),
Coefficient = summary(model31)$coef[, 1],
SE = summary(model31)$coef[, 2],
modelName = "PD outgroup")[2,]
model41Frame <- data.frame(Variable = rownames(summary(model41)$coef),
Coefficient = summary(model41)$coef[, 1],
SE = summary(model41)$coef[, 2],
modelName = "DG outgroup")[2,]
model51Frame <- data.frame(Variable = rownames(summary(model51)$coef),
Coefficient = summary(model51)$coef[, 1],
SE = summary(model51)$coef[, 2],
modelName = "Index")[2,]
model12Frame <- data.frame(Variable = rownames(summary(model12)$coef),
Coefficient = summary(model12)$coef[, 1],
SE = summary(model12)$coef[, 2],
modelName = "PD ingroup")[2,]
model22Frame <- data.frame(Variable = rownames(summary(model22)$coef),
Coefficient = summary(model22)$coef[, 1],
SE = summary(model22)$coef[, 2],
modelName = "DG ingroup")[2,]
model32Frame <- data.frame(Variable = rownames(summary(model32)$coef),
Coefficient = summary(model32)$coef[, 1],
SE = summary(model32)$coef[, 2],
modelName = "PD outgroup")[2,]
model42Frame <- data.frame(Variable = rownames(summary(model42)$coef),
Coefficient = summary(model42)$coef[, 1],
SE = summary(model42)$coef[, 2],
modelName = "DG outgroup")[2,]
model52Frame <- data.frame(Variable = rownames(summary(model52)$coef),
Coefficient = summary(model52)$coef[, 1],
SE = summary(model52)$coef[, 2],
modelName = "Index")[2,]
model13Frame <- data.frame(Variable = "affected",
Coefficient = model13$coefficients[2],
SE = model13$rest.se[2],
modelName = "Prisoner's Dilemma ingroup")
model23Frame <- data.frame(Variable = "affected",
Coefficient = model23$coefficients[2],
SE = model23$rest.se[2],
modelName = "Dictator Game ingroup")
model33Frame <- data.frame(Variable = "affected",
Coefficient = model33$coefficients[2],
SE = model33$rest.se[2],
modelName = "Prisoner's Dilemma outgroup")
model43Frame <- data.frame(Variable = "affected",
Coefficient = model43$coefficients[2],
SE = model43$rest.se[2],
modelName = "Dictator Game outgroup")
model53Frame <- data.frame(Variable = "affected",
Coefficient = model53$coefficients[2],
SE = model53$rest.se[2],
modelName = "Index")
# bind all models to dataframes
# Instruments
allModelFrame1 <- data.frame(rbind(model11Frame, model21Frame, model31Frame, model41Frame, model51Frame))
allModelFrame1$Variable <- c(1,2,3,4,5)
allModelFrame1$Variable <- factor(allModelFrame1$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame1$Variable) <- gsub(" ", "\n", levels(allModelFrame1$Variable))
# 2SLS
allModelFrame2 <- data.frame(rbind(model12Frame, model22Frame, model32Frame, model42Frame, model52Frame))
allModelFrame2$Variable <- c(1,2,3,4,5)
allModelFrame2$Variable <- factor(allModelFrame2$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame2$Variable) <- gsub(" ", "\n", levels(allModelFrame2$Variable))
# Instrument (SAM)
allModelFrame3 <- data.frame(rbind(model13Frame, model23Frame, model33Frame, model43Frame, model53Frame))
allModelFrame3$Variable <- c(1,2,3,4,5)
allModelFrame3$Variable <- factor(allModelFrame3$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame3$Variable) <- gsub(" ", "\n", levels(allModelFrame3$Variable))
# rowbind all models
allModelFram <- rbind(allModelFrame1, allModelFrame2, allModelFrame3)
allModelFram$matrix_style <- rep(c("Instrument", "2SLS", "Instrument (SAM)"),each=5)
# set multipliers for confidence intervals
interval1 <- -qnorm((1-0.90)/2) # 90% multiplier
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
# set up dodge
pd = position_dodge(0.5)
# build plot
figure6 <- ggplot(allModelFram, aes(shape=matrix_style)) +
geom_hline(yintercept = 0, colour = gray(1/2), lty = 2) +
geom_linerange(aes(x = Variable, ymin = Coefficient - SE*interval1,
ymax = Coefficient + SE*interval1),
lwd = 1, position = pd) +
geom_linerange(aes(x = Variable, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2),
lwd = 1/4, position = pd) +
geom_point(aes(x = Variable, y = Coefficient, shape = matrix_style),
position = pd,fill = "WHITE", size = 3) +
coord_flip(ylim = c(-0.95,0.22)) + theme_bw() +
theme(legend.position="bottom") +
scale_shape_manual(values = c(23, 24, 25), name ="") +
ylab("") + xlab("") +
theme(text = element_text(size=18, family="Times"))
# plot output
figure6
| /Original Paper and Code/Original Code/Figure6.R | no_license | CianStryker/Prosocial_Behavior | R | false | false | 10,677 | r | ############################################################################
##### Figure 6: Effect of Riot Destruction on Prosocial Behavior (IV) ######
############################################################################
# Replication script: builds the coefficient plot comparing reduced-form,
# 2SLS and spatial (SAM) estimates of riot destruction on prosociality.
# NOTE(review): rm(list=ls()) wipes the whole workspace -- acceptable for a
# standalone replication run, but avoid sourcing this interactively.
rm(list=ls())
# load required libraries
library(AER)
library(ggplot2)
library(readstata13)
library(spdep) # conflict with older version of dplyr if error message is shown update dplyr to 0.8.0
library(tseries)
# read data
data <- read.dta13("./kyrgyzstan.dta")
##### Cleaning
# recode variables
# as.integer() then -1: presumably converts a 2-level factor (coded 1/2)
# into a 0/1 dummy -- TODO confirm against the Stata codebook
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
data$pd_in <- as.integer(data$pd_in)
data$pd_out <- as.integer(data$pd_out)
# generate new variables
data$distance <- data$apc_min_distance
# rename variable
data$social_cap_retro <- data$leadership
# subset data set according to ethnic groups
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]
# scale variables (z-scores of the four game outcomes)
data_uzbek$pd_in_scale <- scale(data_uzbek$pd_in)
data_uzbek$dg_in_scale <- scale(data_uzbek$dg_in)
data_uzbek$pd_out_scale <- scale(data_uzbek$pd_out)
data_uzbek$dg_out_scale <- scale(data_uzbek$dg_out)
# NOTE(review): rowSums(..., na.rm=T)/4 divides by 4 regardless of how many
# components are missing, i.e. NA outcomes effectively count as 0 rather
# than being averaged over -- confirm this is intended.
data_uzbek$cooperation_index <- rowSums(cbind(data_uzbek$pd_in_scale,
data_uzbek$dg_in_scale,
data_uzbek$pd_out_scale,
data_uzbek$dg_out_scale), na.rm=T)/4
##### Figure
# First stage #
# Reverse the instrument so that larger values mean *shorter* distance to
# the APC route; used as the regressor in the reduced-form models below.
data_uzbek$distance <- 1-data_uzbek$apc_min_distance
# PSU-level means for the aggregate first stage
dataAgg <- aggregate(data_uzbek[,c("affected", "distance")],
list(data_uzbek$id_psu),
mean)
# run first stage regressions for individual and aggregate data
first_stage_ind <- lm(affected ~ distance, data=data_uzbek)
first_stage_psu <- lm(affected ~ distance, data=dataAgg)
# IV models
# run iv regressions: lm() lines are reduced forms on the reversed distance;
# ivreg() lines instrument 'affected' with the *raw* apc_min_distance.
# NOTE(review): instrumenting with the un-reversed apc_min_distance while the
# reduced forms use 1 - apc_min_distance flips the expected coefficient sign
# between columns -- presumably intentional, but verify against the paper.
model11 <- lm(pd_in_scale ~ distance , data=data_uzbek)
model12 <- ivreg(pd_in_scale ~ affected | apc_min_distance, data = data_uzbek)
model21 <- lm(dg_in_scale ~ distance , data=data_uzbek)
model22 <- ivreg(dg_in_scale ~ affected | apc_min_distance , data = data_uzbek)
model31 <- lm(pd_out_scale ~ distance , data=data_uzbek)
model32 <- ivreg(pd_out_scale ~ affected | apc_min_distance , data = data_uzbek)
model41 <- lm(dg_out_scale ~ distance , data=data_uzbek)
model42 <- ivreg(dg_out_scale ~ affected | apc_min_distance , data = data_uzbek)
model51 <- lm(cooperation_index ~ distance , data=data_uzbek)
model52 <- ivreg(cooperation_index ~ affected | apc_min_distance , data = data_uzbek)
# aggregate data (PSU-level means of outcomes, treatment and controls)
dataAgg <- aggregate(data_uzbek[,c("apc_min_distance", "distance", "pd_in_scale", "dg_in_scale",
"pd_out_scale", "dg_out_scale", "cooperation_index", "affected",
"economy_index", "state_index", "social_cap_retro")],
list(data_uzbek$id_psu),
mean)
names(dataAgg)[1] <- "psu"
# drop PSUs without retrospective social-capital information
dataAgg <- dataAgg[!is.na(dataAgg$social_cap_retro),]
# load and edit the travel time matrix
# read.matrix() comes from the tseries package; file is semicolon-separated
ttmat <- read.matrix("z.travel_time.csv", header = T, sep = ";", skip = 0)
# first column holds the PSU ids; move it into the rownames
row.names(ttmat) <- ttmat[,1]
ttmat <- ttmat[,2:ncol(ttmat)]
# keep only PSUs present in the aggregated data, then sort rows/columns
# numerically so the matrix lines up with dataAgg (reordered below)
ttmat <- ttmat[row.names(ttmat) %in% dataAgg$psu, colnames(ttmat) %in% dataAgg$psu]
ttmat_sort <- ttmat[order(as.numeric(row.names(ttmat))),]
ttmat_sort <- ttmat_sort[,order(as.numeric(colnames(ttmat_sort)))]
# spatial weights list (spdep) built from the travel-time matrix
ttlistw <- mat2listw(ttmat_sort)
# formulas
f1 <- pd_in_scale ~ distance
f2 <- dg_in_scale ~ distance
f3 <- pd_out_scale ~ distance
f4 <- dg_out_scale ~ distance
f5 <- cooperation_index ~ distance
#basic OLS models
model1 <- lm(pd_in_scale ~ distance , data=dataAgg)
model2 <- lm(dg_in_scale ~ distance , data=dataAgg)
model3 <- lm(pd_out_scale ~ distance , data=dataAgg)
model4 <- lm(dg_out_scale ~ distance , data=dataAgg)
model5 <- lm(cooperation_index ~ distance , data=dataAgg)
# spatial models
# order PSUs to match the sorted weight matrix before fitting
dataAgg <- dataAgg[order(dataAgg$psu),]
model13 <- errorsarlm(f1, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model23 <- errorsarlm(f2, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model33 <- errorsarlm(f3, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model43 <- errorsarlm(f4, data=dataAgg, ttlistw, tol.solve=1.0e-30)
model53 <- errorsarlm(f5, data=dataAgg, ttlistw, tol.solve=1.0e-30)
# extract coefficients and standard errors
# Helper: pull row 2 (the slope term -- 'distance' for the reduced forms,
# 'affected' for 2SLS) out of an lm/ivreg summary as a one-row plot frame.
coef_frame <- function(model, label) {
  cf <- summary(model)$coef
  data.frame(Variable = rownames(cf),
             Coefficient = cf[, 1],
             SE = cf[, 2],
             modelName = label)[2, ]
}
# Helper: equivalent one-row frame for a spatial error model (errorsarlm),
# whose coefficients and standard errors live directly on the fit object.
# Variable is hard-coded to "affected" as in the original script; the label
# is overwritten downstream when Variable is recoded to an ordered factor.
sar_frame <- function(model, label) {
  data.frame(Variable = "affected",
             Coefficient = model$coefficients[2],
             SE = model$rest.se[2],
             modelName = label)
}
# reduced-form OLS models
model11Frame <- coef_frame(model11, "PD ingroup")
model21Frame <- coef_frame(model21, "DG ingroup")
model31Frame <- coef_frame(model31, "PD outgroup")
model41Frame <- coef_frame(model41, "DG outgroup")
model51Frame <- coef_frame(model51, "Index")
# 2SLS models
model12Frame <- coef_frame(model12, "PD ingroup")
model22Frame <- coef_frame(model22, "DG ingroup")
model32Frame <- coef_frame(model32, "PD outgroup")
model42Frame <- coef_frame(model42, "DG outgroup")
model52Frame <- coef_frame(model52, "Index")
# spatial error models
model13Frame <- sar_frame(model13, "Prisoner's Dilemma ingroup")
model23Frame <- sar_frame(model23, "Dictator Game ingroup")
model33Frame <- sar_frame(model33, "Prisoner's Dilemma outgroup")
model43Frame <- sar_frame(model43, "Dictator Game outgroup")
model53Frame <- sar_frame(model53, "Index")
# bind all models to dataframes
# Instruments (reduced-form estimates)
allModelFrame1 <- data.frame(rbind(model11Frame, model21Frame, model31Frame, model41Frame, model51Frame))
# replace the term names with ordered outcome labels; the 1..5 codes fix the
# row order, and spaces become newlines so the axis labels wrap
allModelFrame1$Variable <- c(1,2,3,4,5)
allModelFrame1$Variable <- factor(allModelFrame1$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame1$Variable) <- gsub(" ", "\n", levels(allModelFrame1$Variable))
# 2SLS
allModelFrame2 <- data.frame(rbind(model12Frame, model22Frame, model32Frame, model42Frame, model52Frame))
allModelFrame2$Variable <- c(1,2,3,4,5)
allModelFrame2$Variable <- factor(allModelFrame2$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame2$Variable) <- gsub(" ", "\n", levels(allModelFrame2$Variable))
# Instrument (SAM)
allModelFrame3 <- data.frame(rbind(model13Frame, model23Frame, model33Frame, model43Frame, model53Frame))
allModelFrame3$Variable <- c(1,2,3,4,5)
allModelFrame3$Variable <- factor(allModelFrame3$Variable, labels=c("Prisoner's Dilemma Ingroup", "Dictator Game Ingroup", "Prisoner's Dilemma Outgroup", "Dictator Game Outgroup", "Prosociality- index"))
levels(allModelFrame3$Variable) <- gsub(" ", "\n", levels(allModelFrame3$Variable))
# rowbind all models
allModelFram <- rbind(allModelFrame1, allModelFrame2, allModelFrame3)
# estimator label per block of 5 rows; relies on the stacking order above
allModelFram$matrix_style <- rep(c("Instrument", "2SLS", "Instrument (SAM)"),each=5)
# set multipliers for confidence intervals
interval1 <- -qnorm((1-0.90)/2) # 90% multiplier
interval2 <- -qnorm((1-0.95)/2) # 95% multiplier
# set up dodge
pd = position_dodge(0.5)
# build plot: thick linerange (lwd = 1) = 90% CI, thin (lwd = 1/4) = 95% CI,
# open white-filled shapes mark the point estimates; coord_flip() puts the
# outcomes on the vertical axis
figure6 <- ggplot(allModelFram, aes(shape=matrix_style)) +
geom_hline(yintercept = 0, colour = gray(1/2), lty = 2) +
geom_linerange(aes(x = Variable, ymin = Coefficient - SE*interval1,
ymax = Coefficient + SE*interval1),
lwd = 1, position = pd) +
geom_linerange(aes(x = Variable, ymin = Coefficient - SE*interval2,
ymax = Coefficient + SE*interval2),
lwd = 1/4, position = pd) +
geom_point(aes(x = Variable, y = Coefficient, shape = matrix_style),
position = pd,fill = "WHITE", size = 3) +
coord_flip(ylim = c(-0.95,0.22)) + theme_bw() +
theme(legend.position="bottom") +
scale_shape_manual(values = c(23, 24, 25), name ="") +
ylab("") + xlab("") +
theme(text = element_text(size=18, family="Times"))
# plot output
figure6
|
#' Convert a decimal integer to a vector of binary digits
#'
#' @param x integer value to convert (must be representable in 32 bits)
#' @param len length of the output vector (default 32); shorter lengths keep
#'   only the least significant bits, longer lengths are left-padded with
#'   zeros
#' @return a vector of 0/1 digits, most significant bit first
#' @export
convert_dec2bin <- function(x, len = 32) {
  # intToBits() returns the 32 bits least-significant first; reverse so the
  # most significant bit comes first
  bits <- as.integer(rev(intToBits(x)))
  if (len < 32) {
    # keep only the len least significant bits
    bits <- bits[(32 - len + 1):32]
  }
  if (len > 32) {
    # left-pad with zeros up to the requested length
    bits <- c(rep(0, len - 32), bits)
  }
  bits
}
| /R/convert_dec2bin.R | no_license | cran/QuantumOps | R | false | false | 174 | r | #' @export
# Convert a decimal integer x to a vector of binary digits (most significant
# bit first), truncated or zero-padded to length len.
convert_dec2bin <- function(x,len=32){
# intToBits() yields 32 bits least-significant first; reverse to MSB-first
b <- as.integer(rev(intToBits(x)))
# shorter output: keep only the len least significant bits
if(len < 32)
b <- b[ (32-len+1):32 ]
# longer output: left-pad with zeros (note: rep(0, ...) coerces the result
# to double when len > 32)
if(len > 32)
b <- c( rep(0,len-32) , b)
b
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalizeTissueAware.R
\name{normalizeTissueAware}
\alias{normalizeTissueAware}
\title{Normalize in a tissue aware context}
\source{
The qsmooth function comes from the qsmooth package,
currently available on GitHub under user 'kokrah'.
}
\usage{
normalizeTissueAware(obj, groups, normalizationMethod = c("qsmooth",
"quantile"), ...)
}
\arguments{
\item{obj}{ExpressionSet object}
\item{groups}{Vector of labels for each sample or a column name of the phenoData slot
for the ids to filter. Default is the column names}
\item{normalizationMethod}{Choice of 'qsmooth' or 'quantile'}
\item{...}{Options for \code{\link{qsmooth}} function or \code{\link[limma]{normalizeQuantiles}}}
}
\value{
ExpressionSet object with an assayData called normalizedMatrix
}
\description{
This function provides a wrapper around several normalization methods.
Currently it wraps only qsmooth and quantile normalization, returning a log-transformed
normalized matrix. qsmooth is a normalization approach that normalizes samples in
a condition-aware manner.
}
\examples{
data(skin)
normalizeTissueAware(skin,"SMTSD")
}
| /man/normalizeTissueAware.Rd | no_license | jnpaulson/yarn | R | false | true | 1,185 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalizeTissueAware.R
\name{normalizeTissueAware}
\alias{normalizeTissueAware}
\title{Normalize in a tissue aware context}
\source{
The qsmooth function comes from the qsmooth package,
currently available on GitHub under user 'kokrah'.
}
\usage{
normalizeTissueAware(obj, groups, normalizationMethod = c("qsmooth",
"quantile"), ...)
}
\arguments{
\item{obj}{ExpressionSet object}
\item{groups}{Vector of labels for each sample or a column name of the phenoData slot
for the ids to filter. Default is the column names}
\item{normalizationMethod}{Choice of 'qsmooth' or 'quantile'}
\item{...}{Options for \code{\link{qsmooth}} function or \code{\link[limma]{normalizeQuantiles}}}
}
\value{
ExpressionSet object with an assayData called normalizedMatrix
}
\description{
This function provides a wrapper around several normalization methods.
Currently it wraps only qsmooth and quantile normalization, returning a log-transformed
normalized matrix. qsmooth is a normalization approach that normalizes samples in
a condition-aware manner.
}
\examples{
data(skin)
normalizeTissueAware(skin,"SMTSD")
}
|
# Return the name of the minimum-fit entry in x$fit, coerced to numeric
# (the names of x$fit are assumed to be numeric labels).
select <- function(x) {
  best <- which.min(x$fit)
  as.numeric(names(best))
}
| /emma/R/select.R | no_license | ingted/R-Examples | R | false | false | 102 | r | select <- function(x)
{
# dist$fit[which.min(x$fit)]
as.numeric(names(which.min(x$fit)))
}
|
#' Backward dynamic-programming scores for a stochastic automaton
#'
#' Fills the backward table \code{B}, where \code{B[k, s]} is the minimal
#' accumulated score for completing the sequence from position \code{k}
#' onwards, given state \code{s} at that position.
#'
#' @param initsa an initialized stochastic automaton; its \code{states}
#'   component defines the state space, and it is passed to \code{scores()}
#'   when \code{theta} is not supplied
#' @param x input symbol sequence
#' @param y output symbol sequence (assumed to be the same length as \code{x})
#' @param theta optional parameter vector; when supplied, the score function
#'   is built from \code{theta} instead of from \code{initsa}
#' @return an \code{(n + 1) x l} matrix of backward scores, where
#'   \code{n = length(x)} and \code{l} is the number of states, with columns
#'   named after the states
Sbackward <- function(initsa, x, y, theta = NULL) {
  n <- length(x)
  l <- length(initsa$states)
  # Build the score lookup either from the automaton or from raw parameters
  if (is.null(theta)) {
    scor_func <- scores(initsa)
  } else {
    scor_func <- scores(NULL, theta)
  }
  B <- matrix(data = NA, nrow = n + 1, ncol = l)
  # Base case: no remaining symbols costs nothing
  B[n + 1, ] <- 0
  # rev(seq_len(n)) instead of n:1 so an empty input sequence is handled
  # gracefully (n:1 would iterate over c(0, 1) when n == 0)
  for (k in rev(seq_len(n))) {
    for (s in rev(seq_len(l))) {
      B[k, s] <- Inf
      for (s_dash in rev(seq_len(l))) {
        # Lookup keys: (y[k], current state s) selects the column and
        # (x[k], next state s_dash) the row; the exact orientation is
        # defined by scores() -- verify there if modifying
        score_col <- paste(toString(y[k]), toString(initsa$states[s]), sep = ",")
        score_row <- paste(toString(x[k]), toString(initsa$states[s_dash]), sep = ",")
        B[k, s] <- min(B[k, s], scor_func$scoress[score_row, score_col] + B[k + 1, s_dash])
      }
    }
  }
  colnames(B) <- initsa$states
  # Minimal overall score across starting states. The original code used
  # min(w[s], B[1, s]), which indexed the scalar accumulator out of bounds
  # and collapsed to NA after the first iteration; min(B[1, ]) is the
  # intended value. (Currently unused -- the full table is returned -- but
  # kept for parity with the original.)
  w <- min(B[1, ])
  return(B)
}
| /R/Sbackward.R | no_license | cran/SAutomata | R | false | false | 769 | r | #'
#' Backward dynamic-programming scores for a stochastic automaton
#'
#' Fills the backward table \code{B}, where \code{B[k, s]} is the minimal
#' accumulated score for completing the sequence from position \code{k}
#' onwards, given state \code{s} at that position.
#'
#' @param initsa an initialized stochastic automaton; its \code{states}
#'   component defines the state space, and it is passed to \code{scores()}
#'   when \code{theta} is not supplied
#' @param x input symbol sequence
#' @param y output symbol sequence (assumed to be the same length as \code{x})
#' @param theta optional parameter vector; when supplied, the score function
#'   is built from \code{theta} instead of from \code{initsa}
#' @return an \code{(n + 1) x l} matrix of backward scores, where
#'   \code{n = length(x)} and \code{l} is the number of states, with columns
#'   named after the states
Sbackward <- function(initsa, x, y, theta = NULL) {
  n <- length(x)
  l <- length(initsa$states)
  # Build the score lookup either from the automaton or from raw parameters
  if (is.null(theta)) {
    scor_func <- scores(initsa)
  } else {
    scor_func <- scores(NULL, theta)
  }
  B <- matrix(data = NA, nrow = n + 1, ncol = l)
  # Base case: no remaining symbols costs nothing
  B[n + 1, ] <- 0
  # rev(seq_len(n)) instead of n:1 so an empty input sequence is handled
  # gracefully (n:1 would iterate over c(0, 1) when n == 0)
  for (k in rev(seq_len(n))) {
    for (s in rev(seq_len(l))) {
      B[k, s] <- Inf
      for (s_dash in rev(seq_len(l))) {
        # Lookup keys: (y[k], current state s) selects the column and
        # (x[k], next state s_dash) the row; the exact orientation is
        # defined by scores() -- verify there if modifying
        score_col <- paste(toString(y[k]), toString(initsa$states[s]), sep = ",")
        score_row <- paste(toString(x[k]), toString(initsa$states[s_dash]), sep = ",")
        B[k, s] <- min(B[k, s], scor_func$scoress[score_row, score_col] + B[k + 1, s_dash])
      }
    }
  }
  colnames(B) <- initsa$states
  # Minimal overall score across starting states. The original code used
  # min(w[s], B[1, s]), which indexed the scalar accumulator out of bounds
  # and collapsed to NA after the first iteration; min(B[1, ]) is the
  # intended value. (Currently unused -- the full table is returned -- but
  # kept for parity with the original.)
  w <- min(B[1, ])
  return(B)
}
|
#' Get Species
#' Lookup, and correct, species names
#'
#' @param uspp character vector of unique species names to be checked
#' @param x a character
#'
#' @details
#' Lookup species names using taxize, with option to only lookup names not in a reference data.table of previously-checked names. Currently looks up 1 species name at a time, but function could be modified to look up chunks. Relies heavily on taxize.
#'
#' @return
#' A data.table with 2 columns; "spp" column contains unchecked species names, "sppCorr" contains corrected (checked) species names
#'
#' @seealso \code{\link{getCmmn}}, \code{\link{getTax}}
#'
#' @export
getSpp <- function(uspp){
  # Break unique species names into chunks; currently one name per chunk,
  # but the chunking machinery allows batched lookups later
  uspp.chunks <- as.character(cut(seq_along(uspp), length(uspp)))
  u.uspp.chunks <- unique(uspp.chunks) # unique chunks
  # Progress bar for the lookup loop.
  # NOTE(review): txtProgressBar() requires max > min, so this likely fails
  # when only a single species is supplied -- TODO confirm
  spp.pb <- txtProgressBar(min=1, max=length(u.uspp.chunks), style=3)
  for(s in seq_along(u.uspp.chunks)){ # for each chunk ...
    # Recover the species names belonging to the current chunk
    t.chunk <- u.uspp.chunks[s]
    t.uspp <- uspp[uspp.chunks==t.chunk]
    # Resolve the name(s) via taxize's Global Names Resolver and reduce the
    # result to a one-row data.table with grb.spp1()
    t.spp.corr1.names <- taxize::gnr_resolve(t.uspp, stripauthority=TRUE, http="get", resolve_once=TRUE)
    t.spp.corr1 <- data.table(grb.spp1(t.spp.corr1.names))
    # Accumulate lookup results
    if(s==1){ # if first iteration, create spp.corr1
      spp.corr1 <- t.spp.corr1
    }else{ # otherwise rbind() to accumulate
      spp.corr1 <- rbind(spp.corr1, t.spp.corr1)
    }
    setTxtProgressBar(spp.pb, s) # update progress
  } # exit looping through lookup
  close(spp.pb) # close progress bar
  setnames(spp.corr1, c("submitted_name", "matched_name"), c("spp", "sppCorr"))
  # ===========================
  # = Some manual corrections =
  # ===========================
  # Hand corrections for names the resolver missed or got wrong
  spp.corr1[spp=="Antipatharian", sppCorr:="Antipatharia"]
  spp.corr1[spp=="Gorgonian", sppCorr:="Gorgonacea"]
  spp.corr1[spp=="Gymothorax igromargiatus", sppCorr:="Gymnothorax nigromargiatus"]
  spp.corr1[spp=="Micropaope uttigii", sppCorr:="Micropanope nuttingi"]
  spp.corr1[spp=="Neptheid", sppCorr:="Neptheidae"]
  spp.corr1[spp=="Ogocephalidae", sppCorr:="Ogcocephalidae"]
  spp.corr1[spp=="Raioides", sppCorr:="Raioidea"]
  spp.corr1[spp=="Seapen", sppCorr:="Pennatulacea"]
  spp.corr1[spp=="Eoraja siusmexicaus", sppCorr:="Neoraja sinusmexicanus"]
  # Explicit return: a data.table modified with `:=` is returned invisibly,
  # so without this line the result would not auto-print at the call site
  spp.corr1[]
}
#' @describeIn getSpp Count the number of N's in a word
countN <- function(x) {
  # Count occurrences of the lowercase letter "n" in each element of x.
  # vapply (rather than sapply) guarantees an integer vector even for
  # zero-length input, where sapply would return an empty list.
  vapply(strsplit(x, ""), function(chars) sum(chars == "n"), integer(1))
}
#' @describeIn getSpp Grab Species (helper function)
grb.spp1 <- function(x) {
tryCatch(
{
# x <- x$results
x <- x[!duplicated(x[,"matched_name"]),]
adjN <- pmax(countN(x$matched_name) - countN(x$submitted_name), 0)*0.01 # gets bonus match score if the matched name has more n's, because n's appear to be missing a lot
x$score <- x$score + adjN
x <- x[max(which.max(x[,"score"]),1),c("submitted_name","matched_name")]
if(x[,"matched_name"]==""){x[,"matched_name"] <- NA}
return(x)
},
error=function(cond){
tryCatch(
{
data.frame(submitted_name=x$results[1, "submitted_name"], matched_name=as.character(NA))
},
error=function(cond){data.frame(submitted_name=NA, matched_name=NA)}
)
}
)
} | /R/tax.getSpp.R | no_license | rBatt/trawlData | R | false | false | 3,609 | r | #' Get Species
#' Lookup, and correct, species names
#'
#' @param uspp character vector of unique species names to be checked
#' @param x a character
#'
#' @details
#' Lookup species names using taxize, with option to only lookup names not in a reference data.table of previously-checked names. Currently looks up 1 species name at a time, but function could be modified to look up chunks. Relies heavily on taxize.
#'
#' @return
#' A data.table with 2 columns; "spp" column contains unchecked species names, "sppCorr" contains corrected (checked) species names
#'
#' @seealso \code{\link{getCmmn}}, \code{\link{getTax}}
#'
#' @export
getSpp <- function(uspp){
  # Break unique species names into chunks; currently one name per chunk,
  # but the chunking machinery allows batched lookups later
  uspp.chunks <- as.character(cut(seq_along(uspp), length(uspp)))
  u.uspp.chunks <- unique(uspp.chunks) # unique chunks
  # Progress bar for the lookup loop.
  # NOTE(review): txtProgressBar() requires max > min, so this likely fails
  # when only a single species is supplied -- TODO confirm
  spp.pb <- txtProgressBar(min=1, max=length(u.uspp.chunks), style=3)
  for(s in seq_along(u.uspp.chunks)){ # for each chunk ...
    # Recover the species names belonging to the current chunk
    t.chunk <- u.uspp.chunks[s]
    t.uspp <- uspp[uspp.chunks==t.chunk]
    # Resolve the name(s) via taxize's Global Names Resolver and reduce the
    # result to a one-row data.table with grb.spp1()
    t.spp.corr1.names <- taxize::gnr_resolve(t.uspp, stripauthority=TRUE, http="get", resolve_once=TRUE)
    t.spp.corr1 <- data.table(grb.spp1(t.spp.corr1.names))
    # Accumulate lookup results
    if(s==1){ # if first iteration, create spp.corr1
      spp.corr1 <- t.spp.corr1
    }else{ # otherwise rbind() to accumulate
      spp.corr1 <- rbind(spp.corr1, t.spp.corr1)
    }
    setTxtProgressBar(spp.pb, s) # update progress
  } # exit looping through lookup
  close(spp.pb) # close progress bar
  setnames(spp.corr1, c("submitted_name", "matched_name"), c("spp", "sppCorr"))
  # ===========================
  # = Some manual corrections =
  # ===========================
  # Hand corrections for names the resolver missed or got wrong
  spp.corr1[spp=="Antipatharian", sppCorr:="Antipatharia"]
  spp.corr1[spp=="Gorgonian", sppCorr:="Gorgonacea"]
  spp.corr1[spp=="Gymothorax igromargiatus", sppCorr:="Gymnothorax nigromargiatus"]
  spp.corr1[spp=="Micropaope uttigii", sppCorr:="Micropanope nuttingi"]
  spp.corr1[spp=="Neptheid", sppCorr:="Neptheidae"]
  spp.corr1[spp=="Ogocephalidae", sppCorr:="Ogcocephalidae"]
  spp.corr1[spp=="Raioides", sppCorr:="Raioidea"]
  spp.corr1[spp=="Seapen", sppCorr:="Pennatulacea"]
  spp.corr1[spp=="Eoraja siusmexicaus", sppCorr:="Neoraja sinusmexicanus"]
  # Explicit return: a data.table modified with `:=` is returned invisibly,
  # so without this line the result would not auto-print at the call site
  spp.corr1[]
}
#' @describeIn getSpp Count the number of N's in a word
countN <- function(x) {
  # Count occurrences of the lowercase letter "n" in each element of x.
  # vapply (rather than sapply) guarantees an integer vector even for
  # zero-length input, where sapply would return an empty list.
  vapply(strsplit(x, ""), function(chars) sum(chars == "n"), integer(1))
}
#' @describeIn getSpp Grab Species (helper function)
# Reduce a gnr_resolve() result to a single best (submitted_name,
# matched_name) row; falls back to an NA match (and finally to an all-NA
# row) if anything in the result is malformed.
grb.spp1 <- function(x) {
tryCatch(
{
# x <- x$results
# drop duplicate matched names so each candidate is scored once
x <- x[!duplicated(x[,"matched_name"]),]
adjN <- pmax(countN(x$matched_name) - countN(x$submitted_name), 0)*0.01 # gets bonus match score if the matched name has more n's, because n's appear to be missing a lot
x$score <- x$score + adjN
# keep the highest-scoring row; max(..., 1) guards against a zero-length
# which.max() result
x <- x[max(which.max(x[,"score"]),1),c("submitted_name","matched_name")]
# an empty matched name means no usable match
if(x[,"matched_name"]==""){x[,"matched_name"] <- NA}
return(x)
},
error=function(cond){
# first fallback: keep the submitted name, mark the match as NA.
# NOTE(review): this reads x$results, which may not exist on the object
# passed in -- hence the second, all-NA fallback below
tryCatch(
{
data.frame(submitted_name=x$results[1, "submitted_name"], matched_name=as.character(NA))
},
error=function(cond){data.frame(submitted_name=NA, matched_name=NA)}
)
}
)
}
#' Modeltime best workflow from a set of models
#'
#' @description get best workflows generated from the `modeltime_wfs_fit()` function output.
#'
#' @details the best model is selected based on a specific metric ('mae', 'mape','mase','smape','rmse','rsq').
#' The default is to minimize the metric. However, if the model is being selected based on rsq
#' minimize should be FALSE.
#'
#' @param .wfs_results a tibble generated from the `modeltime_wfs_fit()` function.
#' @param .model string or number, It can be supplied as follows: “top n,” “Top n” or “tOp n”, where n is the number
#' of best models to select; n, where n is the number of best models to select; name of the
#' workflow or workflows to select.
#' @param .metric metric to get best model from ('mae', 'mape','mase','smape','rmse','rsq')
#' @param .minimize a boolean indicating whether to minimize (TRUE) or maximize (FALSE) the metric.
#'
#' @return a tibble containing the best model based on the selected metric.
#' @export
#'
#' @examples
#' library(dplyr)
#' library(earth)
#' data <- sknifedatar::data_avellaneda %>% mutate(date=as.Date(date)) %>% filter(date<'2012-06-01')
#'
#' recipe_date <- recipes::recipe(value ~ ., data = data) %>%
#' recipes::step_date(date, features = c('dow','doy','week','month','year'))
#'
#' mars <- parsnip::mars(mode = 'regression') %>%
#' parsnip::set_engine('earth')
#'
#' wfsets <- workflowsets::workflow_set(
#' preproc = list(
#' R_date = recipe_date),
#' models = list(M_mars = mars),
#' cross = TRUE)
#'
#' wffits <- sknifedatar::modeltime_wfs_fit(.wfsets = wfsets,
#' .split_prop = 0.8,
#' .serie=data)
#'
#' sknifedatar::modeltime_wfs_bestmodel(.wfs_results = wffits,
#' .metric='rsq',
#' .minimize = FALSE)
#'
modeltime_wfs_bestmodel <- function(.wfs_results, .model = NULL, .metric = "rmse", .minimize = TRUE){
  # Rank all fitted workflows by the requested metric
  rank_models <- sknifedatar::modeltime_wfs_rank(.wfs_results,
                                                 rank_metric = .metric,
                                                 minimize = .minimize)
  # Default: keep only the single best-ranked model
  if (is.null(.model)) {
    best_model <- head(rank_models, 1)
    .model <- best_model$.model_id
  }
  # "all" selects every ranked model. The length guard keeps the `if`
  # condition scalar, so passing a vector of workflow names (documented as
  # supported) no longer errors here -- conditions of length > 1 are an
  # error as of R 4.2.
  if (length(.model) == 1 && .model == "all") .model <- nrow(rank_models)
  # A plain number n selects the top n models
  if (is.numeric(.model)) {
    if (.model > nrow(rank_models)) stop('The number of top models requested is higher than the number of models supplied')
    best_model <- head(rank_models, .model)
    .model <- best_model$.model_id
  }
  # "top n" (any case, surplus whitespace tolerated) also selects the top n
  top_str_val <- tolower(.model)
  top_str_val <- trimws(top_str_val)
  top_str_val <- gsub("\\s+", " ", top_str_val)
  top_str_val <- unlist(strsplit(top_str_val, " "))
  if (length(.model) == 1 && top_str_val[1] == "top") {
    if (is.na(top_str_val[2])) stop('Enter a number that accompanies the word "top"')
    # suppressWarnings(): a non-numeric token already triggers the stop()
    # below, so the "NAs introduced by coercion" warning is just noise
    n_top <- suppressWarnings(as.numeric(top_str_val[2]))
    if (is.na(n_top)) stop('the word that accompanies the word "top" is not a number')
    if (n_top > nrow(rank_models)) stop('The number of top models requested is higher than the number of models supplied')
    best_model <- head(rank_models, n_top)
    .model <- best_model$.model_id
  }
  # Validate that every requested id exists among the ranked workflows
  if (any(!.model %in% rank_models$.model_id)) stop('some of the model names passed in the ".model" argument do not match the model names in the supplied workflow set object')
  # Return the selected models, best rank first
  rank_models %>%
    dplyr::filter(.model_id %in% .model) %>%
    dplyr::select(.model_id, rank, .model_desc, .fit_model)
}
| /R/modeltime_wfs_bestmodel.R | permissive | dedenistiawan/sknifedatar | R | false | false | 3,916 | r | #' Modeltime best workflow from a set of models
#'
#' @description get best workflows generated from the `modeltime_wfs_fit()` function output.
#'
#' @details the best model is selected based on a specific metric ('mae', 'mape','mase','smape','rmse','rsq').
#' The default is to minimize the metric. However, if the model is being selected based on rsq
#' minimize should be FALSE.
#'
#' @param .wfs_results a tibble generated from the `modeltime_wfs_fit()` function.
#' @param .model string or number, It can be supplied as follows: “top n,” “Top n” or “tOp n”, where n is the number
#' of best models to select; n, where n is the number of best models to select; name of the
#' workflow or workflows to select.
#' @param .metric metric to get best model from ('mae', 'mape','mase','smape','rmse','rsq')
#' @param .minimize a boolean indicating whether to minimize (TRUE) or maximize (FALSE) the metric.
#'
#' @return a tibble containing the best model based on the selected metric.
#' @export
#'
#' @examples
#' library(dplyr)
#' library(earth)
#' data <- sknifedatar::data_avellaneda %>% mutate(date=as.Date(date)) %>% filter(date<'2012-06-01')
#'
#' recipe_date <- recipes::recipe(value ~ ., data = data) %>%
#' recipes::step_date(date, features = c('dow','doy','week','month','year'))
#'
#' mars <- parsnip::mars(mode = 'regression') %>%
#' parsnip::set_engine('earth')
#'
#' wfsets <- workflowsets::workflow_set(
#' preproc = list(
#' R_date = recipe_date),
#' models = list(M_mars = mars),
#' cross = TRUE)
#'
#' wffits <- sknifedatar::modeltime_wfs_fit(.wfsets = wfsets,
#' .split_prop = 0.8,
#' .serie=data)
#'
#' sknifedatar::modeltime_wfs_bestmodel(.wfs_results = wffits,
#' .metric='rsq',
#' .minimize = FALSE)
#'
modeltime_wfs_bestmodel <- function(.wfs_results, .model = NULL, .metric = "rmse", .minimize = TRUE){
  # Rank all fitted workflows by the requested metric
  rank_models <- sknifedatar::modeltime_wfs_rank(.wfs_results,
                                                 rank_metric = .metric,
                                                 minimize = .minimize)
  # Default: keep only the single best-ranked model
  if (is.null(.model)) {
    best_model <- head(rank_models, 1)
    .model <- best_model$.model_id
  }
  # "all" selects every ranked model. The length guard keeps the `if`
  # condition scalar, so passing a vector of workflow names (documented as
  # supported) no longer errors here -- conditions of length > 1 are an
  # error as of R 4.2.
  if (length(.model) == 1 && .model == "all") .model <- nrow(rank_models)
  # A plain number n selects the top n models
  if (is.numeric(.model)) {
    if (.model > nrow(rank_models)) stop('The number of top models requested is higher than the number of models supplied')
    best_model <- head(rank_models, .model)
    .model <- best_model$.model_id
  }
  # "top n" (any case, surplus whitespace tolerated) also selects the top n
  top_str_val <- tolower(.model)
  top_str_val <- trimws(top_str_val)
  top_str_val <- gsub("\\s+", " ", top_str_val)
  top_str_val <- unlist(strsplit(top_str_val, " "))
  if (length(.model) == 1 && top_str_val[1] == "top") {
    if (is.na(top_str_val[2])) stop('Enter a number that accompanies the word "top"')
    # suppressWarnings(): a non-numeric token already triggers the stop()
    # below, so the "NAs introduced by coercion" warning is just noise
    n_top <- suppressWarnings(as.numeric(top_str_val[2]))
    if (is.na(n_top)) stop('the word that accompanies the word "top" is not a number')
    if (n_top > nrow(rank_models)) stop('The number of top models requested is higher than the number of models supplied')
    best_model <- head(rank_models, n_top)
    .model <- best_model$.model_id
  }
  # Validate that every requested id exists among the ranked workflows
  if (any(!.model %in% rank_models$.model_id)) stop('some of the model names passed in the ".model" argument do not match the model names in the supplied workflow set object')
  # Return the selected models, best rank first
  rank_models %>%
    dplyr::filter(.model_id %in% .model) %>%
    dplyr::select(.model_id, rank, .model_desc, .fit_model)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_type.R
\docType{methods}
\name{data_type-methods}
\alias{data_type-methods}
\alias{data_type,character-method}
\title{Extract Image data_type attribute}
\usage{
\S4method{data_type}{character}(object)
}
\arguments{
\item{object}{is a filename to pass to \link{fslval}}
}
\description{
data_type method for character types
}
| /man/data_type-methods.Rd | no_license | muschellij2/fslr | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_type.R
\docType{methods}
\name{data_type-methods}
\alias{data_type-methods}
\alias{data_type,character-method}
\title{Extract Image data_type attribute}
\usage{
\S4method{data_type}{character}(object)
}
\arguments{
\item{object}{is a filename to pass to \link{fslval}}
}
\description{
data_type method for character types
}
|
##################################### HEADER ################################
# SCRIPTNAME: process.R
# DESCRIPTION: Processes brc data and comments from staging files, error/qc checks
# WRITTEN BY: Dan Crocker
# DATE OF LAST UPDATE:
##############################################################################.
### This function will process staged data and comments prior to submittal
### Processing includes:
# Checking for valid entries
# Adding "Not-Recorded" for blank/NULL/NA entries
# Processed data frames will display below the action buttons
# Any data errors will display and prevent data submit button
PROCESS1 <- function(){
  # Process staged BRC data and comments prior to submittal.
  #
  # Reads the staged data CSV (path from the global `stagedDataCSV`), replaces
  # empty/NA/NULL entries with sentinel values, rewrites the CSV in place, then
  # does the same for the staged comments CSV when it exists.
  #
  # Relies on globals: stagedDataCSV, stagedCommentsCSV, table_fields.
  # Returns: a list with $data (processed data frame) and $comments
  #   (processed comments data frame, or absent when there are none).

  ### Read the staged data from the csv
  df_data <- read.table(stagedDataCSV, stringsAsFactors = FALSE, header = TRUE,
                        sep = " ", na.strings = "NA")
  # Only the first 31 fields describe staged-data columns
  t_fields <- table_fields %>%
    filter(row_number() <= 31)
  # Which columns have numeric data?
  num_cols <- t_fields$shiny_input[t_fields$col_type == "numeric"]
  text_cols <- t_fields$shiny_input[t_fields$col_type %in% c("text","factor")]
  text_cols <- text_cols[text_cols != "photos"]
  ### Convert empty numeric records to the numeric missing sentinel -999999
  df_data <- df_data %>% mutate_at(num_cols, ~replace(., is.na(.), -999999))
  ### Text fields that arrived as "FALSE" are treated as not recorded
  df_data <- df_data %>% mutate_at(text_cols, ~str_replace(., "FALSE", "Not Recorded"))
  ### Convert all blanks, NA, and NULLs to "Not Recorded"
  df_data[is.na(df_data)] <- "Not Recorded"
  df_data[df_data == ""] <- "Not Recorded"
  df_data[df_data == "NULL"] <- "Not Recorded"
  ### Perform any other checks on data here:
  ### Overwrite the csv with the updates:
  write.table(x = df_data, file = stagedDataCSV,
              row.names = FALSE, na = "", quote = TRUE,
              qmethod = "d", append = FALSE)
  ### PROCESS COMMENTS ####
  # Bug fix: the original used `<<-` in the fallback branches, mutating the
  # global environment as a side effect; a plain local assignment suffices
  # because the value is returned below.
  df_comments <- NULL
  if (file.exists(stagedCommentsCSV)) {
    df_comments <- read.table(stagedCommentsCSV, stringsAsFactors = FALSE,
                              header = TRUE, sep = " ", na.strings = "NA")
    if (nrow(df_comments) > 0) {
      ### Do any manipulations needed here...
      ### Overwrite the csv with the updates:
      write.table(x = df_comments, file = stagedCommentsCSV,
                  row.names = FALSE, na = "", quote = TRUE,
                  qmethod = "d", append = FALSE)
    } else {
      df_comments <- NULL
    }
  }
  dfs <- list()
  dfs$data <- df_data
  dfs$comments <- df_comments
  return(dfs)
}
| /funs/processSubmit.R | no_license | dancrocker/BRCWQDM | R | false | false | 2,324 | r | ##################################### HEADER ################################
# SCRIPTNAME: process.R
# DESCRIPTION: Processes brc data and comments from staging files, error/qc checks
# WRITTEN BY: Dan Crocker
# DATE OF LAST UPDATE:
##############################################################################.
### This function will process staged data and comments prior to submittal
### Processing includes:
# Checking for valid entries
# Adding "Not-Recorded" for blank/NULL/NA entries
# Processed data frames will display below the action buttons
# Any data errors will display and prevent data submit button
PROCESS1 <- function(){
  # Process staged BRC data and comments prior to submittal.
  #
  # Reads the staged data CSV (path from the global `stagedDataCSV`), replaces
  # empty/NA/NULL entries with sentinel values, rewrites the CSV in place, then
  # does the same for the staged comments CSV when it exists.
  #
  # Relies on globals: stagedDataCSV, stagedCommentsCSV, table_fields.
  # Returns: a list with $data (processed data frame) and $comments
  #   (processed comments data frame, or absent when there are none).

  ### Read the staged data from the csv
  df_data <- read.table(stagedDataCSV, stringsAsFactors = FALSE, header = TRUE,
                        sep = " ", na.strings = "NA")
  # Only the first 31 fields describe staged-data columns
  t_fields <- table_fields %>%
    filter(row_number() <= 31)
  # Which columns have numeric data?
  num_cols <- t_fields$shiny_input[t_fields$col_type == "numeric"]
  text_cols <- t_fields$shiny_input[t_fields$col_type %in% c("text","factor")]
  text_cols <- text_cols[text_cols != "photos"]
  ### Convert empty numeric records to the numeric missing sentinel -999999
  df_data <- df_data %>% mutate_at(num_cols, ~replace(., is.na(.), -999999))
  ### Text fields that arrived as "FALSE" are treated as not recorded
  df_data <- df_data %>% mutate_at(text_cols, ~str_replace(., "FALSE", "Not Recorded"))
  ### Convert all blanks, NA, and NULLs to "Not Recorded"
  df_data[is.na(df_data)] <- "Not Recorded"
  df_data[df_data == ""] <- "Not Recorded"
  df_data[df_data == "NULL"] <- "Not Recorded"
  ### Perform any other checks on data here:
  ### Overwrite the csv with the updates:
  write.table(x = df_data, file = stagedDataCSV,
              row.names = FALSE, na = "", quote = TRUE,
              qmethod = "d", append = FALSE)
  ### PROCESS COMMENTS ####
  # Bug fix: the original used `<<-` in the fallback branches, mutating the
  # global environment as a side effect; a plain local assignment suffices
  # because the value is returned below.
  df_comments <- NULL
  if (file.exists(stagedCommentsCSV)) {
    df_comments <- read.table(stagedCommentsCSV, stringsAsFactors = FALSE,
                              header = TRUE, sep = " ", na.strings = "NA")
    if (nrow(df_comments) > 0) {
      ### Do any manipulations needed here...
      ### Overwrite the csv with the updates:
      write.table(x = df_comments, file = stagedCommentsCSV,
                  row.names = FALSE, na = "", quote = TRUE,
                  qmethod = "d", append = FALSE)
    } else {
      df_comments <- NULL
    }
  }
  dfs <- list()
  dfs$data <- df_data
  dfs$comments <- df_comments
  return(dfs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WriteBib.R
\name{WriteBib}
\alias{WriteBib}
\title{Create a BibTeX File from a BibEntry Object}
\usage{
WriteBib(bib, file = "references.bib", biblatex = TRUE, append = FALSE,
verbose = TRUE, ...)
}
\arguments{
\item{bib}{a BibEntry object to be written to file}
\item{file}{character string naming a file; should end in \dQuote{.bib}}
\item{biblatex}{boolean; if \code{TRUE}, \code{\link{toBiblatex}} is used and no conversions of the BibEntry object
are done; if \code{FALSE} entries will be converted as described in \code{\link{toBibtex.BibEntry}}.}
\item{append}{as in \code{\link{write.bib}}}
\item{verbose}{as in \code{\link{write.bib}}}
\item{...}{additional arguments passed to \code{\link{writeLines}}}
}
\value{
\code{bib} - invisibly
}
\description{
Creates a Bibtex File from a BibEntry object for use with either BibTeX or BibLaTex.
}
\note{
To write the contents of \code{bib} \dQuote{as is}, the argument \code{biblatex} should be \code{TRUE}, otherwise
conversion is done as in \code{\link{toBibtex.BibEntry}}.
}
\examples{
bib <- ReadCrossRef(query = '10.1080/01621459.2012.699793')
## Write bib if no server error
if (length(bib)){
tfile <- tempfile(fileext = ".bib")
WriteBib(bib, tfile, biblatex = TRUE)
identical(ReadBib(tfile), bib)
unlink(tfile)
}
}
\author{
McLean, M. W. based on \code{\link{write.bib}} by Gaujoux, R. in package \code{bibtex}.
}
\seealso{
\code{\link{write.bib}}, \code{\link{ReadBib}}, \code{\link{toBibtex.BibEntry}},
\code{\link{toBiblatex}}, \code{\link{BibEntry}}
}
\keyword{IO}
| /man/WriteBib.Rd | no_license | huangrh/RefManageR | R | false | true | 1,623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WriteBib.R
\name{WriteBib}
\alias{WriteBib}
\title{Create a BibTeX File from a BibEntry Object}
\usage{
WriteBib(bib, file = "references.bib", biblatex = TRUE, append = FALSE,
verbose = TRUE, ...)
}
\arguments{
\item{bib}{a BibEntry object to be written to file}
\item{file}{character string naming a file, should; end in \dQuote{.bib}}
\item{biblatex}{boolean; if \code{TRUE}, \code{\link{toBiblatex}} is used and no conversions of the BibEntry object
are done; if \code{FALSE} entries will be converted as described in \code{\link{toBibtex.BibEntry}}.}
\item{append}{as in \code{\link{write.bib}}}
\item{verbose}{as in \code{\link{write.bib}}}
\item{...}{additional arguments passed to \code{\link{writeLines}}}
}
\value{
\code{bib} - invisibly
}
\description{
Creates a Bibtex File from a BibEntry object for use with either BibTeX or BibLaTex.
}
\note{
To write the contents of \code{bib} \dQuote{as is}, the argument \code{biblatex} should be \code{TRUE}, otherwise
conversion is done as in \code{\link{toBibtex.BibEntry}}.
}
\examples{
bib <- ReadCrossRef(query = '10.1080/01621459.2012.699793')
## Write bib if no server error
if (length(bib)){
tfile <- tempfile(fileext = ".bib")
WriteBib(bib, tfile, biblatex = TRUE)
identical(ReadBib(tfile), bib)
unlink(tfile)
}
}
\author{
McLean, M. W. based on \code{\link{write.bib}} by Gaujoux, R. in package \code{bibtex}.
}
\seealso{
\code{\link{write.bib}}, \code{\link{ReadBib}}, \code{\link{toBibtex.BibEntry}},
\code{\link{toBiblatex}}, \code{\link{BibEntry}}
}
\keyword{IO}
|
# convert genotype probabilities from Srivastava et al (2017) as R/qtl2 probs object + map
#
# supplemental data for Srivastava et al. (2017) Genomes of the Mouse
# Collaborative Cross. Genetics 206:537-556, doi:10.1534/genetics.116.198838
# available at Zenodo, doi:10.5281/zenodo.377036
# Packages: data.table for fast CSV reading; qtl2/qtl2convert supply the
# genoprob structures and the map_df_to_list() conversion helper.
library(data.table)
library(qtl2)
library(qtl2convert)
# Directory holding one 36-state probability CSV per CC strain
prob_dir <- "../RawData/Prob36"
files <- list.files(prob_dir, pattern=".csv$")
# Strain names are encoded in the file names: strip the "b38V01.csv" suffix
# and restore the "/" that was replaced by "-" for use in file names.
strains <- sub("b38V01.csv$", "", files)
strains <- sub("-", "/", strains, fixed=TRUE)
message("Reading probabilities")
# One data frame per strain; columns 1-3 are marker/chromosome/position,
# the remaining columns are the genotype-state probabilities.
v <- lapply(files, function(file) data.table::fread(file.path(prob_dir, file),
data.table=FALSE))
# grab map from the first file (assumed identical across strains -- TODO confirm)
map <- v[[1]][,1:3]
map[,3] <- map[,3]/1e6  # convert basepairs to Mbp
map <- map[map$chromosome %in% c(1:19,"X"),]  # keep autosomes 1-19 plus X
pmap <- map_df_to_list(map, chr_column="chromosome", pos_column="position(B38)")
# probs: one 3-d array per chromosome, dimensions strains x states x markers
probs <- vector("list", 20)
names(probs) <- c(1:19,"X")
message("Reorganizing probabilities")
for(chr in names(probs)) {
probs[[chr]] <- array(dim=c(length(v), ncol(v[[1]])-3, length(pmap[[chr]])))
dimnames(probs[[chr]]) <- list(strains, colnames(v[[1]])[-(1:3)], names(pmap[[chr]]))
for(i in seq_along(v)) {
# rows of v[[i]] are markers; transpose to states x markers for this strain
probs[[chr]][i,,] <- t(v[[i]][v[[i]][,2]==chr, -(1:3)])
}
}
# drop all but the first 8 genotypes
# force to sum to 1 with no missing values
message("Reducing to 8 states")
probs8 <- probs
for(chr in names(probs)) {
# small offset so the renormalization below never divides by zero
probs8[[chr]] <- probs[[chr]][, 1:8, ] + 1e-8
for(i in 1:nrow(probs[[chr]]))
# rescale each strain's 8 state probabilities to sum to 1 at every marker
probs8[[chr]][i,,] <- t(t(probs8[[chr]][i,,]) / colSums(probs8[[chr]][i,,]))
}
# Attach the attributes R/qtl2 expects on a "calc_genoprob" object
attr(probs8, "crosstype") <- "risib8"
attr(probs8, "is_x_chr") <- setNames(rep(c(FALSE,TRUE), c(19,1)), c(1:19,"X"))
attr(probs8, "alleles") <- LETTERS[1:8]
attr(probs8, "alleleprobs") <- FALSE
class(probs8) <- c("calc_genoprob", "list")
message("Saving to files")
saveRDS(probs8, "cc_rawprobs.rds")
saveRDS(pmap, "cc_rawprobs_pmap.rds")
#
# supplemental data for Srivastava et al. (2017) Genomes of the Mouse
# Collaborative Cross. Genetics 206:537-556, doi:10.1534/genetics.116.198838
# available at Zenodo, doi:10.5281/zenodo.377036
# Packages: data.table for fast CSV reading; qtl2/qtl2convert supply the
# genoprob structures and the map_df_to_list() conversion helper.
library(data.table)
library(qtl2)
library(qtl2convert)
# Directory holding one 36-state probability CSV per CC strain
prob_dir <- "../RawData/Prob36"
files <- list.files(prob_dir, pattern=".csv$")
# Strain names are encoded in the file names: strip the "b38V01.csv" suffix
# and restore the "/" that was replaced by "-" for use in file names.
strains <- sub("b38V01.csv$", "", files)
strains <- sub("-", "/", strains, fixed=TRUE)
message("Reading probabilities")
# One data frame per strain; columns 1-3 are marker/chromosome/position,
# the remaining columns are the genotype-state probabilities.
v <- lapply(files, function(file) data.table::fread(file.path(prob_dir, file),
data.table=FALSE))
# grab map from the first file (assumed identical across strains -- TODO confirm)
map <- v[[1]][,1:3]
map[,3] <- map[,3]/1e6  # convert basepairs to Mbp
map <- map[map$chromosome %in% c(1:19,"X"),]  # keep autosomes 1-19 plus X
pmap <- map_df_to_list(map, chr_column="chromosome", pos_column="position(B38)")
# probs: one 3-d array per chromosome, dimensions strains x states x markers
probs <- vector("list", 20)
names(probs) <- c(1:19,"X")
message("Reorganizing probabilities")
for(chr in names(probs)) {
probs[[chr]] <- array(dim=c(length(v), ncol(v[[1]])-3, length(pmap[[chr]])))
dimnames(probs[[chr]]) <- list(strains, colnames(v[[1]])[-(1:3)], names(pmap[[chr]]))
for(i in seq_along(v)) {
# rows of v[[i]] are markers; transpose to states x markers for this strain
probs[[chr]][i,,] <- t(v[[i]][v[[i]][,2]==chr, -(1:3)])
}
}
# drop all but the first 8 genotypes
# force to sum to 1 with no missing values
message("Reducing to 8 states")
probs8 <- probs
for(chr in names(probs)) {
# small offset so the renormalization below never divides by zero
probs8[[chr]] <- probs[[chr]][, 1:8, ] + 1e-8
for(i in 1:nrow(probs[[chr]]))
# rescale each strain's 8 state probabilities to sum to 1 at every marker
probs8[[chr]][i,,] <- t(t(probs8[[chr]][i,,]) / colSums(probs8[[chr]][i,,]))
}
# Attach the attributes R/qtl2 expects on a "calc_genoprob" object
attr(probs8, "crosstype") <- "risib8"
attr(probs8, "is_x_chr") <- setNames(rep(c(FALSE,TRUE), c(19,1)), c(1:19,"X"))
attr(probs8, "alleles") <- LETTERS[1:8]
attr(probs8, "alleleprobs") <- FALSE
class(probs8) <- c("calc_genoprob", "list")
message("Saving to files")
saveRDS(probs8, "cc_rawprobs.rds")
saveRDS(pmap, "cc_rawprobs_pmap.rds")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/descriptionStats.R
\name{describeProp}
\alias{describeProp}
\title{A function that returns a description proportion that contains
the number and the percentage}
\usage{
describeProp(x, html = TRUE, digits = 1, number_first = TRUE,
useNA = c("ifany", "no", "always"), useNA.digits = digits, default_ref,
percentage_sign = TRUE, language = "en", ...)
}
\arguments{
\item{x}{The variable that you want the statistics for}
\item{html}{If HTML compatible output should be used. If \code{FALSE}
it outputs LaTeX formatting}
\item{digits}{The number of decimals used}
\item{number_first}{If the number should be given or if the percentage
should be presented first. The second is encapsulated in parentheses ().
This is only used together with the useNA variable.}
\item{useNA}{This indicates if missing should be added as a separate
row below all other. See \code{\link[base]{table}} for \code{useNA}-options.
\emph{Note:} defaults to ifany and not "no" as \code{\link[base]{table}} does.}
\item{useNA.digits}{The number of digits to use for the
missing percentage, defaults to the overall \code{digits}.}
\item{default_ref}{The default reference, either first,
the level name or a number within the levels. If left out
it defaults to the first value.}
\item{percentage_sign}{If you want to suppress the percentage sign you
can set this variable to FALSE. You can also choose something else that
the default \% if you so wish by setting this variable. Note, this is
only used when combined with the missing information.}
\item{language}{The ISO-639-1 two-letter code for the language of
interest. Currently only english is distinguished from the ISO
format using a ',' as the separator in the \code{\link{txtInt}}
function.}
\item{...}{Passed on to \code{\link{describeFactors}}}
}
\value{
\code{string} A string formatted for either LaTeX or HTML
}
\description{
A function that returns a description proportion that contains
the number and the percentage
}
\examples{
describeProp(factor(sample(50, x=c("A","B", NA), replace=TRUE)))
}
\seealso{
Other descriptive functions: \code{\link{describeFactors}},
\code{\link{describeMean}}, \code{\link{describeMedian}},
\code{\link{getDescriptionStatsBy}},
\code{\link{getPvalWilcox}}
}
| /man/describeProp.Rd | no_license | lemna/Gmisc | R | false | true | 2,324 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/descriptionStats.R
\name{describeProp}
\alias{describeProp}
\title{A function that returns a description proportion that contains
the number and the percentage}
\usage{
describeProp(x, html = TRUE, digits = 1, number_first = TRUE,
useNA = c("ifany", "no", "always"), useNA.digits = digits, default_ref,
percentage_sign = TRUE, language = "en", ...)
}
\arguments{
\item{x}{The variable that you want the statistics for}
\item{html}{If HTML compatible output should be used. If \code{FALSE}
it outputs LaTeX formatting}
\item{digits}{The number of decimals used}
\item{number_first}{If the number should be given or if the percentage
should be presented first. The second is encapsulated in parentheses ().
This is only used together with the useNA variable.}
\item{useNA}{This indicates if missing should be added as a separate
row below all other. See \code{\link[base]{table}} for \code{useNA}-options.
\emph{Note:} defaults to ifany and not "no" as \code{\link[base]{table}} does.}
\item{useNA.digits}{The number of digits to use for the
missing percentage, defaults to the overall \code{digits}.}
\item{default_ref}{The default reference, either first,
the level name or a number within the levels. If left out
it defaults to the first value.}
\item{percentage_sign}{If you want to suppress the percentage sign you
can set this variable to FALSE. You can also choose something else that
the default \% if you so wish by setting this variable. Note, this is
only used when combined with the missing information.}
\item{language}{The ISO-639-1 two-letter code for the language of
interest. Currently only english is distinguished from the ISO
format using a ',' as the separator in the \code{\link{txtInt}}
function.}
\item{...}{Passed on to \code{\link{describeFactors}}}
}
\value{
\code{string} A string formatted for either LaTeX or HTML
}
\description{
A function that returns a description proportion that contains
the number and the percentage
}
\examples{
describeProp(factor(sample(50, x=c("A","B", NA), replace=TRUE)))
}
\seealso{
Other descriptive functions: \code{\link{describeFactors}},
\code{\link{describeMean}}, \code{\link{describeMedian}},
\code{\link{getDescriptionStatsBy}},
\code{\link{getPvalWilcox}}
}
|
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
# G S E A -- Gene Set Enrichment Analysis
# Auxiliary functions and definitions
GSEA.GeneRanking <- function(A, class.labels, gene.labels, nperm, permutation.type = 0, sigma.correction = "GeneCluster", fraction=1.0, replace=F, reverse.sign= F) {
# This function ranks the genes according to the signal to noise ratio for the actual phenotype and also random permutations and bootstrap
# subsamples of both the observed and random phenotypes. It uses matrix operations to implement the signal to noise calculation
# in stages and achieves fast execution speed. It supports two types of permutations: random (unbalanced) and balanced.
# It also supports subsampling and bootstrap by using masking and multiple-count variables. When "fraction" is set to 1 (default)
# the there is no subsampling or boostrapping and the matrix of observed signal to noise ratios will have the same value for
# all permutations. This is wasteful but allows to support all the multiple options with the same code. Notice that the second
# matrix for the null distribution will still have the values for the random permutations
# (null distribution). This mode (fraction = 1.0) is the defaults, the recommended one and the one used in the examples.
# It is also the one that has be tested more thoroughly. The resampling and boostrapping options are intersting to obtain
# smooth estimates of the observed distribution but its is left for the expert user who may want to perform some sanity
# checks before trusting the code.
#
# Inputs:
#   A: Matrix of gene expression values (rows are genes, columns are samples)
#   class.labels: Phenotype of class disticntion of interest. A vector of binary labels having first the 1's and then the 0's
#   gene.labels: gene labels. Vector of probe ids or accession numbers for the rows of the expression matrix
#   nperm: Number of random permutations/bootstraps to perform
#   permutation.type: Permutation type: 0 = unbalanced, 1 = balanced. For experts only (default: 0)
#   sigma.correction: Correction to the signal to noise ratio (Default = GeneCluster, a choice to support the way it was handled in a previous package)
#   fraction: Subsampling fraction. Set to 1.0 (no resampling). For experts only (default: 1.0)
#   replace: Resampling mode (replacement or not replacement). For experts only (default: F)
#   reverse.sign: Reverse direction of gene list (default = F)
#
# Outputs:
#   s2n.matrix: Matrix with random permuted or bootstraps signal to noise ratios (rows are genes, columns are permutations or bootstrap subsamplings
#   obs.s2n.matrix: Matrix with observed signal to noise ratios (rows are genes, columns are boostraps subsamplings. If fraction is set to 1.0 then all the columns have the same values
#   order.matrix: Matrix with the orderings that will sort the columns of the obs.s2n.matrix in decreasing s2n order
#   obs.order.matrix: Matrix with the orderings that will sort the columns of the s2n.matrix in decreasing s2n order
#
# NOTE(review): `gene.labels` is never referenced in this body; it appears to
# be kept for interface compatibility only.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
# Small constant added to every expression value (presumably to avoid
# divide-by-zero/degenerate values downstream -- TODO confirm)
A <- A + 0.00000001
# N = number of genes (rows), Ns = number of samples (columns)
N <- length(A[,1])
Ns <- length(A[1,])
# Preallocate per-permutation bookkeeping matrices (samples x permutations
# for the label/mask matrices, genes x permutations for scores/orderings)
subset.mask <- matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels1 <- matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels2 <- matrix(0, nrow=Ns, ncol=nperm)
class.labels1 <- matrix(0, nrow=Ns, ncol=nperm)
class.labels2 <- matrix(0, nrow=Ns, ncol=nperm)
order.matrix <- matrix(0, nrow = N, ncol = nperm)
obs.order.matrix <- matrix(0, nrow = N, ncol = nperm)
s2n.matrix <- matrix(0, nrow = N, ncol = nperm)
obs.s2n.matrix <- matrix(0, nrow = N, ncol = nperm)
# NOTE(review): the three obs.gene.* vectors below are allocated but never
# filled or returned in this body
obs.gene.labels <- vector(length = N, mode="character")
obs.gene.descs <- vector(length = N, mode="character")
obs.gene.symbols <- vector(length = N, mode="character")
M1 <- matrix(0, nrow = N, ncol = nperm)
M2 <- matrix(0, nrow = N, ncol = nperm)
S1 <- matrix(0, nrow = N, ncol = nperm)
S2 <- matrix(0, nrow = N, ncol = nperm)
gc()
# Sizes of the two phenotype classes; class.labels is documented above as
# having all 1's first and then the 0's, so class 1 occupies the first
# class1.size sample positions
C <- split(class.labels, class.labels)
class1.size <- length(C[[1]])
class2.size <- length(C[[2]])
class1.index <- seq(1, class1.size, 1)
class2.index <- seq(class1.size + 1, class1.size + class2.size, 1)
# Build, for each permutation r: a 0/1 subsampling mask over samples and the
# per-sample membership counts for the reshuffled (null) and observed labels
for (r in 1:nperm) {
class1.subset <- sample(class1.index, size = ceiling(class1.size*fraction), replace = replace)
class2.subset <- sample(class2.index, size = ceiling(class2.size*fraction), replace = replace)
class1.subset.size <- length(class1.subset)
class2.subset.size <- length(class2.subset)
# 0/1 mask of which class-1 samples were drawn (multiplicity, if any under
# replace=T, is carried by the count matrices below, not by this mask)
subset.class1 <- rep(0, class1.size)
for (i in 1:class1.size) {
if (is.element(class1.index[i], class1.subset)) {
subset.class1[i] <- 1
}
}
subset.class2 <- rep(0, class2.size)
for (i in 1:class2.size) {
if (is.element(class2.index[i], class2.subset)) {
subset.class2[i] <- 1
}
}
subset.mask[, r] <- as.numeric(c(subset.class1, subset.class2))
fraction.class1 <- class1.size/Ns
fraction.class2 <- class2.size/Ns
if (permutation.type == 0) { # random (unbalanced) permutation
full.subset <- c(class1.subset, class2.subset)
label1.subset <- sample(full.subset, size = Ns * fraction.class1)
reshuffled.class.labels1[, r] <- rep(0, Ns)
reshuffled.class.labels2[, r] <- rep(0, Ns)
class.labels1[, r] <- rep(0, Ns)
class.labels2[, r] <- rep(0, Ns)
for (i in 1:Ns) {
# m1/m2 count how many times sample i was drawn into the permuted
# label-1 set and into the full subsample, respectively
m1 <- sum(!is.na(match(label1.subset, i)))
m2 <- sum(!is.na(match(full.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
if (i <= class1.size) {
class.labels1[i, r] <- m2
class.labels2[i, r] <- 0
} else {
class.labels1[i, r] <- 0
class.labels2[i, r] <- m2
}
}
} else if (permutation.type == 1) { # proportional (balanced) permutation
# Draw permuted label-1 members proportionally from within each class
class1.label1.subset <- sample(class1.subset, size = ceiling(class1.subset.size*fraction.class1))
class2.label1.subset <- sample(class2.subset, size = floor(class2.subset.size*fraction.class1))
reshuffled.class.labels1[, r] <- rep(0, Ns)
reshuffled.class.labels2[, r] <- rep(0, Ns)
class.labels1[, r] <- rep(0, Ns)
class.labels2[, r] <- rep(0, Ns)
for (i in 1:Ns) {
if (i <= class1.size) {
m1 <- sum(!is.na(match(class1.label1.subset, i)))
m2 <- sum(!is.na(match(class1.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
class.labels1[i, r] <- m2
class.labels2[i, r] <- 0
} else {
m1 <- sum(!is.na(match(class2.label1.subset, i)))
m2 <- sum(!is.na(match(class2.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
class.labels1[i, r] <- 0
class.labels2[i, r] <- m2
}
}
}
}
# compute S2N for the random permutation matrix
# Vectorized across all permutations: A %*% P sums expression over the
# samples each permutation assigns to a group, giving per-gene sums that are
# then scaled into means (M*) and standard deviations (S*)
P <- reshuffled.class.labels1 * subset.mask
n1 <- sum(P[,1])
M1 <- A %*% P
M1 <- M1/n1
gc()
A2 <- A*A
S1 <- A2 %*% P
S1 <- S1/n1 - M1*M1
S1 <- sqrt(abs((n1/(n1-1)) * S1))
gc()
P <- reshuffled.class.labels2 * subset.mask
n2 <- sum(P[,1])
M2 <- A %*% P
M2 <- M2/n2
gc()
A2 <- A*A
S2 <- A2 %*% P
S2 <- S2/n2 - M2*M2
S2 <- sqrt(abs((n2/(n2-1)) * S2))
rm(P)
rm(A2)
gc()
if (sigma.correction == "GeneCluster") { # small sigma "fix" as used in GeneCluster
# Floor each sd at 20% of the absolute mean, and at 0.2 when it is zero
S2 <- ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 <- ifelse(S2 == 0, 0.2, S2)
S1 <- ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 <- ifelse(S1 == 0, 0.2, S1)
gc()
}
# Signal-to-noise = (mean1 - mean2) / (sd1 + sd2); M1 and S1 are reused to
# hold the numerator and denominator
M1 <- M1 - M2
rm(M2)
gc()
S1 <- S1 + S2
rm(S2)
gc()
s2n.matrix <- M1/S1
if (reverse.sign == T) {
s2n.matrix <- - s2n.matrix
}
gc()
for (r in 1:nperm) {
order.matrix[, r] <- order(s2n.matrix[, r], decreasing=T)
}
# compute S2N for the "observed" permutation matrix
# Same staged computation as above, but using the true (observed) labels
P <- class.labels1 * subset.mask
n1 <- sum(P[,1])
M1 <- A %*% P
M1 <- M1/n1
gc()
A2 <- A*A
S1 <- A2 %*% P
S1 <- S1/n1 - M1*M1
S1 <- sqrt(abs((n1/(n1-1)) * S1))
gc()
P <- class.labels2 * subset.mask
n2 <- sum(P[,1])
M2 <- A %*% P
M2 <- M2/n2
gc()
A2 <- A*A
S2 <- A2 %*% P
S2 <- S2/n2 - M2*M2
S2 <- sqrt(abs((n2/(n2-1)) * S2))
rm(P)
rm(A2)
gc()
if (sigma.correction == "GeneCluster") { # small sigma "fix" as used in GeneCluster
S2 <- ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 <- ifelse(S2 == 0, 0.2, S2)
S1 <- ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 <- ifelse(S1 == 0, 0.2, S1)
gc()
}
M1 <- M1 - M2
rm(M2)
gc()
S1 <- S1 + S2
rm(S2)
gc()
obs.s2n.matrix <- M1/S1
gc()
if (reverse.sign == T) {
obs.s2n.matrix <- - obs.s2n.matrix
}
for (r in 1:nperm) {
obs.order.matrix[,r] <- order(obs.s2n.matrix[,r], decreasing=T)
}
return(list(s2n.matrix = s2n.matrix,
obs.s2n.matrix = obs.s2n.matrix,
order.matrix = order.matrix,
obs.order.matrix = obs.order.matrix))
}
GSEA.EnrichmentScore <- function(gene.list, gene.set, weighted.score.type = 1, correl.vector = NULL) {
  # Weighted Kolmogorov-Smirnov-style enrichment score of `gene.set` within
  # the ranked `gene.list` (the GSEA running-sum statistic).
  #
  # gene.list: integer positions of the ranked genes in the original dataset.
  # gene.set:  integer positions of the gene-set members in the same dataset.
  # weighted.score.type: weighting exponent; 0 = classic KS (unweighted),
  #   1 = weighted, 2 = over-weighted. For types 1 and 2, `correl.vector`
  #   must be supplied in the same order as `gene.list`.
  # correl.vector: correlations (e.g. signal-to-noise scores) matching the
  #   order of `gene.list`.
  #
  # Returns a list:
  #   ES        - enrichment score (real number between -1 and +1)
  #   arg.ES    - index in gene.list where the running score peaks
  #   RES       - running enrichment score at every position of gene.list
  #   indicator - 0/1 vector flagging gene-set members along gene.list
  n.total <- length(gene.list)
  n.inset <- length(gene.set)
  n.outset <- n.total - n.inset
  # 1 where the ranked gene belongs to the set, 0 elsewhere
  in.set <- sign(match(gene.list, gene.set, nomatch = 0))
  out.set <- 1 - in.set
  if (weighted.score.type == 0) {
    correl.vector <- rep(1, n.total)
  }
  weight <- abs(correl.vector ^ weighted.score.type)
  # Hits step the running sum up in proportion to their weight; misses step
  # it down uniformly
  up.step <- in.set * weight / sum(weight[in.set == 1])
  down.step <- out.set / n.outset
  running <- cumsum(up.step - down.step)
  top <- max(running)
  bottom <- min(running)
  # The score is the larger-magnitude excursion; ties go to the minimum
  if (top > -bottom) {
    score <- signif(top, digits = 5)
    peak <- which.max(running)
  } else {
    score <- signif(bottom, digits = 5)
    peak <- which.min(running)
  }
  list(ES = score, arg.ES = peak, RES = running, indicator = in.set)
}
OLD.GSEA.EnrichmentScore <- function(gene.list, gene.set) {
  # Original (unweighted) GSEA enrichment score from Mootha et al. 2003 of
  # `gene.set` within the ranked `gene.list`.
  #
  # gene.list: integer positions of the ranked genes in the original dataset.
  # gene.set:  integer positions of the gene-set members in the same dataset.
  #
  # Returns a list:
  #   ES        - enrichment score (real number between -1 and +1)
  #   arg.ES    - index in gene.list where the running score peaks
  #   RES       - running enrichment score at every position of gene.list
  #   indicator - 0/1 vector flagging gene-set members along gene.list
  n.total <- length(gene.list)
  n.inset <- length(gene.set)
  # 1 where the ranked gene belongs to the set, 0 elsewhere
  in.set <- sign(match(gene.list, gene.set, nomatch = 0))
  out.set <- 1 - in.set
  # Fixed step sizes chosen so the walk returns to 0 at the end of the list
  up.step <- sqrt((n.total - n.inset) / n.inset)
  down.step <- sqrt(n.inset / (n.total - n.inset))
  running <- cumsum(in.set * up.step - out.set * down.step)
  top <- max(running)
  bottom <- min(running)
  # The score is the larger-magnitude excursion; ties go to the minimum
  if (top > -bottom) {
    score <- signif(top, digits = 5)
    peak <- which.max(running)
  } else {
    score <- signif(bottom, digits = 5)
    peak <- which.min(running)
  }
  list(ES = score, arg.ES = peak, RES = running, indicator = in.set)
}
GSEA.EnrichmentScore2 <- function(gene.list, gene.set, weighted.score.type = 1, correl.vector = NULL) {
#
# Computes the weighted GSEA enrichment score of gene.set in gene.list. Same calculation as
# GSEA.EnrichmentScore but faster (~x8) because it does not produce the RES, arg.RES and
# tag.indicator outputs. Intended for scoring random permutations rather than the observed
# ranking.
#
# Inputs:
#   gene.list: The ordered gene list (integers indexing the original positions in the input dataset)
#   gene.set: A gene set (integers indexing the location of those genes in the input dataset)
#   weighted.score.type: Exponent applied to the correlations: 0 (unweighted = Kolmogorov-Smirnov),
#                        1 (weighted), 2 (over-weighted). For any type other than 0 the
#                        correlation vector (in gene-list order) must be supplied.
#   correl.vector: Correlations (e.g. signal-to-noise scores) corresponding to the genes in
#                  the gene list, in the same order as the gene list.
#
# Output:
#   list(ES = enrichment score, a real number between -1 and +1)
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  N <- length(gene.list)
  Nh <- length(gene.set)
  Nm <- N - Nh
  # loc.vector maps each gene id to its rank in the ordered list.
  loc.vector <- vector(length=N, mode="numeric")
  tag.diff.vector <- vector(length=Nh, mode="numeric")
  loc.vector[gene.list] <- seq(1, N)
  tag.loc.vector <- sort(loc.vector[gene.set], decreasing = FALSE)
  if (weighted.score.type == 0) {
    tag.correl.vector <- rep(1, Nh)
  } else if (weighted.score.type == 1) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector])
  } else if (weighted.score.type == 2) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector] * correl.vector[tag.loc.vector])
  } else {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector] ^ weighted.score.type)
  }
  norm.tag <- 1.0/sum(tag.correl.vector)
  tag.correl.vector <- tag.correl.vector * norm.tag
  norm.no.tag <- 1.0/Nm
  # Gap (count of non-set genes) before each set member in the ranked list.
  tag.diff.vector[1] <- (tag.loc.vector[1] - 1)
  if (Nh > 1) {
    # BUGFIX: this guard is required. When Nh == 1 the original `2:Nh` evaluated to
    # c(2, 1), writing out-of-bounds NA-corrupted values and making ES come out NA
    # for singleton gene sets.
    tag.diff.vector[2:Nh] <- tag.loc.vector[2:Nh] - tag.loc.vector[1:(Nh - 1)] - 1
  }
  tag.diff.vector <- tag.diff.vector * norm.no.tag
  # Running-sum extremes: the peak occurs just after each hit, the valley just before it.
  peak.res.vector <- cumsum(tag.correl.vector - tag.diff.vector)
  valley.res.vector <- peak.res.vector - tag.correl.vector
  max.ES <- max(peak.res.vector)
  min.ES <- min(valley.res.vector)
  ES <- signif(ifelse(max.ES > - min.ES, max.ES, min.ES), digits=5)
  return(list(ES = ES))
}
GSEA.HeatMapPlot <- function(V, row.names = F, col.labels, col.classes, col.names = F, main = " ", xlab=" ", ylab=" ") {
#
# Plots a heatmap "pinkogram" of a gene expression matrix including a phenotype strip and
# optional gene (row) and sample (column) labels. Rows are z-scored with a 0.5*sd scale
# factor and clamped to [-6, 6] before plotting; constant rows are drawn as 0.
#
# Inputs:
#   V: gene expression matrix (genes x samples)
#   row.names: vector of row (gene) labels, or F for none
#   col.labels: binary phenotype vector, one entry per column (0 or non-0)
#   col.classes: the two phenotype names, used to label the top axis
#   col.names: vector of column (sample) labels, or F for none
#   main, xlab, ylab: usual plot annotations
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  n.rows <- length(V[,1])
  n.cols <- length(V[1,])
  row.mean <- apply(V, MARGIN=1, FUN=mean)
  row.sd <- apply(V, MARGIN=1, FUN=sd)
  for (i in 1:n.rows) {
    if (row.sd[i] == 0) {
      V[i,] <- 0
    } else {
      V[i,] <- (V[i,] - row.mean[i])/(0.5 * row.sd[i])
    }
    V[i,] <- ifelse(V[i,] < -6, -6, V[i,])
    V[i,] <- ifelse(V[i,] > 6, 6, V[i,])
  }
  # Blue-pinkogram colors. The first and last are the colors used for the class (phenotype)
  # strip. This is the 1998-vintage, pre-gene cluster, original pinkogram color map.
  mycol <- c("#0000FF", "#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA", "#FF9DB0", "#FF7080", "#FF5A5A", "#FF4040", "#FF0D1D", "#FF0000")
  heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
  heatm[1:n.rows,] <- V[seq(n.rows, 1, -1),]   # flip rows so the first gene plots at the top
  heatm[n.rows + 1,] <- ifelse(col.labels == 0, 7, -7)  # extra row: phenotype strip (saturates color map)
  image(1:n.cols, 1:(n.rows + 1), t(heatm), col=mycol, axes=FALSE, main=main, xlab= xlab, ylab=ylab)
  # BUGFIX: size.col.char was previously computed only inside the row-label branch, so
  # calling with col.names but row.names = F raised "object 'size.col.char' not found".
  # Both label sizes depend only on the matrix dimensions, so hoist them here.
  size.row.char <- 35/(n.rows + 5)
  size.col.char <- 25/(n.cols + 5)
  if (length(row.names) > 1) {
    maxl <- floor(n.rows/1.6)   # truncation width for long gene labels
    for (i in 1:n.rows) {
      row.names[i] <- substr(row.names[i], 1, maxl)
    }
    row.names <- c(row.names[seq(n.rows, 1, -1)], "Class")
    axis(2, at=1:(n.rows + 1), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char, font.axis=2, line=-1)
  }
  if (length(col.names) > 1) {
    axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
  }
  # NOTE(review): assumes exactly two phenotype classes in col.labels -- confirm for multi-class input.
  C <- split(col.labels, col.labels)
  class1.size <- length(C[[1]])
  class2.size <- length(C[[2]])
  axis(3, at=c(floor(class1.size/2),class1.size + floor(class2.size/2)), labels=col.classes, tick=FALSE, las = 1, cex.axis=1.25, font.axis=2, line=-1)
  return()
}
GSEA.Res2Frame <- function(filename = "NULL") {
#
# Reads a gene expression dataset in RES format and converts it into an R data frame.
# Sample names come from every other field (starting at the 3rd) of the first header line;
# the first 3 lines are then skipped and, in the data rows, column 2 supplies the row names
# while every other remaining column holds an expression value (the rest are A/P calls,
# which are discarded).
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  first.line <- readLines(filename, n = 1)
  header.fields <- unlist(strsplit(first.line, "\t"))
  sample.names <- header.fields[seq(3, length(header.fields), 2)]
  raw <- read.delim(filename, header=F, row.names = 2, sep="\t", skip=3,
                    blank.lines.skip=T, comment.char="", as.is=T)
  n.fields <- length(raw[1,])
  expr.cols <- seq(2, n.fields, 2)   # keep expression values, drop the A/P call columns
  out <- data.frame(raw[, expr.cols])
  names(out) <- sample.names
  return(out)
}
GSEA.Gct2Frame <- function(filename = "NULL") {
#
# Reads a gene expression dataset in GCT format and converts it into an R data frame.
# The first two lines (version tag and dimensions) are skipped, the NAME column becomes
# the row names, and the Description column (first remaining column) is dropped.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  expr <- read.delim(filename, header=T, sep="\t", skip=2, row.names=1,
                     blank.lines.skip=T, comment.char="", as.is=T)
  expr[-1]   # drop the Description column; only sample columns remain
}
GSEA.Gct2Frame2 <- function(filename = "NULL") {
#
# Reads a gene expression dataset in GCT format and converts it into an R data frame.
# Line-by-line parser variant of GSEA.Gct2Frame: skips the version and dimension rows,
# takes sample names from the header row, and reads each subsequent row as
# <name> <description> <values...>. Descriptions are discarded.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  all.lines <- readLines(filename)
  all.lines <- all.lines[-(1:2)]                  # drop "#1.2" version row and dimensions row
  header.fields <- unlist(strsplit(all.lines[1], "\t"))
  sample.names <- header.fields[-(1:2)]           # drop NAME and Description header cells
  num.cols <- length(sample.names)
  data.lines <- all.lines[-1]
  num.lines <- length(data.lines)
  fields <- strsplit(data.lines, "\t")
  row.ids <- vapply(fields, function(f) f[1], character(1))
  m <- matrix(0, nrow=num.lines, ncol=num.cols)
  for (k in seq_len(num.lines)) {
    vals <- as.numeric(fields[[k]][-(1:2)])
    m[k, seq_along(vals)] <- vals                 # short rows keep trailing zeros, as before
  }
  ds <- data.frame(m)
  names(ds) <- sample.names
  row.names(ds) <- row.ids
  return(ds)
}
GSEA.ReadClsFile <- function(file = "NULL") {
#
# Reads a class vector CLS file and returns the phenotype names and the per-sample class
# vector for a gene expression file (RES or GCT format). The sample labels live on the
# third line of the file; phenotypes are numbered 0, 1, ... in the (sorted) order that
# table() reports the unique labels.
#
# Output: list(phen = character vector of phenotype names,
#              class.v = numeric vector of 0-based class codes, one per sample)
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  cls.cont <- readLines(file)
  sample.labels <- unlist(strsplit(cls.cont[[3]], " "))
  label.counts <- table(sample.labels)
  phen <- names(label.counts)                      # unique phenotype names, sorted
  # match() replaces the original nested loops: each sample gets the 0-based index
  # of its phenotype within phen.
  class.v <- as.numeric(match(sample.labels, phen) - 1)
  return(list(phen = phen, class.v = class.v))
}
GSEA.Threshold <- function(V, thres, ceil) {
#
# Floor/ceiling pre-processing for a gene expression matrix: values below `thres` are
# raised to `thres`, values above `ceil` are lowered to `ceil`. Dimensions and other
# attributes of V are preserved.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  too.low <- V < thres
  V[too.low] <- thres
  too.high <- V > ceil
  V[too.high] <- ceil
  return(V)
}
GSEA.VarFilter <- function(V, fold, delta, gene.names = "NULL") {
#
# Variation filter pre-processing for a gene expression matrix: keeps rows (genes) whose
# max/min ratio exceeds `fold` AND whose max - min range exceeds `delta`.
#
# Inputs:
#   V: expression matrix (genes x samples)
#   fold: minimum max/min fold change for a row to be kept
#   delta: minimum max - min absolute difference for a row to be kept
#   gene.names: optional vector of row labels; the sentinel string "NULL" means none
#
# Output: the filtered matrix, or, when gene.names is supplied,
#         list(V = filtered matrix, new.list = labels of the kept rows)
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  row.max <- apply(V, MARGIN=1, FUN=max)
  row.min <- apply(V, MARGIN=1, FUN=min)
  flag <- (row.max/row.min > fold) & (row.max - row.min > delta)
  # Vectorized row selection replaces the original element-copy loops.
  B <- unname(V[flag, , drop=FALSE])
  storage.mode(B) <- "double"   # original always returned a double matrix
  # BUGFIX: the old test `if (gene.names == "NULL")` put a whole vector into a scalar
  # `if` condition, which is an error in R >= 4.2 whenever real gene names are passed.
  if (length(gene.names) == 1 && identical(as.character(gene.names[1]), "NULL")) {
    return(B)
  }
  new.list <- as.character(gene.names[flag])
  return(list(V = B, new.list = new.list))
}
GSEA.NormalizeRows <- function(V) {
#
# Standardize each row of a gene expression matrix to mean 0 and sd 1.
# Rows with zero standard deviation (constant rows) are set to all zeros
# instead of producing NaN.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  mu <- rowMeans(V)
  sdev <- apply(V, MARGIN=1, FUN=sd)
  divisor <- ifelse(sdev == 0, 1, sdev)   # placeholder 1 avoids division by zero
  V <- sweep(V, 1, mu, "-")
  V <- sweep(V, 1, divisor, "/")
  V[sdev == 0, ] <- 0                     # constant rows become all zeros, as before
  return(V)
}
GSEA.NormalizeCols <- function(V) {
#
# Standardize each column of a gene expression matrix to mean 0 and sd 1.
# Columns with zero standard deviation (constant columns) are set to all zeros
# instead of producing NaN.
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
  col.mean <- apply(V, MARGIN=2, FUN=mean)
  col.sd <- apply(V, MARGIN=2, FUN=sd)
  col.n <- length(V[1,])
  for (i in 1:col.n) {
    if (col.sd[i] == 0) {
      # BUGFIX: was `V[i,] <- 0`, which zeroed ROW i instead of column i -- it left
      # the constant column untouched and clobbered a row of already-normalized data.
      V[,i] <- 0
    } else {
      V[,i] <- (V[,i] - col.mean[i])/col.sd[i]
    }
  }
  return(V)
}
# end of auxiliary functions
# ----------------------------------------------------------------------------------------
# Main GSEA Analysis Function that implements the entire methodology
GSEA <- function(
input.ds,
input.cls,
gene.ann = "",
gs.db,
gs.ann = "",
output.directory = "",
doc.string = "GSEA.analysis",
non.interactive.run = F,
reshuffling.type = "sample.labels",
nperm = 1000,
weighted.score.type = 1,
nom.p.val.threshold = -1,
fwer.p.val.threshold = -1,
fdr.q.val.threshold = 0.25,
topgs = 10,
adjust.FDR.q.val = F,
gs.size.threshold.min = 25,
gs.size.threshold.max = 500,
reverse.sign = F,
preproc.type = 0,
random.seed = 123456,
perm.type = 0,
fraction = 1.0,
replace = F,
save.intermediate.results = F,
OLD.GSEA = F,
use.fast.enrichment.routine = T) {
# This is a methodology for the analysis of global molecular profiles called Gene Set Enrichment Analysis (GSEA). It determines
# whether an a priori defined set of genes shows statistically significant, concordant differences between two biological
# states (e.g. phenotypes). GSEA operates on all genes from an experiment, rank ordered by the signal to noise ratio and
# determines whether members of an a priori defined gene set are nonrandomly distributed towards the top or bottom of the
# list and thus may correspond to an important biological process. To assess significance the program uses an empirical
# permutation procedure to test deviation from random that preserves correlations between genes.
#
# For details see Subramanian et al 2005
#
# Inputs:
# input.ds: Input gene expression Affymetrix dataset file in RES or GCT format
# input.cls: Input class vector (phenotype) file in CLS format
# gene.ann.file: Gene microarray annotation file (Affymetrix Netaffyx *.csv format) (default: none)
# gs.file: Gene set database in GMT format
# output.directory: Directory where to store output and results (default: .)
# doc.string: Documentation string used as a prefix to name result files (default: "GSEA.analysis")
# non.interactive.run: Run in interactive (i.e. R GUI) or batch (R command line) mode (default: F)
# reshuffling.type: Type of permutation reshuffling: "sample.labels" or "gene.labels" (default: "sample.labels")
# nperm: Number of random permutations (default: 1000)
# weighted.score.type: Enrichment correlation-based weighting: 0=no weight (KS), 1=standard weigth, 2 = over-weigth (default: 1)
# nom.p.val.threshold: Significance threshold for nominal p-vals for gene sets (default: -1, no thres)
# fwer.p.val.threshold: Significance threshold for FWER p-vals for gene sets (default: -1, no thres)
# fdr.q.val.threshold: Significance threshold for FDR q-vals for gene sets (default: 0.25)
# topgs: Besides those passing test, number of top scoring gene sets used for detailed reports (default: 10)
# adjust.FDR.q.val: Adjust the FDR q-vals (default: F)
# gs.size.threshold.min: Minimum size (in genes) for database gene sets to be considered (default: 25)
# gs.size.threshold.max: Maximum size (in genes) for database gene sets to be considered (default: 500)
# reverse.sign: Reverse direction of gene list (pos. enrichment becomes negative, etc.) (default: F)
# preproc.type: Preprocessing normalization: 0=none, 1=col(z-score)., 2=col(rank) and row(z-score)., 3=col(rank). (default: 0)
# random.seed: Random number generator seed. (default: 123456)
# perm.type: Permutation type: 0 = unbalanced, 1 = balanced. For experts only (default: 0)
# fraction: Subsampling fraction. Set to 1.0 (no resampling). For experts only (default: 1.0)
# replace: Resampling mode (replacement or not replacement). For experts only (default: F)
# OLD.GSEA: if TRUE compute the OLD GSEA of Mootha et al 2003
# use.fast.enrichment.routine: if true it uses a faster version to compute random perm. enrichment "GSEA.EnrichmentScore2"
#
# Output:
# The results of the method are stored in the "output.directory" specified by the user as part of the input parameters.
# The results files are:
# - Two tab-separated global result text files (one for each phenotype). These files are labeled according to the doc
# string prefix and the phenotype name from the CLS file: <doc.string>.SUMMARY.RESULTS.REPORT.<phenotype>.txt
# - One set of global plots. They include a.- gene list correlation profile, b.- global observed and null densities, c.- heat map
# for the entire sorted dataset, and d.- p-values vs. NES plot. These plots are in a single JPEG file named
# <doc.string>.global.plots.<phenotype>.jpg. When the program is run interactively these plots appear on a window in the R GUI.
# - A variable number of tab-separated gene result text files according to how many sets pass any of the significance thresholds
# ("nom.p.val.threshold," "fwer.p.val.threshold," and "fdr.q.val.threshold") and how many are specified in the "topgs"
# parameter. These files are named: <doc.string>.<gene set name>.report.txt.
# - A variable number of gene set plots (one for each gene set report file). These plots include a.- Gene set running enrichment
# "mountain" plot, b.- gene set null distribution and c.- heat map for genes in the gene set. These plots are stored in a
# single JPEG file named <doc.string>.<gene set name>.jpg.
# The format (columns) for the global result files is as follows.
# GS : Gene set name.
# SIZE : Size of the set in genes.
# SOURCE : Set definition or source.
# ES : Enrichment score.
# NES : Normalized (multiplicative rescaling) normalized enrichment score.
# NOM p-val : Nominal p-value (from the null distribution of the gene set).
# FDR q-val: False discovery rate q-values
# FWER p-val: Family wise error rate p-values.
# Tag %: Percent of gene set before running enrichment peak.
# Gene %: Percent of gene list before running enrichment peak.
# Signal : enrichment signal strength.
# FDR (median): FDR q-values from the median of the null distributions.
# glob.p.val: P-value using a global statistic (number of sets above the set's NES).
#
# The rows are sorted by the NES values (from maximum positive or negative NES to minimum)
#
# The format (columns) for the gene set result files is as follows.
#
# #: Gene number in the (sorted) gene set
# GENE : gene name. For example the probe accession number, gene symbol or the gene identifier gin the dataset.
# SYMBOL : gene symbol from the gene annotation file.
# DESC : gene description (title) from the gene annotation file.
# LIST LOC : location of the gene in the sorted gene list.
# S2N : signal to noise ratio (correlation) of the gene in the gene list.
# RES : value of the running enrichment score at the gene location.
# CORE_ENRICHMENT: is this gene is the "core enrichment" section of the list? Yes or No variable specifying in the gene location is before (positive ES) or after (negative ES) the running enrichment peak.
#
# The rows are sorted by the gene location in the gene list.
# The function call to GSEA returns a two element list containing the two global result reports as data frames ($report1, $report2).
#
# results1: Global output report for first phenotype
# result2: Global putput report for second phenotype
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
print(" *** Running GSEA Analysis...")
if (OLD.GSEA == T) {
print("Running OLD GSEA from Mootha et al 2003")
}
# Copy input parameters to log file
if (output.directory != "") {
filename <- paste(output.directory, doc.string, "_params.txt", sep="", collapse="")
time.string <- as.character(as.POSIXlt(Sys.time(),"GMT"))
write(paste("Run of GSEA on ", time.string), file=filename)
if (is.data.frame(input.ds)) {
# write(paste("input.ds=", quote(input.ds), sep=" "), file=filename, append=T)
} else {
write(paste("input.ds=", input.ds, sep=" "), file=filename, append=T)
}
if (is.list(input.cls)) {
# write(paste("input.cls=", input.cls, sep=" "), file=filename, append=T)
} else {
write(paste("input.cls=", input.cls, sep=" "), file=filename, append=T)
}
if (is.data.frame(gene.ann)) {
# write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
} else {
write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
}
if (regexpr(pattern=".gmt", gs.db[1]) == -1) {
# write(paste("gs.db=", gs.db, sep=" "), file=filename, append=T)
} else {
write(paste("gs.db=", gs.db, sep=" "), file=filename, append=T)
}
if (is.data.frame(gs.ann)) {
# write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
} else {
write(paste("gs.ann =", gs.ann, sep=" "), file=filename, append=T)
}
write(paste("output.directory =", output.directory, sep=" "), file=filename, append=T)
write(paste("doc.string = ", doc.string, sep=" "), file=filename, append=T)
write(paste("non.interactive.run =", non.interactive.run, sep=" "), file=filename, append=T)
write(paste("reshuffling.type =", reshuffling.type, sep=" "), file=filename, append=T)
write(paste("nperm =", nperm, sep=" "), file=filename, append=T)
write(paste("weighted.score.type =", weighted.score.type, sep=" "), file=filename, append=T)
write(paste("nom.p.val.threshold =", nom.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fwer.p.val.threshold =", fwer.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fdr.q.val.threshold =", fdr.q.val.threshold, sep=" "), file=filename, append=T)
write(paste("topgs =", topgs, sep=" "), file=filename, append=T)
write(paste("adjust.FDR.q.val =", adjust.FDR.q.val, sep=" "), file=filename, append=T)
write(paste("gs.size.threshold.min =", gs.size.threshold.min, sep=" "), file=filename, append=T)
write(paste("gs.size.threshold.max =", gs.size.threshold.max, sep=" "), file=filename, append=T)
write(paste("reverse.sign =", reverse.sign, sep=" "), file=filename, append=T)
write(paste("preproc.type =", preproc.type, sep=" "), file=filename, append=T)
write(paste("random.seed =", random.seed, sep=" "), file=filename, append=T)
write(paste("perm.type =", perm.type, sep=" "), file=filename, append=T)
write(paste("fraction =", fraction, sep=" "), file=filename, append=T)
write(paste("replace =", replace, sep=" "), file=filename, append=T)
}
# Start of GSEA methodology
if (.Platform$OS.type == "windows") {
memory.limit(6000000000)
memory.limit()
# print(c("Start memory size=", memory.size()))
}
# Read input data matrix
set.seed(seed=random.seed, kind = NULL)
adjust.param <- 0.5
gc()
time1 <- proc.time()
if (is.data.frame(input.ds)) {
dataset <- input.ds
} else {
if (regexpr(pattern=".gct", input.ds) == -1) {
dataset <- GSEA.Res2Frame(filename = input.ds)
} else {
# dataset <- GSEA.Gct2Frame(filename = input.ds)
dataset <- GSEA.Gct2Frame2(filename = input.ds)
}
}
gene.labels <- row.names(dataset)
sample.names <- names(dataset)
A <- data.matrix(dataset)
dim(A)
cols <- length(A[1,])
rows <- length(A[,1])
# preproc.type control the type of pre-processing: threshold, variation filter, normalization
if (preproc.type == 1) { # Column normalize (Z-score)
A <- GSEA.NormalizeCols(A)
} else if (preproc.type == 2) { # Column (rank) and row (Z-score) normalize
for (j in 1:cols) { # column rank normalization
A[,j] <- rank(A[,j])
}
A <- GSEA.NormalizeRows(A)
} else if (preproc.type == 3) { # Column (rank) norm.
for (j in 1:cols) { # column rank normalization
A[,j] <- rank(A[,j])
}
}
# Read input class vector
if(is.list(input.cls)) {
CLS <- input.cls
} else {
CLS <- GSEA.ReadClsFile(file=input.cls)
}
class.labels <- CLS$class.v
class.phen <- CLS$phen
if (reverse.sign == T) {
phen1 <- class.phen[2]
phen2 <- class.phen[1]
} else {
phen1 <- class.phen[1]
phen2 <- class.phen[2]
}
# sort samples according to phenotype
col.index <- order(class.labels, decreasing=F)
class.labels <- class.labels[col.index]
sample.names <- sample.names[col.index]
for (j in 1:rows) {
A[j, ] <- A[j, col.index]
}
names(A) <- sample.names
# Read input gene set database
if (regexpr(pattern=".gmt", gs.db[1]) == -1) {
temp <- gs.db
} else {
temp <- readLines(gs.db)
}
max.Ng <- length(temp)
temp.size.G <- vector(length = max.Ng, mode = "numeric")
for (i in 1:max.Ng) {
temp.size.G[i] <- length(unlist(strsplit(temp[[i]], "\t"))) - 2
}
max.size.G <- max(temp.size.G)
print(max.size.G)
print(max.Ng)
gs <- matrix(rep("null", max.Ng*max.size.G), nrow=max.Ng, ncol= max.size.G)
temp.names <- vector(length = max.Ng, mode = "character")
temp.desc <- vector(length = max.Ng, mode = "character")
gs.count <- 1
for (i in 1:max.Ng) {
gene.set.size <- length(unlist(strsplit(temp[[i]], "\t"))) - 2
gs.line <- noquote(unlist(strsplit(temp[[i]], "\t")))
gene.set.name <- gs.line[1]
gene.set.desc <- gs.line[2]
gene.set.tags <- vector(length = gene.set.size, mode = "character")
for (j in 1:gene.set.size) {
gene.set.tags[j] <- gs.line[j + 2]
}
existing.set <- is.element(gene.set.tags, gene.labels)
set.size <- length(existing.set[existing.set == T])
if ((set.size < gs.size.threshold.min) || (set.size > gs.size.threshold.max)) next
temp.size.G[gs.count] <- set.size
gs[gs.count,] <- c(gene.set.tags[existing.set], rep("null", max.size.G - temp.size.G[gs.count]))
temp.names[gs.count] <- gene.set.name
temp.desc[gs.count] <- gene.set.desc
gs.count <- gs.count + 1
}
Ng <- gs.count - 1
gs.names <- vector(length = Ng, mode = "character")
gs.desc <- vector(length = Ng, mode = "character")
size.G <- vector(length = Ng, mode = "numeric")
gs.names <- temp.names[1:Ng]
gs.desc <- temp.desc[1:Ng]
size.G <- temp.size.G[1:Ng]
N <- length(A[,1])
Ns <- length(A[1,])
print(c("Number of genes:", N))
print(c("Number of Gene Sets:", Ng))
print(c("Number of samples:", Ns))
print(c("Original number of Gene Sets:", max.Ng))
print(c("Maximum gene set size:", max.size.G))
# Read gene and gene set annotations if gene annotation file was provided
all.gene.descs <- vector(length = N, mode ="character")
all.gene.symbols <- vector(length = N, mode ="character")
all.gs.descs <- vector(length = Ng, mode ="character")
if (is.data.frame(gene.ann)) {
temp <- gene.ann
a.size <- length(temp[,1])
print(c("Number of gene annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gene.labels, accs)
all.gene.descs <- as.character(temp[locs, "Gene.Title"])
all.gene.symbols <- as.character(temp[locs, "Gene.Symbol"])
rm(temp)
} else if (gene.ann == "") {
for (i in 1:N) {
all.gene.descs[i] <- gene.labels[i]
all.gene.symbols[i] <- gene.labels[i]
}
} else {
temp <- read.delim(gene.ann, header=T, sep=",", comment.char="", as.is=T)
a.size <- length(temp[,1])
print(c("Number of gene annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gene.labels, accs)
all.gene.descs <- as.character(temp[locs, "Gene.Title"])
all.gene.symbols <- as.character(temp[locs, "Gene.Symbol"])
rm(temp)
}
if (is.data.frame(gs.ann)) {
temp <- gs.ann
a.size <- length(temp[,1])
print(c("Number of gene set annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gs.names, accs)
all.gs.descs <- as.character(temp[locs, "SOURCE"])
rm(temp)
} else if (gs.ann == "") {
for (i in 1:Ng) {
all.gs.descs[i] <- gs.desc[i]
}
} else {
temp <- read.delim(gs.ann, header=T, sep="\t", comment.char="", as.is=T)
a.size <- length(temp[,1])
print(c("Number of gene set annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gs.names, accs)
all.gs.descs <- as.character(temp[locs, "SOURCE"])
rm(temp)
}
Obs.indicator <- matrix(nrow= Ng, ncol=N)
Obs.RES <- matrix(nrow= Ng, ncol=N)
Obs.ES <- vector(length = Ng, mode = "numeric")
Obs.arg.ES <- vector(length = Ng, mode = "numeric")
Obs.ES.norm <- vector(length = Ng, mode = "numeric")
time2 <- proc.time()
# GSEA methodology
# Compute observed and random permutation gene rankings
obs.s2n <- vector(length=N, mode="numeric")
signal.strength <- vector(length=Ng, mode="numeric")
tag.frac <- vector(length=Ng, mode="numeric")
gene.frac <- vector(length=Ng, mode="numeric")
coherence.ratio <- vector(length=Ng, mode="numeric")
obs.phi.norm <- matrix(nrow = Ng, ncol = nperm)
correl.matrix <- matrix(nrow = N, ncol = nperm)
obs.correl.matrix <- matrix(nrow = N, ncol = nperm)
order.matrix <- matrix(nrow = N, ncol = nperm)
obs.order.matrix <- matrix(nrow = N, ncol = nperm)
nperm.per.call <- 100
n.groups <- nperm %/% nperm.per.call
n.rem <- nperm %% nperm.per.call
n.perms <- c(rep(nperm.per.call, n.groups), n.rem)
n.ends <- cumsum(n.perms)
n.starts <- n.ends - n.perms + 1
if (n.rem == 0) {
n.tot <- n.groups
} else {
n.tot <- n.groups + 1
}
for (nk in 1:n.tot) {
call.nperm <- n.perms[nk]
print(paste("Computing ranked list for actual and permuted phenotypes.......permutations: ", n.starts[nk], "--", n.ends[nk], sep=" "))
O <- GSEA.GeneRanking(A, class.labels, gene.labels, call.nperm, permutation.type = perm.type, sigma.correction = "GeneCluster", fraction=fraction, replace=replace, reverse.sign = reverse.sign)
gc()
order.matrix[,n.starts[nk]:n.ends[nk]] <- O$order.matrix
obs.order.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.order.matrix
correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$s2n.matrix
obs.correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.s2n.matrix
rm(O)
}
obs.s2n <- apply(obs.correl.matrix, 1, median) # using median to assign enrichment scores
obs.index <- order(obs.s2n, decreasing=T)
obs.s2n <- sort(obs.s2n, decreasing=T)
obs.gene.labels <- gene.labels[obs.index]
obs.gene.descs <- all.gene.descs[obs.index]
obs.gene.symbols <- all.gene.symbols[obs.index]
for (r in 1:nperm) {
correl.matrix[, r] <- correl.matrix[order.matrix[,r], r]
}
for (r in 1:nperm) {
obs.correl.matrix[, r] <- obs.correl.matrix[obs.order.matrix[,r], r]
}
gene.list2 <- obs.index
for (i in 1:Ng) {
print(paste("Computing observed enrichment for gene set:", i, gs.names[i], sep=" "))
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
if (OLD.GSEA == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector = obs.s2n)
} else {
GSEA.results <- OLD.GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2)
}
Obs.ES[i] <- GSEA.results$ES
Obs.arg.ES[i] <- GSEA.results$arg.ES
Obs.RES[i,] <- GSEA.results$RES
Obs.indicator[i,] <- GSEA.results$indicator
if (Obs.ES[i] >= 0) { # compute signal strength
tag.frac[i] <- sum(Obs.indicator[i,1:Obs.arg.ES[i]])/size.G[i]
gene.frac[i] <- Obs.arg.ES[i]/N
} else {
tag.frac[i] <- sum(Obs.indicator[i, Obs.arg.ES[i]:N])/size.G[i]
gene.frac[i] <- (N - Obs.arg.ES[i] + 1)/N
}
signal.strength[i] <- tag.frac[i] * (1 - gene.frac[i]) * (N / (N - size.G[i]))
}
# Compute enrichment for random permutations
phi <- matrix(nrow = Ng, ncol = nperm)
phi.norm <- matrix(nrow = Ng, ncol = nperm)
obs.phi <- matrix(nrow = Ng, ncol = nperm)
if (reshuffling.type == "sample.labels") { # reshuffling phenotype labels
for (i in 1:Ng) {
print(paste("Computing random permutations' enrichment for gene set:", i, gs.names[i], sep=" "))
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
for (r in 1:nperm) {
gene.list2 <- order.matrix[,r]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=correl.matrix[, r])
}
phi[i, r] <- GSEA.results$ES
}
if (fraction < 1.0) { # resampling: compute the observed ES for every resampled ranking
  for (r in 1:nperm) {
    obs.gene.list2 <- obs.order.matrix[,r]
    if (use.fast.enrichment.routine == F) {
      GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
    } else {
      GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
    }
    obs.phi[i, r] <- GSEA.results$ES
  }
} else { # no resampling: compute column 1 only and replicate it across the other columns
  obs.gene.list2 <- obs.order.matrix[,1]
  # Use column 1 of obs.correl.matrix to match obs.order.matrix[, 1].
  # The original code read obs.correl.matrix[, r] here, where `r` was a
  # stale index left over from the preceding permutation loop.
  if (use.fast.enrichment.routine == F) {
    GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, 1])
  } else {
    GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, 1])
  }
  obs.phi[i, 1] <- GSEA.results$ES
  if (nperm > 1) { # guard: 2:nperm would count down (2, 1) when nperm == 1
    for (r in 2:nperm) {
      obs.phi[i, r] <- obs.phi[i, 1]
    }
  }
}
gc()
}
} else if (reshuffling.type == "gene.labels") { # reshuffling gene labels
for (i in 1:Ng) {
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:rows)
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=reshuffled.gene.labels, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.s2n)
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=reshuffled.gene.labels, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.s2n)
}
phi[i, r] <- GSEA.results$ES
}
if (fraction < 1.0) { # resampling: compute the observed ES for every resampled ranking
  for (r in 1:nperm) {
    obs.gene.list2 <- obs.order.matrix[,r]
    if (use.fast.enrichment.routine == F) {
      GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
    } else {
      GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
    }
    obs.phi[i, r] <- GSEA.results$ES
  }
} else { # no resampling: compute column 1 only and replicate it across the other columns
  obs.gene.list2 <- obs.order.matrix[,1]
  # Use column 1 of obs.correl.matrix to match obs.order.matrix[, 1].
  # The original code read obs.correl.matrix[, r] here, where `r` was a
  # stale index left over from the preceding permutation loop.
  if (use.fast.enrichment.routine == F) {
    GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, 1])
  } else {
    GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, 1])
  }
  obs.phi[i, 1] <- GSEA.results$ES
  if (nperm > 1) { # guard: 2:nperm would count down (2, 1) when nperm == 1
    for (r in 2:nperm) {
      obs.phi[i, r] <- obs.phi[i, 1]
    }
  }
}
gc()
}
}
# Compute 3 types of p-values
# Find nominal p-values
print("Computing nominal p-values...")
p.vals <- matrix(0, nrow = Ng, ncol = 2)
if (OLD.GSEA == F) {
for (i in 1:Ng) {
pos.phi <- NULL
neg.phi <- NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi <- c(pos.phi, phi[i, j])
} else {
neg.phi <- c(neg.phi, phi[i, j])
}
}
ES.value <- Obs.ES[i]
if (ES.value >= 0) {
p.vals[i, 1] <- signif(sum(pos.phi >= ES.value)/length(pos.phi), digits=5)
} else {
p.vals[i, 1] <- signif(sum(neg.phi <= ES.value)/length(neg.phi), digits=5)
}
}
} else { # For OLD GSEA compute the p-val using positive and negative values in the same histogram
for (i in 1:Ng) {
if (Obs.ES[i] >= 0) {
p.vals[i, 1] <- sum(phi[i,] >= Obs.ES[i])/length(phi[i,])
p.vals[i, 1] <- signif(p.vals[i, 1], digits=5)
} else {
p.vals[i, 1] <- sum(phi[i,] <= Obs.ES[i])/length(phi[i,])
p.vals[i, 1] <- signif(p.vals[i, 1], digits=5)
}
}
}
# Find effective size
# Gauss error function expressed through the normal CDF:
#   erf(x) = 2 * pnorm(sqrt(2) * x) - 1
# (this identity is given in R's ?Normal documentation). The previous
# version omitted the "- 1", so it returned values in (0, 2) instead of
# the correct range (-1, 1), biasing KS.mean() below.
erf <- function (x)
{
  2 * pnorm(sqrt(2) * x) - 1
}
# Analytic approximation of the Kolmogorov-Smirnov statistic's mean as a
# function of the set size N, via a truncated alternating series
# (k = -100..100, k != 0). Returns the absolute value of the partial sum.
KS.mean <- function(N) {
  total <- 0
  # Same summation order as before: negative k first, then positive,
  # with k == 0 excluded.
  for (j in c(-100:-1, 1:100)) {
    term <- 4 * (-1)^(j + 1) *
      (0.25 * exp(-2 * j * j * N) -
         sqrt(2 * pi) * erf(sqrt(2 * N) * j) / (16 * j * sqrt(N)))
    total <- total + term
  }
  return(abs(total))
}
# KS.mean.table <- vector(length=5000, mode="numeric")
# for (i in 1:5000) {
# KS.mean.table[i] <- KS.mean(i)
# }
# KS.size <- vector(length=Ng, mode="numeric")
# Rescaling normalization for each gene set null
print("Computing rescaling normalization for each gene set null...")
if (OLD.GSEA == F) {
for (i in 1:Ng) {
pos.phi <- NULL
neg.phi <- NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi <- c(pos.phi, phi[i, j])
} else {
neg.phi <- c(neg.phi, phi[i, j])
}
}
pos.m <- mean(pos.phi)
neg.m <- mean(abs(neg.phi))
# if (Obs.ES[i] >= 0) {
# KS.size[i] <- which.min(abs(KS.mean.table - pos.m))
# } else {
# KS.size[i] <- which.min(abs(KS.mean.table - neg.m))
# }
pos.phi <- pos.phi/pos.m
neg.phi <- neg.phi/neg.m
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
phi.norm[i, j] <- phi[i, j]/pos.m
} else {
phi.norm[i, j] <- phi[i, j]/neg.m
}
}
for (j in 1:nperm) {
if (obs.phi[i, j] >= 0) {
obs.phi.norm[i, j] <- obs.phi[i, j]/pos.m
} else {
obs.phi.norm[i, j] <- obs.phi[i, j]/neg.m
}
}
if (Obs.ES[i] >= 0) {
Obs.ES.norm[i] <- Obs.ES[i]/pos.m
} else {
Obs.ES.norm[i] <- Obs.ES[i]/neg.m
}
}
} else { # For OLD GSEA does not normalize using empirical scaling
for (i in 1:Ng) {
for (j in 1:nperm) {
phi.norm[i, j] <- phi[i, j]/400
}
for (j in 1:nperm) {
obs.phi.norm[i, j] <- obs.phi[i, j]/400
}
Obs.ES.norm[i] <- Obs.ES[i]/400
}
}
# Save intermedite results
if (save.intermediate.results == T) {
filename <- paste(output.directory, doc.string, ".phi.txt", sep="", collapse="")
write.table(phi, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".obs.phi.txt", sep="", collapse="")
write.table(obs.phi, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".phi.norm.txt", sep="", collapse="")
write.table(phi.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".obs.phi.norm.txt", sep="", collapse="")
write.table(obs.phi.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".Obs.ES.txt", sep="", collapse="")
write.table(Obs.ES, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".Obs.ES.norm.txt", sep="", collapse="")
write.table(Obs.ES.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
}
# Compute FWER p-vals
print("Computing FWER p-values...")
if (OLD.GSEA == F) {
max.ES.vals.p <- NULL
max.ES.vals.n <- NULL
for (j in 1:nperm) {
pos.phi <- NULL
neg.phi <- NULL
for (i in 1:Ng) {
if (phi.norm[i, j] >= 0) {
pos.phi <- c(pos.phi, phi.norm[i, j])
} else {
neg.phi <- c(neg.phi, phi.norm[i, j])
}
}
if (length(pos.phi) > 0) {
max.ES.vals.p <- c(max.ES.vals.p, max(pos.phi))
}
if (length(neg.phi) > 0) {
max.ES.vals.n <- c(max.ES.vals.n, min(neg.phi))
}
}
for (i in 1:Ng) {
ES.value <- Obs.ES.norm[i]
if (Obs.ES.norm[i] >= 0) {
p.vals[i, 2] <- signif(sum(max.ES.vals.p >= ES.value)/length(max.ES.vals.p), digits=5)
} else {
p.vals[i, 2] <- signif(sum(max.ES.vals.n <= ES.value)/length(max.ES.vals.n), digits=5)
}
}
} else { # For OLD GSEA compute the FWER using positive and negative values in the same histogram
max.ES.vals <- NULL
for (j in 1:nperm) {
max.NES <- max(phi.norm[,j])
min.NES <- min(phi.norm[,j])
if (max.NES > - min.NES) {
max.val <- max.NES
} else {
max.val <- min.NES
}
max.ES.vals <- c(max.ES.vals, max.val)
}
for (i in 1:Ng) {
if (Obs.ES.norm[i] >= 0) {
p.vals[i, 2] <- sum(max.ES.vals >= Obs.ES.norm[i])/length(max.ES.vals)
} else {
p.vals[i, 2] <- sum(max.ES.vals <= Obs.ES.norm[i])/length(max.ES.vals)
}
p.vals[i, 2] <- signif(p.vals[i, 2], digits=4)
}
}
# Compute FDRs
print("Computing FDR q-values...")
NES <- vector(length=Ng, mode="numeric")
phi.norm.mean <- vector(length=Ng, mode="numeric")
obs.phi.norm.mean <- vector(length=Ng, mode="numeric")
phi.norm.median <- vector(length=Ng, mode="numeric")
obs.phi.norm.median <- vector(length=Ng, mode="numeric")
phi.norm.mean <- vector(length=Ng, mode="numeric")
obs.phi.mean <- vector(length=Ng, mode="numeric")
FDR.mean <- vector(length=Ng, mode="numeric")
FDR.median <- vector(length=Ng, mode="numeric")
phi.norm.median.d <- vector(length=Ng, mode="numeric")
obs.phi.norm.median.d <- vector(length=Ng, mode="numeric")
Obs.ES.index <- order(Obs.ES.norm, decreasing=T)
Orig.index <- seq(1, Ng)
Orig.index <- Orig.index[Obs.ES.index]
Orig.index <- order(Orig.index, decreasing=F)
Obs.ES.norm.sorted <- Obs.ES.norm[Obs.ES.index]
gs.names.sorted <- gs.names[Obs.ES.index]
for (k in 1:Ng) {
NES[k] <- Obs.ES.norm.sorted[k]
ES.value <- NES[k]
count.col <- vector(length=nperm, mode="numeric")
obs.count.col <- vector(length=nperm, mode="numeric")
for (i in 1:nperm) {
phi.vec <- phi.norm[,i]
obs.phi.vec <- obs.phi.norm[,i]
if (ES.value >= 0) {
count.col.norm <- sum(phi.vec >= 0)
obs.count.col.norm <- sum(obs.phi.vec >= 0)
count.col[i] <- ifelse(count.col.norm > 0, sum(phi.vec >= ES.value)/count.col.norm, 0)
obs.count.col[i] <- ifelse(obs.count.col.norm > 0, sum(obs.phi.vec >= ES.value)/obs.count.col.norm, 0)
} else {
count.col.norm <- sum(phi.vec < 0)
obs.count.col.norm <- sum(obs.phi.vec < 0)
count.col[i] <- ifelse(count.col.norm > 0, sum(phi.vec <= ES.value)/count.col.norm, 0)
obs.count.col[i] <- ifelse(obs.count.col.norm > 0, sum(obs.phi.vec <= ES.value)/obs.count.col.norm, 0)
}
}
phi.norm.mean[k] <- mean(count.col)
obs.phi.norm.mean[k] <- mean(obs.count.col)
phi.norm.median[k] <- median(count.col)
obs.phi.norm.median[k] <- median(obs.count.col)
FDR.mean[k] <- ifelse(phi.norm.mean[k]/obs.phi.norm.mean[k] < 1, phi.norm.mean[k]/obs.phi.norm.mean[k], 1)
FDR.median[k] <- ifelse(phi.norm.median[k]/obs.phi.norm.median[k] < 1, phi.norm.median[k]/obs.phi.norm.median[k], 1)
}
# adjust q-values
if (adjust.FDR.q.val == T) {
pos.nes <- length(NES[NES >= 0])
min.FDR.mean <- FDR.mean[pos.nes]
min.FDR.median <- FDR.median[pos.nes]
for (k in seq(pos.nes - 1, 1, -1)) {
if (FDR.mean[k] < min.FDR.mean) {
min.FDR.mean <- FDR.mean[k]
}
if (min.FDR.mean < FDR.mean[k]) {
FDR.mean[k] <- min.FDR.mean
}
}
neg.nes <- pos.nes + 1
min.FDR.mean <- FDR.mean[neg.nes]
min.FDR.median <- FDR.median[neg.nes]
for (k in seq(neg.nes + 1, Ng)) {
if (FDR.mean[k] < min.FDR.mean) {
min.FDR.mean <- FDR.mean[k]
}
if (min.FDR.mean < FDR.mean[k]) {
FDR.mean[k] <- min.FDR.mean
}
}
}
obs.phi.norm.mean.sorted <- obs.phi.norm.mean[Orig.index]
phi.norm.mean.sorted <- phi.norm.mean[Orig.index]
FDR.mean.sorted <- FDR.mean[Orig.index]
FDR.median.sorted <- FDR.median[Orig.index]
# Compute global statistic
glob.p.vals <- vector(length=Ng, mode="numeric")
NULL.pass <- vector(length=nperm, mode="numeric")
OBS.pass <- vector(length=nperm, mode="numeric")
for (k in 1:Ng) {
NES[k] <- Obs.ES.norm.sorted[k]
if (NES[k] >= 0) {
for (i in 1:nperm) {
NULL.pos <- sum(phi.norm[,i] >= 0)
NULL.pass[i] <- ifelse(NULL.pos > 0, sum(phi.norm[,i] >= NES[k])/NULL.pos, 0)
OBS.pos <- sum(obs.phi.norm[,i] >= 0)
OBS.pass[i] <- ifelse(OBS.pos > 0, sum(obs.phi.norm[,i] >= NES[k])/OBS.pos, 0)
}
} else {
for (i in 1:nperm) {
NULL.neg <- sum(phi.norm[,i] < 0)
NULL.pass[i] <- ifelse(NULL.neg > 0, sum(phi.norm[,i] <= NES[k])/NULL.neg, 0)
OBS.neg <- sum(obs.phi.norm[,i] < 0)
OBS.pass[i] <- ifelse(OBS.neg > 0, sum(obs.phi.norm[,i] <= NES[k])/OBS.neg, 0)
}
}
glob.p.vals[k] <- sum(NULL.pass >= mean(OBS.pass))/nperm
}
glob.p.vals.sorted <- glob.p.vals[Orig.index]
# Produce results report
print("Producing result tables and plots...")
Obs.ES <- signif(Obs.ES, digits=5)
Obs.ES.norm <- signif(Obs.ES.norm, digits=5)
p.vals <- signif(p.vals, digits=4)
signal.strength <- signif(signal.strength, digits=3)
tag.frac <- signif(tag.frac, digits=3)
gene.frac <- signif(gene.frac, digits=3)
FDR.mean.sorted <- signif(FDR.mean.sorted, digits=5)
FDR.median.sorted <- signif(FDR.median.sorted, digits=5)
glob.p.vals.sorted <- signif(glob.p.vals.sorted, digits=5)
report <- data.frame(cbind(gs.names, size.G, all.gs.descs, Obs.ES, Obs.ES.norm, p.vals[,1], FDR.mean.sorted, p.vals[,2], tag.frac, gene.frac, signal.strength, FDR.median.sorted, glob.p.vals.sorted))
names(report) <- c("GS", "SIZE", "SOURCE", "ES", "NES", "NOM p-val", "FDR q-val", "FWER p-val", "Tag %", "Gene %", "Signal", "FDR (median)", "glob.p.val")
# print(report)
report2 <- report
report.index2 <- order(Obs.ES.norm, decreasing=T)
for (i in 1:Ng) {
report2[i,] <- report[report.index2[i],]
}
report3 <- report
report.index3 <- order(Obs.ES.norm, decreasing=F)
for (i in 1:Ng) {
report3[i,] <- report[report.index3[i],]
}
phen1.rows <- length(Obs.ES.norm[Obs.ES.norm >= 0])
phen2.rows <- length(Obs.ES.norm[Obs.ES.norm < 0])
report.phen1 <- report2[1:phen1.rows,]
report.phen2 <- report3[1:phen2.rows,]
if (output.directory != "") {
if (phen1.rows > 0) {
filename <- paste(output.directory, doc.string, ".SUMMARY.RESULTS.REPORT.", phen1,".txt", sep="", collapse="")
write.table(report.phen1, file = filename, quote=F, row.names=F, sep = "\t")
}
if (phen2.rows > 0) {
filename <- paste(output.directory, doc.string, ".SUMMARY.RESULTS.REPORT.", phen2,".txt", sep="", collapse="")
write.table(report.phen2, file = filename, quote=F, row.names=F, sep = "\t")
}
}
# Global plots
if (output.directory != "") {
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
glob.filename <- paste(output.directory, doc.string, ".global.plots", sep="", collapse="")
windows(width = 10, height = 10)
} else if (.Platform$OS.type == "unix") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
}
} else {
if (.Platform$OS.type == "unix") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
} else if (.Platform$OS.type == "windows") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
}
}
}
nf <- layout(matrix(c(1,2,3,4), 2, 2, byrow=T), c(1,1), c(1,1), TRUE)
# plot S2N correlation profile
location <- 1:N
max.corr <- max(obs.s2n)
min.corr <- min(obs.s2n)
x <- plot(location, obs.s2n, ylab = "Signal to Noise Ratio (S2N)", xlab = "Gene List Location", main = "Gene List Correlation (S2N) Profile", type = "l", lwd = 2, cex = 0.9, col = 1)
for (i in seq(1, N, 20)) {
lines(c(i, i), c(0, obs.s2n[i]), lwd = 3, cex = 0.9, col = colors()[12]) # shading of correlation plot
}
x <- points(location, obs.s2n, type = "l", lwd = 2, cex = 0.9, col = 1)
lines(c(1, N), c(0, 0), lwd = 2, lty = 1, cex = 0.9, col = 1) # zero correlation horizontal line
temp <- order(abs(obs.s2n), decreasing=T)
arg.correl <- temp[N]
lines(c(arg.correl, arg.correl), c(min.corr, 0.7*max.corr), lwd = 2, lty = 3, cex = 0.9, col = 1) # zero correlation vertical line
area.bias <- signif(100*(sum(obs.s2n[1:arg.correl]) + sum(obs.s2n[arg.correl:N]))/sum(abs(obs.s2n[1:N])), digits=3)
area.phen <- ifelse(area.bias >= 0, phen1, phen2)
delta.string <- paste("Corr. Area Bias to \"", area.phen, "\" =", abs(area.bias), "%", sep="", collapse="")
zero.crossing.string <- paste("Zero Crossing at location ", arg.correl, " (", signif(100*arg.correl/N, digits=3), " %)")
leg.txt <- c(delta.string, zero.crossing.string)
legend(x=N/10, y=max.corr, bty="n", bg = "white", legend=leg.txt, cex = 0.9)
leg.txt <- paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=-0.05*max.corr, adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt <- paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=0.05*max.corr, adj = c(1, 0), labels=leg.txt, cex = 0.9)
if (Ng > 1) { # make these plots only if there are multiple gene sets.
# compute plots of actual (weighted) null and observed
phi.densities.pos <- matrix(0, nrow=512, ncol=nperm)
phi.densities.neg <- matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.pos <- matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.neg <- matrix(0, nrow=512, ncol=nperm)
phi.density.mean.pos <- vector(length=512, mode = "numeric")
phi.density.mean.neg <- vector(length=512, mode = "numeric")
obs.phi.density.mean.pos <- vector(length=512, mode = "numeric")
obs.phi.density.mean.neg <- vector(length=512, mode = "numeric")
phi.density.median.pos <- vector(length=512, mode = "numeric")
phi.density.median.neg <- vector(length=512, mode = "numeric")
obs.phi.density.median.pos <- vector(length=512, mode = "numeric")
obs.phi.density.median.neg <- vector(length=512, mode = "numeric")
x.coor.pos <- vector(length=512, mode = "numeric")
x.coor.neg <- vector(length=512, mode = "numeric")
for (i in 1:nperm) {
pos.phi <- phi.norm[phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp <- density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.pos[, i] <- temp$y
norm.factor <- sum(phi.densities.pos[, i])
phi.densities.pos[, i] <- phi.densities.pos[, i]/norm.factor
if (i == 1) {
x.coor.pos <- temp$x
}
neg.phi <- phi.norm[phi.norm[, i] < 0, i]
if (length(neg.phi) > 2) {
temp <- density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.neg[, i] <- temp$y
norm.factor <- sum(phi.densities.neg[, i])
phi.densities.neg[, i] <- phi.densities.neg[, i]/norm.factor
if (i == 1) {
x.coor.neg <- temp$x
}
pos.phi <- obs.phi.norm[obs.phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp <- density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.pos[, i] <- temp$y
norm.factor <- sum(obs.phi.densities.pos[, i])
obs.phi.densities.pos[, i] <- obs.phi.densities.pos[, i]/norm.factor
neg.phi <- obs.phi.norm[obs.phi.norm[, i] < 0, i]
if (length(neg.phi)> 2) {
temp <- density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.neg[, i] <- temp$y
norm.factor <- sum(obs.phi.densities.neg[, i])
obs.phi.densities.neg[, i] <- obs.phi.densities.neg[, i]/norm.factor
}
phi.density.mean.pos <- apply(phi.densities.pos, 1, mean)
phi.density.mean.neg <- apply(phi.densities.neg, 1, mean)
obs.phi.density.mean.pos <- apply(obs.phi.densities.pos, 1, mean)
obs.phi.density.mean.neg <- apply(obs.phi.densities.neg, 1, mean)
phi.density.median.pos <- apply(phi.densities.pos, 1, median)
phi.density.median.neg <- apply(phi.densities.neg, 1, median)
obs.phi.density.median.pos <- apply(obs.phi.densities.pos, 1, median)
obs.phi.density.median.neg <- apply(obs.phi.densities.neg, 1, median)
x <- c(x.coor.neg, x.coor.pos)
x.plot.range <- range(x)
y1 <- c(phi.density.mean.neg, phi.density.mean.pos)
y2 <- c(obs.phi.density.mean.neg, obs.phi.density.mean.pos)
y.plot.range <- c(-0.3*max(c(y1, y2)), max(c(y1, y2)))
print(c(y.plot.range, max(c(y1, y2)), max(y1), max(y2)))
plot(x, y1, xlim = x.plot.range, ylim = 1.5*y.plot.range, type = "l", lwd = 2, col = 2, xlab = "NES", ylab = "P(NES)", main = "Global Observed and Null Densities (Area Normalized)")
y1.point <- y1[seq(1, length(x), 2)]
y2.point <- y2[seq(2, length(x), 2)]
x1.point <- x[seq(1, length(x), 2)]
x2.point <- x[seq(2, length(x), 2)]
# for (i in 1:length(x1.point)) {
# lines(c(x1.point[i], x1.point[i]), c(0, y1.point[i]), lwd = 3, cex = 0.9, col = colors()[555]) # shading
# }
#
# for (i in 1:length(x2.point)) {
# lines(c(x2.point[i], x2.point[i]), c(0, y2.point[i]), lwd = 3, cex = 0.9, col = colors()[29]) # shading
# }
points(x, y1, type = "l", lwd = 2, col = colors()[555])
points(x, y2, type = "l", lwd = 2, col = colors()[29])
for (i in 1:Ng) {
col <- ifelse(Obs.ES.norm[i] > 0, 2, 3)
lines(c(Obs.ES.norm[i], Obs.ES.norm[i]), c(-0.2*max(c(y1, y2)), 0), lwd = 1, lty = 1, col = 1)
}
leg.txt <- paste("Neg. ES: \"", phen2, " \" ", sep="", collapse="")
text(x=x.plot.range[1], y=-0.25*max(c(y1, y2)), adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt <- paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
text(x=x.plot.range[2], y=-0.25*max(c(y1, y2)), adj = c(1, 1), labels=leg.txt, cex = 0.9)
leg.txt <- c("Null Density", "Observed Density", "Observed NES values")
c.vec <- c(colors()[555], colors()[29], 1)
lty.vec <- c(1, 1, 1)
lwd.vec <- c(2, 2, 2)
legend(x=0, y=1.5*y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 0.9)
B <- A[obs.index,]
if (N > 300) {
C <- rbind(B[1:100,], rep(0, Ns), rep(0, Ns), B[(floor(N/2) - 50 + 1):(floor(N/2) + 50),], rep(0, Ns), rep(0, Ns), B[(N - 100 + 1):N,])
}
rm(B)
GSEA.HeatMapPlot(V = C, col.labels = class.labels, col.classes = class.phen, main = "Heat Map for Genes in Dataset")
# p-vals plot
nom.p.vals <- p.vals[Obs.ES.index,1]
FWER.p.vals <- p.vals[Obs.ES.index,2]
plot.range <- 1.25*range(NES)
plot(NES, FDR.mean, ylim = c(0, 1), xlim = plot.range, col = 1, bg = 1, type="p", pch = 22, cex = 0.75, xlab = "NES", main = "p-values vs. NES", ylab ="p-val/q-val")
points(NES, nom.p.vals, type = "p", col = 2, bg = 2, pch = 22, cex = 0.75)
points(NES, FWER.p.vals, type = "p", col = colors()[577], bg = colors()[577], pch = 22, cex = 0.75)
leg.txt <- c("Nominal p-value", "FWER p-value", "FDR q-value")
c.vec <- c(2, colors()[577], 1)
pch.vec <- c(22, 22, 22)
legend(x=-0.5, y=0.5, bty="n", bg = "white", legend=leg.txt, pch = pch.vec, col = c.vec, pt.bg = c.vec, cex = 0.9)
lines(c(min(NES), max(NES)), c(nom.p.val.threshold, nom.p.val.threshold), lwd = 1, lty = 2, col = 2)
lines(c(min(NES), max(NES)), c(fwer.p.val.threshold, fwer.p.val.threshold), lwd = 1, lty = 2, col = colors()[577])
lines(c(min(NES), max(NES)), c(fdr.q.val.threshold, fdr.q.val.threshold), lwd = 1, lty = 2, col = 1)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = glob.filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
} # if Ng > 1
#----------------------------------------------------------------------------
# Produce report for each gene set passing the nominal, FWER or FDR test or the top topgs in each side
if (topgs > floor(Ng/2)) {
topgs <- floor(Ng/2)
}
for (i in 1:Ng) {
if ((p.vals[i, 1] <= nom.p.val.threshold) ||
(p.vals[i, 2] <= fwer.p.val.threshold) ||
(FDR.mean.sorted[i] <= fdr.q.val.threshold) ||
(is.element(i, c(Obs.ES.index[1:topgs], Obs.ES.index[(Ng - topgs + 1): Ng])))) {
# produce report per gene set
kk <- 1
gene.number <- vector(length = size.G[i], mode = "character")
gene.names <- vector(length = size.G[i], mode = "character")
gene.symbols <- vector(length = size.G[i], mode = "character")
gene.descs <- vector(length = size.G[i], mode = "character")
gene.list.loc <- vector(length = size.G[i], mode = "numeric")
core.enrichment <- vector(length = size.G[i], mode = "character")
gene.s2n <- vector(length = size.G[i], mode = "numeric")
gene.RES <- vector(length = size.G[i], mode = "numeric")
rank.list <- seq(1, N)
if (Obs.ES[i] >= 0) {
set.k <- seq(1, N, 1)
phen.tag <- phen1
loc <- match(i, Obs.ES.index)
} else {
set.k <- seq(N, 1, -1)
phen.tag <- phen2
loc <- Ng - match(i, Obs.ES.index) + 1
}
for (k in set.k) {
if (Obs.indicator[i, k] == 1) {
gene.number[kk] <- kk
gene.names[kk] <- obs.gene.labels[k]
gene.symbols[kk] <- substr(obs.gene.symbols[k], 1, 15)
gene.descs[kk] <- substr(obs.gene.descs[k], 1, 40)
gene.list.loc[kk] <- k
gene.s2n[kk] <- signif(obs.s2n[k], digits=3)
gene.RES[kk] <- signif(Obs.RES[i, k], digits = 3)
if (Obs.ES[i] >= 0) {
core.enrichment[kk] <- ifelse(gene.list.loc[kk] <= Obs.arg.ES[i], "YES", "NO")
} else {
core.enrichment[kk] <- ifelse(gene.list.loc[kk] > Obs.arg.ES[i], "YES", "NO")
}
kk <- kk + 1
}
}
gene.report <- data.frame(cbind(gene.number, gene.names, gene.symbols, gene.descs, gene.list.loc, gene.s2n, gene.RES, core.enrichment))
names(gene.report) <- c("#", "GENE", "SYMBOL", "DESC", "LIST LOC", "S2N", "RES", "CORE_ENRICHMENT")
# print(gene.report)
if (output.directory != "") {
filename <- paste(output.directory, doc.string, ".", gs.names[i], ".report.", phen.tag, ".", loc, ".txt", sep="", collapse="")
write.table(gene.report, file = filename, quote=F, row.names=F, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, sep="", collapse="")
windows(width = 14, height = 6)
} else if (.Platform$OS.type == "unix") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
}
} else {
if (.Platform$OS.type == "unix") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
} else if (.Platform$OS.type == "windows") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
}
}
}
# nf <- layout(matrix(c(1,2,3), 1, 3, byrow=T), 1, c(1, 1, 1), TRUE)
nf <- layout(matrix(c(1,0,2), 1, 3, byrow=T), widths=c(1,0,1), heights=c(1,0,1))
ind <- 1:N
min.RES <- min(Obs.RES[i,])
max.RES <- max(Obs.RES[i,])
if (max.RES < 0.3) max.RES <- 0.3
if (min.RES > -0.3) min.RES <- -0.3
delta <- (max.RES - min.RES)*0.50
min.plot <- min.RES - 2*delta
max.plot <- max.RES
max.corr <- max(obs.s2n)
min.corr <- min(obs.s2n)
Obs.correl.vector.norm <- (obs.s2n - min.corr)/(max.corr - min.corr)*1.25*delta + min.plot
zero.corr.line <- (- min.corr/(max.corr - min.corr))*1.25*delta + min.plot
col <- ifelse(Obs.ES[i] > 0, 2, 4)
# Running enrichment plot
# sub.string <- paste("Number of genes: ", N, " (in list), ", size.G[i], " (in gene set)", sep = "", collapse="")
sub.string <- paste("ES =", signif(Obs.ES[i], digits = 3), " NES =", signif(Obs.ES.norm[i], digits=3), "Nom. p-val=", signif(p.vals[i, 1], digits = 3),"FWER=", signif(p.vals[i, 2], digits = 3), "FDR=", signif(FDR.mean.sorted[i], digits = 3))
# main.string <- paste("Gene Set ", i, ":", gs.names[i])
main.string <- paste("Gene Set:", gs.names[i])
# plot(ind, Obs.RES[i,], main = main.string, sub = sub.string, xlab = "Gene List Index", ylab = "Running Enrichment Score (RES)", xlim=c(1, N), ylim=c(min.plot, max.plot), type = "l", lwd = 2, cex = 1, col = col)
plot(ind, Obs.RES[i,], main = main.string, xlab = sub.string, ylab = "Running Enrichment Score (RES)", xlim=c(1, N), ylim=c(min.plot, max.plot), type = "l", lwd = 2, cex = 1, col = col)
for (j in seq(1, N, 20)) {
lines(c(j, j), c(zero.corr.line, Obs.correl.vector.norm[j]), lwd = 1, cex = 1, col = colors()[12]) # shading of correlation plot
}
lines(c(1, N), c(0, 0), lwd = 1, lty = 2, cex = 1, col = 1) # zero RES line
lines(c(Obs.arg.ES[i], Obs.arg.ES[i]), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = col) # max enrichment vertical line
for (j in 1:N) {
if (Obs.indicator[i, j] == 1) {
lines(c(j, j), c(min.plot + 1.25*delta, min.plot + 1.75*delta), lwd = 1, lty = 1, cex = 1, col = 1) # enrichment tags
}
}
lines(ind, Obs.correl.vector.norm, type = "l", lwd = 1, cex = 1, col = 1)
lines(c(1, N), c(zero.corr.line, zero.corr.line), lwd = 1, lty = 1, cex = 1, col = 1) # zero correlation horizontal line
temp <- order(abs(obs.s2n), decreasing=T)
arg.correl <- temp[N]
lines(c(arg.correl, arg.correl), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = 3) # zero crossing correlation vertical line
leg.txt <- paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=min.plot, adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=min.plot, adj = c(1, 0), labels=leg.txt, cex = 1.0)
adjx <- ifelse(Obs.ES[i] > 0, 0, 1)
leg.txt <- paste("Peak at ", Obs.arg.ES[i], sep="", collapse="")
text(x=Obs.arg.ES[i], y=min.plot + 1.8*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste("Zero crossing at ", arg.correl, sep="", collapse="")
text(x=arg.correl, y=min.plot + 1.95*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
# nominal p-val histogram
# sub.string <- paste("ES =", signif(Obs.ES[i], digits = 3), " NES =", signif(Obs.ES.norm[i], digits=3), "Nom. p-val=", signif(p.vals[i, 1], digits = 3),"FWER=", signif(p.vals[i, 2], digits = 3), "FDR=", signif(FDR.mean.sorted[i], digits = 3))
temp <- density(phi[i,], adjust=adjust.param)
x.plot.range <- range(temp$x)
y.plot.range <- c(-0.125*max(temp$y), 1.5*max(temp$y))
# plot(temp$x, temp$y, type = "l", sub = sub.string, xlim = x.plot.range, ylim = y.plot.range, lwd = 2, col = 2, main = "Gene Set Null Distribution", xlab = "ES", ylab="P(ES)")
x.loc <- which.min(abs(temp$x - Obs.ES[i]))
# lines(c(Obs.ES[i], Obs.ES[i]), c(0, temp$y[x.loc]), lwd = 2, lty = 1, cex = 1, col = 1)
# lines(x.plot.range, c(0, 0), lwd = 1, lty = 1, cex = 1, col = 1)
leg.txt <- c("Gene Set Null Density", "Observed Gene Set ES value")
c.vec <- c(2, 1)
lty.vec <- c(1, 1)
lwd.vec <- c(2, 2)
# legend(x=-0.2, y=y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 1.0)
leg.txt <- paste("Neg. ES \"", phen2, "\" ", sep="", collapse="")
# text(x=x.plot.range[1], y=-0.1*max(temp$y), adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
# text(x=x.plot.range[2], y=-0.1*max(temp$y), adj = c(1, 0), labels=leg.txt, cex = 1.0)
# create pinkogram for each gene set
kk <- 1
pinko <- matrix(0, nrow = size.G[i], ncol = cols)
pinko.gene.names <- vector(length = size.G[i], mode = "character")
for (k in 1:rows) {
if (Obs.indicator[i, k] == 1) {
pinko[kk,] <- A[obs.index[k],]
pinko.gene.names[kk] <- obs.gene.symbols[k]
kk <- kk + 1
}
}
GSEA.HeatMapPlot(V = pinko, row.names = pinko.gene.names, col.labels = class.labels, col.classes = class.phen, col.names = sample.names, main =" Heat Map for Genes in Gene Set", xlab=" ", ylab=" ")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = gs.filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
} # if p.vals thres
} # loop over gene sets
return(list(report1 = report.phen1, report2 = report.phen2))
} # end of definition of GSEA.analysis
# Write an expression table `gct` (data frame or matrix with row names and
# sample column names) to `filename` in GCT 1.2 format:
#   line 1: "#1.2", line 2: "<rows>\t<cols>",
#   line 3: "Name\tDescription\t<sample names...>",
#   then one line per gene with the row name duplicated into the
#   Description column. Returns `gct` unchanged.
GSEA.write.gct <- function (gct, filename)
{
  f <- file(filename, "w")
  on.exit(close(f), add = TRUE) # close the connection even if writing fails
  cat("#1.2", "\n", file = f, sep = "")
  cat(dim(gct)[1], "\t", dim(gct)[2], "\n", file = f, sep = "")
  # Header row. paste/collapse handles a single-column table correctly;
  # the old `for (j in 2:length(names))` loop counted down (2, 1) for one
  # column and emitted a spurious "\tNA" entry.
  cat("Name", "Description", file = f, sep = "\t")
  cat("\t", paste(names(gct), collapse = "\t"), "\n", file = f, sep = "")
  # Body: row name doubles as the Description column. cbind onto the row
  # names coerces the values to character exactly as the old per-column
  # assignment into a character matrix did.
  m <- cbind(row.names(gct), row.names(gct), as.matrix(gct))
  oldWarn <- options(warn = -1)
  on.exit(options(oldWarn), add = TRUE) # restore caller's warn setting (was hard-reset to 0)
  write.table(m, file = f, append = TRUE, quote = FALSE, sep = "\t", eol = "\n", col.names = FALSE, row.names = FALSE)
  return(gct)
}
# Draw a heat map of the consensus matrix V with sample names on the left
# and top axes. Rows are flipped vertically and values inverted around the
# observed range so that high consensus lands on the warm end of the
# palette. Purely graphical; returns NULL.
GSEA.ConsPlot <- function(V, col.names, main = " ", sub = " ", xlab=" ", ylab=" ") {
  n.cols <- length(V[1,])
  v.max <- max(V)
  v.min <- min(V)
  # B[k, j] mirrors V vertically (k = n.cols - i + 1) and maps each value
  # v -> v.max - v + v.min, inverting the scale.
  B <- matrix(0, nrow = n.cols, ncol = n.cols)
  for (row.idx in 1:n.cols) {
    flipped <- n.cols - row.idx + 1
    for (col.idx in 1:n.cols) {
      B[flipped, col.idx] <- v.max - V[row.idx, col.idx] + v.min
    }
  }
  # Fixed blue-to-red palette, reversed so blue = high B (i.e. low consensus).
  col.map <- rev(c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF",
                   "#D5D5FF", "#EEE5EE", "#FFAADA", "#FF9DB0", "#FF7080",
                   "#FF5A5A", "#FF4040", "#FF0D1D"))
  # Wide left/top margins leave room for the sample-name labels.
  par(mar = c(5, 15, 15, 5))
  image(1:n.cols, 1:n.cols, t(B), col = col.map, axes=FALSE,
        main=main, sub=sub, xlab= xlab, ylab=ylab)
  # Truncate long sample names so axis labels stay legible.
  for (idx in 1:n.cols) {
    col.names[idx] <- substr(col.names[idx], 1, 25)
  }
  col.names2 <- rev(col.names)
  label.size <- ifelse(n.cols < 15, 1, sqrt(15/n.cols))
  axis(2, at=1:n.cols, labels=col.names2, adj= 0.5, tick=FALSE, las = 1,
       cex.axis=label.size, font.axis=1, line=-1)
  axis(3, at=1:n.cols, labels=col.names, adj= 1, tick=FALSE, las = 3,
       cex.axis=label.size, font.axis=1, line=-1)
  return()
}
GSEA.HeatMapPlot2 <- function(V, row.names = "NA", col.names = "NA", main = " ", sub = " ", xlab=" ", ylab=" ", color.map = "default") {
  #
  # Plots a heat map of matrix V (flipped vertically so the first row of V
  # appears at the top of the plot).
  #
  # V:         numeric matrix of values to display
  # row.names, col.names: axis labels; pass a length-1 value (default "NA")
  #            to suppress the corresponding axis
  # color.map: vector of colours, or "default" for a blue-to-red rainbow ramp
  n.rows <- length(V[,1])
  n.cols <- length(V[1,])
  # identical() keeps this test safe when a colour *vector* is supplied: the
  # original `color.map == "default"` is a vectorized comparison, which is an
  # error for length > 1 conditions in R >= 4.2 (callers pass a 2-colour cmap).
  if (identical(color.map, "default")) {
    # The original called rainbow(..., gamma = 1.5); the 'gamma' argument has
    # been removed from grDevices::rainbow(), so it is dropped here.
    color.map <- rev(rainbow(100, s = 1.0, v = 0.75, start = 0.0, end = 0.75))
  }
  heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
  heatm[1:n.rows,] <- V[seq(n.rows, 1, -1),]   # flip vertically for image()
  par(mar = c(7, 15, 5, 5))
  image(1:n.cols, 1:n.rows, t(heatm), col=color.map, axes=FALSE, main=main, sub = sub, xlab= xlab, ylab=ylab)
  # Axis character sizes are computed unconditionally: the original defined
  # size.col.char inside the row-label branch only, so drawing column labels
  # without row labels raised an "object not found" error.
  size.row.char <- ifelse(n.rows < 15, 1, sqrt(15/n.rows))
  size.col.char <- ifelse(n.cols < 15, 1, sqrt(10/n.cols))
  # size.col.char <- ifelse(n.cols < 2.5, 1, sqrt(2.5/n.cols))
  if (length(row.names) > 1) {
    row.names <- substr(row.names, 1, 40)        # truncate long names
    row.names <- row.names[seq(n.rows, 1, -1)]   # match the flipped row order
    axis(2, at=1:n.rows, labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char, font.axis=1, line=-1)
  }
  if (length(col.names) > 1) {
    axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
  }
  return(NULL)
}
# GSEA.Analyze.Sets: post-processes the per-gene-set "*.report.*" files written
# by GSEA.analysis into 'directory'. For each of the two phenotypes it:
#   1. collects the leading-edge genes (rows with CORE_ENRICHMENT == "YES") of
#      the top 'topgs' sets (all sets when topgs == ""),
#   2. plots the pairwise Jaccard overlap of the leading-edge subsets,
#   3. writes the set-by-gene membership matrix as a .gct file and the union
#      of leading-edge genes as a one-line .gmt gene set,
#   4. plots the membership matrix, raw and hierarchically clustered
#      (average linkage), and writes the clustered matrix as a .txt table.
# Plots go to an interactive device (saved via savePlot on Windows) when
# non.interactive.run == F, otherwise to .pdf files in 'directory'.
GSEA.Analyze.Sets <- function(
directory,
topgs = "",
non.interactive.run = F,
height = 12,
width = 17) {
# Keep files whose name contains ".report." at a position > 1.
# NOTE(review): the pattern is a regex, so each '.' matches any character;
# likely intended as a literal match -- confirm against the naming scheme.
file.list <- list.files(directory)
files <- file.list[regexpr(pattern = ".report.", file.list) > 1]
max.sets <- length(files)
# set.table columns: 1 = file name, 2 = set name (<= 20 chars),
# 3 = phenotype, 4 = sequence number, 5 = dataset/prefix.
set.table <- matrix(nrow = max.sets, ncol = 5)
for (i in 1:max.sets) {
# Parse "<prefix>.<set name>.report.<phenotype>.<seq#>..." file names.
# NOTE(review): split="." is a regex matching every character, so these
# strsplit() calls only behave as intended if 'split' is treated literally
# (fixed = TRUE) -- verify on a sample report file name.
temp1 <- strsplit(files[i], split=".report.")
temp2 <- strsplit(temp1[[1]][1], split=".")
s <- length(temp2[[1]])
prefix.name <- paste(temp2[[1]][1:(s-1)], sep="", collapse="")
set.name <- temp2[[1]][s]
temp3 <- strsplit(temp1[[1]][2], split=".")
phenotype <- temp3[[1]][1]
seq.number <- temp3[[1]][2]
dataset <- paste(temp2[[1]][1:(s-1)], sep="", collapse=".")
set.table[i, 1] <- files[i]
set.table[i, 3] <- phenotype
set.table[i, 4] <- as.numeric(seq.number)
set.table[i, 5] <- dataset
# set.table[i, 2] <- paste(set.name, dataset, sep ="", collapse="")
set.table[i, 2] <- substr(set.name, 1, 20)
}
# doc.string is taken from the last file parsed; all report files are
# assumed to share the same prefix.
print(c("set name=", prefix.name))
doc.string <- prefix.name
set.table <- noquote(set.table)
# Split the table by phenotype and order each half by sequence number
# (i.e. by enrichment rank).
phen.order <- order(set.table[, 3], decreasing = T)
set.table <- set.table[phen.order,]
phen1 <- names(table(set.table[,3]))[1]
phen2 <- names(table(set.table[,3]))[2]
set.table.phen1 <- set.table[set.table[,3] == phen1,]
set.table.phen2 <- set.table[set.table[,3] == phen2,]
seq.order <- order(as.numeric(set.table.phen1[, 4]), decreasing = F)
set.table.phen1 <- set.table.phen1[seq.order,]
seq.order <- order(as.numeric(set.table.phen2[, 4]), decreasing = F)
set.table.phen2 <- set.table.phen2[seq.order,]
# max.sets.phen1 <- length(set.table.phen1[,1])
# max.sets.phen2 <- length(set.table.phen2[,1])
# Number of top sets to analyze per phenotype (all of them when topgs == "").
if (topgs == "") {
max.sets.phen1 <- length(set.table.phen1[,1])
max.sets.phen2 <- length(set.table.phen2[,1])
} else {
max.sets.phen1 <- ifelse(topgs > length(set.table.phen1[,1]), length(set.table.phen1[,1]), topgs)
max.sets.phen2 <- ifelse(topgs > length(set.table.phen2[,1]), length(set.table.phen2[,1]), topgs)
}
# Analysis for phen1
# Read each report and keep its leading-edge genes; accumulate their union.
leading.lists <- NULL
for (i in 1:max.sets.phen1) {
inputfile <- paste(directory, set.table.phen1[i, 1], sep="", collapse="")
gene.set <- read.table(file=inputfile, sep="\t", header=T, comment.char="", as.is=T)
leading.set <- as.vector(gene.set[gene.set[,"CORE_ENRICHMENT"] == "YES", "SYMBOL"])
leading.lists <- c(leading.lists, list(leading.set))
if (i == 1) {
all.leading.genes <- leading.set
} else{
all.leading.genes <- union(all.leading.genes, leading.set)
}
}
max.genes <- length(all.leading.genes)
# M: 0/1 membership matrix -- one row per set, one column per gene in the union.
M <- matrix(0, nrow=max.sets.phen1, ncol=max.genes)
for (i in 1:max.sets.phen1) {
M[i,] <- sign(match(all.leading.genes, as.vector(leading.lists[[i]]), nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
}
# Inter[i, j]: Jaccard index |Li & Lj| / |Li | Lj| of leading-edge overlap.
Inter <- matrix(0, nrow=max.sets.phen1, ncol=max.sets.phen1)
for (i in 1:max.sets.phen1) {
for (j in 1:max.sets.phen1) {
Inter[i, j] <- length(intersect(leading.lists[[i]], leading.lists[[j]]))/length(union(leading.lists[[i]], leading.lists[[j]]))
}
}
Itable <- data.frame(Inter)
names(Itable) <- set.table.phen1[1:max.sets.phen1, 2]
row.names(Itable) <- set.table.phen1[1:max.sets.phen1, 2]
# Open the output device: interactive screen on Windows, .pdf otherwise.
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, sep="", collapse="")
windows(height = width, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
}
GSEA.ConsPlot(Itable, col.names = set.table.phen1[1:max.sets.phen1, 2], main = " ", sub=paste("Leading Subsets Overlap ", doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ")
# Flush/save the plot: savePlot on an interactive Windows screen, dev.off()
# in every other configuration.
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# Save leading subsets in a GCT file
D.phen1 <- data.frame(M)
names(D.phen1) <- all.leading.genes
row.names(D.phen1) <- set.table.phen1[1:max.sets.phen1, 2]
output <- paste(directory, doc.string, ".leading.genes.", phen1, ".gct", sep="")
GSEA.write.gct(D.phen1, filename=output)
# Save leading subsets as a single gene set in a .gmt file
row.header <- paste(doc.string, ".all.leading.genes.", phen1, sep="")
output.line <- paste(all.leading.genes, sep="\t", collapse="\t")
output.line <- paste(row.header, row.header, output.line, sep="\t", collapse="")
output <- paste(directory, doc.string, ".all.leading.genes.", phen1, ".gmt", sep="")
write(noquote(output.line), file = output, ncolumns = length(output.line))
# Plot the raw (unclustered) membership matrix.
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")   # two-colour map: 0 = light, 1 = dark
GSEA.HeatMapPlot2(V = data.matrix(D.phen1), row.names = row.names(D.phen1), col.names = names(D.phen1), main = "Leading Subsets Assignment", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# Transposed copy (genes x sets); currently only built, not written out.
DT1.phen1 <- data.matrix(t(D.phen1))
DT2.phen1 <- data.frame(DT1.phen1)
names(DT2.phen1) <- set.table.phen1[1:max.sets.phen1, 2]
row.names(DT2.phen1) <- all.leading.genes
# GSEA.write.gct(DT2.phen1, filename=outputfile2.phen1)
# Analysis for phen2
# (mirrors the phen1 analysis above)
leading.lists <- NULL
for (i in 1:max.sets.phen2) {
inputfile <- paste(directory, set.table.phen2[i, 1], sep="", collapse="")
gene.set <- read.table(file=inputfile, sep="\t", header=T, comment.char="", as.is=T)
leading.set <- as.vector(gene.set[gene.set[,"CORE_ENRICHMENT"] == "YES", "SYMBOL"])
leading.lists <- c(leading.lists, list(leading.set))
if (i == 1) {
all.leading.genes <- leading.set
} else{
all.leading.genes <- union(all.leading.genes, leading.set)
}
}
max.genes <- length(all.leading.genes)
M <- matrix(0, nrow=max.sets.phen2, ncol=max.genes)
for (i in 1:max.sets.phen2) {
M[i,] <- sign(match(all.leading.genes, as.vector(leading.lists[[i]]), nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
}
Inter <- matrix(0, nrow=max.sets.phen2, ncol=max.sets.phen2)
for (i in 1:max.sets.phen2) {
for (j in 1:max.sets.phen2) {
Inter[i, j] <- length(intersect(leading.lists[[i]], leading.lists[[j]]))/length(union(leading.lists[[i]], leading.lists[[j]]))
}
}
Itable <- data.frame(Inter)
names(Itable) <- set.table.phen2[1:max.sets.phen2, 2]
row.names(Itable) <- set.table.phen2[1:max.sets.phen2, 2]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, sep="", collapse="")
windows(height = width, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
}
GSEA.ConsPlot(Itable, col.names = set.table.phen2[1:max.sets.phen2, 2], main = " ", sub=paste("Leading Subsets Overlap ", doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# Save leading subsets in a GCT file
D.phen2 <- data.frame(M)
names(D.phen2) <- all.leading.genes
row.names(D.phen2) <- set.table.phen2[1:max.sets.phen2, 2]
output <- paste(directory, doc.string, ".leading.genes.", phen2, ".gct", sep="")
GSEA.write.gct(D.phen2, filename=output)
# Save primary subsets as a single gene set in a .gmt file
row.header <- paste(doc.string, ".all.leading.genes.", phen2, sep="")
output.line <- paste(all.leading.genes, sep="\t", collapse="\t")
output.line <- paste(row.header, row.header, output.line, sep="\t", collapse="")
output <- paste(directory, doc.string, ".all.leading.genes.", phen2, ".gmt", sep="")
write(noquote(output.line), file = output, ncolumns = length(output.line))
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
GSEA.HeatMapPlot2(V = data.matrix(D.phen2), row.names = row.names(D.phen2), col.names = names(D.phen2), main = "Leading Subsets Assignment", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
DT1.phen2 <- data.matrix(t(D.phen2))
DT2.phen2 <- data.frame(DT1.phen2)
names(DT2.phen2) <- set.table.phen2[1:max.sets.phen2, 2]
row.names(DT2.phen2) <- all.leading.genes
# GSEA.write.gct(DT2.phen2, filename=outputfile2.phen2)
# Resort columns and rows for phen1
# Cluster genes (columns) and then sets (rows) with average-linkage
# hierarchical clustering on Euclidean distances, then replot the reordered
# membership matrix transposed (genes as rows, sets as columns).
A <- data.matrix(D.phen1)
A.row.names <- row.names(D.phen1)
A.names <- names(D.phen1)
# Max.genes
# init <- 1
# for (k in 1:max.sets.phen1) {
# end <- which.max(cumsum(A[k,]))
# if (end - init > 1) {
# B <- A[,init:end]
# B.names <- A.names[init:end]
# dist.matrix <- dist(t(B))
# HC <- hclust(dist.matrix, method="average")
## B <- B[,HC$order] + 0.2*(k %% 2)
# B <- B[,HC$order]
# A[,init:end] <- B
# A.names[init:end] <- B.names[HC$order]
# init <- end + 1
# }
# }
# windows(width=14, height=10)
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, sub = " ", main = paste("Primary Sets Assignment - ", doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ")
dist.matrix <- dist(t(A))
HC <- hclust(dist.matrix, method="average")
A <- A[, HC$order]
A.names <- A.names[HC$order]
dist.matrix <- dist(A)
HC <- hclust(dist.matrix, method="average")
A <- A[HC$order,]
A.row.names <- A.row.names[HC$order]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
GSEA.HeatMapPlot2(V = t(A), row.names = A.names, col.names = A.row.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
# Also dump the clustered (genes x sets) matrix as a tab-separated text file.
text.filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".txt", sep="", collapse="")
line.list <- c("Gene", A.row.names)
line.header <- paste(line.list, collapse="\t")
line.length <- length(A.row.names) + 1
write(line.header, file = text.filename, ncolumns = line.length)
write.table(t(A), file=text.filename, append = T, quote=F, col.names= F, row.names=T, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# resort columns and rows for phen2
# (mirrors the phen1 clustering above)
A <- data.matrix(D.phen2)
A.row.names <- row.names(D.phen2)
A.names <- names(D.phen2)
# Max.genes
# init <- 1
# for (k in 1:max.sets.phen2) {
# end <- which.max(cumsum(A[k,]))
# if (end - init > 1) {
# B <- A[,init:end]
# B.names <- A.names[init:end]
# dist.matrix <- dist(t(B))
# HC <- hclust(dist.matrix, method="average")
## B <- B[,HC$order] + 0.2*(k %% 2)
# B <- B[,HC$order]
# A[,init:end] <- B
# A.names[init:end] <- B.names[HC$order]
# init <- end + 1
# }
# }
# windows(width=14, height=10)
# GESA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, sub = " ", main = paste("Primary Sets Assignment - ", doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ")
dist.matrix <- dist(t(A))
HC <- hclust(dist.matrix, method="average")
A <- A[, HC$order]
A.names <- A.names[HC$order]
dist.matrix <- dist(A)
HC <- hclust(dist.matrix, method="average")
A <- A[HC$order,]
A.row.names <- A.row.names[HC$order]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
GSEA.HeatMapPlot2(V = t(A), row.names =A.names , col.names = A.row.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
text.filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".txt", sep="", collapse="")
line.list <- c("Gene", A.row.names)
line.header <- paste(line.list, collapse="\t")
line.length <- length(A.row.names) + 1
write(line.header, file = text.filename, ncolumns = line.length)
write.table(t(A), file=text.filename, append = T, quote=F, col.names= F, row.names=T, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
}
# ---- /scripts/GSEA.1.1.R -- repo: yasinkaymaz/Brainformatics (R, 110,344 bytes, no license) ----
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
# G S E A -- Gene Set Enrichment Analysis
# Auxiliary functions and definitions
GSEA.GeneRanking <- function(A, class.labels, gene.labels, nperm, permutation.type = 0, sigma.correction = "GeneCluster", fraction=1.0, replace=F, reverse.sign= F) {
# This function ranks the genes according to the signal to noise ratio for the actual phenotype and also random permutations and bootstrap
# subsamples of both the observed and random phenotypes. It uses matrix operations to implement the signal to noise calculation
# in stages and achieves fast execution speed. It supports two types of permutations: random (unbalanced) and balanced.
# It also supports subsampling and bootstrap by using masking and multiple-count variables. When "fraction" is set to 1 (default)
# the there is no subsampling or boostrapping and the matrix of observed signal to noise ratios will have the same value for
# all permutations. This is wasteful but allows to support all the multiple options with the same code. Notice that the second
# matrix for the null distribution will still have the values for the random permutations
# (null distribution). This mode (fraction = 1.0) is the defaults, the recommended one and the one used in the examples.
# It is also the one that has be tested more thoroughly. The resampling and boostrapping options are intersting to obtain
# smooth estimates of the observed distribution but its is left for the expert user who may want to perform some sanity
# checks before trusting the code.
#
# Inputs:
#   A: Matrix of gene expression values (rows are genes, columns are samples)
#   class.labels: Phenotype of class disticntion of interest. A vector of binary labels having first the 1's and then the 0's
#   gene.labels: gene labels. Vector of probe ids or accession numbers for the rows of the expression matrix
#   nperm: Number of random permutations/bootstraps to perform
#   permutation.type: Permutation type: 0 = unbalanced, 1 = balanced. For experts only (default: 0)
#   sigma.correction: Correction to the signal to noise ratio (Default = GeneCluster, a choice to support the way it was handled in a previous package)
#   fraction: Subsampling fraction. Set to 1.0 (no resampling). For experts only (default: 1.0)
#   replace: Resampling mode (replacement or not replacement). For experts only (default: F)
#   reverse.sign: Reverse direction of gene list (default = F)
#
# Outputs:
#   s2n.matrix: Matrix with random permuted or bootstraps signal to noise ratios (rows are genes, columns are permutations or bootstrap subsamplings
#   obs.s2n.matrix: Matrix with observed signal to noise ratios (rows are genes, columns are boostraps subsamplings. If fraction is set to 1.0 then all the columns have the same values
#   order.matrix: Matrix with the orderings that will sort the columns of the obs.s2n.matrix in decreasing s2n order
#   obs.order.matrix: Matrix with the orderings that will sort the columns of the s2n.matrix in decreasing s2n order
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
# Tiny constant offset; presumably guards against exact zeros in the
# expression values / variances downstream -- TODO confirm.
A <- A + 0.00000001
N <- length(A[,1])
Ns <- length(A[1,])
# Preallocate per-permutation structures: sample masks and label-indicator
# matrices are (samples x permutations); result matrices are (genes x permutations).
subset.mask <- matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels1 <- matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels2 <- matrix(0, nrow=Ns, ncol=nperm)
class.labels1 <- matrix(0, nrow=Ns, ncol=nperm)
class.labels2 <- matrix(0, nrow=Ns, ncol=nperm)
order.matrix <- matrix(0, nrow = N, ncol = nperm)
obs.order.matrix <- matrix(0, nrow = N, ncol = nperm)
s2n.matrix <- matrix(0, nrow = N, ncol = nperm)
obs.s2n.matrix <- matrix(0, nrow = N, ncol = nperm)
obs.gene.labels <- vector(length = N, mode="character")
obs.gene.descs <- vector(length = N, mode="character")
obs.gene.symbols <- vector(length = N, mode="character")
M1 <- matrix(0, nrow = N, ncol = nperm)
M2 <- matrix(0, nrow = N, ncol = nperm)
S1 <- matrix(0, nrow = N, ncol = nperm)
S2 <- matrix(0, nrow = N, ncol = nperm)
gc()
# class.labels is assumed to hold all class-1 labels first, then all class-2
# labels (see the header); split() yields the two group sizes.
C <- split(class.labels, class.labels)
class1.size <- length(C[[1]])
class2.size <- length(C[[2]])
class1.index <- seq(1, class1.size, 1)
class2.index <- seq(class1.size + 1, class1.size + class2.size, 1)
# Build, for every permutation r: the subsample mask and the 0/1 (or
# multiplicity) indicators of which samples carry each (re)shuffled label.
for (r in 1:nperm) {
# Draw the (possibly bootstrapped) subsample of each class for this permutation.
class1.subset <- sample(class1.index, size = ceiling(class1.size*fraction), replace = replace)
class2.subset <- sample(class2.index, size = ceiling(class2.size*fraction), replace = replace)
class1.subset.size <- length(class1.subset)
class2.subset.size <- length(class2.subset)
# NOTE(review): with replace = T a sample drawn more than once still
# contributes a single 1 to the mask here -- verify bootstrap intent.
subset.class1 <- rep(0, class1.size)
for (i in 1:class1.size) {
if (is.element(class1.index[i], class1.subset)) {
subset.class1[i] <- 1
}
}
subset.class2 <- rep(0, class2.size)
for (i in 1:class2.size) {
if (is.element(class2.index[i], class2.subset)) {
subset.class2[i] <- 1
}
}
subset.mask[, r] <- as.numeric(c(subset.class1, subset.class2))
fraction.class1 <- class1.size/Ns
fraction.class2 <- class2.size/Ns
if (permutation.type == 0) { # random (unbalanced) permutation
full.subset <- c(class1.subset, class2.subset)
label1.subset <- sample(full.subset, size = Ns * fraction.class1)
reshuffled.class.labels1[, r] <- rep(0, Ns)
reshuffled.class.labels2[, r] <- rep(0, Ns)
class.labels1[, r] <- rep(0, Ns)
class.labels2[, r] <- rep(0, Ns)
# m1/m2 count how many times sample i was drawn into the shuffled
# label-1 subset / the full subset (multiplicities support bootstrap).
for (i in 1:Ns) {
m1 <- sum(!is.na(match(label1.subset, i)))
m2 <- sum(!is.na(match(full.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
if (i <= class1.size) {
class.labels1[i, r] <- m2
class.labels2[i, r] <- 0
} else {
class.labels1[i, r] <- 0
class.labels2[i, r] <- m2
}
}
} else if (permutation.type == 1) { # proportional (balanced) permutation
class1.label1.subset <- sample(class1.subset, size = ceiling(class1.subset.size*fraction.class1))
class2.label1.subset <- sample(class2.subset, size = floor(class2.subset.size*fraction.class1))
reshuffled.class.labels1[, r] <- rep(0, Ns)
reshuffled.class.labels2[, r] <- rep(0, Ns)
class.labels1[, r] <- rep(0, Ns)
class.labels2[, r] <- rep(0, Ns)
for (i in 1:Ns) {
if (i <= class1.size) {
m1 <- sum(!is.na(match(class1.label1.subset, i)))
m2 <- sum(!is.na(match(class1.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
class.labels1[i, r] <- m2
class.labels2[i, r] <- 0
} else {
m1 <- sum(!is.na(match(class2.label1.subset, i)))
m2 <- sum(!is.na(match(class2.subset, i)))
reshuffled.class.labels1[i, r] <- m1
reshuffled.class.labels2[i, r] <- m2 - m1
class.labels1[i, r] <- 0
class.labels2[i, r] <- m2
}
}
}
}
# compute S2N for the random permutation matrix
# Per-gene mean (M1) and bias-corrected standard deviation (S1) of the
# shuffled label-1 group, for all permutations at once via matrix products.
# n1 is taken from column 1 only: by construction every column draws the
# same number of samples, so the counts agree across permutations.
P <- reshuffled.class.labels1 * subset.mask
n1 <- sum(P[,1])
M1 <- A %*% P
M1 <- M1/n1
gc()
A2 <- A*A
S1 <- A2 %*% P
S1 <- S1/n1 - M1*M1
S1 <- sqrt(abs((n1/(n1-1)) * S1))
gc()
# Same two moments for the shuffled label-2 group.
P <- reshuffled.class.labels2 * subset.mask
n2 <- sum(P[,1])
M2 <- A %*% P
M2 <- M2/n2
gc()
A2 <- A*A
S2 <- A2 %*% P
S2 <- S2/n2 - M2*M2
S2 <- sqrt(abs((n2/(n2-1)) * S2))
rm(P)
rm(A2)
gc()
if (sigma.correction == "GeneCluster") { # small sigma "fix" as used in GeneCluster
# Floor each std dev at 20% of the group mean (and at 0.2 absolute).
S2 <- ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 <- ifelse(S2 == 0, 0.2, S2)
S1 <- ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 <- ifelse(S1 == 0, 0.2, S1)
gc()
}
# Signal-to-noise = (mean1 - mean2) / (sd1 + sd2); M1 and S1 are reused as
# the difference and the pooled denominator to save memory.
M1 <- M1 - M2
rm(M2)
gc()
S1 <- S1 + S2
rm(S2)
gc()
s2n.matrix <- M1/S1
if (reverse.sign == T) {
s2n.matrix <- - s2n.matrix
}
gc()
# Column-wise orderings that sort genes by decreasing S2N.
for (r in 1:nperm) {
order.matrix[, r] <- order(s2n.matrix[, r], decreasing=T)
}
# compute S2N for the "observed" permutation matrix
# Identical computation, but using the un-shuffled class indicators; with
# fraction = 1 every column of obs.s2n.matrix is the same.
P <- class.labels1 * subset.mask
n1 <- sum(P[,1])
M1 <- A %*% P
M1 <- M1/n1
gc()
A2 <- A*A
S1 <- A2 %*% P
S1 <- S1/n1 - M1*M1
S1 <- sqrt(abs((n1/(n1-1)) * S1))
gc()
P <- class.labels2 * subset.mask
n2 <- sum(P[,1])
M2 <- A %*% P
M2 <- M2/n2
gc()
A2 <- A*A
S2 <- A2 %*% P
S2 <- S2/n2 - M2*M2
S2 <- sqrt(abs((n2/(n2-1)) * S2))
rm(P)
rm(A2)
gc()
if (sigma.correction == "GeneCluster") { # small sigma "fix" as used in GeneCluster
S2 <- ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 <- ifelse(S2 == 0, 0.2, S2)
S1 <- ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 <- ifelse(S1 == 0, 0.2, S1)
gc()
}
M1 <- M1 - M2
rm(M2)
gc()
S1 <- S1 + S2
rm(S2)
gc()
obs.s2n.matrix <- M1/S1
gc()
if (reverse.sign == T) {
obs.s2n.matrix <- - obs.s2n.matrix
}
for (r in 1:nperm) {
obs.order.matrix[,r] <- order(obs.s2n.matrix[,r], decreasing=T)
}
return(list(s2n.matrix = s2n.matrix,
obs.s2n.matrix = obs.s2n.matrix,
order.matrix = order.matrix,
obs.order.matrix = obs.order.matrix))
}
GSEA.EnrichmentScore <- function(gene.list, gene.set, weighted.score.type = 1, correl.vector = NULL) {
  # Computes the weighted GSEA enrichment score of gene.set within gene.list.
  #
  # The running sum climbs by a (correlation-weighted) step at every hit and
  # falls by a constant step at every miss; the score is the extreme of that
  # walk (positive peak or negative valley, whichever is larger in magnitude).
  #
  # gene.list:           ordered gene identifiers (positions in the dataset)
  # gene.set:            identifiers belonging to the gene set
  # weighted.score.type: exponent on the correlations -- 0 gives the
  #                      unweighted Kolmogorov-Smirnov statistic, 1 weighted,
  #                      2 over-weighted (1 and 2 require correl.vector,
  #                      ordered exactly like gene.list)
  # correl.vector:       correlations (e.g. signal-to-noise) per list position
  #
  # Returns a list:
  #   ES        enrichment score in [-1, 1], rounded to 5 significant digits
  #   arg.ES    list position where the running sum peaks
  #   RES       the full running enrichment score vector
  #   indicator 0/1 vector marking gene-set members along gene.list
  #
  # The Broad Institute
  # SOFTWARE COPYRIGHT NOTICE AGREEMENT
  # This software and its documentation are copyright 2003 by the
  # Broad Institute/Massachusetts Institute of Technology.
  # All rights are reserved.
  #
  # This software is supplied without any warranty or guaranteed support
  # whatsoever. Neither the Broad Institute nor MIT can be responsible for
  # its use, misuse, or functionality.
  N <- length(gene.list)
  Nh <- length(gene.set)
  Nm <- N - Nh
  hit <- sign(match(gene.list, gene.set, nomatch = 0))  # 1 = in set, 0 = not
  if (weighted.score.type == 0) {
    correl.vector <- rep(1, N)   # unweighted: every hit counts equally
  }
  weight <- abs(correl.vector^weighted.score.type)
  hit.norm <- 1.0/sum(weight[hit == 1])   # hits sum to +1 overall
  miss.norm <- 1.0/Nm                     # misses sum to -1 overall
  RES <- cumsum(hit * weight * hit.norm - (1 - hit) * miss.norm)
  peak <- max(RES)
  valley <- min(RES)
  if (peak > -valley) {
    ES <- signif(peak, digits = 5)
    arg.ES <- which.max(RES)
  } else {
    ES <- signif(valley, digits = 5)
    arg.ES <- which.min(RES)
  }
  list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = hit)
}
OLD.GSEA.EnrichmentScore <- function(gene.list, gene.set) {
  # Computes the original (Mootha et al. 2003) GSEA enrichment score of
  # gene.set within gene.list: an unweighted Kolmogorov-Smirnov-style running
  # sum with steps normalized by the set/non-set sizes.
  #
  # gene.list: ordered gene identifiers (positions in the dataset)
  # gene.set:  identifiers belonging to the gene set
  #
  # Returns a list:
  #   ES        enrichment score, rounded to 5 significant digits
  #   arg.ES    list position where the running sum peaks
  #   RES       the full running enrichment score vector
  #   indicator 0/1 vector marking gene-set members along gene.list
  #
  # The Broad Institute
  # SOFTWARE COPYRIGHT NOTICE AGREEMENT
  # This software and its documentation are copyright 2003 by the
  # Broad Institute/Massachusetts Institute of Technology.
  # All rights are reserved.
  #
  # This software is supplied without any warranty or guaranteed support
  # whatsoever. Neither the Broad Institute nor MIT can be responsible for
  # its use, misuse, or functionality.
  N <- length(gene.list)
  Nh <- length(gene.set)
  in.set <- sign(match(gene.list, gene.set, nomatch = 0))  # 1 = in set, 0 = not
  up.step <- sqrt((N - Nh)/Nh)    # increment applied at each hit
  down.step <- sqrt(Nh/(N - Nh))  # decrement applied at each miss
  RES <- cumsum(ifelse(in.set == 1, up.step, -down.step))
  peak <- max(RES)
  valley <- min(RES)
  if (peak > -valley) {
    ES <- signif(peak, digits = 5)
    arg.ES <- which.max(RES)
  } else {
    ES <- signif(valley, digits = 5)
    arg.ES <- which.min(RES)
  }
  list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = in.set)
}
GSEA.EnrichmentScore2 <- function(gene.list, gene.set, weighted.score.type = 1, correl.vector = NULL) {
  # Compute the weighted GSEA enrichment score of gene.set inside gene.list.
  #
  # Produces the same ES as GSEA.EnrichmentScore but faster (~x8): it only
  # evaluates the running sum at the tag (hit) positions and does not build
  # the RES, arg.ES and tag.indicator outputs.  Intended for assessing the
  # enrichment of random permutations rather than the observed ranking.
  #
  # Args:
  #   gene.list: ordered gene list (integers: original positions in the dataset)
  #   gene.set: a gene set (integers: positions of those genes in the dataset)
  #   weighted.score.type: exponent on the correlation weight:
  #     0 = unweighted (Kolmogorov-Smirnov), 1 = weighted, 2 = over-weighted.
  #     For any non-zero type, correl.vector must be supplied with values in
  #     the same order as gene.list.
  #   correl.vector: correlations (e.g. signal-to-noise) for the ranked genes
  #
  # Returns:
  #   list(ES = enrichment score), a real number between -1 and +1.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  N <- length(gene.list)
  Nh <- length(gene.set)
  Nm <- N - Nh
  # Rank position of every dataset index, then the sorted rank positions of
  # the set members ("tags").
  loc.vector <- vector(length = N, mode = "numeric")
  loc.vector[gene.list] <- seq_len(N)
  tag.loc.vector <- sort(loc.vector[gene.set], decreasing = FALSE)
  # Correlation-based weight at each tag position.
  if (weighted.score.type == 0) {
    tag.correl.vector <- rep(1, Nh)
  } else if (weighted.score.type == 1) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector])
  } else if (weighted.score.type == 2) {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector] * correl.vector[tag.loc.vector])
  } else {
    tag.correl.vector <- abs(correl.vector[tag.loc.vector]^weighted.score.type)
  }
  norm.tag <- 1.0 / sum(tag.correl.vector)
  tag.correl.vector <- tag.correl.vector * norm.tag
  norm.no.tag <- 1.0 / Nm
  # Number of non-tag genes preceding the first tag and between consecutive
  # tags (the "miss" penalty runs).
  tag.diff.vector <- vector(length = Nh, mode = "numeric")
  tag.diff.vector[1] <- tag.loc.vector[1] - 1
  if (Nh > 1) {
    # Fix: with a single-gene set the original unconditional 2:Nh indexing
    # expanded to c(2, 1), read past the end of tag.loc.vector and
    # propagated NA into the score.
    tag.diff.vector[2:Nh] <- tag.loc.vector[2:Nh] - tag.loc.vector[1:(Nh - 1)] - 1
  }
  tag.diff.vector <- tag.diff.vector * norm.no.tag
  # Running sum evaluated just after each hit (peaks) and just before it
  # (valleys); the extremes of the full running score occur at these points.
  peak.res.vector <- cumsum(tag.correl.vector - tag.diff.vector)
  valley.res.vector <- peak.res.vector - tag.correl.vector
  max.ES <- max(peak.res.vector)
  min.ES <- min(valley.res.vector)
  # Keep whichever extreme has the larger magnitude (sign preserved).
  ES <- signif(ifelse(max.ES > -min.ES, max.ES, min.ES), digits = 5)
  return(list(ES = ES))
}
GSEA.HeatMapPlot <- function(V, row.names = F, col.labels, col.classes, col.names = F, main = " ", xlab=" ", ylab=" ") {
  # Plot a heatmap "pinkogram" of a gene expression matrix with an extra
  # bottom strip encoding the phenotype (class) of each sample, plus optional
  # gene, sample and phenotype labels.
  #
  # Args:
  #   V: numeric matrix (genes x samples)
  #   row.names: character vector of gene labels, or F for none
  #   col.labels: 0/1 phenotype vector, one entry per column of V
  #   col.classes: the two phenotype names, drawn above the sample groups
  #   col.names: character vector of sample labels, or F for none
  #   main, xlab, ylab: usual plot annotations
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  n.rows <- length(V[,1])
  n.cols <- length(V[1,])
  # Row-standardize (scaled by 0.5 * sd) and clip to [-6, 6] so the color
  # map's dynamic range is used consistently; constant rows map to 0.
  row.mean <- apply(V, MARGIN=1, FUN=mean)
  row.sd <- apply(V, MARGIN=1, FUN=sd)
  for (i in 1:n.rows) {
    if (row.sd[i] == 0) {
      V[i,] <- 0
    } else {
      V[i,] <- (V[i,] - row.mean[i])/(0.5 * row.sd[i])
    }
    V[i,] <- ifelse(V[i,] < -6, -6, V[i,])
    V[i,] <- ifelse(V[i,] > 6, 6, V[i,])
  }
  # Blue-pinkogram colors. The first and last entries color the class
  # (phenotype) strip. This is the 1998-vintage, pre-gene cluster, original
  # pinkogram color map.
  mycol <- c("#0000FF", "#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA", "#FF9DB0", "#FF7080", "#FF5A5A", "#FF4040", "#FF0D1D", "#FF0000")
  # Label sizes shrink as the matrix grows.  Fix: size.col.char used to be
  # computed only inside the row-label branch below, so supplying col.names
  # without row.names raised an "object 'size.col.char' not found" error.
  size.row.char <- 35/(n.rows + 5)
  size.col.char <- 25/(n.cols + 5)
  heatm <- matrix(0, nrow = n.rows + 1, ncol = n.cols)
  heatm[1:n.rows,] <- V[seq(n.rows, 1, -1),]            # flip so row 1 plots on top
  heatm[n.rows + 1,] <- ifelse(col.labels == 0, 7, -7)  # phenotype strip values
  image(1:n.cols, 1:(n.rows + 1), t(heatm), col=mycol, axes=FALSE, main=main, xlab= xlab, ylab=ylab)
  if (length(row.names) > 1) {
    # Truncate long gene labels, mirror to match the flipped matrix, and
    # append a label for the phenotype strip.
    maxl <- floor(n.rows/1.6)
    for (i in 1:n.rows) {
      row.names[i] <- substr(row.names[i], 1, maxl)
    }
    row.names <- c(row.names[seq(n.rows, 1, -1)], "Class")
    axis(2, at=1:(n.rows + 1), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char, font.axis=2, line=-1)
  }
  if (length(col.names) > 1) {
    axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
  }
  # Draw the two phenotype names centered over their sample groups.
  C <- split(col.labels, col.labels)
  class1.size <- length(C[[1]])
  class2.size <- length(C[[2]])
  axis(3, at=c(floor(class1.size/2),class1.size + floor(class2.size/2)), labels=col.classes, tick=FALSE, las = 1, cex.axis=1.25, font.axis=2, line=-1)
  return()
}
GSEA.Res2Frame <- function(filename = "NULL") {
  # Read a gene expression dataset in RES format into an R data frame.
  #
  # RES layout: line 1 is a header whose sample names occupy every second
  # field starting at field 3 (value/call column pairs); lines 2-3 carry a
  # description line and the row count; data rows follow with the accession
  # in field 2 (used as row names) and value/call pairs afterwards.  Only
  # the expression-value columns are kept.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  header.fields <- unlist(strsplit(readLines(filename, n = 1), "\t"))
  sample.labels <- header.fields[seq(3, length(header.fields), 2)]
  ds <- read.delim(filename, header = FALSE, row.names = 2, sep = "\t",
                   skip = 3, blank.lines.skip = TRUE, comment.char = "",
                   as.is = TRUE)
  # Every second column (2, 4, ...) holds expression values; the columns in
  # between hold Affymetrix present/absent calls, which are discarded.
  values <- ds[, seq(2, length(ds[1, ]), 2)]
  out <- data.frame(values)
  names(out) <- sample.labels
  return(out)
}
GSEA.Gct2Frame <- function(filename = "NULL") {
  # Read a gene expression dataset in GCT format into an R data frame.
  #
  # GCT layout: line 1 version tag, line 2 dimensions, line 3 column header
  # (Name, Description, then one column per sample), remaining lines one gene
  # per row.  The Name column becomes the row names; the Description column
  # is dropped.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  ds <- read.delim(filename, header = TRUE, sep = "\t", skip = 2,
                   row.names = 1, blank.lines.skip = TRUE,
                   comment.char = "", as.is = TRUE)
  # First remaining column is Description; discard it and keep the values.
  ds[-1]
}
GSEA.Gct2Frame2 <- function(filename = "NULL") {
  # Read a gene expression dataset in GCT format into an R data frame,
  # parsing the file line by line instead of via read.delim.
  #
  # Rows with fewer value fields than the header are tolerated: missing
  # trailing cells are left at 0.  Row names come from the Name column; the
  # Description column is discarded.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  all.lines <- readLines(filename)
  all.lines <- all.lines[-(1:2)]                 # drop version tag + dimensions
  header <- noquote(unlist(strsplit(all.lines[1], "\t")))
  sample.names <- header[c(-1, -2)]              # drop Name / Description
  all.lines <- all.lines[-1]
  n.genes <- length(all.lines)
  n.samples <- length(sample.names)
  gene.ids <- vector(length = n.genes, mode = "character")
  gene.descs <- vector(length = n.genes, mode = "character")
  vals <- matrix(0, nrow = n.genes, ncol = n.samples)
  for (k in seq_len(n.genes)) {
    fields <- noquote(unlist(strsplit(all.lines[k], "\t")))
    gene.ids[k] <- fields[1]
    gene.descs[k] <- fields[2]
    cells <- fields[c(-1, -2)]
    # Fill only the cells present on this line; any trailing columns stay 0.
    vals[k, seq_along(cells)] <- as.numeric(cells)
  }
  ds <- data.frame(vals)
  names(ds) <- sample.names
  row.names(ds) <- gene.ids
  return(ds)
}
GSEA.ReadClsFile <- function(file = "NULL") {
  # Read a class-vector CLS file and build the phenotype names and the
  # per-sample numeric class vector for a gene expression dataset (RES or
  # GCT format).
  #
  # CLS layout: line 1 = counts, line 2 = "# <phenotype names>",
  # line 3 = space-separated class label for every sample.
  #
  # Returns:
  #   list(phen = character vector of unique phenotype names,
  #        class.v = numeric vector with one 0-based class code per sample)
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  cls.lines <- readLines(file)
  sample.labels <- unlist(strsplit(cls.lines[[3]], " "))
  # Unique phenotypes in the (sorted) order produced by table(); each gets
  # the 0-based code equal to its position minus one.
  phen <- names(table(sample.labels))
  class.v <- as.numeric(match(sample.labels, phen) - 1)
  return(list(phen = phen, class.v = class.v))
}
GSEA.Threshold <- function(V, thres, ceil) {
  # Threshold-and-ceiling pre-processing for a gene expression matrix:
  # clamp every value into the interval [thres, ceil].  NA entries are left
  # untouched.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  pmin(pmax(V, thres), ceil)
}
GSEA.VarFilter <- function(V, fold, delta, gene.names = "NULL") {
  # Variation filter pre-processing for a gene expression matrix: keep only
  # rows (genes) whose max/min ratio exceeds `fold` AND whose max - min
  # range exceeds `delta`.
  #
  # Args:
  #   V: numeric expression matrix (genes x samples)
  #   fold: minimum max/min fold change for a row to be kept
  #   delta: minimum max - min absolute difference for a row to be kept
  #   gene.names: optional character vector of row identifiers; the sentinel
  #     string "NULL" (default) means no names were supplied
  #
  # Returns:
  #   The filtered matrix, or list(V = filtered matrix, new.list = names of
  #   the kept genes) when gene.names was supplied.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  row.max <- apply(V, MARGIN = 1, FUN = max)
  row.min <- apply(V, MARGIN = 1, FUN = min)
  flag <- (row.max / row.min > fold) & (row.max - row.min > delta)
  # Subset once instead of copying row-by-row; strip dimnames to match the
  # historical output (a bare matrix built with matrix()).
  B <- V[flag, , drop = FALSE]
  dimnames(B) <- NULL
  # Fix: the original `gene.names == "NULL"` comparison is vectorized, so
  # passing an actual vector of names made the if() condition have length
  # > 1, which is an error under R >= 4.2.
  if (identical(gene.names, "NULL")) {
    return(B)
  }
  return(list(V = B, new.list = gene.names[flag]))
}
GSEA.NormalizeRows <- function(V) {
  # Standardize each row of a gene expression matrix to mean 0 and sd 1.
  # Rows with zero standard deviation are set to all zeros instead of
  # dividing by zero.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  mu <- apply(V, MARGIN = 1, FUN = mean)
  sigma <- apply(V, MARGIN = 1, FUN = sd)
  for (r in seq_len(nrow(V))) {
    V[r, ] <- if (sigma[r] == 0) 0 else (V[r, ] - mu[r]) / sigma[r]
  }
  V
}
GSEA.NormalizeCols <- function(V) {
  # Standardize each column of a gene expression matrix to mean 0 and sd 1.
  # Columns with zero standard deviation are set to all zeros instead of
  # dividing by zero.
  #
  # Adapted from the Broad Institute GSEA R implementation (copyright 2003,
  # Broad Institute / MIT; supplied without warranty or guaranteed support).
  col.mean <- apply(V, MARGIN = 2, FUN = mean)
  col.sd <- apply(V, MARGIN = 2, FUN = sd)
  for (i in seq_len(ncol(V))) {
    if (col.sd[i] == 0) {
      # Fix: the original wrote V[i, ] <- 0 here, zeroing ROW i instead of
      # the constant COLUMN i it had just tested.
      V[, i] <- 0
    } else {
      V[, i] <- (V[, i] - col.mean[i]) / col.sd[i]
    }
  }
  return(V)
}
# end of auxiliary functions
# ----------------------------------------------------------------------------------------
# Main GSEA Analysis Function that implements the entire methodology
GSEA <- function(
input.ds,
input.cls,
gene.ann = "",
gs.db,
gs.ann = "",
output.directory = "",
doc.string = "GSEA.analysis",
non.interactive.run = F,
reshuffling.type = "sample.labels",
nperm = 1000,
weighted.score.type = 1,
nom.p.val.threshold = -1,
fwer.p.val.threshold = -1,
fdr.q.val.threshold = 0.25,
topgs = 10,
adjust.FDR.q.val = F,
gs.size.threshold.min = 25,
gs.size.threshold.max = 500,
reverse.sign = F,
preproc.type = 0,
random.seed = 123456,
perm.type = 0,
fraction = 1.0,
replace = F,
save.intermediate.results = F,
OLD.GSEA = F,
use.fast.enrichment.routine = T) {
# This is a methodology for the analysis of global molecular profiles called Gene Set Enrichment Analysis (GSEA). It determines
# whether an a priori defined set of genes shows statistically significant, concordant differences between two biological
# states (e.g. phenotypes). GSEA operates on all genes from an experiment, rank ordered by the signal to noise ratio and
# determines whether members of an a priori defined gene set are nonrandomly distributed towards the top or bottom of the
# list and thus may correspond to an important biological process. To assess significance the program uses an empirical
# permutation procedure to test deviation from random that preserves correlations between genes.
#
# For details see Subramanian et al 2005
#
# Inputs:
# input.ds: Input gene expression Affymetrix dataset file in RES or GCT format
# input.cls: Input class vector (phenotype) file in CLS format
# gene.ann.file: Gene microarray annotation file (Affymetrix Netaffyx *.csv format) (default: none)
# gs.file: Gene set database in GMT format
# output.directory: Directory where to store output and results (default: .)
# doc.string: Documentation string used as a prefix to name result files (default: "GSEA.analysis")
# non.interactive.run: Run in interactive (i.e. R GUI) or batch (R command line) mode (default: F)
# reshuffling.type: Type of permutation reshuffling: "sample.labels" or "gene.labels" (default: "sample.labels")
# nperm: Number of random permutations (default: 1000)
# weighted.score.type: Enrichment correlation-based weighting: 0=no weight (KS), 1=standard weight, 2 = over-weight (default: 1)
# nom.p.val.threshold: Significance threshold for nominal p-vals for gene sets (default: -1, no thres)
# fwer.p.val.threshold: Significance threshold for FWER p-vals for gene sets (default: -1, no thres)
# fdr.q.val.threshold: Significance threshold for FDR q-vals for gene sets (default: 0.25)
# topgs: Besides those passing test, number of top scoring gene sets used for detailed reports (default: 10)
# adjust.FDR.q.val: Adjust the FDR q-vals (default: F)
# gs.size.threshold.min: Minimum size (in genes) for database gene sets to be considered (default: 25)
# gs.size.threshold.max: Maximum size (in genes) for database gene sets to be considered (default: 500)
# reverse.sign: Reverse direction of gene list (pos. enrichment becomes negative, etc.) (default: F)
# preproc.type: Preprocessing normalization: 0=none, 1=col(z-score)., 2=col(rank) and row(z-score)., 3=col(rank). (default: 0)
# random.seed: Random number generator seed. (default: 123456)
# perm.type: Permutation type: 0 = unbalanced, 1 = balanced. For experts only (default: 0)
# fraction: Subsampling fraction. Set to 1.0 (no resampling). For experts only (default: 1.0)
# replace: Resampling mode (replacement or not replacement). For experts only (default: F)
# OLD.GSEA: if TRUE compute the OLD GSEA of Mootha et al 2003
# use.fast.enrichment.routine: if true it uses a faster version to compute random perm. enrichment "GSEA.EnrichmentScore2"
#
# Output:
# The results of the method are stored in the "output.directory" specified by the user as part of the input parameters.
# The results files are:
# - Two tab-separated global result text files (one for each phenotype). These files are labeled according to the doc
# string prefix and the phenotype name from the CLS file: <doc.string>.SUMMARY.RESULTS.REPORT.<phenotype>.txt
# - One set of global plots. They include a.- gene list correlation profile, b.- global observed and null densities, c.- heat map
# for the entire sorted dataset, and d.- p-values vs. NES plot. These plots are in a single JPEG file named
# <doc.string>.global.plots.<phenotype>.jpg. When the program is run interactively these plots appear on a window in the R GUI.
# - A variable number of tab-separated gene result text files according to how many sets pass any of the significance thresholds
# ("nom.p.val.threshold," "fwer.p.val.threshold," and "fdr.q.val.threshold") and how many are specified in the "topgs"
# parameter. These files are named: <doc.string>.<gene set name>.report.txt.
# - A variable number of gene set plots (one for each gene set report file). These plots include a.- Gene set running enrichment
# "mountain" plot, b.- gene set null distribution and c.- heat map for genes in the gene set. These plots are stored in a
# single JPEG file named <doc.string>.<gene set name>.jpg.
# The format (columns) for the global result files is as follows.
# GS : Gene set name.
# SIZE : Size of the set in genes.
# SOURCE : Set definition or source.
# ES : Enrichment score.
# NES : Normalized (multiplicative rescaling) normalized enrichment score.
# NOM p-val : Nominal p-value (from the null distribution of the gene set).
# FDR q-val: False discovery rate q-values
# FWER p-val: Family wise error rate p-values.
# Tag %: Percent of gene set before running enrichment peak.
# Gene %: Percent of gene list before running enrichment peak.
# Signal : enrichment signal strength.
# FDR (median): FDR q-values from the median of the null distributions.
# glob.p.val: P-value using a global statistic (number of sets above the set's NES).
#
# The rows are sorted by the NES values (from maximum positive or negative NES to minimum)
#
# The format (columns) for the gene set result files is as follows.
#
# #: Gene number in the (sorted) gene set
# GENE : gene name. For example the probe accession number, gene symbol or the gene identifier gin the dataset.
# SYMBOL : gene symbol from the gene annotation file.
# DESC : gene description (title) from the gene annotation file.
# LIST LOC : location of the gene in the sorted gene list.
# S2N : signal to noise ratio (correlation) of the gene in the gene list.
# RES : value of the running enrichment score at the gene location.
# CORE_ENRICHMENT: is this gene is the "core enrichment" section of the list? Yes or No variable specifying in the gene location is before (positive ES) or after (negative ES) the running enrichment peak.
#
# The rows are sorted by the gene location in the gene list.
# The function call to GSEA returns a two element list containing the two global result reports as data frames ($report1, $report2).
#
# results1: Global output report for first phenotype
# results2: Global output report for second phenotype
#
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2003 by the
# Broad Institute/Massachusetts Institute of Technology.
# All rights are reserved.
#
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for
# its use, misuse, or functionality.
print(" *** Running GSEA Analysis...")
if (OLD.GSEA == T) {
print("Running OLD GSEA from Mootha et al 2003")
}
# Copy input parameters to log file
if (output.directory != "") {
filename <- paste(output.directory, doc.string, "_params.txt", sep="", collapse="")
time.string <- as.character(as.POSIXlt(Sys.time(),"GMT"))
write(paste("Run of GSEA on ", time.string), file=filename)
if (is.data.frame(input.ds)) {
# write(paste("input.ds=", quote(input.ds), sep=" "), file=filename, append=T)
} else {
write(paste("input.ds=", input.ds, sep=" "), file=filename, append=T)
}
if (is.list(input.cls)) {
# write(paste("input.cls=", input.cls, sep=" "), file=filename, append=T)
} else {
write(paste("input.cls=", input.cls, sep=" "), file=filename, append=T)
}
if (is.data.frame(gene.ann)) {
# write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
} else {
write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
}
if (regexpr(pattern=".gmt", gs.db[1]) == -1) {
# write(paste("gs.db=", gs.db, sep=" "), file=filename, append=T)
} else {
write(paste("gs.db=", gs.db, sep=" "), file=filename, append=T)
}
if (is.data.frame(gs.ann)) {
# write(paste("gene.ann =", gene.ann, sep=" "), file=filename, append=T)
} else {
write(paste("gs.ann =", gs.ann, sep=" "), file=filename, append=T)
}
write(paste("output.directory =", output.directory, sep=" "), file=filename, append=T)
write(paste("doc.string = ", doc.string, sep=" "), file=filename, append=T)
write(paste("non.interactive.run =", non.interactive.run, sep=" "), file=filename, append=T)
write(paste("reshuffling.type =", reshuffling.type, sep=" "), file=filename, append=T)
write(paste("nperm =", nperm, sep=" "), file=filename, append=T)
write(paste("weighted.score.type =", weighted.score.type, sep=" "), file=filename, append=T)
write(paste("nom.p.val.threshold =", nom.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fwer.p.val.threshold =", fwer.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fdr.q.val.threshold =", fdr.q.val.threshold, sep=" "), file=filename, append=T)
write(paste("topgs =", topgs, sep=" "), file=filename, append=T)
write(paste("adjust.FDR.q.val =", adjust.FDR.q.val, sep=" "), file=filename, append=T)
write(paste("gs.size.threshold.min =", gs.size.threshold.min, sep=" "), file=filename, append=T)
write(paste("gs.size.threshold.max =", gs.size.threshold.max, sep=" "), file=filename, append=T)
write(paste("reverse.sign =", reverse.sign, sep=" "), file=filename, append=T)
write(paste("preproc.type =", preproc.type, sep=" "), file=filename, append=T)
write(paste("random.seed =", random.seed, sep=" "), file=filename, append=T)
write(paste("perm.type =", perm.type, sep=" "), file=filename, append=T)
write(paste("fraction =", fraction, sep=" "), file=filename, append=T)
write(paste("replace =", replace, sep=" "), file=filename, append=T)
}
# Start of GSEA methodology
if (.Platform$OS.type == "windows") {
memory.limit(6000000000)
memory.limit()
# print(c("Start memory size=", memory.size()))
}
# Read input data matrix
set.seed(seed=random.seed, kind = NULL)
adjust.param <- 0.5
gc()
time1 <- proc.time()
if (is.data.frame(input.ds)) {
dataset <- input.ds
} else {
if (regexpr(pattern=".gct", input.ds) == -1) {
dataset <- GSEA.Res2Frame(filename = input.ds)
} else {
# dataset <- GSEA.Gct2Frame(filename = input.ds)
dataset <- GSEA.Gct2Frame2(filename = input.ds)
}
}
gene.labels <- row.names(dataset)
sample.names <- names(dataset)
A <- data.matrix(dataset)
dim(A)
cols <- length(A[1,])
rows <- length(A[,1])
# preproc.type control the type of pre-processing: threshold, variation filter, normalization
if (preproc.type == 1) { # Column normalize (Z-score)
A <- GSEA.NormalizeCols(A)
} else if (preproc.type == 2) { # Column (rank) and row (Z-score) normalize
for (j in 1:cols) { # column rank normalization
A[,j] <- rank(A[,j])
}
A <- GSEA.NormalizeRows(A)
} else if (preproc.type == 3) { # Column (rank) norm.
for (j in 1:cols) { # column rank normalization
A[,j] <- rank(A[,j])
}
}
# Read input class vector
if(is.list(input.cls)) {
CLS <- input.cls
} else {
CLS <- GSEA.ReadClsFile(file=input.cls)
}
class.labels <- CLS$class.v
class.phen <- CLS$phen
if (reverse.sign == T) {
phen1 <- class.phen[2]
phen2 <- class.phen[1]
} else {
phen1 <- class.phen[1]
phen2 <- class.phen[2]
}
# sort samples according to phenotype
col.index <- order(class.labels, decreasing=F)
class.labels <- class.labels[col.index]
sample.names <- sample.names[col.index]
for (j in 1:rows) {
A[j, ] <- A[j, col.index]
}
names(A) <- sample.names
# Read input gene set database
if (regexpr(pattern=".gmt", gs.db[1]) == -1) {
temp <- gs.db
} else {
temp <- readLines(gs.db)
}
max.Ng <- length(temp)
temp.size.G <- vector(length = max.Ng, mode = "numeric")
for (i in 1:max.Ng) {
temp.size.G[i] <- length(unlist(strsplit(temp[[i]], "\t"))) - 2
}
max.size.G <- max(temp.size.G)
print(max.size.G)
print(max.Ng)
gs <- matrix(rep("null", max.Ng*max.size.G), nrow=max.Ng, ncol= max.size.G)
temp.names <- vector(length = max.Ng, mode = "character")
temp.desc <- vector(length = max.Ng, mode = "character")
gs.count <- 1
for (i in 1:max.Ng) {
gene.set.size <- length(unlist(strsplit(temp[[i]], "\t"))) - 2
gs.line <- noquote(unlist(strsplit(temp[[i]], "\t")))
gene.set.name <- gs.line[1]
gene.set.desc <- gs.line[2]
gene.set.tags <- vector(length = gene.set.size, mode = "character")
for (j in 1:gene.set.size) {
gene.set.tags[j] <- gs.line[j + 2]
}
existing.set <- is.element(gene.set.tags, gene.labels)
set.size <- length(existing.set[existing.set == T])
if ((set.size < gs.size.threshold.min) || (set.size > gs.size.threshold.max)) next
temp.size.G[gs.count] <- set.size
gs[gs.count,] <- c(gene.set.tags[existing.set], rep("null", max.size.G - temp.size.G[gs.count]))
temp.names[gs.count] <- gene.set.name
temp.desc[gs.count] <- gene.set.desc
gs.count <- gs.count + 1
}
Ng <- gs.count - 1
gs.names <- vector(length = Ng, mode = "character")
gs.desc <- vector(length = Ng, mode = "character")
size.G <- vector(length = Ng, mode = "numeric")
gs.names <- temp.names[1:Ng]
gs.desc <- temp.desc[1:Ng]
size.G <- temp.size.G[1:Ng]
N <- length(A[,1])
Ns <- length(A[1,])
print(c("Number of genes:", N))
print(c("Number of Gene Sets:", Ng))
print(c("Number of samples:", Ns))
print(c("Original number of Gene Sets:", max.Ng))
print(c("Maximum gene set size:", max.size.G))
# Read gene and gene set annotations if gene annotation file was provided
all.gene.descs <- vector(length = N, mode ="character")
all.gene.symbols <- vector(length = N, mode ="character")
all.gs.descs <- vector(length = Ng, mode ="character")
if (is.data.frame(gene.ann)) {
temp <- gene.ann
a.size <- length(temp[,1])
print(c("Number of gene annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gene.labels, accs)
all.gene.descs <- as.character(temp[locs, "Gene.Title"])
all.gene.symbols <- as.character(temp[locs, "Gene.Symbol"])
rm(temp)
} else if (gene.ann == "") {
for (i in 1:N) {
all.gene.descs[i] <- gene.labels[i]
all.gene.symbols[i] <- gene.labels[i]
}
} else {
temp <- read.delim(gene.ann, header=T, sep=",", comment.char="", as.is=T)
a.size <- length(temp[,1])
print(c("Number of gene annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gene.labels, accs)
all.gene.descs <- as.character(temp[locs, "Gene.Title"])
all.gene.symbols <- as.character(temp[locs, "Gene.Symbol"])
rm(temp)
}
if (is.data.frame(gs.ann)) {
temp <- gs.ann
a.size <- length(temp[,1])
print(c("Number of gene set annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gs.names, accs)
all.gs.descs <- as.character(temp[locs, "SOURCE"])
rm(temp)
} else if (gs.ann == "") {
for (i in 1:Ng) {
all.gs.descs[i] <- gs.desc[i]
}
} else {
temp <- read.delim(gs.ann, header=T, sep="\t", comment.char="", as.is=T)
a.size <- length(temp[,1])
print(c("Number of gene set annotation file entries:", a.size))
accs <- as.character(temp[,1])
locs <- match(gs.names, accs)
all.gs.descs <- as.character(temp[locs, "SOURCE"])
rm(temp)
}
Obs.indicator <- matrix(nrow= Ng, ncol=N)
Obs.RES <- matrix(nrow= Ng, ncol=N)
Obs.ES <- vector(length = Ng, mode = "numeric")
Obs.arg.ES <- vector(length = Ng, mode = "numeric")
Obs.ES.norm <- vector(length = Ng, mode = "numeric")
time2 <- proc.time()
# GSEA methodology
# Compute observed and random permutation gene rankings
obs.s2n <- vector(length=N, mode="numeric")
signal.strength <- vector(length=Ng, mode="numeric")
tag.frac <- vector(length=Ng, mode="numeric")
gene.frac <- vector(length=Ng, mode="numeric")
coherence.ratio <- vector(length=Ng, mode="numeric")
obs.phi.norm <- matrix(nrow = Ng, ncol = nperm)
correl.matrix <- matrix(nrow = N, ncol = nperm)
obs.correl.matrix <- matrix(nrow = N, ncol = nperm)
order.matrix <- matrix(nrow = N, ncol = nperm)
obs.order.matrix <- matrix(nrow = N, ncol = nperm)
nperm.per.call <- 100
n.groups <- nperm %/% nperm.per.call
n.rem <- nperm %% nperm.per.call
n.perms <- c(rep(nperm.per.call, n.groups), n.rem)
n.ends <- cumsum(n.perms)
n.starts <- n.ends - n.perms + 1
if (n.rem == 0) {
n.tot <- n.groups
} else {
n.tot <- n.groups + 1
}
for (nk in 1:n.tot) {
call.nperm <- n.perms[nk]
print(paste("Computing ranked list for actual and permuted phenotypes.......permutations: ", n.starts[nk], "--", n.ends[nk], sep=" "))
O <- GSEA.GeneRanking(A, class.labels, gene.labels, call.nperm, permutation.type = perm.type, sigma.correction = "GeneCluster", fraction=fraction, replace=replace, reverse.sign = reverse.sign)
gc()
order.matrix[,n.starts[nk]:n.ends[nk]] <- O$order.matrix
obs.order.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.order.matrix
correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$s2n.matrix
obs.correl.matrix[,n.starts[nk]:n.ends[nk]] <- O$obs.s2n.matrix
rm(O)
}
obs.s2n <- apply(obs.correl.matrix, 1, median) # using median to assign enrichment scores
obs.index <- order(obs.s2n, decreasing=T)
obs.s2n <- sort(obs.s2n, decreasing=T)
obs.gene.labels <- gene.labels[obs.index]
obs.gene.descs <- all.gene.descs[obs.index]
obs.gene.symbols <- all.gene.symbols[obs.index]
for (r in 1:nperm) {
correl.matrix[, r] <- correl.matrix[order.matrix[,r], r]
}
for (r in 1:nperm) {
obs.correl.matrix[, r] <- obs.correl.matrix[obs.order.matrix[,r], r]
}
gene.list2 <- obs.index
for (i in 1:Ng) {
print(paste("Computing observed enrichment for gene set:", i, gs.names[i], sep=" "))
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
if (OLD.GSEA == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector = obs.s2n)
} else {
GSEA.results <- OLD.GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2)
}
Obs.ES[i] <- GSEA.results$ES
Obs.arg.ES[i] <- GSEA.results$arg.ES
Obs.RES[i,] <- GSEA.results$RES
Obs.indicator[i,] <- GSEA.results$indicator
if (Obs.ES[i] >= 0) { # compute signal strength
tag.frac[i] <- sum(Obs.indicator[i,1:Obs.arg.ES[i]])/size.G[i]
gene.frac[i] <- Obs.arg.ES[i]/N
} else {
tag.frac[i] <- sum(Obs.indicator[i, Obs.arg.ES[i]:N])/size.G[i]
gene.frac[i] <- (N - Obs.arg.ES[i] + 1)/N
}
signal.strength[i] <- tag.frac[i] * (1 - gene.frac[i]) * (N / (N - size.G[i]))
}
# Compute enrichment for random permutations
phi <- matrix(nrow = Ng, ncol = nperm)
phi.norm <- matrix(nrow = Ng, ncol = nperm)
obs.phi <- matrix(nrow = Ng, ncol = nperm)
if (reshuffling.type == "sample.labels") { # reshuffling phenotype labels
for (i in 1:Ng) {
print(paste("Computing random permutations' enrichment for gene set:", i, gs.names[i], sep=" "))
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
for (r in 1:nperm) {
gene.list2 <- order.matrix[,r]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=correl.matrix[, r])
}
phi[i, r] <- GSEA.results$ES
}
if (fraction < 1.0) { # if resampling then compute ES for all observed rankings
for (r in 1:nperm) {
obs.gene.list2 <- obs.order.matrix[,r]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
}
obs.phi[i, r] <- GSEA.results$ES
}
} else { # if no resampling then compute only one column (and fill the others with the same value)
obs.gene.list2 <- obs.order.matrix[,1]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
}
obs.phi[i, 1] <- GSEA.results$ES
for (r in 2:nperm) {
obs.phi[i, r] <- obs.phi[i, 1]
}
}
gc()
}
} else if (reshuffling.type == "gene.labels") { # reshuffling gene labels
for (i in 1:Ng) {
gene.set <- gs[i,gs[i,] != "null"]
gene.set2 <- vector(length=length(gene.set), mode = "numeric")
gene.set2 <- match(gene.set, gene.labels)
for (r in 1:nperm) {
reshuffled.gene.labels <- sample(1:rows)
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=reshuffled.gene.labels, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.s2n)
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=reshuffled.gene.labels, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.s2n)
}
phi[i, r] <- GSEA.results$ES
}
if (fraction < 1.0) { # if resampling then compute ES for all observed rankings
for (r in 1:nperm) {
obs.gene.list2 <- obs.order.matrix[,r]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
}
obs.phi[i, r] <- GSEA.results$ES
}
} else { # if no resampling then compute only one column (and fill the others with the same value)
obs.gene.list2 <- obs.order.matrix[,1]
if (use.fast.enrichment.routine == F) {
GSEA.results <- GSEA.EnrichmentScore(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
} else {
GSEA.results <- GSEA.EnrichmentScore2(gene.list=obs.gene.list2, gene.set=gene.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
}
obs.phi[i, 1] <- GSEA.results$ES
for (r in 2:nperm) {
obs.phi[i, r] <- obs.phi[i, 1]
}
}
gc()
}
}
# Compute 3 types of p-values
# Find nominal p-values
print("Computing nominal p-values...")
p.vals <- matrix(0, nrow = Ng, ncol = 2)
if (OLD.GSEA == F) {
for (i in 1:Ng) {
pos.phi <- NULL
neg.phi <- NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi <- c(pos.phi, phi[i, j])
} else {
neg.phi <- c(neg.phi, phi[i, j])
}
}
ES.value <- Obs.ES[i]
if (ES.value >= 0) {
p.vals[i, 1] <- signif(sum(pos.phi >= ES.value)/length(pos.phi), digits=5)
} else {
p.vals[i, 1] <- signif(sum(neg.phi <= ES.value)/length(neg.phi), digits=5)
}
}
} else { # For OLD GSEA compute the p-val using positive and negative values in the same histogram
for (i in 1:Ng) {
if (Obs.ES[i] >= 0) {
p.vals[i, 1] <- sum(phi[i,] >= Obs.ES[i])/length(phi[i,])
p.vals[i, 1] <- signif(p.vals[i, 1], digits=5)
} else {
p.vals[i, 1] <- sum(phi[i,] <= Obs.ES[i])/length(phi[i,])
p.vals[i, 1] <- signif(p.vals[i, 1], digits=5)
}
}
}
# Find effective size
# Gauss error function expressed through the normal CDF:
#   erf(x) = 2 * pnorm(sqrt(2) * x) - 1
# The original omitted the "- 1" term, so erf(0) returned 1 instead of 0
# and the function was not odd. Only consumed by KS.mean() below (whose
# call sites are currently commented out), so correcting it is safe.
erf <- function (x)
{
2 * pnorm(sqrt(2) * x) - 1
}
# Expected value of the (weighted) KS statistic as a function of the
# effective gene-set size N, from a truncated series expansion over
# k = -100..100 excluding k = 0. Relies on the erf() helper defined above.
# Terms are accumulated in the same order as the original loop so the
# floating-point result is bit-identical.
KS.mean <- function(N) { # KS mean as a function of set size N
series.sum <- 0
for (term.k in c(-100:-1, 1:100)) {
series.sum <- series.sum + 4 * (-1)**(term.k + 1) * (0.25 * exp(-2 * term.k * term.k * N) - sqrt(2 * pi) * erf(sqrt(2 * N) * term.k)/(16 * term.k * sqrt(N)))
}
return(abs(series.sum))
}
# KS.mean.table <- vector(length=5000, mode="numeric")
# for (i in 1:5000) {
# KS.mean.table[i] <- KS.mean(i)
# }
# KS.size <- vector(length=Ng, mode="numeric")
# Rescaling normalization for each gene set null
print("Computing rescaling normalization for each gene set null...")
if (OLD.GSEA == F) {
for (i in 1:Ng) {
pos.phi <- NULL
neg.phi <- NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi <- c(pos.phi, phi[i, j])
} else {
neg.phi <- c(neg.phi, phi[i, j])
}
}
pos.m <- mean(pos.phi)
neg.m <- mean(abs(neg.phi))
# if (Obs.ES[i] >= 0) {
# KS.size[i] <- which.min(abs(KS.mean.table - pos.m))
# } else {
# KS.size[i] <- which.min(abs(KS.mean.table - neg.m))
# }
pos.phi <- pos.phi/pos.m
neg.phi <- neg.phi/neg.m
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
phi.norm[i, j] <- phi[i, j]/pos.m
} else {
phi.norm[i, j] <- phi[i, j]/neg.m
}
}
for (j in 1:nperm) {
if (obs.phi[i, j] >= 0) {
obs.phi.norm[i, j] <- obs.phi[i, j]/pos.m
} else {
obs.phi.norm[i, j] <- obs.phi[i, j]/neg.m
}
}
if (Obs.ES[i] >= 0) {
Obs.ES.norm[i] <- Obs.ES[i]/pos.m
} else {
Obs.ES.norm[i] <- Obs.ES[i]/neg.m
}
}
} else { # For OLD GSEA does not normalize using empirical scaling
for (i in 1:Ng) {
for (j in 1:nperm) {
phi.norm[i, j] <- phi[i, j]/400
}
for (j in 1:nperm) {
obs.phi.norm[i, j] <- obs.phi[i, j]/400
}
Obs.ES.norm[i] <- Obs.ES[i]/400
}
}
# Save intermedite results
if (save.intermediate.results == T) {
filename <- paste(output.directory, doc.string, ".phi.txt", sep="", collapse="")
write.table(phi, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".obs.phi.txt", sep="", collapse="")
write.table(obs.phi, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".phi.norm.txt", sep="", collapse="")
write.table(phi.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".obs.phi.norm.txt", sep="", collapse="")
write.table(obs.phi.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".Obs.ES.txt", sep="", collapse="")
write.table(Obs.ES, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
filename <- paste(output.directory, doc.string, ".Obs.ES.norm.txt", sep="", collapse="")
write.table(Obs.ES.norm, file = filename, quote=F, col.names= F, row.names=F, sep = "\t")
}
# Compute FWER p-vals
print("Computing FWER p-values...")
if (OLD.GSEA == F) {
max.ES.vals.p <- NULL
max.ES.vals.n <- NULL
for (j in 1:nperm) {
pos.phi <- NULL
neg.phi <- NULL
for (i in 1:Ng) {
if (phi.norm[i, j] >= 0) {
pos.phi <- c(pos.phi, phi.norm[i, j])
} else {
neg.phi <- c(neg.phi, phi.norm[i, j])
}
}
if (length(pos.phi) > 0) {
max.ES.vals.p <- c(max.ES.vals.p, max(pos.phi))
}
if (length(neg.phi) > 0) {
max.ES.vals.n <- c(max.ES.vals.n, min(neg.phi))
}
}
for (i in 1:Ng) {
ES.value <- Obs.ES.norm[i]
if (Obs.ES.norm[i] >= 0) {
p.vals[i, 2] <- signif(sum(max.ES.vals.p >= ES.value)/length(max.ES.vals.p), digits=5)
} else {
p.vals[i, 2] <- signif(sum(max.ES.vals.n <= ES.value)/length(max.ES.vals.n), digits=5)
}
}
} else { # For OLD GSEA compute the FWER using positive and negative values in the same histogram
max.ES.vals <- NULL
for (j in 1:nperm) {
max.NES <- max(phi.norm[,j])
min.NES <- min(phi.norm[,j])
if (max.NES > - min.NES) {
max.val <- max.NES
} else {
max.val <- min.NES
}
max.ES.vals <- c(max.ES.vals, max.val)
}
for (i in 1:Ng) {
if (Obs.ES.norm[i] >= 0) {
p.vals[i, 2] <- sum(max.ES.vals >= Obs.ES.norm[i])/length(max.ES.vals)
} else {
p.vals[i, 2] <- sum(max.ES.vals <= Obs.ES.norm[i])/length(max.ES.vals)
}
p.vals[i, 2] <- signif(p.vals[i, 2], digits=4)
}
}
# Compute FDRs
print("Computing FDR q-values...")
NES <- vector(length=Ng, mode="numeric")
phi.norm.mean <- vector(length=Ng, mode="numeric")
obs.phi.norm.mean <- vector(length=Ng, mode="numeric")
phi.norm.median <- vector(length=Ng, mode="numeric")
obs.phi.norm.median <- vector(length=Ng, mode="numeric")
phi.norm.mean <- vector(length=Ng, mode="numeric")
obs.phi.mean <- vector(length=Ng, mode="numeric")
FDR.mean <- vector(length=Ng, mode="numeric")
FDR.median <- vector(length=Ng, mode="numeric")
phi.norm.median.d <- vector(length=Ng, mode="numeric")
obs.phi.norm.median.d <- vector(length=Ng, mode="numeric")
Obs.ES.index <- order(Obs.ES.norm, decreasing=T)
Orig.index <- seq(1, Ng)
Orig.index <- Orig.index[Obs.ES.index]
Orig.index <- order(Orig.index, decreasing=F)
Obs.ES.norm.sorted <- Obs.ES.norm[Obs.ES.index]
gs.names.sorted <- gs.names[Obs.ES.index]
for (k in 1:Ng) {
NES[k] <- Obs.ES.norm.sorted[k]
ES.value <- NES[k]
count.col <- vector(length=nperm, mode="numeric")
obs.count.col <- vector(length=nperm, mode="numeric")
for (i in 1:nperm) {
phi.vec <- phi.norm[,i]
obs.phi.vec <- obs.phi.norm[,i]
if (ES.value >= 0) {
count.col.norm <- sum(phi.vec >= 0)
obs.count.col.norm <- sum(obs.phi.vec >= 0)
count.col[i] <- ifelse(count.col.norm > 0, sum(phi.vec >= ES.value)/count.col.norm, 0)
obs.count.col[i] <- ifelse(obs.count.col.norm > 0, sum(obs.phi.vec >= ES.value)/obs.count.col.norm, 0)
} else {
count.col.norm <- sum(phi.vec < 0)
obs.count.col.norm <- sum(obs.phi.vec < 0)
count.col[i] <- ifelse(count.col.norm > 0, sum(phi.vec <= ES.value)/count.col.norm, 0)
obs.count.col[i] <- ifelse(obs.count.col.norm > 0, sum(obs.phi.vec <= ES.value)/obs.count.col.norm, 0)
}
}
phi.norm.mean[k] <- mean(count.col)
obs.phi.norm.mean[k] <- mean(obs.count.col)
phi.norm.median[k] <- median(count.col)
obs.phi.norm.median[k] <- median(obs.count.col)
FDR.mean[k] <- ifelse(phi.norm.mean[k]/obs.phi.norm.mean[k] < 1, phi.norm.mean[k]/obs.phi.norm.mean[k], 1)
FDR.median[k] <- ifelse(phi.norm.median[k]/obs.phi.norm.median[k] < 1, phi.norm.median[k]/obs.phi.norm.median[k], 1)
}
# adjust q-values
if (adjust.FDR.q.val == T) {
pos.nes <- length(NES[NES >= 0])
min.FDR.mean <- FDR.mean[pos.nes]
min.FDR.median <- FDR.median[pos.nes]
for (k in seq(pos.nes - 1, 1, -1)) {
if (FDR.mean[k] < min.FDR.mean) {
min.FDR.mean <- FDR.mean[k]
}
if (min.FDR.mean < FDR.mean[k]) {
FDR.mean[k] <- min.FDR.mean
}
}
neg.nes <- pos.nes + 1
min.FDR.mean <- FDR.mean[neg.nes]
min.FDR.median <- FDR.median[neg.nes]
for (k in seq(neg.nes + 1, Ng)) {
if (FDR.mean[k] < min.FDR.mean) {
min.FDR.mean <- FDR.mean[k]
}
if (min.FDR.mean < FDR.mean[k]) {
FDR.mean[k] <- min.FDR.mean
}
}
}
obs.phi.norm.mean.sorted <- obs.phi.norm.mean[Orig.index]
phi.norm.mean.sorted <- phi.norm.mean[Orig.index]
FDR.mean.sorted <- FDR.mean[Orig.index]
FDR.median.sorted <- FDR.median[Orig.index]
# Compute global statistic
glob.p.vals <- vector(length=Ng, mode="numeric")
NULL.pass <- vector(length=nperm, mode="numeric")
OBS.pass <- vector(length=nperm, mode="numeric")
for (k in 1:Ng) {
NES[k] <- Obs.ES.norm.sorted[k]
if (NES[k] >= 0) {
for (i in 1:nperm) {
NULL.pos <- sum(phi.norm[,i] >= 0)
NULL.pass[i] <- ifelse(NULL.pos > 0, sum(phi.norm[,i] >= NES[k])/NULL.pos, 0)
OBS.pos <- sum(obs.phi.norm[,i] >= 0)
OBS.pass[i] <- ifelse(OBS.pos > 0, sum(obs.phi.norm[,i] >= NES[k])/OBS.pos, 0)
}
} else {
for (i in 1:nperm) {
NULL.neg <- sum(phi.norm[,i] < 0)
NULL.pass[i] <- ifelse(NULL.neg > 0, sum(phi.norm[,i] <= NES[k])/NULL.neg, 0)
OBS.neg <- sum(obs.phi.norm[,i] < 0)
OBS.pass[i] <- ifelse(OBS.neg > 0, sum(obs.phi.norm[,i] <= NES[k])/OBS.neg, 0)
}
}
glob.p.vals[k] <- sum(NULL.pass >= mean(OBS.pass))/nperm
}
glob.p.vals.sorted <- glob.p.vals[Orig.index]
# Produce results report
print("Producing result tables and plots...")
Obs.ES <- signif(Obs.ES, digits=5)
Obs.ES.norm <- signif(Obs.ES.norm, digits=5)
p.vals <- signif(p.vals, digits=4)
signal.strength <- signif(signal.strength, digits=3)
tag.frac <- signif(tag.frac, digits=3)
gene.frac <- signif(gene.frac, digits=3)
FDR.mean.sorted <- signif(FDR.mean.sorted, digits=5)
FDR.median.sorted <- signif(FDR.median.sorted, digits=5)
glob.p.vals.sorted <- signif(glob.p.vals.sorted, digits=5)
report <- data.frame(cbind(gs.names, size.G, all.gs.descs, Obs.ES, Obs.ES.norm, p.vals[,1], FDR.mean.sorted, p.vals[,2], tag.frac, gene.frac, signal.strength, FDR.median.sorted, glob.p.vals.sorted))
names(report) <- c("GS", "SIZE", "SOURCE", "ES", "NES", "NOM p-val", "FDR q-val", "FWER p-val", "Tag %", "Gene %", "Signal", "FDR (median)", "glob.p.val")
# print(report)
report2 <- report
report.index2 <- order(Obs.ES.norm, decreasing=T)
for (i in 1:Ng) {
report2[i,] <- report[report.index2[i],]
}
report3 <- report
report.index3 <- order(Obs.ES.norm, decreasing=F)
for (i in 1:Ng) {
report3[i,] <- report[report.index3[i],]
}
phen1.rows <- length(Obs.ES.norm[Obs.ES.norm >= 0])
phen2.rows <- length(Obs.ES.norm[Obs.ES.norm < 0])
report.phen1 <- report2[1:phen1.rows,]
report.phen2 <- report3[1:phen2.rows,]
if (output.directory != "") {
if (phen1.rows > 0) {
filename <- paste(output.directory, doc.string, ".SUMMARY.RESULTS.REPORT.", phen1,".txt", sep="", collapse="")
write.table(report.phen1, file = filename, quote=F, row.names=F, sep = "\t")
}
if (phen2.rows > 0) {
filename <- paste(output.directory, doc.string, ".SUMMARY.RESULTS.REPORT.", phen2,".txt", sep="", collapse="")
write.table(report.phen2, file = filename, quote=F, row.names=F, sep = "\t")
}
}
# Global plots
if (output.directory != "") {
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
glob.filename <- paste(output.directory, doc.string, ".global.plots", sep="", collapse="")
windows(width = 10, height = 10)
} else if (.Platform$OS.type == "unix") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
}
} else {
if (.Platform$OS.type == "unix") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
} else if (.Platform$OS.type == "windows") {
glob.filename <- paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
}
}
}
nf <- layout(matrix(c(1,2,3,4), 2, 2, byrow=T), c(1,1), c(1,1), TRUE)
# plot S2N correlation profile
location <- 1:N
max.corr <- max(obs.s2n)
min.corr <- min(obs.s2n)
x <- plot(location, obs.s2n, ylab = "Signal to Noise Ratio (S2N)", xlab = "Gene List Location", main = "Gene List Correlation (S2N) Profile", type = "l", lwd = 2, cex = 0.9, col = 1)
for (i in seq(1, N, 20)) {
lines(c(i, i), c(0, obs.s2n[i]), lwd = 3, cex = 0.9, col = colors()[12]) # shading of correlation plot
}
x <- points(location, obs.s2n, type = "l", lwd = 2, cex = 0.9, col = 1)
lines(c(1, N), c(0, 0), lwd = 2, lty = 1, cex = 0.9, col = 1) # zero correlation horizontal line
temp <- order(abs(obs.s2n), decreasing=T)
arg.correl <- temp[N]
lines(c(arg.correl, arg.correl), c(min.corr, 0.7*max.corr), lwd = 2, lty = 3, cex = 0.9, col = 1) # zero correlation vertical line
area.bias <- signif(100*(sum(obs.s2n[1:arg.correl]) + sum(obs.s2n[arg.correl:N]))/sum(abs(obs.s2n[1:N])), digits=3)
area.phen <- ifelse(area.bias >= 0, phen1, phen2)
delta.string <- paste("Corr. Area Bias to \"", area.phen, "\" =", abs(area.bias), "%", sep="", collapse="")
zero.crossing.string <- paste("Zero Crossing at location ", arg.correl, " (", signif(100*arg.correl/N, digits=3), " %)")
leg.txt <- c(delta.string, zero.crossing.string)
legend(x=N/10, y=max.corr, bty="n", bg = "white", legend=leg.txt, cex = 0.9)
leg.txt <- paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=-0.05*max.corr, adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt <- paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=0.05*max.corr, adj = c(1, 0), labels=leg.txt, cex = 0.9)
if (Ng > 1) { # make these plots only if there are multiple gene sets.
# compute plots of actual (weighted) null and observed
phi.densities.pos <- matrix(0, nrow=512, ncol=nperm)
phi.densities.neg <- matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.pos <- matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.neg <- matrix(0, nrow=512, ncol=nperm)
phi.density.mean.pos <- vector(length=512, mode = "numeric")
phi.density.mean.neg <- vector(length=512, mode = "numeric")
obs.phi.density.mean.pos <- vector(length=512, mode = "numeric")
obs.phi.density.mean.neg <- vector(length=512, mode = "numeric")
phi.density.median.pos <- vector(length=512, mode = "numeric")
phi.density.median.neg <- vector(length=512, mode = "numeric")
obs.phi.density.median.pos <- vector(length=512, mode = "numeric")
obs.phi.density.median.neg <- vector(length=512, mode = "numeric")
x.coor.pos <- vector(length=512, mode = "numeric")
x.coor.neg <- vector(length=512, mode = "numeric")
for (i in 1:nperm) {
pos.phi <- phi.norm[phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp <- density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.pos[, i] <- temp$y
norm.factor <- sum(phi.densities.pos[, i])
phi.densities.pos[, i] <- phi.densities.pos[, i]/norm.factor
if (i == 1) {
x.coor.pos <- temp$x
}
neg.phi <- phi.norm[phi.norm[, i] < 0, i]
if (length(neg.phi) > 2) {
temp <- density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.neg[, i] <- temp$y
norm.factor <- sum(phi.densities.neg[, i])
phi.densities.neg[, i] <- phi.densities.neg[, i]/norm.factor
if (i == 1) {
x.coor.neg <- temp$x
}
pos.phi <- obs.phi.norm[obs.phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp <- density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.pos[, i] <- temp$y
norm.factor <- sum(obs.phi.densities.pos[, i])
obs.phi.densities.pos[, i] <- obs.phi.densities.pos[, i]/norm.factor
neg.phi <- obs.phi.norm[obs.phi.norm[, i] < 0, i]
if (length(neg.phi)> 2) {
temp <- density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp <- list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.neg[, i] <- temp$y
norm.factor <- sum(obs.phi.densities.neg[, i])
obs.phi.densities.neg[, i] <- obs.phi.densities.neg[, i]/norm.factor
}
phi.density.mean.pos <- apply(phi.densities.pos, 1, mean)
phi.density.mean.neg <- apply(phi.densities.neg, 1, mean)
obs.phi.density.mean.pos <- apply(obs.phi.densities.pos, 1, mean)
obs.phi.density.mean.neg <- apply(obs.phi.densities.neg, 1, mean)
phi.density.median.pos <- apply(phi.densities.pos, 1, median)
phi.density.median.neg <- apply(phi.densities.neg, 1, median)
obs.phi.density.median.pos <- apply(obs.phi.densities.pos, 1, median)
obs.phi.density.median.neg <- apply(obs.phi.densities.neg, 1, median)
x <- c(x.coor.neg, x.coor.pos)
x.plot.range <- range(x)
y1 <- c(phi.density.mean.neg, phi.density.mean.pos)
y2 <- c(obs.phi.density.mean.neg, obs.phi.density.mean.pos)
y.plot.range <- c(-0.3*max(c(y1, y2)), max(c(y1, y2)))
print(c(y.plot.range, max(c(y1, y2)), max(y1), max(y2)))
plot(x, y1, xlim = x.plot.range, ylim = 1.5*y.plot.range, type = "l", lwd = 2, col = 2, xlab = "NES", ylab = "P(NES)", main = "Global Observed and Null Densities (Area Normalized)")
y1.point <- y1[seq(1, length(x), 2)]
y2.point <- y2[seq(2, length(x), 2)]
x1.point <- x[seq(1, length(x), 2)]
x2.point <- x[seq(2, length(x), 2)]
# for (i in 1:length(x1.point)) {
# lines(c(x1.point[i], x1.point[i]), c(0, y1.point[i]), lwd = 3, cex = 0.9, col = colors()[555]) # shading
# }
#
# for (i in 1:length(x2.point)) {
# lines(c(x2.point[i], x2.point[i]), c(0, y2.point[i]), lwd = 3, cex = 0.9, col = colors()[29]) # shading
# }
points(x, y1, type = "l", lwd = 2, col = colors()[555])
points(x, y2, type = "l", lwd = 2, col = colors()[29])
for (i in 1:Ng) {
col <- ifelse(Obs.ES.norm[i] > 0, 2, 3)
lines(c(Obs.ES.norm[i], Obs.ES.norm[i]), c(-0.2*max(c(y1, y2)), 0), lwd = 1, lty = 1, col = 1)
}
leg.txt <- paste("Neg. ES: \"", phen2, " \" ", sep="", collapse="")
text(x=x.plot.range[1], y=-0.25*max(c(y1, y2)), adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt <- paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
text(x=x.plot.range[2], y=-0.25*max(c(y1, y2)), adj = c(1, 1), labels=leg.txt, cex = 0.9)
leg.txt <- c("Null Density", "Observed Density", "Observed NES values")
c.vec <- c(colors()[555], colors()[29], 1)
lty.vec <- c(1, 1, 1)
lwd.vec <- c(2, 2, 2)
legend(x=0, y=1.5*y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 0.9)
B <- A[obs.index,]
if (N > 300) {
C <- rbind(B[1:100,], rep(0, Ns), rep(0, Ns), B[(floor(N/2) - 50 + 1):(floor(N/2) + 50),], rep(0, Ns), rep(0, Ns), B[(N - 100 + 1):N,])
}
rm(B)
GSEA.HeatMapPlot(V = C, col.labels = class.labels, col.classes = class.phen, main = "Heat Map for Genes in Dataset")
# p-vals plot
nom.p.vals <- p.vals[Obs.ES.index,1]
FWER.p.vals <- p.vals[Obs.ES.index,2]
plot.range <- 1.25*range(NES)
plot(NES, FDR.mean, ylim = c(0, 1), xlim = plot.range, col = 1, bg = 1, type="p", pch = 22, cex = 0.75, xlab = "NES", main = "p-values vs. NES", ylab ="p-val/q-val")
points(NES, nom.p.vals, type = "p", col = 2, bg = 2, pch = 22, cex = 0.75)
points(NES, FWER.p.vals, type = "p", col = colors()[577], bg = colors()[577], pch = 22, cex = 0.75)
leg.txt <- c("Nominal p-value", "FWER p-value", "FDR q-value")
c.vec <- c(2, colors()[577], 1)
pch.vec <- c(22, 22, 22)
legend(x=-0.5, y=0.5, bty="n", bg = "white", legend=leg.txt, pch = pch.vec, col = c.vec, pt.bg = c.vec, cex = 0.9)
lines(c(min(NES), max(NES)), c(nom.p.val.threshold, nom.p.val.threshold), lwd = 1, lty = 2, col = 2)
lines(c(min(NES), max(NES)), c(fwer.p.val.threshold, fwer.p.val.threshold), lwd = 1, lty = 2, col = colors()[577])
lines(c(min(NES), max(NES)), c(fdr.q.val.threshold, fdr.q.val.threshold), lwd = 1, lty = 2, col = 1)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = glob.filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
} # if Ng > 1
#----------------------------------------------------------------------------
# Produce report for each gene set passing the nominal, FWER or FDR test or the top topgs in each side
if (topgs > floor(Ng/2)) {
topgs <- floor(Ng/2)
}
for (i in 1:Ng) {
if ((p.vals[i, 1] <= nom.p.val.threshold) ||
(p.vals[i, 2] <= fwer.p.val.threshold) ||
(FDR.mean.sorted[i] <= fdr.q.val.threshold) ||
(is.element(i, c(Obs.ES.index[1:topgs], Obs.ES.index[(Ng - topgs + 1): Ng])))) {
# produce report per gene set
kk <- 1
gene.number <- vector(length = size.G[i], mode = "character")
gene.names <- vector(length = size.G[i], mode = "character")
gene.symbols <- vector(length = size.G[i], mode = "character")
gene.descs <- vector(length = size.G[i], mode = "character")
gene.list.loc <- vector(length = size.G[i], mode = "numeric")
core.enrichment <- vector(length = size.G[i], mode = "character")
gene.s2n <- vector(length = size.G[i], mode = "numeric")
gene.RES <- vector(length = size.G[i], mode = "numeric")
rank.list <- seq(1, N)
if (Obs.ES[i] >= 0) {
set.k <- seq(1, N, 1)
phen.tag <- phen1
loc <- match(i, Obs.ES.index)
} else {
set.k <- seq(N, 1, -1)
phen.tag <- phen2
loc <- Ng - match(i, Obs.ES.index) + 1
}
for (k in set.k) {
if (Obs.indicator[i, k] == 1) {
gene.number[kk] <- kk
gene.names[kk] <- obs.gene.labels[k]
gene.symbols[kk] <- substr(obs.gene.symbols[k], 1, 15)
gene.descs[kk] <- substr(obs.gene.descs[k], 1, 40)
gene.list.loc[kk] <- k
gene.s2n[kk] <- signif(obs.s2n[k], digits=3)
gene.RES[kk] <- signif(Obs.RES[i, k], digits = 3)
if (Obs.ES[i] >= 0) {
core.enrichment[kk] <- ifelse(gene.list.loc[kk] <= Obs.arg.ES[i], "YES", "NO")
} else {
core.enrichment[kk] <- ifelse(gene.list.loc[kk] > Obs.arg.ES[i], "YES", "NO")
}
kk <- kk + 1
}
}
gene.report <- data.frame(cbind(gene.number, gene.names, gene.symbols, gene.descs, gene.list.loc, gene.s2n, gene.RES, core.enrichment))
names(gene.report) <- c("#", "GENE", "SYMBOL", "DESC", "LIST LOC", "S2N", "RES", "CORE_ENRICHMENT")
# print(gene.report)
if (output.directory != "") {
filename <- paste(output.directory, doc.string, ".", gs.names[i], ".report.", phen.tag, ".", loc, ".txt", sep="", collapse="")
write.table(gene.report, file = filename, quote=F, row.names=F, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, sep="", collapse="")
windows(width = 14, height = 6)
} else if (.Platform$OS.type == "unix") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
}
} else {
if (.Platform$OS.type == "unix") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
} else if (.Platform$OS.type == "windows") {
gs.filename <- paste(output.directory, doc.string, ".", gs.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=gs.filename, height = 6, width = 14)
}
}
}
# nf <- layout(matrix(c(1,2,3), 1, 3, byrow=T), 1, c(1, 1, 1), TRUE)
nf <- layout(matrix(c(1,0,2), 1, 3, byrow=T), widths=c(1,0,1), heights=c(1,0,1))
ind <- 1:N
min.RES <- min(Obs.RES[i,])
max.RES <- max(Obs.RES[i,])
if (max.RES < 0.3) max.RES <- 0.3
if (min.RES > -0.3) min.RES <- -0.3
delta <- (max.RES - min.RES)*0.50
min.plot <- min.RES - 2*delta
max.plot <- max.RES
max.corr <- max(obs.s2n)
min.corr <- min(obs.s2n)
Obs.correl.vector.norm <- (obs.s2n - min.corr)/(max.corr - min.corr)*1.25*delta + min.plot
zero.corr.line <- (- min.corr/(max.corr - min.corr))*1.25*delta + min.plot
col <- ifelse(Obs.ES[i] > 0, 2, 4)
# Running enrichment plot
# sub.string <- paste("Number of genes: ", N, " (in list), ", size.G[i], " (in gene set)", sep = "", collapse="")
sub.string <- paste("ES =", signif(Obs.ES[i], digits = 3), " NES =", signif(Obs.ES.norm[i], digits=3), "Nom. p-val=", signif(p.vals[i, 1], digits = 3),"FWER=", signif(p.vals[i, 2], digits = 3), "FDR=", signif(FDR.mean.sorted[i], digits = 3))
# main.string <- paste("Gene Set ", i, ":", gs.names[i])
main.string <- paste("Gene Set:", gs.names[i])
# plot(ind, Obs.RES[i,], main = main.string, sub = sub.string, xlab = "Gene List Index", ylab = "Running Enrichment Score (RES)", xlim=c(1, N), ylim=c(min.plot, max.plot), type = "l", lwd = 2, cex = 1, col = col)
plot(ind, Obs.RES[i,], main = main.string, xlab = sub.string, ylab = "Running Enrichment Score (RES)", xlim=c(1, N), ylim=c(min.plot, max.plot), type = "l", lwd = 2, cex = 1, col = col)
for (j in seq(1, N, 20)) {
lines(c(j, j), c(zero.corr.line, Obs.correl.vector.norm[j]), lwd = 1, cex = 1, col = colors()[12]) # shading of correlation plot
}
lines(c(1, N), c(0, 0), lwd = 1, lty = 2, cex = 1, col = 1) # zero RES line
lines(c(Obs.arg.ES[i], Obs.arg.ES[i]), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = col) # max enrichment vertical line
for (j in 1:N) {
if (Obs.indicator[i, j] == 1) {
lines(c(j, j), c(min.plot + 1.25*delta, min.plot + 1.75*delta), lwd = 1, lty = 1, cex = 1, col = 1) # enrichment tags
}
}
lines(ind, Obs.correl.vector.norm, type = "l", lwd = 1, cex = 1, col = 1)
lines(c(1, N), c(zero.corr.line, zero.corr.line), lwd = 1, lty = 1, cex = 1, col = 1) # zero correlation horizontal line
temp <- order(abs(obs.s2n), decreasing=T)
arg.correl <- temp[N]
lines(c(arg.correl, arg.correl), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = 3) # zero crossing correlation vertical line
leg.txt <- paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=min.plot, adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=min.plot, adj = c(1, 0), labels=leg.txt, cex = 1.0)
adjx <- ifelse(Obs.ES[i] > 0, 0, 1)
leg.txt <- paste("Peak at ", Obs.arg.ES[i], sep="", collapse="")
text(x=Obs.arg.ES[i], y=min.plot + 1.8*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste("Zero crossing at ", arg.correl, sep="", collapse="")
text(x=arg.correl, y=min.plot + 1.95*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
# nominal p-val histogram
# sub.string <- paste("ES =", signif(Obs.ES[i], digits = 3), " NES =", signif(Obs.ES.norm[i], digits=3), "Nom. p-val=", signif(p.vals[i, 1], digits = 3),"FWER=", signif(p.vals[i, 2], digits = 3), "FDR=", signif(FDR.mean.sorted[i], digits = 3))
temp <- density(phi[i,], adjust=adjust.param)
x.plot.range <- range(temp$x)
y.plot.range <- c(-0.125*max(temp$y), 1.5*max(temp$y))
# plot(temp$x, temp$y, type = "l", sub = sub.string, xlim = x.plot.range, ylim = y.plot.range, lwd = 2, col = 2, main = "Gene Set Null Distribution", xlab = "ES", ylab="P(ES)")
x.loc <- which.min(abs(temp$x - Obs.ES[i]))
# lines(c(Obs.ES[i], Obs.ES[i]), c(0, temp$y[x.loc]), lwd = 2, lty = 1, cex = 1, col = 1)
# lines(x.plot.range, c(0, 0), lwd = 1, lty = 1, cex = 1, col = 1)
leg.txt <- c("Gene Set Null Density", "Observed Gene Set ES value")
c.vec <- c(2, 1)
lty.vec <- c(1, 1)
lwd.vec <- c(2, 2)
# legend(x=-0.2, y=y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 1.0)
leg.txt <- paste("Neg. ES \"", phen2, "\" ", sep="", collapse="")
# text(x=x.plot.range[1], y=-0.1*max(temp$y), adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt <- paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
# text(x=x.plot.range[2], y=-0.1*max(temp$y), adj = c(1, 0), labels=leg.txt, cex = 1.0)
# create pinkogram for each gene set
kk <- 1
pinko <- matrix(0, nrow = size.G[i], ncol = cols)
pinko.gene.names <- vector(length = size.G[i], mode = "character")
for (k in 1:rows) {
if (Obs.indicator[i, k] == 1) {
pinko[kk,] <- A[obs.index[k],]
pinko.gene.names[kk] <- obs.gene.symbols[k]
kk <- kk + 1
}
}
GSEA.HeatMapPlot(V = pinko, row.names = pinko.gene.names, col.labels = class.labels, col.classes = class.phen, col.names = sample.names, main =" Heat Map for Genes in Gene Set", xlab=" ", ylab=" ")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = gs.filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
} # if p.vals thres
} # loop over gene sets
return(list(report1 = report.phen1, report2 = report.phen2))
} # end of definition of GSEA.analysis
# Write an expression data frame to disk in GCT (v1.2) format.
#
# gct:      data frame of expression values; row names are gene ids,
#           column names are sample names.
# filename: output path.
#
# Layout produced:
#   line 1: "#1.2"
#   line 2: <n.rows> \t <n.cols>
#   line 3: Name \t Description \t <sample names...>
#   data:   <name> \t <name> \t <values...>  (Description repeats the name)
# Returns gct unchanged so the call can be chained.
GSEA.write.gct <- function (gct, filename)
{
f <- file(filename, "w")
on.exit(close(f), add = TRUE)  # ensure the handle is released even on error
cat("#1.2", "\n", file = f, append = TRUE, sep = "")
cat(dim(gct)[1], "\t", dim(gct)[2], "\n", file = f, append = TRUE, sep = "")
cat("Name", "\t", file = f, append = TRUE, sep = "")
cat("Description", file = f, append = TRUE, sep = "")
names <- names(gct)
# was "for (j in 2:length(names))" after printing names[1] separately,
# which emitted "names[2] names[1]" for a one-column input because 2:1
# counts backwards; seq_along() handles every column count correctly
for (j in seq_along(names)) {
cat("\t", names[j], file = f, append = TRUE, sep = "")
}
cat("\n", file = f, append = TRUE, sep = "\t")
oldWarn <- options(warn = -1)  # silence coercion warnings while filling m
on.exit(options(oldWarn), add = TRUE)  # restore caller's setting (was hard-coded to warn = 0)
m <- matrix(nrow = dim(gct)[1], ncol = dim(gct)[2] + 2)
m[, 1] <- row.names(gct)
m[, 2] <- row.names(gct)  # GCT "Description" column: repeat the gene name
for (i in seq_len(dim(gct)[2])) {
m[, i + 2] <- gct[, i]
}
write.table(m, file = f, append = TRUE, quote = FALSE, sep = "\t", eol = "\n", col.names = FALSE, row.names = FALSE)
return(gct)
}
GSEA.ConsPlot <- function(V, col.names, main = " ", sub = " ", xlab=" ", ylab=" ") {
# Render a (square) consensus matrix V as a heat map with sample labels
# along the top and left margins. The matrix is flipped vertically and its
# values reflected about the range midpoint so that high consensus lands at
# the hot end of the fixed blue-to-red color ramp.
n.samples <- length(V[1,])
max.val <- max(V)
min.val <- min(V)
# Equivalent to the original elementwise double loop
#   B[k, j] <- max.val - V[i, j] + min.val  with  k = n.samples - i + 1:
# reverse the rows and reflect the values in one vectorized step.
B <- (max.val + min.val) - V[rev(seq_len(n.samples)), , drop = FALSE]
col.map <- rev(c("#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA", "#FF9DB0", "#FF7080", "#FF5A5A", "#FF4040", "#FF0D1D"))
par(mar = c(5, 15, 15, 5))
image(1:n.samples, 1:n.samples, t(B), col = col.map, axes=FALSE, main=main, sub=sub, xlab= xlab, ylab=ylab)
col.names <- substr(col.names, 1, 25)  # truncate long labels (substr is vectorized)
col.names2 <- rev(col.names)
# shrink axis text as the number of samples grows
size.col.char <- ifelse(n.samples < 15, 1, sqrt(15/n.samples))
axis(2, at=1:n.samples, labels=col.names2, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.col.char, font.axis=1, line=-1)
axis(3, at=1:n.samples, labels=col.names, adj= 1, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=1, line=-1)
return()
}
GSEA.HeatMapPlot2 <- function(V, row.names = "NA", col.names = "NA", main = " ", sub = " ", xlab=" ", ylab=" ", color.map = "default") {
  #
  # Plot a heatmap of matrix V, with optional row and column labels.
  #
  # Args:
  #   V:          numeric matrix; rows are drawn top-to-bottom in input order.
  #   row.names:  vector of row labels; a length-1 value (e.g. "NA") suppresses them.
  #   col.names:  vector of column labels; a length-1 value suppresses them.
  #   color.map:  vector of colors, or "default" for a reversed rainbow ramp.
  #
  # Fixes vs. the original:
  #   * rainbow() is called without the 'gamma' argument, which was removed
  #     in R 2.14.0 and now raises an "unused argument" error;
  #   * the "default" test checks length first, since callers pass multi-color
  #     vectors and `if` on a length > 1 condition is an error in R >= 4.2;
  #   * size.col.char is computed unconditionally, so column labels work even
  #     when row labels are suppressed (it used to be defined only inside the
  #     row-label branch).
  n.rows <- length(V[,1])
  n.cols <- length(V[1,])
  if (length(color.map) == 1 && color.map == "default") {
    color.map <- rev(rainbow(100, s = 1.0, v = 0.75, start = 0.0, end = 0.75))
  }
  # image() draws row 1 at the bottom, so flip the rows first.
  heatm <- matrix(0, nrow = n.rows, ncol = n.cols)
  heatm[1:n.rows,] <- V[seq(n.rows, 1, -1),]
  par(mar = c(7, 15, 5, 5))
  image(1:n.cols, 1:n.rows, t(heatm), col=color.map, axes=FALSE, main=main, sub = sub, xlab= xlab, ylab=ylab)
  size.row.char <- ifelse(n.rows < 15, 1, sqrt(15/n.rows))
  size.col.char <- ifelse(n.cols < 15, 1, sqrt(10/n.cols))
  if (length(row.names) > 1) {
    row.names <- substr(row.names, 1, 40)        # truncate long gene names
    row.names <- row.names[seq(n.rows, 1, -1)]   # match the flipped rows
    axis(2, at=1:n.rows, labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char, font.axis=1, line=-1)
  }
  if (length(col.names) > 1) {
    axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
  }
  return()
}
# GSEA.Analyze.Sets: post-processing of GSEA leading-edge ("core enrichment")
# gene subsets.  Scans `directory` for the per-gene-set *.report.* files
# produced by GSEA.analysis, groups them by phenotype, and for each of the two
# phenotypes:
#   * plots the pairwise Jaccard overlap of the leading-edge subsets,
#   * saves the set-by-gene membership matrix as a .gct file and the union of
#     leading-edge genes as a one-line .gmt gene set,
#   * plots the membership matrix as-is and again after hierarchical
#     clustering of both genes and sets, writing the clustered matrix to .txt.
# Plots go to on-screen windows in interactive Windows runs (saved as jpeg via
# savePlot); in every other case they are written directly to PDF files.
#
# Args:
#   directory:           folder holding the .report. files; also used for output.
#   topgs:               number of top gene sets per phenotype to analyze
#                        ("" = all of them).
#   non.interactive.run: TRUE forces PDF output even on Windows.
#   height, width:       plot dimensions in inches.
GSEA.Analyze.Sets <- function(
directory,
topgs = "",
non.interactive.run = F,
height = 12,
width = 17) {
file.list <- list.files(directory)
# NOTE(review): regexpr's pattern is a regex, so "." matches any character
# here; files whose names merely resemble ".report." would also match.
files <- file.list[regexpr(pattern = ".report.", file.list) > 1]
max.sets <- length(files)
# set.table columns: 1 file name, 2 truncated set name, 3 phenotype,
# 4 sequence number, 5 dataset prefix.
set.table <- matrix(nrow = max.sets, ncol = 5)
for (i in 1:max.sets) {
temp1 <- strsplit(files[i], split=".report.")
# NOTE(review): split="." is a regular expression and "." matches ANY single
# character (same for temp3 below); fixed=TRUE or split="[.]" looks like the
# intent -- verify against real report file names before changing.
temp2 <- strsplit(temp1[[1]][1], split=".")
s <- length(temp2[[1]])
prefix.name <- paste(temp2[[1]][1:(s-1)], sep="", collapse="")
set.name <- temp2[[1]][s]
temp3 <- strsplit(temp1[[1]][2], split=".")
phenotype <- temp3[[1]][1]
seq.number <- temp3[[1]][2]
dataset <- paste(temp2[[1]][1:(s-1)], sep="", collapse=".")
set.table[i, 1] <- files[i]
set.table[i, 3] <- phenotype
set.table[i, 4] <- as.numeric(seq.number)
set.table[i, 5] <- dataset
# set.table[i, 2] <- paste(set.name, dataset, sep ="", collapse="")
set.table[i, 2] <- substr(set.name, 1, 20)
}
print(c("set name=", prefix.name))
# Dataset prefix of the last file is used as the tag for all output names.
doc.string <- prefix.name
set.table <- noquote(set.table)
# Split the table by phenotype and order each half by sequence number.
phen.order <- order(set.table[, 3], decreasing = T)
set.table <- set.table[phen.order,]
phen1 <- names(table(set.table[,3]))[1]
phen2 <- names(table(set.table[,3]))[2]
set.table.phen1 <- set.table[set.table[,3] == phen1,]
set.table.phen2 <- set.table[set.table[,3] == phen2,]
seq.order <- order(as.numeric(set.table.phen1[, 4]), decreasing = F)
set.table.phen1 <- set.table.phen1[seq.order,]
seq.order <- order(as.numeric(set.table.phen2[, 4]), decreasing = F)
set.table.phen2 <- set.table.phen2[seq.order,]
# max.sets.phen1 <- length(set.table.phen1[,1])
# max.sets.phen2 <- length(set.table.phen2[,1])
if (topgs == "") {
max.sets.phen1 <- length(set.table.phen1[,1])
max.sets.phen2 <- length(set.table.phen2[,1])
} else {
max.sets.phen1 <- ifelse(topgs > length(set.table.phen1[,1]), length(set.table.phen1[,1]), topgs)
max.sets.phen2 <- ifelse(topgs > length(set.table.phen2[,1]), length(set.table.phen2[,1]), topgs)
}
# Analysis for phen1
# Read each report and keep only its leading-edge ("core enrichment") genes.
leading.lists <- NULL
for (i in 1:max.sets.phen1) {
inputfile <- paste(directory, set.table.phen1[i, 1], sep="", collapse="")
gene.set <- read.table(file=inputfile, sep="\t", header=T, comment.char="", as.is=T)
leading.set <- as.vector(gene.set[gene.set[,"CORE_ENRICHMENT"] == "YES", "SYMBOL"])
leading.lists <- c(leading.lists, list(leading.set))
if (i == 1) {
all.leading.genes <- leading.set
} else{
all.leading.genes <- union(all.leading.genes, leading.set)
}
}
# M: sets x genes 0/1 membership matrix over the union of leading-edge genes.
max.genes <- length(all.leading.genes)
M <- matrix(0, nrow=max.sets.phen1, ncol=max.genes)
for (i in 1:max.sets.phen1) {
M[i,] <- sign(match(all.leading.genes, as.vector(leading.lists[[i]]), nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
}
# Inter: pairwise Jaccard overlap (|intersection| / |union|) of the subsets.
Inter <- matrix(0, nrow=max.sets.phen1, ncol=max.sets.phen1)
for (i in 1:max.sets.phen1) {
for (j in 1:max.sets.phen1) {
Inter[i, j] <- length(intersect(leading.lists[[i]], leading.lists[[j]]))/length(union(leading.lists[[i]], leading.lists[[j]]))
}
}
Itable <- data.frame(Inter)
names(Itable) <- set.table.phen1[1:max.sets.phen1, 2]
row.names(Itable) <- set.table.phen1[1:max.sets.phen1, 2]
# Open the output device: interactive Windows sessions draw on screen (saved
# as jpeg below via savePlot); every other combination writes a PDF.
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, sep="", collapse="")
windows(height = width, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
}
GSEA.ConsPlot(Itable, col.names = set.table.phen1[1:max.sets.phen1, 2], main = " ", sub=paste("Leading Subsets Overlap ", doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ")
# Close (or, on interactive Windows, save) the device.
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# Save leading subsets in a GCT file
D.phen1 <- data.frame(M)
names(D.phen1) <- all.leading.genes
row.names(D.phen1) <- set.table.phen1[1:max.sets.phen1, 2]
output <- paste(directory, doc.string, ".leading.genes.", phen1, ".gct", sep="")
GSEA.write.gct(D.phen1, filename=output)
# Save leading subsets as a single gene set in a .gmt file
row.header <- paste(doc.string, ".all.leading.genes.", phen1, sep="")
output.line <- paste(all.leading.genes, sep="\t", collapse="\t")
output.line <- paste(row.header, row.header, output.line, sep="\t", collapse="")
output <- paste(directory, doc.string, ".all.leading.genes.", phen1, ".gmt", sep="")
write(noquote(output.line), file = output, ncolumns = length(output.line))
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
# Two-color map: light = gene absent, dark = gene in the leading subset.
cmap <- c("#AAAAFF", "#111166")
GSEA.HeatMapPlot2(V = data.matrix(D.phen1), row.names = row.names(D.phen1), col.names = names(D.phen1), main = "Leading Subsets Assignment", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
DT1.phen1 <- data.matrix(t(D.phen1))
DT2.phen1 <- data.frame(DT1.phen1)
names(DT2.phen1) <- set.table.phen1[1:max.sets.phen1, 2]
row.names(DT2.phen1) <- all.leading.genes
# GSEA.write.gct(DT2.phen1, filename=outputfile2.phen1)
# Analysis for phen2
# Same pipeline as above, applied to the second phenotype.
leading.lists <- NULL
for (i in 1:max.sets.phen2) {
inputfile <- paste(directory, set.table.phen2[i, 1], sep="", collapse="")
gene.set <- read.table(file=inputfile, sep="\t", header=T, comment.char="", as.is=T)
leading.set <- as.vector(gene.set[gene.set[,"CORE_ENRICHMENT"] == "YES", "SYMBOL"])
leading.lists <- c(leading.lists, list(leading.set))
if (i == 1) {
all.leading.genes <- leading.set
} else{
all.leading.genes <- union(all.leading.genes, leading.set)
}
}
max.genes <- length(all.leading.genes)
M <- matrix(0, nrow=max.sets.phen2, ncol=max.genes)
for (i in 1:max.sets.phen2) {
M[i,] <- sign(match(all.leading.genes, as.vector(leading.lists[[i]]), nomatch=0)) # notice that the sign is 0 (no tag) or 1 (tag)
}
Inter <- matrix(0, nrow=max.sets.phen2, ncol=max.sets.phen2)
for (i in 1:max.sets.phen2) {
for (j in 1:max.sets.phen2) {
Inter[i, j] <- length(intersect(leading.lists[[i]], leading.lists[[j]]))/length(union(leading.lists[[i]], leading.lists[[j]]))
}
}
Itable <- data.frame(Inter)
names(Itable) <- set.table.phen2[1:max.sets.phen2, 2]
row.names(Itable) <- set.table.phen2[1:max.sets.phen2, 2]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, sep="", collapse="")
windows(height = width, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.overlap.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = width, width = width)
}
}
GSEA.ConsPlot(Itable, col.names = set.table.phen2[1:max.sets.phen2, 2], main = " ", sub=paste("Leading Subsets Overlap ", doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# Save leading subsets in a GCT file
D.phen2 <- data.frame(M)
names(D.phen2) <- all.leading.genes
row.names(D.phen2) <- set.table.phen2[1:max.sets.phen2, 2]
output <- paste(directory, doc.string, ".leading.genes.", phen2, ".gct", sep="")
GSEA.write.gct(D.phen2, filename=output)
# Save primary subsets as a single gene set in a .gmt file
row.header <- paste(doc.string, ".all.leading.genes.", phen2, sep="")
output.line <- paste(all.leading.genes, sep="\t", collapse="\t")
output.line <- paste(row.header, row.header, output.line, sep="\t", collapse="")
output <- paste(directory, doc.string, ".all.leading.genes.", phen2, ".gmt", sep="")
write(noquote(output.line), file = output, ncolumns = length(output.line))
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
GSEA.HeatMapPlot2(V = data.matrix(D.phen2), row.names = row.names(D.phen2), col.names = names(D.phen2), main = "Leading Subsets Assignment", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
DT1.phen2 <- data.matrix(t(D.phen2))
DT2.phen2 <- data.frame(DT1.phen2)
names(DT2.phen2) <- set.table.phen2[1:max.sets.phen2, 2]
row.names(DT2.phen2) <- all.leading.genes
# GSEA.write.gct(DT2.phen2, filename=outputfile2.phen2)
# Resort columns and rows for phen1
A <- data.matrix(D.phen1)
A.row.names <- row.names(D.phen1)
A.names <- names(D.phen1)
# Max.genes
# init <- 1
# for (k in 1:max.sets.phen1) {
# end <- which.max(cumsum(A[k,]))
# if (end - init > 1) {
# B <- A[,init:end]
# B.names <- A.names[init:end]
# dist.matrix <- dist(t(B))
# HC <- hclust(dist.matrix, method="average")
## B <- B[,HC$order] + 0.2*(k %% 2)
# B <- B[,HC$order]
# A[,init:end] <- B
# A.names[init:end] <- B.names[HC$order]
# init <- end + 1
# }
# }
# windows(width=14, height=10)
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, sub = " ", main = paste("Primary Sets Assignment - ", doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ")
# Hierarchically cluster columns (genes), then rows (gene sets), of the
# membership matrix using average linkage on Euclidean distances.
dist.matrix <- dist(t(A))
HC <- hclust(dist.matrix, method="average")
A <- A[, HC$order]
A.names <- A.names[HC$order]
dist.matrix <- dist(A)
HC <- hclust(dist.matrix, method="average")
A <- A[HC$order,]
A.row.names <- A.row.names[HC$order]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
GSEA.HeatMapPlot2(V = t(A), row.names = A.names, col.names = A.row.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen1, sep=""), xlab=" ", ylab=" ", color.map = cmap)
# Dump the clustered (transposed: genes x sets) matrix to a tab-delimited file.
text.filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen1, ".txt", sep="", collapse="")
line.list <- c("Gene", A.row.names)
line.header <- paste(line.list, collapse="\t")
line.length <- length(A.row.names) + 1
write(line.header, file = text.filename, ncolumns = line.length)
write.table(t(A), file=text.filename, append = T, quote=F, col.names= F, row.names=T, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
# resort columns and rows for phen2
A <- data.matrix(D.phen2)
A.row.names <- row.names(D.phen2)
A.names <- names(D.phen2)
# Max.genes
# init <- 1
# for (k in 1:max.sets.phen2) {
# end <- which.max(cumsum(A[k,]))
# if (end - init > 1) {
# B <- A[,init:end]
# B.names <- A.names[init:end]
# dist.matrix <- dist(t(B))
# HC <- hclust(dist.matrix, method="average")
## B <- B[,HC$order] + 0.2*(k %% 2)
# B <- B[,HC$order]
# A[,init:end] <- B
# A.names[init:end] <- B.names[HC$order]
# init <- end + 1
# }
# }
# windows(width=14, height=10)
# GESA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, sub = " ", main = paste("Primary Sets Assignment - ", doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ")
dist.matrix <- dist(t(A))
HC <- hclust(dist.matrix, method="average")
A <- A[, HC$order]
A.names <- A.names[HC$order]
dist.matrix <- dist(A)
HC <- hclust(dist.matrix, method="average")
A <- A[HC$order,]
A.row.names <- A.row.names[HC$order]
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, sep="", collapse="")
windows(height = height, width = width)
} else if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
} else {
if (.Platform$OS.type == "unix") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
} else if (.Platform$OS.type == "windows") {
filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".pdf", sep="", collapse="")
pdf(file=filename, height = height, width = width)
}
}
cmap <- c("#AAAAFF", "#111166")
# GSEA.HeatMapPlot2(V = A, row.names = A.row.names, col.names = A.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
GSEA.HeatMapPlot2(V = t(A), row.names =A.names , col.names = A.row.names, main = "Leading Subsets Assignment (clustered)", sub = paste(doc.string, " - ", phen2, sep=""), xlab=" ", ylab=" ", color.map = cmap)
text.filename <- paste(directory, doc.string, ".leading.assignment.clustered.", phen2, ".txt", sep="", collapse="")
line.list <- c("Gene", A.row.names)
line.header <- paste(line.list, collapse="\t")
line.length <- length(A.row.names) + 1
write(line.header, file = text.filename, ncolumns = line.length)
write.table(t(A), file=text.filename, append = T, quote=F, col.names= F, row.names=T, sep = "\t")
if (non.interactive.run == F) {
if (.Platform$OS.type == "windows") {
savePlot(filename = filename, type ="jpeg", device = dev.cur())
} else if (.Platform$OS.type == "unix") {
dev.off()
}
} else {
dev.off()
}
}
|
## Add labels (titles) to ggplot2 charts -- mtcars walkthrough.
library(tidyverse)
datos <- mtcars
# Convert the discrete-valued columns to factors so ggplot maps them as categories.
datos <- datos %>%
mutate_at(vars(c("cyl", "vs", "am", "gear", "carb")), factor)
glimpse(datos)
## Initial plot: mpg against hp, colored by gear count, shaped by engine type (vs).
graf_ejemplo <- datos %>%
ggplot(aes(x = hp, y = mpg, col = gear, shape = vs)) +
geom_point(size = 2)
graf_ejemplo
## Add labels: title, subtitle, axis names, legend titles and caption.
## (The displayed strings are user-facing text and are left in Spanish.)
graf_ejemplo +
labs(title = "Gráfico de dispersión de hp vs mpg",
subtitle = "A mayor hp se observa menor mpg",
x = "hp \n(Caballos de fuerza)",
y = "mpg \n(Millas por galón)",
col = "Cantidad de cambios",
shape = "Tiene forma en V",
caption = "Fuente de datos: mtcars")
| /05 - Añadir etiquetas a los gráficos (títulos).R | no_license | delany-ramirez/ggplot2-tutorial | R | false | false | 680 | r | ## Añadir etiquetas a los gráficos
library(tidyverse)
datos <- mtcars
# Convert the discrete-valued columns to factors so ggplot maps them as categories.
datos <- datos %>%
mutate_at(vars(c("cyl", "vs", "am", "gear", "carb")), factor)
glimpse(datos)
## Initial plot: mpg against hp, colored by gear count, shaped by engine type (vs).
graf_ejemplo <- datos %>%
ggplot(aes(x = hp, y = mpg, col = gear, shape = vs)) +
geom_point(size = 2)
graf_ejemplo
## Add labels: title, subtitle, axis names, legend titles and caption.
## (The displayed strings are user-facing text and are left in Spanish.)
graf_ejemplo +
labs(title = "Gráfico de dispersión de hp vs mpg",
subtitle = "A mayor hp se observa menor mpg",
x = "hp \n(Caballos de fuerza)",
y = "mpg \n(Millas por galón)",
col = "Cantidad de cambios",
shape = "Tiene forma en V",
caption = "Fuente de datos: mtcars")
|
#' Start or stop indexing a document or many documents.
#'
#' @import httr
#' @param dbname Database name. (character)
#' @param endpoint the endpoint, defaults to localhost (http://127.0.0.1)
#' @param port port to connect to, defaults to 9200
#' @param what One of start (default) or stop.
#' @details The function returns TRUE. Though note that this can result even
#' if the database does not exist in CouchDB.
#' @references See docs for the Elasticsearch River plugin \url{#} that lets you
#' easily index CouchDB databases.
#' @examples \dontrun{
#' library(devtools)
#' install_github("sckott/sofa")
#' library(sofa)
#' sofa_createdb(dbname='mydb')
#' es_cdbriver_index(dbname='mydb')
#' es_cdbriver_index(dbname='mydb', what='stop')
#' }
#' @export
# NOTE(review): 'endpoint', 'port' and 'dbname' are used below but are not
# parameters of this function (the signature takes 'conn') -- they are
# presumably meant to come from 'conn' or the calling environment; the roxygen
# @param entries above do not match the signature either.  Verify with callers.
es_index <- function(conn, what='start')
{
if(what=='start'){
# Register a CouchDB river: PUT the river metadata document into Elasticsearch.
call_ <- sprintf("%s:%s/_river/%s/_meta", endpoint, port, dbname)
args <- paste0('{ "type" : "couchdb", "couchdb" : { "host" : "localhost", "port" : 5984, "db" : "', dbname, '", "filter" : null } }')
tt <- PUT(url = call_, body=args)
stop_for_status(tt)
content(tt)[1]
} else
{
# Stop indexing by deleting the river document.
call_ <- sprintf("%s:%s/_river/%s", endpoint, port, dbname)
DELETE(url = call_)
message("elastic river stopped")
}
} | /R/es_index.r | permissive | pqrkchqps/elastic | R | false | false | 1,265 | r | #' Start or stop indexing a document or many documents.
#'
#' @import httr
#' @param dbname Database name. (character)
#' @param endpoint the endpoint, defaults to localhost (http://127.0.0.1)
#' @param port port to connect to, defaults to 9200
#' @param what One of start (default) or stop.
#' @details The function returns TRUE. Though note that this can result even
#' if the database does not exist in CouchDB.
#' @references See docs for the Elasticsearch River plugin \url{#} that lets you
#' easily index CouchDB databases.
#' @examples \dontrun{
#' library(devtools)
#' install_github("sckott/sofa")
#' library(sofa)
#' sofa_createdb(dbname='mydb')
#' es_cdbriver_index(dbname='mydb')
#' es_cdbriver_index(dbname='mydb', what='stop')
#' }
#' @export
# NOTE(review): 'endpoint', 'port' and 'dbname' are used below but are not
# parameters of this function (the signature takes 'conn') -- they are
# presumably meant to come from 'conn' or the calling environment; the roxygen
# @param entries above do not match the signature either.  Verify with callers.
es_index <- function(conn, what='start')
{
if(what=='start'){
# Register a CouchDB river: PUT the river metadata document into Elasticsearch.
call_ <- sprintf("%s:%s/_river/%s/_meta", endpoint, port, dbname)
args <- paste0('{ "type" : "couchdb", "couchdb" : { "host" : "localhost", "port" : 5984, "db" : "', dbname, '", "filter" : null } }')
tt <- PUT(url = call_, body=args)
stop_for_status(tt)
content(tt)[1]
} else
{
# Stop indexing by deleting the river document.
call_ <- sprintf("%s:%s/_river/%s", endpoint, port, dbname)
DELETE(url = call_)
message("elastic river stopped")
}
}
# Build cross-validation fold assignments for each labeled changepoint
# dataset under data/*/labels.csv.xz, using four different splitting schemes
# (by chromosome, by profile size, by profile, by sequence).  Each scheme's
# fold table is written to <dataset>/cv/R-3.6.0-<scheme>/folds.csv.
source("packages.R")
data(neuroblastoma, package="neuroblastoma")
# Profiles with the largest number of data points (used by the profileSize split).
big.dt <- data.table(neuroblastoma$profiles)[, list(data=.N), by=list(profile.id, chromosome)][, list(min.data=min(data), max.data=max(data)), by=list(profile.id)][max.data==max(max.data)]
labels.xz.vec <- Sys.glob("data/*/labels.csv.xz")
N.folds <- 6
for(set.i in seq_along(labels.xz.vec)){
labels.xz <- labels.xz.vec[[set.i]]
labels.cmd <- paste("xzcat", labels.xz)
labels.dt <- fread(cmd=labels.cmd)
prob.dt <- labels.dt[, list(
labels=.N
), by=list(sequenceID)]
# Companion evaluation file lives next to the labels file.
eval.cmd <- sub("labels", "evaluation", labels.cmd)
eval.dt <- fread(cmd=eval.cmd)
# Parse sequenceID into profileID + chromosome parts.
head(match.dt <- namedCapture::df_match_variable(
prob.dt,
sequenceID=list(
profileID="[0-9]+",
"_",
chrom="chr.*")))
table(match.dt$sequenceID.chrom)
# Randomly assign each unique value of dt[[col.name]] to one of n.folds folds,
# returning a fold id per row of dt.
randcol <- function(dt, col.name, n.folds=N.folds){
unique.folds <- 1:n.folds
col.vec <- dt[[col.name]]
u.vec <- unique(col.vec)
fold <- sample(rep(unique.folds, l=length(u.vec)))
names(fold) <- u.vec
fold[paste(col.vec)]
}
# One fold-assignment function per splitting scheme.
fun.list <- list(
chrom=function(dt){
as.integer(factor(dt$sequenceID.chrom))
},
profileSize=function(dt){
randcol(dt, "sequenceID.profileID", N.folds/2)+3*ifelse(
dt$sequenceID.profileID %in% big.dt$profile.id, 0, 1)
},
profileID=function(dt){
randcol(dt, "sequenceID.profileID")
},
sequenceID=function(dt){
randcol(dt, "sequenceID")
})
for(split.name in names(fun.list)){
fun <- fun.list[[split.name]]
set.seed(1)
fold.vec <- fun(match.dt)
print(table(fold.vec))
cv.dir <- file.path(dirname(labels.xz), "cv", paste0("R-3.6.0-", split.name))
prob.folds <- prob.dt[, data.table(
sequenceID, fold=fold.vec)]
# Sanity check: each sequenceID must land in exactly one fold.
fold.counts <- prob.folds[, list(
folds=length(unique(fold))
), by=list(sequenceID)]
bad <- fold.counts[folds != 1]
if(nrow(bad)){
print(bad)
stop("some sequenceID numbers appear in more than one fold")
}
# Baseline AUC per fold with a constant (zero) predicted penalty.
print(auc.dt <- prob.folds[, {
pred.dt <- data.table(sequenceID, pred.log.lambda=0)
L <- penaltyLearning::ROChange(eval.dt, pred.dt, "sequenceID")
p <- L$thresholds[threshold=="predicted"]
list(auc=L$auc, possible.fn=p$possible.fn, possible.fp=p$possible.fp)
}, by=list(fold)])
u.folds <- unique(prob.folds)[order(sequenceID)]
dir.create(cv.dir, showWarnings=FALSE, recursive=TRUE)
print(folds.csv <- file.path(cv.dir, "folds.csv"))
fwrite(u.folds, folds.csv)
}
}
| /cv.R | no_license | akhikolla/neuroblastoma | R | false | false | 2,530 | r | source("packages.R")
# Build cross-validation fold assignments for each labeled changepoint
# dataset under data/*/labels.csv.xz, using four different splitting schemes
# (by chromosome, by profile size, by profile, by sequence).  Each scheme's
# fold table is written to <dataset>/cv/R-3.6.0-<scheme>/folds.csv.
data(neuroblastoma, package="neuroblastoma")
# Profiles with the largest number of data points (used by the profileSize split).
big.dt <- data.table(neuroblastoma$profiles)[, list(data=.N), by=list(profile.id, chromosome)][, list(min.data=min(data), max.data=max(data)), by=list(profile.id)][max.data==max(max.data)]
labels.xz.vec <- Sys.glob("data/*/labels.csv.xz")
N.folds <- 6
for(set.i in seq_along(labels.xz.vec)){
labels.xz <- labels.xz.vec[[set.i]]
labels.cmd <- paste("xzcat", labels.xz)
labels.dt <- fread(cmd=labels.cmd)
prob.dt <- labels.dt[, list(
labels=.N
), by=list(sequenceID)]
# Companion evaluation file lives next to the labels file.
eval.cmd <- sub("labels", "evaluation", labels.cmd)
eval.dt <- fread(cmd=eval.cmd)
# Parse sequenceID into profileID + chromosome parts.
head(match.dt <- namedCapture::df_match_variable(
prob.dt,
sequenceID=list(
profileID="[0-9]+",
"_",
chrom="chr.*")))
table(match.dt$sequenceID.chrom)
# Randomly assign each unique value of dt[[col.name]] to one of n.folds folds,
# returning a fold id per row of dt.
randcol <- function(dt, col.name, n.folds=N.folds){
unique.folds <- 1:n.folds
col.vec <- dt[[col.name]]
u.vec <- unique(col.vec)
fold <- sample(rep(unique.folds, l=length(u.vec)))
names(fold) <- u.vec
fold[paste(col.vec)]
}
# One fold-assignment function per splitting scheme.
fun.list <- list(
chrom=function(dt){
as.integer(factor(dt$sequenceID.chrom))
},
profileSize=function(dt){
randcol(dt, "sequenceID.profileID", N.folds/2)+3*ifelse(
dt$sequenceID.profileID %in% big.dt$profile.id, 0, 1)
},
profileID=function(dt){
randcol(dt, "sequenceID.profileID")
},
sequenceID=function(dt){
randcol(dt, "sequenceID")
})
for(split.name in names(fun.list)){
fun <- fun.list[[split.name]]
set.seed(1)
fold.vec <- fun(match.dt)
print(table(fold.vec))
cv.dir <- file.path(dirname(labels.xz), "cv", paste0("R-3.6.0-", split.name))
prob.folds <- prob.dt[, data.table(
sequenceID, fold=fold.vec)]
# Sanity check: each sequenceID must land in exactly one fold.
fold.counts <- prob.folds[, list(
folds=length(unique(fold))
), by=list(sequenceID)]
bad <- fold.counts[folds != 1]
if(nrow(bad)){
print(bad)
stop("some sequenceID numbers appear in more than one fold")
}
# Baseline AUC per fold with a constant (zero) predicted penalty.
print(auc.dt <- prob.folds[, {
pred.dt <- data.table(sequenceID, pred.log.lambda=0)
L <- penaltyLearning::ROChange(eval.dt, pred.dt, "sequenceID")
p <- L$thresholds[threshold=="predicted"]
list(auc=L$auc, possible.fn=p$possible.fn, possible.fp=p$possible.fp)
}, by=list(fold)])
u.folds <- unique(prob.folds)[order(sequenceID)]
dir.create(cv.dir, showWarnings=FALSE, recursive=TRUE)
print(folds.csv <- file.path(cv.dir, "folds.csv"))
fwrite(u.folds, folds.csv)
}
}
|
### phylosampling plotting functions ###
###############################################################################
# A function producing a plot of three variables
# using the phylosampling function specified
# and fixed sensitivity and specificity
# Line plot of a phylosampling quantity as a function of rho (sampling
# proportion) or M (sample size), with one line per value of the other
# variable.  Returns a ggplot object (printed by the caller).
plt.eq <- function(chi, # number: specificity of the linkage criteria
eta, # number: sensitivity of the linkage criteria
R=1, # [optional] number: effective reproductive number
rho, # vector: values of rho to evaluate
M, # vector: values of M to evaluate
x="rho", # string: which variable to put on the x axis
eq, # string: phylosampling function to evaluate
lbls=c("",""), # labels for plot as: c("xlab","ylab")
inverse=FALSE, # [optional] TRUE to plot 1 minus result of equation
legend=TRUE # [optional] TRUE to show legend to the right of the plot
){
# set up the dataframe to be used in plotting
# Column 'x' holds the x-axis values; one extra column per line, whose
# name ("M=..." / "rho=...") becomes the legend label after melting.
if (x == "rho"){
g <- expand.grid(rho)
names(g) <- c('x')
for (i in seq(1,length(M))){
cname <- paste("M=",M[i],sep="") # set name for column to be added
if (inverse == FALSE){
g <- cbind(g, eq(chi, g$x, M[i], eta, R))
}
else {
g <- cbind(g, 1-eq(chi, g$x, M[i], eta, R))
}
colnames(g)[length(colnames(g))] <- cname
}
}
else if (x == "M"){
g <- expand.grid(M)
names(g) <- c('x')
for (i in seq(1,length(rho))){
cname <- paste("rho=",rho[i],sep="") # set name for column to be added
if (inverse == FALSE){
# NOTE(review): eq is called positionally as eq(chi, rho, M, eta, R)
# here -- verify the argument order against the eq functions used.
g <- cbind(g, eq(chi, rho[i], g$x, eta, R))
}
else {
g <- cbind(g, 1-eq(chi, rho[i], g$x, eta, R))
}
colnames(g)[length(colnames(g))] <- cname
}
}
else {
return("Error: x axis variable must be either rho or M")
}
# set up the plot
melted.g <- melt(g, id = 'x')
ggplot(melted.g, aes(x = x, y = value, colour = variable)) +
geom_line(show.legend = legend) +
xlab(lbls[1]) +
ylab(lbls[2])
}
###############################################################################
# A function producing a heatmap of the false discovery rate
# for different values of sensitivity and specificity
plt.heatmap <- function(chi, # vector: specificity of the linkage criteria
                        eta, # vector: sensitivity of the linkage criteria
                        R=0, # number: effective reproductive number
                        rho, # number: sampling proportion
                        M, # number: sample size
                        eq # string: phylosampling function to evaluate
                        ){
  # Heatmap (lattice levelplot) of the false discovery rate, 1 - eq(...),
  # evaluated on the full chi x eta grid at fixed rho, M and R.
  grid.df <- expand.grid(chi, eta)
  names(grid.df) <- c('chi', 'eta')
  grid.df$FDR <- 1 - eq(chi = grid.df$chi, eta = grid.df$eta, rho = rho, M = M, R = R)
  # Reversed sequential palette: high FDR maps to the dark end.
  palette <- rev(sequential_hcl(100))
  levelplot(FDR ~ chi*eta, data = grid.df, col.regions = palette)
}
| /R/sw.func.plot.R | no_license | gilesjohnr/PhyloSampling | R | false | false | 3,242 | r | ### phylosampling plotting functions ###
###############################################################################
# A function producing a plot of three variables
# using the phylosampling function specified
# and fixed sensitivity and specificity
# Line plot of a phylosampling quantity as a function of rho (sampling
# proportion) or M (sample size), with one line per value of the other
# variable.  Returns a ggplot object (printed by the caller).
plt.eq <- function(chi, # number: specificity of the linkage criteria
eta, # number: sensitivity of the linkage criteria
R=1, # [optional] number: effective reproductive number
rho, # vector: values of rho to evaluate
M, # vector: values of M to evaluate
x="rho", # string: which variable to put on the x axis
eq, # string: phylosampling function to evaluate
lbls=c("",""), # labels for plot as: c("xlab","ylab")
inverse=FALSE, # [optional] TRUE to plot 1 minus result of equation
legend=TRUE # [optional] TRUE to show legend to the right of the plot
){
# set up the dataframe to be used in plotting
# Column 'x' holds the x-axis values; one extra column per line, whose
# name ("M=..." / "rho=...") becomes the legend label after melting.
if (x == "rho"){
g <- expand.grid(rho)
names(g) <- c('x')
for (i in seq(1,length(M))){
cname <- paste("M=",M[i],sep="") # set name for column to be added
if (inverse == FALSE){
g <- cbind(g, eq(chi, g$x, M[i], eta, R))
}
else {
g <- cbind(g, 1-eq(chi, g$x, M[i], eta, R))
}
colnames(g)[length(colnames(g))] <- cname
}
}
else if (x == "M"){
g <- expand.grid(M)
names(g) <- c('x')
for (i in seq(1,length(rho))){
cname <- paste("rho=",rho[i],sep="") # set name for column to be added
if (inverse == FALSE){
# NOTE(review): eq is called positionally as eq(chi, rho, M, eta, R)
# here -- verify the argument order against the eq functions used.
g <- cbind(g, eq(chi, rho[i], g$x, eta, R))
}
else {
g <- cbind(g, 1-eq(chi, rho[i], g$x, eta, R))
}
colnames(g)[length(colnames(g))] <- cname
}
}
else {
return("Error: x axis variable must be either rho or M")
}
# set up the plot
melted.g <- melt(g, id = 'x')
ggplot(melted.g, aes(x = x, y = value, colour = variable)) +
geom_line(show.legend = legend) +
xlab(lbls[1]) +
ylab(lbls[2])
}
###############################################################################
# A function producing a heatmap of the false discovery rate
# for different values of sensitivity and specificity
plt.heatmap <- function(chi, # vector: specificity of the linkage criteria
                        eta, # vector: sensitivity of the linkage criteria
                        R=0, # number: effective reproductive number
                        rho, # number: sampling proportion
                        M, # number: sample size
                        eq # string: phylosampling function to evaluate
                        ){
  # Heatmap (lattice levelplot) of the false discovery rate, 1 - eq(...),
  # evaluated on the full chi x eta grid at fixed rho, M and R.
  grid.df <- expand.grid(chi, eta)
  names(grid.df) <- c('chi', 'eta')
  grid.df$FDR <- 1 - eq(chi = grid.df$chi, eta = grid.df$eta, rho = rho, M = M, R = R)
  # Reversed sequential palette: high FDR maps to the dark end.
  palette <- rev(sequential_hcl(100))
  levelplot(FDR ~ chi*eta, data = grid.df, col.regions = palette)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visSelectNodes.R
\name{visSelectNodes}
\alias{visSelectNodes}
\title{Function to select node(s) from network, with shiny only.}
\usage{
visSelectNodes(graph, id, highlightEdges = TRUE, clickEvent = TRUE)
}
\arguments{
\item{graph}{: a \code{\link{visNetworkProxy}} object}
\item{id}{: vector of id, node(s) to select}
\item{highlightEdges}{: Boolean. highlight Edges also ? Default to TRUE}
\item{clickEvent}{: Boolean. Launch click event ? (highlightNearest for example) Default to TRUE}
}
\description{
Function to select node(s) from network, with shiny only.
}
\examples{
\dontrun{
# have a look to :
shiny::runApp(system.file("shiny", package = "visNetwork"))
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLegend} for adding legend, \link{visOptions} for custom option, \link{visLayout} & \link{visHierarchicalLayout} for layout,
\link{visPhysics} for control physics, \link{visInteraction} for interaction, \link{visNetworkProxy} & \link{visFocus} & \link{visFit} for animation within shiny,
\link{visDocumentation}, \link{visEvents}, \link{visConfigure} ...
}
| /man/visSelectNodes.Rd | no_license | cran/visNetwork | R | false | true | 1,368 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visSelectNodes.R
\name{visSelectNodes}
\alias{visSelectNodes}
\title{Function to select node(s) from network, with shiny only.}
\usage{
visSelectNodes(graph, id, highlightEdges = TRUE, clickEvent = TRUE)
}
\arguments{
\item{graph}{: a \code{\link{visNetworkProxy}} object}
\item{id}{: vector of id, node(s) to select}
\item{highlightEdges}{: Boolean. highlight Edges also ? Default to TRUE}
\item{clickEvent}{: Boolean. Launch click event ? (highlightNearest for example) Default to TRUE}
}
\description{
Function to select node(s) from network, with shiny only.
}
\examples{
\dontrun{
# have a look to :
shiny::runApp(system.file("shiny", package = "visNetwork"))
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLegend} for adding legend, \link{visOptions} for custom option, \link{visLayout} & \link{visHierarchicalLayout} for layout,
\link{visPhysics} for control physics, \link{visInteraction} for interaction, \link{visNetworkProxy} & \link{visFocus} & \link{visFit} for animation within shiny,
\link{visDocumentation}, \link{visEvents}, \link{visConfigure} ...
}
|
# Simulation for non-inferiority analysis under missing data (scenario 19,
# Wald/Newcombe "wn" CI method, 15% dropout, single analysis).  Runs 10,000
# replicates in parallel; each replicate simulates a full trial, computes the
# CI, imposes missingness under several mechanisms, and re-analyzes.
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wn'
scenario <- 19
param <- 1
anal_type <- "sing"
# Sample-size/bound settings for this method and scenario.
ss <- ss.bounds%>%
dplyr::filter(method == "wn", scenario.id == scenario)
do_val <- 0.15
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(mice, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
# Per-replicate seed: deterministic given scenario and replicate index.
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
# CI on the complete (pre-missingness) data.
ci.full <- dt0%>%wn_ci(ss$M2,'y')
#define missingness parameters and do rates
m.param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss <- m.param%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wn_ci,
sing_anal = T,
mice_anal = F,
m2 = ss$M2, seed = 10000*scenario + x,
method = method,
alpha = alpha
))%>%
dplyr::select(missing, results)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H1',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize power from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)%>%
dplyr::select(-mean.bias)
| /sim_pgms/wn/do15/2xcontH1_sc19_do15_sing.R | no_license | yuliasidi/nibinom_apply | R | false | false | 2,221 | r | library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wn'
scenario <- 19
param <- 1
anal_type <- "sing"
ss <- ss.bounds%>%
dplyr::filter(method == "wn", scenario.id == scenario)
do_val <- 0.15
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCores() - 1,
FUN= function(x) {
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(reshape2, warn.conflicts = F, quietly = T)
library(mice, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(nibinom)
set.seed(10000*scenario + x)
#generate full data with desired correlation structure
dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C, n_arm = ss$n.arm,
mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
ci.full <- dt0%>%wn_ci(ss$M2,'y')
#define missingness parameters and do rates
m.param <- mpars(do = do_val, atype = anal_type)
#impose missing values and perform analysis
ci.miss <- m.param%>%
dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
miss_gen_an, dt = dt0, do = do_val,
ci_method = wn_ci,
sing_anal = T,
mice_anal = F,
m2 = ss$M2, seed = 10000*scenario + x,
method = method,
alpha = alpha
))%>%
dplyr::select(missing, results)%>%
dplyr::mutate(scenario.id = ss$scenario.id,
p_C = ss$p_C,
M2 = ss$M2,
type = 't.H1',
do = do_val,
sim.id = x)
ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
return(ci.all)
})
#to summarize power from the simulated data
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)%>%
dplyr::select(-mean.bias)
|
myData <- read.table("household_power_consumption.txt", sep=";", na.strings = "?",header=TRUE, stringsAsFactors =FALSE)
plotData <- subset(myData, myData$Date == "1/2/2007" | myData$Date == "2/2/2007")
plotData$DateTime <- strptime(paste(plotData$Date, plotData$Time), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(plotData$DateTime,plotData$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | Abyrk/ExData_Plotting1 | R | false | false | 452 | r | myData <- read.table("household_power_consumption.txt", sep=";", na.strings = "?",header=TRUE, stringsAsFactors =FALSE)
plotData <- subset(myData, myData$Date == "1/2/2007" | myData$Date == "2/2/2007")
plotData$DateTime <- strptime(paste(plotData$Date, plotData$Time), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(plotData$DateTime,plotData$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off() |
#' Change Point Test for Regression
#'
#' @name mcusum.test-defunct
#'
#' @seealso \code{\link{funtimes-defunct}}
#'
#' @keywords internal
NULL
#' @rdname funtimes-defunct
#' @section \code{mcusum.test}:
#' For \code{mcusum.test}, use \code{\link{mcusum_test}}.
#'
#' @author Vyacheslav Lyubchich
#'
#' @export
#'
mcusum.test <- function(...)
{
.Defunct("mcusum_test", msg = "mcusum.test is defunct (removed). Use mcusum_test instead.")
}
| /R/mcusum.test.R | no_license | cran/funtimes | R | false | false | 470 | r | #' Change Point Test for Regression
#'
#' @name mcusum.test-defunct
#'
#' @seealso \code{\link{funtimes-defunct}}
#'
#' @keywords internal
NULL
#' @rdname funtimes-defunct
#' @section \code{mcusum.test}:
#' For \code{mcusum.test}, use \code{\link{mcusum_test}}.
#'
#' @author Vyacheslav Lyubchich
#'
#' @export
#'
mcusum.test <- function(...)
{
.Defunct("mcusum_test", msg = "mcusum.test is defunct (removed). Use mcusum_test instead.")
}
|
evaluate_variance <- function(coverage, nSamples, wSites, lab_pool, minus_condition,
use_cpp = TRUE, s.size, Designs, Global_lower) {
## Get dictionary {a:1, b:2, c:3, d:4,...}
getDict <- function(s) {
# only for lower case mapping
return(match(tolower(s), letters))
}
## create pair sample for within labels
create_labels <- function(nSamples) {
if (nSamples > 3) {
# create colnames for Within matrix
matLab <- combn(letters[1:nSamples], 2)
labs <- apply(matLab, 2, FUN = function(x) paste(x[1], x[2], sep='') )
temp <- c()
check_labs <- rep(FALSE, length(labs)); names(check_labs) <- labs
for (l in labs) {
l_exclude <- setdiff(letters[1:nSamples], c( substr(l, 1, 1), substr(l, 2, 2)))
mat_exclude <- apply(combn(l_exclude, 2), 2, FUN = function(x) paste(x[1], x[2], sep='') )
for (i in 1:length(mat_exclude)) {
if (!check_labs[mat_exclude[i]]) {
temp <- c(temp, paste(l, mat_exclude[i], sep = ''))
}
}
check_labs[l] <- TRUE
}
temp_plus <- toupper(temp)
temp <- unlist(lapply(temp, function(s) paste(substr(s, 1, 2), substr(s, 3, 4), sep = ' vs ' )))
temp <- c(temp,unlist(lapply(temp_plus, function(s) paste(substr(s, 1, 2), substr(s, 3, 4), sep = ' vs ' ))))
return(list('withinLabel' = temp))
}
else if (nSamples == 3) {
temp <- c("ab vs ac", "ab vs bc", "ac vs bc",
"AB vs AC", "AB vs BC", "AC vs BC")
return(list('withinLabel' = temp))
}
}
## get index {1,2,...} for within's labels
create_indexList <- function(nSamples) {
indexList <- list()
if (nSamples > 3) {
withinLabel <- create_labels(nSamples)[[1]]
numTests <- floor(length(withinLabel) / 2)
minusLabel <- withinLabel[1:numTests]
# sampleNames <- unlist(strsplit(minusLabel, ' vs '))
for (i in 1:numTests) {
pairSample <- unlist(strsplit(minusLabel[i], ' vs '))
s <- c(substr(pairSample[1], 1, 1), substr(pairSample[1], 2, 2),
substr(pairSample[2], 1, 1), substr(pairSample[2], 2, 2))
s <- sapply(s, getDict)
names(s) <- NULL
indexList[[i]] <- s
}
return(indexList)
}
else if (nSamples == 3) {
indexList[[1]] <- c(1,2,1,3) # ab vs ac
indexList[[2]] <- c(1,2,2,3) # ab vs bc
indexList[[3]] <- c(1,3,2,3) # ac vs bc
return(indexList)
}
}
### MAIN ###
Var <- list()
sitesUnused <- c()
## n = 4: ab vs cd, ac vs bd, ad vs bc, and other three for second cond
## n = 3: ab vs ac, ab vs bc, ac vs bc, and other three for second cond
withinLabel <- create_labels(nSamples)[['withinLabel']]
for (bin in 1:length(wSites)) {
print(paste('bin = ', bin))
if (length(wSites[[bin]]) > 0 ) {
sites <- wSites[[bin]]
df <- data.frame()
varList <- list()
minGlobal <- Inf
count <- 1
if (length(sites)>0) {
for (site in sites) {
# print(paste(' -------------------- site = ', site))
testList <- list()
withinX <- withinY <- list()
numTests <- c()
indexList <- list()
if (minus_condition == TRUE) {
indexList <- create_indexList(nSamples)
numTests <- length(indexList)
for (tt in 1:numTests) {
ids <- indexList[[tt]]
withinX[[tt]] <- coverage[[site]][ids[1:2], ]
withinY[[tt]] <- coverage[[site]][ids[3:4],]
}
}
else {
indexList <- create_indexList(nSamples)
numTests <- length(indexList)
for (tt in 1:numTests) {
ids <- indexList[[tt]] + nSamples
withinX[[tt]] <- coverage[[site]][ids[1:2], ]
withinY[[tt]] <- coverage[[site]][ids[3:4],]
}
}
if ( dim(withinX[[1]])[2] < s.size ) {
if (use_cpp) {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::compute_Var(X, Y, na_rm = TRUE, pool = FALSE)
}
}
else {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::AN.test(X, Y, na_rm = TRUE)
}
}
}
else {
design <- Designs[site, ]
if (use_cpp) {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::compute_Var(X[, design], Y[, design], na_rm = TRUE, pool = FALSE)
}
}
else {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::AN.test(X[, design], Y[, design], na_rm = TRUE)
}
}
}
lenIndices <- c()
for (tt in 1:numTests) {
test_ <- testList[[tt]]
lenIndices <- c(lenIndices, length(test_$varX), length(test_$varY))
}
minIndex <- min(lenIndices)
## check minIndex > Global_lower (lower bound for pooled var vector of each bins)
if (minIndex > Global_lower) {
if (minGlobal > minIndex) {
minGlobal <- minIndex
}
## df <- data.frame('ab' = test1$varX[1:minIndex], 'ac' = test2$varX[1:minIndex],
## 'ad' = test3$varX[1:minIndex], 'bc' = test3$varY[1:minIndex],
## 'bd' = test2$varY[1:minIndex], 'cd' = test1$varY[1:minIndex])
df <- data.frame(matrix(NA, nrow = minIndex, ncol = length(lab_pool)))
if (nSamples > 3) {
col_id <- 1
for (tt in 1:numTests) {
test_ <- testList[[tt]]
ids <- indexList[[tt]]
df[, col_id] <- test_$varX[1:minIndex]
df[, col_id + 1] <- test_$varY[1:minIndex]
colnames(df)[col_id:(col_id+1)] <- c(paste(letters[ids[1:2]], collapse = ""),
paste(letters[ids[3:4]], collapse = ""))
col_id <- col_id + 2
}
} else if (nSamples == 3) {
df <- data.frame('ab' = testList[[1]]$varX[1:minIndex],
'ac' = testList[[1]]$varY[1:minIndex],
'bc' = testList[[2]]$varY[1:minIndex])
}
varList[[count]] <- df
count <- count + 1
}
# store all unused sites (minIndex < Global_lower): minIndex = 0 -> var empty due to flat peak,
# or many repeated counts, or peakLength too small. 03/24/17
else {
sitesUnused <- c(sitesUnused, site)
}
} # end of for (site in sites)
## Pooling variances across sites in bin
poolVar <- list()
print(paste(" +++ minGlobal = ", minGlobal, sep = ""))
## Case: minGlobal < Inf
if (minGlobal < Inf) {
matVar <- matrix(NA, nrow = length(varList), ncol = minGlobal) # @: Case minGlobal = Inf
for (pair in lab_pool) {
for (i in 1:length(varList)) {
matVar[i, ] <- varList[[i]][1:minGlobal, pair]
}
var <- apply(matVar, 2, function(x) quantile(x, probs = poolQuant, na.rm = TRUE))
if ( length(var) >= movAve ) {
var <- tan::movingAverage(var, movAve)
}
poolVar[[pair]] <- var
}
Var[[bin]] <- poolVar
}
## Case: minGlobal = Inf
else {
message("minGlobal = Inf: 1. Variance vector for this bin returned NA,
and/or 2. Sites in this bin stored in sitesUnused slot")
Var[[bin]] <- NA
}
}
}
}
return(list('Var' = Var, 'sitesUnused' = sitesUnused))
}
.calculateVariance <- function(object, minus_condition, Global_lower, poolQuant, movAve, ...) {
if (object@nSamples > 2 ) {
if (minus_condition == TRUE) {
print("Calculating Variance for first condition")
} else {
print("Calculating Variance for second condition")
}
}
else if (object@nSamples == 2) {
print("Calculating pool Variance for both conditions")
}
### MAIN ###
Var <- list()
if (object@nSamples > 2) {
print(paste("Calculating pooled variance for sample size n = ", object@nSamples), sep = "")
## n=4: lab_pool <- c('ab', 'ac', 'ad', 'bc', 'bd', 'cd')
## n=3: lab_pool <- c('ab', 'ac', 'bc')
lab_pool <- colnames(object@Ns)[1:(dim(object@Ns)[2] / 2)]
sitesUnused <- c()
resultList <- evaluate_variance(coverage = object@coverage, nSamples = object@nSamples,
wSites = object@wSites, lab_pool = lab_pool,
minus_condition = minus_condition, use_cpp = use_cpp,
s.size = object@s.size, Designs = object@Designs,
Global_lower = Global_lower)
Var <- resultList[['Var']]
sitesUnused <- resultList[['sitesUnused']]
object@sitesUnused <- unique(c(object@sitesUnused, sitesUnused))
} # end of if (n > 2)
else if (object@nSamples == 2) {
print(paste("Calculating pooled variance for sample size n = ", object@nSamples), sep = "")
Var <- list()
lab_pool <- c('ab', 'aA', 'aB', 'AB', 'bB', 'Ab')
sitesUnused <- c()
for (bin in 1:length(object@wSites)) {
print(paste('bin = ', bin))
sites <- object@wSites[[bin]]
df <- data.frame()
varList <- list()
minGlobal <- Inf
count <- 1
if (length(sites) > 0) {
for (site in sites) {
# print(paste('site = ', site))
geta <- object@coverage[[site]][1,]
getb <- object@coverage[[site]][2,]
getA <- object@coverage[[site]][3,]
getB <- object@coverage[[site]][4,]
X1 <- rbind( geta, getb)
Y1 <- rbind( getA, getB)
X2 <- rbind( geta, getA)
Y2 <- rbind( getb, getB)
X3 <- rbind( geta, getB)
Y3 <- rbind( getA, getb)
if ( dim(X1)[2] < object@s.size ) {
if (use_cpp) {
test1 <- tan::compute_Var(X1, Y1, na_rm = TRUE, pool = FALSE)
test2 <- tan::compute_Var(X2, Y2, na_rm = TRUE, pool = FALSE)
test3 <- tan::compute_Var(X3, Y3, na_rm = TRUE, pool = FALSE)
}
else {
test1 <- tan::AN.test(X1, Y1, na.rm=TRUE)
test2 <- tan::AN.test(X2, Y2, na.rm=TRUE)
test3 <- tan::AN.test(X3, Y3, na.rm=TRUE)
}
}
else {
design <- object@Designs[site, ]
if (use_cpp) {
test1 <- tan::compute_Var(X1[, design], Y1[, design], na_rm = TRUE, pool = FALSE)
test2 <- tan::compute_Var(X2[, design], Y2[, design], na_rm = TRUE, pool = FALSE)
test3 <- tan::compute_Var(X3[, design], Y3[, design], na_rm = TRUE, pool = FALSE)
}
else {
test1 <- tan::AN.test(X1[, design], Y1[, design], na.rm=TRUE)
test2 <- tan::AN.test(X2[, design], Y2[, design], na.rm=TRUE)
test3 <- tan::AN.test(X3[, design], Y3[, design], na.rm=TRUE)
}
}
minIndex <- min(c(length(test1$varX), length(test2$varX),length(test3$varX),
length(test1$varY), length(test2$varY), length(test3$varY)))
## check minIndex > Global_lower (lower bound for pooled var vector of each bins)
if (minIndex > Global_lower) {
if (minGlobal > minIndex) {
minGlobal <- minIndex
}
df <- data.frame('ab' = test1$varX[1:minIndex], 'aA' = test2$varX[1:minIndex], 'aB' = test3$varX[1:minIndex],
'AB' = test1$varY[1:minIndex], 'bB' = test2$varY[1:minIndex], 'Ab' = test3$varY[1:minIndex])
varList[[count]] <- df
count <- count + 1 # keep track of all sites in bin i
} # end of if (minIndex > Global_lower)
# store all unused sites (minIndex < Global_lower): minIndex = 0 -> var empty due to flat peak,
# or manyrepeated counts: 03/24/17
else {
sitesUnused <- c(sitesUnused, site)
}
} # end of for (site in sites)
## Pooling variances across sites in bin
poolVar <- list()
print(paste(" +++ minGlobal = ", minGlobal, sep = ""))
## Case: minGlobal < Inf
if (minGlobal < Inf) {
matVar <- matrix(NA, nrow = length(varList), ncol = minGlobal)
for (pair in lab_pool) {
for (i in 1:length(varList)) {
matVar[i,] <- varList[[i]][1:minGlobal, pair]
}
var <- apply(matVar, 2, function(x) quantile(x, probs = poolQuant, na.rm = TRUE))
if ( length(var) >= movAve ) {
var <- tan::movingAverage(var, movAve)
}
poolVar[[pair]] <- var
}
Var[[bin]] <- poolVar
}
## Case: minGlobal = Inf
else {
message("minGlobal = Inf: 1. Variance vector for this bin returned NA, and 2. Sites in this bin stored in sitesUnused slot")
Var[[bin]] <- NA
}
}
} # end of bin
object@sitesUnused <- unique(c(object@sitesUnused, sitesUnused))
}
# return results
if (object@nSamples == 2) {
object@poolVar <- Var
}
else if (object@nSamples > 2 ) {
if (minus_condition) {
object@minusVar <- Var
} else {
object@plusVar <- Var
}
}
object
}
setMethod("calculateVariance", signature("tanDb"), .calculateVariance)
| /R/calculateVariance.R | no_license | duydnguyen/tan | R | false | false | 16,820 | r | evaluate_variance <- function(coverage, nSamples, wSites, lab_pool, minus_condition,
use_cpp = TRUE, s.size, Designs, Global_lower) {
## Get dictionary {a:1, b:2, c:3, d:4,...}
getDict <- function(s) {
# only for lower case mapping
return(match(tolower(s), letters))
}
## create pair sample for within labels
create_labels <- function(nSamples) {
if (nSamples > 3) {
# create colnames for Within matrix
matLab <- combn(letters[1:nSamples], 2)
labs <- apply(matLab, 2, FUN = function(x) paste(x[1], x[2], sep='') )
temp <- c()
check_labs <- rep(FALSE, length(labs)); names(check_labs) <- labs
for (l in labs) {
l_exclude <- setdiff(letters[1:nSamples], c( substr(l, 1, 1), substr(l, 2, 2)))
mat_exclude <- apply(combn(l_exclude, 2), 2, FUN = function(x) paste(x[1], x[2], sep='') )
for (i in 1:length(mat_exclude)) {
if (!check_labs[mat_exclude[i]]) {
temp <- c(temp, paste(l, mat_exclude[i], sep = ''))
}
}
check_labs[l] <- TRUE
}
temp_plus <- toupper(temp)
temp <- unlist(lapply(temp, function(s) paste(substr(s, 1, 2), substr(s, 3, 4), sep = ' vs ' )))
temp <- c(temp,unlist(lapply(temp_plus, function(s) paste(substr(s, 1, 2), substr(s, 3, 4), sep = ' vs ' ))))
return(list('withinLabel' = temp))
}
else if (nSamples == 3) {
temp <- c("ab vs ac", "ab vs bc", "ac vs bc",
"AB vs AC", "AB vs BC", "AC vs BC")
return(list('withinLabel' = temp))
}
}
## get index {1,2,...} for within's labels
create_indexList <- function(nSamples) {
indexList <- list()
if (nSamples > 3) {
withinLabel <- create_labels(nSamples)[[1]]
numTests <- floor(length(withinLabel) / 2)
minusLabel <- withinLabel[1:numTests]
# sampleNames <- unlist(strsplit(minusLabel, ' vs '))
for (i in 1:numTests) {
pairSample <- unlist(strsplit(minusLabel[i], ' vs '))
s <- c(substr(pairSample[1], 1, 1), substr(pairSample[1], 2, 2),
substr(pairSample[2], 1, 1), substr(pairSample[2], 2, 2))
s <- sapply(s, getDict)
names(s) <- NULL
indexList[[i]] <- s
}
return(indexList)
}
else if (nSamples == 3) {
indexList[[1]] <- c(1,2,1,3) # ab vs ac
indexList[[2]] <- c(1,2,2,3) # ab vs bc
indexList[[3]] <- c(1,3,2,3) # ac vs bc
return(indexList)
}
}
### MAIN ###
Var <- list()
sitesUnused <- c()
## n = 4: ab vs cd, ac vs bd, ad vs bc, and other three for second cond
## n = 3: ab vs ac, ab vs bc, ac vs bc, and other three for second cond
withinLabel <- create_labels(nSamples)[['withinLabel']]
for (bin in 1:length(wSites)) {
print(paste('bin = ', bin))
if (length(wSites[[bin]]) > 0 ) {
sites <- wSites[[bin]]
df <- data.frame()
varList <- list()
minGlobal <- Inf
count <- 1
if (length(sites)>0) {
for (site in sites) {
# print(paste(' -------------------- site = ', site))
testList <- list()
withinX <- withinY <- list()
numTests <- c()
indexList <- list()
if (minus_condition == TRUE) {
indexList <- create_indexList(nSamples)
numTests <- length(indexList)
for (tt in 1:numTests) {
ids <- indexList[[tt]]
withinX[[tt]] <- coverage[[site]][ids[1:2], ]
withinY[[tt]] <- coverage[[site]][ids[3:4],]
}
}
else {
indexList <- create_indexList(nSamples)
numTests <- length(indexList)
for (tt in 1:numTests) {
ids <- indexList[[tt]] + nSamples
withinX[[tt]] <- coverage[[site]][ids[1:2], ]
withinY[[tt]] <- coverage[[site]][ids[3:4],]
}
}
if ( dim(withinX[[1]])[2] < s.size ) {
if (use_cpp) {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::compute_Var(X, Y, na_rm = TRUE, pool = FALSE)
}
}
else {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::AN.test(X, Y, na_rm = TRUE)
}
}
}
else {
design <- Designs[site, ]
if (use_cpp) {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::compute_Var(X[, design], Y[, design], na_rm = TRUE, pool = FALSE)
}
}
else {
for (tt in 1:numTests) {
X <- withinX[[tt]]
Y <- withinY[[tt]]
testList[[tt]] <- tan::AN.test(X[, design], Y[, design], na_rm = TRUE)
}
}
}
lenIndices <- c()
for (tt in 1:numTests) {
test_ <- testList[[tt]]
lenIndices <- c(lenIndices, length(test_$varX), length(test_$varY))
}
minIndex <- min(lenIndices)
## check minIndex > Global_lower (lower bound for pooled var vector of each bins)
if (minIndex > Global_lower) {
if (minGlobal > minIndex) {
minGlobal <- minIndex
}
## df <- data.frame('ab' = test1$varX[1:minIndex], 'ac' = test2$varX[1:minIndex],
## 'ad' = test3$varX[1:minIndex], 'bc' = test3$varY[1:minIndex],
## 'bd' = test2$varY[1:minIndex], 'cd' = test1$varY[1:minIndex])
df <- data.frame(matrix(NA, nrow = minIndex, ncol = length(lab_pool)))
if (nSamples > 3) {
col_id <- 1
for (tt in 1:numTests) {
test_ <- testList[[tt]]
ids <- indexList[[tt]]
df[, col_id] <- test_$varX[1:minIndex]
df[, col_id + 1] <- test_$varY[1:minIndex]
colnames(df)[col_id:(col_id+1)] <- c(paste(letters[ids[1:2]], collapse = ""),
paste(letters[ids[3:4]], collapse = ""))
col_id <- col_id + 2
}
} else if (nSamples == 3) {
df <- data.frame('ab' = testList[[1]]$varX[1:minIndex],
'ac' = testList[[1]]$varY[1:minIndex],
'bc' = testList[[2]]$varY[1:minIndex])
}
varList[[count]] <- df
count <- count + 1
}
# store all unused sites (minIndex < Global_lower): minIndex = 0 -> var empty due to flat peak,
# or many repeated counts, or peakLength too small. 03/24/17
else {
sitesUnused <- c(sitesUnused, site)
}
} # end of for (site in sites)
## Pooling variances across sites in bin
poolVar <- list()
print(paste(" +++ minGlobal = ", minGlobal, sep = ""))
## Case: minGlobal < Inf
if (minGlobal < Inf) {
matVar <- matrix(NA, nrow = length(varList), ncol = minGlobal) # @: Case minGlobal = Inf
for (pair in lab_pool) {
for (i in 1:length(varList)) {
matVar[i, ] <- varList[[i]][1:minGlobal, pair]
}
var <- apply(matVar, 2, function(x) quantile(x, probs = poolQuant, na.rm = TRUE))
if ( length(var) >= movAve ) {
var <- tan::movingAverage(var, movAve)
}
poolVar[[pair]] <- var
}
Var[[bin]] <- poolVar
}
## Case: minGlobal = Inf
else {
message("minGlobal = Inf: 1. Variance vector for this bin returned NA,
and/or 2. Sites in this bin stored in sitesUnused slot")
Var[[bin]] <- NA
}
}
}
}
return(list('Var' = Var, 'sitesUnused' = sitesUnused))
}
.calculateVariance <- function(object, minus_condition, Global_lower, poolQuant, movAve, ...) {
if (object@nSamples > 2 ) {
if (minus_condition == TRUE) {
print("Calculating Variance for first condition")
} else {
print("Calculating Variance for second condition")
}
}
else if (object@nSamples == 2) {
print("Calculating pool Variance for both conditions")
}
### MAIN ###
Var <- list()
if (object@nSamples > 2) {
print(paste("Calculating pooled variance for sample size n = ", object@nSamples), sep = "")
## n=4: lab_pool <- c('ab', 'ac', 'ad', 'bc', 'bd', 'cd')
## n=3: lab_pool <- c('ab', 'ac', 'bc')
lab_pool <- colnames(object@Ns)[1:(dim(object@Ns)[2] / 2)]
sitesUnused <- c()
resultList <- evaluate_variance(coverage = object@coverage, nSamples = object@nSamples,
wSites = object@wSites, lab_pool = lab_pool,
minus_condition = minus_condition, use_cpp = use_cpp,
s.size = object@s.size, Designs = object@Designs,
Global_lower = Global_lower)
Var <- resultList[['Var']]
sitesUnused <- resultList[['sitesUnused']]
object@sitesUnused <- unique(c(object@sitesUnused, sitesUnused))
} # end of if (n > 2)
else if (object@nSamples == 2) {
print(paste("Calculating pooled variance for sample size n = ", object@nSamples), sep = "")
Var <- list()
lab_pool <- c('ab', 'aA', 'aB', 'AB', 'bB', 'Ab')
sitesUnused <- c()
for (bin in 1:length(object@wSites)) {
print(paste('bin = ', bin))
sites <- object@wSites[[bin]]
df <- data.frame()
varList <- list()
minGlobal <- Inf
count <- 1
if (length(sites) > 0) {
for (site in sites) {
# print(paste('site = ', site))
geta <- object@coverage[[site]][1,]
getb <- object@coverage[[site]][2,]
getA <- object@coverage[[site]][3,]
getB <- object@coverage[[site]][4,]
X1 <- rbind( geta, getb)
Y1 <- rbind( getA, getB)
X2 <- rbind( geta, getA)
Y2 <- rbind( getb, getB)
X3 <- rbind( geta, getB)
Y3 <- rbind( getA, getb)
if ( dim(X1)[2] < object@s.size ) {
if (use_cpp) {
test1 <- tan::compute_Var(X1, Y1, na_rm = TRUE, pool = FALSE)
test2 <- tan::compute_Var(X2, Y2, na_rm = TRUE, pool = FALSE)
test3 <- tan::compute_Var(X3, Y3, na_rm = TRUE, pool = FALSE)
}
else {
test1 <- tan::AN.test(X1, Y1, na.rm=TRUE)
test2 <- tan::AN.test(X2, Y2, na.rm=TRUE)
test3 <- tan::AN.test(X3, Y3, na.rm=TRUE)
}
}
else {
design <- object@Designs[site, ]
if (use_cpp) {
test1 <- tan::compute_Var(X1[, design], Y1[, design], na_rm = TRUE, pool = FALSE)
test2 <- tan::compute_Var(X2[, design], Y2[, design], na_rm = TRUE, pool = FALSE)
test3 <- tan::compute_Var(X3[, design], Y3[, design], na_rm = TRUE, pool = FALSE)
}
else {
test1 <- tan::AN.test(X1[, design], Y1[, design], na.rm=TRUE)
test2 <- tan::AN.test(X2[, design], Y2[, design], na.rm=TRUE)
test3 <- tan::AN.test(X3[, design], Y3[, design], na.rm=TRUE)
}
}
minIndex <- min(c(length(test1$varX), length(test2$varX),length(test3$varX),
length(test1$varY), length(test2$varY), length(test3$varY)))
## check minIndex > Global_lower (lower bound for pooled var vector of each bins)
if (minIndex > Global_lower) {
if (minGlobal > minIndex) {
minGlobal <- minIndex
}
df <- data.frame('ab' = test1$varX[1:minIndex], 'aA' = test2$varX[1:minIndex], 'aB' = test3$varX[1:minIndex],
'AB' = test1$varY[1:minIndex], 'bB' = test2$varY[1:minIndex], 'Ab' = test3$varY[1:minIndex])
varList[[count]] <- df
count <- count + 1 # keep track of all sites in bin i
} # end of if (minIndex > Global_lower)
# store all unused sites (minIndex < Global_lower): minIndex = 0 -> var empty due to flat peak,
# or manyrepeated counts: 03/24/17
else {
sitesUnused <- c(sitesUnused, site)
}
} # end of for (site in sites)
## Pooling variances across sites in bin
poolVar <- list()
print(paste(" +++ minGlobal = ", minGlobal, sep = ""))
## Case: minGlobal < Inf
if (minGlobal < Inf) {
matVar <- matrix(NA, nrow = length(varList), ncol = minGlobal)
for (pair in lab_pool) {
for (i in 1:length(varList)) {
matVar[i,] <- varList[[i]][1:minGlobal, pair]
}
var <- apply(matVar, 2, function(x) quantile(x, probs = poolQuant, na.rm = TRUE))
if ( length(var) >= movAve ) {
var <- tan::movingAverage(var, movAve)
}
poolVar[[pair]] <- var
}
Var[[bin]] <- poolVar
}
## Case: minGlobal = Inf
else {
message("minGlobal = Inf: 1. Variance vector for this bin returned NA, and 2. Sites in this bin stored in sitesUnused slot")
Var[[bin]] <- NA
}
}
} # end of bin
object@sitesUnused <- unique(c(object@sitesUnused, sitesUnused))
}
# return results
if (object@nSamples == 2) {
object@poolVar <- Var
}
else if (object@nSamples > 2 ) {
if (minus_condition) {
object@minusVar <- Var
} else {
object@plusVar <- Var
}
}
object
}
setMethod("calculateVariance", signature("tanDb"), .calculateVariance)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapminder_api.R
\name{gapminder_api}
\alias{gapminder_api}
\title{Gapminder Pipeline API}
\usage{
gapminder_api(port = 8001)
}
\arguments{
\item{port}{Define port to serve API on - default is 8001}
}
\value{
deploys a plumber API on port 8001
}
\description{
Gapminder Pipeline API
}
\examples{
NULL
}
| /man/gapminder_api.Rd | permissive | chapmandu2/gapminderpl | R | false | true | 380 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapminder_api.R
\name{gapminder_api}
\alias{gapminder_api}
\title{Gapminder Pipeline API}
\usage{
gapminder_api(port = 8001)
}
\arguments{
\item{port}{Define port to serve API on - default is 8001}
}
\value{
deploys a plumber API on port 8001
}
\description{
Gapminder Pipeline API
}
\examples{
NULL
}
|
library(igraph)
# グラフ読み込み
g <- read.graph("2/japangraph_pajek_xy2_eng.net", format="pajek")
# グラフの各点の次数を取る
degs <- degree(g, V(g), mode="all")
# 次数のヒストグラム表示
png("2/degs_hist.png")
hist(degs, breaks=0:max(degs))
dev.off()
# 各点ごとの次数を棒グラフ表示
png("2/city_degs_plot.png")
barplot(degs, las=2, xlab='city', ylab='degrees', cex.names=.6)
dev.off()
# グラフを表示,次数が大きいほど点のサイズを大きく表示
png("2/degs_size_map.png")
plot(g, vertex.size=degs*1.5, vertex.label.cex=.5)
dev.off()
# 全ての点のうち次数が最大の点を赤色で示し,それらの点に限りidを付与して表示
png("2/visualize_max_degs.png")
V(g)$color <- "lightblue"
V(g)[which(degs==max(degs))]$color <- "red"
g_tmp <- g
V(g_tmp)$name = ""
V(g_tmp)[which(degs==max(degs))]$name <- V(g)[which(degs==max(degs))]$name
plot(g_tmp, vertex.size=degs*1.5, vertex.label.cex=1, )
dev.off() | /2/2-1.R | no_license | N-Hirahara/R_kadai | R | false | false | 984 | r | library(igraph)
# グラフ読み込み
g <- read.graph("2/japangraph_pajek_xy2_eng.net", format="pajek")
# グラフの各点の次数を取る
degs <- degree(g, V(g), mode="all")
# 次数のヒストグラム表示
png("2/degs_hist.png")
hist(degs, breaks=0:max(degs))
dev.off()
# 各点ごとの次数を棒グラフ表示
png("2/city_degs_plot.png")
barplot(degs, las=2, xlab='city', ylab='degrees', cex.names=.6)
dev.off()
# グラフを表示,次数が大きいほど点のサイズを大きく表示
png("2/degs_size_map.png")
plot(g, vertex.size=degs*1.5, vertex.label.cex=.5)
dev.off()
# 全ての点のうち次数が最大の点を赤色で示し,それらの点に限りidを付与して表示
png("2/visualize_max_degs.png")
V(g)$color <- "lightblue"
V(g)[which(degs==max(degs))]$color <- "red"
g_tmp <- g
V(g_tmp)$name = ""
V(g_tmp)[which(degs==max(degs))]$name <- V(g)[which(degs==max(degs))]$name
plot(g_tmp, vertex.size=degs*1.5, vertex.label.cex=1, )
dev.off() |
#' Returns the number of available Open Access objects in the Met Collection and their objectIDs.
#' @param limit Limits the number of objectIDs returned by the function.
#' @param include_IDs If set to TRUE, the function will return objectIDs, observing the limit argument. Defaults to FALSE.
#'
#' @export
all_Met_objects <-
function(limit = FALSE,
include_IDs = FALSE) {
request <-
httr::GET(url = "https://collectionapi.metmuseum.org/public/collection/v1/objects")
data <- httr::content(request)
object_ID <- unlist(data$objectIDs)
if (limit != FALSE) {
object_ID <- object_ID[1:limit]
}
print(
paste(
length(unlist(data$objectIDs)),
" objects avilable in the Metropolitan Mueseum's Open Access Database.",
sep = ""
)
)
if (include_IDs == TRUE) {
print(paste(
"Function included the first ",
length(object_ID),
" objectIDs in output.",
sep = ""
))
object_ID
}
}
| /R/all_Met_objects.R | permissive | athvedt/theMetR | R | false | false | 1,022 | r | #' Returns the number of available Open Access objects in the Met Collection and their objectIDs.
#' @param limit Limits the number of objectIDs returned by the function.
#' @param include_IDs If set to TRUE, the function will return objectIDs, observing the limit argument. Defaults to FALSE.
#'
#' @export
all_Met_objects <-
function(limit = FALSE,
include_IDs = FALSE) {
request <-
httr::GET(url = "https://collectionapi.metmuseum.org/public/collection/v1/objects")
data <- httr::content(request)
object_ID <- unlist(data$objectIDs)
if (limit != FALSE) {
object_ID <- object_ID[1:limit]
}
print(
paste(
length(unlist(data$objectIDs)),
" objects avilable in the Metropolitan Mueseum's Open Access Database.",
sep = ""
)
)
if (include_IDs == TRUE) {
print(paste(
"Function included the first ",
length(object_ID),
" objectIDs in output.",
sep = ""
))
object_ID
}
}
|
setwd("~/Documents/Football Analytics/Football Database/CSV")
library(XML)
library(RCurl)
library(dplyr)
library(data.table)
load("~/Documents/Football Analytics/Football Database/R Code/combine.RData")
year=seq(2009,2021,by=1)
year=as.character(year)
final_roster=data.frame()
for(zz in 1:length(year)){
url_nfl <- paste("https://raw.githubusercontent.com/mrcaseb/nflfastR-roster/master/data/seasons/roster_",year[zz],".csv",sep="")
roster <- read.csv(url(url_nfl))
#Clean Data
roster <- roster[ , c("season", "team", "position", "jersey_number", "status",
"full_name", "first_name", "last_name", "birth_date", "height",
"weight", "college", "high_school", "gsis_id", "espn_id", "sportradar_id",
"yahoo_id", "rotowire_id", "pff_id")]
final_roster=rbind(final_roster,roster)
}
save(final_roster,file="~/Documents/Football Analytics/Football Database/R Code/roster.RData")
| /scrapers/nfl_scraper.R | no_license | dfricci/nfl_draft_wr_2021 | R | false | false | 974 | r | setwd("~/Documents/Football Analytics/Football Database/CSV")
library(XML)
library(RCurl)
library(dplyr)
library(data.table)
load("~/Documents/Football Analytics/Football Database/R Code/combine.RData")
year=seq(2009,2021,by=1)
year=as.character(year)
final_roster=data.frame()
for(zz in 1:length(year)){
url_nfl <- paste("https://raw.githubusercontent.com/mrcaseb/nflfastR-roster/master/data/seasons/roster_",year[zz],".csv",sep="")
roster <- read.csv(url(url_nfl))
#Clean Data
roster <- roster[ , c("season", "team", "position", "jersey_number", "status",
"full_name", "first_name", "last_name", "birth_date", "height",
"weight", "college", "high_school", "gsis_id", "espn_id", "sportradar_id",
"yahoo_id", "rotowire_id", "pff_id")]
final_roster=rbind(final_roster,roster)
}
save(final_roster,file="~/Documents/Football Analytics/Football Database/R Code/roster.RData")
|
## Attach the SPUTNIK package
library("SPUTNIK")

## Construct an ms.image-class object from random intensities
msIm <- msImage(
  values = matrix(rnorm(200), 40, 50),
  name = "test",
  scale = TRUE
)

## Apply Gaussian smoothing to the image colors
msImSmoothed <- smoothImage(msIm, sigma = 5)
| /R/examples/msImage_smoothImage.R | no_license | cran/SPUTNIK | R | false | false | 231 | r | ## Load package
library("SPUTNIK")
## Create ms.image-class object
msIm <- msImage(values = matrix(rnorm(200), 40, 50), name = "test", scale = TRUE)
## Smooth the image colors
msImSmoothed <- smoothImage(msIm, sigma = 5)
|
# Integration tests for the RWebServer package: build a server skeleton on
# disk, start it, exercise its HTTP endpoints, then stop it and clean up.
# NOTE(review): these tests are order-dependent — later tests rely on the
# server started in the first one.
require(testthat)
require(httr)
context("Basic features")
# Generate a skeleton, populate data/, lib/, R/ handlers and the routes
# file, then start the server (expected on port 8080).
test_that("Creation of server works", {
webServer.skeleton()
test <- "hello world"
save(test, file="myRWebServer/data/test.rda")
cat("myFun <- sum", file="myRWebServer/lib/myFun.R")
cat("run <- function(x,y, ...) as.numeric(x) + as.numeric(y)",
file="myRWebServer/R/test.R")
cat("run <- function(...) test",
file="myRWebServer/R/test2.R")
cat("run <- function(x, y, ...) myFun(as.numeric(x), as.numeric(y))",
file="myRWebServer/R/test3.R")
cat("/test/{x}/{y} /test\n", file="myRWebServer/routes")
expect_output(startWebServer("myRWebServer"),
"Server started on port 8080")
})
# /test adds its two query parameters.
test_that("Server is running", {
res <- GET("http://localhost:8080/test?x=1&y=2")
expect_equal(content(res, "text"), "3")
})
# /test2 returns the object loaded from data/test.rda.
test_that("Data files have been loaded", {
res <- GET("http://localhost:8080/test2")
expect_equal(content(res, "text"), "hello world")
})
# /test3 uses myFun, sourced from lib/.
test_that("R files have been sourced", {
res <- GET("http://localhost:8080/test3?x=1&y=1")
expect_equal(content(res, "text"), "2")
})
# Path parameters declared in the routes file map onto /test's arguments.
test_that("Routes work", {
res <- GET("http://localhost:8080/test/1/2")
expect_equal(content(res, "text"), "3")
})
# After stopWebServer() the port should refuse connections.
test_that("Server has stopped", {
stopWebServer()
expect_error(GET("http://localhost:8080/test?x=1&y=2"))
})
# Remove the generated skeleton directory.
system("rm -r myRWebServer")
| /inst/tests/testBasicFeatures.R | no_license | FrancoisGuillem/RWebServer | R | false | false | 1,384 | r | require(testthat)
require(httr)
context("Basic features")
test_that("Creation of server works", {
webServer.skeleton()
test <- "hello world"
save(test, file="myRWebServer/data/test.rda")
cat("myFun <- sum", file="myRWebServer/lib/myFun.R")
cat("run <- function(x,y, ...) as.numeric(x) + as.numeric(y)",
file="myRWebServer/R/test.R")
cat("run <- function(...) test",
file="myRWebServer/R/test2.R")
cat("run <- function(x, y, ...) myFun(as.numeric(x), as.numeric(y))",
file="myRWebServer/R/test3.R")
cat("/test/{x}/{y} /test\n", file="myRWebServer/routes")
expect_output(startWebServer("myRWebServer"),
"Server started on port 8080")
})
test_that("Server is running", {
res <- GET("http://localhost:8080/test?x=1&y=2")
expect_equal(content(res, "text"), "3")
})
test_that("Data files have been loaded", {
res <- GET("http://localhost:8080/test2")
expect_equal(content(res, "text"), "hello world")
})
test_that("R files have been sourced", {
res <- GET("http://localhost:8080/test3?x=1&y=1")
expect_equal(content(res, "text"), "2")
})
test_that("Routes work", {
res <- GET("http://localhost:8080/test/1/2")
expect_equal(content(res, "text"), "3")
})
test_that("Server has stopped", {
stopWebServer()
expect_error(GET("http://localhost:8080/test?x=1&y=2"))
})
system("rm -r myRWebServer")
|
# users can either step through this file, or call this file with
# r -f example.R
# THIS ASSUMES THAT THE TESTHINT DATABASE EXISTS. The recipe for building that
# database is in ../dbInitialization/createHintTest.sql
# THIS EXAMPLE USES THE BRAIN HINT OUTPUT MADE BY RUNNING make hint at /scratch/data/footprints
# Fills the adrenal_gland_wellington_16 footprint database per chromosome in
# parallel, then archives the generated TFBS output into a tarball.
print(date())
#-------------------------------------------------------------------------------
# set path to hint output
data.path <- "/scratch/shared/footprints/adrenal_gland_wellington_16"
# Directory for the generated TFBS tables; created if missing.
output_path=paste(data.path,"/TFBS_OUTPUT",sep="")
dir.create(output_path, showWarnings = FALSE)
# Destination directory for the final bdbag tarball.
bdbag.path<-"/scratch/shared/footprints/adrenal_gland_16"
dir.create(bdbag.path, showWarnings = FALSE)
#-------------------------------------------------------------------------------
# establish database connections:
# Database names, settable from the calling environment; defaults used when
# not already defined (allows interactive override before sourcing).
if(!exists("db.wellington"))
db.wellington <- "adrenal_gland_wellington_16"
if(!exists("db.fimo"))
db.fimo <- "fimo"
#-------------------------------------------------------------------------------
# Source the libraries
# Helper functions (incl. fillAllSamplesByChromosome) come from these files.
source("/scratch/galaxy/test/generate_db/src/dependencies.R")
source("/scratch/galaxy/test/generate_db/src/dbFunctions.R")
source("/scratch/galaxy/test/generate_db/src/tableParsing.R")
source("/scratch/galaxy/test/generate_db/src/main_Bioc.R")
# Only run the heavy fill when executed non-interactively (r -f / Rscript).
if(!interactive()){
chromosomes <- paste0("chr",c(1:22,"X","Y","MT"))
# Create parallel structure here
library(BiocParallel)
register(MulticoreParam(workers = 10, stop.on.error = FALSE, log = TRUE), default = TRUE)
# Run on all 24 possible chromosomes at once
# bptry() captures per-chromosome failures instead of aborting the batch.
result <- bptry(bplapply(chromosomes, fillAllSamplesByChromosome,
dbConnection = db.wellington,
fimo = db.fimo,
minid = "adrenal_gland_wellington_16.minid",
dbUser = "trena",
dbTable = "adrenal_gland_wellington_16",
sourcePath = data.path,
isTest = FALSE,
method = "WELLINGTON",
Fill_DB_Enable=FALSE))
}
# Archive the TFBS output into the bdbag directory, then remove the originals.
cmd=paste("tar -zcvf ", bdbag.path, "/", db.wellington,".tar.gz ", output_path, sep="")
system(cmd, intern = TRUE)
unlink(output_path,recursive=TRUE)
#print(bpok(result))
#print("Database fill complete")
#print(date())
| /generate_db/master/adrenal_gland_16/wellington.R | no_license | xtmgah/genomics-footprint | R | false | false | 2,226 | r | # users can either step through this file, or call this file with
# r -f example.R
# THIS ASSUMES THAT THE TESTHINT DATABASE EXISTS. The recipe for building that
# database is in ../dbInitialization/createHintTest.sql
# THIS EXAMPLE USES THE BRAIN HINT OUTPUT MADE BY RUNNING make hint at /scratch/data/footprints
print(date())
#-------------------------------------------------------------------------------
# set path to hint output
data.path <- "/scratch/shared/footprints/adrenal_gland_wellington_16"
output_path=paste(data.path,"/TFBS_OUTPUT",sep="")
dir.create(output_path, showWarnings = FALSE)
bdbag.path<-"/scratch/shared/footprints/adrenal_gland_16"
dir.create(bdbag.path, showWarnings = FALSE)
#-------------------------------------------------------------------------------
# establish database connections:
if(!exists("db.wellington"))
db.wellington <- "adrenal_gland_wellington_16"
if(!exists("db.fimo"))
db.fimo <- "fimo"
#-------------------------------------------------------------------------------
# Source the libraries
source("/scratch/galaxy/test/generate_db/src/dependencies.R")
source("/scratch/galaxy/test/generate_db/src/dbFunctions.R")
source("/scratch/galaxy/test/generate_db/src/tableParsing.R")
source("/scratch/galaxy/test/generate_db/src/main_Bioc.R")
if(!interactive()){
chromosomes <- paste0("chr",c(1:22,"X","Y","MT"))
# Create parallel structure here
library(BiocParallel)
register(MulticoreParam(workers = 10, stop.on.error = FALSE, log = TRUE), default = TRUE)
# Run on all 24 possible chromosomes at once
result <- bptry(bplapply(chromosomes, fillAllSamplesByChromosome,
dbConnection = db.wellington,
fimo = db.fimo,
minid = "adrenal_gland_wellington_16.minid",
dbUser = "trena",
dbTable = "adrenal_gland_wellington_16",
sourcePath = data.path,
isTest = FALSE,
method = "WELLINGTON",
Fill_DB_Enable=FALSE))
}
cmd=paste("tar -zcvf ", bdbag.path, "/", db.wellington,".tar.gz ", output_path, sep="")
system(cmd, intern = TRUE)
unlink(output_path,recursive=TRUE)
#print(bpok(result))
#print("Database fill complete")
#print(date())
|
\name{current.ratio}
\alias{current.ratio}
\title{current ratio -- Liquidity ratios measure the firm's ability to satisfy its short-term obligations as they come due.}
\usage{
current.ratio(ca, cl)
}
\arguments{
\item{ca}{current assets}
\item{cl}{current liabilities}
}
\description{
current ratio -- Liquidity ratios measure the firm's
ability to satisfy its short-term obligations as they come
due.
}
\examples{
current.ratio(ca=8000,cl=2000)
}
\seealso{
\code{\link{cash.ratio}}
\code{\link{quick.ratio}}
}
| /man/current.ratio.Rd | no_license | asheshwor/FinCal | R | false | false | 518 | rd | \name{current.ratio}
\alias{current.ratio}
\title{current ratio -- Liquidity ratios measure the firm's ability to satisfy its short-term obligations as they come due.}
\usage{
current.ratio(ca, cl)
}
\arguments{
\item{ca}{current assets}
\item{cl}{current liabilities}
}
\description{
current ratio -- Liquidity ratios measure the firm's
ability to satisfy its short-term obligations as they come
due.
}
\examples{
current.ratio(ca=8000,cl=2000)
}
\seealso{
\code{\link{cash.ratio}}
\code{\link{quick.ratio}}
}
|
# Auto-generated fuzzer/valgrind regression input for mcga:::ByteCodeMutation:
# a fixed byte vector (containing NA entries) paired with an extreme negative
# mutation probability, replayed via do.call.
testlist <- list(bytes1 = c(-138099516L, -993737532L, NA, 16179069L, -2029379485L, 1869509492L, 704643071L, -12713985L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -255L, 3487013L, 892668209L, 825261437L, 2105147263L, 2139062271L, 2139064841L, -822083585L, -3L, 19135997L, -33685635L, 1090986495L, -158662726L, -1162167622L, 1085984335L, -1L, 65589L, 889295872L, 1L, 167862016L, 32125L, NA, -310378496L, 3342335L, 19136511L, -32768L, 2L, 50397183L, -1L), pmutation = -1.40444776422717e+306)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612803344-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 551 | r | testlist <- list(bytes1 = c(-138099516L, -993737532L, NA, 16179069L, -2029379485L, 1869509492L, 704643071L, -12713985L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -255L, 3487013L, 892668209L, 825261437L, 2105147263L, 2139062271L, 2139064841L, -822083585L, -3L, 19135997L, -33685635L, 1090986495L, -158662726L, -1162167622L, 1085984335L, -1L, 65589L, 889295872L, 1L, 167862016L, 32125L, NA, -310378496L, 3342335L, 19136511L, -32768L, 2L, 50397183L, -1L), pmutation = -1.40444776422717e+306)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
# Regression tests for the grattanReporter spell-checker: full-report checks,
# abbreviation/initialism validation, dictionary directives, banned words,
# government capitalization rules and varioref-style reference keys.
# NOTE(review): relies on LaTeX fixture files shipped alongside the tests.
context("Spellchecker")
# A full report passes once its project-specific terms are whitelisted.
test_that("School funding report checks out", {
expect_null(check_spelling("./SchoolFunding/SchoolFunding.tex",
known.correct = c("SRS", "SE.XPD.TOTL.GD.XS", "WDI", "SSNP", "underfunded", "overfund[a-z]*", "NMS", "WPI", "DET", "phas", "NP", "SATs", "ENG", "th", "stds", "RCTs", "CAGR"), ignore.lines = 1551))
})
# Misspellings inside \input'd documents are still caught and located.
test_that("Check spelling of multiple input document", {
expect_error(check_spelling("./spellcheck_multi_input/spellcheck_multi_input.tex"),
regexp = "failed on above line")
})
# Undefined abbreviations are rejected.
test_that("Abbreviations", {
expect_error(check_spelling("spellcheck-abbrevs.tex"))
})
# Properly defined initialisms pass, and the extractor returns them.
test_that("Initalisms", {
expect_null(check_spelling("./spelling/abbrev/abbrev-defd-ok.tex"))
expect_null(check_spelling("./spelling/abbrev/abbrev-defd-ok-2.tex"))
expect_null(check_spelling("./spelling/abbrev/HILDA-ok.tex"))
expect_equal(extract_validate_abbreviations(readLines("./spelling/abbrev/abbrev-defd-ok-stopwords.tex")),
c("QXFEoC", "AIAS"))
expect_equal(extract_validate_abbreviations(readLines("./spelling/abbrev/abbrev-plural.tex")),
c("LVR"))
})
# Sentence-initial initialisms must not trip the checker.
test_that("Initialism checking doesn't fail if at start of sentence", {
expect_null(check_spelling("./spelling/abbrev/abbrev-at-line-start.tex"))
})
# add_to_dictionary / ignore_spelling_in directives are honoured; the
# latter is only permitted outside pre-release mode.
test_that("Add to dictionary, ignore spelling in", {
expect_error(check_spelling("./spelling/add_to_dictionary-wrong.tex"), regexp = "[Ss]pellcheck failed")
expect_error(check_spelling("./spelling/ignore_spelling_in-wrong.tex", pre_release = FALSE), regexp = "[Ss]pellcheck failed")
expect_null(check_spelling("./spelling/add_to_dictionary-ok.tex"))
expect_null(check_spelling("./spelling/ignore_spelling_in-ok.tex", pre_release = FALSE))
expect_null(check_spelling("./spelling/ignore_spelling_in-ok-2.tex", pre_release = FALSE))
expect_error(check_spelling("./spelling/ignore_spelling_in-ok.tex"), regexp = "pre_release = TRUE")
})
# Banned words (e.g. "skillset", "percent") always error, even when added
# to the dictionary or hidden inside \input'd files.
test_that("Stop if present", {
expect_error(check_spelling("./stop_if_present/should-stop.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/should-stop-2.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/stop_even_if_added.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present_inputs/stop-if-held-in-inputs.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/should-stop-3.tex"), regexp = "percent")
expect_null(check_spelling("./stop_if_present/should-not-stop.tex"))
})
# "<Jurisdiction> govt" must use an uppercase G.
test_that("Lower-case governments should error", {
expect_error(check_spelling("./spelling/Govt/NSWgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/ACTgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/NTgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/Queenslandgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/WAgovt.tex"), regexp = "uppercase G")
})
# Generic / plural uses of "government" are allowed in lower case.
test_that("Lower-case governments ok in some cases", {
expect_null(check_spelling("./spelling/Govt/lc-govt-ok.tex"))
expect_null(check_spelling("./spelling/Govt/plural-ok.tex"))
})
# \vrefrange keys are not mistaken for misspellings.
test_that("Vrefrange keys are ok", {
expect_null(check_spelling("./spelling/vrefrange.tex"))
})
# \Chaprefrange keys are likewise ignored.
test_that("Chaprefrange", {
expect_null(check_spelling("./spelling/chaprefrange.tex"))
})
| /tests/testthat/test_spellcheck.R | no_license | HughParsonage/grattanReporter | R | false | false | 3,404 | r | context("Spellchecker")
test_that("School funding report checks out", {
expect_null(check_spelling("./SchoolFunding/SchoolFunding.tex",
known.correct = c("SRS", "SE.XPD.TOTL.GD.XS", "WDI", "SSNP", "underfunded", "overfund[a-z]*", "NMS", "WPI", "DET", "phas", "NP", "SATs", "ENG", "th", "stds", "RCTs", "CAGR"), ignore.lines = 1551))
})
test_that("Check spelling of multiple input document", {
expect_error(check_spelling("./spellcheck_multi_input/spellcheck_multi_input.tex"),
regexp = "failed on above line")
})
test_that("Abbreviations", {
expect_error(check_spelling("spellcheck-abbrevs.tex"))
})
test_that("Initalisms", {
expect_null(check_spelling("./spelling/abbrev/abbrev-defd-ok.tex"))
expect_null(check_spelling("./spelling/abbrev/abbrev-defd-ok-2.tex"))
expect_null(check_spelling("./spelling/abbrev/HILDA-ok.tex"))
expect_equal(extract_validate_abbreviations(readLines("./spelling/abbrev/abbrev-defd-ok-stopwords.tex")),
c("QXFEoC", "AIAS"))
expect_equal(extract_validate_abbreviations(readLines("./spelling/abbrev/abbrev-plural.tex")),
c("LVR"))
})
test_that("Initialism checking doesn't fail if at start of sentence", {
expect_null(check_spelling("./spelling/abbrev/abbrev-at-line-start.tex"))
})
test_that("Add to dictionary, ignore spelling in", {
expect_error(check_spelling("./spelling/add_to_dictionary-wrong.tex"), regexp = "[Ss]pellcheck failed")
expect_error(check_spelling("./spelling/ignore_spelling_in-wrong.tex", pre_release = FALSE), regexp = "[Ss]pellcheck failed")
expect_null(check_spelling("./spelling/add_to_dictionary-ok.tex"))
expect_null(check_spelling("./spelling/ignore_spelling_in-ok.tex", pre_release = FALSE))
expect_null(check_spelling("./spelling/ignore_spelling_in-ok-2.tex", pre_release = FALSE))
expect_error(check_spelling("./spelling/ignore_spelling_in-ok.tex"), regexp = "pre_release = TRUE")
})
test_that("Stop if present", {
expect_error(check_spelling("./stop_if_present/should-stop.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/should-stop-2.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/stop_even_if_added.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present_inputs/stop-if-held-in-inputs.tex"), regexp = "skillset")
expect_error(check_spelling("./stop_if_present/should-stop-3.tex"), regexp = "percent")
expect_null(check_spelling("./stop_if_present/should-not-stop.tex"))
})
test_that("Lower-case governments should error", {
expect_error(check_spelling("./spelling/Govt/NSWgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/ACTgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/NTgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/Queenslandgovt.tex"), regexp = "uppercase G")
expect_error(check_spelling("./spelling/Govt/WAgovt.tex"), regexp = "uppercase G")
})
test_that("Lower-case governments ok in some cases", {
expect_null(check_spelling("./spelling/Govt/lc-govt-ok.tex"))
expect_null(check_spelling("./spelling/Govt/plural-ok.tex"))
})
test_that("Vrefrange keys are ok", {
expect_null(check_spelling("./spelling/vrefrange.tex"))
})
test_that("Chaprefrange", {
expect_null(check_spelling("./spelling/chaprefrange.tex"))
})
|
################################################################################
# TODO LIST
# TODO: ...
################################################################################
# CHANGE LOG (last 20 changes)
# 06.08.2017: Added audit trail.
# 13.07.2017: Fixed issue with button handlers.
# 13.07.2017: Fixed narrow dropdown with hidden argument ellipsize = "none".
# 07.07.2017: Replaced 'droplist' with 'gcombobox'.
# 07.07.2017: Removed argument 'border' for 'gbutton'.
# 10.05.2016: Added new option 'limit' to remove high ratios from the result.
# 10.05.2016: Added attributes to result.
# 10.05.2016: 'Save as' textbox expandable.
# 28.08.2015: Added importFrom.
# 05.05.2015: Changed parameter 'ignoreCase' to 'ignore.case' for 'checkSubset' function.
# 13.12.2014: Added kit dropdown and kit attribute to result.
# 04.12.2014: First version.
#' @title Calculate Spectral Pull-up
#'
#' @description
#' GUI wrapper for the \code{\link{calculatePullup}} function.
#'
#' @details
#' Simplifies the use of the \code{\link{calculatePullup}} function by
#' providing a graphical user interface.
#'
#' @param env environment in which to search for data frames and save result.
#' @param savegui logical indicating if GUI settings should be saved in the environment.
#' @param debug logical indicating printing debug information.
#' @param parent widget to get focus when finished.
#'
#' @return TRUE
#'
#' @export
#'
#' @importFrom utils help head str
#' @importFrom graphics title
#'
#' @seealso \code{\link{calculatePullup}}, \code{\link{checkSubset}}
#'
calculatePullup_gui <- function(env=parent.frame(), savegui=NULL,
debug=FALSE, parent=NULL){
# Global variables.
.gData <- NULL
.gDataName <- NULL
.gRef <- NULL
.gRefName <- NULL
if(debug){
print(paste("IN:", match.call()[[1]]))
}
# WINDOW ####################################################################
if(debug){
print("WINDOW")
}
# Main window.
w <- gwindow(title="Calculate spectral pull-up", visible=FALSE)
# Runs when window is closed.
addHandlerDestroy(w, handler = function (h, ...) {
# Save GUI state.
.saveSettings()
# Focus on parent window.
if(!is.null(parent)){
focus(parent)
}
})
gv <- ggroup(horizontal=FALSE,
spacing=5,
use.scrollwindow=FALSE,
container = w,
expand=TRUE)
# Help button group.
gh <- ggroup(container = gv, expand=FALSE, fill="both")
savegui_chk <- gcheckbox(text="Save GUI settings", checked=FALSE, container=gh)
addSpring(gh)
help_btn <- gbutton(text="Help", container=gh)
addHandlerChanged(help_btn, handler = function(h, ...) {
# Open help page for function.
print(help("calculatePullup_gui", help_type="html"))
})
# FRAME 0 ###################################################################
if(debug){
print("FRAME 0")
}
f0 <- gframe(text = "Datasets",
horizontal=TRUE,
spacing = 5,
container = gv)
g0 <- glayout(container = f0, spacing = 1)
# Dataset -------------------------------------------------------------------
g0[1,1] <- glabel(text="Select dataset:", container=g0)
dfs <- c("<Select a dataset>", listObjects(env=env, obj.class="data.frame"))
g0[1,2] <- g0_data_drp <- gcombobox(items=dfs,
selected = 1,
editable = FALSE,
container = g0,
ellipsize = "none")
g0[1,3] <- g0_data_samples_lbl <- glabel(text=" 0 samples", container=g0)
addHandlerChanged(g0_data_drp, handler = function (h, ...) {
val_obj <- svalue(g0_data_drp)
# Check if suitable.
requiredCol <- c("Sample.Name", "Allele", "Marker", "Dye", "Height", "Size", "Data.Point")
ok <- checkDataset(name=val_obj, reqcol=requiredCol,
slim=TRUE, slimcol="Height",
env=env, parent=w, debug=debug)
if(ok){
# Load or change components.
# get dataset.
.gData <<- get(val_obj, envir=env)
.gDataName <<- val_obj
svalue(g0_data_samples_lbl) <- paste(length(unique(.gData$Sample.Name)),
"samples.")
# Suggest a name for result.
svalue(f4_save_edt) <- paste(val_obj, "_pullup", sep="")
# Detect kit.
kitIndex <- detectKit(.gData, index=TRUE)
# Select in dropdown.
svalue(f4_kit_drp, index=TRUE) <- kitIndex
} else {
# Reset components.
.gData <<- NULL
.gDataName <<- NULL
svalue(g0_data_drp, index=TRUE) <- 1
svalue(g0_data_samples_lbl) <- " 0 samples"
svalue(f4_save_edt) <- ""
}
} )
# Reference -----------------------------------------------------------------
g0[2,1] <- glabel(text="Select reference dataset:", container=g0)
# NB! dfs defined in previous section.
g0[2,2] <- g0_ref_drp <- gcombobox(items=dfs,
selected = 1,
editable = FALSE,
container = g0,
ellipsize = "none")
g0[2,3] <- g0_ref_samples_lbl <- glabel(text=" 0 references", container=g0)
addHandlerChanged(g0_ref_drp, handler = function (h, ...) {
val_obj <- svalue(g0_ref_drp)
# Check if suitable.
requiredCol <- c("Sample.Name", "Marker", "Allele")
ok <- checkDataset(name=val_obj, reqcol=requiredCol,
slim=TRUE, slimcol="Allele",
env=env, parent=w, debug=debug)
if(ok){
# Load or change components.
.gRef <<- get(val_obj, envir=env)
.gRefName <<- val_obj
svalue(g0_ref_samples_lbl) <- paste(length(unique(.gRef$Sample.Name)),
"samples.")
} else {
# Reset components.
.gRef <<- NULL
.gRefName <<- NULL
svalue(g0_ref_drp, index=TRUE) <- 1
svalue(g0_ref_samples_lbl) <- " 0 references"
}
} )
# CHECK ---------------------------------------------------------------------
if(debug){
print("CHECK")
}
g0[3,2] <- g0_check_btn <- gbutton(text="Check subsetting", container=g0)
addHandlerChanged(g0_check_btn, handler = function(h, ...) {
# Get values.
val_data <- .gData
val_ref <- .gRef
val_ignore <- svalue(f1_ignore_chk)
val_word <- svalue(f1_word_chk)
if (!is.null(.gData) || !is.null(.gRef)){
chksubset_w <- gwindow(title = "Check subsetting",
visible = FALSE, name=title,
width = NULL, height= NULL, parent=w,
handler = NULL, action = NULL)
chksubset_txt <- checkSubset(data=val_data,
ref=val_ref,
console=FALSE,
ignore.case=val_ignore,
word=val_word)
gtext (text = chksubset_txt, width = NULL, height = 300, font.attr = NULL,
wrap = FALSE, container = chksubset_w)
visible(chksubset_w) <- TRUE
} else {
gmessage(msg="Data frame is NULL!\n\n
Make sure to select a dataset and a reference set",
title="Error",
icon = "error")
}
} )
# FRAME 1 ###################################################################
if(debug){
print("FRAME 1")
}
f1 <- gframe(text = "Options",
horizontal=FALSE,
spacing = 10,
container = gv)
f1_ignore_chk <- gcheckbox(text="Ignore case",
checked=TRUE,
container=f1)
f1_word_chk <- gcheckbox(text="Add word boundaries",
checked = FALSE,
container = f1)
f1_ol_chk <- gcheckbox(text="Remove off-ladder peaks",
checked = FALSE,
container = f1)
# LAYOUT --------------------------------------------------------------------
f1g1 <- glayout(container = f1, spacing = 1)
f1g1[1,1] <- glabel(text="Pullup analysis range (data points) around known alleles: ", container=f1g1)
f1g1[1,2] <- f1_pullup_spb <- gspinbutton(from=0, to=1000, by=10, value=6, container=f1g1)
f1g1[2,1] <- glabel(text="Blocking range (data points) around known alleles: ", container=f1g1)
f1g1[2,2] <- f1_block_spb <- gspinbutton(from=0, to=1000, by=10, value=70, container=f1g1)
f1g1[3,1] <- glabel(text="Discard pull-ups with ratio: > ", container=f1g1)
f1g1[3,2] <- f1_limit_spb <- gspinbutton(from=0, to=10, by=0.1, value=1, container=f1g1)
f1_discard_chk <- gcheckbox(text="Discard alleles with no pullup from the result table",
checked = FALSE,
container = f1)
# FRAME 4 ###################################################################
if(debug){
print("FRAME 4")
}
f4 <- gframe(text = "Save as",
horizontal=TRUE,
spacing = 5,
container = gv)
glabel(text="Name for result:", container=f4)
f4_save_edt <- gedit(text="", container=f4, expand = TRUE)
glabel(text=" Kit attribute:", container=f4)
f4_kit_drp <- gcombobox(items=getKit(), selected = 1,
editable = FALSE, container = f4, ellipsize = "none")
# BUTTON ####################################################################
if(debug){
print("BUTTON")
}
calculate_btn <- gbutton(text="Calculate", container=gv)
addHandlerClicked(calculate_btn, handler = function(h, ...) {
# Get values.
val_data <- .gData
val_ref <- .gRef
val_name_data <- .gDataName
val_name_ref <- .gRefName
val_ignore <- svalue(f1_ignore_chk)
val_word <- svalue(f1_word_chk)
val_ol <- svalue(f1_ol_chk)
val_pullup <- svalue(f1_pullup_spb)
val_block <- svalue(f1_block_spb)
val_limit <- svalue(f1_limit_spb)
val_discard <- svalue(f1_discard_chk)
val_name <- svalue(f4_save_edt)
val_kit <- svalue(f4_kit_drp)
if(debug){
print("Read Values:")
print("val_data")
print(head(val_data))
print("val_ref")
print(head(val_ref))
print("val_ignore")
print(val_ignore)
print("val_word")
print(val_word)
print("val_ol")
print(val_ol)
print("val_pullup")
print(val_pullup)
print("val_block")
print(val_block)
print("val_limit")
print(val_limit)
print("val_name")
print(val_name)
}
# Check if data.
if(!is.null(.gData) & !is.null(.gRef)){
# Check for NA's in dye column.
if(!any(is.na(.gData$Dye))){
# Change button.
blockHandlers(calculate_btn)
svalue(calculate_btn) <- "Processing..."
unblockHandlers(calculate_btn)
enabled(calculate_btn) <- FALSE
datanew <- calculatePullup(data=val_data,
ref=val_ref,
pullup.range=val_pullup,
block.range=val_block,
ol.rm=val_ol,
ignore.case=val_ignore,
word=val_word,
discard=val_discard,
limit=val_limit,
debug=debug)
# Add attributes to result.
attr(datanew, which="kit") <- val_kit
# Create key-value pairs to log.
keys <- list("data", "ref", "pullup.range", "block.range", "ol.rm",
"ignore.case", "word", "discard", "limit")
values <- list(val_name_data, val_name_ref, val_pullup, val_block, val_ol,
val_ignore, val_word, val_discard, val_limit)
# Update audit trail.
datanew <- auditTrail(obj = datanew, key = keys, value = values,
label = "calculatePullup_gui", arguments = FALSE,
package = "strvalidator")
# Save data.
saveObject(name=val_name, object=datanew, parent=w, env=env)
if(debug){
print(str(datanew))
print(head(datanew))
print(paste("EXIT:", match.call()[[1]]))
}
# Close GUI.
dispose(w)
} else {
message <- "'NA' in 'Dye' column. \nUse add dye function to fix."
gmessage(message, title="NA detected!",
icon = "error",
parent = w)
}
} else {
message <- "A dataset and a reference dataset have to be selected."
gmessage(message, title="Datasets not selected",
icon = "error",
parent = w)
}
} )
# INTERNAL FUNCTIONS ########################################################
.loadSavedSettings <- function(){
# First check status of save flag.
if(!is.null(savegui)){
svalue(savegui_chk) <- savegui
enabled(savegui_chk) <- FALSE
if(debug){
print("Save GUI status set!")
}
} else {
# Load save flag.
if(exists(".strvalidator_calculatePullup_gui_savegui", envir=env, inherits = FALSE)){
svalue(savegui_chk) <- get(".strvalidator_calculatePullup_gui_savegui", envir=env)
}
if(debug){
print("Save GUI status loaded!")
}
}
if(debug){
print(svalue(savegui_chk))
}
# Then load settings if true.
if(svalue(savegui_chk)){
if(exists(".strvalidator_calculatePullup_gui_window", envir=env, inherits = FALSE)){
svalue(f1_pullup_spb) <- get(".strvalidator_calculatePullup_gui_window", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_block", envir=env, inherits = FALSE)){
svalue(f1_block_spb) <- get(".strvalidator_calculatePullup_gui_block", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_limit", envir=env, inherits = FALSE)){
svalue(f1_limit_spb) <- get(".strvalidator_calculatePullup_gui_limit", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_ol", envir=env, inherits = FALSE)){
svalue(f1_ol_chk) <- get(".strvalidator_calculatePullup_gui_ol", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_ignore", envir=env, inherits = FALSE)){
svalue(f1_ignore_chk) <- get(".strvalidator_calculatePullup_gui_ignore", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_word", envir=env, inherits = FALSE)){
svalue(f1_word_chk) <- get(".strvalidator_calculatePullup_gui_word", envir=env)
}
if(exists(".strvalidator_calculatePullup_gui_discard", envir=env, inherits = FALSE)){
svalue(f1_discard_chk) <- get(".strvalidator_calculatePullup_gui_discard", envir=env)
}
if(debug){
print("Saved settings loaded!")
}
}
}
.saveSettings <- function(){
# Then save settings if true.
if(svalue(savegui_chk)){
assign(x=".strvalidator_calculatePullup_gui_savegui", value=svalue(savegui_chk), envir=env)
assign(x=".strvalidator_calculatePullup_gui_window", value=svalue(f1_pullup_spb), envir=env)
assign(x=".strvalidator_calculatePullup_gui_block", value=svalue(f1_block_spb), envir=env)
assign(x=".strvalidator_calculatePullup_gui_limit", value=svalue(f1_limit_spb), envir=env)
assign(x=".strvalidator_calculatePullup_gui_ol", value=svalue(f1_ol_chk), envir=env)
assign(x=".strvalidator_calculatePullup_gui_ignore", value=svalue(f1_ignore_chk), envir=env)
assign(x=".strvalidator_calculatePullup_gui_word", value=svalue(f1_word_chk), envir=env)
assign(x=".strvalidator_calculatePullup_gui_discard", value=svalue(f1_discard_chk), envir=env)
} else { # or remove all saved values if false.
if(exists(".strvalidator_calculatePullup_gui_savegui", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_savegui", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_window", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_window", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_block", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_block", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_limit", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_limit", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_ol", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_ol", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_ignore", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_ignore", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_word", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_word", envir = env)
}
if(exists(".strvalidator_calculatePullup_gui_discard", envir=env, inherits = FALSE)){
remove(".strvalidator_calculatePullup_gui_discard", envir = env)
}
if(debug){
print("Settings cleared!")
}
}
if(debug){
print("Settings saved!")
}
}
# END GUI ###################################################################
# Load GUI settings.
.loadSavedSettings()
# Show GUI.
visible(w) <- TRUE
focus(w)
}
| /R/calculatePullup_gui.r | no_license | mokshasoft/strvalidator | R | false | false | 18,165 | r | ################################################################################
# TODO LIST
# TODO: ...
################################################################################
# CHANGE LOG (last 20 changes)
# 06.08.2017: Added audit trail.
# 13.07.2017: Fixed issue with button handlers.
# 13.07.2017: Fixed narrow dropdown with hidden argument ellipsize = "none".
# 07.07.2017: Replaced 'droplist' with 'gcombobox'.
# 07.07.2017: Removed argument 'border' for 'gbutton'.
# 10.05.2016: Added new option 'limit' to remove high ratios from the result.
# 10.05.2016: Added attributes to result.
# 10.05.2016: 'Save as' textbox expandable.
# 28.08.2015: Added importFrom.
# 05.05.2015: Changed parameter 'ignoreCase' to 'ignore.case' for 'checkSubset' function.
# 13.12.2014: Added kit dropdown and kit attribute to result.
# 04.12.2014: First version.
#' @title Calculate Spectral Pull-up
#'
#' @description
#' GUI wrapper for the \code{\link{calculatePullup}} function.
#'
#' @details
#' Simplifies the use of the \code{\link{calculatePullup}} function by
#' providing a graphical user interface.
#'
#' @param env environment in which to search for data frames and save result.
#' @param savegui logical indicating if GUI settings should be saved in the environment.
#' @param debug logical indicating printing debug information.
#' @param parent widget to get focus when finished.
#'
#' @return TRUE
#'
#' @export
#'
#' @importFrom utils help head str
#' @importFrom graphics title
#'
#' @seealso \code{\link{calculatePullup}}, \code{\link{checkSubset}}
#'
calculatePullup_gui <- function(env=parent.frame(), savegui=NULL,
                                debug=FALSE, parent=NULL){

  # Currently selected datasets; written from handler scope via '<<-'.
  .gData <- NULL
  .gDataName <- NULL
  .gRef <- NULL
  .gRefName <- NULL

  if(debug){
    print(paste("IN:", match.call()[[1]]))
  }

  # WINDOW ####################################################################

  if(debug){
    print("WINDOW")
  }

  # Main window.
  w <- gwindow(title="Calculate spectral pull-up", visible=FALSE)

  # Runs when window is closed: persist GUI state, then hand focus back.
  addHandlerDestroy(w, handler = function (h, ...) {

    # Save GUI state.
    .saveSettings()

    # Focus on parent window.
    if(!is.null(parent)){
      focus(parent)
    }

  })

  gv <- ggroup(horizontal=FALSE,
               spacing=5,
               use.scrollwindow=FALSE,
               container = w,
               expand=TRUE)

  # Help button group.
  gh <- ggroup(container = gv, expand=FALSE, fill="both")

  savegui_chk <- gcheckbox(text="Save GUI settings", checked=FALSE, container=gh)

  addSpring(gh)

  help_btn <- gbutton(text="Help", container=gh)

  addHandlerChanged(help_btn, handler = function(h, ...) {

    # Open help page for function.
    print(help("calculatePullup_gui", help_type="html"))

  })

  # FRAME 0 ###################################################################

  if(debug){
    print("FRAME 0")
  }

  f0 <- gframe(text = "Datasets",
               horizontal=TRUE,
               spacing = 5,
               container = gv)

  g0 <- glayout(container = f0, spacing = 1)

  # Dataset -------------------------------------------------------------------

  g0[1,1] <- glabel(text="Select dataset:", container=g0)

  dfs <- c("<Select a dataset>", listObjects(env=env, obj.class="data.frame"))

  g0[1,2] <- g0_data_drp <- gcombobox(items=dfs,
                                      selected = 1,
                                      editable = FALSE,
                                      container = g0,
                                      ellipsize = "none")

  g0[1,3] <- g0_data_samples_lbl <- glabel(text=" 0 samples", container=g0)

  addHandlerChanged(g0_data_drp, handler = function (h, ...) {

    val_obj <- svalue(g0_data_drp)

    # Check if suitable: dataset must be 'slim' with the required columns.
    requiredCol <- c("Sample.Name", "Allele", "Marker", "Dye", "Height", "Size", "Data.Point")
    ok <- checkDataset(name=val_obj, reqcol=requiredCol,
                       slim=TRUE, slimcol="Height",
                       env=env, parent=w, debug=debug)

    if(ok){
      # Load or change components.

      # get dataset.
      .gData <<- get(val_obj, envir=env)
      .gDataName <<- val_obj

      svalue(g0_data_samples_lbl) <- paste(length(unique(.gData$Sample.Name)),
                                           "samples.")

      # Suggest a name for result.
      svalue(f4_save_edt) <- paste(val_obj, "_pullup", sep="")

      # Detect kit.
      kitIndex <- detectKit(.gData, index=TRUE)

      # Select in dropdown.
      svalue(f4_kit_drp, index=TRUE) <- kitIndex

    } else {

      # Reset components.
      .gData <<- NULL
      .gDataName <<- NULL
      svalue(g0_data_drp, index=TRUE) <- 1
      svalue(g0_data_samples_lbl) <- " 0 samples"
      svalue(f4_save_edt) <- ""

    }

  } )

  # Reference -----------------------------------------------------------------

  g0[2,1] <- glabel(text="Select reference dataset:", container=g0)

  # NB! dfs defined in previous section.
  g0[2,2] <- g0_ref_drp <- gcombobox(items=dfs,
                                     selected = 1,
                                     editable = FALSE,
                                     container = g0,
                                     ellipsize = "none")

  g0[2,3] <- g0_ref_samples_lbl <- glabel(text=" 0 references", container=g0)

  addHandlerChanged(g0_ref_drp, handler = function (h, ...) {

    val_obj <- svalue(g0_ref_drp)

    # Check if suitable.
    requiredCol <- c("Sample.Name", "Marker", "Allele")
    ok <- checkDataset(name=val_obj, reqcol=requiredCol,
                       slim=TRUE, slimcol="Allele",
                       env=env, parent=w, debug=debug)

    if(ok){
      # Load or change components.

      .gRef <<- get(val_obj, envir=env)
      .gRefName <<- val_obj

      svalue(g0_ref_samples_lbl) <- paste(length(unique(.gRef$Sample.Name)),
                                          "samples.")

    } else {

      # Reset components.
      .gRef <<- NULL
      .gRefName <<- NULL
      svalue(g0_ref_drp, index=TRUE) <- 1
      svalue(g0_ref_samples_lbl) <- " 0 references"

    }

  } )

  # CHECK ---------------------------------------------------------------------

  if(debug){
    print("CHECK")
  }

  g0[3,2] <- g0_check_btn <- gbutton(text="Check subsetting", container=g0)

  addHandlerChanged(g0_check_btn, handler = function(h, ...) {

    # Get values.
    val_data <- .gData
    val_ref <- .gRef
    val_ignore <- svalue(f1_ignore_chk)
    val_word <- svalue(f1_word_chk)

    # NOTE(review): condition uses '||' so the check runs with only one of the
    # two datasets selected; confirm checkSubset handles a NULL argument.
    if (!is.null(.gData) || !is.null(.gRef)){

      # NOTE(review): 'name=title' passes graphics::title (a function) as the
      # window name -- presumably a string was intended; confirm before changing.
      chksubset_w <- gwindow(title = "Check subsetting",
                             visible = FALSE, name=title,
                             width = NULL, height= NULL, parent=w,
                             handler = NULL, action = NULL)

      chksubset_txt <- checkSubset(data=val_data,
                                   ref=val_ref,
                                   console=FALSE,
                                   ignore.case=val_ignore,
                                   word=val_word)

      gtext (text = chksubset_txt, width = NULL, height = 300, font.attr = NULL,
             wrap = FALSE, container = chksubset_w)

      visible(chksubset_w) <- TRUE

    } else {

      gmessage(msg="Data frame is NULL!\n\n
Make sure to select a dataset and a reference set",
               title="Error",
               icon = "error")

    }

  } )

  # FRAME 1 ###################################################################

  if(debug){
    print("FRAME 1")
  }

  f1 <- gframe(text = "Options",
               horizontal=FALSE,
               spacing = 10,
               container = gv)

  f1_ignore_chk <- gcheckbox(text="Ignore case",
                             checked=TRUE,
                             container=f1)

  f1_word_chk <- gcheckbox(text="Add word boundaries",
                           checked = FALSE,
                           container = f1)

  f1_ol_chk <- gcheckbox(text="Remove off-ladder peaks",
                         checked = FALSE,
                         container = f1)

  # LAYOUT --------------------------------------------------------------------

  f1g1 <- glayout(container = f1, spacing = 1)

  f1g1[1,1] <- glabel(text="Pullup analysis range (data points) around known alleles: ", container=f1g1)
  f1g1[1,2] <- f1_pullup_spb <- gspinbutton(from=0, to=1000, by=10, value=6, container=f1g1)

  f1g1[2,1] <- glabel(text="Blocking range (data points) around known alleles: ", container=f1g1)
  f1g1[2,2] <- f1_block_spb <- gspinbutton(from=0, to=1000, by=10, value=70, container=f1g1)

  f1g1[3,1] <- glabel(text="Discard pull-ups with ratio: > ", container=f1g1)
  f1g1[3,2] <- f1_limit_spb <- gspinbutton(from=0, to=10, by=0.1, value=1, container=f1g1)

  f1_discard_chk <- gcheckbox(text="Discard alleles with no pullup from the result table",
                              checked = FALSE,
                              container = f1)

  # FRAME 4 ###################################################################

  if(debug){
    print("FRAME 4")
  }

  f4 <- gframe(text = "Save as",
               horizontal=TRUE,
               spacing = 5,
               container = gv)

  glabel(text="Name for result:", container=f4)

  f4_save_edt <- gedit(text="", container=f4, expand = TRUE)

  glabel(text=" Kit attribute:", container=f4)

  f4_kit_drp <- gcombobox(items=getKit(), selected = 1,
                          editable = FALSE, container = f4, ellipsize = "none")

  # BUTTON ####################################################################

  if(debug){
    print("BUTTON")
  }

  calculate_btn <- gbutton(text="Calculate", container=gv)

  addHandlerClicked(calculate_btn, handler = function(h, ...) {

    # Get values.
    val_data <- .gData
    val_ref <- .gRef
    val_name_data <- .gDataName
    val_name_ref <- .gRefName
    val_ignore <- svalue(f1_ignore_chk)
    val_word <- svalue(f1_word_chk)
    val_ol <- svalue(f1_ol_chk)
    val_pullup <- svalue(f1_pullup_spb)
    val_block <- svalue(f1_block_spb)
    val_limit <- svalue(f1_limit_spb)
    val_discard <- svalue(f1_discard_chk)
    val_name <- svalue(f4_save_edt)
    val_kit <- svalue(f4_kit_drp)

    if(debug){
      print("Read Values:")
      print("val_data")
      print(head(val_data))
      print("val_ref")
      print(head(val_ref))
      print("val_ignore")
      print(val_ignore)
      print("val_word")
      print(val_word)
      print("val_ol")
      print(val_ol)
      print("val_pullup")
      print(val_pullup)
      print("val_block")
      print(val_block)
      print("val_limit")
      print(val_limit)
      # FIX: val_discard was read above but never echoed in the debug block,
      # unlike every other value.
      print("val_discard")
      print(val_discard)
      print("val_name")
      print(val_name)
    }

    # Check if data.
    if(!is.null(.gData) & !is.null(.gRef)){

      # Check for NA's in dye column.
      if(!any(is.na(.gData$Dye))){

        # Change button to signal a long-running computation.
        blockHandlers(calculate_btn)
        svalue(calculate_btn) <- "Processing..."
        unblockHandlers(calculate_btn)
        enabled(calculate_btn) <- FALSE

        datanew <- calculatePullup(data=val_data,
                                   ref=val_ref,
                                   pullup.range=val_pullup,
                                   block.range=val_block,
                                   ol.rm=val_ol,
                                   ignore.case=val_ignore,
                                   word=val_word,
                                   discard=val_discard,
                                   limit=val_limit,
                                   debug=debug)

        # Add attributes to result.
        attr(datanew, which="kit") <- val_kit

        # Create key-value pairs to log.
        # FIX: 'kit' is stored as an attribute above but was missing from the
        # audit log; record it alongside the other inputs.
        keys <- list("data", "ref", "pullup.range", "block.range", "ol.rm",
                     "ignore.case", "word", "discard", "limit", "kit")

        values <- list(val_name_data, val_name_ref, val_pullup, val_block, val_ol,
                       val_ignore, val_word, val_discard, val_limit, val_kit)

        # Update audit trail.
        datanew <- auditTrail(obj = datanew, key = keys, value = values,
                              label = "calculatePullup_gui", arguments = FALSE,
                              package = "strvalidator")

        # Save data.
        saveObject(name=val_name, object=datanew, parent=w, env=env)

        if(debug){
          print(str(datanew))
          print(head(datanew))
          print(paste("EXIT:", match.call()[[1]]))
        }

        # Close GUI.
        dispose(w)

      } else {

        message <- "'NA' in 'Dye' column. \nUse add dye function to fix."

        gmessage(message, title="NA detected!",
                 icon = "error",
                 parent = w)

      }

    } else {

      message <- "A dataset and a reference dataset have to be selected."

      gmessage(message, title="Datasets not selected",
               icon = "error",
               parent = w)

    }

  } )

  # INTERNAL FUNCTIONS ########################################################

  # Restore widget values previously stored in 'env' (or force the state given
  # by the 'savegui' argument).
  .loadSavedSettings <- function(){

    # First check status of save flag.
    if(!is.null(savegui)){
      svalue(savegui_chk) <- savegui
      enabled(savegui_chk) <- FALSE
      if(debug){
        print("Save GUI status set!")
      }
    } else {
      # Load save flag.
      if(exists(".strvalidator_calculatePullup_gui_savegui", envir=env, inherits = FALSE)){
        svalue(savegui_chk) <- get(".strvalidator_calculatePullup_gui_savegui", envir=env)
      }
      if(debug){
        print("Save GUI status loaded!")
      }
    }
    if(debug){
      print(svalue(savegui_chk))
    }

    # Then load settings if true.
    # NB! The historical key name '..._window' stores the pull-up range; keep
    # the key for compatibility with previously saved settings.
    if(svalue(savegui_chk)){
      if(exists(".strvalidator_calculatePullup_gui_window", envir=env, inherits = FALSE)){
        svalue(f1_pullup_spb) <- get(".strvalidator_calculatePullup_gui_window", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_block", envir=env, inherits = FALSE)){
        svalue(f1_block_spb) <- get(".strvalidator_calculatePullup_gui_block", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_limit", envir=env, inherits = FALSE)){
        svalue(f1_limit_spb) <- get(".strvalidator_calculatePullup_gui_limit", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_ol", envir=env, inherits = FALSE)){
        svalue(f1_ol_chk) <- get(".strvalidator_calculatePullup_gui_ol", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_ignore", envir=env, inherits = FALSE)){
        svalue(f1_ignore_chk) <- get(".strvalidator_calculatePullup_gui_ignore", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_word", envir=env, inherits = FALSE)){
        svalue(f1_word_chk) <- get(".strvalidator_calculatePullup_gui_word", envir=env)
      }
      if(exists(".strvalidator_calculatePullup_gui_discard", envir=env, inherits = FALSE)){
        svalue(f1_discard_chk) <- get(".strvalidator_calculatePullup_gui_discard", envir=env)
      }
      if(debug){
        print("Saved settings loaded!")
      }
    }

  }

  # Persist widget values in 'env' when the save flag is set, or wipe any
  # previously stored values when it is not.
  .saveSettings <- function(){

    # Then save settings if true.
    if(svalue(savegui_chk)){

      assign(x=".strvalidator_calculatePullup_gui_savegui", value=svalue(savegui_chk), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_window", value=svalue(f1_pullup_spb), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_block", value=svalue(f1_block_spb), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_limit", value=svalue(f1_limit_spb), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_ol", value=svalue(f1_ol_chk), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_ignore", value=svalue(f1_ignore_chk), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_word", value=svalue(f1_word_chk), envir=env)
      assign(x=".strvalidator_calculatePullup_gui_discard", value=svalue(f1_discard_chk), envir=env)

    } else { # or remove all saved values if false.

      if(exists(".strvalidator_calculatePullup_gui_savegui", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_savegui", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_window", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_window", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_block", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_block", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_limit", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_limit", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_ol", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_ol", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_ignore", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_ignore", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_word", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_word", envir = env)
      }
      if(exists(".strvalidator_calculatePullup_gui_discard", envir=env, inherits = FALSE)){
        remove(".strvalidator_calculatePullup_gui_discard", envir = env)
      }

      if(debug){
        print("Settings cleared!")
      }
    }

    if(debug){
      print("Settings saved!")
    }

  }

  # END GUI ###################################################################

  # Load GUI settings.
  .loadSavedSettings()

  # Show GUI.
  visible(w) <- TRUE
  focus(w)

}
|
# Import necessary libraries.
library(data.table)
library(tibble)
# Infix helper to paste strings together with no separator.
"%&%" <- function(a, b) paste(a, b, sep = "")
# Models and drugs used to build the input file names.
model_list <- c("ALL", "AFA", "CAU", "CHN", "HIS")
drug_list <- c("arac", "cape", "carbo", "cis", "dauno", "etop", "pacl", "peme")
# Make a data frame with all results from each model per drug per pop:
# read each file, tag rows with the model and drug, and accumulate.
# FIX: initialise the accumulator explicitly. The original
# `exists("all_assoc")` test would silently merge onto a stale `all_assoc`
# left in the global environment by a previous run.
all_assoc <- NULL
for (drug in drug_list) {
  for (model in model_list) {
    output <- fread("/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_" %&% drug %&% "_PCAIR_PAV_filtered_" %&% model %&% "_baseline_rho0.1_zpval0.05.adj.txt")
    output <- add_column(output, model = model, .before = "chr")
    output <- add_column(output, drug = drug, .before = "chr")
    if (is.null(all_assoc)) {
      all_assoc <- output
    } else {
      # Full outer join so file-specific columns are preserved.
      all_assoc <- merge(x = all_assoc, y = output, all = TRUE)
    }
  }
}
# Significance subsets at increasingly strict thresholds.
all_sig <- subset(all_assoc, pvalues_adjusted_BH < .75)
most_sig <- subset(all_assoc, pvalues_adjusted_BH < .1)
BF_sig <- subset(all_assoc, pvalues_adjusted_bonferroni < .05)
# Output data frames into directory.
fwrite(all_assoc, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_allassoc_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(all_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_sig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(most_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_most_sig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(BF_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_BFsig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
| /scripts/05_significant_hits.R | no_license | rnaimehaom/chemo_toxicity_pwas | R | false | false | 2,020 | r | #Import necessasry libraries
library(data.table)
library(tibble)
# Infix helper to paste strings together with no separator.
"%&%" <- function(a, b) paste(a, b, sep = "")
# Models and drugs used to build the input file names.
model_list <- c("ALL", "AFA", "CAU", "CHN", "HIS")
drug_list <- c("arac", "cape", "carbo", "cis", "dauno", "etop", "pacl", "peme")
# Make a data frame with all results from each model per drug per pop:
# read each file, tag rows with the model and drug, and accumulate.
# FIX: initialise the accumulator explicitly. The original
# `exists("all_assoc")` test would silently merge onto a stale `all_assoc`
# left in the global environment by a previous run.
all_assoc <- NULL
for (drug in drug_list) {
  for (model in model_list) {
    output <- fread("/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_" %&% drug %&% "_PCAIR_PAV_filtered_" %&% model %&% "_baseline_rho0.1_zpval0.05.adj.txt")
    output <- add_column(output, model = model, .before = "chr")
    output <- add_column(output, drug = drug, .before = "chr")
    if (is.null(all_assoc)) {
      all_assoc <- output
    } else {
      # Full outer join so file-specific columns are preserved.
      all_assoc <- merge(x = all_assoc, y = output, all = TRUE)
    }
  }
}
# Significance subsets at increasingly strict thresholds.
all_sig <- subset(all_assoc, pvalues_adjusted_BH < .75)
most_sig <- subset(all_assoc, pvalues_adjusted_BH < .1)
BF_sig <- subset(all_assoc, pvalues_adjusted_bonferroni < .05)
# Output data frames into directory.
fwrite(all_assoc, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_allassoc_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(all_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_sig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(most_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_most_sig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
fwrite(BF_sig, "/home/ashley/LCL_chemotherapy/YRI/YRI_pwas_results/adj_assoc_output/YRI_BFsig_PCAIR_PAV_filtered_baseline_rho0.1_zpval0.05.adj.txt", na = "NA", quote = F, sep = "\t", col.names = T)
|
#' @title Enrichment analysis for genes of network
#'
#' @description Enrichment analysis of a set of genes derived from the network
#' of any condition using WebGestalt interface in R. Given a vector of genes,
#' this function will return the enrichment related to the selected database.
#'
#' @param organism WebGestaltR supports 12 organisms. Users can use the function
#' listOrganism() to check available organisms.
#' @param database The functional categories for the enrichment analysis. Users
#' can use the function listGeneSet() to check the available functional databases
#' for the selected organism. Multiple databases in a vector are supported too.
#' @param genes Should be an R vector object containing the interesting gene list.
#' @param refGene Should be an R vector object containing the reference gene list.
#' There is a list with reference genes for 5 organisms in this package (see
#' \code{\link{refGenes}}).
#' @param GeneType The ID type of the genes and refGene (they must be the same type).
#' Users can use the function listIdType() to check the available gene types.
#' @param fdrMethod Has five FDR methods: holm, hochberg, hommel, bonferroni, BH
#' and BY (default: BH).
#' @param fdrThr The significant threshold for fdrMethod (default: 0.05).
#' @param minNum Will be exclude the categories with the number of annotated
#' genes less than minNum for enrichment analysis (default: 5).
#' @param maxNum Will be exclude the categories with the number of annotated
#' genes larger than maxNum for enrichment analysis (default: 500).
#'
#' @return
#' Returns an list with the results of the enrichment analysis of the genes and
#' a network with the database ID (column 1) and the corresponding
#' genes (column 2).
#'
#' @importFrom WebGestaltR WebGestaltR listOrganism listGeneSet listIdType
#'
#' @examples
#' \dontrun{
#' # load the CeTF class object resulted from runAnalysis function
#' data(CeTFdemo)
#'
#' # getting the genes in network of condition 1
#' genes <- unique(c(as.character(NetworkData(CeTFdemo, 'network1')[, 'gene1']),
#' as.character(NetworkData(CeTFdemo, 'network1')[, 'gene2'])))
#'
#' # performing getEnrich analysis
#' cond1 <- getEnrich(organism='hsapiens', database='geneontology_Biological_Process',
#' genes=genes, GeneType='ensembl_gene_id',
#' refGene=refGenes$Homo_sapiens$ENSEMBL,
#' fdrMethod = 'BH', fdrThr = 0.05, minNum = 5, maxNum = 500)
#' }
#'
#' @export
getEnrich <- function(organism, database, genes, refGene, GeneType, fdrMethod = "BH",
    fdrThr = 0.05, minNum = 5, maxNum = 500) {
    # Validate inputs early with informative errors.
    if (!organism %in% listOrganism()) {
        stop("Select a valid organism")
    }
    # FIX: 'database' may be a vector of databases (documented above), so the
    # membership test must be collapsed with all(); an if() condition of
    # length > 1 is an error since R 4.2.
    if (!all(database %in% listGeneSet()[, 1])) {
        stop("Select a valid database to perform the enrichment")
    }
    if (missing(genes)) {
        stop("No genes provided")
    }
    if (missing(refGene)) {
        stop("No refGene provided")
    }
    if (!GeneType %in% listIdType()) {
        stop("Select a valid GeneType for genes")
    }
    if (!fdrMethod %in% c("holm", "hochberg", "hommel", "bonferroni", "BH",
        "BY")) {
        stop("Select a valid fdrMethod")
    }

    # Over-representation analysis via the WebGestaltR service.
    res <- WebGestaltR(enrichMethod = "ORA", isOutput = FALSE, organism = organism,
        enrichDatabase = database, interestGene = genes, interestGeneType = GeneType,
        referenceGene = refGene, referenceGeneType = GeneType, fdrMethod = fdrMethod,
        fdrThr = fdrThr, minNum = minNum, maxNum = maxNum)
    if (is.null(res)) {
        stop("None pathway enriched: try to use a different set of genes")
    }

    # Normalise the column names used below. NOTE(review): column 11 is
    # assumed to hold the semicolon-separated gene list; this positional
    # index is fragile across WebGestaltR versions -- confirm.
    colnames(res)[1] <- "ID"
    colnames(res)[11] <- "geneID"

    # Expand each enriched category into one (category, gene) pair per gene.
    tmp <- apply(res, 1, function(x) {
        setGenes <- strsplit(x[["geneID"]], ";")[[1]]
        data.frame(pathways = rep(as.character(x[["ID"]]), length(setGenes)),
            gc = setGenes)
    })
    tmp <- do.call(rbind, tmp)
    tmp <- data.frame(gene1 = tmp$pathways, gene2 = tmp$gc)

    return(list(results = res, netGO = tmp))
}
| /R/getEnrich.R | no_license | cbiagii/CeTF | R | false | false | 4,176 | r | #' @title Enrichment analysis for genes of network
#'
#' @description Enrichment analysis of a set of genes derived from the network
#' of any condition using WebGestalt interface in R. Given a vector of genes,
#' this function will return the enrichment related to the selected database.
#'
#' @param organism WebGestaltR supports 12 organisms. Users can use the function
#' listOrganism() to check available organisms.
#' @param database The functional categories for the enrichment analysis. Users
#' can use the function listGeneSet() to check the available functional databases
#' for the selected organism. Multiple databases in a vector are supported too.
#' @param genes Should be an R vector object containing the interesting gene list.
#' @param refGene Should be an R vector object containing the reference gene list.
#' There is a list with reference genes for 5 organisms in this package (see
#' \code{\link{refGenes}}).
#' @param GeneType The ID type of the genes and refGene (they must be the same type).
#' Users can use the function listIdType() to check the available gene types.
#' @param fdrMethod Has five FDR methods: holm, hochberg, hommel, bonferroni, BH
#' and BY (default: BH).
#' @param fdrThr The significant threshold for fdrMethod (default: 0.05).
#' @param minNum Will be exclude the categories with the number of annotated
#' genes less than minNum for enrichment analysis (default: 5).
#' @param maxNum Will be exclude the categories with the number of annotated
#' genes larger than maxNum for enrichment analysis (default: 500).
#'
#' @return
#' Returns an list with the results of the enrichment analysis of the genes and
#' a network with the database ID (column 1) and the corresponding
#' genes (column 2).
#'
#' @importFrom WebGestaltR WebGestaltR listOrganism listGeneSet listIdType
#'
#' @examples
#' \dontrun{
#' # load the CeTF class object resulted from runAnalysis function
#' data(CeTFdemo)
#'
#' # getting the genes in network of condition 1
#' genes <- unique(c(as.character(NetworkData(CeTFdemo, 'network1')[, 'gene1']),
#' as.character(NetworkData(CeTFdemo, 'network1')[, 'gene2'])))
#'
#' # performing getEnrich analysis
#' cond1 <- getEnrich(organism='hsapiens', database='geneontology_Biological_Process',
#' genes=genes, GeneType='ensembl_gene_id',
#' refGene=refGenes$Homo_sapiens$ENSEMBL,
#' fdrMethod = 'BH', fdrThr = 0.05, minNum = 5, maxNum = 500)
#' }
#'
#' @export
getEnrich <- function(organism, database, genes, refGene, GeneType, fdrMethod = "BH",
    fdrThr = 0.05, minNum = 5, maxNum = 500) {
    # Validate inputs early with informative errors.
    if (!organism %in% listOrganism()) {
        stop("Select a valid organism")
    }
    # FIX: 'database' may be a vector of databases (documented above), so the
    # membership test must be collapsed with all(); an if() condition of
    # length > 1 is an error since R 4.2.
    if (!all(database %in% listGeneSet()[, 1])) {
        stop("Select a valid database to perform the enrichment")
    }
    if (missing(genes)) {
        stop("No genes provided")
    }
    if (missing(refGene)) {
        stop("No refGene provided")
    }
    if (!GeneType %in% listIdType()) {
        stop("Select a valid GeneType for genes")
    }
    if (!fdrMethod %in% c("holm", "hochberg", "hommel", "bonferroni", "BH",
        "BY")) {
        stop("Select a valid fdrMethod")
    }

    # Over-representation analysis via the WebGestaltR service.
    res <- WebGestaltR(enrichMethod = "ORA", isOutput = FALSE, organism = organism,
        enrichDatabase = database, interestGene = genes, interestGeneType = GeneType,
        referenceGene = refGene, referenceGeneType = GeneType, fdrMethod = fdrMethod,
        fdrThr = fdrThr, minNum = minNum, maxNum = maxNum)
    if (is.null(res)) {
        stop("None pathway enriched: try to use a different set of genes")
    }

    # Normalise the column names used below. NOTE(review): column 11 is
    # assumed to hold the semicolon-separated gene list; this positional
    # index is fragile across WebGestaltR versions -- confirm.
    colnames(res)[1] <- "ID"
    colnames(res)[11] <- "geneID"

    # Expand each enriched category into one (category, gene) pair per gene.
    tmp <- apply(res, 1, function(x) {
        setGenes <- strsplit(x[["geneID"]], ";")[[1]]
        data.frame(pathways = rep(as.character(x[["ID"]]), length(setGenes)),
            gc = setGenes)
    })
    tmp <- do.call(rbind, tmp)
    tmp <- data.frame(gene1 = tmp$pathways, gene2 = tmp$gc)

    return(list(results = res, netGO = tmp))
}
|
#' @title brcaData
#' @description Data object bundled with the package. NOTE(review): the
#'   original roxygen was an unfilled template (FUNCTION_TITLE /
#'   FUNCTION_DESCRIPTION). The object's structure is not visible from this
#'   file -- presumably breast-cancer (BRCA) data related to GEO series
#'   GSE3744; confirm and document its actual format.
#'
'brcaData'
| /R/data.R | no_license | leevenstar/GSE3744o | R | false | false | 78 | r | #' @title FUNCTION_TITLE
#' @description Data object bundled with the package. NOTE(review): unfilled
#'   roxygen template; the object's structure is not visible from this file --
#'   presumably breast-cancer (BRCA) data; confirm and document its contents.
#'
'brcaData'
|
# Create a sequence to 100 and scale values to (0, 25)
t <- c(0:100)
t <- t * 25/100
# Define the time series: sin(t) and its quarter-period shift sin(t + pi/2)
Yt1 <- sin(t)
Yt2 <- sin(t + pi/2)
# Plot the first series; the extra ylim headroom leaves space for the legend
plot(
t,
Yt1,
ylim = c(-1.1, 1.25),
type = "l",
col = "red",
lwd = 1,
lty = 1,
xlab = "Time",
ylab = NA
)
# FIX: the legend below lists a blue dashed sin(t + pi/2) series that the
# original script never drew; overlay it so the legend matches the plot.
lines(t, Yt2, type = "l", col = "blue", lwd = 1, lty = 2)
legend(
"top",
inset=0.01,
col=c("red","blue"),
lty=c(1,2),
lwd=c(1,1),
legend = c(
expression(sin(t)),
expression(sin(t+pi/2))),
bg="white",
box.col="white",
horiz=TRUE
) | /sandbox/example_mlalicea.R | no_license | ds-wm/atsa-2021 | R | false | false | 485 | r | # Create a sequence to 100 and scale values to (0, 25)
# Scale the 0..100 sequence to the interval (0, 25) in steps of 0.25.
t <- c(0:100)
t <- t * 25/100
# Define the time series
Yt1 <- sin(t)
# Plot our time series
plot(
t,
Yt1,
ylim = c(-1.1, 1.25),
type = "l",
col = "red",
lwd = 1,
lty = 1,
xlab = "Time",
ylab = NA
)
# NOTE(review): the legend advertises a second (blue, dashed) series
# sin(t + pi/2), but no such series is ever drawn -- presumably a lines()
# call is missing; confirm the intended output.
legend(
"top",
inset=0.01,
col=c("red","blue"),
lty=c(1,2),
lwd=c(1,1),
legend = c(
expression(sin(t)),
expression(sin(t+pi/2))),
bg="white",
box.col="white",
horiz=TRUE
) |
#' @details
#' `ism_get_site_matrix()` is a deprecated version of `ism()` that only works
#' on `xts` objects. It takes in 1 column of historical data for a single
#' site and applies (ISM) to it. This function allows you to change the start
#' date of the returned data in it, while `ism()` does not. When using `ism()`,
#' [reindex()] should be used after it to change the start date.
#' `ism_get_site_matrix()` can be used on monthly or annual data. If applying it
#' to monthly data, then `xtsData` needs to be monthly data, and `monthly`
#' should be set to `TRUE`. If using annual data, then `xtsData` should
#' be annual, i.e., all with a December time stamp, and `monthly` should be
#' set to `FALSE`. If `monthly` is `FALSE` and `xtsData` is
#' monthly data, an error will occur.
#'
#' @return `ism_get_site_matrix()` returns an `xts` matrix with the number of
#' years/months specified by `nYrs` and the number of columns equal to the
#' number of years in `xtsData`
#'
#' @param xtsData An xts vector.
#' @param startMonth The start month and year of the return matrix. Should be
#' able to be cast to a [zoo::yearmon].
#' @param nYrs The number of years to create the data for. Defaults to the
#' number of years in xtsData, but can be less.
#' @param monthly Boolean that should be set to `TRUE` if the data are monthly;
#' should set to `FALSE` if annual data.
#'
#' @export
#' @rdname ism
ism_get_site_matrix <- function(xtsData, startMonth, nYrs = NA, monthly = TRUE)
{
  # Deprecated entry point; steer callers toward ism().
  .Deprecated("ism")
  if (!xts::is.xts(xtsData)) {
    stop('xtsData is not of type xts')
  }
  n_hist_years <- xts::nyears(xtsData)
  if (is.na(nYrs)) {
    # Default trace length: as many years as the historical record holds.
    nYrs <- n_hist_years
  } else if (nYrs > n_hist_years) {
    stop('nYrs is longer than xtsData.')
  }
  # Strip the xts class so plain matrix stacking works, then stack the record
  # on top of itself: every wrap-around ISM trace becomes a contiguous slice
  # of this doubled series.
  plain <- matrix(unclass(xtsData))
  doubled <- rbind(plain, plain)
  # One trace per historical start year.
  traceIds <- 1:n_hist_years
  ismMatrix <- simplify2array(
    lapply(traceIds, getSubsetOfData, doubled, nYrs, monthly)
  )
  # Rebuild the yearmon index from startMonth: 1/12-year steps for monthly
  # data, whole-year steps for annual data.
  if (monthly) {
    ismYearMon <- zoo::as.yearmon(startMonth) + seq(0, nrow(ismMatrix) - 1) / 12
  } else {
    ismYearMon <- zoo::as.yearmon(startMonth) + seq(0, nrow(ismMatrix) - 1)
  }
  xts::as.xts(zoo::read.zoo(data.frame(ismYearMon, ismMatrix)))
}
| /R/ism_get_matrix.R | no_license | BoulderCodeHub/CRSSIO | R | false | false | 2,413 | r | #' @details
#' `ism_get_site_matrix()` is a deprecated version of `ism()` that only works
#' on `xts` objects. It takes in 1 column of historical data for a single
#' site and applies (ISM) to it. This function allows you to change the start
#' date of the returned data in it, while `ism()` does not. When using `ism()`,
#' [reindex()] should be used after it to change the start date.
#' `ism_get_site_matrix()` can be used on monthly or annual data. If applying it
#' to monthly data, then `xtsData` needs to be monthly data, and `monthly`
#' should be set to `TRUE`. If using annual data, then `xtsData` should
#' be annual, i.e., all with a December time stamp, and `monthly` should be
#' set to `FALSE`. If `monthly` is `FALSE` and `xtsData` is
#' monthly data, an error will occur.
#'
#' @return `ism_get_site_matrix()` returns an `xts` matrix with the number of
#' years/months specified by `nYrs` and the number of columns equal to the
#' number of years in `xtsData`
#'
#' @param xtsData An xts vector.
#' @param startMonth The start month and year of the return matrix. Should be
#' able to be cast to a [zoo::yearmon].
#' @param nYrs The number of years to create the data for. Defaults to the
#' number of years in xtsData, but can be less.
#' @param monthly Boolean that should be set to `TRUE` if the data are monthly;
#' should set to `FALSE` if annual data.
#'
#' @export
#' @rdname ism
ism_get_site_matrix <- function(xtsData, startMonth, nYrs = NA, monthly = TRUE)
{
  # Deprecated entry point; steer callers toward ism().
  .Deprecated("ism")
  if (!xts::is.xts(xtsData)) {
    stop('xtsData is not of type xts')
  }
  n_hist_years <- xts::nyears(xtsData)
  if (is.na(nYrs)) {
    # Default trace length: as many years as the historical record holds.
    nYrs <- n_hist_years
  } else if (nYrs > n_hist_years) {
    stop('nYrs is longer than xtsData.')
  }
  # Strip the xts class so plain matrix stacking works, then stack the record
  # on top of itself: every wrap-around ISM trace becomes a contiguous slice
  # of this doubled series.
  plain <- matrix(unclass(xtsData))
  doubled <- rbind(plain, plain)
  # One trace per historical start year.
  traceIds <- 1:n_hist_years
  ismMatrix <- simplify2array(
    lapply(traceIds, getSubsetOfData, doubled, nYrs, monthly)
  )
  # Rebuild the yearmon index from startMonth: 1/12-year steps for monthly
  # data, whole-year steps for annual data.
  if (monthly) {
    ismYearMon <- zoo::as.yearmon(startMonth) + seq(0, nrow(ismMatrix) - 1) / 12
  } else {
    ismYearMon <- zoo::as.yearmon(startMonth) + seq(0, nrow(ismMatrix) - 1)
  }
  xts::as.xts(zoo::read.zoo(data.frame(ismYearMon, ismMatrix)))
}
|
## Driver for the closed-form Markov transition-probability computation.
##source("markovgiventheta.R")
#This program was coded by Nam Lethanh from Osaka University (2011)
############INPUT PART############################
jmax=5 # number of condition states (bridge deterioration example)
theta<-c(0.06,0.09,0.19,0.25,0) # per-state rate parameters used in exp(-theta*z); last state has rate 0
z=0.1 # interval, i.e. the elapsed time in the Markov process
########################################
# NOTE(review): markovprob() below allocates its own working matrices, so
# these two top-level objects are apparently never read -- confirm before
# removing them.
thetasa<-matrix(double(1),nrow=jmax,ncol=jmax)
probability<-matrix(double(1),jmax,jmax)
##############defining thetasa value#################
##############################################
#' Closed-form transition probabilities for a progressive Markov
#' deterioration process with exponential sojourn times.
#'
#' probb[i, j] is Pr(state j after elapsed time z | state i now). States can
#' only move forward (j >= i), so the result is upper triangular and the
#' diagonal is exp(-theta[i] * z).
#'
#' @param jmax  number of condition states; length(theta) must equal jmax.
#' @param z     elapsed time over which transitions are evaluated.
#' @param theta per-state rate parameters used in exp(-theta * z); the last
#'   entry is expected to be 0 (absorbing final state).
#' @param probb retained for backward compatibility with existing callers;
#'   it is overwritten internally (exactly as in the original) and never read.
#' @return the jmax x jmax transition matrix (also printed, as before).
markovprob <- function(jmax, z, theta, probb) {
  probb <- matrix(0, nrow = jmax, ncol = jmax)
  # Pairwise rate differences: thetasa[e, k] = theta[e] - theta[k].  These
  # appear as denominators below, so theta values must be distinct within
  # any i:j window (equal rates would divide by zero).
  thetasa <- outer(theta, theta, "-")
  print(thetasa)
  for (i in 1:jmax) {
    for (j in 1:jmax) {
      if (i > j) {
        # Deterioration is one-way: no transition back to a better state.
        probb[i, j] <- 0
        next
      }
      # Product of the rates passed through on the way from i to j.
      # BUG FIX: the original looped over i:(j-1), which runs BACKWARDS
      # whenever j <= i (R's descending-colon pitfall) and only survived
      # thanks to an inner guard; compute the product only when the range
      # is genuinely forward.
      reserve <- if (j > i) prod(theta[i:(j - 1)]) else 1
      prob1 <- 0
      for (k in i:j) {
        prod11 <- 1
        for (e in i:j) {
          if (e != k) {
            prod11 <- thetasa[e, k] * prod11
          }
        }
        prob1 <- prob1 + exp(-theta[k] * z) / prod11
      }
      probb[i, j] <- prob1 * reserve
    }
  }
  # Print and return the matrix; print() returns its argument invisibly, so
  # `res <- markovprob(...)` keeps working.  The per-cell print(reserve)
  # debug spam from the original has been removed.
  print(probb)
}
#########################
# Compute the transition matrix.  BUG FIX: the original passed `probb`, a
# name never defined at the top level; it only worked because R evaluates
# arguments lazily and markovprob() overwrites that argument before it is
# ever forced.  Pass the pre-allocated `probability` matrix instead.
pro <- markovprob(jmax, z, theta, probability)
# After running this code in R, from R console, you just type down pro, results will appear.
# library() fails fast if MASS is unavailable; require() (used originally)
# only warns and would let write.matrix() error later.
library(MASS)
write.matrix(pro, file = "mtp.csv", sep = ",")
| /BR-Avalanche/markovgiventheta.R | no_license | namkyodai/Models | R | false | false | 1,548 | r | ##
##source("markovgiventheta.R")
#This program was coded by Nam Lethanh from Osaka University (2011)
############INPUT PART############################
jmax=5 # number of condition states (bridge deterioration example)
theta<-c(0.06,0.09,0.19,0.25,0) # per-state rate parameters used in exp(-theta*z); last state has rate 0
z=0.1 # interval, i.e. the elapsed time in the Markov process
########################################
# NOTE(review): markovprob() below allocates its own working matrices, so
# these two top-level objects are apparently never read -- confirm before
# removing them.
thetasa<-matrix(double(1),nrow=jmax,ncol=jmax)
probability<-matrix(double(1),jmax,jmax)
##############defining thetasa value#################
##############################################
# Transition matrix of a progressive Markov deterioration process, computed
# in closed form from exponential rate parameters.  Behaviour matches the
# original exactly, including the diagnostic print() calls: thetasa once,
# the stage product once per (i, j) cell, and the final matrix (which is
# also the -- invisible -- return value).
markovprob <- function(jmax, z, theta, probb) {
  # Working copies; the passed-in `probb` is ignored, as before.
  probb <- matrix(0, jmax, jmax)
  # thetasa[a, b] = theta[a] - theta[b]; used as denominators below.
  thetasa <- matrix(0, nrow = jmax, ncol = jmax)
  for (a in 1:jmax) {
    for (b in 1:jmax) {
      thetasa[a, b] <- theta[a] - theta[b]
    }
  }
  print(thetasa)
  for (i in 1:jmax) {
    for (j in 1:jmax) {
      # Product of the rates traversed between states i and j (1 if j <= i).
      reserve <- 1
      if (j > i) {
        for (k in i:(j - 1)) {
          reserve <- reserve * theta[k]
        }
      }
      print(reserve)
      if (i > j) {
        # No transitions toward better states.
        probb[i, j] <- 0
      } else {
        # Exponential-mixture sum over the intermediate states k = i..j.
        acc <- 0
        for (k in i:j) {
          denom <- 1
          for (e in i:j) {
            if (e != k) {
              denom <- denom * thetasa[e, k]
            }
          }
          acc <- acc + exp(-theta[k] * z) / denom
        }
        probb[i, j] <- acc * reserve
      }
    }
  }
  print(probb)
}
#########################
# NOTE(review): `probb` is never defined at the top level; this call only
# works because R's lazy evaluation means the argument is overwritten inside
# markovprob() before it is forced.  Consider passing `probability` instead.
pro<-markovprob(jmax,z,theta,probb)
# After running this code in R, from R console, you just type down pro, results will appear.
# NOTE(review): require() only warns when MASS is missing; library() would
# fail fast before write.matrix() is reached.
require(MASS)
write.matrix(pro, file="mtp.csv",sep=",")
|
# Bar chart of IRS4 exon-level RPKM per sample, written to barplot.pdf.
pdf("barplot.pdf", width = 6, height = 4)
# Expression table must provide columns `exonRPKM` and `Sample`.
expr_tab <- read.table("irs4_rna_exp.txt", sep = "\t", header = TRUE)
# Named vector so barplot() labels each bar with its sample name.
rpkm <- setNames(expr_tab$exonRPKM, expr_tab$Sample)
# Perpendicular axis labels (las = 2); y axis fixed to 0-300 RPKM.
barplot(rpkm, ylab = "RPKM", las = 2, col = "black", ylim = c(0, 300))
dev.off()
| /barplot.R | no_license | rahulk87/myCodes | R | false | false | 227 | r | pdf("barplot.pdf", width=6, height=4)
# Load the per-sample expression table (needs columns Sample and exonRPKM).
mat <- read.table("irs4_rna_exp.txt", sep="\t", header=TRUE)
# Build a named vector so barplot() labels each bar with its sample name.
my_vector=mat$exonRPKM
names(my_vector)=mat$Sample
# Perpendicular sample labels (las=2); y axis fixed to 0-300 RPKM.
barplot(my_vector, ylab="RPKM", las=2, col="black", ylim=c(0,300))
# Close the PDF device opened at the top of this script.
dev.off()
|
ind <- as.character(read.table("new_ind")[,1])
pheno <- read.table("new_pheno")
load("new_marker.RData")
source("cgwas.R")
dat_fit <- fit(pheno=pheno)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit1.pdf",index=1:40,len=0)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit2.pdf",index=1:40,len=40)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit3.pdf",index=1:40,len=80)
fit_plot1(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit4.pdf",index=1:22,len=120)
par_dat <- dat_gen(dat=dat_fit)
marker <- new_marker[-par_dat$outlier,-1]
pop <- read.table("new_meanQ")[-par_dat$outlier,]
snp1 <- as.matrix(marker)
snp1[which(snp1==-1)] <- NA
write.table(t(snp1),file="snp.txt",row.names = F,col.names = F,quote = F,sep="\t")
snp2 <- snp1/2
K <- emma_kinship(t(snp2))
ret_A <- conGWAS(m=snp1,p=par_dat$parameter_adjust[,1],q=pop,K=K)
ret_R <- conGWAS(m=snp1,p=par_dat$parameter_adjust[,2],q=pop,K=K)
ret_lambda <- conGWAS(m=snp1,p=par_dat$parameter[,3],q=pop,K=K)
ret_tI <- conGWAS(m=snp1,p=par_dat$parameter[,4],q=pop,K=K)
save(ret_A,file="ret_A.RData");save(ret_R,file="ret_R.RData")
save(ret_lambda,file="ret_lambda.RData");save(ret_tI,file="ret_tI.RData")
#ret_Alog <- conGWAS(m=snp1,p=log(par_dat$parameter[,1]),q=pop,K=K)
#ret_Rlog <- conGWAS(m=snp1,p=log(par_dat$parameter[,2]),q=pop,K=K)
#save(ret_Alog,file="ret_Alog.RData");save(ret_Rlog,file="ret_Rlog.RData")
pos <- read.table("../Genome_info/new_marker_info.txt")
my.pvalue.listA <-list("GLM"=ret_A$pvalue,"Q"=ret_A$Qpvalue,"Q+K"=ret_A$QKpvalue)
pdf("conA_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listA, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_A <- ginf(PV=ret_A$pvalue) #0.9748421
Qgif_A <- ginf(PV=ret_A$Qpvalue) #0.9593254
QKgif_A <- ginf(PV=ret_A$QKpvalue) #0.9671246
manhattan_plot(pv=ret_A$Qpvalue,pos=pos,thre=4.1,filen="con_A_gwas")
my.pvalue.listR <-list("GLM"=ret_R$pvalue,"Q"=ret_R$Qpvalue,"Q+K"=ret_R$QKpvalue)
pdf("conR_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listR, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_R <- ginf(PV=ret_R$pvalue) #1.066342
Qgif_R <- ginf(PV=ret_R$Qpvalue) #1.024329
QKgif_R <- ginf(PV=ret_R$QKpvalue) #1.010251
manhattan_plot(pv=ret_R$QKpvalue,pos=pos,thre=4.22,filen="con_R_gwas")
my.pvalue.listL <-list("GLM"=ret_lambda$pvalue,"Q"=ret_lambda$Qpvalue,"Q+K"=ret_lambda$QKpvalue)
pdf("conL_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listL, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_lambda <- ginf(PV=ret_lambda$pvalue) #0.9621696
Qgif_lambda <- ginf(PV=ret_lambda$Qpvalue) #0.9707739
QKgif_lambda <- ginf(PV=ret_lambda$QKpvalue) #0.9888482
manhattan_plot(pv=ret_lambda$pvalue,pos=pos,thre=4.32,filen="con_L_gwas")
my.pvalue.listT <-list("GLM"=ret_tI$pvalue,"Q"=ret_tI$Qpvalue,"Q+K"=ret_tI$QKpvalue)
pdf("conT_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listT, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_T <- ginf(PV=ret_tI$pvalue) #1.007188
Qgif_T <- ginf(PV=ret_tI$Qpvalue) #0.9938116
QKgif_T <- ginf(PV=ret_tI$QKpvalue) #1.018217
manhattan_plot(pv=ret_tI$Qpvalue,pos=pos,thre=4.5,filen="con_TI_gwas")
########Genetic effect##################
| /con_test.R | no_license | QianRuZhang01/callusQTL | R | false | false | 3,266 | r |
ind <- as.character(read.table("new_ind")[,1])
pheno <- read.table("new_pheno")
load("new_marker.RData")
source("cgwas.R")
dat_fit <- fit(pheno=pheno)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit1.pdf",index=1:40,len=0)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit2.pdf",index=1:40,len=40)
fit_plot(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit3.pdf",index=1:40,len=80)
fit_plot1(dat=dat_fit,pheno=pheno,ind=ind,filen="Figure_growth_fit4.pdf",index=1:22,len=120)
par_dat <- dat_gen(dat=dat_fit)
marker <- new_marker[-par_dat$outlier,-1]
pop <- read.table("new_meanQ")[-par_dat$outlier,]
snp1 <- as.matrix(marker)
snp1[which(snp1==-1)] <- NA
write.table(t(snp1),file="snp.txt",row.names = F,col.names = F,quote = F,sep="\t")
snp2 <- snp1/2
K <- emma_kinship(t(snp2))
ret_A <- conGWAS(m=snp1,p=par_dat$parameter_adjust[,1],q=pop,K=K)
ret_R <- conGWAS(m=snp1,p=par_dat$parameter_adjust[,2],q=pop,K=K)
ret_lambda <- conGWAS(m=snp1,p=par_dat$parameter[,3],q=pop,K=K)
ret_tI <- conGWAS(m=snp1,p=par_dat$parameter[,4],q=pop,K=K)
save(ret_A,file="ret_A.RData");save(ret_R,file="ret_R.RData")
save(ret_lambda,file="ret_lambda.RData");save(ret_tI,file="ret_tI.RData")
#ret_Alog <- conGWAS(m=snp1,p=log(par_dat$parameter[,1]),q=pop,K=K)
#ret_Rlog <- conGWAS(m=snp1,p=log(par_dat$parameter[,2]),q=pop,K=K)
#save(ret_Alog,file="ret_Alog.RData");save(ret_Rlog,file="ret_Rlog.RData")
pos <- read.table("../Genome_info/new_marker_info.txt")
my.pvalue.listA <-list("GLM"=ret_A$pvalue,"Q"=ret_A$Qpvalue,"Q+K"=ret_A$QKpvalue)
pdf("conA_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listA, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_A <- ginf(PV=ret_A$pvalue) #0.9748421
Qgif_A <- ginf(PV=ret_A$Qpvalue) #0.9593254
QKgif_A <- ginf(PV=ret_A$QKpvalue) #0.9671246
manhattan_plot(pv=ret_A$Qpvalue,pos=pos,thre=4.1,filen="con_A_gwas")
my.pvalue.listR <-list("GLM"=ret_R$pvalue,"Q"=ret_R$Qpvalue,"Q+K"=ret_R$QKpvalue)
pdf("conR_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listR, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_R <- ginf(PV=ret_R$pvalue) #1.066342
Qgif_R <- ginf(PV=ret_R$Qpvalue) #1.024329
QKgif_R <- ginf(PV=ret_R$QKpvalue) #1.010251
manhattan_plot(pv=ret_R$QKpvalue,pos=pos,thre=4.22,filen="con_R_gwas")
my.pvalue.listL <-list("GLM"=ret_lambda$pvalue,"Q"=ret_lambda$Qpvalue,"Q+K"=ret_lambda$QKpvalue)
pdf("conL_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listL, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_lambda <- ginf(PV=ret_lambda$pvalue) #0.9621696
Qgif_lambda <- ginf(PV=ret_lambda$Qpvalue) #0.9707739
QKgif_lambda <- ginf(PV=ret_lambda$QKpvalue) #0.9888482
manhattan_plot(pv=ret_lambda$pvalue,pos=pos,thre=4.32,filen="con_L_gwas")
my.pvalue.listT <-list("GLM"=ret_tI$pvalue,"Q"=ret_tI$Qpvalue,"Q+K"=ret_tI$QKpvalue)
pdf("conT_QQ.pdf",height=4,width=4)
qqunif.plot(my.pvalue.listT, auto.key=list(corner=c(.95,.05)),conf.alpha=.1)
dev.off()
LReggif_T <- ginf(PV=ret_tI$pvalue) #1.007188
Qgif_T <- ginf(PV=ret_tI$Qpvalue) #0.9938116
QKgif_T <- ginf(PV=ret_tI$QKpvalue) #1.018217
manhattan_plot(pv=ret_tI$Qpvalue,pos=pos,thre=4.5,filen="con_TI_gwas")
########Genetic effect##################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelStatistics}
\alias{ChannelStatistics}
\title{ChannelStatistics Object}
\usage{
ChannelStatistics(commentCount = NULL, hiddenSubscriberCount = NULL,
subscriberCount = NULL, videoCount = NULL, viewCount = NULL)
}
\arguments{
\item{commentCount}{The number of comments for the channel}
\item{hiddenSubscriberCount}{Whether or not the number of subscribers is shown for this user}
\item{subscriberCount}{The number of subscribers that the channel has}
\item{videoCount}{The number of videos uploaded to the channel}
\item{viewCount}{The number of times the channel has been viewed}
}
\value{
ChannelStatistics object
}
\description{
ChannelStatistics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Statistics about a channel: number of subscribers, number of videos in the channel, etc.
}
| /googleyoutubev3.auto/man/ChannelStatistics.Rd | permissive | uwazac/autoGoogleAPI | R | false | true | 941 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelStatistics}
\alias{ChannelStatistics}
\title{ChannelStatistics Object}
\usage{
ChannelStatistics(commentCount = NULL, hiddenSubscriberCount = NULL,
subscriberCount = NULL, videoCount = NULL, viewCount = NULL)
}
\arguments{
\item{commentCount}{The number of comments for the channel}
\item{hiddenSubscriberCount}{Whether or not the number of subscribers is shown for this user}
\item{subscriberCount}{The number of subscribers that the channel has}
\item{videoCount}{The number of videos uploaded to the channel}
\item{viewCount}{The number of times the channel has been viewed}
}
\value{
ChannelStatistics object
}
\description{
ChannelStatistics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Statistics about a channel: number of subscribers, number of videos in the channel, etc.
}
|
# R script to make figures for EMF33 Bioenergy Brazil crosscut
# ---- START ----
# Clear the workspace. NOTE(review): rm(list=ls()) wipes the caller's whole
# session and is discouraged in scripts; prefer restarting R for a clean state.
rm(list=ls())
# Load Libraries
library(reshape);
library(ggplot2);
library(data.table);
library(tidyr)
library(plyr)
library(dplyr)
library(stringr)
library(xlsx)
library(ggmap)
library(maps)
library(mapdata)
library(gridExtra)
library(scales)
library(ggpubr)
library(grid)
# ---- CONSTANTS ----
# Output resolution (pixels per inch) and font sizes shared by all figures.
ppi <- 600
FSizeTitle = 10
FSizeStrip = 9
FSizeAxis = 9
FSizeLeg = 9
# Models and reporting years retained from the raw EMF33 database extract.
ActiveModel = c("AIM/CGE","BET","COFFEE","DNE21+ V.14","FARM 3.1","GCAM_EMF33","GRAPE-15","IMACLIM-NLU","IMAGE","POLES EMF33")
ActiveYear = c(2010,2030,2050,2070)
# ActiveYear = c(2020,2030,2040,2050,2060,2070,2080,2090,2100)
ActiveYear2 = c(2050,2100)
# ---- READ DATA FILE ----
BraDATA = read.csv(paste0(getwd(),"/GitHub/EMF33/data/Brazil/BraDATA.csv"), sep=",", dec=".", stringsAsFactors = FALSE)
# Drop the "X" column, presumably a row index left over from write.csv -- confirm.
BraDATA$X <- NULL
# ---- PROCESS DATA FILE ----
BraDATA = subset(BraDATA, (MODEL %in% ActiveModel) & (Year %in% ActiveYear))
# GCAM data lacks values for "Emissions|CO2|Energy"
# Calculate this as the difference between total and AFOLU emissions.
# NOTE(review): set_colnames() is a magrittr export, and magrittr is not
# among the library() calls above -- confirm how it gets attached.
BraDATA.GCAMCor <- BraDATA %>%
subset(MODEL == "GCAM_EMF33" & !(VARIABLE == "Emissions|CO2|Energy")) %>%
spread(key = "VARIABLE", value = "value") %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","TotalEmis","AFOLU")) %>%
mutate(Energy = TotalEmis - AFOLU) %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","Emissions|CO2","Emissions|CO2|Land Use","Emissions|CO2|Energy")) %>%
melt(id.vars=c("MODEL","SCENARIO","REGION","UNIT","Year")) %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","VARIABLE","value"))
# Swap the corrected GCAM rows back into the main table.
BraDATA = BraDATA %>%
subset(!MODEL == "GCAM_EMF33") %>%
rbind(BraDATA.GCAMCor)
rm(BraDATA.GCAMCor)
# ---- LABELS ----
#Model labels with text wraps (used in facet strips)
model_labels <- c("AIM/CGE"="AIM/CGE","BET"="BET","COFFEE"="COFFEE","DNE21+ V.14"="DNE21+","FARM 3.1"="FARM","MESSAGE-GLOBIOM"="MESSAGEix-\nGLOBIOM","GCAM_EMF33"="GCAM","GRAPE-15"="GRAPE","IMACLIM-NLU"="IMACLIM-\nNLU","IMAGE"="IMAGE","POLES EMF33"="POLES","REMIND-MAGPIE"="REMIND-\nMAgPIE")
#Model labels without text wraps
model_labels2 <- c("AIM/CGE"="AIM/CGE","BET"="BET","COFFEE"="COFFEE","DNE21+ V.14"="DNE21+","FARM 3.1"="FARM","MESSAGE-GLOBIOM"="MESSAGEix-GLOBIOM","GCAM_EMF33"="GCAM","GRAPE-15"="GRAPE","IMACLIM-NLU"="IMACLIM-NLU","IMAGE"="IMAGE","POLES EMF33"="POLES","REMIND-MAGPIE"="REMIND-MAgPIE")
# ---- FIGURES ----
# ---- FIG: Total Emissions ----
# Brazilian CO2 emissions over time for scenario R3-B-lo-full, one facet
# per model: total (black) with its energy and land-use (AFOLU) components.
# Note: `!VARIABLE == "x"` parses as `!(VARIABLE == "x")` because `==`
# binds more tightly than `!`.
TotEmis <- ggplot() +
geom_line(data=subset(BraDATA, REGION == "Brazil" & SCENARIO == "R3-B-lo-full" & VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=1) +
geom_line(data=subset(BraDATA, REGION == "Brazil" & SCENARIO == "R3-B-lo-full" & !VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=0.75) +
geom_hline(yintercept=0,size = 0.1, colour='black') +
ylab("Emissions MtCO2/yr") + xlab("") +
theme_bw() + theme(panel.grid.minor=element_blank(), panel.grid.major=element_line(colour="gray80", size = 0.3)) +
theme(plot.title = element_text(size = FSizeTitle, face = "bold")) +
theme(text= element_text(size=FSizeStrip, face="plain"), axis.text.x = element_text(angle=66, size=FSizeAxis, hjust=1), axis.text.y = element_text(size=FSizeAxis)) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.2)) +
theme(legend.position="bottom", legend.box="vertical", legend.direction = "horizontal", legend.spacing.y=unit(0.01,"cm")) +
scale_colour_manual(values=c("black","red","forestgreen"),
name="Emission Source:",
breaks=c("Emissions|CO2","Emissions|CO2|Energy","Emissions|CO2|Land Use"),
labels=c("Total","Energy","AFOLU"),
guide="legend") +
theme(strip.text.x = element_text(size = FSizeStrip, face="bold"), strip.text.y = element_text(size = FSizeStrip, face="bold")) +
facet_wrap(~MODEL, scales="free_y", labeller=labeller(MODEL = model_labels))
# Evaluating the object renders it on the active graphics device.
TotEmis
# ---- FIG: Emissions, all regions ----
# Same scenario as above but faceted by REGION x MODEL, with explicit
# x-axis breaks at the reporting years.
GlobEmis <- ggplot() +
geom_line(data=subset(BraDATA, SCENARIO == "R3-B-lo-full" & VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=1) +
geom_line(data=subset(BraDATA, SCENARIO == "R3-B-lo-full" & !VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=0.75) +
geom_hline(yintercept=0,size = 0.1, colour='black') +
ylab(expression(paste("Emissions MtCO"[2],"/yr"))) + xlab("") +
theme_bw() + theme(panel.grid.minor=element_blank(), panel.grid.major=element_line(colour="gray80", size = 0.3)) +
theme(plot.title = element_text(size = FSizeTitle, face = "bold")) +
theme(text= element_text(size=FSizeStrip, face="plain"), axis.text.x = element_text(angle=66, size=FSizeAxis, hjust=1), axis.text.y = element_text(size=FSizeAxis)) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.2)) +
theme(legend.position="bottom", legend.box="vertical", legend.direction = "horizontal", legend.spacing.y=unit(0.01,"cm")) +
scale_colour_manual(values=c("black","red","forestgreen"),
name="Emission Source:",
breaks=c("Emissions|CO2","Emissions|CO2|Energy","Emissions|CO2|Land Use"),
labels=c("Total","Energy","AFOLU"),
guide="legend") +
scale_x_continuous(breaks=ActiveYear) +
theme(strip.text.x = element_text(size = FSizeStrip, face="bold"), strip.text.y = element_text(size = FSizeStrip, face="bold")) +
facet_grid(REGION~MODEL, scales="free_y", labeller=labeller(MODEL = model_labels))
# Evaluating the object renders it on the active graphics device.
GlobEmis
# ---- OUTPUT ----
# png(paste0(getwd(),"/GitHub/EMF33/output/Brazil/Emissions.png"), width=6*ppi, height=5*ppi, res=ppi)
# print(plot(TotEmis))
# dev.off()
#
# png(paste0(getwd(),"/GitHub/EMF33/output/Brazil/GlobalEmissions.png"), width=10*ppi, height=4*ppi, res=ppi)
# print(plot(GlobEmis))
# dev.off()
#
| /script/Brazil.R | no_license | VassilisDaioglou/EMF33 | R | false | false | 6,005 | r | # R script to make figures for EMF33 Bioenergy Brazil crosscut
# ---- START ----
# Clear the workspace. NOTE(review): rm(list=ls()) wipes the caller's whole
# session and is discouraged in scripts; prefer restarting R for a clean state.
rm(list=ls())
# Load Libraries
library(reshape);
library(ggplot2);
library(data.table);
library(tidyr)
library(plyr)
library(dplyr)
library(stringr)
library(xlsx)
library(ggmap)
library(maps)
library(mapdata)
library(gridExtra)
library(scales)
library(ggpubr)
library(grid)
# ---- CONSTANTS ----
# Output resolution (pixels per inch) and font sizes shared by all figures.
ppi <- 600
FSizeTitle = 10
FSizeStrip = 9
FSizeAxis = 9
FSizeLeg = 9
# Models and reporting years retained from the raw EMF33 database extract.
ActiveModel = c("AIM/CGE","BET","COFFEE","DNE21+ V.14","FARM 3.1","GCAM_EMF33","GRAPE-15","IMACLIM-NLU","IMAGE","POLES EMF33")
ActiveYear = c(2010,2030,2050,2070)
# ActiveYear = c(2020,2030,2040,2050,2060,2070,2080,2090,2100)
ActiveYear2 = c(2050,2100)
# ---- READ DATA FILE ----
BraDATA = read.csv(paste0(getwd(),"/GitHub/EMF33/data/Brazil/BraDATA.csv"), sep=",", dec=".", stringsAsFactors = FALSE)
# Drop the "X" column, presumably a row index left over from write.csv -- confirm.
BraDATA$X <- NULL
# ---- PROCESS DATA FILE ----
BraDATA = subset(BraDATA, (MODEL %in% ActiveModel) & (Year %in% ActiveYear))
# GCAM data lacks values for "Emissions|CO2|Energy"
# Calculate this as the difference between total and AFOLU emissions.
# NOTE(review): set_colnames() is a magrittr export, and magrittr is not
# among the library() calls above -- confirm how it gets attached.
BraDATA.GCAMCor <- BraDATA %>%
subset(MODEL == "GCAM_EMF33" & !(VARIABLE == "Emissions|CO2|Energy")) %>%
spread(key = "VARIABLE", value = "value") %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","TotalEmis","AFOLU")) %>%
mutate(Energy = TotalEmis - AFOLU) %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","Emissions|CO2","Emissions|CO2|Land Use","Emissions|CO2|Energy")) %>%
melt(id.vars=c("MODEL","SCENARIO","REGION","UNIT","Year")) %>%
set_colnames(c("MODEL","SCENARIO","REGION","UNIT","Year","VARIABLE","value"))
# Swap the corrected GCAM rows back into the main table.
BraDATA = BraDATA %>%
subset(!MODEL == "GCAM_EMF33") %>%
rbind(BraDATA.GCAMCor)
rm(BraDATA.GCAMCor)
# ---- LABELS ----
#Model labels with text wraps (used in facet strips)
model_labels <- c("AIM/CGE"="AIM/CGE","BET"="BET","COFFEE"="COFFEE","DNE21+ V.14"="DNE21+","FARM 3.1"="FARM","MESSAGE-GLOBIOM"="MESSAGEix-\nGLOBIOM","GCAM_EMF33"="GCAM","GRAPE-15"="GRAPE","IMACLIM-NLU"="IMACLIM-\nNLU","IMAGE"="IMAGE","POLES EMF33"="POLES","REMIND-MAGPIE"="REMIND-\nMAgPIE")
#Model labels without text wraps
model_labels2 <- c("AIM/CGE"="AIM/CGE","BET"="BET","COFFEE"="COFFEE","DNE21+ V.14"="DNE21+","FARM 3.1"="FARM","MESSAGE-GLOBIOM"="MESSAGEix-GLOBIOM","GCAM_EMF33"="GCAM","GRAPE-15"="GRAPE","IMACLIM-NLU"="IMACLIM-NLU","IMAGE"="IMAGE","POLES EMF33"="POLES","REMIND-MAGPIE"="REMIND-MAgPIE")
# ---- FIGURES ----
# ---- FIG: Total Emissions ----
TotEmis <- ggplot() +
geom_line(data=subset(BraDATA, REGION == "Brazil" & SCENARIO == "R3-B-lo-full" & VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=1) +
geom_line(data=subset(BraDATA, REGION == "Brazil" & SCENARIO == "R3-B-lo-full" & !VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=0.75) +
geom_hline(yintercept=0,size = 0.1, colour='black') +
ylab("Emissions MtCO2/yr") + xlab("") +
theme_bw() + theme(panel.grid.minor=element_blank(), panel.grid.major=element_line(colour="gray80", size = 0.3)) +
theme(plot.title = element_text(size = FSizeTitle, face = "bold")) +
theme(text= element_text(size=FSizeStrip, face="plain"), axis.text.x = element_text(angle=66, size=FSizeAxis, hjust=1), axis.text.y = element_text(size=FSizeAxis)) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.2)) +
theme(legend.position="bottom", legend.box="vertical", legend.direction = "horizontal", legend.spacing.y=unit(0.01,"cm")) +
scale_colour_manual(values=c("black","red","forestgreen"),
name="Emission Source:",
breaks=c("Emissions|CO2","Emissions|CO2|Energy","Emissions|CO2|Land Use"),
labels=c("Total","Energy","AFOLU"),
guide="legend") +
theme(strip.text.x = element_text(size = FSizeStrip, face="bold"), strip.text.y = element_text(size = FSizeStrip, face="bold")) +
facet_wrap(~MODEL, scales="free_y", labeller=labeller(MODEL = model_labels))
TotEmis
GlobEmis <- ggplot() +
geom_line(data=subset(BraDATA, SCENARIO == "R3-B-lo-full" & VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=1) +
geom_line(data=subset(BraDATA, SCENARIO == "R3-B-lo-full" & !VARIABLE == "Emissions|CO2"),
aes(x=Year,y = value, color=VARIABLE), size=1, alpha=0.75) +
geom_hline(yintercept=0,size = 0.1, colour='black') +
ylab(expression(paste("Emissions MtCO"[2],"/yr"))) + xlab("") +
theme_bw() + theme(panel.grid.minor=element_blank(), panel.grid.major=element_line(colour="gray80", size = 0.3)) +
theme(plot.title = element_text(size = FSizeTitle, face = "bold")) +
theme(text= element_text(size=FSizeStrip, face="plain"), axis.text.x = element_text(angle=66, size=FSizeAxis, hjust=1), axis.text.y = element_text(size=FSizeAxis)) +
theme(panel.border = element_rect(colour = "black", fill=NA, size=0.2)) +
theme(legend.position="bottom", legend.box="vertical", legend.direction = "horizontal", legend.spacing.y=unit(0.01,"cm")) +
scale_colour_manual(values=c("black","red","forestgreen"),
name="Emission Source:",
breaks=c("Emissions|CO2","Emissions|CO2|Energy","Emissions|CO2|Land Use"),
labels=c("Total","Energy","AFOLU"),
guide="legend") +
scale_x_continuous(breaks=ActiveYear) +
theme(strip.text.x = element_text(size = FSizeStrip, face="bold"), strip.text.y = element_text(size = FSizeStrip, face="bold")) +
facet_grid(REGION~MODEL, scales="free_y", labeller=labeller(MODEL = model_labels))
GlobEmis
# ---- OUTPUT ----
# png(paste0(getwd(),"/GitHub/EMF33/output/Brazil/Emissions.png"), width=6*ppi, height=5*ppi, res=ppi)
# print(plot(TotEmis))
# dev.off()
#
# png(paste0(getwd(),"/GitHub/EMF33/output/Brazil/GlobalEmissions.png"), width=10*ppi, height=4*ppi, res=ppi)
# print(plot(GlobEmis))
# dev.off()
#
|
# Print the indices 1..20 alongside the first 20 Fibonacci numbers
# (1, 1, 2, 3, ...), one "i value" pair per line.
# NOTE(review): the temporary is named `c`, shadowing base::c() inside the
# loop body — works, but worth renaming.
a = 0
b = 1
for(i in seq(1, 20)) {
cat(i, b, '\n')
# Advance the pair: (a, b) -> (b, a + b).
c = a + b
a = b
b = c
} | /resolucao/r/fibonacci.r | permissive | rafaelbes/numericalMethodsCourse | R | false | false | 82 | r | a = 0
b = 1
# Print the first 20 Fibonacci numbers (1, 1, 2, 3, ...), one per line,
# advancing the pair (a, b) -> (b, a + b) each iteration.
for(i in seq(1, 20)) {
cat(i, b, '\n')
c = a + b
a = b
b = c
} |
#' Calculate a Wald statistic
#'
#' Computes a Wald chi-square statistic and p-value for the joint null
#' hypothesis that the group of coefficients selected by
#' \code{extractIndices()} is zero.
#'
#'@import magrittr
#'@param coef an estimated parameter matrix (rows named by parameter).
#'@param vcov a variance-covariance matrix of the estimates.
#'@param position position(s) identifying the parameter group of interest;
#'  passed through to \code{extractIndices()}.
#'@param excludeStates states to exclude when selecting indices
#'  (default \code{"*"}); passed through to \code{extractIndices()}.
#'@param order order of the term (default \code{1}); passed through to
#'  \code{extractIndices()}.
#'@param position2 second position for higher-order terms (default
#'  \code{NULL}); passed through to \code{extractIndices()}.
#'@param verbose a logical value; if \code{TRUE}, print the names of the
#'  coefficients being tested.
#'@return a named numeric vector with elements \code{chisq2} (the Wald
#'  statistic) and \code{p} (its chi-square p-value on \code{length(idx)}
#'  degrees of freedom).
#'@export
wald_pvalue<-function(coef,vcov, position, excludeStates = "*", order = 1, position2 = NULL, verbose = TRUE){
  # Row/column indices (into coef and vcov) of the coefficient group under test.
  idx <- extractIndices(coef = coef, position = position, excludeStates = excludeStates,
                        order = order, position2 = position2, verbose = FALSE)
  if (verbose) cat("p-value for ", paste(rownames(coef[idx, , drop = FALSE]), collapse = ","), "\n")
  coef_sub <- matrix(coef[idx], ncol = 1)
  # Invert the sub-covariance via its Cholesky factor (stable for SPD matrices).
  vcov_inv_sub <- chol2inv(chol(vcov[idx, idx, drop = FALSE]))
  # Wald statistic: b' V^{-1} b ~ chi-square(#restrictions) under H0.
  chi2value <- as.numeric(t(coef_sub) %*% vcov_inv_sub %*% coef_sub)
  p.grp <- pchisq(q = chi2value, df = length(idx), lower.tail = FALSE)
  r <- c(chi2value, p.grp)
  names(r) <- c("chisq2", "p")
  r
}
# wald_pvalue(coef = fit$coef,vcov = vcov,position = 12,excludeStates = "*",order = 1,position2 = NULL,verbose = T)
#
# wald_pvalue(coef = fit$coef,vcov = vcov,position = c(1,2),excludeStates = "*",order = 2,position2 = c(3,4),verbose = T)
| /R/wald_pvalue.R | no_license | mhu48/pudms | R | false | false | 1,106 | r | #' Calculate a Wald statistic
#'
#' Computes a Wald chi-square statistic and p-value for the joint null
#' hypothesis that the group of coefficients selected by
#' \code{extractIndices()} is zero.
#'
#'@import magrittr
#'@param coef an estimated parameter matrix (rows named by parameter).
#'@param vcov a variance-covariance matrix of the estimates.
#'@param position position(s) identifying the parameter group of interest;
#'  passed through to \code{extractIndices()}.
#'@param excludeStates states to exclude when selecting indices
#'  (default \code{"*"}); passed through to \code{extractIndices()}.
#'@param order order of the term (default \code{1}); passed through to
#'  \code{extractIndices()}.
#'@param position2 second position for higher-order terms (default
#'  \code{NULL}); passed through to \code{extractIndices()}.
#'@param verbose a logical value; if \code{TRUE}, print the names of the
#'  coefficients being tested.
#'@export
wald_pvalue<-function(coef,vcov, position, excludeStates = "*", order = 1, position2 = NULL, verbose = TRUE){
# Row/column indices (into coef and vcov) of the coefficient group under test.
idx= extractIndices(coef = coef,position = position,excludeStates = excludeStates,order = order,position2 = position2,verbose= FALSE)
if(verbose) cat("p-value for ",paste(rownames(coef[idx,,drop=F]),collapse = ","),"\n")
coef_sub = matrix(coef[idx],ncol = 1)
# Invert the sub-covariance via its Cholesky factor (stable for SPD matrices).
vcov_inv_sub = chol2inv(chol(vcov[idx,idx,drop=F]))
# Wald statistic: b' V^{-1} b ~ chi-square(#restrictions) under H0.
chi2value = as.numeric(t(coef_sub)%*%vcov_inv_sub%*%coef_sub)
p.grp = pchisq(q = chi2value,df = length(idx),lower.tail = F)
# Named result: statistic plus p-value.
r=c(chi2value,p.grp)
names(r) = c("chisq2","p")
r
}
# wald_pvalue(coef = fit$coef,vcov = vcov,position = 12,excludeStates = "*",order = 1,position2 = NULL,verbose = T)
#
# wald_pvalue(coef = fit$coef,vcov = vcov,position = c(1,2),excludeStates = "*",order = 2,position2 = c(3,4),verbose = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{sql_options}
\alias{sql_options}
\title{Options for generating SQL}
\usage{
sql_options(cte = FALSE, use_star = TRUE, qualify_all_columns = FALSE)
}
\arguments{
\item{cte}{If \code{FALSE}, the default, subqueries are used. If \code{TRUE} common
table expressions are used.}
\item{use_star}{If \code{TRUE}, the default, \code{*} is used to select all columns of
a table. If \code{FALSE} all columns are explicitly selected.}
\item{qualify_all_columns}{If \code{FALSE}, the default, columns are only
qualified with the table they come from if the same column name appears in
multiple tables.}
}
\value{
A <dbplyr_sql_options> object.
}
\description{
Options for generating SQL
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf1 <- lazy_frame(key = 1, a = 1, b = 2)
lf2 <- lazy_frame(key = 1, a = 1, c = 3)
result <- left_join(lf1, lf2, by = "key") \%>\%
filter(c >= 3)
show_query(result)
sql_options <- sql_options(cte = TRUE, qualify_all_columns = TRUE)
show_query(result, sql_options = sql_options)
}
| /man/sql_options.Rd | permissive | tidyverse/dbplyr | R | false | true | 1,101 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{sql_options}
\alias{sql_options}
\title{Options for generating SQL}
\usage{
sql_options(cte = FALSE, use_star = TRUE, qualify_all_columns = FALSE)
}
\arguments{
\item{cte}{If \code{FALSE}, the default, subqueries are used. If \code{TRUE} common
table expressions are used.}
\item{use_star}{If \code{TRUE}, the default, \code{*} is used to select all columns of
a table. If \code{FALSE} all columns are explicitly selected.}
\item{qualify_all_columns}{If \code{FALSE}, the default, columns are only
qualified with the table they come from if the same column name appears in
multiple tables.}
}
\value{
A <dbplyr_sql_options> object.
}
\description{
Options for generating SQL
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf1 <- lazy_frame(key = 1, a = 1, b = 2)
lf2 <- lazy_frame(key = 1, a = 1, c = 3)
result <- left_join(lf1, lf2, by = "key") \%>\%
filter(c >= 3)
show_query(result)
sql_options <- sql_options(cte = TRUE, qualify_all_columns = TRUE)
show_query(result, sql_options = sql_options)
}
|
#' Maximum likelihood estimation for bivariate dependent competing risks data under the Frank copula with the Pareto margins and fixed \eqn{\theta}
#'
#' @param t.event Vector of the observed failure times.
#' @param event1 Vector of the indicators for the failure cause 1.
#' @param event2 Vector of the indicators for the failure cause 2.
#' @param Theta Copula parameter \eqn{\theta}.
#' @param Alpha1.0 Initial guess for the scale parameter \eqn{\alpha_{1}} with default value 1.
#' @param Alpha2.0 Initial guess for the scale parameter \eqn{\alpha_{2}} with default value 1.
#' @param Gamma1.0 Initial guess for the shape parameter \eqn{\gamma_{1}} with default value 1.
#' @param Gamma2.0 Initial guess for the shape parameter \eqn{\gamma_{2}} with default value 1.
#' @param epsilon Positive tuning parameter in the NR algorithm with default value \eqn{10^{-5}}.
#' @param d Positive tuning parameter in the NR algorithm with default value \eqn{e^{10}}.
#' @param r.1 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.2 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.3 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.4 Positive tuning parameter in the NR algorithm with default value 6.
#' @description Maximum likelihood estimation for bivariate dependent competing risks data under the Frank copula with the Pareto margins and fixed \eqn{\theta}.
#'
#' @return \item{n}{Sample size.}
#' \item{count}{Iteration number.}
#' \item{random}{Randomization number.}
#' \item{Alpha1}{Positive scale parameter for the Pareto margin (failure cause 1).}
#' \item{Alpha2}{Positive scale parameter for the Pareto margin (failure cause 2).}
#' \item{Gamma1}{Positive shape parameter for the Pareto margin (failure cause 1).}
#' \item{Gamma2}{Positive shape parameter for the Pareto margin (failure cause 2).}
#' \item{MedX}{Median lifetime due to failure cause 1.}
#' \item{MedY}{Median lifetime due to failure cause 2.}
#' \item{MeanX}{Mean lifetime due to failure cause 1.}
#' \item{MeanY}{Mean lifetime due to failure cause 2.}
#' \item{logL}{Log-likelihood value under the fitted model.}
#' \item{AIC}{AIC value under the fitted model.}
#' \item{BIC}{BIC value under the fitted model.}
#'
#' @references Shih J-H, Lee W, Sun L-H, Emura T (2018), Fitting competing risks data to bivariate Pareto models, Communications in Statistics - Theory and Methods, doi: 10.1080/03610926.2018.1425450.
#' @importFrom stats qnorm runif
#' @importFrom utils globalVariables
#' @importFrom methods is
#' @export
#'
#' @examples
#' t.event = c(72,40,20,65,24,46,62,61,60,60,59,59,49,20, 3,58,29,26,52,20,
#' 51,51,31,42,38,69,39,33, 8,13,33, 9,21,66, 5,27, 2,20,19,60,
#' 32,53,53,43,21,74,72,14,33, 8,10,51, 7,33, 3,43,37, 5, 6, 2,
#' 5,64, 1,21,16,21,12,75,74,54,73,36,59, 6,58,16,19,39,26,60,
#' 43, 7, 9,67,62,17,25, 0, 5,34,59,31,58,30,57, 5,55,55,52, 0,
#' 51,17,70,74,74,20, 2, 8,27,23, 1,52,51, 6, 0,26,65,26, 6, 6,
#' 68,33,67,23, 6,11, 6,57,57,29, 9,53,51, 8, 0,21,27,22,12,68,
#' 21,68, 0, 2,14,18, 5,60,40,51,50,46,65, 9,21,27,54,52,75,30,
#' 70,14, 0,42,12,40, 2,12,53,11,18,13,45, 8,28,67,67,24,64,26,
#' 57,32,42,20,71,54,64,51, 1, 2, 0,54,69,68,67,66,64,63,35,62,
#' 7,35,24,57, 1, 4,74, 0,51,36,16,32,68,17,66,65,19,41,28, 0,
#' 46,63,60,59,46,63, 8,74,18,33,12, 1,66,28,30,57,50,39,40,24,
#' 6,30,58,68,24,33,65, 2,64,19,15,10,12,53,51, 1,40,40,66, 2,
#' 21,35,29,54,37,10,29,71,12,13,27,66,28,31,12, 9,21,19,51,71,
#' 76,46,47,75,75,49,75,75,31,69,74,25,72,28,36, 8,71,60,14,22,
#' 67,62,68,68,27,68,68,67,67, 3,49,12,30,67, 5,65,24,66,36,66,
#' 40,13,40, 0,14,45,64,13,24,15,26, 5,63,35,61,61,50,57,21,26,
#' 11,59,42,27,50,57,57, 0, 1,54,53,23, 8,51,27,52,52,52,45,48,
#' 18, 2, 2,35,75,75, 9,39, 0,26,17,43,53,47,11,65,16,21,64, 7,
#' 38,55, 5,28,38,20,24,27,31, 9, 9,11,56,36,56,15,51,33,70,32,
#' 5,23,63,30,53,12,58,54,36,20,74,34,70,25,65, 4,10,58,37,56,
#' 6, 0,70,70,28,40,67,36,23,23,62,62,62, 2,34, 4,12,56, 1, 7,
#' 4,70,65, 7,30,40,13,22, 0,18,64,13,26, 1,16,33,22,30,53,53,
#' 7,61,40, 9,59, 7,12,46,50, 0,52,19,52,51,51,14,27,51, 5, 0,
#' 41,53,19)
#'
#' event1 = c(0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
#' 0,0,1,0,0,0,1,0,1,1,0,1,1,1,1,0,0,1,1,0,
#' 1,0,0,1,1,0,0,1,0,0,0,1,0,1,0,0,1,0,1,1,
#' 1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,
#' 0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,
#' 0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,1,0,0,0,0,1,0,0,0,0,0,
#' 1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,1,0,0,1,1,0,1,0,0,1,1,0,0,
#' 1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
#' 0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,1,1,0,0,
#' 1,1,1,1,0,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,
#' 0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
#' 0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,1,0,0,0,0,1,0,1,0,1,0,0,1,
#' 1,1,0,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,
#' 0,0,0,1,0,0,0,0,1,0,0,1,0,1,0,1,1,0,1,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
#' 1,0,0,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,0,1,
#' 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0,
#' 0,0,1)
#'
#' event2 = c(0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,1,
#' 0,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,0,0,
#' 0,0,0,0,0,0,0,0,1,1,1,0,1,0,1,1,0,1,0,0,
#' 0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,
#' 1,1,1,0,1,1,1,1,1,1,0,1,0,1,0,1,0,0,0,1,
#' 0,1,1,0,0,1,0,0,1,1,1,0,0,0,0,1,1,0,1,1,
#' 0,1,0,0,1,1,0,0,0,1,1,0,0,1,1,1,0,1,0,0,
#' 1,0,1,0,0,1,0,0,1,0,1,1,0,1,1,1,0,0,0,1,
#' 0,1,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,0,1,
#' 0,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,1,0,
#' 1,1,1,0,1,1,1,0,1,1,0,0,0,0,0,0,0,0,1,1,
#' 0,0,0,0,1,0,1,0,1,1,1,1,0,1,1,1,0,1,1,1,
#' 1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,1,
#' 0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
#' 0,0,1,0,0,1,0,0,1,0,0,1,0,1,1,0,0,1,1,1,
#' 1,1,0,0,1,0,0,0,0,1,1,1,1,0,1,1,1,0,1,0,
#' 1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,1,0,
#' 1,0,0,1,1,0,0,1,1,0,0,1,1,1,1,0,0,0,1,1,
#' 0,1,1,1,0,0,1,0,1,1,1,1,0,1,0,0,0,1,0,0,
#' 0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,1,
#' 1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
#' 0,1,0,0,1,1,0,1,1,1,0,0,0,1,0,1,0,0,1,1,
#' 0,0,0,0,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,0,
#' 0,0,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,1,1,
#' 1,0,0)
#'
#' library(Bivariate.Pareto)
#' set.seed(10)
#' MLE.Frank.Pareto(t.event,event1,event2,Theta = -5)
MLE.Frank.Pareto = function(t.event,event1,event2,Theta,Alpha1.0 = 1,Alpha2.0 = 1,
Gamma1.0 = 1,Gamma2.0 = 1,epsilon = 1e-5,d = exp(10),
r.1 = 6,r.2 = 6,r.3 = 6,r.4 = 6) {
### checking inputs ###
n = length(t.event)
if (length(t.event[t.event < 0]) != 0) {stop("t.event must be non-negative")}
if (length(event1) != n) {stop("the length of event1 is different from t.event")}
if (length(event2) != n) {stop("the length of event2 is different from t.event")}
if (length(event1[event1 == 0 | event1 == 1]) != n) {stop("elements in event1 must be either 0 or 1")}
if (length(event2[event2 == 0 | event2 == 1]) != n) {stop("elements in event2 must be either 0 or 1")}
temp.event = event1+event2
if (length(temp.event[temp.event == 2]) != 0) {stop("event1 and event2 cannot be 1 simultaneously")}
if (Theta == 0) {stop("Theta cannot be zero")}
if (Alpha1.0 <= 0) {stop("Alpha1.0 must be positive")}
if (Alpha2.0 <= 0) {stop("Alpha2.0 must be positive")}
if (Gamma1.0 <= 0) {stop("Alpha1.0 must be positive")}
if (Gamma2.0 <= 0) {stop("Alpha2.0 must be positive")}
if (epsilon <= 0) {stop("epsilon must be positive")}
if (d <= 0) {stop("d must be positive")}
if (r.1 <= 0) {stop("r.1 must be positive")}
if (r.2 <= 0) {stop("r.2 must be positive")}
if (r.3 <= 0) {stop("r.3 must be positive")}
if (r.4 <= 0) {stop("r.3 must be positive")}
### functions ###
log_L = function(par){
Alpha1 = exp(par[1])
Alpha2 = exp(par[2])
Gamma1 = exp(par[3])
Gamma2 = exp(par[4])
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
f1 = h1*S1*exp(-Theta*S1)*(exp(-Theta*S2)-1)/((exp(-Theta)-1)*exp(-Theta*ST))
f2 = h2*S2*exp(-Theta*S2)*(exp(-Theta*S1)-1)/((exp(-Theta)-1)*exp(-Theta*ST))
sum((1-event1-event2)*log(ST))+sum(event1*log(f1))+sum(event2*log(f2))
}
SL_function = function(par){
Alpha1 = exp(par[1])
Alpha2 = exp(par[2])
Gamma1 = exp(par[3])
Gamma2 = exp(par[4])
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
c(exp(par[1])*d1,exp(par[2])*d2,exp(par[3])*d3,exp(par[4])*d4)
}
HL_function = function(par){
Alpha1 = exp(par[1])
Alpha2 = exp(par[2])
Gamma1 = exp(par[3])
Gamma2 = exp(par[4])
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_h1_Alpha1_Alpha1 = -2*Gamma1*t.event/(1+Alpha1*t.event)^3
der_h2_Alpha2_Alpha2 = -2*Gamma2*t.event/(1+Alpha2*t.event)^3
der_S1_Alpha1_Alpha1 = Gamma1*(Gamma1+1)*t.event^2*(1+Alpha1*t.event)^(-Gamma1-2)
der_S2_Alpha2_Alpha2 = Gamma2*(Gamma2+1)*t.event^2*(1+Alpha2*t.event)^(-Gamma2-2)
der_h1_Gamma1_Gamma1 = 0
der_h2_Gamma2_Gamma2 = 0
der_S1_Gamma1_Gamma1 = (1+Alpha1*t.event)^(-Gamma1)*(log(1+Alpha1*t.event))^2
der_S2_Gamma2_Gamma2 = (1+Alpha2*t.event)^(-Gamma2)*(log(1+Alpha2*t.event))^2
der_h1_Alpha1_Gamma1 = (1+Alpha1*t.event)^(-2)
der_h2_Alpha2_Gamma2 = (1+Alpha2*t.event)^(-2)
der_S1_Alpha1_Gamma1 = t.event*(Gamma1*log(1+Alpha1*t.event)-1)/(1+Alpha1*t.event)^(Gamma1+1)
der_S2_Alpha2_Gamma2 = t.event*(Gamma2*log(1+Alpha2*t.event)-1)/(1+Alpha2*t.event)^(Gamma2+1)
der_ST_Alpha1_Alpha1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Alpha1*p1-Theta*der_S1_Alpha1^2*p1)-der_S1_Alpha1*p1*(p2-1)*(-Theta*der_S1_Alpha1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Alpha2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Alpha2*p2-Theta*der_S2_Alpha2^2*p2)-der_S2_Alpha2*p2*(p1-1)*(-Theta*der_S2_Alpha2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Alpha2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Alpha1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Alpha1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Gamma1_Gamma1*p1-Theta*der_S1_Gamma1^2*p1)-der_S1_Gamma1*p1*(p2-1)*(-Theta*der_S1_Gamma1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Gamma2_Gamma2*p2-Theta*der_S2_Gamma2^2*p2)-der_S2_Gamma2*p2*(p1-1)*(-Theta*der_S2_Gamma2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Gamma2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Gamma2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Gamma1*p1-Theta*der_S1_Alpha1*der_S1_Gamma1*p1)+Theta*der_S1_Alpha1*der_S1_Gamma1*p1^2*(p2-1)^2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*der_S1_Alpha1*-Theta*der_S2_Gamma2*p2+Theta*der_S1_Alpha1*der_S2_Gamma2*p1*p2*(p1-1)*(p2-1))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma1 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Gamma2*p2-Theta*der_S2_Alpha2*der_S2_Gamma2*p2)+Theta*der_S2_Alpha2*der_S2_Gamma2*p2^2*(p1-1)^2)/(p0-1+(p1-1)*(p2-1))^2
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
D111 = sum(event1*((der_h1_Alpha1_Alpha1*h1-der_h1_Alpha1^2)/h1^2+(der_S1_Alpha1_Alpha1*S1-der_S1_Alpha1^2)/S1^2-Theta*der_S1_Alpha1_Alpha1+Theta*der_ST_Alpha1_Alpha1))
D112 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Alpha1*p1+Theta^2*der_S1_Alpha1^2*p1)-Theta^2*der_S1_Alpha1^2*p1^2)/(p1-1)^2+Theta*der_ST_Alpha1_Alpha1))
D113 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha1-der_ST_Alpha1^2)/ST^2))
D11 = D111+D112+D113
D221 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Alpha2*p2+Theta^2*der_S2_Alpha2^2*p2)-Theta^2*der_S2_Alpha2^2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Alpha2))
D222 = sum(event2*((der_h2_Alpha2_Alpha2*h2-der_h2_Alpha2^2)/h2^2+(der_S2_Alpha2_Alpha2*S2-der_S2_Alpha2^2)/S2^2-Theta*der_S2_Alpha2_Alpha2+Theta*der_ST_Alpha2_Alpha2))
D223 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Alpha2-der_ST_Alpha2^2)/ST^2))
D22 = D221+D222+D223
D121 = sum(event1*(Theta*der_ST_Alpha1_Alpha2))
D122 = sum(event2*(Theta*der_ST_Alpha1_Alpha2))
D123 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha2-der_ST_Alpha1*der_ST_Alpha2)/ST^2))
D12 = D121+D122+D123
D331 = sum(event1*((der_h1_Gamma1_Gamma1*h1-der_h1_Gamma1^2)/h1^2+(der_S1_Gamma1_Gamma1*S1-der_S1_Gamma1^2)/S1^2-Theta*der_S1_Gamma1_Gamma1+Theta*der_ST_Gamma1_Gamma1))
D332 = sum(event2*(((p1-1)*(-Theta*der_S1_Gamma1_Gamma1*p1+Theta^2*der_S1_Gamma1^2*p1)-Theta^2*der_S1_Gamma1^2*p1^2)/(p1-1)^2+Theta*der_ST_Gamma1_Gamma1))
D333 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma1-der_ST_Gamma1^2)/ST^2))
D33 = D331+D332+D333
D441 = sum(event1*(((p2-1)*(-Theta*der_S2_Gamma2_Gamma2*p2+Theta^2*der_S2_Gamma2^2*p2)-Theta^2*der_S2_Gamma2^2*p2^2)/(p2-1)^2+Theta*der_ST_Gamma2_Gamma2))
D442 = sum(event2*((der_h2_Gamma2_Gamma2*h2-der_h2_Gamma2^2)/h2^2+(der_S2_Gamma2_Gamma2*S2-der_S2_Gamma2^2)/S2^2-Theta*der_S2_Gamma2_Gamma2+Theta*der_ST_Gamma2_Gamma2))
D443 = sum((1-event1-event2)*((ST*der_ST_Gamma2_Gamma2-der_ST_Gamma2^2)/ST^2))
D44 = D441+D442+D443
D341 = sum(event1*(Theta*der_ST_Gamma1_Gamma2))
D342 = sum(event2*(Theta*der_ST_Gamma1_Gamma2))
D343 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma2-der_ST_Gamma1*der_ST_Gamma2)/ST^2))
D34 = D341+D342+D343
D131 = sum(event1*((der_h1_Alpha1_Gamma1*h1-der_h1_Alpha1*der_h1_Gamma1)/h1^2 +(der_S1_Alpha1_Gamma1*S1-der_S1_Alpha1*der_S1_Gamma1)/S1^2-Theta*der_S1_Alpha1_Gamma1+Theta*der_ST_Alpha1_Gamma1))
D132 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Gamma1*p1)-Theta^2*der_S1_Alpha1*der_S1_Gamma1*p1)/(p1-1)^2+Theta*der_ST_Alpha1_Gamma1))
D133 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma1-der_ST_Alpha1*der_ST_Gamma1)/ST^2))
D13 = D131+D132+D133
D141 = sum(event1*(Theta*der_ST_Alpha1_Gamma2))
D142 = sum(event2*(Theta*der_ST_Alpha1_Gamma2))
D143 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma2-der_ST_Alpha1*der_ST_Gamma2)/ST^2))
D14 = D141+D142+D143
D231 = sum(event1*(Theta*der_ST_Alpha2_Gamma1))
D232 = sum(event2*(Theta*der_ST_Alpha2_Gamma1))
D233 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma1-der_ST_Alpha2*der_ST_Gamma1)/ST^2))
D23 = D231+D232+D233
D241 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Gamma2*p2+Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2)-Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Gamma2))
D242 = sum(event2*((der_h2_Alpha2_Gamma2*h2-der_h2_Alpha2*der_h2_Gamma2)/h2^2+(der_S2_Alpha2_Gamma2*S2-der_S2_Alpha2*der_S2_Gamma2)/S2^2-Theta*der_S2_Alpha2_Gamma2+Theta*der_ST_Alpha2_Gamma2))
D243 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma2-der_ST_Alpha2*der_ST_Gamma2)/ST^2))
D24 = D241+D242+D243
DD11 = exp(2*par[1])*D11+exp(par[1])*d1
DD12 = exp(par[1])*exp(par[2])*D12
DD13 = exp(par[1])*exp(par[3])*D13
DD14 = exp(par[1])*exp(par[4])*D14
DD22 = exp(2*par[2])*D22+exp(par[2])*d2
DD23 = exp(par[2])*exp(par[3])*D23
DD24 = exp(par[2])*exp(par[4])*D24
DD33 = exp(2*par[3])*D33+exp(par[3])*d3
DD34 = exp(par[3])*exp(par[4])*D34
DD44 = exp(2*par[4])*D44+exp(par[4])*d4
matrix(c(DD11,DD12,DD13,DD14,DD12,DD22,DD23,DD24,DD13,DD23,DD33,DD34,DD14,DD24,DD34,DD44),4,4)
}
H_function = function(par){
Alpha1 = par[1]
Alpha2 = par[2]
Gamma1 = par[3]
Gamma2 = par[4]
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_h1_Alpha1_Alpha1 = -2*Gamma1*t.event/(1+Alpha1*t.event)^3
der_h2_Alpha2_Alpha2 = -2*Gamma2*t.event/(1+Alpha2*t.event)^3
der_S1_Alpha1_Alpha1 = Gamma1*(Gamma1+1)*t.event^2*(1+Alpha1*t.event)^(-Gamma1-2)
der_S2_Alpha2_Alpha2 = Gamma2*(Gamma2+1)*t.event^2*(1+Alpha2*t.event)^(-Gamma2-2)
der_h1_Gamma1_Gamma1 = 0
der_h2_Gamma2_Gamma2 = 0
der_S1_Gamma1_Gamma1 = (1+Alpha1*t.event)^(-Gamma1)*(log(1+Alpha1*t.event))^2
der_S2_Gamma2_Gamma2 = (1+Alpha2*t.event)^(-Gamma2)*(log(1+Alpha2*t.event))^2
der_h1_Alpha1_Gamma1 = (1+Alpha1*t.event)^(-2)
der_h2_Alpha2_Gamma2 = (1+Alpha2*t.event)^(-2)
der_S1_Alpha1_Gamma1 = t.event*(Gamma1*log(1+Alpha1*t.event)-1)/(1+Alpha1*t.event)^(Gamma1+1)
der_S2_Alpha2_Gamma2 = t.event*(Gamma2*log(1+Alpha2*t.event)-1)/(1+Alpha2*t.event)^(Gamma2+1)
der_ST_Alpha1_Alpha1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Alpha1*p1-Theta*der_S1_Alpha1^2*p1)-der_S1_Alpha1*p1*(p2-1)*(-Theta*der_S1_Alpha1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Alpha2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Alpha2*p2-Theta*der_S2_Alpha2^2*p2)-der_S2_Alpha2*p2*(p1-1)*(-Theta*der_S2_Alpha2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Alpha2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Alpha1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Alpha1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Gamma1_Gamma1*p1-Theta*der_S1_Gamma1^2*p1)-der_S1_Gamma1*p1*(p2-1)*(-Theta*der_S1_Gamma1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Gamma2_Gamma2*p2-Theta*der_S2_Gamma2^2*p2)-der_S2_Gamma2*p2*(p1-1)*(-Theta*der_S2_Gamma2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Gamma2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Gamma2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Gamma1*p1-Theta*der_S1_Alpha1*der_S1_Gamma1*p1)+Theta*der_S1_Alpha1*der_S1_Gamma1*p1^2*(p2-1)^2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*der_S1_Alpha1*-Theta*der_S2_Gamma2*p2+Theta*der_S1_Alpha1*der_S2_Gamma2*p1*p2*(p1-1)*(p2-1))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma1 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Gamma2*p2-Theta*der_S2_Alpha2*der_S2_Gamma2*p2)+Theta*der_S2_Alpha2*der_S2_Gamma2*p2^2*(p1-1)^2)/(p0-1+(p1-1)*(p2-1))^2
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
D111 = sum(event1*((der_h1_Alpha1_Alpha1*h1-der_h1_Alpha1^2)/h1^2+(der_S1_Alpha1_Alpha1*S1-der_S1_Alpha1^2)/S1^2-Theta*der_S1_Alpha1_Alpha1+Theta*der_ST_Alpha1_Alpha1))
D112 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Alpha1*p1+Theta^2*der_S1_Alpha1^2*p1)-Theta^2*der_S1_Alpha1^2*p1^2)/(p1-1)^2+Theta*der_ST_Alpha1_Alpha1))
D113 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha1-der_ST_Alpha1^2)/ST^2))
D11 = D111+D112+D113
D221 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Alpha2*p2+Theta^2*der_S2_Alpha2^2*p2)-Theta^2*der_S2_Alpha2^2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Alpha2))
D222 = sum(event2*((der_h2_Alpha2_Alpha2*h2-der_h2_Alpha2^2)/h2^2+(der_S2_Alpha2_Alpha2*S2-der_S2_Alpha2^2)/S2^2-Theta*der_S2_Alpha2_Alpha2+Theta*der_ST_Alpha2_Alpha2))
D223 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Alpha2-der_ST_Alpha2^2)/ST^2))
D22 = D221+D222+D223
D121 = sum(event1*(Theta*der_ST_Alpha1_Alpha2))
D122 = sum(event2*(Theta*der_ST_Alpha1_Alpha2))
D123 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha2-der_ST_Alpha1*der_ST_Alpha2)/ST^2))
D12 = D121+D122+D123
D331 = sum(event1*((der_h1_Gamma1_Gamma1*h1-der_h1_Gamma1^2)/h1^2+(der_S1_Gamma1_Gamma1*S1-der_S1_Gamma1^2)/S1^2-Theta*der_S1_Gamma1_Gamma1+Theta*der_ST_Gamma1_Gamma1))
D332 = sum(event2*(((p1-1)*(-Theta*der_S1_Gamma1_Gamma1*p1+Theta^2*der_S1_Gamma1^2*p1)-Theta^2*der_S1_Gamma1^2*p1^2)/(p1-1)^2+Theta*der_ST_Gamma1_Gamma1))
D333 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma1-der_ST_Gamma1^2)/ST^2))
D33 = D331+D332+D333
D441 = sum(event1*(((p2-1)*(-Theta*der_S2_Gamma2_Gamma2*p2+Theta^2*der_S2_Gamma2^2*p2)-Theta^2*der_S2_Gamma2^2*p2^2)/(p2-1)^2+Theta*der_ST_Gamma2_Gamma2))
D442 = sum(event2*((der_h2_Gamma2_Gamma2*h2-der_h2_Gamma2^2)/h2^2+(der_S2_Gamma2_Gamma2*S2-der_S2_Gamma2^2)/S2^2-Theta*der_S2_Gamma2_Gamma2+Theta*der_ST_Gamma2_Gamma2))
D443 = sum((1-event1-event2)*((ST*der_ST_Gamma2_Gamma2-der_ST_Gamma2^2)/ST^2))
D44 = D441+D442+D443
D341 = sum(event1*(Theta*der_ST_Gamma1_Gamma2))
D342 = sum(event2*(Theta*der_ST_Gamma1_Gamma2))
D343 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma2-der_ST_Gamma1*der_ST_Gamma2)/ST^2))
D34 = D341+D342+D343
D131 = sum(event1*((der_h1_Alpha1_Gamma1*h1-der_h1_Alpha1*der_h1_Gamma1)/h1^2 +(der_S1_Alpha1_Gamma1*S1-der_S1_Alpha1*der_S1_Gamma1)/S1^2-Theta*der_S1_Alpha1_Gamma1+Theta*der_ST_Alpha1_Gamma1))
D132 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Gamma1*p1)-Theta^2*der_S1_Alpha1*der_S1_Gamma1*p1)/(p1-1)^2+Theta*der_ST_Alpha1_Gamma1))
D133 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma1-der_ST_Alpha1*der_ST_Gamma1)/ST^2))
D13 = D131+D132+D133
D141 = sum(event1*(Theta*der_ST_Alpha1_Gamma2))
D142 = sum(event2*(Theta*der_ST_Alpha1_Gamma2))
D143 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma2-der_ST_Alpha1*der_ST_Gamma2)/ST^2))
D14 = D141+D142+D143
D231 = sum(event1*(Theta*der_ST_Alpha2_Gamma1))
D232 = sum(event2*(Theta*der_ST_Alpha2_Gamma1))
D233 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma1-der_ST_Alpha2*der_ST_Gamma1)/ST^2))
D23 = D231+D232+D233
D241 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Gamma2*p2+Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2)-Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Gamma2))
D242 = sum(event2*((der_h2_Alpha2_Gamma2*h2-der_h2_Alpha2*der_h2_Gamma2)/h2^2+(der_S2_Alpha2_Gamma2*S2-der_S2_Alpha2*der_S2_Gamma2)/S2^2-Theta*der_S2_Alpha2_Gamma2+Theta*der_ST_Alpha2_Gamma2))
D243 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma2-der_ST_Alpha2*der_ST_Gamma2)/ST^2))
D24 = D241+D242+D243
matrix(c(D11,D12,D13,D14,D12,D22,D23,D24,D13,D23,D33,D34,D14,D24,D34,D44),4,4)
}
par_old = c(log(Alpha1.0),log(Alpha2.0),log(Gamma1.0),log(Gamma2.0))
count = 0
random = 0
repeat{
temp = try(solve(HL_function(par_old),silent = TRUE))
if (is(temp,"try-error")){
random = random+1
count = 0
par_old = c(log(Alpha1.0*exp(runif(1,-r.1,r.1))),
log(Alpha2.0*exp(runif(1,-r.2,r.2))),
log(Gamma1.0*exp(runif(1,-r.3,r.3))),
log(Gamma2.0*exp(runif(1,-r.4,r.4))))
next
}
par_new = par_old-solve(HL_function(par_old))%*%SL_function(par_old)
count = count+1
if (is.na(sum(par_new)) |
max(abs(par_new)) > log(d)) {
random = random+1
count = 0
par_old = c(log(Alpha1.0*exp(runif(1,-r.1,r.1))),
log(Alpha2.0*exp(runif(1,-r.2,r.2))),
log(Gamma1.0*exp(runif(1,-r.3,r.3))),
log(Gamma2.0*exp(runif(1,-r.4,r.4))))
next
}
if (max(abs(exp(par_old)-exp(par_new))) < epsilon) {break}
par_old = par_new
}
Alpha1_hat = exp(par_new[1])
Alpha2_hat = exp(par_new[2])
Gamma1_hat = exp(par_new[3])
Gamma2_hat = exp(par_new[4])
Info = solve(-H_function(exp(par_new)))
Alpha1_se = sqrt(Info[1,1])
Alpha2_se = sqrt(Info[2,2])
Gamma1_se = sqrt(Info[3,3])
Gamma2_se = sqrt(Info[4,4])
InfoL = solve(-HL_function(par_new))
CI_Alpha1 = c(Alpha1_hat*exp(-qnorm(0.975)*sqrt(InfoL[1,1])),
Alpha1_hat*exp(+qnorm(0.975)*sqrt(InfoL[1,1])))
CI_Alpha2 = c(Alpha2_hat*exp(-qnorm(0.975)*sqrt(InfoL[2,2])),
Alpha2_hat*exp(+qnorm(0.975)*sqrt(InfoL[2,2])))
CI_Gamma1 = c(Gamma1_hat*exp(-qnorm(0.975)*sqrt(InfoL[3,3])),
Gamma1_hat*exp(+qnorm(0.975)*sqrt(InfoL[3,3])))
CI_Gamma2 = c(Gamma2_hat*exp(-qnorm(0.975)*sqrt(InfoL[4,4])),
Gamma2_hat*exp(+qnorm(0.975)*sqrt(InfoL[4,4])))
MedX_hat = (2^(1/Gamma1_hat)-1)/Alpha1_hat
MedY_hat = (2^(1/Gamma2_hat)-1)/Alpha2_hat
transX = c((1-2^(1/Gamma1_hat))/Alpha1_hat^2,0,-2^(1/Gamma1_hat)*log(2)/(Alpha1_hat*Gamma1_hat^2),0)
transY = c(0,(1-2^(1/Gamma2_hat))/Alpha2_hat^2,0,-2^(1/Gamma2_hat)*log(2)/(Alpha2_hat*Gamma2_hat^2))
MedX_se = sqrt(t(transX)%*%Info%*%transX)
MedY_se = sqrt(t(transY)%*%Info%*%transY)
temp_transX = c(-1,0,-2^(1/Gamma1_hat)*log(2)/((2^(1/Gamma1_hat)-1)*Gamma1_hat),0)
temp_transY = c(0,-1,0,-2^(1/Gamma2_hat)*log(2)/((2^(1/Gamma2_hat)-1)*Gamma2_hat))
temp_MedX_se = sqrt(t(temp_transX)%*%InfoL%*%temp_transX)
temp_MedY_se = sqrt(t(temp_transY)%*%InfoL%*%temp_transY)
CI_MedX = c(MedX_hat*exp(-qnorm(0.975)*temp_MedX_se),
MedX_hat*exp(+qnorm(0.975)*temp_MedX_se))
CI_MedY = c(MedY_hat*exp(-qnorm(0.975)*temp_MedY_se),
MedY_hat*exp(+qnorm(0.975)*temp_MedY_se))
Alpha1.res = c(Estimate = Alpha1_hat,SE = Alpha1_se,CI.lower = CI_Alpha1[1],CI.upper = CI_Alpha1[2])
Alpha2.res = c(Estimate = Alpha2_hat,SE = Alpha2_se,CI.lower = CI_Alpha2[1],CI.upper = CI_Alpha2[2])
Gamma1.res = c(Estimate = Gamma1_hat,SE = Gamma1_se,CI.lower = CI_Gamma1[1],CI.upper = CI_Gamma1[2])
Gamma2.res = c(Estimate = Gamma2_hat,SE = Gamma2_se,CI.lower = CI_Gamma2[1],CI.upper = CI_Gamma2[2])
MedX.res = c(Estimate = MedX_hat,SE = MedX_se,CI.lower = CI_MedX[1],CI.upper = CI_MedX[2])
MedY.res = c(Estimate = MedY_hat,SE = MedY_se,CI.lower = CI_MedY[1],CI.upper = CI_MedY[2])
if (Gamma1_hat < 1 & Gamma2_hat < 1) {
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = "Unavaliable",MeanY = "Unavaliable",
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else if (Gamma1_hat >= 1 & Gamma2_hat >= 1) {
MeanX_hat = 1/(Alpha1_hat*(Gamma1_hat-1))
MeanY_hat = 1/(Alpha2_hat*(Gamma2_hat-1))
trans2X = c(-1/(Alpha1_hat^2*(Gamma1_hat-1)),0,-1/(Alpha1_hat*(Gamma1_hat-1)^2),0)
trans2Y = c(0,-1/(Alpha2_hat^2*(Gamma2_hat-1)),0,-1/(Alpha2_hat*(Gamma2_hat-1)^2))
MeanX_se = sqrt(t(trans2X)%*%Info%*%trans2X)
MeanY_se = sqrt(t(trans2Y)%*%Info%*%trans2Y)
temp_trans2X = c(-1,0,-Gamma1_hat/(Gamma1_hat-1),0)
temp_trans2Y = c(0,-1,0,-Gamma2_hat/(Gamma2_hat-1))
temp_MeanX_se = sqrt(t(temp_trans2X)%*%InfoL%*%temp_trans2X)
temp_MeanY_se = sqrt(t(temp_trans2Y)%*%InfoL%*%temp_trans2Y)
CI_MeanX = c(MeanX_hat*exp(-qnorm(0.975)*temp_MeanX_se),
MeanX_hat*exp(+qnorm(0.975)*temp_MeanX_se))
CI_MeanY = c(MeanY_hat*exp(-qnorm(0.975)*temp_MeanY_se),
MeanY_hat*exp(+qnorm(0.975)*temp_MeanY_se))
MeanX.res = c(Estimate = MeanX_hat,SE = MeanX_se,CI.lower = CI_MeanX[1],CI.upper = CI_MeanX[2])
MeanY.res = c(Estimate = MeanY_hat,SE = MeanY_se,CI.lower = CI_MeanY[1],CI.upper = CI_MeanY[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = MeanX.res,MeanY = MeanY.res,
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else if (Gamma1_hat >= 1 & Gamma2_hat < 1) {
MeanX_hat = 1/(Alpha1_hat*(Gamma1_hat-1))
trans2X = c(-1/(Alpha1_hat^2*(Gamma1_hat-1)),0,-1/(Alpha1_hat*(Gamma1_hat-1)^2),0)
MeanX_se = sqrt(t(trans2X)%*%Info%*%trans2X)
temp_trans2X = c(-1,0,-Gamma1_hat/(Gamma1_hat-1),0)
temp_MeanX_se = sqrt(t(temp_trans2X)%*%InfoL%*%temp_trans2X)
CI_MeanX = c(MeanX_hat*exp(-qnorm(0.975)*temp_MeanX_se),
MeanX_hat*exp(+qnorm(0.975)*temp_MeanX_se))
MeanX.res = c(Estimate = MeanX_hat,SE = MeanX_se,CI.lower = CI_MeanX[1],CI.upper = CI_MeanX[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = MeanX.res,MeanY = "Unavaliable",
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else {
MeanY_hat = 1/(Alpha2_hat*(Gamma2_hat-1))
trans2Y = c(0,-1/(Alpha2_hat^2*(Gamma2_hat-1)),0,-1/(Alpha2_hat*(Gamma2_hat-1)^2))
MeanY_se = sqrt(t(trans2Y)%*%Info%*%trans2Y)
temp_trans2Y = c(0,-1,0,-Gamma2_hat/(Gamma2_hat-1))
temp_MeanY_se = sqrt(t(temp_trans2Y)%*%InfoL%*%temp_trans2Y)
CI_MeanY = c(MeanY_hat*exp(-qnorm(0.975)*temp_MeanY_se),
MeanY_hat*exp(+qnorm(0.975)*temp_MeanY_se))
MeanY.res = c(Estimate = MeanY_hat,SE = MeanY_se,CI.lower = CI_MeanY[1],CI.upper = CI_MeanY[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = "Unavaliable",MeanY = MeanY.res,
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
}
}
#' Maximum likelihood estimation for bivariate dependent competing risks data under the Frank copula with the Pareto margins and fixed \eqn{\theta}
#'
#' @param t.event Vector of the observed failure times.
#' @param event1 Vector of the indicators for the failure cause 1.
#' @param event2 Vector of the indicators for the failure cause 2.
#' @param Theta Copula parameter \eqn{\theta}.
#' @param Alpha1.0 Initial guess for the scale parameter \eqn{\alpha_{1}} with default value 1.
#' @param Alpha2.0 Initial guess for the scale parameter \eqn{\alpha_{2}} with default value 1.
#' @param Gamma1.0 Initial guess for the shape parameter \eqn{\gamma_{1}} with default value 1.
#' @param Gamma2.0 Initial guess for the shape parameter \eqn{\gamma_{2}} with default value 1.
#' @param epsilon Positive tuning parameter in the NR algorithm with default value \eqn{10^{-5}}.
#' @param d Positive tuning parameter in the NR algorithm with default value \eqn{e^{10}}.
#' @param r.1 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.2 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.3 Positive tuning parameter in the NR algorithm with default value 6.
#' @param r.4 Positive tuning parameter in the NR algorithm with default value 6.
#' @description Maximum likelihood estimation for bivariate dependent competing risks data under the Frank copula with the Pareto margins and fixed \eqn{\theta}.
#'
#' @return \item{n}{Sample size.}
#' \item{Iteration}{Iteration number.}
#' \item{Randomization}{Randomization number.}
#' \item{Alpha1}{Positive scale parameter for the Pareto margin (failure cause 1).}
#' \item{Alpha2}{Positive scale parameter for the Pareto margin (failure cause 2).}
#' \item{Gamma1}{Positive shape parameter for the Pareto margin (failure cause 1).}
#' \item{Gamma2}{Positive shape parameter for the Pareto margin (failure cause 2).}
#' \item{MedX}{Median lifetime due to failure cause 1.}
#' \item{MedY}{Median lifetime due to failure cause 2.}
#' \item{MeanX}{Mean lifetime due to failure cause 1.}
#' \item{MeanY}{Mean lifetime due to failure cause 2.}
#' \item{logL}{Log-likelihood value under the fitted model.}
#' \item{AIC}{AIC value under the fitted model.}
#' \item{BIC}{BIC value under the fitted model.}
#'
#' @references Shih J-H, Lee W, Sun L-H, Emura T (2018), Fitting competing risks data to bivariate Pareto models, Communications in Statistics - Theory and Methods, doi: 10.1080/03610926.2018.1425450.
#' @importFrom stats qnorm runif
#' @importFrom utils globalVariables
#' @importFrom methods is
#' @export
#'
#' @examples
#' t.event = c(72,40,20,65,24,46,62,61,60,60,59,59,49,20, 3,58,29,26,52,20,
#' 51,51,31,42,38,69,39,33, 8,13,33, 9,21,66, 5,27, 2,20,19,60,
#' 32,53,53,43,21,74,72,14,33, 8,10,51, 7,33, 3,43,37, 5, 6, 2,
#' 5,64, 1,21,16,21,12,75,74,54,73,36,59, 6,58,16,19,39,26,60,
#' 43, 7, 9,67,62,17,25, 0, 5,34,59,31,58,30,57, 5,55,55,52, 0,
#' 51,17,70,74,74,20, 2, 8,27,23, 1,52,51, 6, 0,26,65,26, 6, 6,
#' 68,33,67,23, 6,11, 6,57,57,29, 9,53,51, 8, 0,21,27,22,12,68,
#' 21,68, 0, 2,14,18, 5,60,40,51,50,46,65, 9,21,27,54,52,75,30,
#' 70,14, 0,42,12,40, 2,12,53,11,18,13,45, 8,28,67,67,24,64,26,
#' 57,32,42,20,71,54,64,51, 1, 2, 0,54,69,68,67,66,64,63,35,62,
#' 7,35,24,57, 1, 4,74, 0,51,36,16,32,68,17,66,65,19,41,28, 0,
#' 46,63,60,59,46,63, 8,74,18,33,12, 1,66,28,30,57,50,39,40,24,
#' 6,30,58,68,24,33,65, 2,64,19,15,10,12,53,51, 1,40,40,66, 2,
#' 21,35,29,54,37,10,29,71,12,13,27,66,28,31,12, 9,21,19,51,71,
#' 76,46,47,75,75,49,75,75,31,69,74,25,72,28,36, 8,71,60,14,22,
#' 67,62,68,68,27,68,68,67,67, 3,49,12,30,67, 5,65,24,66,36,66,
#' 40,13,40, 0,14,45,64,13,24,15,26, 5,63,35,61,61,50,57,21,26,
#' 11,59,42,27,50,57,57, 0, 1,54,53,23, 8,51,27,52,52,52,45,48,
#' 18, 2, 2,35,75,75, 9,39, 0,26,17,43,53,47,11,65,16,21,64, 7,
#' 38,55, 5,28,38,20,24,27,31, 9, 9,11,56,36,56,15,51,33,70,32,
#' 5,23,63,30,53,12,58,54,36,20,74,34,70,25,65, 4,10,58,37,56,
#' 6, 0,70,70,28,40,67,36,23,23,62,62,62, 2,34, 4,12,56, 1, 7,
#' 4,70,65, 7,30,40,13,22, 0,18,64,13,26, 1,16,33,22,30,53,53,
#' 7,61,40, 9,59, 7,12,46,50, 0,52,19,52,51,51,14,27,51, 5, 0,
#' 41,53,19)
#'
#' event1 = c(0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
#' 0,0,1,0,0,0,1,0,1,1,0,1,1,1,1,0,0,1,1,0,
#' 1,0,0,1,1,0,0,1,0,0,0,1,0,1,0,0,1,0,1,1,
#' 1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,
#' 0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,1,0,
#' 0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,1,1,0,1,0,0,0,0,1,0,0,0,0,0,
#' 1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,1,0,0,1,1,0,1,0,0,1,1,0,0,
#' 1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
#' 0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,1,1,0,0,
#' 1,1,1,1,0,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,
#' 0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
#' 0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
#' 0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
#' 1,0,0,0,0,0,0,1,0,0,0,0,1,0,1,0,1,0,0,1,
#' 1,1,0,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,
#' 0,0,0,1,0,0,0,0,1,0,0,1,0,1,0,1,1,0,1,0,
#' 1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
#' 1,0,0,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,0,1,
#' 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0,
#' 0,0,1)
#'
#' event2 = c(0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,1,
#' 0,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,0,0,
#' 0,0,0,0,0,0,0,0,1,1,1,0,1,0,1,1,0,1,0,0,
#' 0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,
#' 1,1,1,0,1,1,1,1,1,1,0,1,0,1,0,1,0,0,0,1,
#' 0,1,1,0,0,1,0,0,1,1,1,0,0,0,0,1,1,0,1,1,
#' 0,1,0,0,1,1,0,0,0,1,1,0,0,1,1,1,0,1,0,0,
#' 1,0,1,0,0,1,0,0,1,0,1,1,0,1,1,1,0,0,0,1,
#' 0,1,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,0,1,
#' 0,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,1,0,
#' 1,1,1,0,1,1,1,0,1,1,0,0,0,0,0,0,0,0,1,1,
#' 0,0,0,0,1,0,1,0,1,1,1,1,0,1,1,1,0,1,1,1,
#' 1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,1,
#' 0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
#' 0,0,1,0,0,1,0,0,1,0,0,1,0,1,1,0,0,1,1,1,
#' 1,1,0,0,1,0,0,0,0,1,1,1,1,0,1,1,1,0,1,0,
#' 1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,1,0,
#' 1,0,0,1,1,0,0,1,1,0,0,1,1,1,1,0,0,0,1,1,
#' 0,1,1,1,0,0,1,0,1,1,1,1,0,1,0,0,0,1,0,0,
#' 0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,1,
#' 1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
#' 0,1,0,0,1,1,0,1,1,1,0,0,0,1,0,1,0,0,1,1,
#' 0,0,0,0,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,0,
#' 0,0,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,1,1,
#' 1,0,0)
#'
#' library(Bivariate.Pareto)
#' set.seed(10)
#' MLE.Frank.Pareto(t.event,event1,event2,Theta = -5)
MLE.Frank.Pareto = function(t.event,event1,event2,Theta,Alpha1.0 = 1,Alpha2.0 = 1,
Gamma1.0 = 1,Gamma2.0 = 1,epsilon = 1e-5,d = exp(10),
r.1 = 6,r.2 = 6,r.3 = 6,r.4 = 6) {
### checking inputs ###
# n is also reported in the returned results, not only used for validation
n = length(t.event)
if (length(t.event[t.event < 0]) != 0) {stop("t.event must be non-negative")}
if (length(event1) != n) {stop("the length of event1 is different from t.event")}
if (length(event2) != n) {stop("the length of event2 is different from t.event")}
if (length(event1[event1 == 0 | event1 == 1]) != n) {stop("elements in event1 must be either 0 or 1")}
if (length(event2[event2 == 0 | event2 == 1]) != n) {stop("elements in event2 must be either 0 or 1")}
# a subject cannot fail from both causes simultaneously
temp.event = event1+event2
if (length(temp.event[temp.event == 2]) != 0) {stop("event1 and event2 cannot be 1 simultaneously")}
# Theta = 0 is excluded: the Frank copula expressions divide by exp(-Theta)-1
if (Theta == 0) {stop("Theta cannot be zero")}
if (Alpha1.0 <= 0) {stop("Alpha1.0 must be positive")}
if (Alpha2.0 <= 0) {stop("Alpha2.0 must be positive")}
# fixed copy-paste bugs: the Gamma1.0/Gamma2.0 checks previously reported
# "Alpha1.0"/"Alpha2.0 must be positive", and the r.4 check reported "r.3"
if (Gamma1.0 <= 0) {stop("Gamma1.0 must be positive")}
if (Gamma2.0 <= 0) {stop("Gamma2.0 must be positive")}
if (epsilon <= 0) {stop("epsilon must be positive")}
if (d <= 0) {stop("d must be positive")}
if (r.1 <= 0) {stop("r.1 must be positive")}
if (r.2 <= 0) {stop("r.2 must be positive")}
if (r.3 <= 0) {stop("r.3 must be positive")}
if (r.4 <= 0) {stop("r.4 must be positive")}
### functions ###
# Log-likelihood of the Frank-copula bivariate Pareto competing-risks model,
# evaluated at the unconstrained parameters
# par = (log Alpha1, log Alpha2, log Gamma1, log Gamma2).
# Reads t.event, event1, event2 and Theta from the enclosing scope.
log_L <- function(par){
  # back-transform to the natural (positive) parameter scale
  a1 <- exp(par[1])
  a2 <- exp(par[2])
  g1 <- exp(par[3])
  g2 <- exp(par[4])
  # marginal Pareto hazards and survival functions at the observed times
  haz1 <- a1*g1/(1+a1*t.event)
  haz2 <- a2*g2/(1+a2*t.event)
  surv1 <- (1+a1*t.event)^(-g1)
  surv2 <- (1+a2*t.event)^(-g2)
  # joint survival function induced by the Frank copula
  survT <- -(1/Theta)*log(1+(exp(-Theta*surv1)-1)*(exp(-Theta*surv2)-1)/(exp(-Theta)-1))
  # cause-specific sub-density contributions
  dens1 <- haz1*surv1*exp(-Theta*surv1)*(exp(-Theta*surv2)-1)/((exp(-Theta)-1)*exp(-Theta*survT))
  dens2 <- haz2*surv2*exp(-Theta*surv2)*(exp(-Theta*surv1)-1)/((exp(-Theta)-1)*exp(-Theta*survT))
  # censored + cause-1 + cause-2 log-likelihood terms
  sum((1-event1-event2)*log(survT))+sum(event1*log(dens1))+sum(event2*log(dens2))
}
# Score (gradient) vector of the log-likelihood with respect to the log-scale
# parameters par = (log Alpha1, log Alpha2, log Gamma1, log Gamma2).
# Reads t.event, event1, event2 and Theta from the enclosing MLE.Frank.Pareto scope.
SL_function = function(par){
# back-transform to the natural parameter scale
Alpha1 = exp(par[1])
Alpha2 = exp(par[2])
Gamma1 = exp(par[3])
Gamma2 = exp(par[4])
# marginal Pareto hazards and survival functions at the observed times
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
# Frank-copula building blocks and the joint survival function ST
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
# first derivatives of the hazards and survivals w.r.t. the natural parameters
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
# first derivatives of the joint survival ST
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
# each score component sums the cause-1, cause-2 and censored contributions
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
# chain rule: d/d(par) = exp(par) * d/d(natural parameter)
c(exp(par[1])*d1,exp(par[2])*d2,exp(par[3])*d3,exp(par[4])*d4)
}
# Hessian matrix of the log-likelihood with respect to the log-scale
# parameters par = (log Alpha1, log Alpha2, log Gamma1, log Gamma2),
# used in the Newton-Raphson updates. Returns a symmetric 4x4 matrix.
# Reads t.event, event1, event2 and Theta from the enclosing MLE.Frank.Pareto scope.
HL_function = function(par){
# back-transform to the natural parameter scale
Alpha1 = exp(par[1])
Alpha2 = exp(par[2])
Gamma1 = exp(par[3])
Gamma2 = exp(par[4])
# marginal Pareto hazards and survival functions at the observed times
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
# Frank-copula building blocks and the joint survival function ST
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
# first derivatives of the hazards and survivals w.r.t. the natural parameters
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
# first derivatives of the joint survival ST
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
# second derivatives of the hazards and survivals
der_h1_Alpha1_Alpha1 = -2*Gamma1*t.event/(1+Alpha1*t.event)^3
der_h2_Alpha2_Alpha2 = -2*Gamma2*t.event/(1+Alpha2*t.event)^3
der_S1_Alpha1_Alpha1 = Gamma1*(Gamma1+1)*t.event^2*(1+Alpha1*t.event)^(-Gamma1-2)
der_S2_Alpha2_Alpha2 = Gamma2*(Gamma2+1)*t.event^2*(1+Alpha2*t.event)^(-Gamma2-2)
der_h1_Gamma1_Gamma1 = 0
der_h2_Gamma2_Gamma2 = 0
der_S1_Gamma1_Gamma1 = (1+Alpha1*t.event)^(-Gamma1)*(log(1+Alpha1*t.event))^2
der_S2_Gamma2_Gamma2 = (1+Alpha2*t.event)^(-Gamma2)*(log(1+Alpha2*t.event))^2
der_h1_Alpha1_Gamma1 = (1+Alpha1*t.event)^(-2)
der_h2_Alpha2_Gamma2 = (1+Alpha2*t.event)^(-2)
der_S1_Alpha1_Gamma1 = t.event*(Gamma1*log(1+Alpha1*t.event)-1)/(1+Alpha1*t.event)^(Gamma1+1)
der_S2_Alpha2_Gamma2 = t.event*(Gamma2*log(1+Alpha2*t.event)-1)/(1+Alpha2*t.event)^(Gamma2+1)
# second derivatives of the joint survival ST (all parameter pairs)
der_ST_Alpha1_Alpha1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Alpha1*p1-Theta*der_S1_Alpha1^2*p1)-der_S1_Alpha1*p1*(p2-1)*(-Theta*der_S1_Alpha1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Alpha2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Alpha2*p2-Theta*der_S2_Alpha2^2*p2)-der_S2_Alpha2*p2*(p1-1)*(-Theta*der_S2_Alpha2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Alpha2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Alpha1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Alpha1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Gamma1_Gamma1*p1-Theta*der_S1_Gamma1^2*p1)-der_S1_Gamma1*p1*(p2-1)*(-Theta*der_S1_Gamma1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Gamma2_Gamma2*p2-Theta*der_S2_Gamma2^2*p2)-der_S2_Gamma2*p2*(p1-1)*(-Theta*der_S2_Gamma2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Gamma2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Gamma2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Gamma1*p1-Theta*der_S1_Alpha1*der_S1_Gamma1*p1)+Theta*der_S1_Alpha1*der_S1_Gamma1*p1^2*(p2-1)^2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*der_S1_Alpha1*-Theta*der_S2_Gamma2*p2+Theta*der_S1_Alpha1*der_S2_Gamma2*p1*p2*(p1-1)*(p2-1))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma1 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Gamma2*p2-Theta*der_S2_Alpha2*der_S2_Gamma2*p2)+Theta*der_S2_Alpha2*der_S2_Gamma2*p2^2*(p1-1)^2)/(p0-1+(p1-1)*(p2-1))^2
# natural-scale score components d1..d4 (same as SL_function);
# needed below for the chain-rule correction on the diagonal
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
# natural-scale second derivatives Dij; each one sums the
# cause-1, cause-2 and censored contributions (suffixes 1, 2, 3)
D111 = sum(event1*((der_h1_Alpha1_Alpha1*h1-der_h1_Alpha1^2)/h1^2+(der_S1_Alpha1_Alpha1*S1-der_S1_Alpha1^2)/S1^2-Theta*der_S1_Alpha1_Alpha1+Theta*der_ST_Alpha1_Alpha1))
D112 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Alpha1*p1+Theta^2*der_S1_Alpha1^2*p1)-Theta^2*der_S1_Alpha1^2*p1^2)/(p1-1)^2+Theta*der_ST_Alpha1_Alpha1))
D113 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha1-der_ST_Alpha1^2)/ST^2))
D11 = D111+D112+D113
D221 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Alpha2*p2+Theta^2*der_S2_Alpha2^2*p2)-Theta^2*der_S2_Alpha2^2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Alpha2))
D222 = sum(event2*((der_h2_Alpha2_Alpha2*h2-der_h2_Alpha2^2)/h2^2+(der_S2_Alpha2_Alpha2*S2-der_S2_Alpha2^2)/S2^2-Theta*der_S2_Alpha2_Alpha2+Theta*der_ST_Alpha2_Alpha2))
D223 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Alpha2-der_ST_Alpha2^2)/ST^2))
D22 = D221+D222+D223
D121 = sum(event1*(Theta*der_ST_Alpha1_Alpha2))
D122 = sum(event2*(Theta*der_ST_Alpha1_Alpha2))
D123 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha2-der_ST_Alpha1*der_ST_Alpha2)/ST^2))
D12 = D121+D122+D123
D331 = sum(event1*((der_h1_Gamma1_Gamma1*h1-der_h1_Gamma1^2)/h1^2+(der_S1_Gamma1_Gamma1*S1-der_S1_Gamma1^2)/S1^2-Theta*der_S1_Gamma1_Gamma1+Theta*der_ST_Gamma1_Gamma1))
D332 = sum(event2*(((p1-1)*(-Theta*der_S1_Gamma1_Gamma1*p1+Theta^2*der_S1_Gamma1^2*p1)-Theta^2*der_S1_Gamma1^2*p1^2)/(p1-1)^2+Theta*der_ST_Gamma1_Gamma1))
D333 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma1-der_ST_Gamma1^2)/ST^2))
D33 = D331+D332+D333
D441 = sum(event1*(((p2-1)*(-Theta*der_S2_Gamma2_Gamma2*p2+Theta^2*der_S2_Gamma2^2*p2)-Theta^2*der_S2_Gamma2^2*p2^2)/(p2-1)^2+Theta*der_ST_Gamma2_Gamma2))
D442 = sum(event2*((der_h2_Gamma2_Gamma2*h2-der_h2_Gamma2^2)/h2^2+(der_S2_Gamma2_Gamma2*S2-der_S2_Gamma2^2)/S2^2-Theta*der_S2_Gamma2_Gamma2+Theta*der_ST_Gamma2_Gamma2))
D443 = sum((1-event1-event2)*((ST*der_ST_Gamma2_Gamma2-der_ST_Gamma2^2)/ST^2))
D44 = D441+D442+D443
D341 = sum(event1*(Theta*der_ST_Gamma1_Gamma2))
D342 = sum(event2*(Theta*der_ST_Gamma1_Gamma2))
D343 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma2-der_ST_Gamma1*der_ST_Gamma2)/ST^2))
D34 = D341+D342+D343
D131 = sum(event1*((der_h1_Alpha1_Gamma1*h1-der_h1_Alpha1*der_h1_Gamma1)/h1^2 +(der_S1_Alpha1_Gamma1*S1-der_S1_Alpha1*der_S1_Gamma1)/S1^2-Theta*der_S1_Alpha1_Gamma1+Theta*der_ST_Alpha1_Gamma1))
D132 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Gamma1*p1)-Theta^2*der_S1_Alpha1*der_S1_Gamma1*p1)/(p1-1)^2+Theta*der_ST_Alpha1_Gamma1))
D133 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma1-der_ST_Alpha1*der_ST_Gamma1)/ST^2))
D13 = D131+D132+D133
D141 = sum(event1*(Theta*der_ST_Alpha1_Gamma2))
D142 = sum(event2*(Theta*der_ST_Alpha1_Gamma2))
D143 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma2-der_ST_Alpha1*der_ST_Gamma2)/ST^2))
D14 = D141+D142+D143
D231 = sum(event1*(Theta*der_ST_Alpha2_Gamma1))
D232 = sum(event2*(Theta*der_ST_Alpha2_Gamma1))
D233 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma1-der_ST_Alpha2*der_ST_Gamma1)/ST^2))
D23 = D231+D232+D233
D241 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Gamma2*p2+Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2)-Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Gamma2))
D242 = sum(event2*((der_h2_Alpha2_Gamma2*h2-der_h2_Alpha2*der_h2_Gamma2)/h2^2+(der_S2_Alpha2_Gamma2*S2-der_S2_Alpha2*der_S2_Gamma2)/S2^2-Theta*der_S2_Alpha2_Gamma2+Theta*der_ST_Alpha2_Gamma2))
D243 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma2-der_ST_Alpha2*der_ST_Gamma2)/ST^2))
D24 = D241+D242+D243
# chain rule to the log scale: diagonal terms pick up an extra exp(par)*d
# first-order correction; off-diagonal terms only get the exp(par_i)*exp(par_j) factor
DD11 = exp(2*par[1])*D11+exp(par[1])*d1
DD12 = exp(par[1])*exp(par[2])*D12
DD13 = exp(par[1])*exp(par[3])*D13
DD14 = exp(par[1])*exp(par[4])*D14
DD22 = exp(2*par[2])*D22+exp(par[2])*d2
DD23 = exp(par[2])*exp(par[3])*D23
DD24 = exp(par[2])*exp(par[4])*D24
DD33 = exp(2*par[3])*D33+exp(par[3])*d3
DD34 = exp(par[3])*exp(par[4])*D34
DD44 = exp(2*par[4])*D44+exp(par[4])*d4
# assemble the symmetric 4x4 Hessian (column-major fill)
matrix(c(DD11,DD12,DD13,DD14,DD12,DD22,DD23,DD24,DD13,DD23,DD33,DD34,DD14,DD24,DD34,DD44),4,4)
}
H_function = function(par){
Alpha1 = par[1]
Alpha2 = par[2]
Gamma1 = par[3]
Gamma2 = par[4]
h1 = Alpha1*Gamma1/(1+Alpha1*t.event)
h2 = Alpha2*Gamma2/(1+Alpha2*t.event)
S1 = (1+Alpha1*t.event)^(-Gamma1)
S2 = (1+Alpha2*t.event)^(-Gamma2)
p0 = exp(-Theta)
p1 = exp(-Theta*S1)
p2 = exp(-Theta*S2)
ST = -(1/Theta)*log(1+(exp(-Theta*S1)-1)*(exp(-Theta*S2)-1)/(exp(-Theta)-1))
der_h1_Alpha1 = Gamma1/(1+Alpha1*t.event)^2
der_h2_Alpha2 = Gamma2/(1+Alpha2*t.event)^2
der_S1_Alpha1 = -Gamma1*t.event*(1+Alpha1*t.event)^(-Gamma1-1)
der_S2_Alpha2 = -Gamma2*t.event*(1+Alpha2*t.event)^(-Gamma2-1)
der_h1_Gamma1 = Alpha1/(1+Alpha1*t.event)
der_h2_Gamma2 = Alpha2/(1+Alpha2*t.event)
der_S1_Gamma1 = -(1+Alpha1*t.event)^(-Gamma1)*log(1+Alpha1*t.event)
der_S2_Gamma2 = -(1+Alpha2*t.event)^(-Gamma2)*log(1+Alpha2*t.event)
der_ST_Alpha1 = der_S1_Alpha1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Alpha2 = der_S2_Alpha2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma1 = der_S1_Gamma1*p1*(p2-1)/(p0-1+(p1-1)*(p2-1))
der_ST_Gamma2 = der_S2_Gamma2*p2*(p1-1)/(p0-1+(p1-1)*(p2-1))
der_h1_Alpha1_Alpha1 = -2*Gamma1*t.event/(1+Alpha1*t.event)^3
der_h2_Alpha2_Alpha2 = -2*Gamma2*t.event/(1+Alpha2*t.event)^3
der_S1_Alpha1_Alpha1 = Gamma1*(Gamma1+1)*t.event^2*(1+Alpha1*t.event)^(-Gamma1-2)
der_S2_Alpha2_Alpha2 = Gamma2*(Gamma2+1)*t.event^2*(1+Alpha2*t.event)^(-Gamma2-2)
der_h1_Gamma1_Gamma1 = 0
der_h2_Gamma2_Gamma2 = 0
der_S1_Gamma1_Gamma1 = (1+Alpha1*t.event)^(-Gamma1)*(log(1+Alpha1*t.event))^2
der_S2_Gamma2_Gamma2 = (1+Alpha2*t.event)^(-Gamma2)*(log(1+Alpha2*t.event))^2
der_h1_Alpha1_Gamma1 = (1+Alpha1*t.event)^(-2)
der_h2_Alpha2_Gamma2 = (1+Alpha2*t.event)^(-2)
der_S1_Alpha1_Gamma1 = t.event*(Gamma1*log(1+Alpha1*t.event)-1)/(1+Alpha1*t.event)^(Gamma1+1)
der_S2_Alpha2_Gamma2 = t.event*(Gamma2*log(1+Alpha2*t.event)-1)/(1+Alpha2*t.event)^(Gamma2+1)
der_ST_Alpha1_Alpha1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Alpha1*p1-Theta*der_S1_Alpha1^2*p1)-der_S1_Alpha1*p1*(p2-1)*(-Theta*der_S1_Alpha1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Alpha2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Alpha2*p2-Theta*der_S2_Alpha2^2*p2)-der_S2_Alpha2*p2*(p1-1)*(-Theta*der_S2_Alpha2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Alpha2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Alpha1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Alpha1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Gamma1_Gamma1*p1-Theta*der_S1_Gamma1^2*p1)-der_S1_Gamma1*p1*(p2-1)*(-Theta*der_S1_Gamma1*p1*(p2-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Gamma2_Gamma2*p2-Theta*der_S2_Gamma2^2*p2)-der_S2_Gamma2*p2*(p1-1)*(-Theta*der_S2_Gamma2*p2*(p1-1)))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Gamma1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Gamma2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Gamma2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma1 = ((p0-1+(p1-1)*(p2-1))*(p2-1)*(der_S1_Alpha1_Gamma1*p1-Theta*der_S1_Alpha1*der_S1_Gamma1*p1)+Theta*der_S1_Alpha1*der_S1_Gamma1*p1^2*(p2-1)^2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha1_Gamma2 = ((p0-1+(p1-1)*(p2-1))*p1*der_S1_Alpha1*-Theta*der_S2_Gamma2*p2+Theta*der_S1_Alpha1*der_S2_Gamma2*p1*p2*(p1-1)*(p2-1))/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma1 = ((p0-1+(p1-1)*(p2-1))*p1*p2*-Theta*der_S1_Gamma1*der_S2_Alpha2+Theta*p1*p2*(p1-1)*(p2-1)*der_S1_Gamma1*der_S2_Alpha2)/(p0-1+(p1-1)*(p2-1))^2
der_ST_Alpha2_Gamma2 = ((p0-1+(p1-1)*(p2-1))*(p1-1)*(der_S2_Alpha2_Gamma2*p2-Theta*der_S2_Alpha2*der_S2_Gamma2*p2)+Theta*der_S2_Alpha2*der_S2_Gamma2*p2^2*(p1-1)^2)/(p0-1+(p1-1)*(p2-1))^2
d11 = sum(event1*(der_h1_Alpha1/h1+der_S1_Alpha1/S1-Theta*der_S1_Alpha1+Theta*der_ST_Alpha1))
d12 = sum(event2*(-Theta*der_S1_Alpha1*p1/(p1-1)+Theta*der_ST_Alpha1))
d13 = sum((1-event1-event2)*(der_ST_Alpha1/ST))
d1 = d11+d12+d13
d21 = sum(event1*(-Theta*der_S2_Alpha2*p2/(p2-1)+Theta*der_ST_Alpha2))
d22 = sum(event2*(der_h2_Alpha2/h2+der_S2_Alpha2/S2-Theta*der_S2_Alpha2+Theta*der_ST_Alpha2))
d23 = sum((1-event1-event2)*(der_ST_Alpha2/ST))
d2 = d21+d22+d23
d31 = sum(event1*(der_h1_Gamma1/h1+der_S1_Gamma1/S1-Theta*der_S1_Gamma1+Theta*der_ST_Gamma1))
d32 = sum(event2*(-Theta*der_S1_Gamma1*p1/(p1-1)+Theta*der_ST_Gamma1))
d33 = sum((1-event1-event2)*(der_ST_Gamma1/ST))
d3 = d31+d32+d33
d41 = sum(event1*(-Theta*der_S2_Gamma2*p2/(p2-1)+Theta*der_ST_Gamma2))
d42 = sum(event2*(der_h2_Gamma2/h2+der_S2_Gamma2/S2-Theta*der_S2_Gamma2+Theta*der_ST_Gamma2))
d43 = sum((1-event1-event2)*(der_ST_Gamma2/ST))
d4 = d41+d42+d43
D111 = sum(event1*((der_h1_Alpha1_Alpha1*h1-der_h1_Alpha1^2)/h1^2+(der_S1_Alpha1_Alpha1*S1-der_S1_Alpha1^2)/S1^2-Theta*der_S1_Alpha1_Alpha1+Theta*der_ST_Alpha1_Alpha1))
D112 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Alpha1*p1+Theta^2*der_S1_Alpha1^2*p1)-Theta^2*der_S1_Alpha1^2*p1^2)/(p1-1)^2+Theta*der_ST_Alpha1_Alpha1))
D113 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha1-der_ST_Alpha1^2)/ST^2))
D11 = D111+D112+D113
D221 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Alpha2*p2+Theta^2*der_S2_Alpha2^2*p2)-Theta^2*der_S2_Alpha2^2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Alpha2))
D222 = sum(event2*((der_h2_Alpha2_Alpha2*h2-der_h2_Alpha2^2)/h2^2+(der_S2_Alpha2_Alpha2*S2-der_S2_Alpha2^2)/S2^2-Theta*der_S2_Alpha2_Alpha2+Theta*der_ST_Alpha2_Alpha2))
D223 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Alpha2-der_ST_Alpha2^2)/ST^2))
D22 = D221+D222+D223
D121 = sum(event1*(Theta*der_ST_Alpha1_Alpha2))
D122 = sum(event2*(Theta*der_ST_Alpha1_Alpha2))
D123 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Alpha2-der_ST_Alpha1*der_ST_Alpha2)/ST^2))
D12 = D121+D122+D123
D331 = sum(event1*((der_h1_Gamma1_Gamma1*h1-der_h1_Gamma1^2)/h1^2+(der_S1_Gamma1_Gamma1*S1-der_S1_Gamma1^2)/S1^2-Theta*der_S1_Gamma1_Gamma1+Theta*der_ST_Gamma1_Gamma1))
D332 = sum(event2*(((p1-1)*(-Theta*der_S1_Gamma1_Gamma1*p1+Theta^2*der_S1_Gamma1^2*p1)-Theta^2*der_S1_Gamma1^2*p1^2)/(p1-1)^2+Theta*der_ST_Gamma1_Gamma1))
D333 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma1-der_ST_Gamma1^2)/ST^2))
D33 = D331+D332+D333
D441 = sum(event1*(((p2-1)*(-Theta*der_S2_Gamma2_Gamma2*p2+Theta^2*der_S2_Gamma2^2*p2)-Theta^2*der_S2_Gamma2^2*p2^2)/(p2-1)^2+Theta*der_ST_Gamma2_Gamma2))
D442 = sum(event2*((der_h2_Gamma2_Gamma2*h2-der_h2_Gamma2^2)/h2^2+(der_S2_Gamma2_Gamma2*S2-der_S2_Gamma2^2)/S2^2-Theta*der_S2_Gamma2_Gamma2+Theta*der_ST_Gamma2_Gamma2))
D443 = sum((1-event1-event2)*((ST*der_ST_Gamma2_Gamma2-der_ST_Gamma2^2)/ST^2))
D44 = D441+D442+D443
D341 = sum(event1*(Theta*der_ST_Gamma1_Gamma2))
D342 = sum(event2*(Theta*der_ST_Gamma1_Gamma2))
D343 = sum((1-event1-event2)*((ST*der_ST_Gamma1_Gamma2-der_ST_Gamma1*der_ST_Gamma2)/ST^2))
D34 = D341+D342+D343
D131 = sum(event1*((der_h1_Alpha1_Gamma1*h1-der_h1_Alpha1*der_h1_Gamma1)/h1^2 +(der_S1_Alpha1_Gamma1*S1-der_S1_Alpha1*der_S1_Gamma1)/S1^2-Theta*der_S1_Alpha1_Gamma1+Theta*der_ST_Alpha1_Gamma1))
D132 = sum(event2*(((p1-1)*(-Theta*der_S1_Alpha1_Gamma1*p1)-Theta^2*der_S1_Alpha1*der_S1_Gamma1*p1)/(p1-1)^2+Theta*der_ST_Alpha1_Gamma1))
D133 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma1-der_ST_Alpha1*der_ST_Gamma1)/ST^2))
D13 = D131+D132+D133
D141 = sum(event1*(Theta*der_ST_Alpha1_Gamma2))
D142 = sum(event2*(Theta*der_ST_Alpha1_Gamma2))
D143 = sum((1-event1-event2)*((ST*der_ST_Alpha1_Gamma2-der_ST_Alpha1*der_ST_Gamma2)/ST^2))
D14 = D141+D142+D143
D231 = sum(event1*(Theta*der_ST_Alpha2_Gamma1))
D232 = sum(event2*(Theta*der_ST_Alpha2_Gamma1))
D233 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma1-der_ST_Alpha2*der_ST_Gamma1)/ST^2))
D23 = D231+D232+D233
D241 = sum(event1*(((p2-1)*(-Theta*der_S2_Alpha2_Gamma2*p2+Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2)-Theta^2*der_S2_Alpha2*der_S2_Gamma2*p2^2)/(p2-1)^2+Theta*der_ST_Alpha2_Gamma2))
D242 = sum(event2*((der_h2_Alpha2_Gamma2*h2-der_h2_Alpha2*der_h2_Gamma2)/h2^2+(der_S2_Alpha2_Gamma2*S2-der_S2_Alpha2*der_S2_Gamma2)/S2^2-Theta*der_S2_Alpha2_Gamma2+Theta*der_ST_Alpha2_Gamma2))
D243 = sum((1-event1-event2)*((ST*der_ST_Alpha2_Gamma2-der_ST_Alpha2*der_ST_Gamma2)/ST^2))
D24 = D241+D242+D243
matrix(c(D11,D12,D13,D14,D12,D22,D23,D24,D13,D23,D33,D34,D14,D24,D34,D44),4,4)
}
par_old = c(log(Alpha1.0),log(Alpha2.0),log(Gamma1.0),log(Gamma2.0))
count = 0
random = 0
repeat{
temp = try(solve(HL_function(par_old),silent = TRUE))
if (is(temp,"try-error")){
random = random+1
count = 0
par_old = c(log(Alpha1.0*exp(runif(1,-r.1,r.1))),
log(Alpha2.0*exp(runif(1,-r.2,r.2))),
log(Gamma1.0*exp(runif(1,-r.3,r.3))),
log(Gamma2.0*exp(runif(1,-r.4,r.4))))
next
}
par_new = par_old-solve(HL_function(par_old))%*%SL_function(par_old)
count = count+1
if (is.na(sum(par_new)) |
max(abs(par_new)) > log(d)) {
random = random+1
count = 0
par_old = c(log(Alpha1.0*exp(runif(1,-r.1,r.1))),
log(Alpha2.0*exp(runif(1,-r.2,r.2))),
log(Gamma1.0*exp(runif(1,-r.3,r.3))),
log(Gamma2.0*exp(runif(1,-r.4,r.4))))
next
}
if (max(abs(exp(par_old)-exp(par_new))) < epsilon) {break}
par_old = par_new
}
Alpha1_hat = exp(par_new[1])
Alpha2_hat = exp(par_new[2])
Gamma1_hat = exp(par_new[3])
Gamma2_hat = exp(par_new[4])
Info = solve(-H_function(exp(par_new)))
Alpha1_se = sqrt(Info[1,1])
Alpha2_se = sqrt(Info[2,2])
Gamma1_se = sqrt(Info[3,3])
Gamma2_se = sqrt(Info[4,4])
InfoL = solve(-HL_function(par_new))
CI_Alpha1 = c(Alpha1_hat*exp(-qnorm(0.975)*sqrt(InfoL[1,1])),
Alpha1_hat*exp(+qnorm(0.975)*sqrt(InfoL[1,1])))
CI_Alpha2 = c(Alpha2_hat*exp(-qnorm(0.975)*sqrt(InfoL[2,2])),
Alpha2_hat*exp(+qnorm(0.975)*sqrt(InfoL[2,2])))
CI_Gamma1 = c(Gamma1_hat*exp(-qnorm(0.975)*sqrt(InfoL[3,3])),
Gamma1_hat*exp(+qnorm(0.975)*sqrt(InfoL[3,3])))
CI_Gamma2 = c(Gamma2_hat*exp(-qnorm(0.975)*sqrt(InfoL[4,4])),
Gamma2_hat*exp(+qnorm(0.975)*sqrt(InfoL[4,4])))
MedX_hat = (2^(1/Gamma1_hat)-1)/Alpha1_hat
MedY_hat = (2^(1/Gamma2_hat)-1)/Alpha2_hat
transX = c((1-2^(1/Gamma1_hat))/Alpha1_hat^2,0,-2^(1/Gamma1_hat)*log(2)/(Alpha1_hat*Gamma1_hat^2),0)
transY = c(0,(1-2^(1/Gamma2_hat))/Alpha2_hat^2,0,-2^(1/Gamma2_hat)*log(2)/(Alpha2_hat*Gamma2_hat^2))
MedX_se = sqrt(t(transX)%*%Info%*%transX)
MedY_se = sqrt(t(transY)%*%Info%*%transY)
temp_transX = c(-1,0,-2^(1/Gamma1_hat)*log(2)/((2^(1/Gamma1_hat)-1)*Gamma1_hat),0)
temp_transY = c(0,-1,0,-2^(1/Gamma2_hat)*log(2)/((2^(1/Gamma2_hat)-1)*Gamma2_hat))
temp_MedX_se = sqrt(t(temp_transX)%*%InfoL%*%temp_transX)
temp_MedY_se = sqrt(t(temp_transY)%*%InfoL%*%temp_transY)
CI_MedX = c(MedX_hat*exp(-qnorm(0.975)*temp_MedX_se),
MedX_hat*exp(+qnorm(0.975)*temp_MedX_se))
CI_MedY = c(MedY_hat*exp(-qnorm(0.975)*temp_MedY_se),
MedY_hat*exp(+qnorm(0.975)*temp_MedY_se))
Alpha1.res = c(Estimate = Alpha1_hat,SE = Alpha1_se,CI.lower = CI_Alpha1[1],CI.upper = CI_Alpha1[2])
Alpha2.res = c(Estimate = Alpha2_hat,SE = Alpha2_se,CI.lower = CI_Alpha2[1],CI.upper = CI_Alpha2[2])
Gamma1.res = c(Estimate = Gamma1_hat,SE = Gamma1_se,CI.lower = CI_Gamma1[1],CI.upper = CI_Gamma1[2])
Gamma2.res = c(Estimate = Gamma2_hat,SE = Gamma2_se,CI.lower = CI_Gamma2[1],CI.upper = CI_Gamma2[2])
MedX.res = c(Estimate = MedX_hat,SE = MedX_se,CI.lower = CI_MedX[1],CI.upper = CI_MedX[2])
MedY.res = c(Estimate = MedY_hat,SE = MedY_se,CI.lower = CI_MedY[1],CI.upper = CI_MedY[2])
if (Gamma1_hat < 1 & Gamma2_hat < 1) {
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = "Unavaliable",MeanY = "Unavaliable",
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else if (Gamma1_hat >= 1 & Gamma2_hat >= 1) {
MeanX_hat = 1/(Alpha1_hat*(Gamma1_hat-1))
MeanY_hat = 1/(Alpha2_hat*(Gamma2_hat-1))
trans2X = c(-1/(Alpha1_hat^2*(Gamma1_hat-1)),0,-1/(Alpha1_hat*(Gamma1_hat-1)^2),0)
trans2Y = c(0,-1/(Alpha2_hat^2*(Gamma2_hat-1)),0,-1/(Alpha2_hat*(Gamma2_hat-1)^2))
MeanX_se = sqrt(t(trans2X)%*%Info%*%trans2X)
MeanY_se = sqrt(t(trans2Y)%*%Info%*%trans2Y)
temp_trans2X = c(-1,0,-Gamma1_hat/(Gamma1_hat-1),0)
temp_trans2Y = c(0,-1,0,-Gamma2_hat/(Gamma2_hat-1))
temp_MeanX_se = sqrt(t(temp_trans2X)%*%InfoL%*%temp_trans2X)
temp_MeanY_se = sqrt(t(temp_trans2Y)%*%InfoL%*%temp_trans2Y)
CI_MeanX = c(MeanX_hat*exp(-qnorm(0.975)*temp_MeanX_se),
MeanX_hat*exp(+qnorm(0.975)*temp_MeanX_se))
CI_MeanY = c(MeanY_hat*exp(-qnorm(0.975)*temp_MeanY_se),
MeanY_hat*exp(+qnorm(0.975)*temp_MeanY_se))
MeanX.res = c(Estimate = MeanX_hat,SE = MeanX_se,CI.lower = CI_MeanX[1],CI.upper = CI_MeanX[2])
MeanY.res = c(Estimate = MeanY_hat,SE = MeanY_se,CI.lower = CI_MeanY[1],CI.upper = CI_MeanY[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = MeanX.res,MeanY = MeanY.res,
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else if (Gamma1_hat >= 1 & Gamma2_hat < 1) {
MeanX_hat = 1/(Alpha1_hat*(Gamma1_hat-1))
trans2X = c(-1/(Alpha1_hat^2*(Gamma1_hat-1)),0,-1/(Alpha1_hat*(Gamma1_hat-1)^2),0)
MeanX_se = sqrt(t(trans2X)%*%Info%*%trans2X)
temp_trans2X = c(-1,0,-Gamma1_hat/(Gamma1_hat-1),0)
temp_MeanX_se = sqrt(t(temp_trans2X)%*%InfoL%*%temp_trans2X)
CI_MeanX = c(MeanX_hat*exp(-qnorm(0.975)*temp_MeanX_se),
MeanX_hat*exp(+qnorm(0.975)*temp_MeanX_se))
MeanX.res = c(Estimate = MeanX_hat,SE = MeanX_se,CI.lower = CI_MeanX[1],CI.upper = CI_MeanX[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = MeanX.res,MeanY = "Unavaliable",
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
} else {
MeanY_hat = 1/(Alpha2_hat*(Gamma2_hat-1))
trans2Y = c(0,-1/(Alpha2_hat^2*(Gamma2_hat-1)),0,-1/(Alpha2_hat*(Gamma2_hat-1)^2))
MeanY_se = sqrt(t(trans2Y)%*%Info%*%trans2Y)
temp_trans2Y = c(0,-1,0,-Gamma2_hat/(Gamma2_hat-1))
temp_MeanY_se = sqrt(t(temp_trans2Y)%*%InfoL%*%temp_trans2Y)
CI_MeanY = c(MeanY_hat*exp(-qnorm(0.975)*temp_MeanY_se),
MeanY_hat*exp(+qnorm(0.975)*temp_MeanY_se))
MeanY.res = c(Estimate = MeanY_hat,SE = MeanY_se,CI.lower = CI_MeanY[1],CI.upper = CI_MeanY[2])
return(list(n = n,Iteration = count,Randomization = random,
Alpha1 = Alpha1.res,Alpha2 = Alpha2.res,Gamma1 = Gamma1.res,Gamma2 = Gamma2.res,
MedX = MedX.res,MedY = MedY.res,MeanX = "Unavaliable",MeanY = MeanY.res,
logL = log_L(par_new),AIC = 2*length(par_new)-2*log_L(par_new),
BIC = length(par_new)*log(length(t.event))-2*log_L(par_new)))
}
}
|
# Bootstrap the dependency manager itself: install pacman from the ETH Zurich
# CRAN mirror if it is not already available, then attach it.
# (require() is used deliberately here as an availability probe.)
if (!require("pacman")) install.packages("pacman", repos='https://stat.ethz.ch/CRAN/'); library(pacman)
# Install (if missing) and attach every package the app needs.
# Fix: `knitr` was listed twice in the original call; the duplicate is removed.
p_load(shiny,
knitr,
markdown,
ggplot2,
grid,
DT,
dplyr,
tidyr,
httpuv,
shinyjs,
assertthat,
ggvis)
# options(shiny.trace = FALSE)
# Shared helpers (e.g. makeBarDf used by the server) live two levels up.
source("../../util.R")
# UI definition: two instruction sections rendered from R Markdown, each
# followed by a right-hand sidebar layout.
#   Section 1: sampling controls (sample count, observations per sample,
#              "Draw samples" button) next to the population scatterplot and
#              three ggvis histograms of the sampling distribution.
#   Section 2: a slider for the central t-distribution area next to the
#              shaded-area plot and two confidence-interval plots.
# rmarkdownOutput() is presumably defined in ../../util.R — not visible here.
ui <- basicPage(
useShinyjs(),
rmarkdownOutput("../../Instructions/confidenceInterval-1.Rmd"),
sidebarLayout(position = "right",
sidebarPanel(
# min 10, max 500, default 100, step 10
sliderInput("sampleCount", "How many times to sample?:", 10, 500, 100, 10),
# min 5, max 50, default 10, step 1
sliderInput("obsCount", "How many observations in each sample?:", 5, 50, 10, 1),
actionButton("sampleBtn", "Draw samples")
),
mainPanel(
# clickable population scatterplot; clicks add points (see server)
plotOutput("plotScatter", click = "plot_click", width = "400px", height = "150px"),
ggvisOutput("plotHist"),
ggvisOutput("plotSampleHist"),
ggvisOutput("plotNormMean")
# "*: Each of the mean is centered around the population mean (corresponding to zero in this plot) and scaled by the SE of each sample."
)
),
rmarkdownOutput("../../Instructions/confidenceInterval-2.Rmd"),
sidebarLayout(position = "right",
sidebarPanel(
# central area under the t density: min 0, max 0.999, default 0.95
sliderInput("tArea", "Area under the blue line:", 0, 0.999, 0.95, 0.05)
),
mainPanel(
ggvisOutput("plotTAreas"),
ggvisOutput("plotCINorm"),
ggvisOutput("plotCI")
)
)
)
# Shiny server. Maintains a clickable "population" of points, repeatedly
# samples from it, and visualizes the sampling distribution, the normalized
# (t-scaled) means, and confidence intervals with ggvis.
# All shared mutable state lives in the `val` reactiveValues container;
# several reactives below both read and WRITE `val` (order-sensitive).
server <- function(input, output,session) {
# initial hard-coded population seed points
x <- c(3, 10, 15, 3, 4, 7, 1, 12)
y <- c(4, 10, 12, 17, 15, 20, 14, 3)
# x-axis range and step for the "normalized means" / t-distribution plots
normXRange <- c(-5, 5)
normXResolution <- 0.01
# initialize reactive values with existing data
val <- reactiveValues(data = cbind (x = x, y = y),
isPlotInitialized = FALSE,
statMean = NULL,
statMedian = NULL,
statMode = NULL,
statSD = NULL,
sampleDf = NULL,
meanValDf = NULL,
sdBarDf = NULL,
seBarDf = NULL,
ciBarDf = NULL,
nonCaptureCount = NULL,
normMeanVis = NULL,
sampleMeanDf = NULL)
# observe click on the scatterplot: each click adds a cloud of 20 normally
# distributed points centered on the click location
observeEvent(input$plot_click, {
xRand <- rnorm(20, mean = input$plot_click$x, sd = 1)
yRand <- rnorm(20, mean = input$plot_click$y, sd = 1)
data <- rbind(val$data, cbind(x = xRand, y = yRand))
data <- tail(data, 200) # cap at 200 data points
val$data <- data
})
# render scatterplot of the current population
output$plotScatter <- renderPlot({
p <- ggplot(data = NULL, aes(x=val$data[,1], y=val$data[,2])) +
geom_point() +
theme_bw() +
theme(legend.position="none") +
xlim(-1, 16) +
xlab("x") +
ylab("y")
p
})
# render histogram (and calculate statistics)
# NOTE: this reactive has side effects — it writes the population statistics
# into `val`, which other reactives (e.g. normMeanVis) read.
hisVis <- reactive({
# only the x column of the population is used for the histogram
histData <- data.frame(x = val$data[,1])
val$statMean <- mean(histData$x)
val$statSD <- sd(histData$x)
val$statMedian <- median(histData$x)
val$statMode <- 0 # TODO: update this line to the new mode function findModes(histData$x)$values
# pack descriptive statistics for plotting
statData <- data.frame(
value = c(val$statMean), #, val$statMedian, val$statMode),
stat = c("mean"), #, "median", rep("mode", length(val$statMode)) ),
color = c("blue") #, "green", rep("orange", length(val$statMode)))
)
# endpoints of the +/- 1 SD segment around the population mean
# NOTE(review): `<-` inside data.frame() does not name the columns (and leaks
# x/y into this scope); `=` was probably intended — confirm that ~x below
# resolves to the intended values.
statSDDf <- data.frame(
x <- c(val$statMean - val$statSD, val$statMean + val$statSD),
y <- c(1, 1)
)
# a thin vertical bar (width 0.02) marking the mean
meanVbarDf <- data.frame(x = val$statMean - 0.01, x2 =val$statMean + 0.01)
# plot histogram
histData %>%
ggvis(~x) %>%
add_axis("x", title = "x") %>%
scale_numeric("x", domain = c(-1,16)) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
hide_legend('fill') %>%
# histogram of the population
layer_histograms(width = 1, fill := "lightgray", stroke := NA) %>%
# population mean
layer_points(data = statData, x = ~value, y = 0, fillOpacity := 0.8, fill := ~color) %>%
layer_rects(data = meanVbarDf, x = ~x, x2 = ~x2, y := 0, y2 = 0, stroke := "blue") %>%
# population SD
layer_paths(data = statSDDf, x = ~x, y = 0, stroke := "blue")
})
hisVis %>% bind_shiny("plotHist")
# plot histogram of samples (distribution of the per-sample means)
sampleHistVis <- reactive({
meanValDf <- val$meanValDf
sampleMeanDf <- val$sampleMeanDf
# SD of the sample means, drawn as a horizontal segment around their mean
sdOfSampleMeans <- sd(meanValDf$Mean)
sdLeft <- sampleMeanDf$SampleMean - sdOfSampleMeans
sdRight <- sampleMeanDf$SampleMean + sdOfSampleMeans
sdDf <- data.frame(x = sdLeft, x2 = sdRight)
meanVbarDf <- data.frame(x = sampleMeanDf$SampleMean - 0.01, x2 = sampleMeanDf$SampleMean + 0.01)
meanValDf %>%
ggvis(~Mean) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
add_axis("x", title = "Green dot: Mean of the means and its SD") %>%
add_axis("y", title = "Count of means") %>%
hide_legend('fill') %>%
scale_numeric("x", domain = c(-1, 16)) %>%
# standard deviation of the sample means
layer_rects(data = sdDf, x = ~x, x2 = ~x2, y = 0, y2 = 0, stroke := "green") %>%
# distribution of means
layer_histograms(width = 0.1, fill := "grey", fillOpacity := 0.5, stroke := NA) %>%
# mean of the sample means (sample mean)
layer_points(data = sampleMeanDf, x = ~SampleMean, y = ~y, fill := "white", stroke := "green") %>%
layer_rects(data = meanVbarDf, x = ~x, x2 = ~x2, y := 0, y2 = 0, stroke := "green")
})
# update sample navigation slider
# NOTE(review): no "sampleWindow" slider is defined in the ui shown in this
# file — presumably a leftover from another version; confirm.
observeEvent(input$sampleCount, {
updateSliderInput(session, "sampleWindow", max = input$sampleCount - 9)
})
# plot histogram of rescaled samples: each sample mean is centered on the
# population mean and scaled by its own SE, then compared to a t density
normMeanVis <- reactive({
meanValDf <- val$meanValDf
# adjust location and scale
popMean <- val$statMean
meanValDf$normMean <- (meanValDf$Mean - popMean) / (meanValDf$SD / sqrt(input$obsCount))
# remove samples that are -Inf (all observations are the same data point)
meanValDf <- meanValDf[which(!is.infinite(meanValDf$normMean)),]
# binning data and calculate probability histogram
suppressMessages(
bins <- compute_bin(meanValDf, ~normMean)
)
# convert counts to a density so the histogram is comparable to dt()
bins$prob <- bins$count_ / ( bins$width_[1] * sum(bins$count_))
# generate the t location-scale distribution
dtX <- seq(normXRange[1], normXRange[2], normXResolution)
dtY <- dt(dtX, df = input$obsCount - 1)
dtDf <- data.frame(x = dtX, y = dtY)
# for plotting mean
statData <- data.frame(
x = 0,
y = 0
)
# plot (and save to reactive variable for reuse)
# tAreaVis below layers ribbons on top of this saved plot object
val$normMeanVis <- bins %>%
ggvis(x = ~x_, y = ~prob) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
add_axis("x", title = "Normalized means") %>%
add_axis("y", title = "Relative frequency density") %>%
scale_numeric("x", domain = normXRange, nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
# distribution of means
layer_bars(width = bins$width_[1], stack = FALSE, fill := "lightgrey", stroke := NA) %>%
# t distribution
layer_paths(data = dtDf, x = ~x, y = ~y, stroke := "lightblue", strokeWidth := 3) %>%
# standard error is roughly equal to 1
layer_rects(data = statData,x = -1, x2 = 1, y = ~y, y2 = ~y, stroke := "green") %>%
# the scale is centered around population mean
layer_points(data = statData, x = ~x, y = ~y, fillOpacity := 0.8, fill := "blue") %>%
layer_rects(data = statData, x = ~x, x2 = ~x, y := 0, y2 = 0, stroke := "blue")
val$normMeanVis
})
# shade the central `tArea` region of the t density on top of normMeanVis
tAreaVis <- reactive({
tDOF <- input$obsCount - 1
tX <- seq(normXRange[1], normXRange[2], normXResolution)
tY <- dt(tX, df = tDOF)
tArea <- input$tArea
# symmetric tail cuts: (1 - tArea)/2 on each side
lCut <- (1 - tArea) / 2
rCut <- tArea + lCut
tVals <- sort(qt(c(lCut, rCut) , df = tDOF))
selected <- ifelse(tX < tVals[1] | tX > tVals[2], FALSE, TRUE)
fill <- ifelse(selected, "blue", NA)
distDf <- data.frame(x = tX, y = tY, selected = selected, fill = fill)
selDf <- distDf[which(distDf$selected == TRUE),]
val$normMeanVis %>%
layer_ribbons(data = distDf, x = ~x, y = ~y, y2 = 0, fill := "white", fillOpacity := 0.8) %>%
layer_ribbons(data = selDf, x = ~x, y = ~y, y2 = 0, fill := "lightblue", fillOpacity := 0.6) %>%
hide_legend("fill")
})
# CI endpoints on the normalized (t) scale
# NOTE(review): quantiles here are taken at tArea and 1 - tArea, whereas
# tAreaVis uses the central cuts (1 - tArea)/2 and 1 - (1 - tArea)/2 — the
# shaded area and this interval will not match; confirm which is intended.
tCINormVis <- reactive({
tDOF <- input$obsCount - 1
tArea <- input$tArea
tVals <- sort(qt(c(tArea, 1 - tArea) , df = tDOF))
ciDf <- data.frame(x = 0, ci = tVals)
ciDf %>%
ggvis() %>%
set_options(width = 400, height = 100, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas",
padding = padding(10, 10, 40, 43)) %>%
add_axis("x", title = "Interval in the 'Normalized means' scale", grid = FALSE) %>%
add_axis("y", ticks = 0, grid = FALSE) %>%
scale_numeric("x", domain = normXRange, nice = FALSE, clamp = TRUE) %>%
scale_numeric("y", domain = c(-2, 2), nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
layer_paths(x = ~ci, y = 0, stroke := "lightblue", strokeWidth := 2) %>%
layer_points(x = ~x, y = 0, shape := "diamond", fill := "grey")
})
# CI of the FIRST sample mapped back to the original data scale
# (same quantile caveat as tCINormVis above)
tCIVis <- reactive({
tDOF <- input$obsCount - 1
tArea <- input$tArea
tVals <- sort(qt(c(tArea, 1 - tArea) , df = tDOF))
# use the first drawn sample's mean and SE
aMean <- val$meanValDf$Mean[1]
aSE <- val$meanValDf$SD[1] / sqrt(input$obsCount)
ciDf <- data.frame(x = aMean, ci = c(aMean + tVals[1] * aSE, aMean + tVals[2] * aSE)) # NOTE: since we got the two sides of the t values, we only add "+" for both lower and upper CI
ciDf %>%
ggvis() %>%
set_options(width = 400, height = 100, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas",
padding = padding(10, 10, 40, 43)) %>%
add_axis("x", title = "Interval in the original scale", grid = FALSE) %>%
add_axis("y", ticks = 0, grid = FALSE) %>%
scale_numeric("x", domain = c(-1, 16), nice = FALSE, clamp = TRUE) %>%
scale_numeric("y", domain = c(-2, 2), nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
layer_paths(x = ~ci, y = 0, stroke := "grey", strokeWidth := 2) %>%
layer_points(x = ~x, y = 0, shape := "diamond", fill := "grey")
})
# handle sampling: re-draws all samples whenever the counts change or the
# button is pressed; the population itself is read with isolate() so new
# clicks do not retrigger sampling
observeEvent(c(input$sampleCount, input$obsCount, input$sampleBtn), {
data <- isolate(val$data)
# draw samples: one row of indices per sample, sampled with replacement
sampleRowIdxs <- matrix(sample.int(nrow(data), input$obsCount * input$sampleCount, replace = TRUE), nrow = input$sampleCount)
sampleVals <- matrix(data[sampleRowIdxs], nrow = input$sampleCount)
sampleDf <- data.frame(x = as.numeric(sampleVals), SampleId = rep(1:input$sampleCount, each = input$obsCount))
# calculate mean and SD of each sample (sample distribution)
meanVals <- apply(sampleVals, 1, mean)
sdVals <- apply(sampleVals, 1, sd)
meanValDf <- data.frame(Mean = meanVals, SD = sdVals, SampleId = 1:input$sampleCount)
# calculate the intervals for plotting
# makeBarDf() is presumably defined in ../../util.R — not visible here
valSE <- meanValDf$SD / sqrt(input$obsCount)
sdBarDf <- makeBarDf(meanValDf, meanValDf$SD)
seBarDf <- makeBarDf(meanValDf, valSE)
# NOTE: CI is calculated in a separate reactive block below
# calculate the sample mean (mean of means)
sampleMean <- mean(meanVals)
sampleMeanDf <- data.frame(SampleMean = sampleMean, y = 0)
# update reactive values
val$sampleDf <- sampleDf
val$meanValDf <- meanValDf
val$sampleMeanDf <- sampleMeanDf
val$sdBarDf <- sdBarDf
val$seBarDf <- seBarDf
# start the vis: bind the ggvis plots once, after the first sample draw,
# so they are not evaluated before val$meanValDf exists
if (!val$isPlotInitialized)
{
sampleHistVis %>% bind_shiny("plotSampleHist")
normMeanVis %>% bind_shiny("plotNormMean")
tAreaVis %>% bind_shiny("plotTAreas")
tCINormVis %>% bind_shiny("plotCINorm")
tCIVis %>% bind_shiny("plotCI")
val$isPlotInitialized <- TRUE
}
})
}
shinyApp(ui, server) | /apps/04_confidenceInterval/app.R | no_license | chatchavan/StatisticsLecture | R | false | false | 12,343 | r | if (!require("pacman")) install.packages("pacman", repos='https://stat.ethz.ch/CRAN/'); library(pacman)
p_load(shiny,
knitr,
markdown,
ggplot2,
grid,
DT,
dplyr,
tidyr,
knitr,
httpuv,
shinyjs,
assertthat,
ggvis)
# options(shiny.trace = FALSE)
source("../../util.R")
ui <- basicPage(
useShinyjs(),
rmarkdownOutput("../../Instructions/confidenceInterval-1.Rmd"),
sidebarLayout(position = "right",
sidebarPanel(
sliderInput("sampleCount", "How many times to sample?:", 10, 500, 100, 10),
sliderInput("obsCount", "How many observations in each sample?:", 5, 50, 10, 1),
actionButton("sampleBtn", "Draw samples")
),
mainPanel(
plotOutput("plotScatter", click = "plot_click", width = "400px", height = "150px"),
ggvisOutput("plotHist"),
ggvisOutput("plotSampleHist"),
ggvisOutput("plotNormMean")
# "*: Each of the mean is centered around the population mean (corresponding to zero in this plot) and scaled by the SE of each sample."
)
),
rmarkdownOutput("../../Instructions/confidenceInterval-2.Rmd"),
sidebarLayout(position = "right",
sidebarPanel(
sliderInput("tArea", "Area under the blue line:", 0, 0.999, 0.95, 0.05)
),
mainPanel(
ggvisOutput("plotTAreas"),
ggvisOutput("plotCINorm"),
ggvisOutput("plotCI")
)
)
)
server <- function(input, output,session) {
x <- c(3, 10, 15, 3, 4, 7, 1, 12)
y <- c(4, 10, 12, 17, 15, 20, 14, 3)
normXRange <- c(-5, 5)
normXResolution <- 0.01
# initialize reactive values with existing data
val <- reactiveValues(data = cbind (x = x, y = y),
isPlotInitialized = FALSE,
statMean = NULL,
statMedian = NULL,
statMode = NULL,
statSD = NULL,
sampleDf = NULL,
meanValDf = NULL,
sdBarDf = NULL,
seBarDf = NULL,
ciBarDf = NULL,
nonCaptureCount = NULL,
normMeanVis = NULL,
sampleMeanDf = NULL)
# observe click on the scatterplot
observeEvent(input$plot_click, {
xRand <- rnorm(20, mean = input$plot_click$x, sd = 1)
yRand <- rnorm(20, mean = input$plot_click$y, sd = 1)
data <- rbind(val$data, cbind(x = xRand, y = yRand))
data <- tail(data, 200) # cap at 200 data points
val$data <- data
})
# render scatterplot
output$plotScatter <- renderPlot({
p <- ggplot(data = NULL, aes(x=val$data[,1], y=val$data[,2])) +
geom_point() +
theme_bw() +
theme(legend.position="none") +
xlim(-1, 16) +
xlab("x") +
ylab("y")
p
})
# render histogram (and calculate statistics)
hisVis <- reactive({
histData <- data.frame(x = val$data[,1])
val$statMean <- mean(histData$x)
val$statSD <- sd(histData$x)
val$statMedian <- median(histData$x)
val$statMode <- 0 # TODO: update this line to the new mode function findModes(histData$x)$values
# pack descriptive statistics for plotting
statData <- data.frame(
value = c(val$statMean), #, val$statMedian, val$statMode),
stat = c("mean"), #, "median", rep("mode", length(val$statMode)) ),
color = c("blue") #, "green", rep("orange", length(val$statMode)))
)
statSDDf <- data.frame(
x <- c(val$statMean - val$statSD, val$statMean + val$statSD),
y <- c(1, 1)
)
meanVbarDf <- data.frame(x = val$statMean - 0.01, x2 =val$statMean + 0.01)
# plot histogram
histData %>%
ggvis(~x) %>%
add_axis("x", title = "x") %>%
scale_numeric("x", domain = c(-1,16)) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
hide_legend('fill') %>%
# histogram of the population
layer_histograms(width = 1, fill := "lightgray", stroke := NA) %>%
# population mean
layer_points(data = statData, x = ~value, y = 0, fillOpacity := 0.8, fill := ~color) %>%
layer_rects(data = meanVbarDf, x = ~x, x2 = ~x2, y := 0, y2 = 0, stroke := "blue") %>%
# population SD
layer_paths(data = statSDDf, x = ~x, y = 0, stroke := "blue")
})
hisVis %>% bind_shiny("plotHist")
# plot histogram of samples
sampleHistVis <- reactive({
meanValDf <- val$meanValDf
sampleMeanDf <- val$sampleMeanDf
sdOfSampleMeans <- sd(meanValDf$Mean)
sdLeft <- sampleMeanDf$SampleMean - sdOfSampleMeans
sdRight <- sampleMeanDf$SampleMean + sdOfSampleMeans
sdDf <- data.frame(x = sdLeft, x2 = sdRight)
meanVbarDf <- data.frame(x = sampleMeanDf$SampleMean - 0.01, x2 = sampleMeanDf$SampleMean + 0.01)
meanValDf %>%
ggvis(~Mean) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
add_axis("x", title = "Green dot: Mean of the means and its SD") %>%
add_axis("y", title = "Count of means") %>%
hide_legend('fill') %>%
scale_numeric("x", domain = c(-1, 16)) %>%
# standard deviation of the sample means
layer_rects(data = sdDf, x = ~x, x2 = ~x2, y = 0, y2 = 0, stroke := "green") %>%
# distribution of means
layer_histograms(width = 0.1, fill := "grey", fillOpacity := 0.5, stroke := NA) %>%
# mean of the sample means (sample mean)
layer_points(data = sampleMeanDf, x = ~SampleMean, y = ~y, fill := "white", stroke := "green") %>%
layer_rects(data = meanVbarDf, x = ~x, x2 = ~x2, y := 0, y2 = 0, stroke := "green")
})
# update sample navigation slider
observeEvent(input$sampleCount, {
updateSliderInput(session, "sampleWindow", max = input$sampleCount - 9)
})
# plot histogram of rescaled samples
normMeanVis <- reactive({
meanValDf <- val$meanValDf
# adjust location and scale
popMean <- val$statMean
meanValDf$normMean <- (meanValDf$Mean - popMean) / (meanValDf$SD / sqrt(input$obsCount))
# remove samples that are -Inf (all observations are the same data point)
meanValDf <- meanValDf[which(!is.infinite(meanValDf$normMean)),]
# binning data and calculate probability histogram
suppressMessages(
bins <- compute_bin(meanValDf, ~normMean)
)
bins$prob <- bins$count_ / ( bins$width_[1] * sum(bins$count_))
# generate the t location-scale distribution
dtX <- seq(normXRange[1], normXRange[2], normXResolution)
dtY <- dt(dtX, df = input$obsCount - 1)
dtDf <- data.frame(x = dtX, y = dtY)
# for plotting mean
statData <- data.frame(
x = 0,
y = 0
)
# plot (and save to reactive variable for reuse)
val$normMeanVis <- bins %>%
ggvis(x = ~x_, y = ~prob) %>%
set_options(width = 400, height = 200, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas") %>%
add_axis("x", title = "Normalized means") %>%
add_axis("y", title = "Relative frequency density") %>%
scale_numeric("x", domain = normXRange, nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
# distribution of means
layer_bars(width = bins$width_[1], stack = FALSE, fill := "lightgrey", stroke := NA) %>%
# t distribution
layer_paths(data = dtDf, x = ~x, y = ~y, stroke := "lightblue", strokeWidth := 3) %>%
# standard error is roughly equal to 1
layer_rects(data = statData,x = -1, x2 = 1, y = ~y, y2 = ~y, stroke := "green") %>%
# the scale is centered around population mean
layer_points(data = statData, x = ~x, y = ~y, fillOpacity := 0.8, fill := "blue") %>%
layer_rects(data = statData, x = ~x, x2 = ~x, y := 0, y2 = 0, stroke := "blue")
val$normMeanVis
})
tAreaVis <- reactive({
tDOF <- input$obsCount - 1
tX <- seq(normXRange[1], normXRange[2], normXResolution)
tY <- dt(tX, df = tDOF)
tArea <- input$tArea
lCut <- (1 - tArea) / 2
rCut <- tArea + lCut
tVals <- sort(qt(c(lCut, rCut) , df = tDOF))
selected <- ifelse(tX < tVals[1] | tX > tVals[2], FALSE, TRUE)
fill <- ifelse(selected, "blue", NA)
distDf <- data.frame(x = tX, y = tY, selected = selected, fill = fill)
selDf <- distDf[which(distDf$selected == TRUE),]
val$normMeanVis %>%
layer_ribbons(data = distDf, x = ~x, y = ~y, y2 = 0, fill := "white", fillOpacity := 0.8) %>%
layer_ribbons(data = selDf, x = ~x, y = ~y, y2 = 0, fill := "lightblue", fillOpacity := 0.6) %>%
hide_legend("fill")
})
tCINormVis <- reactive({
tDOF <- input$obsCount - 1
tArea <- input$tArea
tVals <- sort(qt(c(tArea, 1 - tArea) , df = tDOF))
ciDf <- data.frame(x = 0, ci = tVals)
ciDf %>%
ggvis() %>%
set_options(width = 400, height = 100, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas",
padding = padding(10, 10, 40, 43)) %>%
add_axis("x", title = "Interval in the 'Normalized means' scale", grid = FALSE) %>%
add_axis("y", ticks = 0, grid = FALSE) %>%
scale_numeric("x", domain = normXRange, nice = FALSE, clamp = TRUE) %>%
scale_numeric("y", domain = c(-2, 2), nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
layer_paths(x = ~ci, y = 0, stroke := "lightblue", strokeWidth := 2) %>%
layer_points(x = ~x, y = 0, shape := "diamond", fill := "grey")
})
tCIVis <- reactive({
tDOF <- input$obsCount - 1
tArea <- input$tArea
tVals <- sort(qt(c(tArea, 1 - tArea) , df = tDOF))
aMean <- val$meanValDf$Mean[1]
aSE <- val$meanValDf$SD[1] / sqrt(input$obsCount)
ciDf <- data.frame(x = aMean, ci = c(aMean + tVals[1] * aSE, aMean + tVals[2] * aSE)) # NOTE: since we got the two sides of the t values, we only add "+" for both lower and upper CI
ciDf %>%
ggvis() %>%
set_options(width = 400, height = 100, resizable = FALSE, keep_aspect = TRUE, renderer = "canvas",
padding = padding(10, 10, 40, 43)) %>%
add_axis("x", title = "Interval in the original scale", grid = FALSE) %>%
add_axis("y", ticks = 0, grid = FALSE) %>%
scale_numeric("x", domain = c(-1, 16), nice = FALSE, clamp = TRUE) %>%
scale_numeric("y", domain = c(-2, 2), nice = FALSE, clamp = TRUE) %>%
hide_legend('fill') %>%
layer_paths(x = ~ci, y = 0, stroke := "grey", strokeWidth := 2) %>%
layer_points(x = ~x, y = 0, shape := "diamond", fill := "grey")
})
# handle sampling
observeEvent(c(input$sampleCount, input$obsCount, input$sampleBtn), {
data <- isolate(val$data)
# draw samples
sampleRowIdxs <- matrix(sample.int(nrow(data), input$obsCount * input$sampleCount, replace = TRUE), nrow = input$sampleCount)
sampleVals <- matrix(data[sampleRowIdxs], nrow = input$sampleCount)
sampleDf <- data.frame(x = as.numeric(sampleVals), SampleId = rep(1:input$sampleCount, each = input$obsCount))
# calculate mean and SD of each sample (sample distribution)
meanVals <- apply(sampleVals, 1, mean)
sdVals <- apply(sampleVals, 1, sd)
meanValDf <- data.frame(Mean = meanVals, SD = sdVals, SampleId = 1:input$sampleCount)
# calculate the intervals for plotting
valSE <- meanValDf$SD / sqrt(input$obsCount)
sdBarDf <- makeBarDf(meanValDf, meanValDf$SD)
seBarDf <- makeBarDf(meanValDf, valSE)
# NOTE: CI is calculated in a separate reactive block below
# calculate the sample mean (mean of means)
sampleMean <- mean(meanVals)
sampleMeanDf <- data.frame(SampleMean = sampleMean, y = 0)
# update reactive values
val$sampleDf <- sampleDf
val$meanValDf <- meanValDf
val$sampleMeanDf <- sampleMeanDf
val$sdBarDf <- sdBarDf
val$seBarDf <- seBarDf
# start the vis
if (!val$isPlotInitialized)
{
sampleHistVis %>% bind_shiny("plotSampleHist")
normMeanVis %>% bind_shiny("plotNormMean")
tAreaVis %>% bind_shiny("plotTAreas")
tCINormVis %>% bind_shiny("plotCINorm")
tCIVis %>% bind_shiny("plotCI")
val$isPlotInitialized <- TRUE
}
})
}
shinyApp(ui, server) |
#' @title Creates a calendar
#'
#' @description
#' The \code{Calendar} stores all information necessary to compute business days.
#' This works like a helper class for many of \code{bizdays}' methods.
#'
#' @param holidays a vector of Dates which contains the holidays
#' @param startDate the date which the calendar starts
#' @param endDate the date which the calendar ends
#' @param pattern the format used to parse \code{startDate} and \code{endDate}
#' when they are given as \code{character}: either \code{"\%Y-\%m-\%d"} (default)
#' or \code{"\%Y\%m\%d"}
#' @param name calendar's name; defaults to \code{"gregorian"}
#' @param dib a single numeric variable which indicates the amount of days
#' within a year (\code{dib} stands for days in base).
#'
#' @details
#' Every date between \code{startDate} and \code{endDate} which is not listed
#' in \code{holidays} is considered a business day.
#'
#' \code{dib} refers to \emph{days in base} and represents the amount of days
#' within a year. That is necessary for defining Day Count Conventions and for
#' accounting annualized periods (see \code{\link{bizyears}}).
#'
#' \code{Calendar} doesn't have to be named, but it helps identifying the
#' calendars once many are instantiated. You name a \code{Calendar} by setting
#' the argument \code{name}.
#'
#' @return a \code{Calendar} object: a list carrying the business-day indexes
#' consumed by \code{bizdays}, \code{is.bizday}, \code{bizseq} and \code{\%add\%}.
#'
#' @export
#' @examples
#' data(holidayCN)
#' that <- Calendar(name="CN", holidays = holidayCN)
#' options(calendar=that)
#'
#' # ACTUAL calendar
#' cal <- Calendar(name="Actual", dib=365)
#' # calendar default name is gregorian
#' cal <- Calendar(startDate="1976-07-12", endDate="2013-10-28")
#' cal$name # "gregorian"
Calendar <- function (holidays=integer(0),
                      startDate="2005-01-01", endDate="2020-12-31",
                      pattern = c("%Y-%m-%d","%Y%m%d"), name="gregorian", dib=NULL) {
  # validate the date format before parsing anything
  pattern <- match.arg(pattern)
  # coerce the calendar bounds to Date
  startDate <- as.Date(startDate, format = pattern)
  endDate <- as.Date(endDate, format = pattern)
  # every calendar day in range; the integer form (days since 1970-01-01) is
  # kept alongside for fast matching against holidays and later lookups
  dates <- seq(from = startDate, to = endDate, by = 'day')
  n.dates <- as.integer(dates)
  n.holidays <- as.integer(as.Date(holidays))
  .is.bizday <- !n.dates %in% n.holidays
  # `index` maps every calendar day to the ordinal of the last business day at
  # or before it (cumulative sum of the business-day mask)
  n.bizdays <- n.dates[.is.bizday]
  index.bizdays <- seq_along(n.bizdays)
  index <- cumsum(.is.bizday)
  that <- list(name = name, dib = dib, startDate = startDate, endDate = endDate,
               index = index, maxindex = max(index.bizdays),
               mindate = min(n.dates), maxdate = max(n.dates),
               bizdays = dates[.is.bizday], n.bizdays = n.dates[.is.bizday],
               holidays = dates[!.is.bizday],
               # FIX: n.holidays must carry the integer form to mirror n.bizdays;
               # it used to be assigned dates[!.is.bizday] (the Date form) again
               n.holidays = n.dates[!.is.bizday],
               n.dates = n.dates
  )
  # set class attribute
  class(that) <- 'Calendar'
  return(that)
}
#' @export
print.Calendar <- function(cal, ...) {
  # FIX: format() is required here — cat() renders Date objects as their
  # underlying numeric value (days since 1970-01-01), so the previous
  # as.Date(..., origin=) calls still printed numbers instead of dates
  cat('Calendar:', cal$name,
      '\nRange:', format(cal$startDate),
      'to', format(cal$endDate),
      '\ndib:', cal$dib,
      '\n')
  # return the calendar invisibly so print() composes with pipes/assignment
  invisible(cal)
}
#' Adjusts the given dates to the next/previous business day
#'
#' If the given dates are business days it returns the given dates, but once it
#' is not, it returns the next/previous business days.
#'
#' @param lhs dates to be adjusted
#' @param rhs offset days
#' @param method whether dates that are not business days are rolled to the
#' \code{"next"} (default) or \code{"previous"} business day before the offset
#' \code{rhs} is applied
#' @param cal an instance of \code{Calendar}
#' @param ... further arguments passed on to methods
#'
#' @section Date types accepted:
#'
#' The argument \code{lhs} accepts \code{Date} objects and any
#' object that returns a valid \code{Date} object when passed through
#' \code{as.Date}, which include all \code{POSIX*} classes and \code{character}
#' objects with ISO formatted dates.
#'
#' @return
#' \code{Date} objects adjusted accordingly.
#'
#' @rdname adjust.date
#'
#' @export
`%add%` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) UseMethod("%add%")
#' @export
`%add%.default` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) {
  # coerce anything date-like to Date, then dispatch to the Date method.
  # FIX: this used to call `%+%.Date`, which is not defined in this file
  # (the operator is named `%add%`), so every non-Date input errored out
  lhs <- as.Date(lhs)
  `%add%.Date`(lhs, rhs, method, cal)
}
#' @export
`%add%.Date` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) {
  # rhs is the business-day offset: a scalar or a vector matching lhs
  stopifnot(is.numeric(rhs))
  stopifnot(length(rhs) == 1L || length(lhs) == length(rhs))
  method <- match.arg(method)
  # direction used to roll non-business days onto a business day
  offset <- switch(method, "next" = 1L, "previous" = -1L)
  n.lhs <- as.integer(lhs)
  # idx holds, for each date, its position in cal$n.bizdays (0L = not a bizday);
  # the original code assigned FALSE into an integer vector to mean "unmatched",
  # relying on implicit 0/FALSE coercion — 0L states the same thing explicitly
  idx <- match(n.lhs, cal$n.bizdays)
  idx[is.na(idx)] <- 0L
  # roll unmatched dates one day at a time until every date is a business day
  while (!all(idx > 0L)) {
    unmatched <- idx == 0L
    n.lhs[unmatched] <- n.lhs[unmatched] + offset
    if (any(n.lhs > cal$maxdate)) stop("Exceed the calendar max date")
    # FIX: guard against rolling past the calendar start with method="previous";
    # without this check the loop never terminated below cal$mindate
    if (any(n.lhs < cal$mindate)) stop("Exceed the calendar min date")
    idx <- match(n.lhs, cal$n.bizdays)
    idx[is.na(idx)] <- 0L
  }
  # apply the business-day offset, staying inside the calendar bounds
  if (any(idx + rhs <= 0)) stop("Exceed the calendar min date")
  if (any(idx + rhs > cal$maxindex)) stop("Exceed the calendar max date")
  cal$bizdays[idx + rhs]
}
#' next biz date
#'
#' Adjusts \code{lhs} onto a business day (rolling backwards when it is not
#' one) and then moves one business day forward.
#'
#' @rdname adjust.date
#' @export
nextbiz <- function(lhs, method = "previous", cal = getOption("calendar"))
  `%add%`(lhs, 1, method, cal) # FIX: called `%+%`, which is not defined in this file; the operator is `%add%`
#' previous biz date
#'
#' Adjusts \code{lhs} onto a business day (rolling forward when it is not one)
#' and then moves one business day backwards.
#'
#' @rdname adjust.date
#' @export
prevbiz <- function(lhs, method = "next", cal = getOption("calendar"))
  `%add%`(lhs, -1, method, cal) # FIX: called `%+%`, which is not defined in this file; the operator is `%add%`
#' Computes business days between two dates.
#'
#' Counts the business days of the calendar \code{cal} falling between
#' \code{from} and \code{to}; endpoint adjustment happens in the Date method.
#'
#' @param from start dates
#' @param to end dates
#' @param cal an instance of \code{Calendar}
#' @export
bizdays <- function(from, to, cal=getOption("calendar")) UseMethod('bizdays')
#' @export
bizdays.default <- function(from, to, cal=getOption("calendar")) {
# coerce `from` to Date and dispatch to the Date method (`to` is coerced there)
from <- as.Date(from)
bizdays.Date(from, to, cal)
}
#' @export
bizdays.Date <- function(from, to, cal=getOption("calendar")) {
  # best-effort coercion: `to` may already be Date, or be all-NA below
  tryCatch({to <- as.Date(to)}, error = function(e) e)
  # all-NA `to` short-circuits with NAs of the broadcast length
  if (all(is.na(to))) return( rep(NA, max(length(to), length(from))) )
  # FIX: the range checks used any(), which only failed when *every* date was
  # out of range — a single out-of-range date mixed with valid ones slipped
  # through silently (na.rm keeps the original tolerance for NA entries)
  if (!all(from >= cal$startDate & from <= cal$endDate, na.rm = TRUE))
    stop('Given "from" date out of range.')
  if (!all(to >= cal$startDate & to <= cal$endDate, na.rm = TRUE))
    stop('Given "to" date out of range.')
  # lengths must broadcast (one a multiple of the other)
  lengths <- c(length(from), length(to))
  if (max(lengths) %% min(lengths) != 0)
    stop("from's length must be multiple of to's length and vice-versa.")
  # FIX: the message was inverted — the rule enforced is from <= to
  if (!all(from <= to, na.rm = TRUE))
    stop('All from dates must be less than or equal to all to dates.')
  # FIX: `%+%` is not defined in this file; the adjustment operator is `%add%`.
  # `from` rolls forward onto a business day, `to` rolls backwards
  from <- `%add%`(from, rhs = 0)
  to <- `%add%`(to, rhs = 0, method = "previous")
  # translate both endpoints to business-day ordinals and count inclusively
  from.idx <- cal$index[match(as.integer(from), cal$n.dates)]
  to.idx <- cal$index[match(as.integer(to), cal$n.dates)]
  to.idx - from.idx + 1
}
#' Checks whether the given dates are business days.
#'
#' @param dates dates to be checked
#' @param cal an instance of \code{Calendar}
#' @return a logical vector, \code{TRUE} for business days
#' @export
is.bizday <- function(dates,cal=getOption("calendar")) UseMethod("is.bizday")
#' @export
is.bizday.default <- function(dates,cal=getOption("calendar")) {
# coerce to Date and dispatch to the Date method
dates <- as.Date(dates)
is.bizday(dates, cal)
}
#' @export
is.bizday.Date <- function(dates, cal=getOption("calendar")) {
  # FIX: the range check used any(), so it only failed when *every* date was
  # out of range; a single out-of-range date mixed with valid ones passed
  if (!all(dates >= cal$startDate & dates <= cal$endDate))
    stop('Given date out of range.')
  # a date is a business day when its integer form appears in the calendar index
  as.integer(dates) %in% cal$n.bizdays
}
#' Computes a sequence of business days between two dates.
#'
#' @param from start date
#' @param to end date
#' @param cal an instance of \code{Calendar}
#' @return a \code{Date} vector with every business day in \code{[from, to]}
#' @export
bizseq <- function(from, to, cal=getOption("calendar")) UseMethod('bizseq')
#' @export
bizseq.default <- function(from, to, cal=getOption("calendar")) {
# coerce `from` to Date and dispatch to the Date method (`to` is coerced there)
from <- as.Date(from)
bizseq(from, to, cal)
}
#' @export
bizseq.Date <- function(from, to, cal=getOption("calendar")) {
  to <- as.Date(to)
  # FIX: the range checks used any(), which only failed when *every* date was
  # out of range — a single out-of-range date mixed with valid ones passed
  if (!all(from >= cal$startDate & from <= cal$endDate))
    stop('Given "from" date out of range.')
  if (!all(to >= cal$startDate & to <= cal$endDate))
    stop('Given "to" date out of range.')
  # FIX: the message was inverted — the rule enforced is from <= to
  if (!all(from <= to))
    stop('All from dates must be less than or equal to all to dates.')
  from <- as.integer(from)
  to <- as.integer(to)
  # every business day falling inside [from, to], converted back to Date
  as.Date(cal$n.bizdays[which(cal$n.bizdays >= from & cal$n.bizdays <= to)], origin = '1970-01-01')
}
| /R/Calendar.R | no_license | jokbull/bizday | R | false | false | 8,823 | r | #' @title Creates a calendar
#'
#' @description
#' The \code{Calendar} stores all information necessary to compute business days.
#' This works like a helper class for many of \code{bizdays}' methods.
#'
#' @param holidays a vector of Dates which contains the holidays
#' @param startDate the date which the calendar starts
#' @param endDate the date which the calendar ends
#' @param name calendar's name
#' @param dib a single numeric variable which indicates the amount of days
#' within a year (\code{dib} stands for days in base).
#'
#' @details
#' The arguments \code{startDate} and \code{endDate} bound the calendar; they
#' default to \code{'2005-01-01'} and \code{'2020-12-31'} and are parsed with
#' the format given in \code{pattern} when supplied as \code{character}.
#'
#' \code{weekdays} is controversial but it is only a sequence of nonworking weekdays.
#' In the great majority of situations it refers to the weekend but it is also possible defining
#' it differently.
#' \code{weekdays} accepts a \code{character} sequence with lower case weekdays (
#' \code{sunday}, \code{monday}, \code{tuesday}, \code{wednesday}, \code{thursday},
#' \code{friday}, \code{saturday}).
#' This argument defaults to \code{NULL} because the default intended behavior for
#' \code{Calendar} returns an \emph{actual} calendar, so calling \code{Calendar(dib=365)}
#' returns a \emph{actual/365} calendar and \code{Calendar(dib=360)} and \emph{actual/360}
#' (for more calendars see \href{http://en.wikipedia.org/wiki/Day_count_convention}{Day Count Convention})
#' To define the weekend as the nonworking weekdays one could simply
#' use \code{weekdays=c("saturday", "sunday")}.
#'
#' \code{dib} refers to \emph{days in base} and represents the amount of days within a year.
#' That is necessary for defining Day Count Conventions and for accounting annualized periods
#' (see \code{\link{bizyears}}).
#'
#' The arguments \code{adjust.from} and \code{adjust.to} are used to adjust \code{bizdays}' arguments
#' \code{from} and \code{to}, respectively.
#' These arguments need to be adjusted when nonworking days are provided.
#' The default behavior, setting \code{adjust.from=adjust.previous} and \code{adjust.to=adjust.next},
#' works like Excel's function NETWORKDAYS, since that is fairly used by a great number of practitioners.
#'
#' \code{Calendar} doesn't have to be named, but it helps identifying the calendars once many are instantiated.
#' You name a \code{Calendar} by setting the argument \code{name}.
#'
#' @export
#' @examples
#' data(holidayCN)
#' that <- Calendar(name="CN", holidays = holidayCN)
#' options(calendar=that)
#'
#' # ACTUAL calendar
#' cal <- Calendar(name="Actual", dib=365)
#' # calendar default name is gregorian
#' cal <- Calendar(startDate="1976-07-12", endDate="2013-10-28")
#' cal$name # "gregorian" (the default name)
Calendar <- function (holidays=integer(0),
                      startDate="2005-01-01", endDate="2020-12-31",
                      pattern = c("%Y-%m-%d","%Y%m%d"), name="gregorian", dib=NULL) {
  # validate the date format before parsing anything
  pattern <- match.arg(pattern)
  # coerce the calendar bounds to Date
  startDate <- as.Date(startDate, format = pattern)
  endDate <- as.Date(endDate, format = pattern)
  # every calendar day in range; the integer form (days since 1970-01-01) is
  # kept alongside for fast matching against holidays and later lookups
  dates <- seq(from = startDate, to = endDate, by = 'day')
  n.dates <- as.integer(dates)
  n.holidays <- as.integer(as.Date(holidays))
  .is.bizday <- !n.dates %in% n.holidays
  # `index` maps every calendar day to the ordinal of the last business day at
  # or before it (cumulative sum of the business-day mask)
  n.bizdays <- n.dates[.is.bizday]
  index.bizdays <- seq_along(n.bizdays)
  index <- cumsum(.is.bizday)
  that <- list(name = name, dib = dib, startDate = startDate, endDate = endDate,
               index = index, maxindex = max(index.bizdays),
               mindate = min(n.dates), maxdate = max(n.dates),
               bizdays = dates[.is.bizday], n.bizdays = n.dates[.is.bizday],
               holidays = dates[!.is.bizday],
               # FIX: n.holidays must carry the integer form to mirror n.bizdays;
               # it used to be assigned dates[!.is.bizday] (the Date form) again
               n.holidays = n.dates[!.is.bizday],
               n.dates = n.dates
  )
  # set class attribute
  class(that) <- 'Calendar'
  return(that)
}
#' @export
print.Calendar <- function(cal, ...) {
  # FIX: format() is required here — cat() renders Date objects as their
  # underlying numeric value (days since 1970-01-01), so the previous
  # as.Date(..., origin=) calls still printed numbers instead of dates
  cat('Calendar:', cal$name,
      '\nRange:', format(cal$startDate),
      'to', format(cal$endDate),
      '\ndib:', cal$dib,
      '\n')
  # return the calendar invisibly so print() composes with pipes/assignment
  invisible(cal)
}
#' Adjusts the given dates to the next/previous business day
#'
#' If the given dates are business days it returns the given dates, but once it
#' is not, it returns the next/previous business days.
#'
#' @param lhs dates to be adjusted
#' @param rhs offset days
#' @param method whether dates that are not business days are rolled to the
#' \code{"next"} (default) or \code{"previous"} business day before the offset
#' \code{rhs} is applied
#' @param cal an instance of \code{Calendar}
#' @param ... further arguments passed on to methods
#'
#' @section Date types accepted:
#'
#' The argument \code{lhs} accepts \code{Date} objects and any
#' object that returns a valid \code{Date} object when passed through
#' \code{as.Date}, which include all \code{POSIX*} classes and \code{character}
#' objects with ISO formatted dates.
#'
#' @return
#' \code{Date} objects adjusted accordingly.
#'
#' @rdname adjust.date
#'
#' @export
`%add%` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) UseMethod("%add%")
#' @export
`%add%.default` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) {
  # coerce anything date-like to Date, then dispatch to the Date method.
  # FIX: this used to call `%+%.Date`, which is not defined in this file
  # (the operator is named `%add%`), so every non-Date input errored out
  lhs <- as.Date(lhs)
  `%add%.Date`(lhs, rhs, method, cal)
}
#' @export
`%add%.Date` <- function(lhs, rhs, method = c("next","previous"), cal = getOption("calendar"),...) {
  # rhs is the business-day offset: a scalar or a vector matching lhs
  stopifnot(is.numeric(rhs))
  stopifnot(length(rhs) == 1L || length(lhs) == length(rhs))
  method <- match.arg(method)
  # direction used to roll non-business days onto a business day
  offset <- switch(method, "next" = 1L, "previous" = -1L)
  n.lhs <- as.integer(lhs)
  # idx holds, for each date, its position in cal$n.bizdays (0L = not a bizday);
  # the original code assigned FALSE into an integer vector to mean "unmatched",
  # relying on implicit 0/FALSE coercion — 0L states the same thing explicitly
  idx <- match(n.lhs, cal$n.bizdays)
  idx[is.na(idx)] <- 0L
  # roll unmatched dates one day at a time until every date is a business day
  while (!all(idx > 0L)) {
    unmatched <- idx == 0L
    n.lhs[unmatched] <- n.lhs[unmatched] + offset
    if (any(n.lhs > cal$maxdate)) stop("Exceed the calendar max date")
    # FIX: guard against rolling past the calendar start with method="previous";
    # without this check the loop never terminated below cal$mindate
    if (any(n.lhs < cal$mindate)) stop("Exceed the calendar min date")
    idx <- match(n.lhs, cal$n.bizdays)
    idx[is.na(idx)] <- 0L
  }
  # apply the business-day offset, staying inside the calendar bounds
  if (any(idx + rhs <= 0)) stop("Exceed the calendar min date")
  if (any(idx + rhs > cal$maxindex)) stop("Exceed the calendar max date")
  cal$bizdays[idx + rhs]
}
#' next biz date
#'
#' Adjusts \code{lhs} onto a business day (rolling backwards when it is not
#' one) and then moves one business day forward.
#'
#' @rdname adjust.date
#' @export
nextbiz <- function(lhs, method = "previous", cal = getOption("calendar"))
  `%add%`(lhs, 1, method, cal) # FIX: called `%+%`, which is not defined in this file; the operator is `%add%`
#' previous biz date
#'
#' Adjusts \code{lhs} onto a business day (rolling forward when it is not one)
#' and then moves one business day backwards.
#'
#' @rdname adjust.date
#' @export
prevbiz <- function(lhs, method = "next", cal = getOption("calendar"))
  `%add%`(lhs, -1, method, cal) # FIX: called `%+%`, which is not defined in this file; the operator is `%add%`
#' Computes business days between two dates.
#'
#' Counts the business days of the calendar \code{cal} falling between
#' \code{from} and \code{to}; endpoint adjustment happens in the Date method.
#'
#' @param from start dates
#' @param to end dates
#' @param cal an instance of \code{Calendar}
#' @export
bizdays <- function(from, to, cal=getOption("calendar")) UseMethod('bizdays')
#' @export
bizdays.default <- function(from, to, cal=getOption("calendar")) {
# coerce `from` to Date and dispatch to the Date method (`to` is coerced there)
from <- as.Date(from)
bizdays.Date(from, to, cal)
}
#' @export
bizdays.Date <- function(from, to, cal=getOption("calendar")) {
  # best-effort coercion: `to` may already be Date, or be all-NA below
  tryCatch({to <- as.Date(to)}, error = function(e) e)
  # all-NA `to` short-circuits with NAs of the broadcast length
  if (all(is.na(to))) return( rep(NA, max(length(to), length(from))) )
  # FIX: the range checks used any(), which only failed when *every* date was
  # out of range — a single out-of-range date mixed with valid ones slipped
  # through silently (na.rm keeps the original tolerance for NA entries)
  if (!all(from >= cal$startDate & from <= cal$endDate, na.rm = TRUE))
    stop('Given "from" date out of range.')
  if (!all(to >= cal$startDate & to <= cal$endDate, na.rm = TRUE))
    stop('Given "to" date out of range.')
  # lengths must broadcast (one a multiple of the other)
  lengths <- c(length(from), length(to))
  if (max(lengths) %% min(lengths) != 0)
    stop("from's length must be multiple of to's length and vice-versa.")
  # FIX: the message was inverted — the rule enforced is from <= to
  if (!all(from <= to, na.rm = TRUE))
    stop('All from dates must be less than or equal to all to dates.')
  # FIX: `%+%` is not defined in this file; the adjustment operator is `%add%`.
  # `from` rolls forward onto a business day, `to` rolls backwards
  from <- `%add%`(from, rhs = 0)
  to <- `%add%`(to, rhs = 0, method = "previous")
  # translate both endpoints to business-day ordinals and count inclusively
  from.idx <- cal$index[match(as.integer(from), cal$n.dates)]
  to.idx <- cal$index[match(as.integer(to), cal$n.dates)]
  to.idx - from.idx + 1
}
#' Checks whether the given dates are business days.
#'
#' @param dates dates to be checked
#' @param cal an instance of \code{Calendar}
#' @return a logical vector, \code{TRUE} for business days
#' @export
is.bizday <- function(dates,cal=getOption("calendar")) UseMethod("is.bizday")
#' @export
is.bizday.default <- function(dates,cal=getOption("calendar")) {
# coerce to Date and dispatch to the Date method
dates <- as.Date(dates)
is.bizday(dates, cal)
}
#' @export
is.bizday.Date <- function(dates, cal=getOption("calendar")) {
  # FIX: the range check used any(), so it only failed when *every* date was
  # out of range; a single out-of-range date mixed with valid ones passed
  if (!all(dates >= cal$startDate & dates <= cal$endDate))
    stop('Given date out of range.')
  # a date is a business day when its integer form appears in the calendar index
  as.integer(dates) %in% cal$n.bizdays
}
#' Computes a sequence of business days between two dates.
#'
#' @param from start date
#' @param to end date
#' @param cal an instance of \code{Calendar}
#' @return a \code{Date} vector with every business day in \code{[from, to]}
#' @export
bizseq <- function(from, to, cal=getOption("calendar")) UseMethod('bizseq')
#' @export
bizseq.default <- function(from, to, cal=getOption("calendar")) {
# coerce `from` to Date and dispatch to the Date method (`to` is coerced there)
from <- as.Date(from)
bizseq(from, to, cal)
}
#' @export
bizseq.Date <- function(from, to, cal=getOption("calendar")) {
  to <- as.Date(to)
  # FIX: the range checks used any(), which only failed when *every* date was
  # out of range — a single out-of-range date mixed with valid ones passed
  if (!all(from >= cal$startDate & from <= cal$endDate))
    stop('Given "from" date out of range.')
  if (!all(to >= cal$startDate & to <= cal$endDate))
    stop('Given "to" date out of range.')
  # FIX: the message was inverted — the rule enforced is from <= to
  if (!all(from <= to))
    stop('All from dates must be less than or equal to all to dates.')
  from <- as.integer(from)
  to <- as.integer(to)
  # every business day falling inside [from, to], converted back to Date
  as.Date(cal$n.bizdays[which(cal$n.bizdays >= from & cal$n.bizdays <= to)], origin = '1970-01-01')
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.