content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(quantmod)
library(TTR)
library(PerformanceAnalytics)
library(tseries)
library(xts)
library(zoo)
library(quantstrat)
library(dplyr)
library(knitr)
library(plumber)
library(tibble)
library(highfrequency)
library(rtsdata)
library(lubridate)
library(nnet)
library(caret)
library(deepnet)
library(leaflet)
##' @title queryorder
##' @description get current queuing orders.
##' @details return a data.frame of current queuing orders, each row of
##' the data.frame representing an order, queryorder will return all of the
##' queuing orders if orderid is NULL. when there is no queuing orders,
##' queryorder will return a data.frame with 0 rows.
##' @param orderids specyfing order ids to be queried, return all orders if
##' orderids=NULL, default NULL.
##' @return a data.frame of queuing orders.
##' @examples
##' \dontrun{
##' ## get all queuing orders
##' queryorder()
##' ## get all orders that satisfy orderid%in%c("order1","order2")
##' queryorder(orderids=c("order1","order2"))
##' }
##' @export
queryorder <- function(orderids=NULL){
    ## Snapshot of the current order queue held in the package state.
    queue <- .tradingstates$orders
    ## NULL filter means "return every queuing order".
    if(is.null(orderids)){
        return(queue)
    }
    queue[queue$orderid %in% orderids, ]
}
##' @title querycapital
##' @description get current capital status.
##' @details return a data.frame of current capital status, each row
##' of the data.frame representing an instrument, if instrumentids is not NULL,
##' querycapital will return the capital status specified by instrumentids.
##' @param instrumentids specifying instrumentids to be queried, return total
##' capital status if instrumentids=NULL, default NULL.
##' @return a data.frame of all specified instruments' current status
##' @examples
##' \dontrun{
##' ## get total capital status
##' querycapital()
##' ## get capital status of TF1603 and T1603
##' querycapital(instrumentids=c("TF1603","T1603"))
##' }
##' @export
querycapital <- function(instrumentids=NULL){
    ## Snapshot of the per-instrument capital table in the package state.
    capital <- .tradingstates$capital
    ## NULL filter means "return the whole capital status table".
    if(is.null(instrumentids)){
        return(capital)
    }
    capital[capital$instrumentid %in% instrumentids, ]
}
##' @title ordersubmission
##' @description
##' take one of the following order actions: open, close, closetoday,
##' closepreday and cancel.
##' @details ordersubmission submit an order specified by the user, it also
##' take some additional actions after the submission. For example, if set
##' timeoutlist=TRUE and timeoutsleep=1, the simulator will first submit an
##' order and cancel it if the order is not executed in the next second.
##' @seealso \link{multisubmission} \link{timeoutchasesubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifying order price. NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutlist logical, indicating whether to give current order a
##' timeout interval, the length of the interval is specified by timeoutsleep.
##' if the order hasn't been executed after a time interval greater than
##' timeoutsleep, the order will be canceled.
##' @param timeoutchase logical, indicating whether to chase order when timeout.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @param chaselist logical, indicating whether to put this order to
##' auto-chase list. if the order hasn't been executed for a time interval
##' longer than chasesleep, the simulator will cancel this order(if needed),
##' then submit a new one with the same hands and a price equal to the
##' bid1/ask1 price. the simulator will repeat this action until the original
##' submitted amount is executed.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## a length 5 random orderid is generated by randomid(5)
##' ordersubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open")
##' }
##' @export
ordersubmission <- function(instrumentid="TF1603",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1){
    ## Submit (or cancel) a single order against the simulator's shared state.
    ## Returns an integer status code: 3 = queued, 5 = canceled; rejected
    ## submissions (status 6) are logged and then raised as errors.
    ## BUG FIX: the original text was mojibake-corrupted -- every
    ## "& curren" had been collapsed into the currency sign, turning
    ## "& currentinstrument$..." into "¤tinstrument$..." (invalid R).
    ## The intended conjunctions are restored below.
    tradetime <- .tradingstates$currenttradetime
    if(is.null(orderid)){
        warning("orderid not specified, generating a random id")
        orderid <- randomid(10)
    }
    ## match.arg() errors when 'action' is not one of the supported values.
    match.arg(action,choices = c("open","close","closetoday","closepreday","cancel"))
    if(is.null(instrumentid)){
        stop("instrumentid must not be NULL!")
    }
    ## cancel: drop the order from the queue and log status 5.
    if(action=="cancel"){
        canceledorder <- .tradingstates$orders[.tradingstates$orders$orderid==orderid,]
        .tradingstates$orders <- .tradingstates$orders[.tradingstates$orders$orderid!=orderid,]
        .writeorderhistory(instrumentid,orderid,canceledorder$direction,canceledorder$hands,canceledorder$price,tradeprice=0,status=5,action,cost=0)
        return(5)
    }
    ## basic sanity checks for the remaining (non-cancel) actions.
    if(any(c(hands%%1!=0, hands<=0, isTRUE(price<0) , !(direction%in%c(-1,1))))){
        stop("illegal parameter values!")
    }
    ## .sucker(): validate a close-type order against current holdings.
    ## Rejects (status 6) when there is nothing to close, when the request
    ## would overshoot the net (holdings + queuing closes) position, or when
    ## a duplicate market close order is already queuing.  Reads 'untrade'
    ## and 'currentinstrument' lexically from this frame; both are assigned
    ## before .sucker is called.  The long/short branches of the original
    ## differed only in which holdings column they summed, so they are
    ## merged here.
    .sucker <- function(LONGHOLDINGS,SHORTHOLDINGS){
        vol <- abs(hands)
        if(direction==-1){
            ## closing long holdings: hold>0, untrade<0
            hold <- sum(.tradingstates$capital[[LONGHOLDINGS]][.tradingstates$capital$instrumentid==instrumentid])
        }
        else{
            ## closing short holdings: hold<0, untrade>0
            hold <- sum(.tradingstates$capital[[SHORTHOLDINGS]][.tradingstates$capital$instrumentid==instrumentid])
        }
        nethold <- hold+untrade
        if( (hold==0) | direction==sign(nethold) |
            vol>abs(hold) | vol>abs(nethold) |
            (any(currentinstrument$price==0 & currentinstrument$direction==direction & currentinstrument$action%in%c("close",action)) & price==0) ){
            .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=6,action,cost=0)
            stop("submission failed, status code: 6, orderid: ",orderid)
        }
    }
    ## get most recent orderbook
    mostrecentorderbook <- .INSTRUMENT$orderbook[[instrumentid]]
    ## price=NULL means "submit at bid1 (long) or ask1 (short)".
    if(is.null(price)){
        price <- ifelse(direction==1,mostrecentorderbook$buybook$price[1],mostrecentorderbook$sellbook$price[1])
    }
    ## working copy of the queue, used to update order state
    orders <- .tradingstates$orders
    currentinstrument <- orders[orders$instrumentid==instrumentid,]
    if(orderid%in%currentinstrument$orderid){
        stop("orderid already exists!")
    }
    ## .enqueue(): append the order to the queue, register prior orders for
    ## limit prices, log status 3 and return it.  This tail was duplicated
    ## verbatim in all four non-cancel branches of the original.
    .enqueue <- function(){
        orders <- rbind(orders,data.frame(instrumentid=instrumentid,orderid=orderid,direction=direction,price=price,hands=hands,action=action,initialhands=hands,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,submitstart=tradetime,stringsAsFactors=FALSE))
        ## limit order: record the queue ahead of us in the book
        if(price>0){
            .priororders(mostrecentorderbook = mostrecentorderbook,orderid = orderid,direction = direction,price=price)
        }
        .tradingstates$orders <- orders
        .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=3,action,cost=0)
        3
    }
    if(action=="open"){
        ## only one market order is allowed in each position
        if(any(currentinstrument$price==0 & currentinstrument$direction==direction & currentinstrument$action=="open") & price==0){
            .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=6,action,cost=0)
            stop(6)
        }
        return(.enqueue())
    }
    ## close-type actions: 'untrade' is the queuing (not yet executed) close
    ## amount, signed like holdings: untrade(long)<0, untrade(short)>0.
    if(action=="close"){
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closepreday","closetoday")])*direction
        .sucker("totallongholdings","totalshortholdings")
    }
    else if(action=="closetoday"){
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closetoday")])*direction
        .sucker("longholdingstoday","shortholdingstoday")
    }
    else{
        ## closepreday
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closepreday")])*direction
        .sucker("longholdingspreday","shortholdingspreday")
    }
    return(.enqueue())
}
##' @title multisubmission
##'
##' @description submit multiple orders, a simple wrapper of ordersubmission.
##' instrumentid, direction, price, hands and action must be of length one or
##' the same length with the number of orders; orderid must be of length zero
##' or the same length with the number of orders!
##' @seealso \link{ordersubmission} \link{timeoutchasesubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier
##' @param orderid character, if length(orderid)==0 (default), multisubmission
##' will generate a random id for each order
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.default NULL.
##' NOTE: when price=0, ordersubmission will submit a market order; when
##' price=NULL, ordersubmission() will take the corresponding bid1 or ask1
##' price as order price.
##' @param hands integer, specifying hands to be submitted.
##' @param action character, action can take value from one of "open","close",
##' "closetoday","closepreday" and "cancel". hands submitted in action='close'
##' can not be greater than the sum of current holdings and queuing open hands.
##' @param timeoutlist logical, specyfing wether to give current order a
##' timeout interval, the length of the interval is specified by timeoutsleep.
##' if the order hasn't been executed after a time interval greater than
##' timeoutsleep, the order will be canceled.
##' @param timeoutchase logical, indicating whether to chase order when timeout.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @param chaselist logical, specifying wether to put this order to
##' auto-chase list. if the order hasn' been executed for a time inverval
##' longer than chasesleep, the simulator will cancel this order(if needed),
##' then submit a new one with the sampe hands and a price equal to the
##' bid1/ask1 price. the simulator will repeat this action until the original
##' submitted amount is executed.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an one hand long open order at each bid price of TF1512.
##' multisubmission(instrumentid="TF1512",orderid=NULL,direction=1,
##' price=orderbook$buybook$price,hands=1,action='open')
##' }
##' @export
multisubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=NULL,hands=1,action="open",timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1){
    ## Submit several orders at once.  Each argument must be length one
    ## (recycled by data.frame()) or the same length as the number of
    ## orders; incompatible lengths raise the error below.
    lengtherror <- "instrumentid, direction, price, hands action timeoutlist, timeoutchase, timeoutsleep, chaselist and chasesleep must be of length one or the same length with the number of orders!! orderid must be of length zero or the same length with the number of orders!"
    tryCatch(expr={
        columns <- list(instrumentid=instrumentid,direction=direction,hands=hands,action=action,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,stringsAsFactors = FALSE)
        ## deliberately omit the price column when price=NULL: then
        ## orders$price[i] evaluates to NULL and ordersubmission() falls
        ## back to the bid1/ask1 price (the documented behavior).
        if(!is.null(price)){
            columns$price <- price
        }
        if(length(orderid)!=0){
            columns$orderid <- orderid
        }
        orders <- do.call(data.frame,columns)
        if(length(orderid)==0){
            ## one random id per order; vapply() preallocates instead of
            ## growing a vector with c() inside a loop.
            orders$orderid <- vapply(seq_len(nrow(orders)),function(i) randomid(5),character(1))
        }
    },
    warning=function(w){stop(lengtherror)},
    error=function(e){stop(lengtherror)})
    ## hand each row to the single-order entry point
    for(i in seq_len(nrow(orders))){
        ordersubmission(instrumentid = orders$instrumentid[i],
                        orderid = orders$orderid[i],direction = orders$direction[i],
                        price=orders$price[i],hands = orders$hands[i],action = orders$action[i],
                        timeoutlist=orders$timeoutlist[i],
                        timeoutchase=orders$timeoutchase[i],
                        timeoutsleep=orders$timeoutsleep[i],
                        chaselist=orders$chaselist[i],
                        chasesleep=orders$chasesleep[i])
    }
    return()
}
##' @title timeoutsubmission
##'
##' @description submit an order with timeout checking. The order will be
##' canceled when it hasn't been executed for a duration longer than
##' timeoutsleep
##'
##' @details timeoutsubmission is a wrapper of ordersubmission, it act the same
##' as ordersubmission(...,timeoutlist=TRUE,chaselist=FALSE)
##' @seealso \link{multisubmission} \link{timeoutchasesubmission} \link{ordersubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## cancel the order if it's not executed in the next 10 seconds
##' timeoutsubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open".
##' timeoutsleep=10)
##' }
##' @export
timeoutsubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutsleep=1){
    ## Thin wrapper: ordersubmission() with timeout checking switched on.
    ## Warn when the caller relies on the default timeout of 1 second.
    if(missing(timeoutsleep)){
        warning("'timeoutsleep' not found! set to 1")
    }
    ordersubmission(instrumentid=instrumentid,orderid=orderid,
                    direction=direction,price=price,
                    hands=hands,action=action,
                    timeoutlist=TRUE,timeoutsleep=timeoutsleep)
    return()
}
##' @title chasesubmission
##'
##' @description chase bid1 or ask1. after every 'chasesleep' seconds,
##' simulator will check wether current order's price equals to bid1 or
##' ask1 price, if not, order chaser will replace it with a new one satisfying
##' the price condition.
##' @details chasesubmission is a wrapper of ordersubmission, it act the same
##' as ordersubmission(...,timeoutlist=FALSE,chaselist=TRUE).
##' @seealso \link{multisubmission} \link{timeoutchasesubmission}
##' \link{ordersubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## chase bid1 price if it's not executed in the next 10 seconds
##' ## recheck the price condition every 10 seconds.
##' chasesubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open".
##' chasesleep=10)
##' }
##' @export
chasesubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",chasesleep=1){
    ## Thin wrapper: ordersubmission() with bid1/ask1 chasing switched on.
    ## Warn when the caller relies on the default recheck interval of 1 second.
    if(missing(chasesleep)){
        warning("'chasesleep' not found! set to 1")
    }
    ordersubmission(instrumentid=instrumentid,orderid=orderid,
                    direction=direction,price=price,
                    hands=hands,action=action,
                    chaselist = TRUE,chasesleep=chasesleep)
    return()
}
##' @title timeoutchasesubmission
##' @description submit an order with timeout checking, chase bid1 or ask1.
##' price to execute it when timeout. type ?ordersubmission, ?timeoutsubmission
##' and ?chasesubmission for more information.
##' @details timeoutchaseubmission is a wrapper of ordersubmission, it act the
##' same as ordersubmission(...,timeoutlist=TRUE,chaselist=TRUE)
##' @seealso \link{multisubmission} \link{ordersubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## chase bid1 price if it's not executed in the next 5 seconds
##' ## recheck the price condition every 10 seconds.
##' chasesubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open".
##' timeoutsleep=5,
##' chasesleep=10)
##' }
##' @export
timeoutchasesubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutsleep=1,chasesleep=1){
    ## Thin wrapper: ordersubmission() with both timeout checking and
    ## price chasing switched on.  Warn for each interval left at its default.
    if(missing(timeoutsleep)){
        warning("'timeoutsleep' not found! set to 1")
    }
    if(missing(chasesleep)){
        warning("'chasesleep' not found! set to 1")
    }
    ordersubmission(instrumentid=instrumentid,orderid=orderid,
                    direction=direction,price=price,
                    hands=hands,action=action,
                    timeoutlist = TRUE,timeoutchase = TRUE,
                    timeoutsleep=timeoutsleep,chasesleep=chasesleep)
    return()
}
##' @title meanopen
##' @description calculate unclosed orders' mean open price for a specific instrument
##' and holdings side.
##' @details meanopen will calculate mean price according to following rules: 1. earlier open orders are prior to be closed. 2. return volume weighted mean of unclosed order's transaction price.
##' @param instrumentid character, instrument identifier.
##' @param side character, "long" or "short", specifying holdings's side.
##' @return numeric, mean open price.
##' @examples
##' \dontrun{
##' ## check long holdings' mean open price of TF1603
##' meanopen("TF1603","long")
##' }
##' @export
meanopen <- function(instrumentid=character(),side="long"){
    ## Volume-weighted mean open price of unclosed trades on one side.
    match.arg(side,c("long","short"))
    ## pick the unclosed-trade tracker once; the long and short paths of
    ## the original were otherwise identical.
    if(side=="long"){
        unclosed <- .tradingstates$unclosedlong
    }
    else{
        unclosed <- .tradingstates$unclosedshort
    }
    IDX <- unclosed$instrumentid==instrumentid
    ## no unclosed records on this side -> NULL
    if(nrow(unclosed[IDX,])==0){
        return(NULL)
    }
    sum(unclosed$tradeprice[IDX]*unclosed$tradehands[IDX])/sum(unclosed$tradehands[IDX])
}
##' @title holdingsprofit
##' @description calculate unclosed holdings' dynamic profit. require
##' setting unclosed=TRUE in HFTsimulator.
##' total_profit = holdings_profit + closed_profit
##' @details
##' long holdings' dynamic profit = holdings * (last_price - mean_open_price),
##' short holdings' dynamic profit = holdings * (mean_open_price - lastprice).
##' @seealso \link{HFTsimulator} \link{meanopen} \link{closedprofit}
##' @param instrumentid character, instrument identifier.
##' @param side character, "long" or "short", specifying holdings's side.
##' @return numeric, holdings profit.
##' @examples
##' \dontrun{
##' ## get longholding's profit of TF1603
##' holdingsprofit("TF1603","long")
##' }
##' @export
holdingsprofit <- function(instrumentid=character(),side="long"){
    ## Dynamic profit of unclosed holdings:
    ##   holdings * (last_price - mean_open_price) * multiplier
    ## (short holdings are negative, so the same formula covers both sides).
    MEANOPEN <- meanopen(instrumentid,side)
    ## no unclosed holdings on this side -> zero profit
    if(is.null(MEANOPEN)){return(0)}
    lastprice <- .INSTRUMENT$lastprice[[instrumentid]]
    multiplier <- .INSTRUMENT$multiplier[[instrumentid]]
    capital <- .tradingstates$capital
    ## BUG FIX: the original used ifelse() with a scalar condition, which
    ## truncates the selected holdings vector to its first element and
    ## yields NA when the instrument has no capital row; plain if/else
    ## returns the subset unchanged.
    if(side=="long"){
        HOLDINGS <- capital$totallongholdings[capital$instrumentid==instrumentid]
    }
    else{
        HOLDINGS <- capital$totalshortholdings[capital$instrumentid==instrumentid]
    }
    return(HOLDINGS*(lastprice-MEANOPEN)*multiplier)
}
##' @title closed profit
##' @description calculate closed profit. require setting closed=TRUE in
##' HFTsimulator.
##' @details closed profit is the most recent cash when all holdings are
##' equal to zero. total_profit = holdings_profit + closed_profit.
##' @seealso \link{HFTsimulator} \link{holdingsprofit}
##' @param instrumentid character, instrument identifier
##' @return numeric, closed profit
##' @examples
##' \dontrun{
##' ## get closed profit of TF1603
##' closedprofit("TF1603")
##' }
##' @export
closedprofit <- function(instrumentid){
    ## Cash recorded by the closed-position tracker for one instrument.
    tracker <- .tradingstates$closedtracker
    tracker$cash[tracker$instrumentid==instrumentid]
}
##' @title randomid
##' @description generage a random order id
##' @param n number of chars
##' @return character, order id
##' @examples
##' \dontrun{
##' ## generate a 5 characters' order id
##' randomid(5)
##' }
##' @importFrom stats runif
##' @export
randomid <- function(n){paste(letters[ceiling(runif(n,0,26))],collapse = "")}
##' @title isnewday
##' @description check if current instrument's data comes from a new day.
##' @param instrumentid character, instrument identifier, unique.
##' @return logical, indication wether current data come from a new trading day.
##' @export
isnewday <- function(instrumentid){
    ## Look up the new-trading-day flag kept per instrument in the
    ## package state (a named logical vector keyed by instrument id).
    .tradingstates$startoftheday[instrumentid]
}
##' @title perfectexecution
##' @description execute an order immediately with a specified price, without
##' going through the simulation system. Can be used to comparing simulated
##' strategy with a perfect situation.
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @return nothing.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## the order will be executed immediately at price 99
##' perfectexecution(instrumentid="TF1603",orderid='xxx',
##' direction=1,price=99,hands=1,action="open")
##' }
##' @importFrom methods is
##' @export
perfectexecution<-function(instrumentid,orderid="xxx",direction,price,hands,action){
    ## Execute orders immediately at the given price, bypassing the
    ## order-matching simulation.  Useful as a "perfect fill" benchmark.
    tradetime <- .tradingstates$currenttradetime
    if(any(hands<=0)) stop("hands must be greater than zero!")
    if(is(direction,"character") | any(!direction%in%c(-1,1))) stop("direction must be numeric or integer of value 1 or -1!")
    if(any(price<=0)) stop("price must be greater than 0!")
    if(any(!action%in%c("open","close"))) stop("action can only be open or close!")
    ## one row per order; incompatible argument lengths raise an error
    tryCatch(orders <- data.frame(instrumentid=instrumentid,direction=direction,price=price,hands=hands,action=action,stringsAsFactors = FALSE),
             warning=function(w){stop("instrumentid, direction, price, hands and action must be of length one or the same length with the number of orders!!")},
             error=function(e){stop("instrumentid, direction, price, hands and action must be of length one or the same length with the number of orders!!")})
    for(i in seq_len(nrow(orders))){
        ## BUG FIX: look up instrument parameters by the i-th order's own
        ## instrument id; the original indexed with the raw 'instrumentid'
        ## argument, which is wrong (or errors) when several instruments
        ## are submitted in one call.
        fee <- .INSTRUMENT$fee[[orders$instrumentid[i]]]
        closeprior <- .INSTRUMENT$closeprior[[orders$instrumentid[i]]]
        multiplier <- .INSTRUMENT$multiplier[[orders$instrumentid[i]]]
        ## additional evaluation expression durring debuging, do not delete
        ## eval(parse(text = paste(".tradingstates$currenttimeformat <- ",ENV,"$timeformat",sep ="")))
        ## placeholder order row carrying initialhands for the bookkeeping
        ## helpers below
        id <- randomid(5)
        .tradingstates$orders <- data.frame(instrumentid="someinstrument",orderid=id,direction=0,price=0,hands=0,action="someaction",initialhands=orders$hands[i],timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1,submitstart=tradetime,stringsAsFactors=FALSE)
        cost <- .updatecapital(orders$instrumentid[i],orders$direction[i],orders$hands[i],orders$action[i],orders$price[i],fee,closeprior,multiplier)
        .writecapitalhistory(instrumentid=orders$instrumentid[i],tradeprice=orders$price[i],tradehands=orders$hands[i],cost=cost)
        .writeorderhistory(instrumentid=orders$instrumentid[i],orderid=id,direction=orders$direction[i],hands=0,price=orders$price[i],tradeprice=orders$price[i],status=0,action=orders$action[i],cost=cost)
        .writetraded(orders$instrumentid[i],id,orders$action[i],orders$direction[i],orders$hands[i],orders$price[i])
        .trackclosed(orders$instrumentid[i],orders$action[i],orders$direction[i],orders$hands[i],orders$price[i],multiplier)
        .trackunclosed(orders$instrumentid[i],id,orders$action[i],orders$direction[i],orders$hands[i],orders$price[i])
    }
}
##' @title closeall
##' @description close all holdings of a specific instrument, if close price is
##' not specified, the holdings will be closed with market orders.
##' @seealso \link{chasecloseall}
##' @param instrumentid character, specyfing instrument to be closed.
##' @param price numeric, specyfing limit close order's price, if NULL,
##' simulator will close the holdings with market orders.
##' @details closeall can only close one instrument at a time
##' @return nothing
##' @export
closeall <- function(instrumentid="qtid",price=NULL){
    ## Close all holdings of one instrument.  price=NULL closes with market
    ## orders (price 0); otherwise the given limit price is used.
    ## Check the single-instrument restriction first, before querying.
    if(length(instrumentid)>1){
        stop("close more than one instruments!")
    }
    capital <- querycapital(instrumentids = instrumentid)
    if(nrow(capital)==0){
        warning(paste(instrumentid,"not found!"))
        return()
    }
    if(capital$totallongholdings<=0 && capital$totalshortholdings>=0){
        print("no holdings to be closed")
        return()
    }
    ## BUG FIX: the original ignored 'price' entirely and always submitted
    ## market orders (price=0), contradicting its documentation.
    closeprice <- if(is.null(price)) 0 else price
    ## long holdings are closed by selling, short holdings by buying back
    if(capital$totallongholdings!=0)
        ordersubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = -1,price = closeprice,hands=capital$totallongholdings,action = "close")
    if(capital$totalshortholdings!=0)
        ordersubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = 1,price = closeprice,hands= -capital$totalshortholdings,action = "close")
    return()
}
##' @title cancelall
##' @description cancel all satisfied orders
##' @details cancelall will cancel all orders satisfying user specified
##' filter conditions, a filter won't be considered when it is NULL.
##' @seealso \link{replaceall}
##' @param instrumentid character, specifying a filter for instrument
##' identifiers.
##' @param direction integer, specifying a filter for trading directions.
##' 1 for long and -1 for short.
##' @param action character, specifying a filter for actions, can take value
##' from one of "open","close","closetoday","closepreday"
##' @param pricemin numeric, specifying a filter for price lower limit.
##' @param pricemax numeric, specifying a filter for price upper limit.
##' @param orderid character, specifying the set of orderids to be canceled.
##' NOTE: if orderid is not null, cancelall will disregard any other filters
##' and cancel orders only by orderid.
##' @return nothing
##' @examples
##' \dontrun{
##' ## cancel all orders satisfy direction==-1
##' cancelall(direction==-1)
##' }
##' @export
cancelall <- function(instrumentid=NULL,direction=NULL,action=NULL,pricemin=NULL,pricemax=NULL,orderid=NULL){
    ## Cancel every queuing order that matches the supplied filters; a NULL
    ## filter is ignored. When 'orderid' is supplied, all other filters are
    ## skipped and orders are selected by id alone.
    candidates <- .tradingstates$orders
    if(nrow(candidates)==0){
        return()
    }
    if(!is.null(orderid)){
        ## orderid short-circuits every other filter
        candidates <- candidates[candidates$orderid%in%orderid,]
    }else{
        ## apply each non-NULL filter in turn
        if(!is.null(instrumentid))
            candidates <- candidates[candidates$instrumentid%in%instrumentid,]
        if(!is.null(direction))
            candidates <- candidates[candidates$direction==direction,]
        if(!is.null(action))
            candidates <- candidates[candidates$action%in%action,]
        if(!is.null(pricemin))
            candidates <- candidates[candidates$price>=pricemin,]
        if(!is.null(pricemax))
            candidates <- candidates[candidates$price<=pricemax,]
    }
    if(nrow(candidates)==0){
        return()
    }
    ## submit one cancel request per remaining order
    for(k in seq_along(candidates$orderid)){
        ordersubmission(instrumentid = candidates$instrumentid[k],orderid = candidates$orderid[k],action = "cancel")
    }
    return()
}
##' @title replaceall
##' @description replace all satisfied orders with one new one, which has a new
##' price and a hands equal to the cumulated hands of orders replaced.
##' @seealso \link{cancelall}
##' @param instrumentid character, specifying a filter for instrument
##' identifiers.
##' @param direction integer, specifying a filter for trading directions.
##' 1 for long and -1 for short.
##' @param action character, specifying a filter for actions, can take value
##' from one of "open","close","closetoday","closepreday"
##' @param pricemin numeric, specifying a filter for price lower limit.
##' @param pricemax numeric, specifying a filter for price upper limit.
##' @param newprice numeric, new order price, will replace with a market order
##' when newprice=0.
##' @return nothing
##' @examples
##'\dontrun{
##' ## find all orders satisfy direction==-1 and action=='open' and
##' ## price <=101, replace them with a new order with price 100.01.
##' replaceall(tradetime,"TF1512",direction=-1,action='open',
##' pricemax=101,newprice=100.01)
##' }
##' @export
replaceall <- function(instrumentid=NULL,direction=NULL,action=NULL,pricemin=NULL,pricemax=NULL,newprice=NULL){
    ## Replace all queuing orders matching the filters with a single new order
    ## at 'newprice' whose size is the summed remaining hands of the replaced
    ## orders. newprice=0 submits a market order.
    orders <- .tradingstates$orders
    if(nrow(orders)==0){
        print("no orders to replace")
        return()
    }
    ## these four arguments are mandatory; each test is length-1, so use the
    ## scalar short-circuit operator || rather than vectorized |
    if(is.null(instrumentid) || is.null(direction) || is.null(action) || is.null(newprice)){
        stop("instrumentid, direction, action and newprice can not be NULL!")
    }
    orders <- orders[orders$instrumentid%in%instrumentid &
                     orders$direction==direction &
                     orders$action%in%action,]
    ## optional price-band filters
    if(!is.null(pricemin)){
        orders <- orders[orders$price>=pricemin,]
    }
    if(!is.null(pricemax)){
        orders <- orders[orders$price<=pricemax,]
    }
    if(nrow(orders)==0){
        print("no orders to replace")
        return()
    }
    ## cancel the old orders ...
    for(i in seq_along(orders$orderid)){
        ordersubmission(instrumentid = orders$instrumentid[i],orderid = orders$orderid[i],action = "cancel")
    }
    ## ... then submit one aggregated replacement order
    ordersubmission(instrumentid = instrumentid,orderid = randomid(5),direction=direction,price=newprice,hands=sum(orders$hands),action = action)
    return()
}
##' @title lazysubmission
##' @description submit a target holding, simulator will cancel all irrelevant
##' orders and chase bid1 or ask1 price automatically until the target holding
##' is achieved. This function can only be used when set tc=TRUE in HFTsimulator.
##' @seealso \link{HFTsimulator}
##' @param instrumentid character, instrument identifier
##' @param longholding integer, specifying target long holdings of
##' 'instrumentid', longholding must be greater than or equal to 0.
##' @param shortholding integer, specifying target short holdings of
##' 'instrumentid', shortholding must be less than or equal to 0.
##' @return nothing
##' @examples
##'\dontrun{
##' lazysubmission("TF1512",longholding=5,shortholding=-3)
##' }
##' @export
lazysubmission <- function(instrumentid,longholding=NULL,shortholding=NULL){
    ## Register a target holding for 'instrumentid'; the simulated trade
    ## center (.tradecenter) then chases bid1/ask1 until the target is
    ## reached. Only available when the simulator was initialized with
    ## tc=TRUE.
    tradetime <- .tradingstates$currenttradetime
    if(!.tradingstates$tc){
        ## fixed typo in the original message ("pleas")
        stop("lazysubmission: trade center not enabled! please set tc=TRUE at initialization")
    }
    ## a NULL side leaves that side's current target unchanged
    if(!is.null(longholding)){
        .tradingstates$th$longholding[.tradingstates$th$instrumentid==instrumentid] <- longholding
    }
    if(!is.null(shortholding)){
        .tradingstates$th$shortholding[.tradingstates$th$instrumentid==instrumentid] <- shortholding
    }
    ## flag the target as just changed and run the trade center immediately
    .tradingstates$justchanged[instrumentid] <- TRUE
    .tradingstates$lastchange[instrumentid] <- tradetime
    .tradecenter(instrumentid)
}
##' @title submitmultilevelopen
##' @description submit multiple open orders while cancel all other orders
##' satisfying the cancel conditions, cancel conditions are specified by
##' cancelallother, cancelprime, cancelsub and cancelnotinthebook.
##' @seealso \link{multisubmission} \link{cancelall}
##' @param instrumentid character, instrument identifier.
##' @param LEVELS integer, specifying postions in order book. Orders will be
##' submmited to these positions.
##' @param hands integer, specifying amount to be submitted.
##' @param DIRECTION integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param cancelallother, logical, indicating wehter or not cancel all other
##' orders that satisfying in the order book but with different prices.
##' @param cancelprime cancel all orders with higher priority price
##' @param cancelsub cancel all orders with lower priority price
##' @param cancelnotinthebook cancel orders not in orderbook
##' @return nothing.
##' @importFrom stats na.omit
##' @export
submitmultilevelopen <- function(instrumentid,LEVELS=c(1,2),hands=1,cancelallother=FALSE,cancelprime=FALSE,cancelsub=FALSE,DIRECTION=1,cancelnotinthebook=FALSE){
    ## Maintain limit open orders at the order-book levels listed in LEVELS,
    ## optionally cancelling queuing limit orders that sit elsewhere.
    ## All queuing limit orders (price!=0) on this trading direction.
    ## NOTE(review): this selection is not filtered by instrumentid, so with
    ## multiple instruments LIMITS can contain other instruments' orders --
    ## confirm whether that is intended.
    LIMITS <- .tradingstates$orders[.tradingstates$orders$price!=0&.tradingstates$orders$direction==DIRECTION,]
    ## pick the side of the book matching the trading direction
    if(DIRECTION==1){
        orderbook <- .INSTRUMENT$orderbook[[instrumentid]]$buybook
    }
    else{
        orderbook <- .INSTRUMENT$orderbook[[instrumentid]]$sellbook
    }
    if(nrow(LIMITS)!=0){
        ## idx[i] is the book level of LIMITS$price[i], NA if not in the book
        idx <- match(LIMITS$price,orderbook$price)
        ## 0. cancel orders not in the book
        if(cancelnotinthebook){
            if(any(is.na(idx))){
                cancelall(orderid = LIMITS$orderid[is.na(idx)])
            }
        }
        ## 1. conditional cancel and open
        if(any(!is.na(idx))){
            ## restrict to orders still present in the book
            LIMITS <- LIMITS[!is.na(idx),]
            idx <- na.omit(idx)
            ## 1.1 cancel -- at most one policy applies, in priority order:
            ## cancelallother > cancelprime > cancelsub
            if(cancelallother){
                ## cancel book-resident orders at any level not in LEVELS
                allother <- !(idx%in%LEVELS)
                if(any(allother)){
                    cancelall(orderid = LIMITS$orderid[allother])
                }
            }
            else if(cancelprime){
                ## cancel orders queued at a higher-priority (smaller) level
                primeorders <- idx<min(LEVELS)
                if(any(primeorders)){
                    cancelall(orderid = LIMITS$orderid[primeorders])
                }
            }
            else if(cancelsub){
                ## cancel orders queued at a lower-priority (larger) level
                suborders <- idx>max(LEVELS)
                if(any(suborders)){
                    cancelall(orderid = LIMITS$orderid[suborders])
                }
            }
            ## 1.2 open orders at the requested levels not already occupied
            neworders <- !(LEVELS%in%idx)
            if(any(neworders)){
                multisubmission(instrumentid=instrumentid,direction = DIRECTION,price = orderbook$price[LEVELS[neworders]],hands = hands,action = "open")
            }
        }
    }
    else{
        ## no queuing limit orders at all: open at every requested level
        multisubmission(instrumentid=instrumentid,direction = DIRECTION,price = orderbook$price[LEVELS],hands = hands,action = "open")
    }
}
##' @title chasecloseall
##' @description chase close all holdings of a specific instrument.
##' @seealso \link{closeall}
##' @details chasecloseall can only close one instrument at a time, simulator
##' will recheck if the order price is equal to current bid1 or ask1 price every
##' chasesleep seconds, if not, simulator will cancel it and submit a new one.
##' This action will be repeated until all specified holdings are executed.
##' @param instrumentid character, specifying instrument to be closed.
##' @param chasesleep numeric, specifying order chasing interval.
##' @return nothing
##' @export
chasecloseall <- function(instrumentid,chasesleep=1){
    ## Submit price-chasing close orders for every holding of 'instrumentid'
    ## that is not already covered by a queuing close order. Long holdings
    ## are closed by direction=-1 orders at ask1; short holdings by
    ## direction=1 orders at bid1.
    state <- .tradingstates
    ## current holdings of this instrument
    longheld <- state$capital$totallongholdings[state$capital$instrumentid==instrumentid]
    shortheld <- state$capital$totalshortholdings[state$capital$instrumentid==instrumentid]
    ## hands already queued in close orders, split by direction
    closing <- state$orders[state$orders$instrumentid==instrumentid & state$orders$action=="close",]
    closinglongdir <- sum(closing$hands[closing$direction==1])
    closingshortdir <- sum(closing$hands[closing$direction==-1])
    book <- .INSTRUMENT$orderbook[[instrumentid]]
    ## long holdings not yet covered by direction=-1 close orders
    uncoveredlong <- longheld-closingshortdir
    if(uncoveredlong>0){
        chasesubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = -1,price = book$sellbook$price[1],hands = uncoveredlong,action = "close",chasesleep = chasesleep)
    }
    ## short holdings (stored negative) not yet covered by direction=1 closes
    uncoveredshort <- (-shortheld)-closinglongdir
    if(uncoveredshort>0){
        chasesubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = 1,price = book$buybook$price[1],hands = uncoveredshort,action = "close",chasesleep = chasesleep)
    }
}
## market order flow:
## bid1,ask1 : previous bid1 and ask1 prices
## lastprice,volume : current last price and volume
## AGGREGATE: indicating return cumulate value or not
## return a matirx with two columes.
##' @title BSI
##' @description market order flow.
##' @details extract market order flow form give transaction data.
##' @param lastprice last trading price.
##' @param bid1 previous orderbook's bid1 price.
##' @param ask1 previous orderbook's ask1 price.
##' @param volume last trading volume.
##' @param AGGREGATE specifying whether to aggregate all buyer/seller initiated
##' volumes together.
##' @return a matrix of two columns corresponding to buyer and seller initialed
##' order flow.
##' @export
BSI <- function(lastprice,bid1,ask1,volume,AGGREGATE=FALSE){
    ## Classify traded volume as buyer- or seller-initiated by comparing the
    ## trade price with the previous quote midpoint: above mid -> buyer,
    ## below mid -> seller, at mid -> split evenly between the two.
    midpoint <- (bid1+ask1)/2
    if(AGGREGATE){
        ## return one cumulated pair c(BI=, SI=) over all observations
        buyerinit <- sum(volume[lastprice>midpoint],na.rm = TRUE)
        sellerinit <- sum(volume[lastprice<midpoint],na.rm = TRUE)
        half <- sum(volume[lastprice==midpoint],na.rm = TRUE)/2
        c(BI=buyerinit+half,SI=sellerinit+half)
    }
    else{
        ## per-observation classification: start with full volume on both
        ## sides, zero out the side the trade did not initiate
        BI <- volume
        SI <- volume
        BI[lastprice<midpoint] <- 0
        SI[lastprice>midpoint] <- 0
        ## trades exactly at the midpoint count half on each side
        atmid <- lastprice==midpoint
        if(any(atmid)){
            BI[atmid] <- volume[atmid]/2
            SI[atmid] <- BI[atmid]
        }
        cbind(BI,SI)
    }
}
## limit order flow:
BSO <- function(orderbook,preorderbook,bsi){
    ## NOTE(review): unimplemented stub -- the body is empty, so the call
    ## evaluates to NULL. The parameters appear reserved for a future
    ## limit-order-flow analogue of BSI() (see the "limit order flow"
    ## comment above); confirm before relying on this function.
}
##' @title S
##' @description shortcut
##' @param instrumentid character, instrument identifier.
##' @param attr name or call
##' @export
S <- function(instrumentid,attr){
    ## Shortcut accessor for common order/holding states of one instrument.
    ## 'attr' may be passed as a bare symbol (e.g. S("id",longopen.non))
    ## thanks to substitute()/deparse(), or as a character string.
    attr <- substitute(attr)
    if(!is.character(attr)) attr <- deparse(attr)
    switch(attr,
           ## BUGFIX: the two branches below originally hardcoded
           ## instrumentid=="a" instead of using the 'instrumentid' argument.
           "orders.non" = nrow(.tradingstates$orders[.tradingstates$orders$instrumentid==instrumentid,])==0,
           "orders.exist" = nrow(.tradingstates$orders[.tradingstates$orders$instrumentid==instrumentid,])!=0,
           ## queuing open/close orders, split by direction; *.non / *.exist
           ## return logicals, the bare names return the order rows
           "longopen" = .tradingstates$orders[.tradingstates$orders$action=="open" & .tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,],
           "longopen.non" = nrow(.tradingstates$orders[.tradingstates$orders$action=="open" & .tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,])==0,
           "longopen.exist" = nrow(.tradingstates$orders[.tradingstates$orders$action=="open" & .tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,])!=0,
           "shortopen" = .tradingstates$orders[.tradingstates$orders$action=="open"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,],
           "shortopen.non" = nrow(.tradingstates$orders[.tradingstates$orders$action=="open"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,])==0,
           "shortopen.exist" = nrow(.tradingstates$orders[.tradingstates$orders$action=="open"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,])!=0,
           "longclose" = .tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,],
           "longclose.non" = nrow(.tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,])==0,
           "longclose.exist" = nrow(.tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==1 &.tradingstates$orders$instrumentid==instrumentid,])!=0,
           "shortclose" = .tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,],
           "shortclose.non" = nrow(.tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,])==0,
           "shortclose.exist" = nrow(.tradingstates$orders[.tradingstates$orders$action=="close"&.tradingstates$orders$direction==-1 &.tradingstates$orders$instrumentid==instrumentid,])!=0,
           ## holding states: long holdings are >0 when present, short <0
           "holdings.exist" = .tradingstates$capital$totallongholdings[.tradingstates$capital$instrumentid==instrumentid] >0 | .tradingstates$capital$totalshortholdings[.tradingstates$capital$instrumentid==instrumentid]<0,
           "holdings.non" = .tradingstates$capital$totallongholdings[.tradingstates$capital$instrumentid==instrumentid] ==0 & .tradingstates$capital$totalshortholdings[.tradingstates$capital$instrumentid==instrumentid]==0,
           "longholdings.exist" = .tradingstates$capital$totallongholdings[.tradingstates$capital$instrumentid==instrumentid]>0,
           "longholdings.non" = .tradingstates$capital$totallongholdings[.tradingstates$capital$instrumentid==instrumentid]==0,
           "shortholdings.exist" = .tradingstates$capital$totalshortholdings[.tradingstates$capital$instrumentid==instrumentid]<0,
           "shortholdings.non" = .tradingstates$capital$totalshortholdings[.tradingstates$capital$instrumentid==instrumentid]==0
           )
}
##' @title HFTsimulator
##' @description high-frequency trading simulator.
##' @details
##' Initialize simulator states, including simulation back ground
##' functionalities and many ohter simulator related parameters. All
##' states related variables are saved in an environment named
##' '.tradingstates'. Queuing orders and capital state will be saved and
##' kept updated in tradingstates during simulation. There are two important
##' data.frames stored in this environment, 'orders' and 'capital'. All
##' current queuing orders will be stored as one row in orders during
##' simulation. If there is no queuing order, orders will be a data.frame
##' with 0 rows. Each instrument's capital state will be stored as one row in
##' capital. capital has at least one row. \code{queryorder()} and
##' \code{querycapital()} can be used inside strategy function to fetch orders
##' and capital from .tradingstates.
##' @seealso \link{lazysubmission} \link{cancelall} \link{queryorder}
##' \link{querycapital} \link{meanopen} \link{holdingsprofit}
##' @param stg function, strategy function.
##' @param ... parameters passed to stg.
##' @param datalist data.frame or list, specifying taq data used in the
##' simulation. datalist must be a list of data.frame(s) or a data.frame.
##' @param formatlist list, specifying taq data format, formatlist is either a
##' list of data format specifycation or a list of lists of specifications.
##' @param instrumentids character, spefifying instruments to be traded.
##' @param tc logical, indicating wehter to use a simulated tradecenter. when
##' tc=TRUE, only lazysubmission can be used as submit function in stg. Defalut
##' FALSE.
##' @param Sleep numeric, idle time length of simulated tradecenter, measured
##' in seconds, default 1.
##' @param DIGITSSECS integer, specifying second digits, default 3.
##' @param septraded logical, indicating wether to record traded orders
##' separately.
##' @param unclosed logical, indicating wether to track all unclosed orders,
##' set unclosed=TRUE when you need to calculate mean open price and open
##' profit. Default TRUE.
##' @param closed logical, indicating wether to track all zero holding states,
##' set closed=TRUE when you need to calculate close profit, default TRUE.
##' @param interdaily logical, indicating wether to support interdaily strategies,
##' most of the time MM strategies are appiled in intraday situations,
##' set it to TRUE only when you know exactly what you are doing. Defalut FALSE.
##' @param verboselimitpriors logical, indicating wether to record all prior
##' limit orders' informations. if verboselimitpriors=TRUE, simulator will
##' contatenate all limitpriors in a list named 'verbosepriors'. Default TRUE.
##' @return a list containing all kinds of histories and current states.
##' @importFrom stats runif
##' @importFrom utils setTxtProgressBar txtProgressBar
##' @importFrom methods is
##' @export
HFTsimulator <- function(stg,...,instrumentids,datalist,formatlist,
                         tc=FALSE,Sleep=1,DIGITSSECS=3,septraded=FALSE,unclosed=TRUE,closed=TRUE,interdaily=FALSE,
                         verboselimitpriors=TRUE){
    ## Drive a tick-by-tick simulation: validate inputs, reset the global
    ## .tradingstates / .INSTRUMENT environments, merge and time-order the
    ## instruments' TAQ data, then replay it row by row, calling the user's
    ## strategy after each market update.
    ## strategy function check
    if(!is(stg,"function")){
        stop(substitute(stg),"is not a function!")
    }
    ## data check
    ## put all data in a list, the list is of the same length of instrumetids
    if(!is(instrumentids,"character")) stop("instrumentids must be of type character.")
    if(is(datalist,"list")){
        if(length(instrumentids)!=length(datalist)) stop("length of instrumentids is not equal to length of datalist!")
        names(datalist) <- instrumentids #sequence of the datas must be in accordance with instrumentids.
    }else if(is(datalist,"data.frame")){
        if(length(instrumentids)!=1) stop("unequal length of data and instrumentids")
        eval(parse(text = paste("datalist<- list(",instrumentids,"=datalist)",sep = ""))) #convert to list
    }else{
        stop("datalist must be of type data.frame or list")
    }
    ## data format check
    ## put all dataformat in a list, the list is of the same length of instrumetids
    requiredformat <- c("pbuyhands","pbuyprice","psellhands","psellprice","ptradetime","plastprice","pvolume")
    if(all(requiredformat%in%names(formatlist))){
        ## a single flat format spec: replicate it for every instrument
        eval(parse(text = paste("formatlist <- list(",paste(paste(instrumentids,"=formatlist"),collapse = ","),")")))
    }else if(all(requiredformat%in%names(formatlist[[1]]))){
        ## scalar length comparison, hence &&
        if(length(formatlist)!=1 && length(formatlist)!=length(instrumentids)) stop("unequal length of formatlist and datalist.")
    }else{
        stop("missing format specifications in ",substitute(formatlist))
    }
    cat("Initializing simulator states...")
    ## Per-tick market updater. It assigns tradetime/lastprice/volume/
    ## orderbook/preorderbook in the enclosing frame via <<- so that the
    ## strategy closure (re-evaluated below) can read them as free variables.
    .CFEupdate <- function(DATA,INSTRUMENTID){
        DATA <- unlist(strsplit(paste(DATA,collapse = ","),split = ","))
        ## extract information
        tradetime <<- .extractinfo("tradetime",DATA,ptradetime=.INSTRUMENT$ptradetime[[INSTRUMENTID]],timeformat=.INSTRUMENT$timeformat[[INSTRUMENTID]])
        ## keep tracking most recent tradetime IMPORTANT
        .tradingstates$currenttradetime <- tradetime
        ## interdaily trading-----------------------------------
        if(.tradingstates$interdaily){
            ## reset instrument trading start indicator
            .tradingstates$startoftheday[INSTRUMENTID] <- FALSE
            HMOS <- .extractinfo("HMOS",DATA,ptradetime=.INSTRUMENT$ptradetime[[INSTRUMENTID]],timeformat=.INSTRUMENT$timeformat[[INSTRUMENTID]])
            .INSTRUMENT$current[[INSTRUMENTID]] <- ifelse(HMOS<=.INSTRUMENT$endoftheday[[INSTRUMENTID]],as.numeric(difftime(HMOS,"1970-01-01 00:00:00.000",units = "secs")+.INSTRUMENT$tomidnight[[INSTRUMENTID]]),as.numeric(difftime(HMOS,.INSTRUMENT$endoftheday[[INSTRUMENTID]],units = "secs")))
            ## new day condition
            if(.INSTRUMENT$current[[INSTRUMENTID]]<.INSTRUMENT$pre[[INSTRUMENTID]]){
                ## instrument trading start indicator
                .tradingstates$startoftheday[INSTRUMENTID] <- TRUE
                ## reset total volume and orderbook
                .INSTRUMENT$pretotalvolume <- .INSTRUMENT$pretotalvolume[names(.INSTRUMENT$pretotalvolume)!=INSTRUMENTID]
                .INSTRUMENT$preorderbook <- .INSTRUMENT$preorderbook[names(.INSTRUMENT$preorderbook)!=INSTRUMENTID]
                IDX <- .tradingstates$capital$instrumentid==INSTRUMENTID
                ## move holdings to preholdins
                .tradingstates$capital[IDX,c("longholdingspreday","shortholdingspreday")] <- .tradingstates$capital[IDX,c("longholdingspreday","shortholdingspreday")]+.tradingstates$capital[IDX,c("longholdingstoday","shortholdingstoday")]
                .tradingstates$capital[IDX,c("longholdingstoday","shortholdingstoday")] <- c(0,0)
                ## .INSTRUMENT$newday[[INSTRUMENTID]] <- FALSE
            }
            .INSTRUMENT$pre[[INSTRUMENTID]] <- .INSTRUMENT$current[[INSTRUMENTID]]
        }
        ## interdaily trading-----------------------------------
        lastprice <<- .extractinfo("lastprice",DATA,plastprice=.INSTRUMENT$plastprice[[INSTRUMENTID]])
        .INSTRUMENT$lastprice[[INSTRUMENTID]] <- lastprice
        totalvolume <<- .extractinfo("volume",DATA,pvolume=.INSTRUMENT$pvolume[[INSTRUMENTID]])
        if(! INSTRUMENTID%in%names(.INSTRUMENT$pretotalvolume) ){
            .INSTRUMENT$pretotalvolume[[INSTRUMENTID]] <- totalvolume
        }
        volume <<- totalvolume-.INSTRUMENT$pretotalvolume[[INSTRUMENTID]]
        orderbook <<- .extractinfo("orderbook",DATA,pbuyhands=.INSTRUMENT$pbuyhands[[INSTRUMENTID]],pbuyprice=.INSTRUMENT$pbuyprice[[INSTRUMENTID]],psellhands=.INSTRUMENT$psellhands[[INSTRUMENTID]],psellprice=.INSTRUMENT$psellprice[[INSTRUMENTID]])
        if(! INSTRUMENTID%in%names(.INSTRUMENT$preorderbook) ){
            .INSTRUMENT$preorderbook[[INSTRUMENTID]] <- orderbook
        }
        .INSTRUMENT$orderbook[[INSTRUMENTID]] <- orderbook
        preorderbook <<- .INSTRUMENT$preorderbook[[INSTRUMENTID]] #might be useful
        ## update states
        .updateinstrument(instrumentid=INSTRUMENTID,lastprice,volume,orderbook,.INSTRUMENT$preorderbook[[INSTRUMENTID]],.INSTRUMENT$fee[[INSTRUMENTID]],.INSTRUMENT$closeprior[[INSTRUMENTID]],multiplier=.INSTRUMENT$multiplier[[INSTRUMENTID]])
        ## save as previous values
        .INSTRUMENT$pretotalvolume[[INSTRUMENTID]] <- totalvolume
        .INSTRUMENT$preorderbook[[INSTRUMENTID]] <- orderbook
        ## some automatic functions
        .timeoutdetector()
        .orderchaser()
        .tradecenter(INSTRUMENTID)
    }
    ## garbage picker: re-evaluate stg's source here so its enclosing
    ## environment becomes this frame, giving it access to the <<- variables
    garbagepicker <- eval(parse(text = deparse(stg)))
    ## environment settings
    options(digits.secs=DIGITSSECS)
    options(stringsAsFactors = FALSE)
    ## initialize simulator state
    .tradingstates$tc <- tc #trade-center
    .tradingstates$septraded <- septraded
    .tradingstates$interdaily <- interdaily #interdaily support
    .tradingstates$Sleep <- Sleep #trade-center idle time
    .tradingstates$closed <- closed #recored all closed orders
    .tradingstates$unclosed <- unclosed #track all unclosed orders
    .tradingstates$orders <- data.frame(
        instrumentid=character(),
        orderid=character(),direction=numeric(),
        price=numeric(),hands=numeric(),
        action=character(),
        initialhands=numeric(),
        timeoutlist=logical(), #wether to check timeout
        timeoutchase=logical(), #wether to chase after timeout
        timeoutsleep=numeric(), #length of timeout,in secs
        chaselist=logical(), #wether to chase
        chasesleep=numeric(), #length of chase sleep time,secs
        submitstart=character(), #chase or timeout start time
        stringsAsFactors=FALSE)
    .tradingstates$limitprior <- NULL #high prior limit orders
    .tradingstates$capital <- data.frame(
        instrumentid=character(),
        longholdingstoday=numeric(), shortholdingstoday=numeric(),
        longholdingspreday=numeric(),shortholdingspreday=numeric(),
        totallongholdings=numeric(),totalshortholdings=numeric(),
        cash=numeric(),stringsAsFactors=FALSE
    )
    .tradingstates$th <- data.frame(instrumentid=character(),longholding=numeric(),
                                    shortholding=numeric(),stringsAsFactors = FALSE) #targetholdings required by trade center
    .tradingstates$orderhistory <- data.frame(
        instrumentid=character(),orderid=character(),
        direction=numeric(),price=numeric(),
        hands=numeric(),action=character(),
        tradetime=character(),tradeprice=numeric(),
        cost=numeric(),status=numeric(),
        initialhands=numeric(),
        stringsAsFactors = FALSE)
    .tradingstates$capitalhistory <- data.frame(
        instrumentid=character(),
        longholdingstoday=numeric(), shortholdingstoday=numeric(),
        longholdingspreday=numeric(),shortholdingspreday=numeric(),
        totallongholdings=numeric(),totalshortholdings=numeric(),
        cash=numeric(),tradetime=character(),
        tradeprice=numeric(),tradehands=numeric(),cost=numeric(),
        stringsAsFactors=FALSE)
    .tradingstates$longopen <- data.frame(
        instrumentid=character(),orderid=character(),
        action=character(),
        direction=numeric(),
        tradehands=numeric(),
        tradeprice=numeric(),
        stringsAsFactors = FALSE)
    .tradingstates$shortclose <- .tradingstates$longopen
    .tradingstates$shortopen <- .tradingstates$longopen
    ## BUGFIX: the original assigned shortclose twice and never initialized
    ## longclose; give longclose the same empty template.
    .tradingstates$longclose <- .tradingstates$longopen
    .tradingstates$currenttradetime <- character() #current time tracker
    .tradingstates$startoftheday <- logical() #interdaily
    .tradingstates$verbosepriors <- NULL
    .tradingstates$justchanged <- NULL
    .tradingstates$lastchange <- NULL
    .tradingstates$closedtracker <- data.frame(instrumentid=character(),cash=numeric(),stringsAsFactors=FALSE) #closed
    .tradingstates$unclosedlong <- .tradingstates$longopen
    .tradingstates$unclosedshort <- .tradingstates$longopen
    ## <<<<<<<<<<<<<<< TO DO >>>>>>>>>>>>>>>
    ## rearrange data sequence (to support multiple instruments with different data formats)
    if(length(formatlist)>=2){
        if(any(vapply(2:length(formatlist),function(i){
            !identical(formatlist[[i]],formatlist[[i-1]])
        },FUN.VALUE = logical(1)))) stop("multiple instruments with different data formats is not supported yet.")
    }
    ## merge all instruments' data to a large data.frame
    tags <- rep(instrumentids,times=vapply(datalist,function(d){nrow(d)},FUN.VALUE = numeric(1)))
    datalist <- lapply(datalist,function(d){names(d) <- paste("V",1:ncol(d),sep = "");return(d)})
    datalist <- do.call(rbind,datalist)
    datalist$instrumentid <- tags
    datalist <- datalist[order(datalist[,formatlist[[1]]$ptradetime]),] #order by time
    ## initialize instruments' states
    ## scalar length comparison, hence &&
    if(length(formatlist)==1 && length(formatlist)!=length(instrumentids)){
        formatlist <- rep(formatlist,length(instrumentids))
        names(formatlist) <- instrumentids
    }
    for(instrumentid in instrumentids){
        dataformat <- formatlist[[instrumentid]]
        ## fill optional format fields with defaults
        if(is.null(dataformat[["fee"]])){
            dataformat$fee=c(long=0,short=0,closetoday=0,closepreday=0)
        }
        if(is.null(dataformat[["closeprior"]])){
            dataformat$closeprior = "today"
        }
        if(is.null(dataformat[["timeformat"]])){
            dataformat$timeformat = "%Y-%m-%d %H:%M:%OS"
        }
        if(is.null(dataformat[["endoftheday"]])){
            dataformat$endoftheday="23:59:59.999"
        }
        if(is.null(dataformat[["multiplier"]])){
            dataformat$multiplier=1
        }
        .initializeinstrument(instrumentid=instrumentid,
                              pbuyhands=dataformat$pbuyhands,
                              pbuyprice=dataformat$pbuyprice,
                              psellhands=dataformat$psellhands,
                              psellprice=dataformat$psellprice,
                              ptradetime=dataformat$ptradetime,
                              plastprice=dataformat$plastprice,
                              pvolume=dataformat$pvolume,
                              fee=dataformat$fee,
                              closeprior=dataformat$closeprior,
                              timeformat=dataformat$timeformat,
                              endoftheday=dataformat$endoftheday,
                              multiplier=dataformat$multiplier)
    }
    cat("done\n")
    pb <- txtProgressBar(min = 1,max = nrow(datalist),style = 3)
    ## initialize tmp vars (targets of the <<- assignments in .CFEupdate)
    tradetime <- character(1)
    lastprice <- numeric(1)
    totalvolume <- numeric(1)
    volume <- numeric(1)
    orderbook <- list()
    preorderbook <- list()
    ## simulation: replay every row, update market state, then run strategy
    for(i in 1:nrow(datalist)){
        .CFEupdate(DATA = datalist[i,],INSTRUMENTID = datalist[i,"instrumentid"])
        garbagepicker(...)
        if(verboselimitpriors){
            .verboselimitpriors()
        }
        setTxtProgressBar(pb,i)
    }
    cat("\n")
    invisible(list(orderhistory=.tradingstates$orderhistory,capitalhistory=.tradingstates$capitalhistory,queuingorders=.tradingstates$orders,capital=.tradingstates$capital,verbosepriors=.tradingstates$verbosepriors))
}
#* @apiTitle HTF API
#* @apiDescription RISKLOGICS HIGH TRADING FREQUENCY API
#* @apiContact list(name = "IDRISS OLIVIER BADO", url = "https://github.com/idrissbado", email = "olivier.bado@kyria-cs.com")
#* @apiLicense list(name = "Apache 2.0", url = "https://www.apache.org/licenses/LICENSE-2.0.html")
#* @apiVersion 1.0.1
DemoStrategy <- function(){
    ## Example market-making strategy for HFTsimulator. It reads the free
    ## variables (lastprice, volume, orderbook, preorderbook) that the
    ## simulator assigns via <<- before each call -- it is NOT a standalone
    ## function and only works when passed as 'stg' to HFTsimulator.
    ## Note S() takes a bare symbol (longopen.non) via substitute().
    bsi <- BSI(lastprice=lastprice,bid1 = preorderbook$buybook$price[1],ask1 = preorderbook$sellbook$price[1],volume = volume) # BSI return a length-two vetor representing the amount initiated by buyer and seller
    spread <- orderbook$sellbook$price[1]-orderbook$buybook$price[1] # bid-ask-spread
    if( spread>0.01 & bsi[2]<20 & S("TF1603",longopen.non)){
        ## profit margin is big, seller initiated amount is small, and there is no long open order in queue.
        timeoutsubmission(instrumentid="TF1603",direction = 1,orderid = randomid(5),
                          price = orderbook$buybook$price[1],hands = 1,
                          action = "open",timeoutsleep = 10) #submit a long open order, canceled it if no execution in 10 seconds.
    }
    else if(spread>0.01 & bsi[1]<20 & S("TF1603",shortopen.non)){
        ## profit margin is big, buyer initiated amount is small, and there is no short open order in queue.
        timeoutsubmission(instrumentid="TF1603",direction = -1,orderid = randomid(5),
                          price = orderbook$sellbook$price[1],hands = 1,
                          action = "open",timeoutsleep = 10) #submit a short open order, canceled it if no execution in 10 seconds.
    }
    chasecloseall("TF1603",chasesleep = 1) # close all open positions.
}
#*Plot indicator
#*@param req data
#*@serializer png
#*@post /plotindicator
function(req,sub){
    ## Render a candlestick chart from the OHLCV CSV posted in the request
    ## body. 'sub' is accepted but unused.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    quotes$date <- ymd(quotes$date)
    quotes <- ts(quotes)
    chartSeries(quotes,type="candlesticks",theme=chartTheme('white'))
}
#*return simple moving average
#*@param req
#*@serializer json
#*@post /SMA
function(req,n){
    ## Return the n-period simple moving average of the close column of the
    ## posted CSV quotes.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    series <- ts(quotes)
    SMA(Cl(series),as.numeric(n))
}
#*return exponential moving average
#*@param req
#*@param n
#*@post /EMA
function(req,n){
    ## Return the n-period exponential moving average of the close column of
    ## the posted CSV quotes.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    series <- ts(quotes)
    EMA(Cl(series),as.numeric(n))
}
#*return Bolllinger Band
#*@param req
#*@param n
#*@post /BBAND
function(req){
    ## Compute Bollinger Bands (2 standard deviations) on the close column of
    ## the posted CSV quotes.
    data<-read.csv(text=req$postBody,header=TRUE,sep=",")
    names(data)<-c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    data<-ts(data)
    ## BUGFIX: TTR::BBands' argument is 'sd', not 's.d'; the original 's.d=2'
    ## was silently swallowed by '...' and the default sd=2 was used only by
    ## coincidence. The numeric result is unchanged.
    BBands(Cl(data),sd = 2)
}
#*return momentum
#*@param req
#*@param n
#*@post /momentum
function(req,n){
    ## Return the n-period price momentum of the close column of the posted
    ## CSV quotes.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    momentum(Cl(ts(quotes)), as.numeric(n))
}
#*return rate of change (ROC)
#*@param req
#*@param n
#*@post /ROC
function(req,n){
    ## Return the n-period rate of change of the close column of the posted
    ## CSV quotes.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    ROC(Cl(ts(quotes)),as.numeric(n))
}
#*return macd
#*@param req
#*@param n
#*@post /macd
function(req){
    ## Return the MACD(12,26,9) of the close column of the posted CSV quotes,
    ## smoothed with simple moving averages.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    MACD(Cl(ts(quotes)), nFast=12, nSlow=26, nSig=9, maType=SMA)
}
#*return rsi
#*@param req
#*@param n
#*@post /rsi
function(req,n){
    ## Return the n-period relative strength index of the close column of the
    ## posted CSV quotes.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    RSI(Cl(ts(quotes)), as.numeric(n))
}
#* return plotwithSMA
#* @param req
#* @param n
#* @post /plotwithSMA
function(req,n){
    ## Chart the posted quotes and overlay an n-period SMA on the price panel.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    quotes <- ts(quotes)
    chartSeries(quotes,theme=chartTheme('white'))
    addSMA(as.numeric(n),on=1,col = "blue")
}
#* return plotwithEMA
#* @param req
#* @param n
#* @post /plotwithEMA
function(req,n){
    ## Chart the posted quotes and overlay an n-period EMA on the price panel.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    quotes <- ts(quotes)
    chartSeries(quotes,theme=chartTheme('white'))
    addEMA(as.numeric(n),on=1,col = "blue")
}
#* NOTE(review): the handler below duplicates the /plotwithSMA route defined above; confirm which definition is intended.
#* @param req
#* @serializer csv
#* return plotwithSMA
#* @param req
#* @param n
#* @post /plotwithSMA
function(req,n){
    ## NOTE(review): this handler registers the same /plotwithSMA route as an
    ## earlier endpoint in this file -- plumber keeps one handler per route;
    ## confirm which definition is intended.
    data<-read.csv(text=req$postBody,header=TRUE,sep=",")
    names(data)<-c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    data<-ts(data)
    ## draw the base chart, then overlay an n-period SMA on the price panel
    chartSeries(data,
                theme=chartTheme('white'))
    addSMA(as.numeric(n),on=1,col = "blue")
}
#* return plotwithRSI
#* @param req
#* @param n
#*@serializer png
#* @post /plotwithRSI
function(req,n){
    ## Chart the posted quotes and add an n-period, EMA-smoothed RSI panel.
    quotes <- read.csv(text=req$postBody,header=TRUE,sep=",")
    names(quotes) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    quotes <- ts(quotes)
    chartSeries(quotes,theme=chartTheme('white'))
    addRSI(as.numeric(n),maType="EMA")
}
#*returnplotTA
#*@param req
#*@param n
#*@serializer png
#*@post /plotTA
function(req,n){
    ## Overlay an n-period SMA of the close column as a technical indicator.
    data<-read.csv(text=req$postBody,header=TRUE,sep=",")
    names(data)<-c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
    data<-ts(data)
    sma <- SMA(Cl(data),as.numeric(n))
    ## NOTE(review): addTA() draws onto the *current* quantmod chart, but no
    ## chartSeries() call precedes it in this handler -- this likely fails
    ## unless another request already created a chart in the same R session;
    ## confirm intended behavior.
    addTA(sma, on=1, col="red")
}
#* return trading size
#*@param value initial wealth
#*@param qty Trade unit: qty stocks per trade
#Test the following strategy based on day RSI :
#*@param upper1 :Buy one more unit if RSI <upper1
#*@param upper2 :Keep buying the same if upper1< RSI < upper2
#Stop trading if RSI >= upper2
#*@serializer png
#*@post /tradingsize
function(qty, day, req, upper1, upper2, value) {
  # Coerce string request parameters once, up front, instead of repeatedly
  # inside the loops.
  qty    <- as.numeric(qty)
  day    <- as.numeric(day)
  upper1 <- as.numeric(upper1)
  upper2 <- as.numeric(upper2)
  value  <- as.numeric(value)

  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  names(data) <- c("date", "data.open", "data.High", "data.Low", "data.Close", "data.volume", "data.Adj")
  data <- ts(data)

  price <- Cl(data)
  n <- length(price)
  # Preallocate state vectors (the originals were grown inside the loops).
  signal <- numeric(n)            # position size: units currently held
  wealth <- numeric(n)
  wealth[1:(day + 1)] <- value
  ret <- numeric(n)               # renamed from `return`, which shadowed base::return
  profit <- numeric(n)

  # Position-sizing signal from the day-period RSI:
  # below upper1 -> add one unit; between upper1 and upper2 -> hold size;
  # at/above upper2 -> go flat.
  rsi <- RSI(price, day)
  for (i in (day + 1):n) {
    if (rsi[i] < upper1) {
      signal[i] <- signal[i - 1] + 1
    } else if (rsi[i] < upper2) {
      signal[i] <- signal[i - 1]
    } else {
      signal[i] <- 0
    }
  }
  signal <- reclass(signal, price)

  # Trade rule: act on the *lagged* signal (no look-ahead); per-bar P&L is
  # the open-to-close move times units held times trade size.
  Close <- Cl(data)
  Open <- Op(data)
  trade <- Lag(signal)
  for (i in (day + 1):n) {
    profit[i] <- qty * trade[i] * (Close[i] - Open[i])
    wealth[i] <- wealth[i - 1] + profit[i]
    ret[i] <- (wealth[i] / wealth[i - 1]) - 1
  }
  ret3 <- ts(reclass(ret, price))
  charts.PerformanceSummary(ret3, main = "Trade Size")
}
#* return nontrading size
#*@param value initial wealth
#*@param qty Trade unit: qty stocks per trade
#Test the following strategy based on day RSI :
#*@param upper1 :Buy one more unit if RSI <upper1
#*@param upper2 :Keep buying the same if upper1< RSI < upper2
#Stop trading if RSI >= upper2
#*@serializer png
#*@post /nontradingsize
function(qty, day, req, upper1, upper2, value) {
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  names(data) <- c("date", "data.open", "data.High", "data.Low", "data.Close", "data.volume", "data.Adj")
  data <- ts(data)
  # Coerce string request parameters once, up front.
  qty    <- as.numeric(qty)
  day    <- as.numeric(day)
  upper1 <- as.numeric(upper1)
  upper2 <- as.numeric(upper2)
  value  <- as.numeric(value)

  price <- Cl(data)
  n <- length(price)
  # Preallocated state: +1/0/-1 signal, share holding, and cash balance
  # (the originals were grown inside the loops).
  signal <- numeric(n)
  stock <- numeric(n)
  cash <- numeric(n)
  cash[1:(day + 1)] <- value

  # Simple RSI signal: buy (+1) below upper1, hold (0) below upper2,
  # liquidate (-1) otherwise.
  rsi <- RSI(price, day)
  for (i in (day + 1):n) {
    if (rsi[i] < upper1) {
      signal[i] <- 1
    } else if (rsi[i] < upper2) {
      signal[i] <- 0
    } else {
      signal[i] <- -1
    }
  }
  signal <- ts(reclass(signal, price))

  # Assume fills at the closing price; trade on the lagged signal and track
  # how cash and stock holdings evolve. A sell signal liquidates everything.
  trade <- Lag(signal)
  for (i in (day + 1):n) {
    if (trade[i] >= 0) {
      stock[i] <- stock[i - 1] + qty * trade[i]
      cash[i] <- cash[i - 1] - qty * trade[i] * price[i]
    } else {
      stock[i] <- 0
      cash[i] <- cash[i - 1] + stock[i - 1] * price[i]
    }
  }
  stock <- ts(reclass(stock, price))
  cash <- ts(reclass(cash, price))

  # Equity = mark-to-market holdings + cash; returns are period-over-period.
  equity <- numeric(n)
  equity[1:(day + 1)] <- value
  ret <- numeric(n)  # renamed from `return`, which shadowed base::return
  for (i in (day + 1):n) {
    equity[i] <- stock[i] * price[i] + cash[i]
    ret[i] <- equity[i] / equity[i - 1] - 1
  }
  equity <- ts(reclass(equity, price))
  ret <- ts(reclass(ret, price))
  # BUG FIX: the original called dev.new() + par(new=TRUE), which opened a
  # *new* interactive device, so the plot never reached the png device
  # that @serializer png manages. Plot directly instead.
  charts.PerformanceSummary(ret, main = "Non-Day-Trading")
}
#*Create strategy
#Long when macd crosses macd signal upwards
#Short when macd crosses macd signal downwards
#*@param req
#*@serializer csv
#*@post /createstrategy
function(req) {
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  data <- ts(data)
  # Merge in the MACD columns (macd and its signal line) and drop warm-up NAs.
  data <- na.omit(merge(data, MACD(Cl(data))))
  # Trading signal: long (+1) while MACD is above its signal line, short (-1) below.
  data$sig <- ifelse(data$macd < data$signal, -1, 1)
  # Flat on first day and last day
  data$sig[1] <- 0
  data$sig[nrow(data)] <- 0
  # BUG FIX: the original filled and lagged data$signal -- the MACD signal
  # *line* -- instead of data$sig, the trading signal built above. All three
  # post-processing steps now operate on sig.
  # Wherever sig is NA, carry the previous value forward
  data$sig <- na.locf(data$sig)
  # Lag sig so that you don't trade on the same bar that your signal fires
  data$sig <- Lag(data$sig)
  # Replace NA with zero position on first row
  data$sig[1] <- 0
  return(data)
}
#*Create a table with your returns
#*@param req
#*@serializer csv
#*@post /returnstable
function(req){
# Parse the posted CSV OHLC data and merge in MACD columns (macd, signal).
data<-read.csv(text=req$postBody,header=TRUE,sep=",")
data<-ts(data)
GOOG <- na.omit(merge(data, MACD(Cl(data))))
# NOTE(review): GOOG$signal here is the MACD signal *line*, not a +1/-1
# position signal (compare /createstrategy, which builds data$sig).
# Confirm which column was intended before trusting these "returns".
Returns <- na.omit(GOOG$signal)* dailyReturn(Cl(ts(GOOG)))
return(as.data.frame(Returns))
}
#*Create a calendar table of returns
#*@param req
#*@serializer csv
#*@post /calendarreturntable
function(req) {
  # Parse the posted CSV, attach MACD columns, and tabulate calendar returns.
  ohlc <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  ohlc <- ts(ohlc)
  merged <- na.omit(merge(ohlc, MACD(Cl(ohlc))))
  # Signal column times daily close-to-close returns, then laid out by month.
  strat_returns <- na.omit(merged$signal) * dailyReturn(Cl(ts(merged)))
  return(as.data.frame(table.CalendarReturns(strat_returns)))
}
#*Create a table of drawdowns
#*@param req
#*@serializer csv
# BUG FIX: this endpoint was annotated @post /calendarreturntable, colliding
# with the calendar-returns endpoint above; route it at its own path.
#*@post /drawdownstable
function(req) {
  # Parse the posted CSV OHLC data and merge in the MACD columns.
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  data <- ts(data)
  merged <- na.omit(merge(data, MACD(Cl(data))))
  # Signal column times daily close-to-close returns
  # (see the NOTE in /returnstable about which column `signal` really is).
  Returns <- na.omit(merged$signal) * dailyReturn(Cl(ts(merged)))
  # Top-10 drawdowns of the return series, as a plain data.frame for CSV output.
  returns <- as.data.frame(table.Drawdowns(Returns, top = 10))
  return(returns)
}
#*create direction of price (up or down) depending on whether current price
#*is greater or lower than the previous 20 days price.
#* @param req trading data contains date 'open', "high", "low", "close"
#*@serializer csv
#*@post /directiontrading
function(req){
  # Get the data (semicolon-separated minute bars: date;open;high;low;close).
  # NOTE(review): every other endpoint uses sep="," -- confirm ";" is intended.
  min_gbpchf18 <- read.csv(text = req$postBody,
                           head = TRUE, sep = ";")
  names(min_gbpchf18) <- c('date', 'open', "high", "low", "close")
  min_gbpchf18 <- min_gbpchf18[, -c(2:4)]   # keep only date and close
  min_gbpchf18$date <- ymd_hms(min_gbpchf18$date)
  # Build an xts close series indexed by timestamp.
  ohlc <- data.frame(min_gbpchf18$close)
  time_index <- as.POSIXct(min_gbpchf18$date)
  min_gbpchf <- as.xts(ohlc, time_index)
  names(min_gbpchf) <- c("close")
  # (removed dead `min_gbpchf18 <- ts(min_gbpchf18)` -- the value was never used)
  # Get indicators
  rsi14 <- RSI(Cl(min_gbpchf))              # default n = 14
  # BUG FIX: rsi5 previously also used the default n = 14; use n = 5.
  rsi5 <- RSI(Cl(min_gbpchf), n = 5)
  sma10 <- SMA(Cl(min_gbpchf), n = 10)
  sma20 <- SMA(Cl(min_gbpchf), n = 20)
  ema15 <- EMA(Cl(min_gbpchf), n = 15)
  macd7205 <- MACD(Cl(min_gbpchf), 7, 20, 5, 'SMA')
  macd12269 <- MACD(Cl(min_gbpchf), 12, 26, 9, 'SMA')
  min_gbpchf <- cbind(min_gbpchf$close, sma10, sma20, ema15,
                      rsi14, rsi5, macd7205, macd12269)
  min_gbpchf <- na.omit(min_gbpchf)
  # Label each bar by its 20-bar lagged return: Up / Down / Nowhere,
  # with a +/-0.0001 dead band.
  direction <- data.frame(matrix(NA, dim(min_gbpchf)[1], 1))
  lag_ret <- (min_gbpchf$close - Lag(min_gbpchf$close, 20)) / Lag(min_gbpchf$close, 20)
  direction[lag_ret > 0.0001] <- 'Up'
  direction[lag_ret < -0.0001] <- 'Down'
  direction[lag_ret < 0.0001 & lag_ret > -0.0001] <- "Nowhere"
  direction <- na.omit(direction)
  # Frequency table of the three labels.
  return(table(direction))
}
#* return req trading data
#* @param fractionTraining
#* @param fractionValidation
#* @param fractionTest
#* @serializer png
#* @post /predictiontrading
function(req, fractionTraining, fractionValidation, fractionTest) {
  # Split the posted data into random training / validation / test sets,
  # label the 20-bar direction, train a DBN classifier, and plot the
  # resulting strategy returns on the test split.
  df <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  df <- ts(df)
  # NOTE(review): ts() on a data.frame yields a matrix, so $close accesses
  # further down may not resolve -- confirm the intended input handling.
  # Compute sample sizes from the requested fractions.
  sampleSizeTraining <- floor(as.numeric(fractionTraining) * nrow(df))
  sampleSizeValidation <- floor(as.numeric(fractionValidation) * nrow(df))
  sampleSizeTest <- floor(as.numeric(fractionTest) * nrow(df))
  # Randomly-sampled indices; setdiff() keeps the three sets disjoint.
  indicesTraining <- sort(sample(seq_len(nrow(df)), size = sampleSizeTraining))
  indicesNotTraining <- setdiff(seq_len(nrow(df)), indicesTraining)
  indicesValidation <- sort(sample(indicesNotTraining, size = sampleSizeValidation))
  indicesTest <- setdiff(indicesNotTraining, indicesValidation)
  # The three data splits.
  dfTraining <- df[indicesTraining, ]
  dfValidation <- df[indicesValidation, ]
  dfTest <- df[indicesTest, ]
  min_gbpchf <- df
  # Label each bar by its 20-bar lagged return: Up / Down / Nowhere.
  direction <- data.frame(matrix(NA, dim(min_gbpchf)[1], 1))
  lag_ret <- (min_gbpchf$close - Lag(min_gbpchf$close, 20)) / Lag(min_gbpchf$close, 20)
  direction[lag_ret > 0.0001] <- 'Up'
  direction[lag_ret < -0.0001] <- 'Down'
  direction[lag_ret < 0.0001 & lag_ret > -0.0001] <- "Nowhere"
  direction <- na.omit(direction)
  train_direction <- direction[indicesTraining, ]
  validation_direction <- direction[indicesValidation, ]
  testing_direction <- direction[indicesTest, ]
  # BUG FIX: the model/prediction calls referenced undefined objects
  # `train`, `validation` and `testing`; use the splits computed above.
  model <- dbn.dnn.train(scale(dfTraining), class.ind(train_direction), hidden = c(3, 4, 6))
  validation_prediction <- nn.predict(model, dfValidation)
  summary(validation_prediction)
  nn.test(model, dfValidation, class.ind(validation_direction), t = 0.4)
  # Classify validation predictions with per-class probability cutoffs.
  valid_pred_class <- data.frame(matrix(NA, dim(validation_prediction)[1], 1))
  valid_pred_class[validation_prediction[, 'Down'] > 0.3754, 1] <- 'Down'
  valid_pred_class[validation_prediction[, 'Nowhere'] > 0.233, 1] <- 'Nowhere'
  valid_pred_class[validation_prediction[, 'Up'] > 0.3771, 1] <- 'Up'
  valid_pred_class <- na.locf(valid_pred_class)
  # [[1]] replaces the fragile auto-generated column name
  # (matrix.NA..dim.validation_prediction..1...1.).
  valid_pred_class <- as.factor(valid_pred_class[[1]])
  validation_direction <- as.factor(validation_direction)
  valid_matrix <- confusionMatrix(valid_pred_class, validation_direction)
  # Classify test predictions with the same cutoffs.
  test_prediction <- nn.predict(model, dfTest)
  test_pred_class <- data.frame(matrix(NA, dim(test_prediction)[1], 1))
  # BUG FIX: test classes were derived from validation_prediction.
  test_pred_class[test_prediction[, 'Down'] > 0.3754, 1] <- 'Down'
  test_pred_class[test_prediction[, 'Nowhere'] > 0.233, 1] <- 'Nowhere'
  test_pred_class[test_prediction[, 'Up'] > 0.3771, 1] <- 'Up'
  # BUG FIX: the na.locf result was assigned to valid_pred_class.
  test_pred_class <- na.locf(test_pred_class)
  test_pred_class <- as.factor(test_pred_class[[1]])
  testing_direction <- as.factor(testing_direction)
  test_matrix <- confusionMatrix(test_pred_class, testing_direction)
  # Strategy: +1 on predicted Up, -1 on Down, flat on Nowhere; trade the lag.
  signal <- ifelse(test_pred_class == 'Up', 1, ifelse(test_pred_class == 'Down', -1, 0))
  cost <- 0
  trade_ret <- dfTest$close * Lag(signal) - cost
  # Plot the performance of the strategy on the test split.
  charts.PerformanceSummary(trade_ret)
}
#* start simulation
#*@param instrumentids
#*@serializer png
#*@post /simulationtrad
function(req, instrumentids) {
  # Parse the posted CSV tick/quote data.
  TFtaq <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # BUG FIX: the literal string "instrumentids" was passed instead of the
  # request parameter, so the caller's instrument id was ignored.
  res1 <- HFTsimulator(stg = DemoStrategy,            # strategy function
                       instrumentids = instrumentids, # security id(s)
                       datalist = TFtaq, formatlist = TFformat # data
                       )
  return(res1)
}
#* return trading summaryplot
#*@param req data of trading
#*@param instrumentids trading instrument
#*@param starttime the started time "09:15:00.000"
#*@param endtime the ended time "11:30:00.000"
#*@serializer png
#*@post /tradinsummary
function(req, instrumentids, starttime, endtime) {
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # BUG FIX: the literal strings "instrumentids"/"starttime"/"endtime" were
  # passed instead of the request parameters, so the caller's values were
  # silently ignored.
  tradesummary(data, instrumentids, starttime, endtime) # summary plot
}
#* return check details of some specific orders.
#*@param req data of trading
#*@param instrumentids trading instrument
#*@param starttime the started time "13:00:00.000"
#*@param endtime the ended time "15:15:00.000"
#*@param n the row index of the traded order whose limits to check
#*@serializer png
#*@post /multy
function(req, instrumentids, starttime, endtime, n) {
  TFtq <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # BUG FIX: literal strings (two with stray trailing spaces) were passed
  # instead of the request parameters; use the caller-supplied values.
  res2 <- tradesummary(TFtq, instrumentids, starttime, endtime) # summary plot
  # Check limits for the n-th traded order reported by tradesummary().
  checklimit(instrumentdata = TFtq, orderid = res2$traded$orderid[as.numeric(n)])
} | /risklogictrading.R | no_license | idrissbado/HTFAPI | R | false | false | 81,142 | r | library(quantmod)
library(TTR)
library(PerformanceAnalytics)
library(tseries)
library(xts)
library(zoo)
library(quantstrat)
library(dplyr)
library(knitr)
library(plumber)
library(tibble)
library(highfrequency)
library(rtsdata)
library(lubridate)
library(nnet)
library(caret)
library(deepnet)
library(leaflet)
##' @title queryorder
##' @description get current queuing orders.
##' @details return a data.frame of current queuing orders, each row of
##' the data.frame representing an order, queryorder will return all of the
##' queuing orders if orderid is NULL. when there is no queuing orders,
##' queryorder will return a data.frame with 0 rows.
##' @param orderids specifying order ids to be queried, return all orders if
##' orderids=NULL, default NULL.
##' @return a data.frame of queuing orders.
##' @examples
##' \dontrun{
##' ## get all queuing orders
##' queryorder()
##' ## get all orders that satisfying orderid%in%c("order1","order2")
##' queryorder(orderids=c("order1","order2"))
##' }
##' @export
queryorder <- function(orderids=NULL){
    ## Snapshot of the queuing-order table held in the shared package state.
    orders <- .tradingstates$orders
    if(is.null(orderids)){
        ## no filter requested: hand back every queuing order
        return(orders)
    }
    ## keep only the rows whose orderid was asked for
    orders[orders$orderid %in% orderids, ]
}
##' @title querycapital
##' @description get current capital status.
##' @details return a data.frame of current capital status, each row
##' of the data.frame representing an instrument, if instrumentids is not NULL,
##' querycapital will return the capital status specified by instrumentids.
##' @param instrumentids specifying instrumentids to be queried, return total
##' capital status if instrumentids=NULL, default NULL.
##' @return a data.frame of all specified instruments' current status
##' @examples
##' \dontrun{
##' ## get total capital status
##' querycapital()
##' ## get capital status of TF1603 and T1603
##' querycapital(instrumentids=c("TF1603","T1603"))
##' }
##' @export
querycapital <- function(instrumentids=NULL){
    ## Snapshot of the capital table held in the shared package state.
    capital <- .tradingstates$capital
    if(is.null(instrumentids)){
        ## no filter requested: hand back the full capital status
        return(capital)
    }
    ## keep only the rows for the requested instruments
    capital[capital$instrumentid %in% instrumentids, ]
}
##' @title ordersubmission
##' @description
##' take one of the following order actions: open, close, closetoday,
##' closepreday and cancel.
##' @details ordersubmission submit an order specified by the user, it also
##' take some additional actions after the submission. For example, if set
##' timeoutlist=TRUE and timeoutsleep=1, the simulator will first submit an
##' order and cancel it if the order is not executed in the next second.
##' @seealso \link{multisubmission} \link{timeoutchasesubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifying order price. NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutlist logical, indicating whether to give current order a
##' timeout interval, the length of the interval is specified by timeoutsleep.
##' if the order hasn't been executed after a time interval greater than
##' timeoutsleep, the order will be canceled.
##' @param timeoutchase logical, indicating whether to chase order when timeout.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @param chaselist logical, indicating whether to put this order to the
##' auto-chase list. if the order hasn't been executed for a time interval
##' longer than chasesleep, the simulator will cancel this order(if needed),
##' then submit a new one with the sampe hands and a price equal to the
##' bid1/ask1 price. the simulator will repeat this action until the original
##' submitted amount is executed.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## a length 5 random orderid is generated by randomid(5)
##' ordersubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open")
##' }
##' @export
ordersubmission <- function(instrumentid="TF1603",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1){
    ## Submit (or cancel) a single simulated order against the shared trading
    ## state. Returns a status code: 3 = queued, 5 = canceled; an infeasible
    ## close request writes status 6 to the history and stops.
    ## BUG FIX (encoding): six occurrences of "&currentinstrument" had been
    ## corrupted to "¤tinstrument" (HTML entity mangling of "&curren"),
    ## which made this function unparseable; all are restored below.
    tradetime <- .tradingstates$currenttradetime
    if(is.null(orderid)){
        warning("orderid not specified, generating a random id")
        orderid <- randomid(10)
    }
    match.arg(action,choices = c("open","close","closetoday","closepreday","cancel"))
    if(is.null(instrumentid)){
        stop("instrumentid must not be NULL!")
    }
    ## cancel order: drop it from the queue and record status 5
    if(action=="cancel"){
        canceledorder <- .tradingstates$orders[.tradingstates$orders$orderid==orderid,]
        .tradingstates$orders <- .tradingstates$orders[.tradingstates$orders$orderid!=orderid,]
        .writeorderhistory(instrumentid,orderid,canceledorder$direction,canceledorder$hands,canceledorder$price,tradeprice=0,status=5,action,cost=0)
        return(5)
    }
    ## basic sanity checks: integral positive hands, non-negative price,
    ## direction in {-1, 1}
    if(any(c(hands%%1!=0, hands<=0, isTRUE(price<0) , !(direction%in%c(-1,1))))){
        stop("illegal parameter values!")
    }
    ## Validate that a close-type request is feasible against current holdings
    ## and the already-queued closes (`untrade`, set by the caller branch);
    ## also rejects a second market order in the same position. On failure,
    ## writes status 6 and stops.
    .sucker <- function(LONGHOLDINGS,SHORTHOLDINGS){
        vol <- abs(hands)
        if(direction==-1){
            ## close long, hold>0, untrade<0
            hold <- sum(.tradingstates$capital[[LONGHOLDINGS]][.tradingstates$capital$instrumentid==instrumentid])
            nethold <- hold+untrade
            if( (hold==0) | direction==sign(nethold) |
                vol>abs(hold) | vol>abs(nethold) |
                (any(currentinstrument$price==0 & currentinstrument$direction==direction & currentinstrument$action%in%c("close",action)) & price==0) ){
                .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=6,action,cost=0)
                stop("submission failed, status code: 6, orderid: ",orderid)
            }
        }
        else{
            ## close short, hold<0, untrade>0
            hold <- sum(.tradingstates$capital[[SHORTHOLDINGS]][.tradingstates$capital$instrumentid==instrumentid])
            nethold <- hold+untrade
            if( (hold==0) | direction==sign(nethold) |
                vol>abs(hold) | vol>abs(nethold) |
                (any(currentinstrument$price==0 & currentinstrument$direction==direction & currentinstrument$action%in%c("close",action)) & price==0) ){
                .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=6,action,cost=0)
                stop("submission failed, status code: 6, orderid: ",orderid)
            }
        }
    }
    ## special requirements when action!=cancel
    ## get most recent orderbook
    mostrecentorderbook <- .INSTRUMENT$orderbook[[instrumentid]]
    ## submit at bid1 or ask1 when price=NULL
    if(is.null(price)){
        price <- ifelse(direction==1,mostrecentorderbook$buybook$price[1],mostrecentorderbook$sellbook$price[1])
    }
    ## working copy, used to update order state
    orders <- .tradingstates$orders
    currentinstrument <- orders[orders$instrumentid==instrumentid,]
    if(orderid%in%currentinstrument$orderid){
        stop("orderid already exists!")
    }
    ## the row every action branch appends (identical in all four branches,
    ## previously duplicated inline)
    newrow <- data.frame(instrumentid=instrumentid,orderid=orderid,direction=direction,price=price,hands=hands,action=action,initialhands=hands,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,submitstart=tradetime,stringsAsFactors=FALSE)
    ## queue the order: append the row, register limit-price priority,
    ## persist state and write history; returns status code 3
    .enqueue <- function(){
        orders <- rbind(orders,newrow)
        if(price>0){
            .priororders(mostrecentorderbook = mostrecentorderbook,orderid = orderid,direction = direction,price=price)
        }
        .tradingstates$orders <- orders
        .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=3,action,cost=0)
        return(3)
    }
    if(action=="open"){
        ## only one market order is allowed in each position
        if(any(currentinstrument$price==0 & currentinstrument$direction==direction & currentinstrument$action=="open") & price==0){
            .writeorderhistory(instrumentid,orderid,direction,hands,price,tradeprice=0,status=6,action,cost=0)
            stop(6)
        }
        return(.enqueue())
    }
    else if(action=="close"){
        ## queued, not-yet-traded closes of any kind count against holdings
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closepreday","closetoday")])*direction #untrade(long)<0, untrade(short)>0
        .sucker("totallongholdings","totalshortholdings")
        return(.enqueue())
    }
    else if(action=="closetoday"){
        ## queued closes against today's holdings
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closetoday")])*direction
        .sucker("longholdingstoday","shortholdingstoday")
        return(.enqueue())
    }
    else{
        ## closepreday: queued closes against previous-day holdings
        untrade <- sum(currentinstrument$hands[currentinstrument$direction==direction & currentinstrument$action%in%c("close","closepreday")])*direction
        .sucker("longholdingspreday","shortholdingspreday")
        return(.enqueue())
    }
}
##' @title multisubmission
##'
##' @description submit multiple orders, a simple wrapper of ordersubmission.
##' instrumentid, direction, price, hands and action must be of length one or
##' the same length with the number of orders; orderid must be of length zero
##' or the same length with the number of orders!
##' @seealso \link{ordersubmission} \link{timeoutchasesubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier
##' @param orderid character, if length(orderid)==0 (default), multisubmission
##' will generate a random id for each order
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifying order price, default NULL.
##' NOTE: when price=0, ordersubmission will submit a market order; when
##' price=NULL, ordersubmission() will take the corresponding bid1 or ask1
##' price as order price.
##' @param hands integer, specifying hands to be submitted.
##' @param action character, action can take value from one of "open","close",
##' "closetoday","closepreday" and "cancel". hands submitted in action='close'
##' can not be greater than the sum of current holdings and queuing open hands.
##' @param timeoutlist logical, specyfing wether to give current order a
##' timeout interval, the length of the interval is specified by timeoutsleep.
##' if the order hasn't been executed after a time interval greater than
##' timeoutsleep, the order will be canceled.
##' @param timeoutchase logical, indicating whether to chase order when timeout.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @param chaselist logical, specifying wether to put this order to
##' auto-chase list. if the order hasn' been executed for a time inverval
##' longer than chasesleep, the simulator will cancel this order(if needed),
##' then submit a new one with the sampe hands and a price equal to the
##' bid1/ask1 price. the simulator will repeat this action until the original
##' submitted amount is executed.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an one hand long open order at each bid price of TF1512.
##' multisubmission(instrumentid="TF1512",orderid=NULL,direction=1,
##' price=orderbook$buybook$price,hands=1,action='open')
##' }
##' @export
multisubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=NULL,hands=1,action="open",timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1){
    ## Submit multiple orders: assemble one data.frame row per order, then
    ## delegate each row to ordersubmission(). Scalar arguments are recycled
    ## by data.frame(); an incompatible length surfaces as the error below.
    lengthmsg <- "instrumentid, direction, price, hands action timeoutlist, timeoutchase, timeoutsleep, chaselist and chasesleep must be of length one or the same length with the number of orders!! orderid must be of length zero or the same length with the number of orders!"
    tryCatch(expr={
        if(is.null(price)){
            ## no price column at all: in the loop below, orders$price[i]
            ## evaluates to NULL, so ordersubmission() falls back to bid1/ask1
            if(length(orderid)==0){
                orders <- data.frame(instrumentid=instrumentid,direction=direction,hands=hands,action=action,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,stringsAsFactors = FALSE)
                ## one random id per order (vapply replaces the old
                ## grow-with-c() loop)
                orders$orderid <- vapply(seq_len(nrow(orders)),function(i) randomid(5),character(1))
            }
            else{
                orders <- data.frame(instrumentid=instrumentid,orderid=orderid,direction=direction,hands=hands,action=action,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,stringsAsFactors = FALSE)
            }
        }
        else{
            ## price supplied explicitly
            if(length(orderid)==0){
                orders <- data.frame(instrumentid=instrumentid,direction=direction,price=price,hands=hands,action=action,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,stringsAsFactors = FALSE)
                orders$orderid <- vapply(seq_len(nrow(orders)),function(i) randomid(5),character(1))
            }
            else{
                orders <- data.frame(instrumentid=instrumentid,orderid=orderid,direction=direction,price=price,hands=hands,action=action,timeoutlist=timeoutlist,timeoutchase=timeoutchase,timeoutsleep=timeoutsleep,chaselist=chaselist,chasesleep=chasesleep,stringsAsFactors = FALSE)
            }
        }
    },
    warning=function(w){stop(lengthmsg)},
    error=function(e){stop(lengthmsg)})
    ## submit each assembled order in turn
    for(i in seq_len(nrow(orders))){
        ordersubmission(instrumentid = orders$instrumentid[i],
                        orderid = orders$orderid[i],direction = orders$direction[i],
                        price=orders$price[i],hands = orders$hands[i],action = orders$action[i],
                        timeoutlist=orders$timeoutlist[i],
                        timeoutchase=orders$timeoutchase[i],
                        timeoutsleep=orders$timeoutsleep[i],
                        chaselist=orders$chaselist[i],
                        chasesleep=orders$chasesleep[i])
    }
    return()
}
##' @title timeoutsubmission
##'
##' @description submit an order with timeout checking. The order will be
##' canceled when it hasn't been executed for a duration longer than
##' timeoutsleep
##'
##' @details timeoutsubmission is a wrapper of ordersubmission, it act the same
##' as ordersubmission(...,timeoutlist=TRUE,chaselist=FALSE)
##' @seealso \link{multisubmission} \link{timeoutchasesubmission} \link{ordersubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutsleep numeric, specifying the timeout inverval in seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## cancel the order if it's not executed in the next 10 seconds
##' timeoutsubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open".
##' timeoutsleep=10)
##' }
##' @export
timeoutsubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutsleep=1){
    ## Thin wrapper around ordersubmission() with timeout checking enabled:
    ## the order is canceled if still unexecuted after `timeoutsleep` seconds.
    if(missing(timeoutsleep)){
        warning("'timeoutsleep' not found! set to 1")
    }
    ordersubmission(
        instrumentid = instrumentid,
        orderid      = orderid,
        direction    = direction,
        price        = price,
        hands        = hands,
        action       = action,
        timeoutlist  = TRUE,
        timeoutsleep = timeoutsleep
    )
    return()
}
##' @title chasesubmission
##'
##' @description chase bid1 or ask1. after every 'chasesleep' seconds,
##' simulator will check wether current order's price equals to bid1 or
##' ask1 price, if not, order chaser will replace it with a new one satisfying
##' the price condition.
##' @details chasesubmission is a wrapper of ordersubmission, it act the same
##' as ordersubmission(...,timeoutlist=FALSE,chaselist=TRUE).
##' @seealso \link{multisubmission} \link{timeoutchasesubmission}
##' \link{ordersubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifiying order pirce.NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## chase bid1 price if it's not executed in the next 10 seconds
##' ## recheck the price condition every 10 seconds.
##' chasesubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open".
##' chasesleep=10)
##' }
##' @export
chasesubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",chasesleep=1){
    ## Thin wrapper around ordersubmission() with bid1/ask1 chasing enabled:
    ## every `chasesleep` seconds an unexecuted order is re-priced at the top
    ## of the book until the submitted amount is filled.
    if(missing(chasesleep)){
        warning("'chasesleep' not found! set to 1")
    }
    ordersubmission(
        instrumentid = instrumentid,
        orderid      = orderid,
        direction    = direction,
        price        = price,
        hands        = hands,
        action       = action,
        chaselist    = TRUE,
        chasesleep   = chasesleep
    )
    return()
}
##' @title timeoutchasesubmission
##' @description submit an order with timeout checking, chase bid1 or ask1.
##' price to execute it when timeout. type ?ordersumission, ?timeoutsubmission
##' and ?chasesubmission for more information.
##' @details timeoutchaseubmission is a wrapper of ordersubmission, it act the
##' same as ordersubmission(...,timeoutlist=TRUE,chaselist=TRUE)
##' @seealso \link{multisubmission} \link{ordersubmission}
##' \link{timeoutsubmission} \link{chasesubmission}
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifying order price. NOTE: when price=0,
##' ordersubmission() will submit a market order; when price=NULL,
##' ordersubmission() will take the corresponding bid1 or ask1 price as
##' submitted price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @param timeoutsleep numeric, specifying the timeout interval in seconds.
##' @param chasesleep numeric, specifying the time interval between each
##' execution check. In seconds.
##' @return order status code.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## chase bid1 price if it's not executed in the next 5 seconds
##' ## recheck the price condition every 10 seconds.
##' timeoutchasesubmission(instrumentid="TF1603",orderid=randomid(5),
##' direction=1,price=99,hands=1,action="open",
##' timeoutsleep=5,
##' chasesleep=10)
##' }
##' @export
timeoutchasesubmission <- function(instrumentid="qtid",orderid=NULL,direction=1,price=0,hands=1,action="open",timeoutsleep=1,chasesleep=1){
    ## warn when the caller relies on the default timing parameters
    if(missing(timeoutsleep)){
        warning("'timeoutsleep' not found! set to 1")
    }
    if(missing(chasesleep)){
        warning("'chasesleep' not found! set to 1")
    }
    ## delegate to ordersubmission with both timeout checking and price
    ## chasing switched on
    ordersubmission(instrumentid=instrumentid,orderid=orderid,
                    direction=direction,price=price,hands=hands,action=action,
                    timeoutlist=TRUE,timeoutchase=TRUE,
                    timeoutsleep=timeoutsleep,chasesleep=chasesleep)
    return()
}
##' @title meanopen
##' @description calculate unclosed orders' mean open price for a specific instrument
##' and holdings side.
##' @details meanopen will calculate mean price according to following rules: 1. earlier open orders are prior to be closed. 2. return volume weighted mean of unclosed order's transaction price.
##' @param instrumentid character, instrument identifier.
##' @param side character, "long" or "short", specifying holdings's side.
##' @return numeric, mean open price.
##' @examples
##' \dontrun{
##' ## check long holdings' mean open price of TF1603
##' meanopen("TF1603","long")
##' }
##' @export
meanopen <- function(instrumentid=character(),side="long"){
    ## BUG FIX: match.arg()'s return value must be captured -- it resolves
    ## partial matches (e.g. side="l" -> "long"); the original discarded it,
    ## so an abbreviated "long" silently fell through to the short branch.
    side <- match.arg(side,c("long","short"))
    ## pick the unclosed-order table for the requested side
    if(side=="long"){
        unclosed <- .tradingstates$unclosedlong
    }
    else{
        unclosed <- .tradingstates$unclosedshort
    }
    IDX <- unclosed$instrumentid==instrumentid
    ## no unclosed orders for this instrument: there is no mean open price
    if(nrow(unclosed[IDX,])==0){
        return(NULL)
    }
    ## volume weighted mean of the unclosed orders' transaction prices
    sum(unclosed$tradeprice[IDX]*unclosed$tradehands[IDX])/sum(unclosed$tradehands[IDX])
}
##' @title holdingsprofit
##' @description calculate unclosed holdings' dynamic profit. require
##' setting unclosed=TRUE in HFTsimulator.
##' total_profit = holdings_profit + closed_profit
##' @details
##' long holdings' dynamic profit = holdings * (last_price - mean_open_price),
##' short holdings' dynamic profit = holdings * (mean_open_price - lastprice).
##' @seealso \link{HFTsimulator} \link{meanopen} \link{closedprofit}
##' @param instrumentid character, instrument identifier.
##' @param side character, "long" or "short", specifying holdings's side.
##' @return numeric, holdings profit.
##' @examples
##' \dontrun{
##' ## get longholding's profit of TF1603
##' holdingsprofit("TF1603","long")
##' }
##' @export
holdingsprofit <- function(instrumentid=character(),side="long"){
    ## mean open price of unclosed orders; NULL means nothing is held
    MEANOPEN <- meanopen(instrumentid,side)
    if(is.null(MEANOPEN)){
        return(0)
    }
    lastprice <- .INSTRUMENT$lastprice[[instrumentid]]
    multiplier <- .INSTRUMENT$multiplier[[instrumentid]]
    IDX <- .tradingstates$capital$instrumentid==instrumentid
    ## plain if/else instead of scalar ifelse(): idiomatic for a scalar
    ## condition and avoids ifelse()'s attribute stripping
    if(side=="long"){
        HOLDINGS <- .tradingstates$capital$totallongholdings[IDX]
    }
    else{
        HOLDINGS <- .tradingstates$capital$totalshortholdings[IDX]
    }
    ## short holdings are stored as negative numbers, so this one formula
    ## also yields holdings*(mean_open - last) on the short side
    HOLDINGS*(lastprice-MEANOPEN)*multiplier
}
##' @title closed profit
##' @description calculate closed profit. require setting closed=TRUE in
##' HFTsimulator.
##' @details closed profit is the most recent cash when all holdings are
##' equal to zero. total_profit = holdings_profit + closed_profit.
##' @seealso \link{HFTsimulator} \link{holdingsprofit}
##' @param instrumentid character, instrument identifier
##' @return numeric, closed profit
##' @examples
##' \dontrun{
##' ## get closed profit of TF1603
##' closedprofit("TF1603")
##' }
##' @export
closedprofit <- function(instrumentid){
    ## cash recorded the last time this instrument's holdings were all zero
    tracker <- .tradingstates$closedtracker
    tracker$cash[tracker$instrumentid==instrumentid]
}
##' @title randomid
##' @description generate a random order id
##' @param n number of chars
##' @return character, order id
##' @examples
##' \dontrun{
##' ## generate a 5 characters' order id
##' randomid(5)
##' }
##' @importFrom stats runif
##' @export
randomid <- function(n){
    ## n random lowercase letters; sample() is the idiomatic (and exactly
    ## uniform) replacement for indexing letters by ceiling(runif(n, 0, 26))
    paste(sample(letters,n,replace = TRUE),collapse = "")
}
##' @title isnewday
##' @description check if current instrument's data comes from a new day.
##' @param instrumentid character, instrument identifier, unique.
##' @return logical, indicating whether current data comes from a new trading day.
##' @export
isnewday <- function(instrumentid){
    ## per-instrument new-day flag maintained by the simulator's data loop
    .tradingstates$startoftheday[instrumentid]
}
##' @title perfectexecution
##' @description execute an order immediately at a specified price, without
##' going through the simulation system. Can be used to compare a simulated
##' strategy with a perfect execution scenario.
##' @param instrumentid character, instrument identifier.
##' @param orderid character, specifying an unique order id, can be generated
##' by randomid().
##' @param direction integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param price numeric, specifying order price.
##' @param hands integer, specifying amount to be submitted.
##' @param action character, specifying submit action, action can take value
##' from one of "open","close","closetoday","closepreday" and "cancel". amount
##' submitted in action='close' can not be greater than the sum of current
##' holdings and queuing open hands.
##' @return nothing.
##' @examples
##' \dontrun{
##' ## submit an open order, buy 1 hand of TF1603 at price 99
##' ## the order will be executed immediately at price 99
##' perfectexecution(instrumentid="TF1603",orderid='xxx',
##' direction=1,price=99,hands=1,action="open")
##' }
##' @importFrom methods is
##' @export
perfectexecution<-function(instrumentid,orderid="xxx",direction,price,hands,action){
    ## timestamp every fill with the simulator's current trade time
    tradetime=.tradingstates$currenttradetime
    ## argument validation; each argument may describe several orders
    if(any(hands<=0)) stop("hands must be greater than zero!")
    if(is(direction,"character") || any(!direction%in%c(-1,1))) stop("direction must be numeric or integer of value 1 or -1!")
    if(any(price<=0)) stop("price must be greater than 0!")
    if(any(!action%in%c("open","close"))) stop("action can only be open or close!")
    ## multiple orders: rely on data.frame() recycling; incompatible
    ## lengths raise and are converted into a descriptive error
    tryCatch(orders <- data.frame(instrumentid=instrumentid,direction=direction,price=price,hands=hands,action=action,stringsAsFactors = FALSE),
             warning=function(w){stop("instrumentid, direction, price, hands and action must be of length one or the same length with the number of orders!!")},
             error=function(e){stop("instrumentid, direction, price, hands and action must be of length one or the same length with the number of orders!!")})
    for(i in seq_len(nrow(orders))){
        ## BUG FIX: contract parameters must be looked up per order; the
        ## original used the raw 'instrumentid' argument, which is wrong
        ## (or errors) when several different instruments are submitted.
        fee <- .INSTRUMENT$fee[[orders$instrumentid[i]]]
        closeprior <- .INSTRUMENT$closeprior[[orders$instrumentid[i]]]
        multiplier <- .INSTRUMENT$multiplier[[orders$instrumentid[i]]]
        ## placeholder row carrying the initial hands for the bookkeeping
        ## helpers below. NOTE(review): this overwrites the whole queuing
        ## orders table -- presumably acceptable in the perfect-execution
        ## scenario, confirm before reusing alongside live queuing orders.
        id <- randomid(5)
        .tradingstates$orders <- data.frame(instrumentid="someinstrument",orderid=id,direction=0,price=0,hands=0,action="someaction",initialhands=orders$hands[i],timeoutlist=FALSE,timeoutchase=FALSE,timeoutsleep=1,chaselist=FALSE,chasesleep=1,submitstart=tradetime,stringsAsFactors=FALSE)
        ## settle capital and write all histories at the submitted price
        cost <- .updatecapital(orders$instrumentid[i],orders$direction[i],orders$hands[i],orders$action[i],orders$price[i],fee,closeprior,multiplier)
        .writecapitalhistory(instrumentid=orders$instrumentid[i],tradeprice=orders$price[i],tradehands=orders$hands[i],cost=cost)
        .writeorderhistory(instrumentid=orders$instrumentid[i],orderid=id,direction=orders$direction[i],hands=0,price=orders$price[i],tradeprice=orders$price[i],status=0,action=orders$action[i],cost=cost)
        .writetraded(orders$instrumentid[i],id,orders$action[i],orders$direction[i],orders$hands[i],orders$price[i])
        .trackclosed(orders$instrumentid[i],orders$action[i],orders$direction[i],orders$hands[i],orders$price[i],multiplier)
        .trackunclosed(orders$instrumentid[i],id,orders$action[i],orders$direction[i],orders$hands[i],orders$price[i])
    }
}
##' @title closeall
##' @description close all holdings of a specific instrument, if close price is
##' not specified, the holdings will be closed with market orders.
##' @seealso \link{chasecloseall}
##' @param instrumentid character, specyfing instrument to be closed.
##' @param price numeric, specyfing limit close order's price, if NULL,
##' simulator will close the holdings with market orders.
##' @details closeall can only close one instrument at a time
##' @return nothing
##' @export
closeall <- function(instrumentid="qtid",price=NULL){
    ## validate before querying: closeall handles exactly one instrument
    if(length(instrumentid)>1){
        stop("close more than one instruments!")
    }
    capital <- querycapital(instrumentids = instrumentid)
    if(nrow(capital)==0){
        warning(paste(instrumentid,"not found!"))
        return()
    }
    ## nothing to do when there is neither a long nor a short position
    ## (long holdings are stored >= 0, short holdings <= 0)
    if(capital$totallongholdings<=0 && capital$totalshortholdings>=0){
        print("no holdings to be closed")
        return()
    }
    ## BUG FIX: honour the documented 'price' argument; the original
    ## ignored it and always submitted market orders (price = 0)
    closeprice <- if(is.null(price)) 0 else price
    if(capital$totallongholdings!=0)
        ordersubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = -1,price = closeprice,hands=capital$totallongholdings,action = "close")
    if(capital$totalshortholdings!=0)
        ordersubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = 1,price = closeprice,hands= -capital$totalshortholdings,action = "close")
    return()
}
##' @title cancelall
##' @description cancel all satisfied orders
##' @details cancelall will cancel all orders satisfying user specified
##' filter conditions, a filter won't be considered when it is NULL.
##' @seealso \link{replaceall}
##' @param instrumentid character, specifying a filter for instrument
##' identifiers.
##' @param direction integer, specifying a filter for trading directions.
##' 1 for long and -1 for short.
##' @param action character, specifying a filter for actions, can take value
##' from one of "open","close","closetoday","closepreday"
##' @param pricemin numeric, specifying a filter for price lower limit.
##' @param pricemax numeric, specifying a filter for price upper limit.
##' @param orderid character, specifying the set of orderids to be canceled.
##' NOTE: if orderid is not null, cancelall will disregard any other filters
##' and cancel orders only by orderid.
##' @return nothing
##' @examples
##' \dontrun{
##' ## cancel all orders satisfy direction==-1
##' cancelall(direction=-1)
##' }
##' @export
cancelall <- function(instrumentid=NULL,direction=NULL,action=NULL,pricemin=NULL,pricemax=NULL,orderid=NULL){
    ## work on a snapshot of the current queuing orders
    pending <- .tradingstates$orders
    if(nrow(pending)==0){
        return()
    }
    if(!is.null(orderid)){
        ## an explicit orderid list overrides every other filter
        pending <- pending[pending$orderid%in%orderid,]
    }
    else{
        ## apply each filter only when the caller supplied it
        if(!is.null(instrumentid)){
            pending <- pending[pending$instrumentid%in%instrumentid,]
        }
        if(!is.null(direction)){
            pending <- pending[pending$direction==direction,]
        }
        if(!is.null(action)){
            pending <- pending[pending$action%in%action,]
        }
        if(!is.null(pricemin)){
            pending <- pending[pending$price>=pricemin,]
        }
        if(!is.null(pricemax)){
            pending <- pending[pending$price<=pricemax,]
        }
    }
    if(nrow(pending)==0){
        return()
    }
    ## cancel every order that survived the filters
    for(k in seq_len(nrow(pending))){
        ordersubmission(instrumentid = pending$instrumentid[k],orderid = pending$orderid[k],action = "cancel")
    }
    return()
}
##' @title replaceall
##' @description replace all satisfied orders with one new one, which has a new
##' price and a hands equal to the cumulated hands of orders replaced.
##' @seealso \link{cancelall}
##' @param instrumentid character, specifying a filter for instrument
##' identifiers.
##' @param direction integer, specifying a filter for trading directions.
##' 1 for long and -1 for short.
##' @param action character, specifying a filter for actions, can take value
##' from one of "open","close","closetoday","closepreday"
##' @param pricemin numeric, specifying a filter for price lower limit.
##' @param pricemax numeric, specifying a filter for price upper limit.
##' @param newprice numeric, new order price, will replace with a market order
##' when newprice=0.
##' @return nothing
##' @examples
##'\dontrun{
##' ## find all orders satisfy direction==-1 and action=='open' and
##' ## price <=101, replace them with a new order with price 100.01.
##' replaceall(tradetime,"TF1512",direction=-1,action='open',
##' pricemax=101,newprice=100.01)
##' }
##' @export
replaceall <- function(instrumentid=NULL,direction=NULL,action=NULL,pricemin=NULL,pricemax=NULL,newprice=NULL){
    ## snapshot of the queuing orders to be replaced
    orders <- .tradingstates$orders
    if(nrow(orders)==0){
        print("no orders to replace")
        return()
    }
    ## instrumentid/direction/action/newprice are mandatory; use the
    ## short-circuit || since every condition here is a scalar
    if(is.null(instrumentid) || is.null(direction) || is.null(action) || is.null(newprice)){
        stop("instrumentid, direction, action and newprice can not be NULL!")
    }
    orders <- orders[orders$instrumentid%in%instrumentid &
                     orders$direction==direction &
                     orders$action%in%action,]
    ## optional price-range filters
    if(!is.null(pricemin)){
        orders <- orders[orders$price>=pricemin,]
    }
    if(!is.null(pricemax)){
        orders <- orders[orders$price<=pricemax,]
    }
    if(nrow(orders)==0){
        print("no orders to replace")
        return()
    }
    ## cancel the matched orders ...
    for(i in seq_along(orders$orderid)){
        ordersubmission(instrumentid = orders$instrumentid[i],orderid = orders$orderid[i],action = "cancel")
    }
    ## ... and submit one replacement carrying their cumulated hands
    ordersubmission(instrumentid = instrumentid,orderid = randomid(5),direction=direction,price=newprice,hands=sum(orders$hands),action = action)
    return()
}
##' @title lazysubmission
##' @description submit a target holding, simulator will cancel all irrelevant
##' orders and chase bid1 or ask1 price automatically until the target holding
##' is achieved. This function can only be used when set tc=TRUE in HFTsimulator.
##' @seealso \link{HFTsimulator}
##' @param instrumentid character, instrument identifier
##' @param longholding integer, specifying target long holdings of
##' 'instrumentid', longholding must be greater than or equal to 0.
##' @param shortholding integer, specifying target short holdings of
##' 'instrumentid', shortholding must be less than or equal to 0.
##' @return nothing
##' @examples
##'\dontrun{
##' lazysubmission("TF1512",longholding=5,shortholding=-3)
##' }
##' @export
lazysubmission <- function(instrumentid,longholding=NULL,shortholding=NULL){
    tradetime=.tradingstates$currenttradetime
    ## the trade center is the background worker that works the holdings
    ## toward the target; it must have been enabled at initialization.
    ## (error message typo fixed: "pleas" -> "please")
    if(!.tradingstates$tc){
        stop("lazysubmission: trade center not enabled! please set tc=TRUE at initialization")
    }
    ## update only the sides the caller supplied
    if(!is.null(longholding)){
        .tradingstates$th$longholding[.tradingstates$th$instrumentid==instrumentid] <- longholding
    }
    if(!is.null(shortholding)){
        .tradingstates$th$shortholding[.tradingstates$th$instrumentid==instrumentid] <- shortholding
    }
    ## flag the change and trigger the trade center immediately
    .tradingstates$justchanged[instrumentid] <- TRUE
    .tradingstates$lastchange[instrumentid] <- tradetime
    .tradecenter(instrumentid)
}
##' @title submitmultilevelopen
##' @description submit multiple open orders while cancel all other orders
##' satisfying the cancel conditions, cancel conditions are specified by
##' cancelallother, cancelprime, cancelsub and cancelnotinthebook.
##' @seealso \link{multisubmission} \link{cancelall}
##' @param instrumentid character, instrument identifier.
##' @param LEVELS integer, specifying postions in order book. Orders will be
##' submmited to these positions.
##' @param hands integer, specifying amount to be submitted.
##' @param DIRECTION integer, specifying trading direction. 1 for long,
##' -1 for short.
##' @param cancelallother logical, indicating whether or not to cancel all
##' other orders that are in the order book but with different prices.
##' @param cancelprime cancel all orders with higher priority price
##' @param cancelsub cancel all orders with lower priority price
##' @param cancelnotinthebook cancel orders not in orderbook
##' @return nothing.
##' @importFrom stats na.omit
##' @export
submitmultilevelopen <- function(instrumentid,LEVELS=c(1,2),hands=1,cancelallother=FALSE,cancelprime=FALSE,cancelsub=FALSE,DIRECTION=1,cancelnotinthebook=FALSE){
    ## current queuing limit orders (price != 0) on the requested side
    LIMITS <- .tradingstates$orders[.tradingstates$orders$price!=0&.tradingstates$orders$direction==DIRECTION,]
    ## pick the order-book side matching DIRECTION (1 = buy, else sell)
    if(DIRECTION==1){
        orderbook <- .INSTRUMENT$orderbook[[instrumentid]]$buybook
    }
    else{
        orderbook <- .INSTRUMENT$orderbook[[instrumentid]]$sellbook
    }
    if(nrow(LIMITS)!=0){
        ## idx[i] = book level of the i-th queuing order's price,
        ## NA when that price is no longer present in the book
        idx <- match(LIMITS$price,orderbook$price)
        ## 0. cancel orders not in the book
        if(cancelnotinthebook){
            if(any(is.na(idx))){
                cancelall(orderid = LIMITS$orderid[is.na(idx)])
            }
        }
        ## 1. conditional cancel and open
        if(any(!is.na(idx))){
            ## keep LIMITS and idx aligned: both are subset together
            LIMITS <- LIMITS[!is.na(idx),]
            idx <- na.omit(idx)
            ## 1.1 cancel -- the three cancel modes are mutually exclusive,
            ## checked in priority order allother > prime > sub
            if(cancelallother){
                ## cancel every order whose level is not one of LEVELS
                allother <- !(idx%in%LEVELS)
                if(any(allother)){
                    cancelall(orderid = LIMITS$orderid[allother])
                }
            }
            else if(cancelprime){
                ## cancel orders at levels closer to the top than min(LEVELS)
                primeorders <- idx<min(LEVELS)
                if(any(primeorders)){
                    cancelall(orderid = LIMITS$orderid[primeorders])
                }
            }
            else if(cancelsub){
                ## cancel orders at levels deeper than max(LEVELS)
                suborders <- idx>max(LEVELS)
                if(any(suborders)){
                    cancelall(orderid = LIMITS$orderid[suborders])
                }
            }
            ## 1.2 open: submit fresh orders only at levels not occupied yet
            neworders <- !(LEVELS%in%idx)
            if(any(neworders)){
                multisubmission(instrumentid=instrumentid,direction = DIRECTION,price = orderbook$price[LEVELS[neworders]],hands = hands,action = "open")
            }
        }
    }
    else{
        ## no queuing limit orders at all: open at every requested level
        multisubmission(instrumentid=instrumentid,direction = DIRECTION,price = orderbook$price[LEVELS],hands = hands,action = "open")
    }
}
##' @title chasecloseall
##' @description chase close all holdings of a specific instrument.
##' @seealso \link{closeall}
##' @details chasecloseall can only close one instrument at a time, simulator
##' will recheck if the order price is equal to current bid1 or ask1 price every
##' chasesleep seconds, if not, simulator will cancel it and submit a new one.
##' This action will be repeated until all specified holdings are executed.
##' @param instrumentid character, specyfing instrument to be closed.
##' @param chasesleep numeric, specyfing order chasing interval.
##' @return nothing
##' @export
chasecloseall <- function(instrumentid,chasesleep=1){
    cap <- .tradingstates$capital
    ords <- .tradingstates$orders
    ## current holdings on each side
    LH <- cap$totallongholdings[cap$instrumentid==instrumentid]
    SH <- cap$totalshortholdings[cap$instrumentid==instrumentid]
    ## hands already queuing to close each side: direction 1 closes the
    ## short side, direction -1 closes the long side
    LC <- sum(ords$hands[ords$instrumentid==instrumentid & ords$direction==1 & ords$action=="close"])
    SC <- sum(ords$hands[ords$instrumentid==instrumentid & ords$direction==-1 & ords$action=="close"])
    orderbook <- .INSTRUMENT$orderbook[[instrumentid]]
    ## chase-close whatever long holdings are not yet covered by queuing closes
    if(LH-SC>0){
        chasesubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = -1,price = orderbook$sellbook$price[1],
                        hands = LH-SC,action = "close",chasesleep = chasesleep)
    }
    ## same for the short side (short holdings are stored negative)
    if((-SH)-LC>0){
        chasesubmission(instrumentid=instrumentid,orderid = randomid(5),
                        direction = 1,price = orderbook$buybook$price[1],
                        hands = (-SH)-LC,action = "close",chasesleep = chasesleep)
    }
}
## market order flow:
## bid1,ask1 : previous bid1 and ask1 prices
## lastprice,volume : current last price and volume
## AGGREGATE: indicating return cumulate value or not
## return a matrix with two columns.
##' @title BSI
##' @description market order flow.
##' @details extract market order flow form give transaction data.
##' @param lastprice last trading price.
##' @param bid1 previous orderbook's bid1 price.
##' @param ask1 previous orderbook's ask1 price.
##' @param volume last trading volume.
##' @param AGGREGATE specifying whether to aggregate all buyer/seller initiated
##' volumes together.
##' @return a matrix of two columns corresponding to buyer and seller initiated
##' order flow.
##' @export
BSI <- function(lastprice,bid1,ask1,volume,AGGREGATE=FALSE){
    ## classify each trade against the previous quote midpoint: above the
    ## mid -> buyer initiated, below -> seller initiated, at the mid the
    ## volume is split half and half between the two sides
    mid <- (bid1+ask1)/2
    if(AGGREGATE){
        ## cumulated buyer/seller initiated volume over the whole vector
        atmid <- sum(volume[lastprice==mid],na.rm = TRUE)/2
        c(BI = sum(volume[lastprice>mid],na.rm = TRUE)+atmid,
          SI = sum(volume[lastprice<mid],na.rm = TRUE)+atmid)
    }
    else{
        ## elementwise flows; which() drops NA comparisons so those
        ## observations keep their raw volume on both sides
        BI <- volume
        SI <- volume
        BI[which(lastprice<mid)] <- 0
        SI[which(lastprice>mid)] <- 0
        half <- which(lastprice==mid)
        if(length(half)>0){
            BI[half] <- volume[half]/2
            SI[half] <- BI[half]
        }
        cbind(BI,SI)
    }
}
## limit order flow:
BSO <- function(orderbook,preorderbook,bsi){
    ## NOTE(review): unimplemented stub. Per the comment above, it is
    ## intended to extract limit order flow from the current and previous
    ## order books together with the market order flow 'bsi', but the body
    ## is empty and the function currently returns NULL -- confirm intended
    ## behavior before relying on it.
}
##' @title S
##' @description shortcut
##' @param instrumentid character, instrument identifier.
##' @param attr name or call
##' @export
S <- function(instrumentid,attr){
    ## accept both S(id, "longopen") and the unquoted form S(id, longopen)
    attr <- substitute(attr)
    if(!is.character(attr)) attr <- deparse(attr)
    orders <- .tradingstates$orders
    capital <- .tradingstates$capital
    ## queuing orders of the given action/direction for this instrument
    pick <- function(act,dir){
        orders[orders$action==act & orders$direction==dir & orders$instrumentid==instrumentid,]
    }
    ## long/short holdings of this instrument
    lh <- capital$totallongholdings[capital$instrumentid==instrumentid]
    sh <- capital$totalshortholdings[capital$instrumentid==instrumentid]
    switch(attr,
           ## BUG FIX: the original hard-coded instrumentid=="a" in these
           ## two arms, so orders.non/orders.exist never reflected the
           ## queried instrument.
           "orders.non" = nrow(orders[orders$instrumentid==instrumentid,])==0,
           "orders.exist" = nrow(orders[orders$instrumentid==instrumentid,])!=0,
           "longopen" = pick("open",1),
           "longopen.non" = nrow(pick("open",1))==0,
           "longopen.exist" = nrow(pick("open",1))!=0,
           "shortopen" = pick("open",-1),
           "shortopen.non" = nrow(pick("open",-1))==0,
           "shortopen.exist" = nrow(pick("open",-1))!=0,
           "longclose" = pick("close",1),
           "longclose.non" = nrow(pick("close",1))==0,
           "longclose.exist" = nrow(pick("close",1))!=0,
           "shortclose" = pick("close",-1),
           "shortclose.non" = nrow(pick("close",-1))==0,
           "shortclose.exist" = nrow(pick("close",-1))!=0,
           ## long holdings are stored >= 0, short holdings <= 0
           "holdings.exist" = lh>0 | sh<0,
           "holdings.non" = lh==0 & sh==0,
           "longholdings.exist" = lh>0,
           "longholdings.non" = lh==0,
           "shortholdings.exist" = sh<0,
           "shortholdings.non" = sh==0
           )
}
##' @title HFTsimulator
##' @description high-frequency trading simulator.
##' @details
##' Initialize simulator states, including simulation back ground
##' functionalities and many ohter simulator related parameters. All
##' states related variables are saved in an environment named
##' '.tradingstates'. Queuing orders and capital state will be saved and
##' kept updated in tradingstates during simulation. There are two improtant
##' data.frames stored in this envrionment, 'orders' and 'capital'. All
##' current queuing orders will be stored as one rows in orders during
##' simulation. if there is no queuing order, orders will be a data.frame
##' with 0 row. each instruments' capital state will be stored as one row in
##' capital. capital has at least one row. \code{queryorder()} and
##' \code{qureycapital()} can be used inside strategy function to fetch orders
##' and capital from .tradingstates.
##' @seealso \link{lazysubmission} \link{cancelall} \link{queryorder}
##' \link{querycapital} \link{meanopen} \link{holdingsprofit}
##' @param stg function, strategy function.
##' @param ... parameters passed to stg.
##' @param datalist data.frame or list, specifying taq data used in the
##' simulation. datalist must be a list of data.frame(s) or a data.frame.
##' @param formatlist list, specifying taq data format, formatlist is either a
##' list of data format specifycation or a list of lists of specifications.
##' @param instrumentids character, spefifying instruments to be traded.
##' @param tc logical, indicating wehter to use a simulated tradecenter. when
##' tc=TRUE, only lazysubmission can be used as submit function in stg. Defalut
##' FALSE.
##' @param Sleep numeric, idle time length of simulated tradecenter, measured
##' in seconds, default 1.
##' @param DIGITSSECS integer, specifying second digits, default 3.
##' @param septraded logical, indicating wether to record traded orders
##' separately.
##' @param unclosed logical, indicating wether to track all unclosed orders,
##' set unclosed=TRUE when you need to calculate mean open price and open
##' profit. Default TRUE.
##' @param closed logical, indicating wether to track all zero holding states,
##' set closed=TRUE when you need to calculate close profit, default TRUE.
##' @param interdaily logical, indicating wether to support interdaily strategies,
##' most of the time MM strategies are appiled in intraday situations,
##' set it to TRUE only when you know exactly what you are doing. Defalut FALSE.
##' @param verboselimitpriors logical, indicating wether to record all prior
##' limit orders' informations. if verboselimitpriors=TRUE, simulator will
##' contatenate all limitpriors in a list named 'verbosepriors'. Default TRUE.
##' @return a list containing all kinds of histories and current states.
##' @importFrom stats runif
##' @importFrom utils setTxtProgressBar txtProgressBar
##' @importFrom methods is
##' @export
HFTsimulator <- function(stg,...,instrumentids,datalist,formatlist,
tc=FALSE,Sleep=1,DIGITSSECS=3,septraded=FALSE,unclosed=TRUE,closed=TRUE,interdaily=FALSE,
verboselimitpriors=TRUE){
## strategy function check
if(!is(stg,"function")){
stop(substitute(stg),"is not a function!")
}
## data check
## put all data in a list, the list is of the same length of instrumetids
if(!is(instrumentids,"character")) stop("instrumentids must be of type character.")
if(is(datalist,"list")){
if(length(instrumentids)!=length(datalist)) stop("length of instrumentids is not equal to length of datalist!")
names(datalist) <- instrumentids #sequence of the datas must be in accordance with instrumentids.
}else if(is(datalist,"data.frame")){
if(length(instrumentids)!=1) stop("unequal length of data and instrumentids")
eval(parse(text = paste("datalist<- list(",instrumentids,"=datalist)",sep = ""))) #convert to list
}else{
stop("datalist must be of type data.frame or list")
}
## data format check
## put all dataformat in a list, the list is of the same length of instrumetids
requiredformat <- c("pbuyhands","pbuyprice","psellhands","psellprice","ptradetime","plastprice","pvolume")
if(all(requiredformat%in%names(formatlist))){
eval(parse(text = paste("formatlist <- list(",paste(paste(instrumentids,"=formatlist"),collapse = ","),")")))
}else if(all(requiredformat%in%names(formatlist[[1]]))){
if(length(formatlist)!=1 & length(formatlist)!=length(instrumentids)) stop("unequal length of formatlist and datalist.")
}else{
stop("missing format specifications in ",substitute(formatlist))
}
cat("Initializing simulator states...")
.CFEupdate <- function(DATA,INSTRUMENTID){
DATA <- unlist(strsplit(paste(DATA,collapse = ","),split = ","))
## extract information
tradetime <<- .extractinfo("tradetime",DATA,ptradetime=.INSTRUMENT$ptradetime[[INSTRUMENTID]],timeformat=.INSTRUMENT$timeformat[[INSTRUMENTID]])
## keep tracking most recent tradetime IMPORTANT
.tradingstates$currenttradetime <- tradetime
## interdaily trading-----------------------------------
if(.tradingstates$interdaily){
## reset instrument trading start indicator
.tradingstates$startoftheday[INSTRUMENTID] <- FALSE
HMOS <- .extractinfo("HMOS",DATA,ptradetime=.INSTRUMENT$ptradetime[[INSTRUMENTID]],timeformat=.INSTRUMENT$timeformat[[INSTRUMENTID]])
.INSTRUMENT$current[[INSTRUMENTID]] <- ifelse(HMOS<=.INSTRUMENT$endoftheday[[INSTRUMENTID]],as.numeric(difftime(HMOS,"1970-01-01 00:00:00.000",units = "secs")+.INSTRUMENT$tomidnight[[INSTRUMENTID]]),as.numeric(difftime(HMOS,.INSTRUMENT$endoftheday[[INSTRUMENTID]],units = "secs")))
## new day condition
if(.INSTRUMENT$current[[INSTRUMENTID]]<.INSTRUMENT$pre[[INSTRUMENTID]]){
## instrument trading start indicator
.tradingstates$startoftheday[INSTRUMENTID] <- TRUE
## reset total volume and orderbook
.INSTRUMENT$pretotalvolume <- .INSTRUMENT$pretotalvolume[names(.INSTRUMENT$pretotalvolume)!=INSTRUMENTID]
.INSTRUMENT$preorderbook <- .INSTRUMENT$preorderbook[names(.INSTRUMENT$preorderbook)!=INSTRUMENTID]
IDX <- .tradingstates$capital$instrumentid==INSTRUMENTID
## move holdings to preholdins
.tradingstates$capital[IDX,c("longholdingspreday","shortholdingspreday")] <- .tradingstates$capital[IDX,c("longholdingspreday","shortholdingspreday")]+.tradingstates$capital[IDX,c("longholdingstoday","shortholdingstoday")]
.tradingstates$capital[IDX,c("longholdingstoday","shortholdingstoday")] <- c(0,0)
## .INSTRUMENT$newday[[INSTRUMENTID]] <- FALSE
}
.INSTRUMENT$pre[[INSTRUMENTID]] <- .INSTRUMENT$current[[INSTRUMENTID]]
}
## interdaily trading-----------------------------------
lastprice <<- .extractinfo("lastprice",DATA,plastprice=.INSTRUMENT$plastprice[[INSTRUMENTID]])
.INSTRUMENT$lastprice[[INSTRUMENTID]] <- lastprice
totalvolume <<- .extractinfo("volume",DATA,pvolume=.INSTRUMENT$pvolume[[INSTRUMENTID]])
if(! INSTRUMENTID%in%names(.INSTRUMENT$pretotalvolume) ){
.INSTRUMENT$pretotalvolume[[INSTRUMENTID]] <- totalvolume
}
volume <<- totalvolume-.INSTRUMENT$pretotalvolume[[INSTRUMENTID]]
orderbook <<- .extractinfo("orderbook",DATA,pbuyhands=.INSTRUMENT$pbuyhands[[INSTRUMENTID]],pbuyprice=.INSTRUMENT$pbuyprice[[INSTRUMENTID]],psellhands=.INSTRUMENT$psellhands[[INSTRUMENTID]],psellprice=.INSTRUMENT$psellprice[[INSTRUMENTID]])
if(! INSTRUMENTID%in%names(.INSTRUMENT$preorderbook) ){
.INSTRUMENT$preorderbook[[INSTRUMENTID]] <- orderbook
}
.INSTRUMENT$orderbook[[INSTRUMENTID]] <- orderbook
preorderbook <<- .INSTRUMENT$preorderbook[[INSTRUMENTID]] #might be useful
## update states
.updateinstrument(instrumentid=INSTRUMENTID,lastprice,volume,orderbook,.INSTRUMENT$preorderbook[[INSTRUMENTID]],.INSTRUMENT$fee[[INSTRUMENTID]],.INSTRUMENT$closeprior[[INSTRUMENTID]],multiplier=.INSTRUMENT$multiplier[[INSTRUMENTID]])
## save as previous values
.INSTRUMENT$pretotalvolume[[INSTRUMENTID]] <- totalvolume
.INSTRUMENT$preorderbook[[INSTRUMENTID]] <- orderbook
## some automatic functions
.timeoutdetector()
.orderchaser()
.tradecenter(INSTRUMENTID)
}
## garbage picker
garbagepicker <- eval(parse(text = deparse(stg)))
## environment settings
options(digits.secs=DIGITSSECS)
options(stringsAsFactors = FALSE)
## initialize simulator state
.tradingstates$tc <- tc #trade-center
.tradingstates$septraded <- septraded
.tradingstates$interdaily <- interdaily #interdaily support
.tradingstates$Sleep <- Sleep #trade-center idle time
.tradingstates$closed <- closed #recored all closed orders
.tradingstates$unclosed <- unclosed #track all unclosed orders
.tradingstates$orders <- data.frame(
instrumentid=character(),
orderid=character(),direction=numeric(),
price=numeric(),hands=numeric(),
action=character(),
initialhands=numeric(),
timeoutlist=logical(), #wether to check timeout
timeoutchase=logical(), #wether to chase after timeout
timeoutsleep=numeric(), #length of timeout,in secs
chaselist=logical(), #wether to chase
chasesleep=numeric(), #length of chase sleep time,secs
submitstart=character(), #chase or timeout start time
stringsAsFactors=FALSE)
.tradingstates$limitprior <- NULL #high prior limit orders
.tradingstates$capital <- data.frame(
instrumentid=character(),
longholdingstoday=numeric(), shortholdingstoday=numeric(),
longholdingspreday=numeric(),shortholdingspreday=numeric(),
totallongholdings=numeric(),totalshortholdings=numeric(),
cash=numeric(),stringsAsFactors=FALSE
)
.tradingstates$th <- data.frame(instrumentid=character(),longholding=numeric(),
shortholding=numeric(),stringsAsFactors = FALSE) #targetholdings required by trade center
.tradingstates$orderhistory <- data.frame(
instrumentid=character(),orderid=character(),
direction=numeric(),price=numeric(),
hands=numeric(),action=character(),
tradetime=character(),tradeprice=numeric(),
cost=numeric(),status=numeric(),
initialhands=numeric(),
stringsAsFactors = FALSE)
.tradingstates$capitalhistory <- data.frame(
instrumentid=character(),
longholdingstoday=numeric(), shortholdingstoday=numeric(),
longholdingspreday=numeric(),shortholdingspreday=numeric(),
totallongholdings=numeric(),totalshortholdings=numeric(),
cash=numeric(),tradetime=character(),
tradeprice=numeric(),tradehands=numeric(),cost=numeric(),
stringsAsFactors=FALSE)
.tradingstates$longopen <- data.frame(
instrumentid=character(),orderid=character(),
action=character(),
direction=numeric(),
tradehands=numeric(),
tradeprice=numeric(),
stringsAsFactors = FALSE)
.tradingstates$shortclose <- .tradingstates$longopen
.tradingstates$shortopen <- .tradingstates$longopen
.tradingstates$shortclose <- .tradingstates$longopen
.tradingstates$currenttradetime <- character() #current time tracker
.tradingstates$startoftheday <- logical() #interdaily
.tradingstates$verbosepriors <- NULL
.tradingstates$justchanged <- NULL
.tradingstates$lastchange <- NULL
.tradingstates$closedtracker <- data.frame(instrumentid=character(),cash=numeric(),stringsAsFactors=FALSE) #closed
.tradingstates$unclosedlong <- .tradingstates$longopen
.tradingstates$unclosedshort <- .tradingstates$longopen
## <<<<<<<<<<<<<<< TO DO >>>>>>>>>>>>>>>
## rearrange data sequence (to support multiple instruments with different data formats)
if(length(formatlist)>=2){
if(any(vapply(2:length(formatlist),function(i){
!identical(formatlist[[i]],formatlist[[i-1]])
},FUN.VALUE = logical(1)))) stop("multiple instruments with different data formats is not supported yet.")
}
## merge all instruments' data to a large data.frame
tags <- rep(instrumentids,times=vapply(datalist,function(d){nrow(d)},FUN.VALUE = numeric(1)))
datalist <- lapply(datalist,function(d){names(d) <- paste("V",1:ncol(d),sep = "");return(d)})
datalist <- do.call(rbind,datalist)
datalist$instrumentid <- tags
datalist <- datalist[order(datalist[,formatlist[[1]]$ptradetime]),] #order by time
## initialize instruments' states
if(length(formatlist)==1 & length(formatlist)!=length(instrumentids)){
formatlist <- rep(formatlist,length(instrumentids))
names(formatlist) <- instrumentids
}
for(instrumentid in instrumentids){
dataformat <- formatlist[[instrumentid]]
if(is.null(dataformat[["fee"]])){
dataformat$fee=c(long=0,short=0,closetoday=0,closepreday=0)
}
if(is.null(dataformat[["closeprior"]])){
dataformat$closeprior = "today"
}
if(is.null(dataformat[["timeformat"]])){
dataformat$timeformat = "%Y-%m-%d %H:%M:%OS"
}
if(is.null(dataformat[["endoftheday"]])){
dataformat$endoftheday="23:59:59.999"
}
if(is.null(dataformat[["multiplier"]])){
dataformat$multiplier=1
}
.initializeinstrument(instrumentid=instrumentid,
pbuyhands=dataformat$pbuyhands,
pbuyprice=dataformat$pbuyprice,
psellhands=dataformat$psellhands,
psellprice=dataformat$psellprice,
ptradetime=dataformat$ptradetime,
plastprice=dataformat$plastprice,
pvolume=dataformat$pvolume,
fee=dataformat$fee,
closeprior=dataformat$closeprior,
timeformat=dataformat$timeformat,
endoftheday=dataformat$endoftheday,
multiplier=dataformat$multiplier)
}
cat("done\n")
pb <- txtProgressBar(min = 1,max = nrow(datalist),style = 3)
## initialize tmp vars
tradetime <- character(1)
lastprice <- numeric(1)
totalvolume <- numeric(1)
volume <- numeric(1)
orderbook <- list()
preorderbook <- list()
## simulation
for(i in 1:nrow(datalist)){
.CFEupdate(DATA = datalist[i,],INSTRUMENTID = datalist[i,"instrumentid"])
garbagepicker(...)
if(verboselimitpriors){
.verboselimitpriors()
}
setTxtProgressBar(pb,i)
}
cat("\n")
invisible(list(orderhistory=.tradingstates$orderhistory,capitalhistory=.tradingstates$capitalhistory,queuingorders=.tradingstates$orders,capital=.tradingstates$capital,verbosepriors=.tradingstates$verbosepriors))
}
#* @apiTitle HTF API
#* @apiDescription RISKLOGICS HIGH TRADING FREQUENCY API
#* @apiContact list(name = "IDRISS OLIVIER BADO", url = "https://github.com/idrissbado", email = "olivier.bado@kyria-cs.com")
#* @apiLicense list(name = "Apache 2.0", url = "https://www.apache.org/licenses/LICENSE-2.0.html")
#* @apiVersion 1.0.1
DemoStrategy <- function() {
  # Example market-making strategy for the HFT simulator. It reads the global
  # tick state maintained by the simulator loop (lastprice, volume, orderbook,
  # preorderbook) and submits timeout-guarded open orders on "TF1603".
  # BSI() splits the tick volume into buyer-initiated (bsi[1]) and
  # seller-initiated (bsi[2]) amounts.
  bsi <- BSI(lastprice = lastprice,
             bid1 = preorderbook$buybook$price[1],
             ask1 = preorderbook$sellbook$price[1],
             volume = volume)
  best_bid <- orderbook$buybook$price[1]
  best_ask <- orderbook$sellbook$price[1]
  spread <- best_ask - best_bid  # current bid-ask spread
  if (spread > 0.01 & bsi[2] < 20 & S("TF1603", longopen.non)) {
    # Wide spread, little seller-initiated flow, and no long-open order in
    # queue: submit a long open at the bid, auto-cancelled after 10 seconds.
    timeoutsubmission(instrumentid = "TF1603", direction = 1, orderid = randomid(5),
                      price = best_bid, hands = 1,
                      action = "open", timeoutsleep = 10)
  } else if (spread > 0.01 & bsi[1] < 20 & S("TF1603", shortopen.non)) {
    # Mirror case: little buyer-initiated flow and no short-open order in
    # queue: submit a short open at the ask, auto-cancelled after 10 seconds.
    timeoutsubmission(instrumentid = "TF1603", direction = -1, orderid = randomid(5),
                      price = best_ask, hands = 1,
                      action = "open", timeoutsleep = 10)
  }
  chasecloseall("TF1603", chasesleep = 1)  # keep closing any open positions
}
#*Plot indicator
#*@param req data
#*@serializer png
#*@post /plotindicator
function(req, sub) {
  # Candlestick chart of the posted OHLC CSV (the plumber png serializer
  # captures the plot). `sub` is accepted but unused; kept for API
  # compatibility with existing callers.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices$date <- ymd(prices$date)  # parse the date column
  prices <- ts(prices)
  chartSeries(prices, type = "candlesticks",
              theme = chartTheme('white'))
}
#*return simple moving average
#*@param req
#*@serializer json
#*@post /SMA
function(req, n) {
  # n-period simple moving average of the close column of the posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  SMA(Cl(prices), as.numeric(n))
}
#*return exponential moving average
#*@param req
#*@param n
#*@post /EMA
function(req, n) {
  # n-period exponential moving average of the close column of the posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  ema <- EMA(Cl(prices), as.numeric(n))
  return(ema)
}
#*return Bollinger Bands
#*@param req
#*@param n
#*@post /BBAND
function(req) {
  # Bollinger Bands (2 standard deviations) on the close column of the
  # posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  BBands(Cl(prices), s.d = 2)
}
#*return momentum
#*@param req
#*@param n
#*@post /momentum
function(req, n) {
  # n-period momentum of the close column of the posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  momentum(Cl(prices), as.numeric(n))
}
#*return rate of change (ROC)
#*@param req
#*@param n
#*@post /ROC
function(req, n) {
  # n-period rate of change of the close column of the posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  ROC(Cl(prices), as.numeric(n))
}
#*return macd
#*@param req
#*@param n
#*@post /macd
function(req) {
  # MACD of the close column with the standard 12/26/9 SMA parameterisation.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  MACD(Cl(prices),
       nFast = 12, nSlow = 26, nSig = 9, maType = SMA)
}
#*return rsi
#*@param req
#*@param n
#*@post /rsi
function(req, n) {
  # n-period relative strength index of the close column of the posted CSV.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  RSI(Cl(prices), as.numeric(n))
}
#* return plotwithSMA
#* @param req
#* @param n
#* @post /plotwithSMA
function(req, n) {
  # Chart the posted series and overlay an n-period SMA on the price panel.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  chartSeries(prices, theme = chartTheme('white'))
  addSMA(as.numeric(n), on = 1, col = "blue")
}
#* return plotwithEMA
#* @param req
#* @param n
#* @post /plotwithEMA
function(req, n) {
  # Chart the posted series and overlay an n-period EMA on the price panel.
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  chartSeries(prices, theme = chartTheme('white'))
  addEMA(as.numeric(n), on = 1, col = "blue")
}
#* add indicator utilised
#* @param req
#* @serializer csv
#* return plotwithSMA
#* @param req
#* @param n
#* @post /plotwithSMA
function(req,n){
  # Chart the posted OHLC CSV and overlay an n-period SMA on the price panel.
  # NOTE(review): this handler is an exact duplicate of the /plotwithSMA
  # endpoint defined earlier in this file, and the annotation block above it
  # is inconsistent (a csv serializer declared for a plotting body, plus a
  # second @post tag). plumber will see the same route twice — confirm which
  # copy should be kept and what the intended serializer is.
data<-read.csv(text=req$postBody,header=TRUE,sep=",")
names(data)<-c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
data<-ts(data)
chartSeries(data,
theme=chartTheme('white'))
addSMA(as.numeric(n),on=1,col = "blue")
}
#* return plotwithRSI
#* @param req
#* @param n
#*@serializer png
#* @post /plotwithRSI
function(req, n) {
  # Chart the posted series and add an n-period RSI sub-panel (EMA-smoothed).
  prices <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  colnames(prices) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  prices <- ts(prices)
  chartSeries(prices, theme = chartTheme('white'))
  addRSI(as.numeric(n), maType = "EMA")
}
#*returnplotTA
#*@param req
#*@param n
#*@serializer png
#*@post /plotTA
function(req, n) {
  # Chart the posted series with an n-period SMA drawn via addTA().
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  names(data) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  data <- ts(data)
  sma <- SMA(Cl(data), as.numeric(n))
  # Fix: addTA() draws onto the *current* quantmod chart; without a prior
  # chartSeries() call there is no active chart object in this fresh request
  # context, so the endpoint produced no plot. Chart first (consistent with
  # the other plot* endpoints in this file), then add the TA overlay.
  chartSeries(data, theme = chartTheme('white'))
  addTA(sma, on = 1, col = "red")
}
#* return trading size
#*@param Wealth: value
#*@param qty Trade unit: qty stocks per trade
#Test the following strategy based on day RSI :
#*@param upper1 :Buy one more unit if RSI <upper1
#*@param upper2 :Keep buying the same if upper1< RSI < upper2
#Stop trading if RSI >= upper2
#*@serializer png
#*@post /tradingsize
function(qty, day, req, upper1, upper2, value) {
  # RSI-based position-sizing backtest (png serializer: responds with the
  # charts.PerformanceSummary plot).
  #   qty    - units traded per signal step
  #   day    - RSI look-back period (in bars)
  #   upper1 - add one unit while RSI < upper1
  #   upper2 - hold the position while upper1 <= RSI < upper2; flat otherwise
  #   value  - starting wealth
  qty <- as.numeric(qty)
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  names(data) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  data <- ts(data)
  day <- as.numeric(day)
  lo <- as.numeric(upper1)  # hoisted: thresholds are loop invariants
  hi <- as.numeric(upper2)
  signal <- c()             # position size per bar
  signal[1:(day + 1)] <- 0
  price <- Cl(data)
  wealth <- c()
  wealth[1:(day + 1)] <- as.numeric(value)
  # Fix: the per-bar return vector was named `return`, shadowing
  # base::return(); renamed to `ret` (purely internal, output unchanged).
  ret <- c()
  ret[1:(day + 1)] <- 0
  profit <- c()
  profit[1:(day + 1)] <- 0
  # Sized trading signal from the day-period RSI.
  rsi <- RSI(price, day)
  for (i in (day + 1):length(price)) {
    if (rsi[i] < lo) {                 # accumulate one more unit
      signal[i] <- signal[i - 1] + 1
    } else if (rsi[i] < hi) {          # hold current size
      signal[i] <- signal[i - 1]
    } else {                           # go flat
      signal[i] <- 0
    }
  }
  signal <- reclass(signal, price)
  # Trade rule: act on the previous bar's signal (Lag) against the
  # open-to-close move of the current bar.
  Close <- Cl(data)
  Open <- Op(data)
  trade <- Lag(signal)
  for (i in (day + 1):length(price)) {
    profit[i] <- qty * trade[i] * (Close[i] - Open[i])
    wealth[i] <- wealth[i - 1] + profit[i]
    ret[i] <- (wealth[i] / wealth[i - 1]) - 1
  }
  ret3 <- ts(reclass(ret, price))
  charts.PerformanceSummary(ret3, main = "Trade Size")
}
#* return nontrading size
#*@param Wealth: value
#*@param qty Trade unit: qty stocks per trade
#Test the following strategy based on day RSI :
#*@param upper1 :Buy one more unit if RSI <upper1
#*@param upper2 :Keep buying the same if upper1< RSI < upper2
#Stop trading if RSI >= upper2
#*@serializer png
#*@post /nontradingsize
function(qty, day, req, upper1, upper2, value) {
  # RSI strategy without position sizing: buy qty units while RSI < upper1,
  # hold while RSI < upper2, liquidate otherwise; plots the equity returns.
  #   qty / day / upper1 / upper2 / value - see /tradingsize.
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  names(data) <- c("date" ,"data.open","data.High","data.Low","data.Close","data.volume","data.Adj")
  data <- ts(data)
  qty <- as.numeric(qty)
  day <- as.numeric(day)
  lo <- as.numeric(upper1)  # hoisted loop-invariant thresholds
  hi <- as.numeric(upper2)
  signal <- c()             # trade signal: 1 buy, 0 hold, -1 liquidate
  signal[1:(day + 1)] <- 0
  price <- Cl(data)
  stock <- c()              # stock holding
  stock[1:(day + 1)] <- 0
  cash <- c()
  cash[1:(day + 1)] <- as.numeric(value)
  # Trading signal from simple RSI thresholds.
  rsi <- RSI(price, day)
  for (i in (day + 1):length(price)) {
    if (rsi[i] < lo) {
      signal[i] <- 1
    } else if (rsi[i] < hi) {
      signal[i] <- 0
    } else {
      signal[i] <- -1
    }
  }
  signal <- ts(reclass(signal, price))
  # Execute at the close on the previous bar's signal; track cash and stock.
  trade <- Lag(signal)
  for (i in (day + 1):length(price)) {
    if (trade[i] >= 0) {
      stock[i] <- stock[i - 1] + qty * trade[i]
      cash[i] <- cash[i - 1] -
        qty * trade[i] * price[i]
    } else {
      stock[i] <- 0                                  # full liquidation
      cash[i] <- cash[i - 1] +
        stock[i - 1] * price[i]
    }
  }
  stock <- ts(reclass(stock, price))
  cash <- ts(reclass(cash, price))
  # Equity = mark-to-market holdings + cash; per-bar returns from equity.
  equity <- c()
  equity[1:(day + 1)] <- as.numeric(value)
  # Fix: renamed the `return` vector to `ret` (it shadowed base::return()).
  ret <- c()
  ret[1:(day + 1)] <- 0
  for (i in (day + 1):length(price)) {
    equity[i] <- stock[i] * price[i] + cash[i]
    ret[i] <- equity[i] / equity[i - 1] - 1
  }
  equity <- ts(reclass(equity, price))
  ret <- ts(reclass(ret, price))
  # Fix: removed dev.new() + par(new = TRUE). Opening a second graphics
  # device diverted the chart away from the device plumber's png serializer
  # records, so the endpoint responded with an empty image.
  charts.PerformanceSummary(ret,
                            main = "Non-Day-Trading")
}
#*Create strategy
#Long when macd crosses macd signal upwards
#Short when macd crosses macd signal downwards
#*@param req
#*@serializer csv
#*@post /createstrategy
function(req){
  # Build a MACD-crossover position signal from the posted CSV: long (+1)
  # when the macd line is above its signal line, short (-1) otherwise.
  # NOTE(review): `data$sig` (the crossover position) is computed and
  # flattened on the first/last rows, but the na.locf/Lag post-processing
  # below is applied to `data$signal` — the MACD *signal line* column that
  # merge() added, not the trade signal. This looks like a sig/signal
  # mix-up; confirm whether the locf/Lag block should operate on `sig`
  # before relying on the returned columns.
data<-read.csv(text=req$postBody,header = T,sep=",")
data<-ts(data)
data <- na.omit(merge(data, MACD(Cl(data))))
data$sig = ifelse(data$macd < data$signal, -1, 1)
#Flat on first day and last day
data$sig[1] <- 0
data$sig[nrow(data)] <- 0
#Fill in the signal for other times
#Wherever signal is NA, copy previous value to next row
data$signal <- na.locf(data$signal)
#Lag signal so that you don't trade on the same bar that your signal fires
data$signal <- Lag(data$signal)
#Replace NA with zero position on first row
data$signal[1] <- 0
return(data)
}
#*Create a table with your returns
#*@param req
#*@serializer csv
#*@post /returnstable
function(req) {
  # Daily strategy returns: the MACD signal column (added by merge) times
  # the close-to-close daily return, returned as a data frame.
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  data <- ts(data)
  merged <- na.omit(merge(data, MACD(Cl(data))))
  strat_ret <- na.omit(merged$signal) * dailyReturn(Cl(ts(merged)))
  as.data.frame(strat_ret)
}
#*Create a calendar table of returns
#*@param req
#*@serializer csv
#*@post /calendarreturntable
function(req) {
  # Month-by-month calendar table of the MACD-signal strategy returns.
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  data <- ts(data)
  merged <- na.omit(merge(data, MACD(Cl(data))))
  strat_ret <- na.omit(merged$signal) * dailyReturn(Cl(ts(merged)))
  as.data.frame(table.CalendarReturns(strat_ret))
}
#*Create a table of drawdowns
#*@param req
#*@serializer csv
#*@post /calendarreturntable
function(req){
  # Table of the ten worst drawdowns of the MACD-signal strategy returns.
  # NOTE(review): the @post annotation above this handler reuses the
  # /calendarreturntable route of the previous endpoint; given the title
  # ("table of drawdowns") it almost certainly should be a distinct route
  # such as /drawdowns — confirm and fix the annotation, otherwise plumber
  # registers two handlers on the same path.
data<-read.csv(text=req$postBody,header=TRUE,sep=",")
data<-ts(data)
GOOG <- na.omit(merge(data, MACD(Cl(data))))
Returns <- na.omit(GOOG$signal) * dailyReturn(Cl(ts(GOOG)))
returns<-as.data.frame(table.Drawdowns(Returns, top=10))
return(returns)
}
#*create direction of price (up or down) depending on whether current price
#*is greater or lower than the previous 20 days price.
#* @param req trading data contains date 'open', "high", "low", "close"
#*@serializer csv
#*@post /directiontrading
function(req){
  # Tabulate the 20-period price direction (Up / Down / Nowhere) of the
  # posted close-price series.
  # NOTE(review): this endpoint parses the body with sep=";" unlike every
  # other endpoint in this file (sep=",") — confirm the expected format.
  min_gbpchf18 <- read.csv(text = req$postBody,
                           head = TRUE, sep = ";")
  names(min_gbpchf18) <- c('date', 'open', "high", "low", "close")
  min_gbpchf18 <- min_gbpchf18[, -c(2:4)]          # keep date + close only
  min_gbpchf18$date <- ymd_hms(min_gbpchf18$date)
  ohlc <- data.frame(min_gbpchf18$close)
  time_index <- as.POSIXct(min_gbpchf18$date)
  min_gbpchf <- as.xts(ohlc, time_index)
  names(min_gbpchf) <- c("close")
  min_gbpchf18 <- ts(min_gbpchf18)
  # Technical indicators on the close series.
  rsi14 <- RSI(Cl(min_gbpchf), n = 14)
  # Fix: rsi5 previously called RSI() with the default n = 14, so it was a
  # duplicate of rsi14; use the 5-period RSI the variable name intends.
  rsi5 <- RSI(Cl(min_gbpchf), n = 5)
  sma10 <- SMA(Cl(min_gbpchf), n = 10)
  sma20 <- SMA(Cl(min_gbpchf), n = 20)
  ema15 <- EMA(Cl(min_gbpchf), n = 15)
  macd7205 <- MACD(Cl(min_gbpchf), 7, 20, 5, 'SMA')
  macd12269 <- MACD(Cl(min_gbpchf), 12, 26, 9, 'SMA')
  min_gbpchf <- cbind(min_gbpchf$close, sma10, sma20, ema15,
                      rsi14, rsi5, macd7205, macd12269)
  min_gbpchf <- na.omit(min_gbpchf)
  # Direction label: 20-bar lagged return with a 1-basis-point dead zone.
  direction <- data.frame(matrix(NA, dim(min_gbpchf)[1], 1))
  lag_ret <- (min_gbpchf$close - Lag(min_gbpchf$close, 20)) / Lag(min_gbpchf$close, 20)
  direction[lag_ret > 0.0001] <- 'Up'
  direction[lag_ret < -0.0001] <- 'Down'
  direction[lag_ret < 0.0001 & lag_ret > -0.0001] <- "Nowhere"
  direction <- na.omit(direction)
  return(table(direction))
}
#* return req trading data
#* @param fractionTraining
#* @param fractionValidation
#* @param fractionTest
#* @serializer png
#* @post /predictiontrading
function(req, fractionTraining, fractionValidation, fractionTest) {
  # Split the posted data into random training / validation / test sets,
  # train a deep belief network on 20-step direction labels, and plot the
  # resulting strategy's performance summary (png serializer).
  df <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  df <- ts(df)
  # Sample sizes from the requested fractions.
  sampleSizeTraining <- floor(as.numeric(fractionTraining) * nrow(df))
  sampleSizeValidation <- floor(as.numeric(fractionValidation) * nrow(df))
  sampleSizeTest <- floor(as.numeric(fractionTest) * nrow(df))
  # Disjoint random index sets; setdiff() avoids overlapping subsets.
  indicesTraining <- sort(sample(seq_len(nrow(df)), size = sampleSizeTraining))
  indicesNotTraining <- setdiff(seq_len(nrow(df)), indicesTraining)
  indicesValidation <- sort(sample(indicesNotTraining, size = sampleSizeValidation))
  indicesTest <- setdiff(indicesNotTraining, indicesValidation)
  # The three data splits.
  dfTraining <- df[as.numeric(indicesTraining), ]
  dfValidation <- df[as.numeric(indicesValidation), ]
  dfTest <- df[as.numeric(indicesTest), ]
  min_gbpchf <- df
  # Direction label: 20-step lagged return with a 1bp dead zone.
  # NOTE(review): assumes the posted CSV has a `close` column — confirm.
  direction <- data.frame(matrix(NA, dim(min_gbpchf)[1], 1))
  # Fix: `$` does not work on the matrix that ts() produces; index the
  # close column by name instead.
  close_col <- min_gbpchf[, "close"]
  lag_ret <- (close_col - Lag(close_col, 20)) / Lag(close_col, 20)
  direction[lag_ret > 0.0001] <- 'Up'
  direction[lag_ret < -0.0001] <- 'Down'
  direction[lag_ret < 0.0001 & lag_ret > -0.0001] <- "Nowhere"
  direction <- na.omit(direction)
  train_direction <- direction[as.numeric(indicesTraining), ]
  validation_direction <- direction[as.numeric(indicesValidation), ]
  testing_direction <- direction[as.numeric(indicesTest), ]
  # Fix: the original referenced undefined objects `train`, `validation`
  # and `testing`; bind them to the splits computed above.
  train <- dfTraining
  validation <- dfValidation
  testing <- dfTest
  model <- dbn.dnn.train(scale(train), class.ind(train_direction), hidden = c(3, 4, 6))
  validation_prediction <- nn.predict(model, validation)
  summary(validation_prediction)
  nn.test(model, validation, class.ind(validation_direction), t = 0.4)
  # Class assignment on the validation set via per-class thresholds.
  valid_pred_class <- data.frame(matrix(NA, dim(validation_prediction)[1], 1))
  valid_pred_class[validation_prediction[, 'Down'] > 0.3754, 1] <- 'Down'
  valid_pred_class[validation_prediction[, 'Nowhere'] > 0.233, 1] <- 'Nowhere'
  valid_pred_class[validation_prediction[, 'Up'] > 0.3771, 1] <- 'Up'
  valid_pred_class <- na.locf(valid_pred_class)
  valid_pred_class <- as.factor(valid_pred_class$matrix.NA..dim.validation_prediction..1...1.)
  validation_direction <- as.factor(validation_direction)
  valid_matrix <- confusionMatrix(valid_pred_class, validation_direction)
  test_prediction <- nn.predict(model, testing)
  test_pred_class <- data.frame(matrix(NA, dim(test_prediction)[1], 1))
  # Fix: classify the *test* predictions — the original indexed
  # validation_prediction here (copy-paste slip).
  test_pred_class[test_prediction[, 'Down'] > 0.3754, 1] <- 'Down'
  test_pred_class[test_prediction[, 'Nowhere'] > 0.233, 1] <- 'Nowhere'
  test_pred_class[test_prediction[, 'Up'] > 0.3771, 1] <- 'Up'
  # Fix: the locf result was assigned to valid_pred_class by mistake.
  test_pred_class <- na.locf(test_pred_class)
  test_pred_class <- as.factor(test_pred_class$matrix.NA..dim.test_prediction..1...1.)
  testing_direction <- as.factor(testing_direction)
  test_matrix <- confusionMatrix(test_pred_class, testing_direction)
  # Signal from predicted classes; trade the previous prediction.
  signal <- ifelse(test_pred_class == 'Up', 1, ifelse(test_pred_class == 'Down', -1, 0))
  cost <- 0
  trade_ret <- testing[, "close"] * Lag(signal) - cost
  # Performance summary of the strategy on the test set.
  charts.PerformanceSummary(trade_ret)
}
#* start simulation
#*@param instrumentids
#*@serializer png
#*@post /simulationtrad
function(req, instrumentids) {
  # Run the HFT simulator with DemoStrategy on the posted tick data.
  # NOTE(review): TFformat is assumed to be defined globally when the API
  # is mounted — confirm.
  TFtaq <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # Fix: pass the endpoint parameter through; the original passed the
  # literal string "instrumentids", making the query parameter dead.
  res1 <- HFTsimulator(stg = DemoStrategy,          # strategy function
                       instrumentids = instrumentids, # security id(s)
                       datalist = TFtaq, formatlist = TFformat # data
                       )
  return(res1)
}
#* return trading summaryplot
#*@param req data of trading
#*@param instrumentids trading instrument
#*@param starttime the started time "09:15:00.000"
#*@param endtime the ended time "11:30:00.000"
#*@serializer png
#*@post /tradinsummary
function(req, instrumentids, starttime, endtime) {
  # Summary plot of trading results between starttime and endtime.
  data <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # Fix: forward the actual parameters; the original passed the literal
  # strings "instrumentids", "starttime", "endtime", ignoring the caller's
  # query arguments.
  tradesummary(data, instrumentids, starttime, endtime) # summary plot
}
#* return check details of some specific orders.
#*@param req data of trading
#*@param instrumentids trading instrument
#*@param starttime the started time "13:00:00.000"
#*@param endtime the ended time "15:15:00.000"
#*@param n the row index of the traded order whose limit details to return
#*@serializer png
#*@post /multy
function(req, instrumentids, starttime, endtime, n) {
  # Summarise trades and inspect the n-th traded order's limit details.
  TFtq <- read.csv(text = req$postBody, header = TRUE, sep = ",")
  # Fix: forward the actual parameters; the original passed literal strings
  # (which even carried stray trailing spaces) instead of the endpoint
  # arguments.
  res2 <- tradesummary(TFtq, instrumentids, starttime, endtime) # summary plot
  checklimit(instrumentdata = TFtq, orderid = res2$traded$orderid[as.numeric(n)])
}
#Paolo Tamagnini
#1536242
#paolotamag@gmail.com
#target density f(x) : Beta(a1,b1)
#instrumental density g(y) : Beta(a2,b2)
#U is Uniform(0,1)
#acceptance condition: k*U < Beta(Y,a1,b1)/Beta(Y,a2,b2)
#parameter k: max(Beta(x,a1,b1)/Beta(x,a2,b2))
#' Rejection sampler: draw N variates from the target Beta(a1, b1) using
#' Beta(a2, b2) as the instrumental (candidate) density.
#' The rejection constant k = max_x f(x)/g(x) is found numerically; the
#' achieved acceptance probability is 1/k. Prints k as a side effect.
#' Returns a numeric vector of exactly N accepted draws.
generateNBetafromBeta <- function(N, a1, b1, a2, b2) {
  set.seed(123)  # reproducibility (kept from the original exercise)
  # Ratio f/g whose maximum is the rejection constant k.
  density_ratio <- function(x) {
    dbeta(x, a1, b1) / dbeta(x, a2, b2)
  }
  # Start the maximisation at the target mean E[X] = a1/(a1+b1).
  expf <- c(a1 / (a1 + b1))
  k <- optim(expf, density_ratio, control = list(fnscale = -1), method = 'BFGS')$value
  print(k)
  X <- NULL
  # Acceptance probability is 1/k, so ~N*k candidate draws are needed per
  # pass; loop until N values have been accepted (usually a single pass,
  # at most a second one when the first comes up short).
  while (length(X) < N) {
    U <- runif(N * k, min = 0, max = 1)
    Y <- rbeta(N * k, a2, b2)
    X <- c(X, Y[U < dbeta(Y, a1, b1) / (k * dbeta(Y, a2, b2))])
  }
  # Drop any surplus accepted values.
  X[1:N]
}
# A beta(2,2) candidate beats beta(1,1) = Unif(0,1): it hugs the beta(3,3)
# target more closely, so the rejection constant k is SMALLER (1.25 vs
# 1.875 for the uniform), the acceptance probability 1/k is higher, and
# fewer candidate draws are wasted.
X = generateNBetafromBeta(100000,3,3,2,2)
# The exercise, however, asks for a uniform candidate density:
X = generateNBetafromBeta(100000,3,3,1,1)
# Visual check: the empirical histogram should match the beta(3,3) target.
hist(X,freq = F,breaks = 75, col = 'orange', main = 'empirical histogram vs target density')
# Evaluation grid over the sampled range, target density on the grid.
xfit<-seq(min(X),max(X),length=length(X))
yfit<-dbeta(xfit,3,3)
# Overlay the target density on the histogram.
lines(xfit, yfit, col="blue", lwd=2)
#we can now generate instead a Bernoulli sample on which we can approximate the prob of accept.
#' One pass of the same rejection sampler, returning Bernoulli acceptance
#' indicators instead of the accepted values: 1 per accepted candidate,
#' 0 per rejected one (successes listed first, as in the original).
#' Their mean estimates the acceptance probability I = 1/k, which is
#' printed alongside its exact value. Returns a 0/1 vector of length N.
generateBernFromNTrials <- function(N, a1, b1, a2, b2) {
  set.seed(123)  # reproducibility (kept from the original exercise)
  # Ratio f/g whose maximum is the rejection constant k.
  funzBeta2 <- function(x) {
    dbeta(x, a1, b1) / dbeta(x, a2, b2)
  }
  k <- optim(c(a1 / (a1 + b1)), funzBeta2, method = "BFGS", control = list(fnscale = -1))$value
  U <- runif(N, min = 0, max = 1)
  Y <- rbeta(N, a2, b2)
  # Accepted candidates out of the N trials.
  X <- Y[U <= dbeta(Y, a1, b1) / (k * dbeta(Y, a2, b2))]
  succ <- rep(1, length(X))
  unsuc <- rep(0, N - length(X))
  X <- c(succ, unsuc)
  print('I =')
  print(1 / k)       # exact acceptance probability
  print('I_hat =')
  print(mean(X))     # Monte Carlo estimate of the same quantity
  return(X)
}
# Bernoulli acceptance indicators with a beta(2,2) candidate density;
# prints the exact acceptance probability I = 1/k and its estimate I_hat.
Xtilde = generateBernFromNTrials(10000,3,3,2,2)
# Same with a beta(1,1) = Unif(0,1) candidate (lower acceptance rate).
Xtilde = generateBernFromNTrials(10000,3,3,1,1)
#Paolo Tamagnini
#1536242
#paolotamag@gmail.com
#target density f(x) : Beta(a1,b1)
#instrumental density g(y) : Beta(a2,b2)
#U is Uniform(0,1)
#acceptance condition: k*U < Beta(Y,a1,b1)/Beta(Y,a2,b2)
#parameter k: max(Beta(x,a1,b1)/Beta(x,a2,b2))
# Rejection sampler: draw N variates from the target Beta(a1, b1) using
# Beta(a2, b2) as the candidate density; prints the rejection constant k.
generateNBetafromBeta <- function(N, a1, b1, a2, b2) {
  set.seed(123)
  # Ratio f/g; its maximum is the rejection constant k.
  ratio <- function(x) dbeta(x, a1, b1) / dbeta(x, a2, b2)
  # Maximise starting from the target mean a1/(a1+b1).
  k <- optim(c(a1 / (a1 + b1)), ratio, control = list(fnscale = -1), method = 'BFGS')$value
  print(k)
  accepted <- NULL
  # Each pass draws ~N*k candidates (acceptance probability is 1/k);
  # repeat until N draws have been accepted, then trim the surplus.
  repeat {
    if (length(accepted) >= N) break
    U <- runif(N * k, min = 0, max = 1)
    Y <- rbeta(N * k, a2, b2)
    keep <- U < dbeta(Y, a1, b1) / (k * dbeta(Y, a2, b2))
    accepted <- c(accepted, Y[keep])
  }
  return(accepted[1:N])
}
# A beta(2,2) candidate beats beta(1,1) = Unif(0,1): it hugs the beta(3,3)
# target more closely, so the rejection constant k is SMALLER (1.25 vs
# 1.875 for the uniform), the acceptance probability 1/k is higher, and
# fewer candidate draws are wasted.
X = generateNBetafromBeta(100000,3,3,2,2)
# The exercise, however, asks for a uniform candidate density:
X = generateNBetafromBeta(100000,3,3,1,1)
# Visual check: the empirical histogram should match the beta(3,3) target.
hist(X,freq = F,breaks = 75, col = 'orange', main = 'empirical histogram vs target density')
# Evaluation grid over the sampled range, target density on the grid.
xfit<-seq(min(X),max(X),length=length(X))
yfit<-dbeta(xfit,3,3)
# Overlay the target density on the histogram.
lines(xfit, yfit, col="blue", lwd=2)
#we can now generate instead a Bernoulli sample on which we can approximate the prob of accept.
# One pass of the rejection sampler returning Bernoulli acceptance
# indicators (1 = accepted, 0 = rejected; successes listed first).
# Prints the exact acceptance probability I = 1/k and its estimate I_hat.
generateBernFromNTrials <- function(N, a1, b1, a2, b2) {
  set.seed(123)
  ratio <- function(x) dbeta(x, a1, b1) / dbeta(x, a2, b2)
  k <- optim(c(a1 / (a1 + b1)), ratio, method = "BFGS", control = list(fnscale = -1))$value
  U <- runif(N, min = 0, max = 1)
  Y <- rbeta(N, a2, b2)
  n_accept <- length(Y[U <= dbeta(Y, a1, b1) / (k * dbeta(Y, a2, b2))])
  bern <- c(rep(1, n_accept), rep(0, N - n_accept))
  print('I =')
  print(1 / k)
  print('I_hat =')
  print(mean(bern))
  return(bern)
}
# Bernoulli acceptance indicators with a beta(2,2) candidate density;
# prints the exact acceptance probability I = 1/k and its estimate I_hat.
Xtilde = generateBernFromNTrials(10000,3,3,2,2)
# Same with a beta(1,1) = unif(0,1) candidate (lower acceptance rate).
Xtilde = generateBernFromNTrials(10000,3,3,1,1) |
## Plotting-functions script / ISEM 2019 conference version
# packages / data ---------------------------------------------------------
library(tidyverse)
# Block-1 trap data. NOTE(review): hard-coded absolute Linux path — the
# commented line below is the Windows equivalent; confirm which applies.
data_piege <- read.csv("/home/bastien/cecidomyie/data/2017_piege.csv")
# data_piege <- read.csv("D:/Mes donnees/GitHub_Bastien/cecidomyie-master/data/2017_piege.csv")
# Larvae counts as a 3-column matrix, one column per soil treatment:
# ER (low weed cover), PS (synthetic mulching), EH (high weed cover).
# The plotting functions below index these matrices by column, so this
# ER / PS / EH order must not change.
larves_obs <- cbind(data_piege %>% filter(Sol == "ER") %>% pull(larves),
                    data_piege %>% filter(Sol == "PS") %>% pull(larves),
                    data_piege %>% filter(Sol == "EH") %>% pull(larves))
# Inflorescence counts, same column layout as larves_obs.
inflos_obs <- cbind(data_piege %>% filter(Sol == "ER") %>% pull(inflos),
                    data_piege %>% filter(Sol == "PS") %>% pull(inflos),
                    data_piege %>% filter(Sol == "EH") %>% pull(inflos))
# Block-2 trap data (second experimental block, same structure).
data_piege_b2 <- read.csv("/home/bastien/cecidomyie/data/2017_piege_bloc2.csv")
# data_piege_b2 <- read.csv("D:/Mes donnees/GitHub_Bastien/cecidomyie-master/data/2017_piege_bloc2.csv")
larves_obs_b2 <- cbind(data_piege_b2 %>% filter(Sol == "ER") %>% pull(larves),
                       data_piege_b2 %>% filter(Sol == "PS") %>% pull(larves),
                       data_piege_b2 %>% filter(Sol == "EH") %>% pull(larves))
# Sys.setlocale("LC_TIME","English")
# Plots function ----------------------------------------------------------
plot_dynamics_A <- function(args, inflos) {
  # Observed vs estimated larvae dynamics (model A), one facet per soil
  # treatment, English labels. Relies on file globals larves_obs, date2017
  # and the model function dynamics_A.
  obs <- larves_obs
  est <- dynamics_A(args, inflos)
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of obs / est).
  build_panel <- function(j) {
    data.frame(date = date2017, obs = obs[, j], est = est[, j]) %>%
      mutate(Sol = factor(soil_levels[j], levels = soil_levels)) %>%
      gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    geom_point() +
    geom_line() +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "bottom") +
    scale_color_manual(values = c("black", "green4"), labels = c("Observed", "Estimated")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_dynamics <- function(args, inflos) {
  # French-labelled version of plot_dynamics_A: observed vs estimated
  # larvae dynamics, one facet per soil treatment.
  # BUG FIX: the original tagged the panels with French labels
  # ("Enherbement ras", ...) but declared English factor levels, so
  # factor() returned NA for every row and the three facets collapsed
  # into a single "NA" facet. The levels now match the French labels,
  # in the same low / mulch / high ordering.
  obs <- larves_obs
  est <- dynamics(args, inflos)
  niveaux_sol <- c("Enherbement ras", "Paillage synthétique", "Enherbement haut")
  er <- data.frame(date = date2017, obs = obs[, 1], est = est[, 1]) %>%
    mutate(Sol = factor("Enherbement ras", levels = niveaux_sol)) %>%
    gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  ps <- data.frame(date = date2017, obs = obs[, 2], est = est[, 2]) %>%
    mutate(Sol = factor("Paillage synthétique", levels = niveaux_sol)) %>%
    gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  eh <- data.frame(date = date2017, obs = obs[, 3], est = est[, 3]) %>%
    mutate(Sol = factor("Enherbement haut", levels = niveaux_sol)) %>%
    gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  to_plot <- bind_rows(er, ps, eh)
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    geom_point() +
    geom_line() +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "bottom") +
    scale_color_discrete(labels = c("Observation", "Estimation")) +
    xlab("Date") +
    ylab("Nombre de larves")
}
plot_decompo_A <- function(args, inflos) {
  # Observed vs estimated larvae counts (model A) per soil treatment, with
  # the estimate decomposed into its four origin compartments (F_pup,
  # F_diap, F_endo, F_exo) drawn as stacked areas. Also prints summary
  # tables of season totals and larvae-per-inflorescence ratios.
  obs <- larves_obs
  estimations <- decomposition_A(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_B <- function(args, inflos) {
  # Same layout as plot_decompo_A but using model B's decomposition:
  # observed vs estimated larvae per soil treatment, estimate split into
  # its four origin compartments as stacked areas; prints summary tables.
  obs <- larves_obs
  estimations <- decomposition_B(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_B2 <- function(args, inflos) {
  # Same layout as plot_decompo_A but using model B2's decomposition:
  # observed vs estimated larvae per soil treatment, estimate split into
  # its four origin compartments as stacked areas; prints summary tables.
  obs <- larves_obs
  estimations <- decomposition_B2(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_C <- function(args, inflos) {
  # Same layout as plot_decompo_A but using model C's decomposition:
  # observed vs estimated larvae per soil treatment, estimate split into
  # its four origin compartments as stacked areas; prints summary tables.
  obs <- larves_obs
  estimations <- decomposition_C(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo <- function(args, inflos) {
  # Base-model variant of plot_decompo_A: observed vs estimated larvae per
  # soil treatment, estimate split into its four origin compartments as
  # stacked areas; prints summary tables of totals and ratios.
  obs <- larves_obs
  estimations <- decomposition(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_C2 <- function(args, inflos) {
  # Same layout as plot_decompo_A but using model C2's decomposition:
  # observed vs estimated larvae per soil treatment, estimate split into
  # its four origin compartments as stacked areas; prints summary tables.
  obs <- larves_obs
  estimations <- decomposition_C2(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_b2 <- function(args, inflos) {
  # Block-2 variant of plot_decompo: observed vs estimated larvae per soil
  # treatment, estimate split into its four origin compartments as stacked
  # areas; prints summary tables of totals and ratios.
  # NOTE(review): the "all inflos" ratios below divide block-2 counts by
  # the block-1 global inflos_obs — confirm this is intended.
  obs <- larves_obs_b2
  estimations <- decomposition_b2(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_season <- function(args, inflos) {
  # Season-model variant (block 1): observed vs estimated larvae per soil
  # treatment, estimate split into its four origin compartments as stacked
  # areas; prints summary tables of totals and ratios.
  obs <- larves_obs
  estimations <- decomposition_season_bloc1(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_season_b2 <- function(args, inflos) {
  # Season-model variant (block 2): observed vs estimated larvae per soil
  # treatment, estimate split into its four origin compartments as stacked
  # areas; prints summary tables of totals and ratios.
  # NOTE(review): the "all inflos" ratios below divide block-2 counts by
  # the block-1 global inflos_obs — confirm this is intended.
  obs <- larves_obs_b2
  estimations <- decomposition_season_bloc2(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_15sept_b2 <- function(args, inflos) {
  # "15 Sept" model variant (block 2): observed vs estimated larvae per
  # soil treatment, estimate split into its four origin compartments as
  # stacked areas; prints summary tables of totals and ratios.
  # NOTE(review): the "all inflos" ratios below divide block-2 counts by
  # the block-1 global inflos_obs — confirm this is intended.
  obs <- larves_obs_b2
  estimations <- decomposition_15sept_bloc2(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_season_inflos <- function(args, inflos) {
  # Season+inflos model variant (block 1): observed vs estimated larvae
  # per soil treatment, estimate split into its four origin compartments
  # as stacked areas; prints summary tables of totals and ratios.
  obs <- larves_obs
  estimations <- decomposition_season_inflos_bloc1(args, inflos)
  # Column-wise season totals (one column per soil treatment).
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Ratios computed against the CDE inflorescences passed in...
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # ...and against every observed inflorescence.
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  soil_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Long-format panel for soil treatment j (j-th column of every matrix).
  build_panel <- function(j) {
    data.frame(Date = date2017,
               Sol = factor(soil_levels[j], levels = soil_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(lapply(seq_len(3), build_panel))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_season_inflos_b2 <- function(args, inflos) {
  ## Same as plot_decompo_season_inflos() but for block 2: observed larvae
  ## come from `larves_obs_b2` and the decomposition uses the block-2 model.
  obs <- larves_obs_b2
  parts <- decomposition_season_inflos_bloc2(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
### Functions for the simulations / ISEM 2019 conference
plot_inflos <- function(inflos, inflos_CDE) {
  ## Plot two inflorescence series side by side, one facet per soil treatment.
  est1 <- inflos
  est2 <- inflos_CDE
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("blue", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of inflorescences")
}
plot_sim_fenv_A <- function(args1, args2, inflos) {
  ## Compare model-A larval dynamics under two parameter sets on the same
  ## inflorescence data, one facet per soil treatment.
  est1 <- dynamics_A(args1, inflos)
  est2 <- dynamics_A(args2, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios for both runs;
  ## the third element is the relative difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_sim_fenv_C <- function(args1, args2, inflos) {
  ## Compare larval dynamics (model `dynamics`) under two parameter sets on
  ## the same inflorescence data, one facet per soil treatment.
  est1 <- dynamics(args1, inflos)
  est2 <- dynamics(args2, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios for both runs;
  ## the third element is the relative difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_sim_fenv_D <- function(args1, args2, inflos) {
  ## Compare seasonal block-1 larval dynamics under two parameter sets on
  ## the same inflorescence data, one facet per soil treatment.
  est1 <- dynamics_season_b1(args1, inflos)
  est2 <- dynamics_season_b1(args2, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios for both runs;
  ## the third element is the relative difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_sim_inflos_A <- function(args1, inflos1, inflos2) {
  ## Compare model-A larval dynamics under two inflorescence scenarios,
  ## holding the parameters (`args1`) fixed; one facet per soil treatment.
  est1 <- dynamics_A(args1, inflos1)
  est2 <- dynamics_A(args1, inflos2)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, each run normalized
  ## by its own inflorescence series; third element is the relative
  ## difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos1, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos1)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos2, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos2)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_sim_inflos_C <- function(args1, inflos1, inflos2) {
  ## Compare larval dynamics (model `dynamics`) under two inflorescence
  ## scenarios with fixed parameters; one facet per soil treatment.
  est1 <- dynamics(args1, inflos1)
  est2 <- dynamics(args1, inflos2)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, each run normalized
  ## by its own inflorescence series; third element is the relative
  ## difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos1, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos1)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos2, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos2)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_sim_inflos_D <- function(args1, inflos1, inflos2) {
  ## Compare seasonal block-1 larval dynamics under two inflorescence
  ## scenarios with fixed parameters; one facet per soil treatment.
  est1 <- dynamics_season_b1(args1, inflos1)
  est2 <- dynamics_season_b1(args1, inflos2)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, each run normalized
  ## by its own inflorescence series; third element is the relative
  ## difference (%) between runs.
  larves_sim1 <- colSums(est1)
  larves_sim2 <- colSums(est2)
  print(list(
    rbind(larves_sim1 = larves_sim1,
          larvesperinf0_sim1 = colMeans(est1 / inflos1, na.rm = TRUE),
          larvesperinf_sim1 = larves_sim1 / colSums(inflos1)),
    rbind(larves_sim2 = larves_sim2,
          larvesperinf0_sim2 = colMeans(est2 / inflos2, na.rm = TRUE),
          larvesperinf_sim2 = larves_sim2 / colSums(inflos2)),
    100 * (larves_sim2 - larves_sim1) / larves_sim2))

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"),
                       labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
## /model_R/plot_res_IsG_ISEM.R -- repo: bastienreyne/cecidomyie (no license declared)
## Script with plotting functions / ISEM 2019 conference version
# Packages / data ----------------------------------------------------------
library(tidyverse)
# 2017 trap data, block 1.
# NOTE(review): absolute path to the author's machine -- adjust locally.
data_piege <- read.csv("/home/bastien/cecidomyie/data/2017_piege.csv")
# data_piege <- read.csv("D:/Mes donnees/GitHub_Bastien/cecidomyie-master/data/2017_piege.csv")
# Observed larvae counts as a matrix, one column per soil treatment, in the
# order ER / PS / EH (same column order the plotting functions rely on).
larves_obs <- cbind(data_piege %>% filter(Sol == "ER") %>% pull(larves),
data_piege %>% filter(Sol == "PS") %>% pull(larves),
data_piege %>% filter(Sol == "EH") %>% pull(larves))
# Observed inflorescence counts, same column order as `larves_obs`.
inflos_obs <- cbind(data_piege %>% filter(Sol == "ER") %>% pull(inflos),
data_piege %>% filter(Sol == "PS") %>% pull(inflos),
data_piege %>% filter(Sol == "EH") %>% pull(inflos))
# 2017 trap data, block 2.
data_piege_b2 <- read.csv("/home/bastien/cecidomyie/data/2017_piege_bloc2.csv")
# data_piege_b2 <- read.csv("D:/Mes donnees/GitHub_Bastien/cecidomyie-master/data/2017_piege_bloc2.csv")
# Block-2 observed larvae counts, same column order.
larves_obs_b2 <- cbind(data_piege_b2 %>% filter(Sol == "ER") %>% pull(larves),
data_piege_b2 %>% filter(Sol == "PS") %>% pull(larves),
data_piege_b2 %>% filter(Sol == "EH") %>% pull(larves))
# Sys.setlocale("LC_TIME","English")
# Plotting functions --------------------------------------------------------
plot_dynamics_A <- function(args, inflos) {
  ## Observed vs estimated larval dynamics (model A), one facet per soil
  ## treatment; observations in black, estimations in green.
  obs <- larves_obs
  est <- dynamics_A(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, obs = obs[, j], est = est[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_point() +
    geom_line() +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "bottom") +
    scale_color_manual(values = c("black", "green4"),
                       labels = c("Observed", "Estimated")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_dynamics <- function(args, inflos) {
  ## Observed vs estimated larval dynamics (model `dynamics`), one facet
  ## per soil treatment.
  ##
  ## Bug fix: `Sol` was built with French values ("Enherbement ras",
  ## "Paillage synthétique", "Enherbement haut") while `levels` held the
  ## English labels, so factor() returned NA for every row and the three
  ## treatments collapsed into a single NA facet. The values now match the
  ## declared levels, consistent with plot_dynamics_A().
  obs <- larves_obs
  est <- dynamics(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## One long-format panel per treatment column, then stacked.
  panel <- function(j, soil) {
    data.frame(date = date2017, obs = obs[, j], est = est[, j]) %>%
      mutate(Sol = factor(soil, levels = soils)) %>%
      gather(obs, est, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = date, y = nombre, color = statut) +
    geom_point() +
    geom_line() +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "bottom") +
    scale_color_discrete(labels = c("Observation", "Estimation")) +
    xlab("Date") +
    ylab("Nombre de larves")
}
plot_decompo_A <- function(args, inflos) {
  ## Plot observed vs estimated larval dynamics (model A), one facet per
  ## soil treatment, with stacked areas showing the contribution of each
  ## female source (pupae, diapause, endogenous, exogenous).
  obs <- larves_obs
  parts <- decomposition_A(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_B <- function(args, inflos) {
  ## Plot observed vs estimated larval dynamics (model B), one facet per
  ## soil treatment, with stacked areas showing the contribution of each
  ## female source (pupae, diapause, endogenous, exogenous).
  obs <- larves_obs
  parts <- decomposition_B(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_B2 <- function(args, inflos) {
  ## Plot observed vs estimated larval dynamics (model B2), one facet per
  ## soil treatment, with stacked areas showing the contribution of each
  ## female source (pupae, diapause, endogenous, exogenous).
  obs <- larves_obs
  parts <- decomposition_B2(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo_C <- function(args, inflos) {
  ## Plot observed vs estimated larval dynamics (model C), one facet per
  ## soil treatment, with stacked areas showing the contribution of each
  ## female source (pupae, diapause, endogenous, exogenous).
  obs <- larves_obs
  parts <- decomposition_C(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
plot_decompo <- function(args, inflos) {
  ## Plot observed vs estimated larval dynamics (default model), one facet
  ## per soil treatment, with stacked areas showing the contribution of
  ## each female source (pupae, diapause, endogenous, exogenous).
  obs <- larves_obs
  parts <- decomposition(args, inflos)
  soils <- c("Low weed cover", "Synthetic mulching", "High weed cover")

  ## Season totals and larvae-per-inflorescence ratios, computed both with
  ## the CDE inflorescences (`inflos`) and with all observed inflorescences.
  L_obs <- colSums(obs)
  L_sim <- colSums(parts[[1]])
  print(list(
    rbind(L_obs = L_obs,
          LperI0_CDE_obs = colMeans(obs / inflos, na.rm = TRUE),
          LperI_CDE_obs = L_obs / colSums(inflos),
          LperI0_obs = colMeans(obs / inflos_obs, na.rm = TRUE),
          LperI_obs = L_obs / colSums(inflos_obs)),
    rbind(L_sim = L_sim,
          LperI0_CDE_sim = colMeans(parts[[1]] / inflos, na.rm = TRUE),
          LperI_CDE_sim = L_sim / colSums(inflos),
          LperI0_sim = colMeans(parts[[1]] / inflos_obs, na.rm = TRUE),
          LperI_sim = L_sim / colSums(inflos_obs))))

  ## Build one long-format panel per treatment column, then stack them.
  panel <- function(j, soil) {
    data.frame(Date = date2017,
               Sol = factor(soil, levels = soils),
               Observed = obs[, j],
               Estimated = parts[[1]][, j],
               F_pup = parts[[2]][, j],
               F_diap = parts[[3]][, j],
               F_endo = parts[[4]][, j],
               F_exo = parts[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(panel(1, soils[1]), panel(2, soils[2]), panel(3, soils[3]))

  ggplot(to_plot) +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics (model variant C2).
##'
##' Same layout as `plot_decompo()` but estimates come from
##' `decomposition_C2()`. Observed vs estimated larvae are overlaid on a
##' stacked area of the four larval sources, one facet per soil treatment.
##' Prints summary tables as a side effect.
##'
##' Relies on globals: `larves_obs`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to `decomposition_C2()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_C2 <- function(args, inflos) {
  obs <- larves_obs
  estimations <- decomposition_C2(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics (block 2).
##'
##' Block-2 counterpart of `plot_decompo()`: observations come from
##' `larves_obs_b2` and estimates from `decomposition_b2()`. Observed vs
##' estimated larvae are overlaid on a stacked area of the four larval
##' sources, one facet per soil treatment. Prints summary tables as a
##' side effect.
##'
##' Relies on globals: `larves_obs_b2`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to `decomposition_b2()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_b2 <- function(args, inflos) {
  obs <- larves_obs_b2
  estimations <- decomposition_b2(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics (seasonal model, block 1).
##'
##' Same layout as `plot_decompo()` but estimates come from
##' `decomposition_season_bloc1()`. Observed vs estimated larvae are
##' overlaid on a stacked area of the four larval sources, one facet per
##' soil treatment. Prints summary tables as a side effect.
##'
##' Relies on globals: `larves_obs`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to `decomposition_season_bloc1()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_season <- function(args, inflos) {
  obs <- larves_obs
  estimations <- decomposition_season_bloc1(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics (seasonal model, block 2).
##'
##' Block-2 counterpart of `plot_decompo_season()`: observations come from
##' `larves_obs_b2` and estimates from `decomposition_season_bloc2()`.
##' Observed vs estimated larvae are overlaid on a stacked area of the four
##' larval sources, one facet per soil treatment. Prints summary tables as
##' a side effect.
##'
##' Relies on globals: `larves_obs_b2`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to `decomposition_season_bloc2()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_season_b2 <- function(args, inflos) {
  obs <- larves_obs_b2
  estimations <- decomposition_season_bloc2(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics (15-Sept model, block 2).
##'
##' Same layout as `plot_decompo_b2()` but estimates come from
##' `decomposition_15sept_bloc2()`. Observed vs estimated larvae are
##' overlaid on a stacked area of the four larval sources, one facet per
##' soil treatment. Prints summary tables as a side effect.
##'
##' Relies on globals: `larves_obs_b2`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to `decomposition_15sept_bloc2()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_15sept_b2 <- function(args, inflos) {
  obs <- larves_obs_b2
  estimations <- decomposition_15sept_bloc2(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics
##' (seasonal + inflorescence model, block 1).
##'
##' Same layout as `plot_decompo()` but estimates come from
##' `decomposition_season_inflos_bloc1()`. Observed vs estimated larvae
##' are overlaid on a stacked area of the four larval sources, one facet
##' per soil treatment. Prints summary tables as a side effect.
##'
##' Relies on globals: `larves_obs`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to
##'   `decomposition_season_inflos_bloc1()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_season_inflos <- function(args, inflos) {
  obs <- larves_obs
  estimations <- decomposition_season_inflos_bloc1(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
##' Plot the decomposition of estimated larval dynamics
##' (seasonal + inflorescence model, block 2).
##'
##' Block-2 counterpart of `plot_decompo_season_inflos()`: observations
##' come from `larves_obs_b2` and estimates from
##' `decomposition_season_inflos_bloc2()`. Observed vs estimated larvae
##' are overlaid on a stacked area of the four larval sources, one facet
##' per soil treatment. Prints summary tables as a side effect.
##'
##' Relies on globals: `larves_obs_b2`, `inflos_obs`, `date2017`.
##'
##' @param args parameter vector forwarded to
##'   `decomposition_season_inflos_bloc2()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_decompo_season_inflos_b2 <- function(args, inflos) {
  obs <- larves_obs_b2
  estimations <- decomposition_season_inflos_bloc2(args, inflos)
  L_obs <- apply(obs, 2, sum)
  L_sim <- apply(estimations[[1]], 2, sum)
  # Summary statistics based on the CDE inflorescences passed in
  LperI0_CDE_obs <- apply(obs / inflos, 2, mean, na.rm = TRUE)
  LperI0_CDE_sim <- apply(estimations[[1]] / inflos, 2, mean, na.rm = TRUE)
  LperI_CDE_obs <- L_obs / apply(inflos, 2, sum)
  LperI_CDE_sim <- L_sim / apply(inflos, 2, sum)
  # Summary statistics based on all observed inflorescences
  LperI0_obs <- apply(obs / inflos_obs, 2, mean, na.rm = TRUE)
  LperI0_sim <- apply(estimations[[1]] / inflos_obs, 2, mean, na.rm = TRUE)
  LperI_obs <- L_obs / apply(inflos_obs, 2, sum)
  LperI_sim <- L_sim / apply(inflos_obs, 2, sum)
  print(list(rbind(L_obs, LperI0_CDE_obs, LperI_CDE_obs, LperI0_obs, LperI_obs),
             rbind(L_sim, LperI0_CDE_sim, LperI_CDE_sim, LperI0_sim, LperI_sim)))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(Date = date2017,
               Sol = factor(sol, levels = sol_levels),
               Observed = obs[, j],
               Estimated = estimations[[1]][, j],
               F_pup = estimations[[2]][, j],
               F_diap = estimations[[3]][, j],
               F_endo = estimations[[4]][, j],
               F_exo = estimations[[5]][, j]) %>%
      gather(F_exo, F_endo, F_diap, F_pup,
             key = prov, value = prop, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = Date) +
    geom_area(aes(y = prop, fill = prov), alpha = 0.5) +
    geom_line(aes(y = Observed, color = "Observed"), lwd = 0.75) +
    geom_line(aes(y = Estimated, color = "Estimated"), lwd = 0.75) +
    geom_point(aes(y = Observed, color = "Observed")) +
    geom_point(aes(y = Estimated, color = "Estimated")) +
    theme_bw() +
    facet_grid(. ~ Sol) +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("green4", "black")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size = 10, angle = 45, vjust = 1, hjust = 1)) +
    ylab("Number of larvae")
}
### fonctions pour les simulations / ISEM2019
##' Compare two inflorescence scenarios over time (ISEM2019 simulations).
##'
##' Draws the two inflorescence time series as colored lines
##' ("Simulation_1" in blue, "Simulation_2" in red), one facet per soil
##' treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @param inflos_CDE second inflorescence matrix, same layout.
##' @return a ggplot object.
plot_inflos <- function(inflos, inflos_CDE) {
  est1 <- inflos
  est2 <- inflos_CDE
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("blue", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of inflorescences")
}
##' Compare simulated larval dynamics under two parameter sets (model A).
##'
##' Runs `dynamics_A()` with `args1` and `args2` on the same inflorescence
##' matrix, prints per-treatment summary statistics (totals, larvae per
##' inflorescence, and the percent difference between simulations), and
##' plots the two simulated time series, one facet per soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 first parameter vector for `dynamics_A()`.
##' @param args2 second parameter vector for `dynamics_A()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_sim_fenv_A <- function(args1, args2, inflos) {
  est1 <- dynamics_A(args1, inflos)
  est2 <- dynamics_A(args2, inflos)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
##' Compare simulated larval dynamics under two parameter sets (model C).
##'
##' Runs `dynamics()` with `args1` and `args2` on the same inflorescence
##' matrix, prints per-treatment summary statistics (totals, larvae per
##' inflorescence, and the percent difference between simulations), and
##' plots the two simulated time series, one facet per soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 first parameter vector for `dynamics()`.
##' @param args2 second parameter vector for `dynamics()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_sim_fenv_C <- function(args1, args2, inflos) {
  est1 <- dynamics(args1, inflos)
  est2 <- dynamics(args2, inflos)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
##' Compare simulated larval dynamics under two parameter sets (model D).
##'
##' Runs `dynamics_season_b1()` with `args1` and `args2` on the same
##' inflorescence matrix, prints per-treatment summary statistics (totals,
##' larvae per inflorescence, and the percent difference between
##' simulations), and plots the two simulated time series, one facet per
##' soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 first parameter vector for `dynamics_season_b1()`.
##' @param args2 second parameter vector for `dynamics_season_b1()`.
##' @param inflos matrix of inflorescence counts, one column per treatment.
##' @return a ggplot object.
plot_sim_fenv_D <- function(args1, args2, inflos) {
  est1 <- dynamics_season_b1(args1, inflos)
  est2 <- dynamics_season_b1(args2, inflos)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
##' Compare simulated larval dynamics under two inflorescence scenarios (model A).
##'
##' Runs `dynamics_A()` with the same parameters on two inflorescence
##' matrices, prints per-treatment summary statistics (totals, larvae per
##' inflorescence, and the percent difference between simulations), and
##' plots the two simulated time series, one facet per soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 parameter vector for `dynamics_A()`.
##' @param inflos1 first inflorescence matrix, one column per treatment.
##' @param inflos2 second inflorescence matrix, same layout.
##' @return a ggplot object.
plot_sim_inflos_A <- function(args1, inflos1, inflos2) {
  est1 <- dynamics_A(args1, inflos1)
  est2 <- dynamics_A(args1, inflos2)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos1, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos2, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos1, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos2, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
##' Compare simulated larval dynamics under two inflorescence scenarios (model C).
##'
##' Runs `dynamics()` with the same parameters on two inflorescence
##' matrices, prints per-treatment summary statistics (totals, larvae per
##' inflorescence, and the percent difference between simulations), and
##' plots the two simulated time series, one facet per soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 parameter vector for `dynamics()`.
##' @param inflos1 first inflorescence matrix, one column per treatment.
##' @param inflos2 second inflorescence matrix, same layout.
##' @return a ggplot object.
plot_sim_inflos_C <- function(args1, inflos1, inflos2) {
  est1 <- dynamics(args1, inflos1)
  est2 <- dynamics(args1, inflos2)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos1, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos2, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos1, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos2, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
##' Compare simulated larval dynamics under two inflorescence scenarios (model D).
##'
##' Runs `dynamics_season_b1()` with the same parameters on two
##' inflorescence matrices, prints per-treatment summary statistics
##' (totals, larvae per inflorescence, and the percent difference between
##' simulations), and plots the two simulated time series, one facet per
##' soil treatment.
##'
##' Relies on global: `date2017`.
##'
##' @param args1 parameter vector for `dynamics_season_b1()`.
##' @param inflos1 first inflorescence matrix, one column per treatment.
##' @param inflos2 second inflorescence matrix, same layout.
##' @return a ggplot object.
plot_sim_inflos_D <- function(args1, inflos1, inflos2) {
  est1 <- dynamics_season_b1(args1, inflos1)
  est2 <- dynamics_season_b1(args1, inflos2)
  larves_sim1 <- apply(est1, 2, sum)
  larves_sim2 <- apply(est2, 2, sum)
  larvesperinf0_sim1 <- apply(est1 / inflos1, 2, mean, na.rm = TRUE)
  larvesperinf0_sim2 <- apply(est2 / inflos2, 2, mean, na.rm = TRUE)
  larvesperinf_sim1 <- larves_sim1 / apply(inflos1, 2, sum)
  larvesperinf_sim2 <- larves_sim2 / apply(inflos2, 2, sum)
  print(list(rbind(larves_sim1, larvesperinf0_sim1, larvesperinf_sim1),
             rbind(larves_sim2, larvesperinf0_sim2, larvesperinf_sim2),
             100 * (larves_sim2 - larves_sim1) / larves_sim2))
  # Soil-treatment factor levels shared by all three panels
  sol_levels <- c("Low weed cover", "Synthetic mulching", "High weed cover")
  # Build the long-format data for one treatment (column j of each matrix)
  make_panel <- function(j, sol) {
    data.frame(date = date2017, est1 = est1[, j], est2 = est2[, j]) %>%
      mutate(Sol = factor(sol, levels = sol_levels)) %>%
      gather(est1, est2, key = statut, value = nombre, factor_key = TRUE)
  }
  to_plot <- bind_rows(make_panel(1, "Low weed cover"),
                       make_panel(2, "Synthetic mulching"),
                       make_panel(3, "High weed cover"))
  to_plot %>% ggplot +
    aes(x = date, y = nombre, color = statut) +
    #geom_point() +
    geom_line(lwd = 1) +
    facet_grid(. ~ Sol) +
    theme_bw() +
    theme(legend.title = element_blank(), legend.position = "right") +
    scale_color_manual(values = c("black", "red"), labels = c("Simulation_1", "Simulation_2")) +
    xlab(element_blank()) +
    theme(axis.text.x = element_text(size=10, angle=45, vjust=1, hjust=1)) +
    ylab("Number of larvae")
}
|
#############
##Libraries##
#############
library(plyr)
library(cowplot)
library(magrittr)
library(ggplot2)
#########################
##Set master plot theme##
#########################
theme_main <- function() {
  # Master plot theme: ggplot2's black-and-white base with enlarged text
  # for axes, facet strips, legend, and a bold title.
  size_overrides <- theme(
    #panel.grid.major = element_blank(),
    #panel.grid.minor = element_blank(),
    axis.text = element_text(size = 15),
    axis.title = element_text(size = 20),
    strip.text = element_text(size = 20),
    legend.text = element_text(size = 15),
    legend.title = element_text(size = 20),
    plot.title = element_text(size = 25, face = "bold")
  )
  theme_bw() + size_overrides
}
# set wd
# NOTE(review): two unconditional setwd() calls in a row -- the second always
# wins, so the first is dead code.  Presumably one per machine; confirm and
# keep only the relevant one (avoiding setwd() in scripts is preferable).
setwd("/Users/wittkopp_member/Code")
setwd("/Users/henryertl/Documents/Devs")
#####################
##Read primary data##
#####################
# Per-region ATAC-seq CPM counts: parental replicates for both genotypes
# (P1_*, P2_*) plus allele-specific hybrid counts (HYB_*_P1 / HYB_*_P2).
full_dataset <- read.delim("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/CPM_transformed_datatables/ZHR_Z30_ATAC_counts_ALLclasses_20min_CPM_centered1000.txt", header = T)
# Drop the class column here; it is re-read and re-joined later in the script.
full_dataset$class <- NULL
colnames(full_dataset) <- c("chrom", "start", "end", "P1_1", "P1_2", "P1_3", "P2_1", "P2_2", "P2_3", "HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2")
# Unique region key (chrom_start_end) used for all downstream joins.
full_dataset$Paste_locus <- paste(full_dataset$chrom, full_dataset$start, full_dataset$end, sep = "_")
Parental_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "P1_1", "P1_2", "P1_3", "P2_1", "P2_2", "P2_3")]
Hybrid_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2")]
####################
##Combine datasets##
####################
# Bayesian test outputs: parental divergence (P_*), hybrid allelic imbalance
# (H_*), and the parent-vs-hybrid contrast (H_P_*).
Parental_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Parental_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
Hybrid_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Hybrid_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
Parental_hybrid_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Parental_Hybrid_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
# plyr::join_all on the shared Paste_locus key; full join keeps every region,
# then any row missing a count or test result is dropped.
Full_results_output <- join_all(list(Parental_data, Hybrid_data, Parental_results, Hybrid_results, Parental_hybrid_results), by = 'Paste_locus', type = 'full')
Full_results_output <- na.omit(Full_results_output) %>% unique
##Apply FDR correction
##First plot distribution of p-values to check for any weirdness
Parent_P_plot <- ggplot(Full_results_output, aes(x = P_p_value)) + geom_histogram(bins = 100) + ggtitle("Parents")
Hybrid_P_plot <- ggplot(Full_results_output, aes(x = H_p_value)) + geom_histogram(bins = 100) + ggtitle("Hybrids")
Parent_hybrid_P_plot <- ggplot(Full_results_output, aes(x = H_P_p_value)) + geom_histogram(bins = 100) + ggtitle("Hybrid-Parents")
p_vals <- plot_grid(Parent_P_plot, Hybrid_P_plot, Parent_hybrid_P_plot, nrow = 3)
# NOTE(review): absolute output path, unlike the relative paths used elsewhere.
ggsave(p_vals, file = "/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/ATAC_seq/Figures/p_vals_ZHR_TSIM_sub_ATAC_20min.pdf", width = 15, height = 15)
##If all is well, run FDR correction
Full_results_output$P_qvalue <- p.adjust(Full_results_output$P_p_value, method = "BY")
Full_results_output$H_qvalue <- p.adjust(Full_results_output$H_p_value, method = "BY")
# NOTE(review): the parent-vs-hybrid test uses "BH" while the two tests above
# use the more conservative "BY" -- confirm this asymmetry is intentional.
Full_results_output$P_H_qvalue <- p.adjust(Full_results_output$H_P_p_value, method = "BH")
####################################
##Get consistent allele directions##
####################################
# Vectorized replacement for the original row-wise loops (identical output:
# upstream na.omit() guarantees complete cases, so no NA branches arise).
classify_direction <- function(a1, a2, a3, b1, b2, b3) {
  # "P1" when all three replicates favour the first allele, "P2" when all
  # favour the second, "Ambig" otherwise (replicates disagree or any tie).
  ifelse(a1 > b1 & a2 > b2 & a3 > b3, "P1",
         ifelse(a1 < b1 & a2 < b2 & a3 < b3, "P2", "Ambig"))
}
# Direction of divergence between the parental genotypes.
Full_results_output$Direction_parent <- with(Full_results_output,
  classify_direction(P1_1, P1_2, P1_3, P2_1, P2_2, P2_3))
# Direction of allelic imbalance within the hybrid.
Full_results_output$Direction_hybrid <- with(Full_results_output,
  classify_direction(HYB_1_P1, HYB_2_P1, HYB_3_P1, HYB_1_P2, HYB_2_P2, HYB_3_P2))
##########################################################################
### Get Parental / Hybrid ratio to classify opposing vs same cis+trans ###
##########################################################################
# |parental effect / hybrid (cis) effect|; used below to split cis+trans
# regions into "same" vs "opposing" direction categories.
Full_results_output$P_H_ratio <- abs(Full_results_output$P_est.mean / Full_results_output$H_est.mean)
####################################################
##Run classifier to establish class of each region##
####################################################
##Set qvalue cut-off
critical_value <- 0.05
##Run classifier
# Decision table on the three q-values (P = parental, H = hybrid/cis,
# P_H = parent-vs-hybrid contrast), with s = significant, n = not:
#   n n n -> Conserved/Ambiguous (also the default for uncovered combos)
#   s s n -> Cis
#   s n s -> Trans
#   s s s, same parent/hybrid direction, ratio > 1 -> Cis_+_Trans,opposing
#   s s s, same parent/hybrid direction, ratio < 1 -> Cis_+_Trans,same
#   s s s, opposite directions                     -> Cis_*_Trans
#   n s s -> Compensatory
# NOTE(review): comparisons are strict (> / <), so a q-value exactly equal
# to critical_value, or P_H_ratio exactly 1, falls through to the default.
# NOTE(review): ratio > 1 (parental divergence exceeding the cis effect) is
# labelled "opposing" and < 1 "same" -- conventionally that mapping is the
# other way around; confirm the intended semantics.
# (Row-wise loop could be vectorized, but the else-if ordering is load-bearing
# and is left untouched here.)
Full_results_output$Regulatory_class <- "Conserved/Ambiguous"
for (i in 1:nrow(Full_results_output)) {
if (Full_results_output$P_qvalue[i] > critical_value & Full_results_output$H_qvalue[i] > critical_value & Full_results_output$P_H_qvalue[i] > critical_value){
Full_results_output$Regulatory_class[i] <- "Conserved/Ambiguous"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] > critical_value){
Full_results_output$Regulatory_class[i] <- "Cis"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] > critical_value & Full_results_output$P_H_qvalue[i] < critical_value){
Full_results_output$Regulatory_class[i] <- "Trans"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] == Full_results_output$Direction_hybrid[i] & Full_results_output$P_H_ratio[i] > 1 & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_+_Trans,opposing"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] == Full_results_output$Direction_hybrid[i] & Full_results_output$P_H_ratio[i] < 1 & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_+_Trans,same"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] != Full_results_output$Direction_hybrid[i] & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_*_Trans"
} else if (Full_results_output$P_qvalue[i] > critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value){
Full_results_output$Regulatory_class[i] <- "Compensatory"
}
}
##Run classifier for opposing and same
# Collapse regulatory classes into a divergence direction.  Vectorized
# replacement for the original row-wise loop (identical output).  The string
# "NA" (not a real NA) is kept from the original; the column is dropped
# again further below anyway.
Full_results_output$Direction <- "NA"
opposing_classes <- c("Cis_+_Trans,opposing", "Cis_*_Trans", "Compensatory")
is_opposing <- Full_results_output$Regulatory_class %in% opposing_classes
Full_results_output$Direction[is_opposing] <- "Opposing"
is_reinforcing <- Full_results_output$Regulatory_class == "Cis_+_Trans,same"
Full_results_output$Direction[is_reinforcing] <- "Reinforcing"
# Counts per direction (value printed when run interactively).
nrow(subset(Full_results_output, Full_results_output$Direction == "Opposing"))
nrow(subset(Full_results_output, Full_results_output$Direction == "Reinforcing"))
#### Compute % CIS and TRANS ####
# Trans effect = parental divergence minus hybrid (cis) effect; percent cis
# is the cis share of total |cis| + |trans| divergence.
# NOTE(review): regions where both terms are 0 would divide by zero -> NaN.
Full_results_output$trans_reg_diff <- Full_results_output$P_est.mean - Full_results_output$H_est.mean
Full_results_output$perc_cis <- (abs(Full_results_output$H_est.mean)/(abs(Full_results_output$H_est.mean) + abs(Full_results_output$trans_reg_diff))) * 100
## Reassign classes
Full_results_output$Direction <- NULL
# Re-read the source table to recover the class column dropped at the top of
# the script, and join it back on Paste_locus.
classes <- read.delim("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/CPM_transformed_datatables/ZHR_Z30_ATAC_counts_ALLclasses_20min_CPM_centered1000.txt", header = T)
classes$Paste_locus <- paste(classes$chrom, classes$start, classes$end, sep = "_")
# Last two columns of the raw table: the region key and the class label.
classes_key <- classes[,c(ncol(classes), ncol(classes)-1)]
# NOTE(review): absolute path belonging to the first (overridden) setwd machine.
write.table(classes_key, file = "/Users/wittkopp_member/Code/Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/BED_files_for_analyses/class_pastelocus.txt", sep = "\t", quote = F, row.names = F)
# NOTE(review): left_join is dplyr, but this script only loads plyr, cowplot,
# magrittr, and ggplot2 -- confirm dplyr is attached elsewhere in the session.
Full_results_output <- left_join(Full_results_output, classes_key, by = "Paste_locus")
##########################
##Generate summary plots##
##########################
# Distribution of percent-cis across all regions.
# NOTE(review): title says "D.sim" while file names reference ZHR_Z30 --
# confirm the species/strain labels.
perc_cis <- ggplot(Full_results_output, aes(perc_cis)) +
  geom_density(color = "darkblue", fill = "lightblue") +
  theme_main() +
  xlab("Percent Cis") +
  # Fixed typo: "accessiblity" -> "accessibility".
  ggtitle("D.mel,ZHR - D.sim Chromatin accessibility divergence")
ggsave(perc_cis, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/perc_cis_ZHR_Z30_ATAC_CPM_20min.pdf", width = 15, height = 15)
# Parental vs hybrid effect estimates for every region, all classes pooled.
cis_trans_ATAC_CPM <- Full_results_output %>%
  ggplot(aes(x = P_est.mean, y = H_est.mean)) +
  geom_point(alpha = 0.3) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_abline(intercept = 0, slope = 1, linetype = "dashed") +
  theme_main() +
  geom_vline(xintercept = 0, linetype = "dashed") +
  xlim(-2, 2) + ylim(-2, 2)
ggsave(cis_trans_ATAC_CPM, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_Z30_sub_ATAC_CPM_20min_ALL.pdf", width = 15, height = 15)
############ DENSITY PLOTS ######
# Local 2-D kernel density of (x, y) evaluated at each input point: fit a
# kde2d surface, then look up the grid cell each point falls in.  Extra
# arguments (e.g. n, h) are forwarded to MASS::kde2d.
get_density <- function(x, y, ...) {
  surface <- MASS::kde2d(x, y, ...)
  grid_cells <- cbind(findInterval(x, surface$x), findInterval(y, surface$y))
  surface$z[grid_cells]
}
# Point density computed separately within each regulatory class, used to
# colour the per-class scatter plots below.  DRY replacement for seven
# near-identical assignments; looping over the classes actually present also
# avoids calling get_density() on an empty subset.
Full_results_output$density <- NA
for (reg_class in unique(Full_results_output$Regulatory_class)) {
  in_class <- Full_results_output$Regulatory_class == reg_class
  Full_results_output$density[in_class] <- get_density(
    Full_results_output$H_est.mean[in_class],
    Full_results_output$P_est.mean[in_class],
    n = 500
  )
}
# One scatter of parental (x) vs hybrid (y) effect estimates for a single
# regulatory class, coloured by local point density.  Factored out of seven
# copy-pasted blocks; guides(col = F) corrected to FALSE (T/F are
# reassignable variables, not reserved words).
plot_reg_class <- function(df, reg_class, plot_title = reg_class) {
  df %>% subset(Regulatory_class == reg_class) %>%
    ggplot(aes(x = P_est.mean, y = H_est.mean)) +
    geom_point(alpha = 0.9) +
    geom_point(aes(P_est.mean, H_est.mean, col = density)) +
    geom_hline(yintercept = 0, linetype = "dashed") +
    geom_abline(intercept = 0, slope = 1, linetype = "dashed") +
    theme_main() +
    geom_vline(xintercept = 0, linetype = "dashed") +
    xlim(-3, 3) + ylim(-3, 3) +
    guides(col = FALSE) +
    ggtitle(plot_title)
}
A <- plot_reg_class(Full_results_output, "Cis")
B <- plot_reg_class(Full_results_output, "Trans")
C <- plot_reg_class(Full_results_output, "Cis_*_Trans", "Cis * Trans")
D <- plot_reg_class(Full_results_output, "Cis_+_Trans,same", "Cis + Trans,same")
E <- plot_reg_class(Full_results_output, "Cis_+_Trans,opposing", "Cis + Trans,opposing")
G <- plot_reg_class(Full_results_output, "Compensatory")
H <- plot_reg_class(Full_results_output, "Conserved/Ambiguous")
# All seven classes in one figure, and a cis+trans-only subset.
facet_all <- plot_grid(A, B, C, D, E, G, H)
ggsave(facet_all, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_TSIM_ATAC_CPM_20min_ALL_reg_classes_facet.pdf", width = 15, height = 15)
facet_cis_trans <- plot_grid(C, D, E, G)
ggsave(facet_cis_trans, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_TSIM_ATAC_CPM_20min_cis_trans_reg_classes_facet.pdf", width = 15, height = 15)
| /AS_ATAC_RNA_2020_10_1/ATAC_pipelines/STEP3_CATEGORIZE_CISTRANS_BAYES_PROCESS/Bayes_model_and_analyses/cis_trans_bayes_model_output_analysis_ATAC_CPM_ZHR_Z30.R | no_license | henryertl/Integrative_AS_genomics | R | false | false | 16,694 | r | #############
##Libraries##
#############
library(plyr)
library(cowplot)
library(magrittr)
library(ggplot2)
#########################
##Set master plot theme##
#########################
theme_main <- function() {
theme_bw() +
theme(
#panel.grid.major = element_blank(),
#panel.grid.minor = element_blank(),
axis.text = element_text(size = 15),
axis.title = element_text(size = 20),
strip.text = element_text(size = 20),
legend.text= element_text(size = 15),
legend.title = element_text(size = 20),
plot.title = element_text(size = 25, face = "bold")
)
}
# set wd
setwd("/Users/wittkopp_member/Code")
setwd("/Users/henryertl/Documents/Devs")
#####################
##Read primary data##
#####################
full_dataset <- read.delim("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/CPM_transformed_datatables/ZHR_Z30_ATAC_counts_ALLclasses_20min_CPM_centered1000.txt", header = T)
full_dataset$class <- NULL
colnames(full_dataset) <- c("chrom", "start", "end", "P1_1", "P1_2", "P1_3", "P2_1", "P2_2", "P2_3", "HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2")
full_dataset$Paste_locus <- paste(full_dataset$chrom, full_dataset$start, full_dataset$end, sep = "_")
Parental_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "P1_1", "P1_2", "P1_3", "P2_1", "P2_2", "P2_3")]
Hybrid_data <- full_dataset[, c("chrom", "start", "end", "Paste_locus", "HYB_1_P1", "HYB_2_P1", "HYB_3_P1", "HYB_1_P2", "HYB_2_P2", "HYB_3_P2")]
####################
##Combine datasets##
####################
Parental_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Parental_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
Hybrid_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Hybrid_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
Parental_hybrid_results <- read.table("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Parental_Hybrid_test_output_ZHR_Z30_ATAC_CPM_macs2_20min_centered1000.txt", header = T) %>% unique()
Full_results_output <- join_all(list(Parental_data, Hybrid_data, Parental_results, Hybrid_results, Parental_hybrid_results), by = 'Paste_locus', type = 'full')
Full_results_output <- na.omit(Full_results_output) %>% unique
##Apply FDR correction
##First plot distribution of p-values to check for any weirdness
Parent_P_plot <- ggplot(Full_results_output, aes(x = P_p_value)) + geom_histogram(bins = 100) + ggtitle("Parents")
Hybrid_P_plot <- ggplot(Full_results_output, aes(x = H_p_value)) + geom_histogram(bins = 100) + ggtitle("Hybrids")
Parent_hybrid_P_plot <- ggplot(Full_results_output, aes(x = H_P_p_value)) + geom_histogram(bins = 100) + ggtitle("Hybrid-Parents")
p_vals <- plot_grid(Parent_P_plot, Hybrid_P_plot, Parent_hybrid_P_plot, nrow = 3)
ggsave(p_vals, file = "/Users/henryertl/Documents/Wittkopp_lab/AS_ATAC_RNA_2020_10_1/ATAC_seq/Figures/p_vals_ZHR_TSIM_sub_ATAC_20min.pdf", width = 15, height = 15)
##If all is well, run FDR correction
Full_results_output$P_qvalue <- p.adjust(Full_results_output$P_p_value, method = "BY")
Full_results_output$H_qvalue <- p.adjust(Full_results_output$H_p_value, method = "BY")
Full_results_output$P_H_qvalue <- p.adjust(Full_results_output$H_P_p_value, method = "BH")
####################################
##Get consistent allele directions##
####################################
Full_results_output$Direction_parent <- NA
for (i in 1:nrow(Full_results_output)) {
if (Full_results_output$P1_1[i] > Full_results_output$P2_1[i] & Full_results_output$P1_2[i] > Full_results_output$P2_2[i] & Full_results_output$P1_3[i] > Full_results_output$P2_3[i]){
Full_results_output$Direction_parent[i] <- "P1"
} else if (Full_results_output$P1_1[i] < Full_results_output$P2_1[i] & Full_results_output$P1_2[i] < Full_results_output$P2_2[i] & Full_results_output$P1_3[i] < Full_results_output$P2_3[i]){
Full_results_output$Direction_parent[i] <- "P2"
} else {Full_results_output$Direction_parent[i] <- "Ambig"}
}
Full_results_output$Direction_hybrid <- NA
for (i in 1:nrow(Full_results_output)) {
if (Full_results_output$HYB_1_P1[i] > Full_results_output$HYB_1_P2[i] & Full_results_output$HYB_2_P1[i] > Full_results_output$HYB_2_P2[i] & Full_results_output$HYB_3_P1[i] > Full_results_output$HYB_3_P2[i]){
Full_results_output$Direction_hybrid[i] <- "P1"
} else if (Full_results_output$HYB_1_P1[i] < Full_results_output$HYB_1_P2[i] & Full_results_output$HYB_2_P1[i] < Full_results_output$HYB_2_P2[i] & Full_results_output$HYB_3_P1[i] < Full_results_output$HYB_3_P2[i]){
Full_results_output$Direction_hybrid[i] <- "P2"
} else {Full_results_output$Direction_hybrid[i] <- "Ambig"}
}
##########################################################################
### Get Parental / Hybrid ratio to classify opposing vs same cis+trans ###
##########################################################################
Full_results_output$P_H_ratio <- abs(Full_results_output$P_est.mean / Full_results_output$H_est.mean)
####################################################
##Run classifier to establish class of each region##
####################################################
##Set qvalue cut-off
critical_value <- 0.05
##Run classifier
Full_results_output$Regulatory_class <- "Conserved/Ambiguous"
for (i in 1:nrow(Full_results_output)) {
if (Full_results_output$P_qvalue[i] > critical_value & Full_results_output$H_qvalue[i] > critical_value & Full_results_output$P_H_qvalue[i] > critical_value){
Full_results_output$Regulatory_class[i] <- "Conserved/Ambiguous"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] > critical_value){
Full_results_output$Regulatory_class[i] <- "Cis"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] > critical_value & Full_results_output$P_H_qvalue[i] < critical_value){
Full_results_output$Regulatory_class[i] <- "Trans"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] == Full_results_output$Direction_hybrid[i] & Full_results_output$P_H_ratio[i] > 1 & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_+_Trans,opposing"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] == Full_results_output$Direction_hybrid[i] & Full_results_output$P_H_ratio[i] < 1 & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_+_Trans,same"
} else if (Full_results_output$P_qvalue[i] < critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value & Full_results_output$Direction_parent[i] != Full_results_output$Direction_hybrid[i] & Full_results_output$Direction_hybrid[i] != "Ambig" & Full_results_output$Direction_parent[i] != "Ambig"){
Full_results_output$Regulatory_class[i] <- "Cis_*_Trans"
} else if (Full_results_output$P_qvalue[i] > critical_value & Full_results_output$H_qvalue[i] < critical_value & Full_results_output$P_H_qvalue[i] < critical_value){
Full_results_output$Regulatory_class[i] <- "Compensatory"
}
}
##Run classifier for opposing and same
Full_results_output$Direction <- "NA"
for (i in 1:nrow(Full_results_output)) {
if (Full_results_output$Regulatory_class[i] == "Cis_+_Trans,opposing"){
Full_results_output$Direction[i] <- "Opposing"
} else if (Full_results_output$Regulatory_class[i] == "Cis_*_Trans"){
Full_results_output$Direction[i] <- "Opposing"
} else if (Full_results_output$Regulatory_class[i] == "Compensatory"){
Full_results_output$Direction[i] <- "Opposing"
} else if (Full_results_output$Regulatory_class[i] == "Cis_+_Trans,same"){
Full_results_output$Direction[i] <- "Reinforcing"
}
}
nrow(subset(Full_results_output, Full_results_output$Direction == "Opposing"))
nrow(subset(Full_results_output, Full_results_output$Direction == "Reinforcing"))
#### Compute % CIS and TRANS ####
Full_results_output$trans_reg_diff <- Full_results_output$P_est.mean - Full_results_output$H_est.mean
Full_results_output$perc_cis <- (abs(Full_results_output$H_est.mean)/(abs(Full_results_output$H_est.mean) + abs(Full_results_output$trans_reg_diff))) * 100
## Reassign classes
Full_results_output$Direction <- NULL
classes <- read.delim("./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/CPM_transformed_datatables/ZHR_Z30_ATAC_counts_ALLclasses_20min_CPM_centered1000.txt", header = T)
classes$Paste_locus <- paste(classes$chrom, classes$start, classes$end, sep = "_")
classes_key <- classes[,c(ncol(classes), ncol(classes)-1)]
write.table(classes_key, file = "/Users/wittkopp_member/Code/Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/BED_files_for_analyses/class_pastelocus.txt", sep = "\t", quote = F, row.names = F)
Full_results_output <- left_join(Full_results_output, classes_key, by = "Paste_locus")
##################################
##Write out full results to file##
##################################
write.table(Full_results_output, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/ATAC_seq_datafiles/Bayes_test_outputs/Full_results_output_ZHR_Z30_ATAC_20min_centered1000_classes.txt", sep = "\t", row.names = F, quote = F)
##########################
##Generate summary plots##
##########################
perc_cis <- ggplot(Full_results_output, aes(perc_cis)) +
geom_density(color="darkblue", fill="lightblue") +
theme_main() +
xlab("Percent Cis") +
ggtitle("D.mel,ZHR - D.sim Chromatin accessiblity divergence")
ggsave(perc_cis, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/perc_cis_ZHR_Z30_ATAC_CPM_20min.pdf", width = 15, height = 15)
cis_trans_ATAC_CPM <- Full_results_output %>%
ggplot(aes(x = P_est.mean, y = H_est.mean)) +
geom_point(alpha = 0.3) +
geom_hline(yintercept = 0, linetype = "dashed") +
geom_abline(intercept = 0, slope = 1, linetype = "dashed") + theme_main() + geom_vline(xintercept = 0, linetype = "dashed") + xlim(-2, 2) + ylim(-2, 2)
ggsave(cis_trans_ATAC_CPM, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_Z30_sub_ATAC_CPM_20min_ALL.pdf", width = 15, height = 15)
############ DENSITY PLOTS ######
get_density <- function(x, y, ...) {
dens <- MASS::kde2d(x, y, ...)
ix <- findInterval(x, dens$x)
iy <- findInterval(y, dens$y)
ii <- cbind(ix, iy)
return(dens$z[ii])
}
Full_results_output$density <- NA
Full_results_output$density[Full_results_output$Regulatory_class == "Cis"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Cis"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Cis"], n = 500)
Full_results_output$density[Full_results_output$Regulatory_class == "Trans"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Trans"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Trans"], n = 500)
Full_results_output$density[Full_results_output$Regulatory_class == "Cis_*_Trans"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Cis_*_Trans"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Cis_*_Trans"], n = 500)
Full_results_output$density[Full_results_output$Regulatory_class == "Cis_+_Trans,same"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Cis_+_Trans,same"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Cis_+_Trans,same"], n = 500)
Full_results_output$density[Full_results_output$Regulatory_class == "Cis_+_Trans,opposing"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Cis_+_Trans,opposing"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Cis_+_Trans,opposing"], n = 500)
Full_results_output$density[Full_results_output$Regulatory_class == "Compensatory"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Compensatory"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Compensatory"], n = 500)
# Fill in point density for the Conserved/Ambiguous class (other classes filled above).
Full_results_output$density[Full_results_output$Regulatory_class == "Conserved/Ambiguous"] <- get_density(Full_results_output$H_est.mean[Full_results_output$Regulatory_class == "Conserved/Ambiguous"], Full_results_output$P_est.mean[Full_results_output$Regulatory_class == "Conserved/Ambiguous"], n = 500)
# Build one density-coloured parental (x) vs hybrid (y) scatter per regulatory
# class. Factoring the plot spec into a helper removes the 7x duplicated chain.
plot_reg_class <- function(class_label, plot_title) {
  Full_results_output %>% subset(Regulatory_class == class_label) %>%
    ggplot(aes(x = P_est.mean, y = H_est.mean)) +
    geom_point(alpha = 0.9) +
    geom_point(aes(P_est.mean, H_est.mean, col = density)) +
    geom_hline(yintercept = 0, linetype = "dashed") +
    geom_abline(intercept = 0, slope = 1, linetype = "dashed") +  # 1:1 diagonal
    theme_main() +
    geom_vline(xintercept = 0, linetype = "dashed") +
    xlim(-3, 3) + ylim(-3, 3) +
    guides(col = FALSE) +  # suppress density legend (FALSE, not deprecated F)
    ggtitle(plot_title)
}
A <- plot_reg_class("Cis", "Cis")
B <- plot_reg_class("Trans", "Trans")
C <- plot_reg_class("Cis_*_Trans", "Cis * Trans")
D <- plot_reg_class("Cis_+_Trans,same", "Cis + Trans,same")
E <- plot_reg_class("Cis_+_Trans,opposing", "Cis + Trans,opposing")
G <- plot_reg_class("Compensatory", "Compensatory")
H <- plot_reg_class("Conserved/Ambiguous", "Conserved/Ambiguous")
# Combined panels: all seven classes, then only the four cis/trans-interaction classes.
facet_all <- plot_grid(A, B, C, D, E, G, H)
ggsave(facet_all, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_TSIM_ATAC_CPM_20min_ALL_reg_classes_facet.pdf", width = 15, height = 15)
facet_cis_trans <- plot_grid(C, D, E, G)
ggsave(facet_cis_trans, file = "./Integrative_AS_genomics/AS_ATAC_RNA_2020_10_1/Figures_centered1000_runs/cis_trans_ZHR_TSIM_ATAC_CPM_20min_cis_trans_reg_classes_facet.pdf", width = 15, height = 15)
|
# EE 232E HW3
# Network analysis of a weighted directed graph with igraph: build the graph,
# extract the giant connected component (GCC), inspect degree distributions,
# and compare community-detection strategies on undirected projections.
library("igraph")
## Data Processing
# Reading in the data from the file and storing the edge set.
# The file has three whitespace-separated numeric columns:
# source node, target node, edge weight.
data_list = scan("/Users/Sidharth/Desktop/OneDrive/Courses_UCLA/Spring16/EE232E/Homeworks/HW3_104588717_904588105_004589213/sorted_directed_net.txt",what = list(0,0,0))
# Node ids in the file are 0-based; igraph vertex ids are 1-based.
edge_in <- data_list[[1]] + 1
edge_out <- data_list[[2]] + 1
edge_set = cbind(edge_in,edge_out)
# Creating the graph from the edge set
network <- graph.edgelist(el = edge_set, directed = TRUE)
# Third column holds the edge weights.
E(network)$weight <- data_list[[3]]
## Question 1
# Checking for connectivity of the full directed network
is.connected(network)
# Sanity check: sizes of the strongly connected components
gcc_strong <- clusters(network,mode = "strong")
gcc_strong$csize[8]
# Creating the giant connected component: decompose into (weakly) connected
# components and keep the one with the largest vertex count.
network_component_list <- decompose.graph(network)
gcc_index <- which.max(sapply(network_component_list,vcount))
gcc <- network_component_list[[gcc_index]]
vcount(gcc)
plot(gcc, main = "Greatest Connected Component",vertex.size=1,vertex.label=NA)
## Question 2
# Degree distribution of the GCC
# in-degree
plot(degree.distribution(gcc, mode = "in"),main = "In-degree distribution of the GCC")
# out-degree
plot(degree.distribution(gcc, mode = "out"),main = "Out-degree distribution of the GCC")
## Question 3
# Option 1: keep each directed edge as its own undirected edge (multigraph),
# then run label propagation.
gcc_undirected_1 <- as.undirected(gcc,mode="each")
gcc_undirected_1_comm <- label.propagation.community(gcc_undirected_1)
modularity(gcc_undirected_1_comm)
print(sizes(gcc_undirected_1_comm))
# Option 2: collapse reciprocal edges into one undirected edge whose weight is
# the geometric mean of the two directed weights (product here, sqrt below).
gcc_undirected_2 <- as.undirected(gcc,mode = "collapse",edge.attr.comb = list(weight = "prod"))
E(gcc_undirected_2)$weight <- sqrt(E(gcc_undirected_2)$weight)
gcc_undirected_2_lpc_comm <- label.propagation.community(gcc_undirected_2)
modularity(gcc_undirected_2_lpc_comm)
print(sizes(gcc_undirected_2_lpc_comm))
# Fast-greedy modularity optimization for comparison.
gcc_undirected_2_fg_comm <- fastgreedy.community(gcc_undirected_2)
modularity(gcc_undirected_2_fg_comm)
print(sizes(gcc_undirected_2_fg_comm)) | /HW3_104588717_904588105_004589213/HW3_104588717_904588105_004589213.R | no_license | JunnanLiu/EE232E-Graphs-and-Network-Flows | R | false | false | 1,902 | r | # EE 232E HW3
library("igraph")
## Data Processing
# Reading in the data from the file and storing the edge set.
# The file has three whitespace-separated numeric columns:
# source node, target node, edge weight.
data_list = scan("/Users/Sidharth/Desktop/OneDrive/Courses_UCLA/Spring16/EE232E/Homeworks/HW3_104588717_904588105_004589213/sorted_directed_net.txt",what = list(0,0,0))
# Node ids in the file are 0-based; igraph vertex ids are 1-based.
edge_in <- data_list[[1]] + 1
edge_out <- data_list[[2]] + 1
edge_set = cbind(edge_in,edge_out)
# Creating the graph from the edge set
network <- graph.edgelist(el = edge_set, directed = TRUE)
# Third column holds the edge weights.
E(network)$weight <- data_list[[3]]
## Question 1
# Checking for connectivity of the full directed network
is.connected(network)
# Sanity check: sizes of the strongly connected components
gcc_strong <- clusters(network,mode = "strong")
gcc_strong$csize[8]
# Creating the giant connected component: decompose into (weakly) connected
# components and keep the one with the largest vertex count.
network_component_list <- decompose.graph(network)
gcc_index <- which.max(sapply(network_component_list,vcount))
gcc <- network_component_list[[gcc_index]]
vcount(gcc)
plot(gcc, main = "Greatest Connected Component",vertex.size=1,vertex.label=NA)
## Question 2
# Degree distribution of the GCC
# in-degree
plot(degree.distribution(gcc, mode = "in"),main = "In-degree distribution of the GCC")
# out-degree
plot(degree.distribution(gcc, mode = "out"),main = "Out-degree distribution of the GCC")
## Question 3
# Option 1: keep each directed edge as its own undirected edge (multigraph),
# then run label propagation.
gcc_undirected_1 <- as.undirected(gcc,mode="each")
gcc_undirected_1_comm <- label.propagation.community(gcc_undirected_1)
modularity(gcc_undirected_1_comm)
print(sizes(gcc_undirected_1_comm))
# Option 2: collapse reciprocal edges into one undirected edge whose weight is
# the geometric mean of the two directed weights (product here, sqrt below).
gcc_undirected_2 <- as.undirected(gcc,mode = "collapse",edge.attr.comb = list(weight = "prod"))
E(gcc_undirected_2)$weight <- sqrt(E(gcc_undirected_2)$weight)
gcc_undirected_2_lpc_comm <- label.propagation.community(gcc_undirected_2)
modularity(gcc_undirected_2_lpc_comm)
print(sizes(gcc_undirected_2_lpc_comm))
# Fast-greedy modularity optimization for comparison.
gcc_undirected_2_fg_comm <- fastgreedy.community(gcc_undirected_2)
modularity(gcc_undirected_2_fg_comm)
print(sizes(gcc_undirected_2_fg_comm)) |
################################ Matriz Confusion ###########################
library(tidyverse)
library(corrplot)
library(polycor)
library(glm2)
library(pscl)
library(boot)
library(VGAM)
# For reproducibility of the simulations
set.seed(27)
# Sample size: number of rows in the survey data
n_muestra <- dim(CData_CDMX2[1])[1]
# Fit the winning model: zero-inflated Poisson regression with a logit link
# for the zero-inflation part (count formula | zero formula).
mod_dist_4 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2, dist="poisson",link="logit")
summary(mod_dist_4)
# Extract the fitted values: p = probability of a structural zero,
# lambda = expected count from the Poisson part (one per observation)
p <- predict(mod_dist_4, type = "zero")
lambda <- predict(mod_dist_4, type = "count")
# Simulate one replicate of the response from the fitted ZIP model
sim <- rzipois(n_muestra, lambda = lambda, pstr0 = p)
# Confusion table of simulated vs observed counts; the diagonal share is the
# proportion of exact matches
conf <- table(sim,CData_CDMX2$Vic_Rob_As)
sum(diag(conf))/n_muestra
######### Funcion Multiples simulaciones ###########################
# Average exact-match rate of a fitted zero-inflated Poisson model:
# simulate n full samples from the fitted model and, for each, compute the
# proportion of simulated responses that equal the observed ones.
# Fixes in this version: the original indexed `aux[i]` with an undefined `i`,
# built the confusion table inside the draw loop from partially-filled
# column means, and returned mean(sim) instead of the mean success rate.
sim_conf_mat_zeroinfl <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                                  muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  p <- predict(mod, type = "zero")        # probability of a structural zero
  lambda <- predict(mod, type = "count")  # expected count (Poisson mean)
  aux <- rep(0, times = n)                # success proportion per replication
  sim <- array(NA, dim = c(n, muest.size))
  # Column j holds the n replicate draws for observation j.
  for (j in seq_len(muest.size)) {
    sim[, j] <- rzipois(n, lambda = lambda[j], pstr0 = p[j])
  }
  # Per replication: confusion table of simulated vs observed responses;
  # the diagonal share is the exact-match proportion.
  for (i in seq_len(n)) {
    conf <- table(sim[i, ], res)
    aux[i] <- sum(diag(conf)) / muest.size
  }
  return(mean(aux))  # average success proportion across replications
}
######### ESTA ES LA CHIDA Funcion Multiples simulaciones SEGUNDA versión######
# Average exact-match rate of a fitted zero-inflated Poisson model
# ("the good one" — second version). For each of n replications, draw one
# simulated response per observation and record the proportion of draws that
# exactly match the observed responses; return the mean proportion.
sim_conf_mat_zeroinfl2 <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                                   muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  zero_prob <- predict(mod, type = "zero")   # probability of a structural zero
  pois_mean <- predict(mod, type = "count")  # expected count (Poisson mean)
  hit_rate <- vapply(seq_len(n), function(rep_idx) {
    # One simulated response per observation from the fitted model.
    draws <- vapply(seq_len(muest.size), function(obs_idx) {
      rzipois(1, lambda = pois_mean[obs_idx], pstr0 = zero_prob[obs_idx])
    }, numeric(1))
    # Diagonal share of the simulated-vs-observed confusion table.
    crosstab <- table(draws, res)
    sum(diag(crosstab)) / muest.size
  }, numeric(1))
  mean(hit_rate)  # average success proportion across replications
}
######### Funcion Multiples simulaciones MEDIA########################
# Mean of one sample simulated from a fitted zero-inflated Poisson model:
# draw one response per observation from the fitted ZIP components and
# return the sample mean.
sim_media_zeroinfl <- function(mod, muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  zero_prob <- predict(mod, type = "zero")   # probability of a structural zero
  pois_mean <- predict(mod, type = "count")  # expected count (Poisson mean)
  draws <- vapply(seq_len(muest.size), function(obs_idx) {
    rzipois(1, lambda = pois_mean[obs_idx], pstr0 = zero_prob[obs_idx])
  }, numeric(1))
  mean(draws)  # mean of the simulated sample
}
######### Funcion Multiples simulaciones REGPOISSON ##############
# Average exact-match rate of a fitted (plain) Poisson regression:
# simulate n samples from the fitted model and average the proportion of
# simulated responses that equal the observed ones.
# Fixes in this version: the original ignored its `mod` argument (it read the
# global `mod_PoiSin7`), used an undefined `N`, and its replication loop drew
# from the global ZIP `lambda`/`p` via rzipois instead of the Poisson fit.
sim_conf_mat_poi <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                             muest.size=n_muestra){
  # Expected count per observation; predict() on a glm-style Poisson fit
  # returns the linear predictor on the log scale, hence exp().
  mu <- exp(predict(mod))
  aux <- rep(0, times = n)  # success proportion per replication
  sim <- array(NA, dim = c(muest.size))
  for (i in seq_len(n)) {
    # One simulated response per observation from the Poisson model.
    for (j in seq_len(muest.size)) {
      sim[j] <- rpois(1, lambda = mu[j])
    }
    # Diagonal share of the simulated-vs-observed confusion table.
    conf <- table(sim, res)
    aux[i] <- sum(diag(conf)) / muest.size
  }
  return(mean(aux))  # average success proportion across replications
}
######### Funcion Multiples simulaciones BAYES ##############
# Average exact-match rate for a Bayesian ZIP fit: `pred` carries the
# posterior estimates pred$p (structural-zero probability) and pred$lambda
# (Poisson mean), one per observation. For each of n replications, draw a
# full simulated sample and record the proportion of exact matches with the
# observed responses; return the mean proportion.
sim_conf_mat_bayes <- function(pred, res=CData_CDMX2$Vic_Rob_As, n=100,
                               muest.size=n_muestra){
  hit_rate <- vapply(seq_len(n), function(rep_idx) {
    # One simulated response per observation from the posterior estimates.
    draws <- vapply(seq_len(muest.size), function(obs_idx) {
      rzipois(1, lambda = pred$lambda[obs_idx], pstr0 = pred$p[obs_idx])
    }, numeric(1))
    # Diagonal share of the simulated-vs-observed confusion table.
    crosstab <- table(draws, res)
    sum(diag(crosstab)) / muest.size
  }, numeric(1))
  mean(hit_rate)  # average success proportion across replications
}
######### Funcion Multiples simulaciones BAYES MEDIA ########################
# Mean of one sample simulated from the Bayesian ZIP estimates in `pred`
# (pred$lambda = Poisson mean, pred$p = structural-zero probability).
sim_media_bayes <- function(pred, muest.size=n_muestra){
  draws <- vapply(seq_len(muest.size), function(obs_idx) {
    rzipois(1, lambda = pred$lambda[obs_idx], pstr0 = pred$p[obs_idx])
  }, numeric(1))
  mean(draws)  # mean of the simulated sample
}
| /ZIP/matriz_conf_zeroinfl.R | no_license | Luis-2199/BayesProject | R | false | false | 4,865 | r | ################################ Matriz Confusion ###########################
library(tidyverse)
library(corrplot)
library(polycor)
library(glm2)
library(pscl)
library(boot)
library(VGAM)
# For reproducibility of the simulations
set.seed(27)
# Sample size: number of rows in the survey data
n_muestra <- dim(CData_CDMX2[1])[1]
# Fit the winning model: zero-inflated Poisson regression with a logit link
# for the zero-inflation part (count formula | zero formula).
mod_dist_4 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2, dist="poisson",link="logit")
summary(mod_dist_4)
# Extract the fitted values: p = probability of a structural zero,
# lambda = expected count from the Poisson part (one per observation)
p <- predict(mod_dist_4, type = "zero")
lambda <- predict(mod_dist_4, type = "count")
# Simulate one replicate of the response from the fitted ZIP model
sim <- rzipois(n_muestra, lambda = lambda, pstr0 = p)
# Confusion table of simulated vs observed counts; the diagonal share is the
# proportion of exact matches
conf <- table(sim,CData_CDMX2$Vic_Rob_As)
sum(diag(conf))/n_muestra
######### Funcion Multiples simulaciones ###########################
# Average exact-match rate of a fitted zero-inflated Poisson model:
# simulate n full samples from the fitted model and, for each, compute the
# proportion of simulated responses that equal the observed ones.
# Fixes in this version: the original indexed `aux[i]` with an undefined `i`,
# built the confusion table inside the draw loop from partially-filled
# column means, and returned mean(sim) instead of the mean success rate.
sim_conf_mat_zeroinfl <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                                  muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  p <- predict(mod, type = "zero")        # probability of a structural zero
  lambda <- predict(mod, type = "count")  # expected count (Poisson mean)
  aux <- rep(0, times = n)                # success proportion per replication
  sim <- array(NA, dim = c(n, muest.size))
  # Column j holds the n replicate draws for observation j.
  for (j in seq_len(muest.size)) {
    sim[, j] <- rzipois(n, lambda = lambda[j], pstr0 = p[j])
  }
  # Per replication: confusion table of simulated vs observed responses;
  # the diagonal share is the exact-match proportion.
  for (i in seq_len(n)) {
    conf <- table(sim[i, ], res)
    aux[i] <- sum(diag(conf)) / muest.size
  }
  return(mean(aux))  # average success proportion across replications
}
######### ESTA ES LA CHIDA Funcion Multiples simulaciones SEGUNDA versión######
# Average exact-match rate of a fitted zero-inflated Poisson model
# ("the good one" — second version). For each of n replications, draw one
# simulated response per observation and record the proportion of draws that
# exactly match the observed responses; return the mean proportion.
sim_conf_mat_zeroinfl2 <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                                   muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  zero_prob <- predict(mod, type = "zero")   # probability of a structural zero
  pois_mean <- predict(mod, type = "count")  # expected count (Poisson mean)
  hit_rate <- vapply(seq_len(n), function(rep_idx) {
    # One simulated response per observation from the fitted model.
    draws <- vapply(seq_len(muest.size), function(obs_idx) {
      rzipois(1, lambda = pois_mean[obs_idx], pstr0 = zero_prob[obs_idx])
    }, numeric(1))
    # Diagonal share of the simulated-vs-observed confusion table.
    crosstab <- table(draws, res)
    sum(diag(crosstab)) / muest.size
  }, numeric(1))
  mean(hit_rate)  # average success proportion across replications
}
######### Funcion Multiples simulaciones MEDIA########################
# Mean of one sample simulated from a fitted zero-inflated Poisson model:
# draw one response per observation from the fitted ZIP components and
# return the sample mean.
sim_media_zeroinfl <- function(mod, muest.size=n_muestra){
  # Fitted ZIP components, one value per observation.
  zero_prob <- predict(mod, type = "zero")   # probability of a structural zero
  pois_mean <- predict(mod, type = "count")  # expected count (Poisson mean)
  draws <- vapply(seq_len(muest.size), function(obs_idx) {
    rzipois(1, lambda = pois_mean[obs_idx], pstr0 = zero_prob[obs_idx])
  }, numeric(1))
  mean(draws)  # mean of the simulated sample
}
######### Funcion Multiples simulaciones REGPOISSON ##############
# Average exact-match rate of a fitted (plain) Poisson regression:
# simulate n samples from the fitted model and average the proportion of
# simulated responses that equal the observed ones.
# Fixes in this version: the original ignored its `mod` argument (it read the
# global `mod_PoiSin7`), used an undefined `N`, and its replication loop drew
# from the global ZIP `lambda`/`p` via rzipois instead of the Poisson fit.
sim_conf_mat_poi <- function(mod, res=CData_CDMX2$Vic_Rob_As, n=100,
                             muest.size=n_muestra){
  # Expected count per observation; predict() on a glm-style Poisson fit
  # returns the linear predictor on the log scale, hence exp().
  mu <- exp(predict(mod))
  aux <- rep(0, times = n)  # success proportion per replication
  sim <- array(NA, dim = c(muest.size))
  for (i in seq_len(n)) {
    # One simulated response per observation from the Poisson model.
    for (j in seq_len(muest.size)) {
      sim[j] <- rpois(1, lambda = mu[j])
    }
    # Diagonal share of the simulated-vs-observed confusion table.
    conf <- table(sim, res)
    aux[i] <- sum(diag(conf)) / muest.size
  }
  return(mean(aux))  # average success proportion across replications
}
######### Funcion Multiples simulaciones BAYES ##############
# Average exact-match rate for a Bayesian ZIP fit: `pred` carries the
# posterior estimates pred$p (structural-zero probability) and pred$lambda
# (Poisson mean), one per observation. For each of n replications, draw a
# full simulated sample and record the proportion of exact matches with the
# observed responses; return the mean proportion.
sim_conf_mat_bayes <- function(pred, res=CData_CDMX2$Vic_Rob_As, n=100,
                               muest.size=n_muestra){
  hit_rate <- vapply(seq_len(n), function(rep_idx) {
    # One simulated response per observation from the posterior estimates.
    draws <- vapply(seq_len(muest.size), function(obs_idx) {
      rzipois(1, lambda = pred$lambda[obs_idx], pstr0 = pred$p[obs_idx])
    }, numeric(1))
    # Diagonal share of the simulated-vs-observed confusion table.
    crosstab <- table(draws, res)
    sum(diag(crosstab)) / muest.size
  }, numeric(1))
  mean(hit_rate)  # average success proportion across replications
}
######### Funcion Multiples simulaciones BAYES MEDIA ########################
# Mean of one sample simulated from the Bayesian ZIP estimates in `pred`
# (pred$lambda = Poisson mean, pred$p = structural-zero probability).
sim_media_bayes <- function(pred, muest.size=n_muestra){
  draws <- vapply(seq_len(muest.size), function(obs_idx) {
    rzipois(1, lambda = pred$lambda[obs_idx], pstr0 = pred$p[obs_idx])
  }, numeric(1))
  mean(draws)  # mean of the simulated sample
}
|
# Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
# Assign your name to the variable `my.name`
# Assign your height to a variable `my.height`
# Create a variable `puppies` equal to the number of puppies you'd like to have
# Create a variable `puppy.price`, which is how expensive you think a puppy is
# Create a variable `total.cost` that has the total cost of all of your puppies
# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
# Create a variable `max.puppies`, which is the number of puppies you can afford for $1K.
hometown <- "Seattle"  # was missing: the exercise asks for `hometown`
my.name <- "Anushna"
my.height <- 5.6
puppies <- 20
puppy.price <- 5000
total.cost <- puppies * puppy.price
too.expensive <- total.cost > 1000
# floor() so this is a whole number of puppies (1000/5000 = 0.2 is not a count)
max.puppies <- floor(1000 / puppy.price)
| /exercise-1/exercise.R | permissive | anushnap/m5-r-intro | R | false | false | 820 | r | # Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
# Assign your name to the variable `my.name`
# Assign your height to a variable `my.height`
# Create a variable `puppies` equal to the number of puppies you'd like to have
# Create a variable `puppy.price`, which is how expensive you think a puppy is
# Create a variable `total.cost` that has the total cost of all of your puppies
# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
# Create a variable `max.puppies`, which is the number of puppies you can afford for $1K.
hometown <- "Seattle"  # was missing: the exercise asks for `hometown`
my.name <- "Anushna"
my.height <- 5.6
puppies <- 20
puppy.price <- 5000
total.cost <- puppies * puppy.price
too.expensive <- total.cost > 1000
# floor() so this is a whole number of puppies (1000/5000 = 0.2 is not a count)
max.puppies <- floor(1000 / puppy.price)
|
# Auto-generated fuzzing/valgrind test harness: call the internal compiled
# MHMM routine oneEMgammaCPP with a fixed, minimal argument list.
testlist <- list(g = 0L, ws = NULL, xs = NULL, val = numeric(0), my_ws = numeric(0), my_xs = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(MHMM:::oneEMgammaCPP,testlist)
str(result) | /MHMM/inst/testfiles/oneEMgammaCPP/AFL_oneEMgammaCPP/oneEMgammaCPP_valgrind_files/1616007579-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 209 | r | testlist <- list(g = 0L, ws = NULL, xs = NULL, val = numeric(0), my_ws = numeric(0), my_xs = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(MHMM:::oneEMgammaCPP,testlist)
str(result) |
# =====================================================================
# This is the program to find a configuration value.
# Created by Dr. Jaeho H. BAE
# Assistant Professor
# Dept. of Logistics & Distribution Mgmt. at Hyechon College
# May, 2013.
# chillangri@gmail.com
# =====================================================================
readCFG <- function(valOpt) {
# Look up one option value by name in the option.cfg file (CSV with columns
# Name and Option_Value; lines starting with '#' are comments).
Opt_CFG <- "./source_code/option.cfg"
CFG <- as.data.frame(read.csv(Opt_CFG, header = TRUE, sep = ",", quote="\"", dec=".", comment.char="#"))
# Keep the Option_Value entries whose Name matches the requested option.
valResult <- as.data.frame(subset(CFG, Name==valOpt, select=c(Option_Value)))
valResult <- as.character(valResult$Option_Value)
# Return the value as a character string with all blanks stripped.
return(gsub(" ", "", valResult))
} | /source_code/readCFG.r | no_license | Chillangri/AHP_Simul | R | false | false | 912 | r | # =====================================================================
# This is the program to find a configuration value.
# Created by Dr. Jaeho H. BAE
# Assistant Professor
# Dept. of Logistics & Distribution Mgmt. at Hyechon College
# May, 2013.
# chillangri@gmail.com
# =====================================================================
readCFG <- function(valOpt) {
  # Parse the option file (CSV with columns Name and Option_Value;
  # '#' lines are comments). read.csv already returns a data frame,
  # so no extra as.data.frame() wrapping is needed.
  cfgPath <- "./source_code/option.cfg"
  cfgTable <- read.csv(cfgPath, header = TRUE, sep = ",", quote = "\"",
                       dec = ".", comment.char = "#")
  # Keep the Option_Value entries whose Name matches the requested option.
  hit <- subset(cfgTable, Name == valOpt, select = c(Option_Value))
  # Return the value as a character string with all blanks stripped.
  gsub(" ", "", as.character(hit$Option_Value))
}
#################### NOTE ("LET OP"): this script contains different mis500 variables!! See meeting Mariska 14-06-2021
# MainSimulationScriptSLURM (following the code of the manual)
# One SLURM task runs one replication of one cell of the simulation design;
# the design row and the replication number arrive as command-line arguments.
args <- commandArgs(TRUE) #SLURM command
args <- as.numeric(args)
RowOfDesign <- args[1]
Replication <- args[2]
############################# Simulation Design #############################
factors <- c(2,4,6,8) #number of latent variables
nobs <- c(200,400,800) #sample size
##Create the simulation design matrix (full factorial)
Design_mixed <- expand.grid(factors = factors, nobs = nobs)
#load packages
library(lavaan)
library(usethis)
# load functions
source("MIXED_all_functions_script.R")
################################ Simulation start (1 cell) ##########################
# initialize values
tmp <- proc.time()  # start time, for the (optional) timing code further below
nvarp <- 6  # indicators per factor
fact <- Design_mixed[RowOfDesign,1]  # number of latent factors in this cell
nvar <- nvarp*fact  # total number of observed variables
# One-row status matrix per estimator/model combination; column 2 is set to 1
# below when the corresponding cfa() call fails (silent check).
input<- cbind(1, 0)
mis_W_withoutC <- as.matrix(input)
mis_W_withC <- as.matrix(input)
mis_P_withoutC <- as.matrix(input)
mis_P_withC <- as.matrix(input)
# Generate data
# set a random number seed to be able to replicate the result exactly
set.seed((Replication + 1000)*RowOfDesign)
SimDat <- do.call(MyDataGeneration, Design_mixed[RowOfDesign,] )
#### WLS - Model without specified cross-loadings (with silent check) ####
# Fit the model without cross-loadings with robust WLS; try() keeps the
# simulation alive when lavaan errors out.
fit_WLS_withoutC <- try(cfa( model <- model_withoutC(fact),
                        data=SimDat, std.lv=TRUE,
                        ordered = colnames(SimDat[indexes_ord(fact)]),
                        estimator="WLSMV"),silent=TRUE)
if(inherits(fit_WLS_withoutC, "try-error")) {
  # Estimation failed: flag this replication and store NA placeholders so the
  # write.csv section at the bottom of the script still works.
  mis_W_withoutC[1,2] <- 1
  MyResult_WLS_withoutC_est <- matrix(NA,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorEst("WLS",
                                                                         "withoutC",
                                                                         fact)))
  MyResult_WLS_withoutC_err <- matrix(NA,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorSE("WLS",
                                                                        "withoutC",
                                                                        fact)))
  MyResult_WLS_withoutC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
  # parameter estimates (free parameters only)
  index <- which(fit_WLS_withoutC@ParTable$free != 0)
  MyAnalysisResult_WLS_withoutC_est <- fit_WLS_withoutC@ParTable$est[index]
  MyResult_WLS_withoutC_est <- matrix(MyAnalysisResult_WLS_withoutC_est,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorEst("WLS",
                                                                         "withoutC",
                                                                         fact)))
  colnames(MyResult_WLS_withoutC_est) <- ColnamesGeneratorEst("WLS", "withoutC", fact)
  # standard errors
  MyAnalysisResult_WLS_withoutC_err <- fit_WLS_withoutC@ParTable$se[index]
  MyResult_WLS_withoutC_err <- matrix(MyAnalysisResult_WLS_withoutC_err,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorSE("WLS",
                                                                        "withoutC",
                                                                        fact)))
  colnames(MyResult_WLS_withoutC_err) <- ColnamesGeneratorSE("WLS",
                                                             "withoutC",
                                                             fact)
  ### FITINDICES
  FI_WLS_withoutC <- fitMeasures(fit_WLS_withoutC,
                                 c("chisq","df",
                                   "pvalue", "cfi",
                                   "srmr"))
  MyResult_WLS_withoutC_FI<- matrix(FI_WLS_withoutC, nrow = 1, ncol = 5)
  colnames(MyResult_WLS_withoutC_FI) <- c("chisq","df",
                                          "pvalue", "cfi",
                                          "srmr")
}
#### WLS - Model with specified cross-loadings (with silent check) ####
# Fit the model that also specifies the cross-loadings, robust WLS.
fit_WLS_withC <- try(cfa( model <- model_withC(fact),
                          data=SimDat, std.lv=TRUE,
                          ordered = colnames(SimDat[indexes_ord(fact)]),
                          estimator="WLSMV"),silent=TRUE)
if(inherits(fit_WLS_withC, "try-error")) {
  # Estimation failed: flag this replication and store NA placeholders.
  mis_W_withC[1,2] <- 1
  MyResult_WLS_withC_est <- matrix(NA,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorEst("WLS",
                                                                      "withC",
                                                                      fact)))
  MyResult_WLS_withC_err <- matrix(NA,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorSE("WLS",
                                                                     "withC",
                                                                     fact)))
  MyResult_WLS_withC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
  # parameter estimates (free parameters only)
  index <- which(fit_WLS_withC@ParTable$free != 0)
  MyAnalysisResult_WLS_withC_est <- fit_WLS_withC@ParTable$est[index]
  MyResult_WLS_withC_est <- matrix(MyAnalysisResult_WLS_withC_est,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorEst("WLS",
                                                                      "withC",
                                                                      fact)))
  colnames(MyResult_WLS_withC_est) <- ColnamesGeneratorEst("WLS", "withC", fact)
  # standard errors
  MyAnalysisResult_WLS_withC_err <- fit_WLS_withC@ParTable$se[index]
  MyResult_WLS_withC_err <- matrix(MyAnalysisResult_WLS_withC_err,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorSE("WLS",
                                                                     "withC",
                                                                     fact)))
  colnames(MyResult_WLS_withC_err) <- ColnamesGeneratorSE("WLS", "withC", fact)
  ### FITINDICES
  FI_WLS_withC <- fitMeasures(fit_WLS_withC, c("chisq","df",
                                               "pvalue", "cfi",
                                               "srmr"))
  MyResult_WLS_withC_FI <- matrix(FI_WLS_withC, nrow = 1, ncol = 5)
  colnames(MyResult_WLS_withC_FI) <- c("chisq","df",
                                       "pvalue", "cfi",
                                       "srmr")
}
#### PML - Model without specified cross-loadings (with silent check) ####
# Same model as the first WLS block, estimated with pairwise maximum
# likelihood (PML); fit indices use the scaled/robust versions.
fit_PML_withoutC <- try(cfa( model <- model_withoutC(fact),
                             data=SimDat, std.lv=TRUE,
                             ordered = colnames(SimDat[indexes_ord(fact)]),
                             estimator="PML"),silent=TRUE)
if(inherits(fit_PML_withoutC, "try-error")) {
  # Estimation failed: flag this replication and store NA placeholders.
  mis_P_withoutC[1,2] <- 1
  MyResult_PML_withoutC_est <- matrix(NA,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorEst("PML",
                                                                         "withoutC",
                                                                         fact)))
  MyResult_PML_withoutC_err <- matrix(NA,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorSE("PML",
                                                                        "withoutC",
                                                                        fact)))
  MyResult_PML_withoutC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
  # parameter estimates (free parameters only)
  index <- which(fit_PML_withoutC@ParTable$free != 0)
  MyAnalysisResult_PML_withoutC_est <- fit_PML_withoutC@ParTable$est[index]
  MyResult_PML_withoutC_est <- matrix(MyAnalysisResult_PML_withoutC_est,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorEst("PML",
                                                                         "withoutC",
                                                                         fact)))
  colnames(MyResult_PML_withoutC_est) <- ColnamesGeneratorEst("PML", "withoutC", fact)
  # standard errors
  MyAnalysisResult_PML_withoutC_err <- fit_PML_withoutC@ParTable$se[index]
  MyResult_PML_withoutC_err <- matrix(MyAnalysisResult_PML_withoutC_err,
                                      nrow = 1,
                                      ncol = length(ColnamesGeneratorSE("PML",
                                                                        "withoutC",
                                                                        fact)))
  colnames(MyResult_PML_withoutC_err) <- ColnamesGeneratorSE("PML", "withoutC", fact)
  ### FITINDICES (scaled versions, as reported for PML)
  FI_PML_withoutC <- fitMeasures(fit_PML_withoutC,
                                 c("chisq.scaled","df.scaled",
                                   "pvalue.scaled", "cfi.scaled",
                                   "srmr"))
  MyResult_PML_withoutC_FI<- matrix(FI_PML_withoutC, nrow = 1, ncol = 5)
  colnames(MyResult_PML_withoutC_FI) <- c("chisq.scaled", "df.scaled",
                                          "pvalue.scaled", "cfi.scaled",
                                          "srmr")
}
#### PML - Model with specified cross-loadings (with silent check) ####
# Model with cross-loadings, estimated with pairwise maximum likelihood.
fit_PML_withC <- try(cfa( model <- model_withC(fact),
                          data=SimDat, std.lv=TRUE,
                          ordered = colnames(SimDat[indexes_ord(fact)]),
                          estimator="PML"),silent=TRUE)
if(inherits(fit_PML_withC, "try-error")) {
  # Estimation failed: flag this replication and store NA placeholders so the
  # write.csv section below still works. (The original referenced
  # MyAnalysisResult_PML_withC_est / FI_PML_withC here, which do not exist
  # when cfa() errors, crashing the whole task; mirror the other branches.)
  mis_P_withC[1,2] <- 1
  MyResult_PML_withC_est <- matrix(NA,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorEst("PML",
                                                                      "withC",
                                                                      fact)))
  MyResult_PML_withC_err <- matrix(NA,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorSE("PML",
                                                                     "withC",
                                                                     fact)))
  MyResult_PML_withC_FI <- matrix(NA, nrow = 1, ncol = 5)
} else {
  # parameter estimates (free parameters only)
  index <- which(fit_PML_withC@ParTable$free != 0)
  MyAnalysisResult_PML_withC_est <- fit_PML_withC@ParTable$est[index]
  MyResult_PML_withC_est <- matrix(MyAnalysisResult_PML_withC_est,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorEst("PML",
                                                                      "withC",
                                                                      fact)))
  colnames(MyResult_PML_withC_est) <- ColnamesGeneratorEst("PML", "withC", fact)
  # standard errors
  MyAnalysisResult_PML_withC_err <- fit_PML_withC@ParTable$se[index]
  MyResult_PML_withC_err <- matrix(MyAnalysisResult_PML_withC_err,
                                   nrow = 1,
                                   ncol = length(ColnamesGeneratorSE("PML",
                                                                     "withC",
                                                                     fact)))
  colnames(MyResult_PML_withC_err) <- ColnamesGeneratorSE("PML", "withC", fact)
  ### FITINDICES (scaled versions, as reported for PML)
  FI_PML_withC <- fitMeasures(fit_PML_withC, c("chisq.scaled","df.scaled",
                                               "pvalue.scaled", "cfi.scaled",
                                               "srmr"))
  MyResult_PML_withC_FI <- matrix(FI_PML_withC, nrow = 1, ncol = 5)
  colnames(MyResult_PML_withC_FI) <- c("chisq.scaled", "df.scaled",
                                       "pvalue.scaled", "cfi.scaled",
                                       "srmr")
}
################################ Simulation all cells ###############################
# Write the output of one cell of the design; one CSV per result matrix,
# with the design row and replication number encoded in the file name.
setwd("/exports/fsw/mmcstorm/Analysis/finalmixedrep151to175")
# Write output of one cell of the design
# Save results
write.csv(MyResult_WLS_withoutC_est,
          file = paste("WLS_withoutC_est", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withoutC_err,
          file = paste("WLS_withoutC_err", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withoutC_FI,
          file =paste("WLS_FI_withoutC", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_est,
          file = paste("WLS_withC_est", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_err,
          file = paste("WLS_withC_err", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_FI,
          file =paste("WLS_FI_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_est,
          file = paste("PML_withoutC_est", "Row", RowOfDesign ,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_err,
          file = paste("PML_withoutC_err", "Row", RowOfDesign ,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_FI,
          file =paste("PML_FI_withoutC", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_est,
          file = paste("PML_withC_est", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_err,
          file = paste("PML_withC_err", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_FI,
          file =paste("PML_FI_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
#save the time to run the analyses of K data sets in one cell of the design.
#time <- proc.time() - tmp
#save(time, file =paste("Time", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
# see whether all replications are ok
# Estimation-failure flags (silent check) go to a separate directory.
setwd("/exports/fsw/mmcstorm/Simdata/mixed/silent_check")
write.csv(mis_W_withoutC,
          file = paste("mis_W_withoutC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_W_withC,
          file = paste("mis_W_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_P_withoutC,
          file = paste("mis_P_withoutC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_P_withC,
          file = paste("mis_P_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
setwd("/exports/fsw/mmcstorm/Simdata/mixed")
# save data
write.csv(SimDat,
file = paste("Simulated_Data", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep ="")) | /Simulation scripts (study 2)/MIXED_MainSimulationSLURM_new.R | no_license | mmcstorm/PML | R | false | false | 15,141 | r | #################### LET OP: Dit script bevat andere mis500 variabelen!! Zie meeting Mariska 14-06-2021
# MainSimulationScriptSLURM (following the code of the manual)
# Each SLURM array task runs ONE replication of ONE cell (row) of the design;
# the row index and replication number arrive as command-line arguments.
args <- commandArgs(TRUE) #SLURM command
args <- as.numeric(args)
RowOfDesign <- args[1]
Replication <- args[2]
############################# Simulation Design #############################
factors <- c(2,4,6,8) #number of latent variables
nobs <- c(200,400,800) #sample size
##Create the simulation design matrix (full factorial)
Design_mixed <- expand.grid(factors = factors, nobs = nobs)
#load packages
library(lavaan)
library(usethis)
# load functions
source("MIXED_all_functions_script.R")
################################ Simulation start (1 cell) ##########################
# initialize values
tmp <- proc.time()
# nvarp = indicators per factor (assumed; defined in the sourced functions script),
# so nvar = total number of observed variables for this cell -- TODO confirm.
nvarp <- 6
fact <- Design_mixed[RowOfDesign,1]
nvar <- nvarp*fact
# Silent-check flags: column 2 is flipped to 1 below whenever the corresponding
# fit fails (W = WLSMV, P = PML; withC/withoutC = with/without cross-loadings).
input<- cbind(1, 0)
mis_W_withoutC <- as.matrix(input)
mis_W_withC <- as.matrix(input)
mis_P_withoutC <- as.matrix(input)
mis_P_withC <- as.matrix(input)
# Generate data
# set a random number seed to be able to replicate the result exactly
# (seed is a deterministic function of replication and design row)
set.seed((Replication + 1000)*RowOfDesign)
SimDat <- do.call(MyDataGeneration, Design_mixed[RowOfDesign,] )
#### WLS - Model without specified cross-loadings (with silent check) ####
# Fit the CFA without cross-loadings using robust weighted least squares
# (WLSMV); try() keeps the SLURM job alive if estimation fails.
fit_WLS_withoutC <- try(cfa( model <- model_withoutC(fact),
data=SimDat, std.lv=TRUE,
ordered = colnames(SimDat[indexes_ord(fact)]),
estimator="WLSMV"),silent=TRUE)
if(inherits(fit_WLS_withoutC, "try-error")) {
# Fit failed: flag it and store NA placeholders with the expected dimensions
# so the downstream write.csv calls still succeed.
mis_W_withoutC[1,2] <- 1
MyResult_WLS_withoutC_est <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorEst("WLS",
"withoutC",
fact)))
MyResult_WLS_withoutC_err <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorSE("WLS",
"withoutC",
fact)))
MyResult_WLS_withoutC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
# parameter estimates
# index selects the free parameters, in lavaan ParTable order; the
# ColnamesGenerator* helpers are assumed to produce names in that same
# order -- TODO confirm against the sourced functions script.
index <- which(fit_WLS_withoutC@ParTable$free != 0)
MyAnalysisResult_WLS_withoutC_est <- fit_WLS_withoutC@ParTable$est[index]
MyResult_WLS_withoutC_est <- matrix(MyAnalysisResult_WLS_withoutC_est,
nrow = 1,
ncol = length(ColnamesGeneratorEst("WLS",
"withoutC",
fact)))
colnames(MyResult_WLS_withoutC_est) <- ColnamesGeneratorEst("WLS", "withoutC", fact)
# standard errors
MyAnalysisResult_WLS_withoutC_err <- fit_WLS_withoutC@ParTable$se[index]
MyResult_WLS_withoutC_err <- matrix(MyAnalysisResult_WLS_withoutC_err,
nrow = 1,
ncol = length(ColnamesGeneratorSE("WLS",
"withoutC",
fact)))
colnames(MyResult_WLS_withoutC_err) <- ColnamesGeneratorSE("WLS",
"withoutC",
fact)
### FITINDICES
FI_WLS_withoutC <- fitMeasures(fit_WLS_withoutC,
c("chisq","df",
"pvalue", "cfi",
"srmr"))
MyResult_WLS_withoutC_FI<- matrix(FI_WLS_withoutC, nrow = 1, ncol = 5)
colnames(MyResult_WLS_withoutC_FI) <- c("chisq","df",
"pvalue", "cfi",
"srmr")
}
#### WLS - Model with specified cross-loadings (with silent check) ####
# Same as the WLS block above, but the fitted model includes the
# cross-loadings (model_withC).
fit_WLS_withC <- try(cfa( model <- model_withC(fact),
data=SimDat, std.lv=TRUE,
ordered = colnames(SimDat[indexes_ord(fact)]),
estimator="WLSMV"),silent=TRUE)
if(inherits(fit_WLS_withC, "try-error")) {
# Fit failed: flag it and store NA placeholders of the expected dimensions.
mis_W_withC[1,2] <- 1
MyResult_WLS_withC_est <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorEst("WLS",
"withC",
fact)))
MyResult_WLS_withC_err <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorSE("WLS",
"withC",
fact)))
MyResult_WLS_withC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
# parameter estimates (free parameters only, in ParTable order)
index <- which(fit_WLS_withC@ParTable$free != 0)
MyAnalysisResult_WLS_withC_est <- fit_WLS_withC@ParTable$est[index]
MyResult_WLS_withC_est <- matrix(MyAnalysisResult_WLS_withC_est,
nrow = 1,
ncol = length(ColnamesGeneratorEst("WLS",
"withC",
fact)))
colnames(MyResult_WLS_withC_est) <- ColnamesGeneratorEst("WLS", "withC", fact)
# standard errors for the same free parameters
MyAnalysisResult_WLS_withC_err <- fit_WLS_withC@ParTable$se[index]
MyResult_WLS_withC_err <- matrix(MyAnalysisResult_WLS_withC_err,
nrow = 1,
ncol = length(ColnamesGeneratorSE("WLS",
"withC",
fact)))
colnames(MyResult_WLS_withC_err) <- ColnamesGeneratorSE("WLS", "withC", fact)
### FITINDICES
FI_WLS_withC <- fitMeasures(fit_WLS_withC, c("chisq","df",
"pvalue", "cfi",
"srmr"))
MyResult_WLS_withC_FI <- matrix(FI_WLS_withC, nrow = 1, ncol = 5)
colnames(MyResult_WLS_withC_FI) <- c("chisq","df",
"pvalue", "cfi",
"srmr")
}
#### PML - Model without specified cross-loadings (with silent check) ####
# Same model as the first WLS block, estimated with pairwise maximum
# likelihood (PML). Note the fit indices requested here are the *.scaled
# versions, unlike the plain ones used for WLSMV.
fit_PML_withoutC <- try(cfa( model <- model_withoutC(fact),
data=SimDat, std.lv=TRUE,
ordered = colnames(SimDat[indexes_ord(fact)]),
estimator="PML"),silent=TRUE)
if(inherits(fit_PML_withoutC, "try-error")) {
# Fit failed: flag it and store NA placeholders of the expected dimensions.
mis_P_withoutC[1,2] <- 1
MyResult_PML_withoutC_est <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorEst("PML",
"withoutC",
fact)))
MyResult_PML_withoutC_err <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorSE("PML",
"withoutC",
fact)))
MyResult_PML_withoutC_FI<- matrix(NA, nrow = 1, ncol = 5)
} else {
# parameter estimates (free parameters only, in ParTable order)
index <- which(fit_PML_withoutC@ParTable$free != 0)
MyAnalysisResult_PML_withoutC_est <- fit_PML_withoutC@ParTable$est[index]
MyResult_PML_withoutC_est <- matrix(MyAnalysisResult_PML_withoutC_est,
nrow = 1,
ncol = length(ColnamesGeneratorEst("PML",
"withoutC",
fact)))
colnames(MyResult_PML_withoutC_est) <- ColnamesGeneratorEst("PML", "withoutC", fact)
# standard errors for the same free parameters
MyAnalysisResult_PML_withoutC_err <- fit_PML_withoutC@ParTable$se[index]
MyResult_PML_withoutC_err <- matrix(MyAnalysisResult_PML_withoutC_err,
nrow = 1,
ncol = length(ColnamesGeneratorSE("PML",
"withoutC",
fact)))
colnames(MyResult_PML_withoutC_err) <- ColnamesGeneratorSE("PML", "withoutC", fact)
### FITINDICES (scaled versions for PML)
FI_PML_withoutC <- fitMeasures(fit_PML_withoutC,
c("chisq.scaled","df.scaled",
"pvalue.scaled", "cfi.scaled",
"srmr"))
MyResult_PML_withoutC_FI<- matrix(FI_PML_withoutC, nrow = 1, ncol = 5)
colnames(MyResult_PML_withoutC_FI) <- c("chisq.scaled", "df.scaled",
"pvalue.scaled", "cfi.scaled",
"srmr")
}
#### PML - Model with specified cross-loadings (with silent check) ####
# Fit the model that includes the cross-loadings with pairwise maximum
# likelihood (PML); try() keeps the SLURM job alive when estimation fails.
fit_PML_withC <- try(cfa( model <- model_withC(fact),
data=SimDat, std.lv=TRUE,
ordered = colnames(SimDat[indexes_ord(fact)]),
estimator="PML"),silent=TRUE)
if(inherits(fit_PML_withC, "try-error")) {
# Fit failed: flag it in the silent-check matrix.
mis_P_withC[1,2] <- 1
# BUG FIX: this branch previously referenced MyAnalysisResult_PML_withC_est,
# MyAnalysisResult_PML_withC_err and FI_PML_withC -- objects that only exist
# after a SUCCESSFUL fit -- so a failed fit raised "object not found" and
# killed the job instead of silently recording NA results. Store NA-filled
# placeholder matrices, exactly as the WLS and PML-withoutC blocks above do.
MyResult_PML_withC_est <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorEst("PML",
"withC",
fact)))
MyResult_PML_withC_err <- matrix(NA,
nrow = 1,
ncol = length(ColnamesGeneratorSE("PML",
"withC",
fact)))
MyResult_PML_withC_FI <- matrix(NA, nrow = 1, ncol = 5)
} else {
# parameter estimates (free parameters only, in ParTable order)
index <- which(fit_PML_withC@ParTable$free != 0)
MyAnalysisResult_PML_withC_est <- fit_PML_withC@ParTable$est[index]
MyResult_PML_withC_est <- matrix(MyAnalysisResult_PML_withC_est,
nrow = 1,
ncol = length(ColnamesGeneratorEst("PML",
"withC",
fact)))
colnames(MyResult_PML_withC_est) <- ColnamesGeneratorEst("PML", "withC", fact)
# standard errors for the same free parameters
MyAnalysisResult_PML_withC_err <- fit_PML_withC@ParTable$se[index]
MyResult_PML_withC_err <- matrix(MyAnalysisResult_PML_withC_err,
nrow = 1,
ncol = length(ColnamesGeneratorSE("PML",
"withC",
fact)))
colnames(MyResult_PML_withC_err) <- ColnamesGeneratorSE("PML", "withC", fact)
### FITINDICES (scaled versions for PML)
FI_PML_withC <- fitMeasures(fit_PML_withC, c("chisq.scaled","df.scaled",
"pvalue.scaled", "cfi.scaled",
"srmr"))
MyResult_PML_withC_FI <- matrix(FI_PML_withC, nrow = 1, ncol = 5)
colnames(MyResult_PML_withC_FI) <- c("chisq.scaled", "df.scaled",
"pvalue.scaled", "cfi.scaled",
"srmr")
}
################################ Simulation all cells ###############################
# Results directory for this batch of replications (cluster path).
setwd("/exports/fsw/mmcstorm/Analysis/finalmixedrep151to175")
# Write output of one cell of the design
# Save results
# One CSV per (estimator, model, quantity): estimates, standard errors and
# fit indices for each of the four estimator/model combinations.
write.csv(MyResult_WLS_withoutC_est,
file = paste("WLS_withoutC_est", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withoutC_err,
file = paste("WLS_withoutC_err", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withoutC_FI,
file =paste("WLS_FI_withoutC", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_est,
file = paste("WLS_withC_est", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_err,
file = paste("WLS_withC_err", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_WLS_withC_FI,
file =paste("WLS_FI_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_est,
file = paste("PML_withoutC_est", "Row", RowOfDesign ,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_err,
file = paste("PML_withoutC_err", "Row", RowOfDesign ,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withoutC_FI,
file =paste("PML_FI_withoutC", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_est,
file = paste("PML_withC_est", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_err,
file = paste("PML_withC_err", "Row", RowOfDesign,"Rep", Replication, ".csv" , sep =""))
write.csv(MyResult_PML_withC_FI,
file =paste("PML_FI_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
#save the time to run the analyses of K data sets in one cell of the design.
#time <- proc.time() - tmp
#save(time, file =paste("Time", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
# see whether all replications are ok
# Silent-check flags (1 = that fit crashed) go to a separate directory.
setwd("/exports/fsw/mmcstorm/Simdata/mixed/silent_check")
write.csv(mis_W_withoutC,
file = paste("mis_W_withoutC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_W_withC,
file = paste("mis_W_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_P_withoutC,
file = paste("mis_P_withoutC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
write.csv(mis_P_withC,
file = paste("mis_P_withC", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
setwd("/exports/fsw/mmcstorm/Simdata/mixed")
# save data
write.csv(SimDat,
file = paste("Simulated_Data", "Row", RowOfDesign, "Rep", Replication, ".csv" , sep =""))
###########################################
###########################################
##
## Auto downloading hit tracker data
##
###########################################
###########################################
#=========================#
#### Loading packages ####
#=========================#
library(QuantPsyc)
library(readr)
library(rvest)
library(tidyr)
library(stringr)
library(pitchRx)
library(lubridate)
library(stringdist)
library(dplyr)
library(tibble)
#=====================#
#### Loading Data ####
#=====================#
setwd("~/BASP/R analyses/Baseball Data/Data Files")
# hittracker_all <- read_rds("hittracker_all_distinct.RDS")
hittracker_all <- read_rds("hittracker_updated_distinct.RDS")
cat("-----\n")
paste("Searching results from: ", today() - 1, ", back to: ", max(hittracker_all$game_date), sep = "") %>% noquote() %>% print()
cat("-----\n")
#=========================#
#### Downloading Data ####
#=========================#
# NOTE(review): html_session() is deprecated in rvest >= 1.0 (renamed
# session()); kept as-is here to avoid changing runtime behavior.
# The cookies request the 2017 season with all rows on a single page.
ht_session <-
html_session(
"http://www.hittrackeronline.com/index.php",
httr::set_cookies(season = 2017, perpage = 10000)
)
# Scrape the home-run table. It is the 17th <table> on the page -- a
# layout-dependent magic number that will break if the site markup changes.
# The first two slices drop header chrome; the second drops footer rows.
hittracker_update_raw <-
ht_session %>%
html_nodes("table") %>%
magrittr::extract2(17) %>%
html_table(header = FALSE) %>%
slice(3:nrow(.)) %>%
magrittr::set_colnames(.[1,]) %>%
slice(2:(nrow(.)-2)) %>%
as_data_frame() %>%
transmute(
game_date = parse_date(Date, format = "%D"),
batter_name = parse_character(Hitter),
batter_team = parse_character(HitterTeam),
pitcher_name = parse_character(Pitcher),
pitcher_team = parse_character(PitcherTeam),
inning = parse_number(INN),
ballpark = parse_character(Ballpark),
type_luck = parse_character(`Type/Luck`),
hit_distance_ht = parse_number(TrueDist.),
launch_speed_ht = parse_number(SpeedOffBat),
launch_angle_ht = parse_number(Elev.Angle),
horiz_angle = parse_number(Horiz.Angle),
apex = parse_number(Apex),
num_parks = parse_number(`#Parks`),
game_year = game_date %>% year(),
game_type = "Regular Season"
)
# hittracker_update_raw %>% glimpse()
# hittracker_update_raw %>% slice(1) %>% glimpse()
# hittracker_update_raw %>% slice(nrow(.)) %>% glimpse()
#=====================#
#### Cleaning Data ####
#=====================#
hittracker_update <-
hittracker_update_raw %>%
## Modifying names
mutate(batter_name_ch = as.character(batter_name)) %>%
mutate(
batter_name_first = str_split(
batter_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,2],
batter_name_last = str_split(
batter_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,1]
) %>%
select(-batter_name_ch) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, " Jr.", "")) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, "\\ ", "")) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, "\\.", "")) %>%
mutate(batter_name_last = str_to_lower(string = batter_name_last)) %>%
mutate(batter_name_first = str_replace_all(batter_name_first, "\\ ", "")) %>%
mutate(batter_name_first = str_replace_all(batter_name_first, "\\.", "")) %>%
mutate(batter_name_first = str_to_lower(string = batter_name_first)) %>%
mutate(batter_name_c = str_c(batter_name_last, batter_name_first, sep = ",")) %>%
mutate(batter_name_c = str_to_lower(string = batter_name_c)) %>%
mutate(pitcher_name_ch = as.character(pitcher_name)) %>%
mutate(
pitcher_name_first = str_split(
pitcher_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,2],
pitcher_name_last = str_split(
pitcher_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,1]
) %>%
select(-pitcher_name_ch) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, " Jr.", "")) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, "\\ ", "")) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, "\\.", "")) %>%
mutate(pitcher_name_last = str_to_lower(string = pitcher_name_last)) %>%
mutate(pitcher_name_first = str_replace_all(pitcher_name_first, "\\ ", "")) %>%
mutate(pitcher_name_first = str_replace_all(pitcher_name_first, "\\.", "")) %>%
mutate(pitcher_name_first = str_to_lower(string = pitcher_name_first)) %>%
mutate(pitcher_name_c = str_c(pitcher_name_last, pitcher_name_first, sep = ",")) %>%
mutate(pitcher_name_c = str_to_lower(string = pitcher_name_c)) %>%
## Accounting for ballpark name changes
## NOTE: case_when() returns the first matching branch, so the second set of
## "O.co Coliseu" / "Oakland-Alam..." / "McAfee" branches in the original was
## unreachable dead code and has been removed (no behavior change). The
## patterns are regexes, so "." matches any character in these strings.
mutate(
ballpark = with(.,
case_when(
str_detect(ballpark, "Globe") ~ "Globe Life Park",
str_detect(ballpark, "Ameriquest") ~ "Globe Life Park",
str_detect(ballpark, "Rangers") ~ "Globe Life Park",
str_detect(ballpark, "Land Shark") ~ "Land Shark Stadium",
str_detect(ballpark, "Sun Life") ~ "Land Shark Stadium",
str_detect(ballpark, "Dolphin") ~ "Land Shark Stadium",
str_detect(ballpark, "Progressive") ~ "Progressive Field",
str_detect(ballpark, "Jacobs") ~ "Progressive Field",
str_detect(ballpark, "O.co Coliseu") ~ "O.co Coliseum",
str_detect(ballpark, "Oakland-Alam...") ~ "O.co Coliseum",
str_detect(ballpark, "McAfee") ~ "O.co Coliseum",
TRUE ~ identity(ballpark) %>% as.character()))) %>%
## Accounting for ballpark name truncations
mutate(
ballpark = with(.,
case_when(
str_detect(ballpark, "Angel Stadiu...") ~ "Angel Stadium",
str_detect(ballpark, "Busch Stadiu...") ~ "Busch Stadium",
str_detect(ballpark, "Citizens Ban...") ~ "Citizens Bank Park",
str_detect(ballpark, "Comerica Par...") ~ "Comerica Park",
str_detect(ballpark, "Dodger Stadi...") ~ "Dodger Stadium",
str_detect(ballpark, "Great Americ...") ~ "Great American Ball Park",
str_detect(ballpark, "Kauffman Sta...") ~ "Kauffman Stadium",
str_detect(ballpark, "Minute Maid...") ~ "Minute Maid Park",
str_detect(ballpark, "Nationals Pa...") ~ "Nationals Park",
str_detect(ballpark, "Old Yankee S...") ~ "Old Yankee Stadium",
str_detect(ballpark, "Oriole Park ...") ~ "Camden Yards",
str_detect(ballpark, "Rogers Centr...") ~ "Rogers Centre",
str_detect(ballpark, "Tropicana Fi...") ~ "Tropicana Field",
str_detect(ballpark, "U.S. Cellula...") ~ "U.S. Cellular Field",
str_detect(ballpark, "Wrigley Fiel...") ~ "Wrigley Field",
str_detect(ballpark, "Yankee Stadi...") ~ "Yankee Stadium",
str_detect(ballpark, "Hiram Bithor...") ~ "Hiram Bithorn Stadium",
str_detect(ballpark, "Sydney Crick...") ~ "Sydney Cricket Ground",
str_detect(ballpark, "SunTrust Par...") ~ "SunTrust Park",
TRUE ~ identity(ballpark) %>% as.character()))) %>%
## Accounting for incorrect team name
mutate(
pitcher_team = with(.,
case_when(
str_detect(pitcher_team, "CHW") ~ "CWS",
str_detect(pitcher_team, "FLA") ~ "MIA",
TRUE ~ identity(pitcher_team) %>% as.character())),
batter_team = with(.,
case_when(
str_detect(batter_team, "CHW") ~ "CWS",
str_detect(batter_team, "FLA") ~ "MIA",
TRUE ~ identity(batter_team) %>% as.character()))) %>%
## Recoding horizonal angle (-45 left field line, 0 straightaway center, +45 right field line)
mutate(horiz_angle_c = (horiz_angle - 90) * -1) %>%
mutate(horiz_angle = horiz_angle - 45) %>%
mutate(horiz_angle_rad = (horiz_angle * pi) / 180) %>%
mutate(
horiz_angle_2 = with(.,
case_when(
horiz_angle_c < 0 ~ "Left",
horiz_angle_c >= 0 ~ "Right") %>%
factor(., levels = c("Left", "Right"),
ordered = TRUE)),
horiz_angle_3 = with(.,
case_when(
horiz_angle_c < -15 ~ "Left",
(horiz_angle_c >= -15) & (horiz_angle_c < 15) ~ "Center",
horiz_angle_c >= 15 ~ "Right") %>%
factor(., levels = c("Left", "Center", "Right"),
ordered = TRUE)),
horiz_angle_4 = with(.,
case_when(
horiz_angle_c < -22.5 ~ "Left",
(horiz_angle_c >= -22.5) & (horiz_angle_c < 0) ~ "Left Center",
(horiz_angle_c >= 0) & (horiz_angle_c < 22.5) ~ "Right Center",
horiz_angle_c >= 22.5 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Right Center", "Right"),
ordered = TRUE)),
horiz_angle_5 = with(.,
case_when(
horiz_angle_c < -27 ~ "Left",
(horiz_angle_c >= -27) & (horiz_angle_c < -9) ~ "Left Center",
(horiz_angle_c >= -9) & (horiz_angle_c < 9) ~ "Center",
(horiz_angle_c >= 9) & (horiz_angle_c < 27) ~ "Right Center",
horiz_angle_c >= 27 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Center", "Right Center", "Right"),
ordered = TRUE)),
horiz_angle_6 = with(.,
case_when(
horiz_angle_c < -30 ~ "Left",
(horiz_angle_c >= -30) & (horiz_angle_c < -15) ~ "Left Center",
(horiz_angle_c >= -15) & (horiz_angle_c < 0) ~ "Center Left",
(horiz_angle_c >= 0) & (horiz_angle_c < 15) ~ "Center Right",
(horiz_angle_c >= 15) & (horiz_angle_c < 30) ~ "Right Center",
horiz_angle_c >= 30 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Center Left", "Center Right", "Right Center", "Right"),
ordered = TRUE))
) %>%
## Converting character variables to factors
mutate_if(is.character, as.factor) %>%
mutate(inning = as.factor(inning)) %>%
mutate(game_date = as_date(game_date)) %>%
## Cleaning up
# filter(launch_speed_ht != 0) %>%
filter(ballpark != "Fort Bragg" | is.na(ballpark)) %>%
filter(ballpark != "Hiram Bithorn Stadium" | is.na(ballpark)) %>%
filter(ballpark != "Sydney Cricket Ground" | is.na(ballpark)) %>%
filter(type_luck != "No homeruns found." | is.na(type_luck)) %>%
droplevels()
contrasts(hittracker_update$ballpark) <- "contr.sum"
contrasts(hittracker_update$batter_name) <- "contr.sum"
contrasts(hittracker_update$batter_team) <- "contr.sum"
contrasts(hittracker_update$pitcher_name) <- "contr.sum"
contrasts(hittracker_update$pitcher_team) <- "contr.sum"
contrasts(hittracker_update$inning) <- "contr.sum"
contrasts(hittracker_update$type_luck) <- "contr.sum"
#=======================#
#### Combining Data ####
#=======================#
# New rows: scraped rows that are not already present in the stored dataset.
hittracker_update_new <-
setdiff(
hittracker_update,
hittracker_all)
# NOTE(review): the result of this anti_join is never assigned -- when run at
# top level it only auto-prints the new rows, and it is dropped entirely when
# the script is source()d. Looks like a leftover diagnostic; confirm before
# removing.
anti_join(
hittracker_update,
hittracker_all,
by = c(
"game_date",
"batter_name",
"pitcher_name",
"inning",
"hit_distance_ht",
"launch_speed_ht",
"launch_angle_ht",
"horiz_angle",
"apex"
)
)
######################
hittracker_update_new %>%
summarise(
max_date = max(game_date),
min_date = min(game_date)) %>%
with(.,
paste("Found results from: ", max_date, ", back to: ", min_date, sep = "") %>%
noquote() %>%
print()
)
cat("-----\n")
paste(nrow(hittracker_update_new), "rows added") %>% noquote() %>% print()
cat("-----\n")
paste(gdata::object.size(hittracker_update_new) %>% format(humanReadable = TRUE), "added") %>% noquote() %>% print()
cat("-----\n")
######################
hittracker_updated <-
bind_rows(
hittracker_all,
hittracker_update_new)
hittracker_updated_distinct <- hittracker_updated %>% distinct()
hittracker_updated_distinct <- hittracker_updated_distinct %>% arrange(desc(game_date))
#=====================#
#### Saving Data ####
#=====================#
write_rds(hittracker_updated_distinct, "hittracker_updated_distinct.RDS")
write_csv(hittracker_updated_distinct, "hittracker_updated_distinct.csv")
#=====================#
#### Cleaning up ####
#=====================#
rm(list = ls(envir = globalenv()), envir = globalenv())
# lapply(
# paste("package:", names(sessionInfo()$otherPkgs), sep = ""),
# detach,
# character.only = TRUE,
# unload = TRUE,
# force = TRUE
# )
gc()
#=====================#
#### End ####
#=====================#
# Source file: /code/data_gathering_code/updating_hit_tracker_data.R
# (dataset metadata row converted to a comment so the file parses)
###########################################
##
## Auto downloading hit tracker data
##
###########################################
###########################################
#=========================#
#### Loading packages ####
#=========================#
library(QuantPsyc)
library(readr)
library(rvest)
library(tidyr)
library(stringr)
library(pitchRx)
library(lubridate)
library(stringdist)
library(dplyr)
library(tibble)
#=====================#
#### Loading Data ####
#=====================#
setwd("~/BASP/R analyses/Baseball Data/Data Files")
# hittracker_all <- read_rds("hittracker_all_distinct.RDS")
hittracker_all <- read_rds("hittracker_updated_distinct.RDS")
cat("-----\n")
paste("Searching results from: ", today() - 1, ", back to: ", max(hittracker_all$game_date), sep = "") %>% noquote() %>% print()
cat("-----\n")
#=========================#
#### Downloading Data ####
#=========================#
ht_session <-
html_session(
"http://www.hittrackeronline.com/index.php",
httr::set_cookies(season = 2017, perpage = 10000)
)
hittracker_update_raw <-
ht_session %>%
html_nodes("table") %>%
magrittr::extract2(17) %>%
html_table(header = FALSE) %>%
slice(3:nrow(.)) %>%
magrittr::set_colnames(.[1,]) %>%
slice(2:(nrow(.)-2)) %>%
as_data_frame() %>%
transmute(
game_date = parse_date(Date, format = "%D"),
batter_name = parse_character(Hitter),
batter_team = parse_character(HitterTeam),
pitcher_name = parse_character(Pitcher),
pitcher_team = parse_character(PitcherTeam),
inning = parse_number(INN),
ballpark = parse_character(Ballpark),
type_luck = parse_character(`Type/Luck`),
hit_distance_ht = parse_number(TrueDist.),
launch_speed_ht = parse_number(SpeedOffBat),
launch_angle_ht = parse_number(Elev.Angle),
horiz_angle = parse_number(Horiz.Angle),
apex = parse_number(Apex),
num_parks = parse_number(`#Parks`),
game_year = game_date %>% year(),
game_type = "Regular Season"
)
# hittracker_update_raw %>% glimpse()
# hittracker_update_raw %>% slice(1) %>% glimpse()
# hittracker_update_raw %>% slice(nrow(.)) %>% glimpse()
#=====================#
#### Cleaning Data ####
#=====================#
hittracker_update <-
hittracker_update_raw %>%
## Modifying names
mutate(batter_name_ch = as.character(batter_name)) %>%
mutate(
batter_name_first = str_split(
batter_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,2],
batter_name_last = str_split(
batter_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,1]
) %>%
select(-batter_name_ch) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, " Jr.", "")) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, "\\ ", "")) %>%
mutate(batter_name_last = str_replace_all(batter_name_last, "\\.", "")) %>%
mutate(batter_name_last = str_to_lower(string = batter_name_last)) %>%
mutate(batter_name_first = str_replace_all(batter_name_first, "\\ ", "")) %>%
mutate(batter_name_first = str_replace_all(batter_name_first, "\\.", "")) %>%
mutate(batter_name_first = str_to_lower(string = batter_name_first)) %>%
mutate(batter_name_c = str_c(batter_name_last, batter_name_first, sep = ",")) %>%
mutate(batter_name_c = str_to_lower(string = batter_name_c)) %>%
mutate(pitcher_name_ch = as.character(pitcher_name)) %>%
mutate(
pitcher_name_first = str_split(
pitcher_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,2],
pitcher_name_last = str_split(
pitcher_name_ch,
pattern = ", ",
n = 2,
simplify = TRUE) %>% unlist() %>% .[,1]
) %>%
select(-pitcher_name_ch) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, " Jr.", "")) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, "\\ ", "")) %>%
mutate(pitcher_name_last = str_replace_all(pitcher_name_last, "\\.", "")) %>%
mutate(pitcher_name_last = str_to_lower(string = pitcher_name_last)) %>%
mutate(pitcher_name_first = str_replace_all(pitcher_name_first, "\\ ", "")) %>%
mutate(pitcher_name_first = str_replace_all(pitcher_name_first, "\\.", "")) %>%
mutate(pitcher_name_first = str_to_lower(string = pitcher_name_first)) %>%
mutate(pitcher_name_c = str_c(pitcher_name_last, pitcher_name_first, sep = ",")) %>%
mutate(pitcher_name_c = str_to_lower(string = pitcher_name_c)) %>%
## Accounting for ballpark name changes
## NOTE: case_when() returns the first matching branch, so the second set of
## "O.co Coliseu" / "Oakland-Alam..." / "McAfee" branches in the original was
## unreachable dead code and has been removed (no behavior change). The
## patterns are regexes, so "." matches any character in these strings.
mutate(
ballpark = with(.,
case_when(
str_detect(ballpark, "Globe") ~ "Globe Life Park",
str_detect(ballpark, "Ameriquest") ~ "Globe Life Park",
str_detect(ballpark, "Rangers") ~ "Globe Life Park",
str_detect(ballpark, "Land Shark") ~ "Land Shark Stadium",
str_detect(ballpark, "Sun Life") ~ "Land Shark Stadium",
str_detect(ballpark, "Dolphin") ~ "Land Shark Stadium",
str_detect(ballpark, "Progressive") ~ "Progressive Field",
str_detect(ballpark, "Jacobs") ~ "Progressive Field",
str_detect(ballpark, "O.co Coliseu") ~ "O.co Coliseum",
str_detect(ballpark, "Oakland-Alam...") ~ "O.co Coliseum",
str_detect(ballpark, "McAfee") ~ "O.co Coliseum",
TRUE ~ identity(ballpark) %>% as.character()))) %>%
## Accounting for ballpark name truncations
mutate(
ballpark = with(.,
case_when(
str_detect(ballpark, "Angel Stadiu...") ~ "Angel Stadium",
str_detect(ballpark, "Busch Stadiu...") ~ "Busch Stadium",
str_detect(ballpark, "Citizens Ban...") ~ "Citizens Bank Park",
str_detect(ballpark, "Comerica Par...") ~ "Comerica Park",
str_detect(ballpark, "Dodger Stadi...") ~ "Dodger Stadium",
str_detect(ballpark, "Great Americ...") ~ "Great American Ball Park",
str_detect(ballpark, "Kauffman Sta...") ~ "Kauffman Stadium",
str_detect(ballpark, "Minute Maid...") ~ "Minute Maid Park",
str_detect(ballpark, "Nationals Pa...") ~ "Nationals Park",
str_detect(ballpark, "Old Yankee S...") ~ "Old Yankee Stadium",
str_detect(ballpark, "Oriole Park ...") ~ "Camden Yards",
str_detect(ballpark, "Rogers Centr...") ~ "Rogers Centre",
str_detect(ballpark, "Tropicana Fi...") ~ "Tropicana Field",
str_detect(ballpark, "U.S. Cellula...") ~ "U.S. Cellular Field",
str_detect(ballpark, "Wrigley Fiel...") ~ "Wrigley Field",
str_detect(ballpark, "Yankee Stadi...") ~ "Yankee Stadium",
str_detect(ballpark, "Hiram Bithor...") ~ "Hiram Bithorn Stadium",
str_detect(ballpark, "Sydney Crick...") ~ "Sydney Cricket Ground",
str_detect(ballpark, "SunTrust Par...") ~ "SunTrust Park",
TRUE ~ identity(ballpark) %>% as.character()))) %>%
## Accounting for incorrect team name
mutate(
pitcher_team = with(.,
case_when(
str_detect(pitcher_team, "CHW") ~ "CWS",
str_detect(pitcher_team, "FLA") ~ "MIA",
TRUE ~ identity(pitcher_team) %>% as.character())),
batter_team = with(.,
case_when(
str_detect(batter_team, "CHW") ~ "CWS",
str_detect(batter_team, "FLA") ~ "MIA",
TRUE ~ identity(batter_team) %>% as.character()))) %>%
## Recoding horizonal angle (-45 left field line, 0 straightaway center, +45 right field line)
mutate(horiz_angle_c = (horiz_angle - 90) * -1) %>%
mutate(horiz_angle = horiz_angle - 45) %>%
mutate(horiz_angle_rad = (horiz_angle * pi) / 180) %>%
mutate(
horiz_angle_2 = with(.,
case_when(
horiz_angle_c < 0 ~ "Left",
horiz_angle_c >= 0 ~ "Right") %>%
factor(., levels = c("Left", "Right"),
ordered = TRUE)),
horiz_angle_3 = with(.,
case_when(
horiz_angle_c < -15 ~ "Left",
(horiz_angle_c >= -15) & (horiz_angle_c < 15) ~ "Center",
horiz_angle_c >= 15 ~ "Right") %>%
factor(., levels = c("Left", "Center", "Right"),
ordered = TRUE)),
horiz_angle_4 = with(.,
case_when(
horiz_angle_c < -22.5 ~ "Left",
(horiz_angle_c >= -22.5) & (horiz_angle_c < 0) ~ "Left Center",
(horiz_angle_c >= 0) & (horiz_angle_c < 22.5) ~ "Right Center",
horiz_angle_c >= 22.5 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Right Center", "Right"),
ordered = TRUE)),
horiz_angle_5 = with(.,
case_when(
horiz_angle_c < -27 ~ "Left",
(horiz_angle_c >= -27) & (horiz_angle_c < -9) ~ "Left Center",
(horiz_angle_c >= -9) & (horiz_angle_c < 9) ~ "Center",
(horiz_angle_c >= 9) & (horiz_angle_c < 27) ~ "Right Center",
horiz_angle_c >= 27 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Center", "Right Center", "Right"),
ordered = TRUE)),
horiz_angle_6 = with(.,
case_when(
horiz_angle_c < -30 ~ "Left",
(horiz_angle_c >= -30) & (horiz_angle_c < -15) ~ "Left Center",
(horiz_angle_c >= -15) & (horiz_angle_c < 0) ~ "Center Left",
(horiz_angle_c >= 0) & (horiz_angle_c < 15) ~ "Center Right",
(horiz_angle_c >= 15) & (horiz_angle_c < 30) ~ "Right Center",
horiz_angle_c >= 30 ~ "Right") %>%
factor(., levels = c("Left", "Left Center", "Center Left", "Center Right", "Right Center", "Right"),
ordered = TRUE))
) %>%
## Converting character variables to factors
mutate_if(is.character, as.factor) %>%
mutate(inning = as.factor(inning)) %>%
mutate(game_date = as_date(game_date)) %>%
## Cleaning up
# filter(launch_speed_ht != 0) %>%
filter(ballpark != "Fort Bragg" | is.na(ballpark)) %>%
filter(ballpark != "Hiram Bithorn Stadium" | is.na(ballpark)) %>%
filter(ballpark != "Sydney Cricket Ground" | is.na(ballpark)) %>%
filter(type_luck != "No homeruns found." | is.na(type_luck)) %>%
droplevels()
contrasts(hittracker_update$ballpark) <- "contr.sum"
contrasts(hittracker_update$batter_name) <- "contr.sum"
contrasts(hittracker_update$batter_team) <- "contr.sum"
contrasts(hittracker_update$pitcher_name) <- "contr.sum"
contrasts(hittracker_update$pitcher_team) <- "contr.sum"
contrasts(hittracker_update$inning) <- "contr.sum"
contrasts(hittracker_update$type_luck) <- "contr.sum"
#=======================#
#### Combining Data ####
#=======================#
# New rows: scraped rows that are not already present in the stored dataset.
hittracker_update_new <-
setdiff(
hittracker_update,
hittracker_all)
# NOTE(review): the result of this anti_join is never assigned -- when run at
# top level it only auto-prints the new rows, and it is dropped entirely when
# the script is source()d. Looks like a leftover diagnostic; confirm before
# removing.
anti_join(
hittracker_update,
hittracker_all,
by = c(
"game_date",
"batter_name",
"pitcher_name",
"inning",
"hit_distance_ht",
"launch_speed_ht",
"launch_angle_ht",
"horiz_angle",
"apex"
)
)
######################
hittracker_update_new %>%
summarise(
max_date = max(game_date),
min_date = min(game_date)) %>%
with(.,
paste("Found results from: ", max_date, ", back to: ", min_date, sep = "") %>%
noquote() %>%
print()
)
cat("-----\n")
paste(nrow(hittracker_update_new), "rows added") %>% noquote() %>% print()
cat("-----\n")
paste(gdata::object.size(hittracker_update_new) %>% format(humanReadable = TRUE), "added") %>% noquote() %>% print()
cat("-----\n")
######################
hittracker_updated <-
bind_rows(
hittracker_all,
hittracker_update_new)
hittracker_updated_distinct <- hittracker_updated %>% distinct()
hittracker_updated_distinct <- hittracker_updated_distinct %>% arrange(desc(game_date))
#=====================#
#### Saving Data ####
#=====================#
write_rds(hittracker_updated_distinct, "hittracker_updated_distinct.RDS")
write_csv(hittracker_updated_distinct, "hittracker_updated_distinct.csv")
#=====================#
#### Cleaning up ####
#=====================#
rm(list = ls(envir = globalenv()), envir = globalenv())
# lapply(
# paste("package:", names(sessionInfo()$otherPkgs), sep = ""),
# detach,
# character.only = TRUE,
# unload = TRUE,
# force = TRUE
# )
gc()
#=====================#
#### End ####
#=====================#
|
# Build and populate the `features_to_samples` database table.
#
# Pipeline:
#   1. Import every feather file from Synapse folder syn22125635.
#   2. Normalise the frame: guarantee the feature/sample/value columns exist,
#      drop duplicate and key-less rows, resolve remaining key dupes.
#   3. Join in the database ids for features and samples.
#   4. Split infinite measurements into `inf_value` so `value` holds only
#      finite numbers (or NA).
#   5. Replace the `features_to_samples` table with the result.
#
# Args:
#   max_rows: optional row cap forwarded to iatlas.data::replace_table();
#     NULL (the default) writes every row.
# Returns: invisibly, the value returned by iatlas.data::replace_table().
build_features_to_samples_table <- function(max_rows = NULL) {
  # features_to_samples import ---------------------------------------------------
  cat(crayon::magenta("Importing feather files for features_to_samples."), fill = TRUE)
  features_to_samples <- synapse_read_all_feather_files("syn22125635")
  cat(crayon::blue("Imported feather files for features_to_samples."), fill = TRUE)
  # features_to_samples column fix ---------------------------------------------------
  cat(crayon::magenta("Ensuring features_to_samples have all the correct columns and no dupes."), fill = TRUE)
  features_to_samples <- features_to_samples %>%
    # Binding a zero-row prototype guarantees the three expected columns
    # exist even if some feather file lacked one of them.
    dplyr::bind_rows(dplyr::tibble(
      feature = character(),
      sample = character(),
      value = numeric()
    )) %>%
    dplyr::distinct(feature, sample, value) %>%
    dplyr::filter(!is.na(feature) & !is.na(sample)) %>%
    iatlas.data::resolve_df_dupes(keys = c("feature", "sample")) %>%
    dplyr::select(feature, sample, value) %>%
    dplyr::arrange(feature, sample)
  cat(crayon::blue("Ensured features_to_samples have all the correct columns and no dupes."), fill = TRUE)
  # features_to_samples data ---------------------------------------------------
  cat(crayon::magenta("Building features_to_samples data."), fill = TRUE)
  features_to_samples <- features_to_samples %>% dplyr::left_join(iatlas.data::get_features(), by = "feature")
  features_to_samples <- features_to_samples %>% dplyr::left_join(
    iatlas.data::get_samples() %>%
      dplyr::as_tibble() %>%
      dplyr::select(sample_id = id, sample = name),
    by = "sample"
  )
  features_to_samples <- features_to_samples %>%
    dplyr::select(feature_id, sample_id, value) %>%
    # Keep +/-Inf separately in inf_value; value keeps only finite numbers.
    dplyr::mutate(inf_value = ifelse(is.infinite(value), value, NA), value = ifelse(is.finite(value), value, NA))
  cat(crayon::blue("Built features_to_samples data."), fill = TRUE)
  # features_to_samples table ---------------------------------------------------
  cat(crayon::magenta("Building features_to_samples table.\n\t(Please be patient, this may take a little while as there are", nrow(features_to_samples), "rows to write.)"), fill = TRUE, sep = " ")
  table_written <- features_to_samples %>% iatlas.data::replace_table("features_to_samples", max_rows = max_rows)
  cat(crayon::blue("Built features_to_samples table. (", nrow(features_to_samples), "rows )"), fill = TRUE, sep = " ")
  # Fix: `table_written` was previously assigned but never used and the
  # function returned cat()'s NULL; surface the write status to callers.
  invisible(table_written)
}
| /R/build_features_to_samples_table.R | no_license | CRI-iAtlas/iatlas-data | R | false | false | 2,400 | r | build_features_to_samples_table <- function(max_rows = NULL) {
  # features_to_samples import ---------------------------------------------------
  # Pull every feather file from Synapse folder syn22125635 into one frame.
  cat(crayon::magenta("Importing feather files for features_to_samples."), fill = TRUE)
  features_to_samples <- synapse_read_all_feather_files("syn22125635")
  cat(crayon::blue("Imported feather files for features_to_samples."), fill = TRUE)
  # features_to_samples column fix ---------------------------------------------------
  cat(crayon::magenta("Ensuring features_to_samples have all the correct columns and no dupes."), fill = TRUE)
  features_to_samples <- features_to_samples %>%
    # Binding a zero-row prototype guarantees the three expected columns
    # exist even if some feather file lacked one of them.
    dplyr::bind_rows(dplyr::tibble(
      feature = character(),
      sample = character(),
      value = numeric()
    )) %>%
    dplyr::distinct(feature, sample, value) %>%
    dplyr::filter(!is.na(feature) & !is.na(sample)) %>%
    iatlas.data::resolve_df_dupes(keys = c("feature", "sample")) %>%
    dplyr::select(feature, sample, value) %>%
    dplyr::arrange(feature, sample)
  cat(crayon::blue("Ensured features_to_samples have all the correct columns and no dupes."), fill = TRUE)
  # features_to_samples data ---------------------------------------------------
  # Resolve feature and sample names to their database ids.
  cat(crayon::magenta("Building features_to_samples data."), fill = TRUE)
  features_to_samples <- features_to_samples %>% dplyr::left_join(iatlas.data::get_features(), by = "feature")
  features_to_samples <- features_to_samples %>% dplyr::left_join(
    iatlas.data::get_samples() %>%
      dplyr::as_tibble() %>%
      dplyr::select(sample_id = id, sample = name),
    by = "sample"
  )
  features_to_samples <- features_to_samples %>%
    dplyr::select(feature_id, sample_id, value) %>%
    # Keep +/-Inf separately in inf_value; value keeps only finite numbers.
    dplyr::mutate(inf_value = ifelse(is.infinite(value), value, NA), value = ifelse(is.finite(value), value, NA))
  cat(crayon::blue("Built features_to_samples data."), fill = TRUE)
  # features_to_samples table ---------------------------------------------------
  cat(crayon::magenta("Building features_to_samples table.\n\t(Please be patient, this may take a little while as there are", nrow(features_to_samples), "rows to write.)"), fill = TRUE, sep = " ")
  # NOTE(review): table_written is assigned but never used; the function
  # returns cat()'s NULL.
  table_written <- features_to_samples %>% iatlas.data::replace_table("features_to_samples", max_rows = max_rows)
  cat(crayon::blue("Built features_to_samples table. (", nrow(features_to_samples), "rows )"), fill = TRUE, sep = " ")
}
|
# change the file path to wherever query.csv lives on your machine
data1 <- read.csv(file("C:\\Users\\yupit\\Desktop\\It was rated\\ds-uph-master\\query.csv"))
# peek at the first few rows
head(data1)
# per-column summary statistics
summary(data1)
####### Step 1
# plot
#install.packages("ggplot2")
library(ggplot2)
# Fix: a stray `geom_point(1, 1, 1)` call was removed here. Calling a geom
# with positional non-aes arguments errors at runtime ("mapping must be
# created with aes()"), and the layer was not attached to any plot anyway.
ggplot(data1, aes(x=latitude, y=longitude)) + geom_point() | /work.R | permissive | RyanHiroshi/Earthquake | R | false | false | 337 | r | #change the file path
# Load the earthquake query export (hard-coded Windows path).
data1 <- read.csv(file("C:\\Users\\yupit\\Desktop\\It was rated\\ds-uph-master\\query.csv"))
# categorize
head(data1)
# view
summary(data1)
# brackets
####### Step 1
# plot
#install.packages("ggplot2")
library(ggplot2)
# NOTE(review): this bare geom_point(1, 1, 1) call errors at runtime
# ("mapping must be created with aes()") and is attached to no plot — bug.
geom_point(1, 1, 1)
ggplot(data1, aes(x=latitude, y=longitude)) + geom_point() |
# ---------------------------------------------------------------------------
# Differential-expression driver (per time point / per cell-type cluster).
# Reads a scanpy .h5ad export, rebuilds the sparse count matrix, and wraps it
# in a Seurat object together with the per-cell metadata.
# Usage: Rscript <script> <time_point> <cluster>
# ---------------------------------------------------------------------------
library(hdf5r)
library(Seurat)
library(tidyverse)
library(cowplot)
library(viridis)
library(ggrepel)
library(e1071)
library(scales)
library(ggplot2)
library(ggrepel)
library(cowplot)
library(scales)
library(ggpubr)
# Command-line arguments: time point and cell-type cluster to test.
args = commandArgs(trailingOnly=TRUE)
tp <- args[1]
cluster <- args[2]
file <- paste0("../singleCellProcessing/output/allpools.scanpy.",tp,".wMetaClustUmapGraph.exprLogNormNotScaled.h5ad")
pathToDE="outputTabs/DEsinglecell/"
h5_data <- H5File$new(file, mode = "r")
h5ls(file)
# Rebuild the genes-by-cells sparse matrix from the h5ad CSC components;
# index1 = FALSE because h5ad stores 0-based indices.
feature_matrix <- Matrix::sparseMatrix(
i = h5_data[['X/indices']][],
p = h5_data[['X/indptr']][],
x = h5_data[['X/data']][],
dimnames = list(
h5_data[["/var/_index"]][],
h5_data[["obs/_index"]][]),
dims = as.integer(c(length(h5_data[["/var/_index"]][]),
length(h5_data[["obs/_index"]][]))),
index1 = FALSE
)
# Keep genes detected in at least 1% of cells.
seurat <- CreateSeuratObject(feature_matrix, min.cells = 0.01*dim(feature_matrix)[2])
rm(feature_matrix)
# Per-cell metadata; categorical columns still hold integer codes here
# (decoded below via relabelColumns()).
metadata <- data.frame(cellid=h5_data[["obs/_index"]][],
batch=h5_data[["obs/batch"]][],
donor_id=h5_data[["obs/donor_id"]][],
leiden_id=h5_data[["obs/leiden_id"]][],
pool_id=h5_data[["obs/pool_id"]][],
sample_id=h5_data[["obs/sample_id"]][],
tp_treat=h5_data[["obs/tp_treat"]][])
# NOTE(review): h5read() comes from rhdf5, which is never library()'d here —
# presumably attached via a dependency; confirm, since only hdf5r is loaded.
kk=h5read(file,"/obs/__categories")
print(object.size(metadata), units = "auto")
# Decode integer-coded categorical metadata columns back to their labels.
#
# `kk` (read from the h5ad "/obs/__categories" group) maps each categorical
# column name to its vector of labels; the codes stored in `metadata` are
# 0-based positions into that vector.
#
# Args:
#   metadata: data.frame of per-cell metadata with integer-coded columns.
#   kk: named list; kk[[col]] is the character vector of labels for `col`.
# Returns: `metadata` with every column named in `kk` relabelled; columns
#   without an entry in `kk` are left untouched.
relabelColumns <- function(metadata, kk){
  # Build one named lookup per column: labels indexed by their 0-based code.
  # Fix: use lapply, not sapply — with equal-length category sets sapply
  # simplifies the result to a matrix, names(corr) becomes NULL, and no
  # column would be relabelled at all.
  corr <- lapply(kk, function(labels) {
    lookup <- labels
    names(lookup) <- seq_along(labels) - 1
    lookup
  })
  colsToMatch <- colnames(metadata)[colnames(metadata) %in% names(corr)]
  for (col in colsToMatch){
    lookup <- corr[[col]]
    metadata[[col]] <- lookup[as.character(metadata[[col]])]
  }
  return(metadata)
}
# Decode categorical codes to labels using the category sets read above.
metadata <- relabelColumns(metadata, kk)
# Attach human-readable batch information (pool / time point / replicates).
batchNames <- read.table("../singleCellProcessing/pools_to_merge_strictTP.csv", sep=",", header = T)
batchNames <- batchNames[batchNames$sample_id %in% metadata$sample_id,]
batchNames$batchInfo <- paste0(batchNames$pool_id,"-",batchNames$time_point,
"-BIO", batchNames$bioRep, "-TEC", batchNames$techRep,"-TENX", batchNames$tenXRep)
metadata$batchInfo <- batchNames$batchInfo[match(metadata$sample_id, batchNames$sample_id)]
batchNames$batch <- metadata[match(batchNames$sample_id, metadata$sample_id),]$batch
batchNames <- batchNames[!is.na(batchNames$batch),]
seurat@meta.data <- metadata
# Expand kolf_2 donor ids with the gene knocked out in their pool, so KO
# sub-lines are distinguishable from the parental line.
metadata$donorIdExpanded <- metadata$donor_id
genes_state_KO <- c("wt","ASXL3_hom","SNCA_hom","CTNNB1","TCF4_het","wt","CHD2_het","SET_het","GATAD2B_het","TBL1XR1_het")
names(genes_state_KO) <- c("pool10","pool11","pool12","pool13","pool14","pool15","pool16","pool17","pool20","pool21")
poolsWithKO <- names(genes_state_KO)[names(genes_state_KO) %in% unique(metadata$pool_id)]
koGenes <- unname(genes_state_KO[match(poolsWithKO, names(genes_state_KO))])
koGenes <- gsub("_.+","", koGenes[!koGenes %in% "wt"])
# All KO genes must exist in the expression matrix.
stopifnot(all(koGenes %in% rownames(seurat)))
for (pool in poolsWithKO){
geneKO <- unname(genes_state_KO[names(genes_state_KO)==pool])
mask_KO <- grepl("kolf_2", metadata$donorIdExpanded) & (metadata$pool_id==pool)
stopifnot(any(mask_KO))
metadata[mask_KO,]$donorIdExpanded <- paste0(metadata[mask_KO,]$donorIdExpanded,"/",geneKO)
}
seurat@meta.data$donorIdExpanded <- metadata$donorIdExpanded
# Per-line differentiation outcome (Successful/Failed), curated upstream.
summData <- readRDS("outputTabs/DEsinglecell/CTfractionPerLinePerTPCurated2.RDS")
seurat@meta.data$outcomeCurated <- summData[match(seurat@meta.data$donor_id, summData$donor_id),]$outcome
metadata <- seurat@meta.data
# Drop cells from lines without a curated outcome.
mask_na <- !is.na(seurat@meta.data$outcomeCurated)
seurat <- seurat[,mask_na]
seurat@meta.data <- metadata[mask_na,]
# Map leiden cluster numbers (0..11) to cell-type annotations.
annotLeiden <- c("DA","Astro","FPP1","Sertlike","Epend1","proFPP1","FPP2","proFPP2","Unk1","FPP3","ProSertlike","Unk2")
names(annotLeiden) <- as.character(c(0:11))
seurat@meta.data$annot <- unname(annotLeiden[seurat@meta.data$leiden_id])
metadata <- seurat@meta.data
## make sure there are at least 10 cells from each outcome for each cell-type
## Otherwise, the cell-type is not considered for further analysis.
comp1_fail <- as.matrix(table(subset(metadata, outcomeCurated=="Failed")$donorIdExpanded,
subset(metadata, outcomeCurated=="Failed")$annot))
clusters_fail <- names(which(colSums(comp1_fail)>10))
comp1_succ <- as.matrix(table(subset(metadata, outcomeCurated=="Successful")$donorIdExpanded,
subset(metadata, outcomeCurated=="Successful")$annot))
clusters_succ <- names(which(colSums(comp1_succ)>10))
clusters <- intersect(clusters_fail, clusters_succ)
stopifnot(cluster %in% clusters)
print(paste0("Processing cluster ", cluster))
# Subset the object to the requested cell-type cluster.
Idents(seurat) <- "annot"
test = seurat[,Idents(seurat)==cluster]
metData <- seurat@meta.data
test@meta.data <- metData[metData$annot==cluster,]
## filter-out those cells below 10 cells
both <- sum(test@meta.data$outcomeCurated=="Successful")>10 & sum(test@meta.data$outcomeCurated=="Failed")>10
stopifnot(both)
##run DE analysis
# Failed vs Successful outcome; thresholds relaxed to 0 so the full gene
# list is returned (needed for downstream enrichment analyses).
Idents(test) <- "outcomeCurated"
de.response <- FindMarkers(test,
ident.1 = "Failed",
ident.2 = "Successful",
verbose = TRUE,
logfc.threshold=0,
min.cells.group = 1,
min.cells.feature = 1,
only.pos = FALSE,
min.pct = 0)
de.response$annot <- paste0(tp,"-", cluster)
de.response$geneId <- rownames(de.response)
rownames(de.response) <- NULL
# Move geneId to the first column.
reOrd <- dim(de.response)[2]-1
de.response <- de.response[,c(dim(de.response)[2],1:reOrd)]
# NOTE(review): FC is computed as 10^avg_logFC; older Seurat releases report
# avg_logFC on the natural-log scale — confirm the base intended here.
de.response$FC <- 10^de.response$avg_logFC
de.response$log2FC <- log2(de.response$FC)
de.response$nTotalGenes <- dim(seurat)[1]
de.response$nRecoveredGenes <- dim(de.response)[1]
saveRDS(de.response,
file=paste0(pathToDE,"allDEtable2_",tp,"_",cluster,".RDS"))
| /analysis/16-cosmicEnrichment.R | no_license | paupuigdevall/somaticBurdenNeuro2022 | R | false | false | 6,142 | r | library(hdf5r)
library(Seurat)
library(tidyverse)
library(cowplot)
library(viridis)
library(ggrepel)
library(e1071)
library(scales)
library(ggplot2)
library(ggrepel)
library(cowplot)
library(scales)
library(ggpubr)
# Command-line arguments: time point and cell-type cluster to test.
args = commandArgs(trailingOnly=TRUE)
tp <- args[1]
cluster <- args[2]
file <- paste0("../singleCellProcessing/output/allpools.scanpy.",tp,".wMetaClustUmapGraph.exprLogNormNotScaled.h5ad")
pathToDE="outputTabs/DEsinglecell/"
h5_data <- H5File$new(file, mode = "r")
h5ls(file)
# Rebuild the genes-by-cells sparse matrix from the h5ad CSC components;
# index1 = FALSE because h5ad stores 0-based indices.
feature_matrix <- Matrix::sparseMatrix(
i = h5_data[['X/indices']][],
p = h5_data[['X/indptr']][],
x = h5_data[['X/data']][],
dimnames = list(
h5_data[["/var/_index"]][],
h5_data[["obs/_index"]][]),
dims = as.integer(c(length(h5_data[["/var/_index"]][]),
length(h5_data[["obs/_index"]][]))),
index1 = FALSE
)
# Keep genes detected in at least 1% of cells.
seurat <- CreateSeuratObject(feature_matrix, min.cells = 0.01*dim(feature_matrix)[2])
rm(feature_matrix)
# Per-cell metadata; categorical columns still hold integer codes here.
metadata <- data.frame(cellid=h5_data[["obs/_index"]][],
batch=h5_data[["obs/batch"]][],
donor_id=h5_data[["obs/donor_id"]][],
leiden_id=h5_data[["obs/leiden_id"]][],
pool_id=h5_data[["obs/pool_id"]][],
sample_id=h5_data[["obs/sample_id"]][],
tp_treat=h5_data[["obs/tp_treat"]][])
# NOTE(review): h5read() is from rhdf5, which is never library()'d here.
kk=h5read(file,"/obs/__categories")
print(object.size(metadata), units = "auto")
# Decode integer-coded categorical metadata columns back to their labels.
# kk maps each categorical column name to its label vector; stored codes are
# 0-based positions into that vector.
relabelColumns <- function(metadata, kk){
  # One named lookup vector per column: labels keyed by their 0-based code.
  corr <- sapply(kk, function(labels) {
    lookup <- labels
    names(lookup) <- seq_along(labels) - 1
    lookup
  })
  # Relabel every metadata column that has a matching category set.
  for (col in intersect(colnames(metadata), names(corr))) {
    lookup <- corr[[col]]
    metadata[[col]] <- lookup[as.character(metadata[[col]])]
  }
  return(metadata)
}
# Decode categorical codes to labels using the category sets read above.
metadata <- relabelColumns(metadata, kk)
# Attach human-readable batch information (pool / time point / replicates).
batchNames <- read.table("../singleCellProcessing/pools_to_merge_strictTP.csv", sep=",", header = T)
batchNames <- batchNames[batchNames$sample_id %in% metadata$sample_id,]
batchNames$batchInfo <- paste0(batchNames$pool_id,"-",batchNames$time_point,
"-BIO", batchNames$bioRep, "-TEC", batchNames$techRep,"-TENX", batchNames$tenXRep)
metadata$batchInfo <- batchNames$batchInfo[match(metadata$sample_id, batchNames$sample_id)]
batchNames$batch <- metadata[match(batchNames$sample_id, metadata$sample_id),]$batch
batchNames <- batchNames[!is.na(batchNames$batch),]
seurat@meta.data <- metadata
# Expand kolf_2 donor ids with the gene knocked out in their pool.
metadata$donorIdExpanded <- metadata$donor_id
genes_state_KO <- c("wt","ASXL3_hom","SNCA_hom","CTNNB1","TCF4_het","wt","CHD2_het","SET_het","GATAD2B_het","TBL1XR1_het")
names(genes_state_KO) <- c("pool10","pool11","pool12","pool13","pool14","pool15","pool16","pool17","pool20","pool21")
poolsWithKO <- names(genes_state_KO)[names(genes_state_KO) %in% unique(metadata$pool_id)]
koGenes <- unname(genes_state_KO[match(poolsWithKO, names(genes_state_KO))])
koGenes <- gsub("_.+","", koGenes[!koGenes %in% "wt"])
stopifnot(all(koGenes %in% rownames(seurat)))
for (pool in poolsWithKO){
geneKO <- unname(genes_state_KO[names(genes_state_KO)==pool])
mask_KO <- grepl("kolf_2", metadata$donorIdExpanded) & (metadata$pool_id==pool)
stopifnot(any(mask_KO))
metadata[mask_KO,]$donorIdExpanded <- paste0(metadata[mask_KO,]$donorIdExpanded,"/",geneKO)
}
seurat@meta.data$donorIdExpanded <- metadata$donorIdExpanded
# Per-line differentiation outcome (Successful/Failed), curated upstream.
summData <- readRDS("outputTabs/DEsinglecell/CTfractionPerLinePerTPCurated2.RDS")
seurat@meta.data$outcomeCurated <- summData[match(seurat@meta.data$donor_id, summData$donor_id),]$outcome
metadata <- seurat@meta.data
# Drop cells from lines without a curated outcome.
mask_na <- !is.na(seurat@meta.data$outcomeCurated)
seurat <- seurat[,mask_na]
seurat@meta.data <- metadata[mask_na,]
# Map leiden cluster numbers (0..11) to cell-type annotations.
annotLeiden <- c("DA","Astro","FPP1","Sertlike","Epend1","proFPP1","FPP2","proFPP2","Unk1","FPP3","ProSertlike","Unk2")
names(annotLeiden) <- as.character(c(0:11))
seurat@meta.data$annot <- unname(annotLeiden[seurat@meta.data$leiden_id])
metadata <- seurat@meta.data
## make sure there are at least 10 cells from each outcome for each cell-type
## Otherwise, the cell-type is not considered for further analysis.
comp1_fail <- as.matrix(table(subset(metadata, outcomeCurated=="Failed")$donorIdExpanded,
subset(metadata, outcomeCurated=="Failed")$annot))
clusters_fail <- names(which(colSums(comp1_fail)>10))
comp1_succ <- as.matrix(table(subset(metadata, outcomeCurated=="Successful")$donorIdExpanded,
subset(metadata, outcomeCurated=="Successful")$annot))
clusters_succ <- names(which(colSums(comp1_succ)>10))
clusters <- intersect(clusters_fail, clusters_succ)
stopifnot(cluster %in% clusters)
print(paste0("Processing cluster ", cluster))
# Subset the object to the requested cell-type cluster.
Idents(seurat) <- "annot"
test = seurat[,Idents(seurat)==cluster]
metData <- seurat@meta.data
test@meta.data <- metData[metData$annot==cluster,]
## filter-out those cells below 10 cells
both <- sum(test@meta.data$outcomeCurated=="Successful")>10 & sum(test@meta.data$outcomeCurated=="Failed")>10
stopifnot(both)
##run DE analysis
# Failed vs Successful outcome; thresholds relaxed to return all genes.
Idents(test) <- "outcomeCurated"
de.response <- FindMarkers(test,
ident.1 = "Failed",
ident.2 = "Successful",
verbose = TRUE,
logfc.threshold=0,
min.cells.group = 1,
min.cells.feature = 1,
only.pos = FALSE,
min.pct = 0)
de.response$annot <- paste0(tp,"-", cluster)
de.response$geneId <- rownames(de.response)
rownames(de.response) <- NULL
# Move geneId to the first column.
reOrd <- dim(de.response)[2]-1
de.response <- de.response[,c(dim(de.response)[2],1:reOrd)]
# NOTE(review): FC = 10^avg_logFC — confirm the log base Seurat used.
de.response$FC <- 10^de.response$avg_logFC
de.response$log2FC <- log2(de.response$FC)
de.response$nTotalGenes <- dim(seurat)[1]
de.response$nRecoveredGenes <- dim(de.response)[1]
saveRDS(de.response,
file=paste0(pathToDE,"allDEtable2_",tp,"_",cluster,".RDS"))
|
# Moving-average smoothing demo on daily gold prices.
# Fix: install.packages() takes a character vector of package names; the
# original unquoted `install.packages(ggfortify)` fails with
# "object 'ggfortify' not found".
install.packages("ggfortify")
library(forecast)
library(ggplot2)
library(ggfortify)
##### Moving Average ######
# The head() command allows you to take a look at the first few rows of the dataset
head(gold)
# The dim() command allows you to check the size of the dataset
# The first value indicates the number of rows
# The second value indicates the number of columns
str(gold)
dim(gold)
# Next we create gold time series
# NOTE(review): this assumes `gold` has at least two columns (prices in
# column 2) — confirm the object in scope is a data frame, since forecast's
# built-in `gold` dataset is a plain univariate time series.
goldts <-ts(gold[,2])
autoplot(goldts)
##############################################################################
# OPTIONAL - The following adds options to label the chart
##############################################################################
autoplot(goldts) +
  ggtitle("Daily Gold Prices") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
# The moving average command is ma(x, order)
# x is the data vector and order is the number of lags
# Make sure you install the forecast library
### example: moving average 10 lags
goldts10lags <- ma(goldts, 10)
autoplot(goldts10lags) +
  ggtitle("Gold Prices: Moving Average 10 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
goldts100lags <- ma(goldts, 100)
autoplot(goldts100lags) +
  ggtitle("Gold Prices: Moving Average 100 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
goldts500lags <- ma(goldts, 500)
autoplot(goldts500lags) +
  ggtitle("Gold Prices: Moving Average 500 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
| /MovingAverage.R | no_license | jaidprakash/timeseries | R | false | false | 1,577 | r | install.packages(ggfortify)
library(forecast)
library(ggplot2)
library(ggfortify)
##### Moving Average ######
# The head() command allows you to take a look at the first few rows of the dataset
head(gold)
# The dim() command allows you to check the size of the dataset
# The first value indicates the number of rows
# The second value indicates the number of columns
str(gold)
dim(gold)
# Next we create gold time series
# NOTE(review): assumes `gold` has >= 2 columns (prices in column 2) —
# forecast's built-in `gold` is a univariate ts; confirm what is in scope.
goldts <-ts(gold[,2])
autoplot(goldts)
##############################################################################
# OPTIONAL - The following adds options to label the chart
##############################################################################
autoplot(goldts) +
  ggtitle("Daily Gold Prices") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
# The moving average command is ma(x, order)
# x is the data vector and order is the number of lags
# Make sure you install the forecast library
### example: moving average 10 lags
goldts10lags <- ma(goldts, 10)
autoplot(goldts10lags) +
  ggtitle("Gold Prices: Moving Average 10 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
goldts100lags <- ma(goldts, 100)
autoplot(goldts100lags) +
  ggtitle("Gold Prices: Moving Average 100 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
  ylab("USD")
goldts500lags <- ma(goldts, 500)
autoplot(goldts500lags) +
  ggtitle("Gold Prices: Moving Average 500 lags") +
  xlab("period: Jan 1, 1968 to June 11, 2019") +
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logis.R
\name{logis}
\alias{logis}
\title{The logistic transform}
\usage{
logis(Y,param)
}
\arguments{
\item{Y}{A generic matrix or one dimensional array}
\item{param}{Vector of coefficients, whose length is NCOL(Y) + 1 (to consider also an intercept term)}
}
\value{
Returns a vector of length NROW(Y) whose i-th component is the logistic function
evaluated at the scalar product of the i-th row of YY and the vector \code{param}.
}
\description{
Creates a matrix YY by binding a column of ones to the array \code{Y}, the ones being placed as the first column of YY.
It then applies the logistic transform componentwise to the standard matrix product of YY and \code{param}.
}
\examples{
n<-50
Y<-sample(c(1,2,3),n,replace=TRUE)
param<-c(0.2,0.7)
logis(Y,param)
}
\keyword{utilities}
| /man/logis.Rd | no_license | cran/CUB | R | false | true | 857 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logis.R
\name{logis}
\alias{logis}
\title{The logistic transform}
\usage{
logis(Y,param)
}
\arguments{
\item{Y}{A generic matrix or one dimensional array}
\item{param}{Vector of coefficients, whose length is NCOL(Y) + 1 (to consider also an intercept term)}
}
\value{
Return a vector whose length is NROW(Y) and whose i-th component is the logistic function
at the scalar product between the i-th row of YY and the vector \code{param}.
}
\description{
Create a matrix YY binding array \code{Y} with a vector of ones, placed as the first column of YY.
It applies the logistic transform componentwise to the standard matrix multiplication between YY and \code{param}.
}
\examples{
n<-50
Y<-sample(c(1,2,3),n,replace=TRUE)
param<-c(0.2,0.7)
logis(Y,param)
}
\keyword{utilities}
|
# Compare written SDMT (Symbol Digit Modalities Test) scores between healthy
# volunteers (HV) and MS patients: prints the head of the data, a Wilcoxon
# rank-sum test, a boxplot, and the per-group medians (HV then MS).
#
# Fix: the `data` argument was previously ignored — the body always used the
# global `fig4B`. It now uses `data`, defaulting to `fig4B` so existing
# no-argument callers keep their behaviour.
#
# Args:
#   data: data.frame with columns `Diagnosis` ("HV"/"MS") and
#     `Paper_Correct` (numeric written SDMT score). Defaults to `fig4B`.
# Returns: the MS-group median (the last value printed), invisibly via print().
bpWvW <- function(data = fig4B) {
  matched <- data
  print(head(matched))
  print(wilcox.test(Paper_Correct ~ Diagnosis, data = matched))
  print(boxplot(Paper_Correct ~ Diagnosis, data = matched,
                ylab = "Written SDMT Score"))
  print(median(subset(matched, Diagnosis == "HV")$Paper_Correct))
  print(median(subset(matched, Diagnosis == "MS")$Paper_Correct))
}
| /FormerLabMembers/Linh/sdmt_analyses/funcs_cross/bpWvW.R | no_license | bielekovaLab/Bielekova-Lab-Code | R | false | false | 381 | r | bpWvW <- function(data) {
  # NOTE(review): the enclosing function takes a `data` argument but this
  # body ignores it and always operates on the global `fig4B` — likely a bug.
  matched <- fig4B
  # Print the first rows, a Wilcoxon rank-sum test of written SDMT score
  # (Paper_Correct) by Diagnosis, and a boxplot of the same comparison.
  print(head(matched))
  print(wilcox.test(Paper_Correct~Diagnosis, data = matched))
  print(boxplot(Paper_Correct~Diagnosis, data = matched,
                ylab = "Written SDMT Score"))
  # Per-group medians: healthy volunteers (HV), then MS patients.
  print(median(subset(matched, Diagnosis == "HV")$Paper_Correct))
  print(median(subset(matched, Diagnosis == "MS")$Paper_Correct))
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
#setwd("/Users/AjaySuresh/NYC_Public_Schools/")
# School-level dataset: enrollment, ethnicity fractions, poverty fraction,
# and mean math test score (MAT_mean_score) per NYC public school.
dat = read.csv('SCHOOL_FULL_DATA.csv')
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
ui <- fluidPage(
  # Application title
  titlePanel("NYC Public School Data"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      h1("Scores", align = "center"),
      h2("vs", align = "center"),
      #Dropdown menu
      # `ds` selects which analysis the server renders; the first choice
      # ("Poverty - Distribution") is the default selection.
      selectInput(inputId = "ds",
                  label = "Choose a dataset",
                  choices = c("Poverty - Distribution", "Ethnicity Distribution",
                              "Ethnicity - African American",
                              "Ethnicity - Hispanic",
                              "Ethnicity - Asian",
                              "Ethnicity - White"))
      #Load data
      #submitButton("Load Data")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("plot"),
      verbatimTextOutput("lm")
    )
  )
)
# Define server logic required to draw a histogram
server <- function(input, output) {
  # linReg: linear-model summary of mean math score against the selected
  # ethnicity fraction. NOTE(review): the two "Distribution" choices have no
  # branch here, so linReg() returns NULL (nothing rendered) for them.
  linReg = reactive({
    if(input$ds == "Ethnicity - African American"){
      linearMod = lm(MAT_mean_score ~ Black, data = dat)
      summary(linearMod)
    }
    else if(input$ds=="Ethnicity - Asian")
    {
      linearMod = lm(MAT_mean_score ~ Asian, data = dat)
      summary(linearMod)
    }
    else if(input$ds == "Ethnicity - White")
    {
      linearMod = lm(MAT_mean_score ~ White, data = dat)
      summary(linearMod)
    }
    else if(input$ds == "Ethnicity - Hispanic")
    {
      linearMod = lm(MAT_mean_score ~ Hispanic, data = dat)
      summary(linearMod)
    }
  })
  # p2: builds the plot object for the selected analysis.
  p2 = reactive({
    if(input$ds == "Poverty - Distribution"){
      # NOTE(review): sums over dat$Poverty have no na.rm — any NA makes
      # these NA. `Percents` mixes a sum of fractions (p) with student
      # counts (n_np); confirm the intended quantity.
      p = sum(dat$Poverty)
      np = sum(1-dat$Poverty)
      dat$num_pov = dat$Poverty * dat$Total.Enrollment
      dat$num_not_pov = (1-dat$Poverty) * dat$Total.Enrollment
      n_p = 0
      for (num in dat$num_pov) {
        if (!is.na(num)) {
          n_p = n_p + num
        }
      }
      n_np = 0
      for (num in dat$num_not_pov) {
        if (!is.na(num)) {
          n_np = n_np + num
        }
      }
      Percents = c(p, (n_np-p)) / n_np
      Numbers = c(n_p, n_np)
      labels = c("Poverty", "Not Poverty")
      df = data.frame(Percents, Numbers, labels)
      # NOTE(review): gridExtra is only require()'d in this branch, but the
      # "Ethnicity Distribution" branch below also calls grid.arrange().
      require("gridExtra")
      plt2 = ggplot(df, aes(x=labels, y=Numbers, fill=labels)) + geom_bar(stat='identity') +
        theme(axis.title.x=element_blank(),
              axis.text.x=element_blank(),
              axis.ticks.x=element_blank(),
              legend.position="none") +
        labs(title="Student Poverty -- Distribution")
      plt3 = ggplot(dat, aes(MAT_mean_score, fill=(Poverty<.5))) +
        geom_histogram() +
        labs(title="School Mean Test Score -- Poverty")
      #plt3
      grid.arrange(plt2, plt3, ncol=2)
    }
    else if(input$ds == "Ethnicity Distribution")
    {
      w = sum(dat$White)
      b = sum(dat$Black)
      a = sum(dat$Asian)
      h = sum(dat$Hispanic)
      n = 0
      for (num in dat$Total.Enrollment) {
        if (!is.na(num)) {
          n = n + num
        }
      }
      t = w + b + a + h
      Percents = c(b,h,w,a) / t
      Numbers = Percents * n
      labels = c("Black", "Hispanic", "White", "Asian")
      df = data.frame(Percents, Numbers, labels)
      plt1 = ggplot(df, aes(x=1, y=Percents, fill=labels)) + geom_bar(stat='identity', width=0.01) +
        theme(axis.title.x=element_blank(),
              axis.text.x=element_blank(),
              axis.ticks.x=element_blank()) +
        labs(title="Student Ethnicity -- Cumulative")
      plt2 = ggplot(df, aes(x=labels, y=Numbers, fill=labels)) + geom_bar(stat='identity') +
        theme(axis.title.x=element_blank(),
              axis.text.x=element_blank(),
              axis.ticks.x=element_blank()) +
        labs(title="Student Ethnicity -- Distribution")
      grid.arrange(plt1, plt2, ncol=2)
    }
    # NOTE(review): in each ethnicity branch below, the first ggplot() is
    # built and immediately discarded — only the second (poverty-coloured)
    # plot is returned. The first calls are dead code.
    else if(input$ds == "Ethnicity - Hispanic"){
      ggplot(dat, aes(x=Hispanic, y=MAT_mean_score, color=("red"), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity") +
        theme(legend.position="none")
      ggplot(dat, aes(x=Hispanic, y=MAT_mean_score, color=(Poverty<.5), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity")
    }
    else if(input$ds == "Ethnicity - Asian"){
      ggplot(dat, aes(x=Asian, y=MAT_mean_score, color=("red"), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity") +
        theme(legend.position="none")
      ggplot(dat, aes(x=Asian, y=MAT_mean_score, color=(Poverty<.5), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity")
    }
    else if(input$ds == "Ethnicity - White"){
      ggplot(dat, aes(x=White, y=MAT_mean_score, color=("red"), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Poverty") +
        theme(legend.position="none")
      ggplot(dat, aes(x=White, y=MAT_mean_score, color=(Poverty<.5), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity")
    }
    else if(input$ds == "Ethnicity - African American"){
      ggplot(dat, aes(x=Black, y=MAT_mean_score, color=("red"), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity") +
        theme(legend.position="none")
      ggplot(dat, aes(x=Black, y=MAT_mean_score, color=(Poverty<.5), fill=FALSE, alpha=.25)) +
        geom_point() +
        labs(title="Test Score -- Ethnicity")
    }
  })
  # NOTE(review): plot() on the reactive's result with a `data =` argument is
  # questionable — renderPlot({p2()}) would print the plot object directly.
  output$plot = renderPlot({plot(p2(), data = dat)})
  output$lm = renderPrint({linReg()})
}
# Run the application
shinyApp(ui = ui, server = server)
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#setwd("/Users/AjaySuresh/NYC_Public_Schools/")
dat = read.csv('SCHOOL_FULL_DATA.csv')
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("NYC Public School Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
h1("Scores", align = "center"),
h2("vs", align = "center"),
#Dropdown menu
selectInput(inputId = "ds",
label = "Choose a dataset",
choices = c("Poverty - Distribution", "Ethnicity Distribution",
"Ethnicity - African American",
"Ethnicity - Hispanic",
"Ethnicity - Asian",
"Ethnicity - White"))
#Load data
#submitButton("Load Data")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("plot"),
verbatimTextOutput("lm")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
  # Maps a sidebar dataset choice to the corresponding ethnicity-share
  # column of `dat`; returns NULL for the two "Distribution" views.
  ethnicity_col <- function(ds) {
    switch(ds,
           "Ethnicity - African American" = "Black",
           "Ethnicity - Hispanic"         = "Hispanic",
           "Ethnicity - Asian"            = "Asian",
           "Ethnicity - White"            = "White",
           NULL)
  }

  # Scatter of school mean math score against one ethnicity share,
  # coloured by whether the school's poverty share is below 50%.
  # (Constant fill/alpha inside aes() reproduce the original aesthetics.)
  ethnicity_scatter <- function(col) {
    ggplot(dat, aes(x = .data[[col]], y = MAT_mean_score,
                    color = (Poverty < .5), fill = FALSE, alpha = .25)) +
      geom_point() +
      labs(title = "Test Score -- Ethnicity", x = col)
  }

  # Linear regression of mean math score on the selected ethnicity share.
  # The two "Distribution" views have no associated model, so the reactive
  # yields NULL and renderPrint shows "NULL" -- NOTE(review): confirm that
  # is the intended behaviour for those views.
  linReg <- reactive({
    col <- ethnicity_col(input$ds)
    if (!is.null(col)) {
      # do.call() keeps the printed Call line readable:
      # "lm(formula = MAT_mean_score ~ <col>, data = dat)".
      fit <- do.call("lm", list(as.formula(paste("MAT_mean_score ~", col)),
                                data = quote(dat)))
      summary(fit)
    }
  })

  p2 <- reactive({
    if (input$ds == "Poverty - Distribution") {
      # NOTE(review): `p` sums per-school poverty *fractions* but is mixed
      # with enrollment counts in `Percents`; `Percents` is never plotted,
      # so the display is unaffected -- confirm the intended statistic.
      p <- sum(dat$Poverty)
      dat$num_pov <- dat$Poverty * dat$Total.Enrollment
      dat$num_not_pov <- (1 - dat$Poverty) * dat$Total.Enrollment
      n_p <- sum(dat$num_pov, na.rm = TRUE)       # students in poverty
      n_np <- sum(dat$num_not_pov, na.rm = TRUE)  # students not in poverty
      Percents <- c(p, (n_np - p)) / n_np
      Numbers <- c(n_p, n_np)
      labels <- c("Poverty", "Not Poverty")
      df <- data.frame(Percents, Numbers, labels)
      require("gridExtra")  # for grid.arrange(); better loaded at the top of the app
      plt2 <- ggplot(df, aes(x = labels, y = Numbers, fill = labels)) +
        geom_bar(stat = 'identity') +
        theme(axis.title.x = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x = element_blank(),
              legend.position = "none") +
        labs(title = "Student Poverty -- Distribution")
      plt3 <- ggplot(dat, aes(MAT_mean_score, fill = (Poverty < .5))) +
        geom_histogram() +
        labs(title = "School Mean Test Score -- Poverty")
      grid.arrange(plt2, plt3, ncol = 2)
    } else if (input$ds == "Ethnicity Distribution") {
      w <- sum(dat$White)
      b <- sum(dat$Black)
      a <- sum(dat$Asian)
      h <- sum(dat$Hispanic)
      n <- sum(dat$Total.Enrollment, na.rm = TRUE)  # total students
      t <- w + b + a + h
      Percents <- c(b, h, w, a) / t
      Numbers <- Percents * n
      labels <- c("Black", "Hispanic", "White", "Asian")
      df <- data.frame(Percents, Numbers, labels)
      plt1 <- ggplot(df, aes(x = 1, y = Percents, fill = labels)) +
        geom_bar(stat = 'identity', width = 0.01) +
        theme(axis.title.x = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x = element_blank()) +
        labs(title = "Student Ethnicity -- Cumulative")
      plt2 <- ggplot(df, aes(x = labels, y = Numbers, fill = labels)) +
        geom_bar(stat = 'identity') +
        theme(axis.title.x = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x = element_blank()) +
        labs(title = "Student Ethnicity -- Distribution")
      grid.arrange(plt1, plt2, ncol = 2)
    } else {
      # Each ethnicity branch of the original built two scatter plots but
      # only the second (poverty-coloured) one was ever returned; the dead
      # first plot has been removed.
      col <- ethnicity_col(input$ds)
      if (!is.null(col)) ethnicity_scatter(col)
    }
  })

  output$plot <- renderPlot({
    p2()  # ggplot objects are printed by renderPlot; grid.arrange draws directly
  })
  output$lm <- renderPrint({
    linReg()
  })
}
# Run the application: pair the `ui` layout defined above with the
# reactive `server` function.
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-quantile.r
\name{geom_quantile}
\alias{geom_quantile}
\title{Add quantile lines from a quantile regression.}
\usage{
geom_quantile(mapping = NULL, data = NULL, stat = "quantile",
position = "identity", lineend = "butt", linejoin = "round",
linemitre = 1, na.rm = FALSE, ...)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set
at the layer level if you are overriding the plot defaults.}
\item{data}{A layer-specific dataset - only needed if you want to override
the plot defaults.}
\item{stat}{The statistical transformation to use on the data for this
layer.}
\item{position}{The position adjustment to use for overlapping points
on this layer}
\item{lineend}{Line end style (round, butt, square)}
\item{linejoin}{Line join style (round, mitre, bevel)}
\item{linemitre}{Line mitre limit (number greater than 1)}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{...}{other arguments passed on to \code{\link{layer}}. This can
include aesthetics whose values you want to set, not map. See
\code{\link{layer}} for more details.}
}
\description{
This can be used as a continuous analogue of a geom_boxplot.
}
\examples{
# See stat_quantile for examples
}
\seealso{
See \code{\link{stat_quantile}} for examples.
}
| /man/geom_quantile.Rd | no_license | cattapre/ggplot2.SparkR | R | false | true | 1,490 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-quantile.r
\name{geom_quantile}
\alias{geom_quantile}
\title{Add quantile lines from a quantile regression.}
\usage{
geom_quantile(mapping = NULL, data = NULL, stat = "quantile",
position = "identity", lineend = "butt", linejoin = "round",
linemitre = 1, na.rm = FALSE, ...)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set
at the layer level if you are overriding the plot defaults.}
\item{data}{A layer specific dataset - only needed if you want to override
the plot defaults.}
\item{stat}{The statistical transformation to use on the data for this
layer.}
\item{position}{The position adjustment to use for overlapping points
on this layer}
\item{lineend}{Line end style (round, butt, square)}
\item{linejoin}{Line join style (round, mitre, bevel)}
\item{linemitre}{Line mitre limit (number greater than 1)}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{...}{other arguments passed on to \code{\link{layer}}. This can
include aesthetics whose values you want to set, not map. See
\code{\link{layer}} for more details.}
}
\description{
This can be used as a continuous analogue of a geom_boxplot.
}
\examples{
# See stat_quantile for examples
}
\seealso{
See \code{\link{stat_quantile}} for examples.
}
|
## Box's M-test for testing homogeneity of covariance matrices
##
## Written by Andy Liaw (2004) converted from Matlab
## Andy's note indicates that he has left the original Matlab comments intact
##
##
## Slight clean-up and fix with corrected documentation provided by Ranjan Maitra (2016)
##
BoxMTest <- function(X, cl, alpha = 0.05) {
  ## Box's M test for the homogeneity of group covariance matrices.
  ##
  ## Args:
  ##   X     - numeric data matrix, n-by-p (observations in rows,
  ##           variables in columns).
  ##   cl    - grouping factor of length n.
  ##   alpha - significance level, strictly between 0 and 1 (default 0.05).
  ##
  ## Returns a list with Box's M statistic, the approximating test
  ## statistic (chi-square when every group has at least 20 observations,
  ## otherwise an F statistic), its degrees of freedom, and the p-value.
  ## A summary table and a verdict are printed as a side effect.
  ##
  ## Converted from the MATLAB MBoxtest of Trujillo-Ortiz et al. (2002)
  ## by Andy Liaw (2004); cleaned up by Ranjan Maitra (2016).
  ## Reference: Stevens (1992), Applied Multivariate Statistics for the
  ## Social Sciences, 2nd ed., pp. 260-269.
  cl <- droplevels(cl)  # drop unused factor levels so group counts are right
  if (alpha <= 0 || alpha >= 1)
    stop('significance level must be between 0 and 1')
  g <- nlevels(cl)  ## number of groups
  n <- table(cl)    ## per-group sample sizes
  p <- ncol(X)      ## number of variables
  ## The chi-square approximation is adequate only when *every* group is
  ## sufficiently large (n_k >= 20). The original used any(), which chose
  ## the chi-square path as soon as a single group reached 20 observations.
  bandera <- if (all(n >= 20)) 1 else 2
  ## Per-group covariance matrices, in factor-level order.
  covList <- tapply(as.matrix(X), rep(cl, ncol(X)),
                    function(x, nc) cov(matrix(x, nc = nc)), ncol(X))
  deno <- sum(n) - g
  suma <- array(0, dim = dim(covList[[1]]))
  for (k in seq_len(g))
    suma <- suma + (n[k] - 1) * covList[[k]]
  Sp <- suma / deno  ## pooled covariance matrix
  Falta <- 0
  for (k in seq_len(g))
    Falta <- Falta + ((n[k] - 1) * log(det(covList[[k]])))
  MB <- (sum(n) - g) * log(det(Sp)) - Falta  ## Box's M statistic
  suma1 <- sum(1 / (n - 1))
  suma2 <- sum(1 / ((n - 1)^2))
  ## Correction factor used by both approximations.
  C <- (((2 * p^2) + (3 * p) - 1) / (6 * (p + 1) * (g - 1))) *
    (suma1 - (1 / deno))
  if (bandera == 1) {
    X2 <- MB * (1 - C)  ## chi-square approximation
    v <- as.integer((p * (p + 1) * (g - 1)) / 2)  ## degrees of freedom
    ## Upper-tail p-value of the observed chi-square statistic.
    P <- pchisq(X2, v, lower.tail = FALSE)
    cat('------------------------------------------------\n')
    cat(' MBox Chi-sqr. df P\n')
    cat('------------------------------------------------\n')
    cat(sprintf("%10.4f%11.4f%12.i%13.4f\n", MB, X2, v, P))
    cat('------------------------------------------------\n')
    if (P >= alpha) {
      cat('Covariance matrices are not significantly different.\n')
    } else {
      cat('Covariance matrices are significantly different.\n')
    }
    return(list(MBox = MB, ChiSq = X2, df = v, pValue = P))
  }
  ## Small samples: F approximation. Co together with C determines the
  ## denominator degrees of freedom; two cases arise depending on the
  ## sign of Co - C^2.
  Co <- (((p - 1) * (p + 2)) / (6 * (g - 1))) * (suma2 - (1 / (deno^2)))
  v1 <- as.integer((p * (p + 1) * (g - 1)) / 2)  ## numerator df
  if (Co - (C^2) >= 0) {
    v21 <- as.integer(trunc((v1 + 2) / (Co - (C^2))))  ## denominator df
    F1 <- MB * ((1 - C - (v1 / v21)) / v1)  ## F approximation
    P1 <- pf(F1, v1, v21, lower.tail = FALSE)
    cat('\n------------------------------------------------------------\n')
    cat(' MBox F df1 df2 P\n')
    cat('------------------------------------------------------------\n')
    cat(sprintf("%10.4f%11.4f%11.i%14.i%13.4f\n", MB, F1, v1, v21, P1))
    cat('------------------------------------------------------------\n')
    if (P1 >= alpha) {
      cat('Covariance matrices are not significantly different.\n')
    } else {
      cat('Covariance matrices are significantly different.\n')
    }
    return(list(MBox = MB, F = F1, df1 = v1, df2 = v21, pValue = P1))
  } else {
    v22 <- as.integer(trunc((v1 + 2) / ((C^2) - Co)))  ## denominator df
    b <- v22 / (1 - C - (2 / v22))
    F2 <- (v22 * MB) / (v1 * (b - MB))  ## F approximation
    P2 <- pf(F2, v1, v22, lower.tail = FALSE)
    cat('\n------------------------------------------------------------\n')
    cat(' MBox F df1 df2 P\n')
    cat('------------------------------------------------------------\n')
    cat(sprintf('%10.4f%11.4f%11.i%14.i%13.4f\n', MB, F2, v1, v22, P2))
    cat('------------------------------------------------------------\n')
    if (P2 >= alpha) {
      cat('Covariance matrices are not significantly different.\n')
    } else {
      cat('Covariance matrices are significantly different.\n')
    }
    return(list(MBox = MB, F = F2, df1 = v1, df2 = v22, pValue = P2))
  }
}
| /STAT501/STAT501exam1YifanZhu/BoxMTest-2.R | no_license | fanne-stat/Homework | R | false | false | 8,374 | r | ## Box's M-test for testing homogeneity of covariance matrices
##
## Written by Andy Liaw (2004) converted from Matlab
## Andy's note indicates that he has left the original Matlab comments intact
##
##
## Slight clean-up and fix with corrected documentation provided by Ranjan Maitra (2016)
##
BoxMTest <- function(X, cl, alpha=0.05) {
  ## Box's M test for the homogeneity of group covariance matrices.
  ##
  ## X: numeric data matrix (n observations by p variables).
  ## cl: grouping factor of length n.
  ## alpha: significance level, strictly between 0 and 1.
  ##
  ## Prints a summary table and a verdict, and returns a list holding the
  ## M statistic, the approximating statistic with its degrees of freedom,
  ## and the p-value. Ported from the MATLAB MBoxtest of Trujillo-Ortiz
  ## et al. (2002); see Stevens (1992, pp. 260-269) for the method.
  cl <- droplevels(cl)                  # ignore empty factor levels
  if (alpha <= 0 || alpha >= 1)
    stop('significance level must be between 0 and 1')
  num.groups <- nlevels(cl)
  group.sizes <- table(cl)
  num.vars <- ncol(X)
  ## NOTE(review): the chi-square route is taken when *any* group reaches
  ## 20 observations; textbooks state the condition per group. Preserved
  ## exactly as in the original.
  use.chisq <- any(group.sizes >= 20)
  print(num.groups)                     # debug output kept for parity
  ## Covariance matrix of each group, in factor-level order.
  cov.by.group <- tapply(as.matrix(X), rep(cl, ncol(X)),
                         function(x, nc) cov(matrix(x, nc = nc)), ncol(X))
  pooled.df <- sum(group.sizes) - num.groups
  weighted.sum <- array(0, dim = dim(cov.by.group[[1]]))
  log.dets <- 0
  for (k in seq_len(num.groups)) {
    w <- group.sizes[k] - 1
    weighted.sum <- weighted.sum + w * cov.by.group[[k]]
    log.dets <- log.dets + w * log(det(cov.by.group[[k]]))
  }
  pooled.cov <- weighted.sum / pooled.df
  MB <- pooled.df * log(det(pooled.cov)) - log.dets   # Box's M statistic
  inv.sum <- sum(1 / (group.sizes - 1))
  inv.sq.sum <- sum(1 / ((group.sizes - 1)^2))
  ## Correction factor used by both approximations.
  C <- (((2 * num.vars^2) + (3 * num.vars) - 1) /
          (6 * (num.vars + 1) * (num.groups - 1))) *
    (inv.sum - (1 / pooled.df))
  if (use.chisq) {
    chi.stat <- MB * (1 - C)                          # chi-square approx.
    dof <- as.integer((num.vars * (num.vars + 1) * (num.groups - 1)) / 2)
    p.value <- pchisq(chi.stat, dof, lower.tail = FALSE)
    cat('------------------------------------------------\n')
    cat(' MBox Chi-sqr. df P\n')
    cat('------------------------------------------------\n')
    cat(sprintf("%10.4f%11.4f%12.i%13.4f\n", MB, chi.stat, dof, p.value))
    cat('------------------------------------------------\n')
    if (p.value >= alpha) {
      cat('Covariance matrices are not significantly different.\n')
    } else {
      cat('Covariance matrices are significantly different.\n')
    }
    return(list(MBox=MB, ChiSq=chi.stat, df=dof, pValue=p.value))
  }
  ## Small samples: F approximation. Co and C together fix the denominator
  ## degrees of freedom; two cases arise by the sign of Co - C^2.
  Co <- (((num.vars - 1) * (num.vars + 2)) / (6 * (num.groups - 1))) *
    (inv.sq.sum - (1 / (pooled.df^2)))
  dof1 <- as.integer((num.vars * (num.vars + 1) * (num.groups - 1)) / 2)
  if (Co - (C^2) >= 0) {
    dof2 <- as.integer(trunc((dof1 + 2) / (Co - (C^2))))
    f.stat <- MB * ((1 - C - (dof1 / dof2)) / dof1)
  } else {
    dof2 <- as.integer(trunc((dof1 + 2) / ((C^2) - Co)))
    b <- dof2 / (1 - C - (2 / dof2))
    f.stat <- (dof2 * MB) / (dof1 * (b - MB))
  }
  p.value <- pf(f.stat, dof1, dof2, lower.tail = FALSE)
  cat('\n------------------------------------------------------------\n')
  cat(' MBox F df1 df2 P\n')
  cat('------------------------------------------------------------\n')
  cat(sprintf("%10.4f%11.4f%11.i%14.i%13.4f\n", MB, f.stat, dof1, dof2, p.value))
  cat('------------------------------------------------------------\n')
  if (p.value >= alpha) {
    cat('Covariance matrices are not significantly different.\n')
  } else {
    cat('Covariance matrices are significantly different.\n')
  }
  list(MBox=MB, F=f.stat, df1=dof1, df2=dof2, pValue=p.value)
}
|
# Compound a single stem cell by 20% per day for one week; the update
# rule S <- S + 0.2 * S is applied once per day. The final cat() below
# prints the resulting population size.
S <- 1
for (day in seq_len(7)) {
  S <- S + 0.2 * S
}
cat(S,"\n") | /stemcell.R | no_license | dtonthat2/chem160module13 | R | false | false | 55 | r | S<-1
for (time in 1:7) {
S<-S+0.2*S }
cat(S,"\n") |
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS Budgets
#'
#' @description
#' The AWS Budgets API enables you to use AWS Budgets to plan your service
#' usage, service costs, and instance reservations. The API reference
#' provides descriptions, syntax, and usage examples for each of the
#' actions and data types for AWS Budgets.
#'
#' Budgets provide you with a way to see the following information:
#'
#' - How close your plan is to your budgeted amount or to the free tier
#' limits
#'
#' - Your usage-to-date, including how much you've used of your Reserved
#' Instances (RIs)
#'
#' - Your current estimated charges from AWS, and how much your predicted
#' usage will accrue in charges by the end of the month
#'
#' - How much of your budget has been used
#'
#' AWS updates your budget status several times a day. Budgets track your
#' unblended costs, subscriptions, refunds, and RIs. You can create the
#' following types of budgets:
#'
#' - **Cost budgets** - Plan how much you want to spend on a service.
#'
#' - **Usage budgets** - Plan how much you want to use one or more
#' services.
#'
#' - **RI utilization budgets** - Define a utilization threshold, and
#' receive alerts when your RI usage falls below that threshold. This
#' lets you see if your RIs are unused or under-utilized.
#'
#' - **RI coverage budgets** - Define a coverage threshold, and receive
#' alerts when the number of your instance hours that are covered by
#' RIs fall below that threshold. This lets you see how much of your
#' instance usage is covered by a reservation.
#'
#' Service Endpoint
#'
#' The AWS Budgets API provides the following endpoint:
#'
#' - https://budgets.amazonaws.com
#'
#' For information about costs that are associated with the AWS Budgets
#' API, see [AWS Cost Management
#' Pricing](https://aws.amazon.com/aws-cost-management/pricing/).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- budgets(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- budgets()
#' svc$create_budget(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=budgets_create_budget]{create_budget} \tab Creates a budget and, if included, notifications and subscribers\cr
#' \link[=budgets_create_budget_action]{create_budget_action} \tab Creates a budget action\cr
#' \link[=budgets_create_notification]{create_notification} \tab Creates a notification\cr
#' \link[=budgets_create_subscriber]{create_subscriber} \tab Creates a subscriber\cr
#' \link[=budgets_delete_budget]{delete_budget} \tab Deletes a budget\cr
#' \link[=budgets_delete_budget_action]{delete_budget_action} \tab Deletes a budget action\cr
#' \link[=budgets_delete_notification]{delete_notification} \tab Deletes a notification\cr
#' \link[=budgets_delete_subscriber]{delete_subscriber} \tab Deletes a subscriber\cr
#' \link[=budgets_describe_budget]{describe_budget} \tab Describes a budget\cr
#' \link[=budgets_describe_budget_action]{describe_budget_action} \tab Describes a budget action detail\cr
#' \link[=budgets_describe_budget_action_histories]{describe_budget_action_histories} \tab Describes a budget action history detail\cr
#' \link[=budgets_describe_budget_actions_for_account]{describe_budget_actions_for_account} \tab Describes all of the budget actions for an account\cr
#' \link[=budgets_describe_budget_actions_for_budget]{describe_budget_actions_for_budget} \tab Describes all of the budget actions for a budget\cr
#' \link[=budgets_describe_budget_performance_history]{describe_budget_performance_history} \tab Describes the history for DAILY, MONTHLY, and QUARTERLY budgets\cr
#' \link[=budgets_describe_budgets]{describe_budgets} \tab Lists the budgets that are associated with an account\cr
#' \link[=budgets_describe_notifications_for_budget]{describe_notifications_for_budget} \tab Lists the notifications that are associated with a budget\cr
#' \link[=budgets_describe_subscribers_for_notification]{describe_subscribers_for_notification} \tab Lists the subscribers that are associated with a notification\cr
#' \link[=budgets_execute_budget_action]{execute_budget_action} \tab Executes a budget action\cr
#' \link[=budgets_update_budget]{update_budget} \tab Updates a budget\cr
#' \link[=budgets_update_budget_action]{update_budget_action} \tab Updates a budget action\cr
#' \link[=budgets_update_notification]{update_notification} \tab Updates a notification\cr
#' \link[=budgets_update_subscriber]{update_subscriber} \tab Updates a subscriber
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname budgets
#' @export
budgets <- function(config = list()) {
  # Attach the caller-supplied configuration (credentials, endpoint,
  # region) to this service's operation set and hand back the client.
  set_config(.budgets$operations, config)
}
# Private API objects: metadata, handlers, interfaces, etc.
# (Populated by the generated operation files; not part of the public API.)
.budgets <- list()
# One function per API operation is registered onto this list elsewhere.
.budgets$operations <- list()
# Static service description: endpoint per partition, protocol versions,
# and the names used for request signing and routing.
.budgets$metadata <- list(
service_name = "budgets",
endpoints = list("*" = list(endpoint = "https://budgets.amazonaws.com", global = TRUE), "cn-*" = list(endpoint = "budgets.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "budgets.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "budgets.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Budgets",
api_version = "2016-10-20",
signing_name = "budgets",
json_version = "1.1",
target_prefix = "AWSBudgetServiceGateway"
)
.budgets$service <- function(config = list()) {
  # Assemble the runnable service object: JSON-RPC protocol handlers
  # signed with AWS Signature Version 4, bound to the metadata above.
  new_service(.budgets$metadata, new_handlers("jsonrpc", "v4"), config)
}
| /paws/R/budgets_service.R | permissive | williazo/paws | R | false | false | 6,216 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS Budgets
#'
#' @description
#' The AWS Budgets API enables you to use AWS Budgets to plan your service
#' usage, service costs, and instance reservations. The API reference
#' provides descriptions, syntax, and usage examples for each of the
#' actions and data types for AWS Budgets.
#'
#' Budgets provide you with a way to see the following information:
#'
#' - How close your plan is to your budgeted amount or to the free tier
#' limits
#'
#' - Your usage-to-date, including how much you've used of your Reserved
#' Instances (RIs)
#'
#' - Your current estimated charges from AWS, and how much your predicted
#' usage will accrue in charges by the end of the month
#'
#' - How much of your budget has been used
#'
#' AWS updates your budget status several times a day. Budgets track your
#' unblended costs, subscriptions, refunds, and RIs. You can create the
#' following types of budgets:
#'
#' - **Cost budgets** - Plan how much you want to spend on a service.
#'
#' - **Usage budgets** - Plan how much you want to use one or more
#' services.
#'
#' - **RI utilization budgets** - Define a utilization threshold, and
#' receive alerts when your RI usage falls below that threshold. This
#' lets you see if your RIs are unused or under-utilized.
#'
#' - **RI coverage budgets** - Define a coverage threshold, and receive
#' alerts when the number of your instance hours that are covered by
#' RIs fall below that threshold. This lets you see how much of your
#' instance usage is covered by a reservation.
#'
#' Service Endpoint
#'
#' The AWS Budgets API provides the following endpoint:
#'
#' - https://budgets.amazonaws.com
#'
#' For information about costs that are associated with the AWS Budgets
#' API, see [AWS Cost Management
#' Pricing](https://aws.amazon.com/aws-cost-management/pricing/).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- budgets(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- budgets()
#' svc$create_budget(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=budgets_create_budget]{create_budget} \tab Creates a budget and, if included, notifications and subscribers\cr
#' \link[=budgets_create_budget_action]{create_budget_action} \tab Creates a budget action\cr
#' \link[=budgets_create_notification]{create_notification} \tab Creates a notification\cr
#' \link[=budgets_create_subscriber]{create_subscriber} \tab Creates a subscriber\cr
#' \link[=budgets_delete_budget]{delete_budget} \tab Deletes a budget\cr
#' \link[=budgets_delete_budget_action]{delete_budget_action} \tab Deletes a budget action\cr
#' \link[=budgets_delete_notification]{delete_notification} \tab Deletes a notification\cr
#' \link[=budgets_delete_subscriber]{delete_subscriber} \tab Deletes a subscriber\cr
#' \link[=budgets_describe_budget]{describe_budget} \tab Describes a budget\cr
#' \link[=budgets_describe_budget_action]{describe_budget_action} \tab Describes a budget action detail\cr
#' \link[=budgets_describe_budget_action_histories]{describe_budget_action_histories} \tab Describes a budget action history detail\cr
#' \link[=budgets_describe_budget_actions_for_account]{describe_budget_actions_for_account} \tab Describes all of the budget actions for an account\cr
#' \link[=budgets_describe_budget_actions_for_budget]{describe_budget_actions_for_budget} \tab Describes all of the budget actions for a budget\cr
#' \link[=budgets_describe_budget_performance_history]{describe_budget_performance_history} \tab Describes the history for DAILY, MONTHLY, and QUARTERLY budgets\cr
#' \link[=budgets_describe_budgets]{describe_budgets} \tab Lists the budgets that are associated with an account\cr
#' \link[=budgets_describe_notifications_for_budget]{describe_notifications_for_budget} \tab Lists the notifications that are associated with a budget\cr
#' \link[=budgets_describe_subscribers_for_notification]{describe_subscribers_for_notification} \tab Lists the subscribers that are associated with a notification\cr
#' \link[=budgets_execute_budget_action]{execute_budget_action} \tab Executes a budget action\cr
#' \link[=budgets_update_budget]{update_budget} \tab Updates a budget\cr
#' \link[=budgets_update_budget_action]{update_budget_action} \tab Updates a budget action\cr
#' \link[=budgets_update_notification]{update_notification} \tab Updates a notification\cr
#' \link[=budgets_update_subscriber]{update_subscriber} \tab Updates a subscriber
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname budgets
#' @export
budgets <- function(config = list()) {
  # The client is simply the service's operation table with the
  # user-supplied configuration (credentials, endpoint, region) merged in.
  client <- set_config(.budgets$operations, config)
  client
}
# Private API objects: metadata, handlers, interfaces, etc.
# (Internal plumbing shared by all operations of this generated client.)
.budgets <- list()
# Filled in with one function per API operation by the generated code.
.budgets$operations <- list()
# Wire-protocol and endpoint description used when signing and routing
# requests for the Budgets service.
.budgets$metadata <- list(
service_name = "budgets",
endpoints = list("*" = list(endpoint = "https://budgets.amazonaws.com", global = TRUE), "cn-*" = list(endpoint = "budgets.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "budgets.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "budgets.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Budgets",
api_version = "2016-10-20",
signing_name = "budgets",
json_version = "1.1",
target_prefix = "AWSBudgetServiceGateway"
)
# Builds the runnable service object: JSON-RPC handlers signed with
# AWS Signature Version 4, bound to the metadata above.
.budgets$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.budgets$metadata, handlers, config)
}
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Stub tests for the ObservationVariableSingleResponse model: each
# test currently instantiates the model only; the property assertions
# are scaffolded as comments to be filled in with expected values.
context("Test ObservationVariableSingleResponse")
model.instance <- ObservationVariableSingleResponse$new()
test_that("@context", {
# tests for the property `@context` (array[character])
# The JSON-LD Context is used to provide JSON-LD definitions to each field in a JSON object. By providing an array of context file urls, a BrAPI response object becomes JSON-LD compatible. For more information, see https://w3c.github.io/json-ld-syntax/#the-context
# uncomment below to test the property
#expect_equal(model.instance$`@context`, "EXPECTED_RESULT")
})
test_that("metadata", {
# tests for the property `metadata` (Metadata)
# uncomment below to test the property
#expect_equal(model.instance$`metadata`, "EXPECTED_RESULT")
})
test_that("result", {
# tests for the property `result` (ObservationVariable)
# uncomment below to test the property
#expect_equal(model.instance$`result`, "EXPECTED_RESULT")
})
| /tests/testthat/test_observation_variable_single_response.R | no_license | Breeding-Insight/brapi-r-v2 | R | false | false | 1,059 | r | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Unit-test skeleton for the generated ObservationVariableSingleResponse
# model: one test_that() block per property, with the expectations left
# commented out until expected fixture values are filled in.
context("Test ObservationVariableSingleResponse")

# Fresh model instance shared by the property tests below.
model.instance <- ObservationVariableSingleResponse$new()

test_that("@context", {
  # tests for the property `@context` (array[character])
  # The JSON-LD Context is used to provide JSON-LD definitions to each field in a JSON object. By providing an array of context file urls, a BrAPI response object becomes JSON-LD compatible. For more information, see https://w3c.github.io/json-ld-syntax/#the-context
  # uncomment below to test the property
  #expect_equal(model.instance$`@context`, "EXPECTED_RESULT")
})

test_that("metadata", {
  # tests for the property `metadata` (Metadata)
  # uncomment below to test the property
  #expect_equal(model.instance$`metadata`, "EXPECTED_RESULT")
})

test_that("result", {
  # tests for the property `result` (ObservationVariable)
  # uncomment below to test the property
  #expect_equal(model.instance$`result`, "EXPECTED_RESULT")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.s3_operations.R
\name{delete_bucket_inventory_configuration}
\alias{delete_bucket_inventory_configuration}
\title{Deletes an inventory configuration (identified by the inventory ID) from the bucket}
\usage{
delete_bucket_inventory_configuration(Bucket, Id)
}
\arguments{
\item{Bucket}{[required] The name of the bucket containing the inventory configuration to delete.}
\item{Id}{[required] The ID used to identify the inventory configuration.}
}
\description{
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
}
\section{Accepted Parameters}{
\preformatted{delete_bucket_inventory_configuration(
Bucket = "string",
Id = "string"
)
}
}
| /service/paws.s3/man/delete_bucket_inventory_configuration.Rd | permissive | CR-Mercado/paws | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.s3_operations.R
\name{delete_bucket_inventory_configuration}
\alias{delete_bucket_inventory_configuration}
\title{Deletes an inventory configuration (identified by the inventory ID) from the bucket}
\usage{
delete_bucket_inventory_configuration(Bucket, Id)
}
\arguments{
\item{Bucket}{[required] The name of the bucket containing the inventory configuration to delete.}
\item{Id}{[required] The ID used to identify the inventory configuration.}
}
\description{
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
}
\section{Accepted Parameters}{
\preformatted{delete_bucket_inventory_configuration(
Bucket = "string",
Id = "string"
)
}
}
|
# Decision-tree examples with fractional-instance handling of missing
# values, using the dmr.* companion packages.
library(dmr.claseval)
library(dmr.dectree)
library(dmr.regeval)
library(dmr.regtree)
library(dmr.stats)
library(dmr.util)
library(rpart)
library(e1071)

# HouseVotes84 (many NA votes) is used for the classification examples;
# BostonHousing is loaded for regression examples elsewhere.
data(HouseVotes84, package="mlbench")
data(BostonHousing, package="mlbench")

set.seed(12)                     # reproducible train/test split
mdata<-HouseVotes84
rhv <- runif(nrow(mdata))
hv.train <- mdata[rhv>=0.33,]    # roughly 2/3 training
hv.test <- mdata[rhv<0.33,]      # roughly 1/3 test
## A simple decision tree growing implementation with missing value support
## using fractional instances: an instance whose split-attribute value is NA
## is sent down both branches with weights proportional to the branch sizes.
##
## Arguments:
##   formula  - model formula identifying the class attribute
##   data     - training data frame
##   imp      - impurity measure used to evaluate candidate splits
##   maxprob  - stop splitting once the dominant class probability exceeds this
##   minsplit - minimum weighted instance count required to split a node
##   maxdepth - maximum tree depth (children of node n are numbered 2n, 2n+1)
## Value: a data frame of class "dectree.frac", one row per tree node, holding
## the split attribute/value, majority class, weighted count, and class
## probabilities of each node.
grow.dectree.frac <- function(formula, data,
                              imp=entropy.p, maxprob=0.999, minsplit=2, maxdepth=8)
{
  nmn <- function(node) { nodemap[,"node"]==node }  # nodemap entries for node node
  inn <- function(node)
  { nodemap[nodemap[,"node"]==node,"instance"] }    # instances at node node
  wgn <- function(node) { nodemap[nodemap[,"node"]==node,"weight"] }  # weights at node node

  # Create the root node, the class-probability columns, and the
  # instance->node map (all instances start at the root with weight 1).
  init <- function()
  {
    clabs <<- factor(levels(data[[class]]),
                     levels=levels(data[[class]]))  # class labels
    tree <<- data.frame(node=1, attribute=NA, value=NA, class=NA, count=NA,
                        `names<-`(rep(list(NA), length(clabs)),
                                  paste("p", clabs, sep=".")))
    cprobs <<- (ncol(tree)-length(clabs)+1):ncol(tree)  # class probability columns
    nodemap <<- cbind(instance=1:nrow(data), node=rep(1, nrow(data)),
                      weight=rep(1, nrow(data)))
    node <<- 1
  }

  # Smallest node number greater than node, or Inf when none remain.
  next.node <- function(node)
  {
    if (any(opn <- tree$node>node))
      min(tree$node[opn])
    else Inf
  }

  # Record the weighted instance count and class distribution of node.
  class.distribution <- function(node)
  {
    tree[tree$node==node,"count"] <<- sum(wgn(node))
    tree[tree$node==node,cprobs] <<- weighted.pdisc(data[inn(node),class], w=wgn(node))
  }

  # Assign the majority class label (index into cprobs) to node.
  class.label <- function(node)
  {
    tree$class[tree$node==node] <<- which.max(tree[tree$node==node,cprobs])
  }

  # TRUE when node must not be split further (depth, size, or purity limit).
  stop.criteria <- function(node)
  {
    node>=2^maxdepth || tree[tree$node==node,"count"]<minsplit ||
      max(tree[tree$node==node,cprobs])>maxprob
  }

  # Weighted impurity of splitting attribute values av at threshold/level sv;
  # instances with missing av contribute to both branches in proportion to
  # the branch sizes.
  split.eval <- function(av, sv, cl, w)
  {
    cond <- if (is.numeric(av)) av<=as.numeric(sv) else av==sv
    cond1 <- !is.na(av) & cond   # true split outcome
    cond0 <- !is.na(av) & !cond  # false split outcome
    pd1 <- weighted.pdisc(cl[cond1], w=w[cond1])
    n1 <- sum(w[cond1])
    pd0 <- weighted.pdisc(cl[cond0], w=w[cond0])
    n0 <- sum(w[cond0])
    pdm <- weighted.pdisc(cl[is.na(av)], w=w[is.na(av)])
    nm <- sum(w[is.na(av)])
    if (nm>0)
    {
      p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
      p0 <- 1-p1
      pd1 <- (n1*pd1 + p1*nm*pdm)/(n1+p1*nm)
      n1 <- n1 + p1*nm
      pd0 <- (n0*pd0 + p0*nm*pdm)/(n0+p0*nm)
      n0 <- n0 + nm*p0
    }
    if (n1>0 && n0>0)
      weighted.impurity(pd1, n1, pd0, n0, imp)
    else
      Inf
  }

  # Evaluate all candidate splits at node, store the best one in the tree,
  # and return its impurity (Inf when no usable split exists).
  split.select <- function(node)
  {
    splits <- data.frame()
    for (attribute in attributes)
    {
      uav <- sort(unique(data[inn(node),attribute]))
      if (length(uav)>1)
        splits <- rbind(splits,
                        data.frame(attribute=attribute,
                                   value=if (is.numeric(uav))
                                     midbrk(uav)
                                   else as.character(uav),
                                   stringsAsFactors=FALSE))
    }
    # No attribute offers more than one value at this node: nothing to split
    # on.  (Previously min() was applied to a zero-length vector, yielding
    # Inf only via a warning.)
    if (nrow(splits)==0)
      return(Inf)
    splits$eval <- sapply(1:nrow(splits),
                          function(s)
                            split.eval(data[inn(node),splits$attribute[s]],
                                       splits$value[s],
                                       data[inn(node),class], wgn(node)))
    if ((best.eval <- min(splits$eval))<Inf)
      tree[tree$node==node,2:3] <<- splits[which.min(splits$eval),1:2]
    return(best.eval)
  }

  # Apply the split stored for node: append its two children to the tree and
  # route the instances (fractionally, for missing values) to them.
  split.apply <- function(node)
  {
    tree <<- rbind(tree,
                   data.frame(node=(2*node):(2*node+1),
                              attribute=NA, value=NA, class=NA, count=NA,
                              `names<-`(rep(list(NA), length(clabs)),
                                        paste("p", clabs, sep="."))))
    av <- data[nodemap[,"instance"],tree$attribute[tree$node==node]]
    cond <- if (is.numeric(av)) av<=as.numeric(tree$value[tree$node==node])
    else av==tree$value[tree$node==node]
    cond1 <- !is.na(av) & cond   # true split outcome
    cond0 <- !is.na(av) & !cond  # false split outcome
    n1 <- sum(nodemap[nmn(node) & cond1,"weight"])
    n0 <- sum(nodemap[nmn(node) & cond0,"weight"])
    nm <- sum(nodemap[nmn(node) & is.na(av),"weight"])
    nodemap[nmn(node) & cond1,"node"] <<- 2*node
    nodemap[nmn(node) & cond0,"node"] <<- 2*node+1
    if (nm>0)
    {
      # Send missing-value instances to both children, with their weights
      # split in proportion to the sizes of the two branches.
      p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
      p0 <- 1-p1
      newnn <- nodemap[nmn(node) & is.na(av),,drop=FALSE]
      nodemap[nmn(node) & is.na(av),"weight"] <<-
        p1*nodemap[nmn(node) & is.na(av),"weight"]
      nodemap[nmn(node) & is.na(av),"node"] <<- 2*node
      newnn[,"weight"] <- p0*newnn[,"weight"]
      newnn[,"node"] <- 2*node+1
      nodemap <<- rbind(nodemap, newnn)
    }
  }

  # State shared by the closures above.
  tree <- nodemap <- node <- NULL
  clabs <- cprobs <- NULL
  class <- y.var(formula)
  attributes <- x.vars(formula, data)

  init()
  # Traverse nodes in increasing node-number order: describe each node and
  # split it unless a stopping criterion holds or no useful split exists.
  # (The original loop body ended with three no-op expressions - `tree`,
  # `nodemap`, `is.finite(node)` - which have been removed.)
  while (is.finite(node))
  {
    class.distribution(node)
    class.label(node)
    if (!stop.criteria(node))
      if (split.select(node)<Inf)
        split.apply(node)
    node <- next.node(node)
  }
  tree$class <- clabs[tree$class]
  `class<-`(tree, "dectree.frac")
}
## S3 method converting a dectree.frac object to a plain data frame:
## strip the class attribute, then delegate to the list method.
as.data.frame.dectree.frac <- function(x, row.names=NULL, optional=FALSE, ...)
{
  plain <- unclass(x)
  as.data.frame(plain, row.names=row.names, optional=optional)
}
# Grow a fractional-instance decision tree on the voting training set.
treef <- grow.dectree.frac(Class~., hv.train)
# data frame conversion
as.data.frame.dectree.frac(treef)

##############
#3-7-2.R
##############
## Decision tree prediction with missing value support using fractional
## instances: an instance with a missing split-attribute value descends both
## subtrees with weights proportional to the children's training counts, and
## the prediction is the weighted vote over all leaves it reaches.
## Arguments: tree - a "dectree.frac" tree as grown by grow.dectree.frac;
##            data - data frame of instances to classify.
## Value: factor of predicted class labels, one per instance of data.
predict.dectree.frac <- function(tree, data)
{
  nmn <- function(node) { nodemap[,"node"]==node }  # nodemap entries for node node

  # Recursively push the instances currently assigned to node down the tree,
  # splitting the weight of missing-value instances between both children.
  descend <- function(node)
  {
    if (!is.na(tree$attribute[tree$node==node]))  # unless reached a leaf
    {
      av <- data[nodemap[,"instance"],tree$attribute[tree$node==node]]
      cond <- if (is.numeric(av)) av<=as.numeric(tree$value[tree$node==node])
      else av==tree$value[tree$node==node]
      cond1 <- !is.na(av) & cond   # true split outcome
      cond0 <- !is.na(av) & !cond  # false split outcome
      nodemap[nmn(node) & cond1, "node"] <<- 2*node
      nodemap[nmn(node) & cond0, "node"] <<- 2*node+1
      if (sum(nodemap[nmn(node) & is.na(av), "weight"])>0)
      {
        # Branch proportions come from the children's stored training counts.
        n1 <- tree$count[tree$node==2*node]
        n0 <- tree$count[tree$node==2*node+1]
        p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
        p0 <- 1-p1
        newnn <- nodemap[nmn(node) & is.na(av),,drop=FALSE]
        nodemap[nmn(node) & is.na(av),"weight"] <<-
          p1*nodemap[nmn(node) & is.na(av),"weight"]
        nodemap[nmn(node) & is.na(av), "node"] <<- 2*node
        newnn[,"weight"] <- p0*newnn[,"weight"]
        newnn[,"node"] <- 2*node+1
        nodemap <<- rbind(nodemap, newnn)
      }
      descend(2*node)
      descend(2*node+1)
    }
  }

  # All instances start at the root with weight 1.
  nodemap <- cbind(instance=1:nrow(data), node=rep(1, nrow(data)),
                   weight=rep(1, nrow(data)))
  descend(1)

  # Join each reached node with its class distribution, then return per
  # instance the class maximizing the weight-summed probabilities.
  clabs <- factor(levels(tree$class), levels=levels(tree$class))
  votes <- merge(nodemap, as.data.frame(tree)[,c("node", "class",
                                                 paste("p", clabs, sep="."))])
  cprobs <- (ncol(votes)-length(clabs)+1):ncol(votes)
  clabs[by(votes, votes$instance,
           function(v) which.max(colSums(v$weight*v[,cprobs])))]
}
# decision tree prediction for the given data with missing attribute values
hv.test.pred<-predict.dectree.frac(treef, hv.test)
# Misclassification error on the training and test sets.
err(predict.dectree.frac(treef, hv.train), hv.train$Class)
err(hv.test.pred, hv.test$Class)
| /rstudio/dm_tree_advanced.R | permissive | pwasiewi/dokerz | R | false | false | 8,067 | r | library(dmr.claseval)
library(dmr.dectree)
library(dmr.regeval)
library(dmr.regtree)
library(dmr.stats)
library(dmr.util)
library(rpart)
library(e1071)

# HouseVotes84 (many NA votes) is used for the classification examples;
# BostonHousing is loaded for regression examples elsewhere.
data(HouseVotes84, package="mlbench")
data(BostonHousing, package="mlbench")

set.seed(12)                     # reproducible train/test split
mdata<-HouseVotes84
rhv <- runif(nrow(mdata))
hv.train <- mdata[rhv>=0.33,]    # roughly 2/3 training
hv.test <- mdata[rhv<0.33,]      # roughly 1/3 test
## A simple decision tree growing implementation with missing value support
## using fractional instances: an instance whose split-attribute value is NA
## is sent down both branches with weights proportional to the branch sizes.
##
## Arguments:
##   formula  - model formula identifying the class attribute
##   data     - training data frame
##   imp      - impurity measure used to evaluate candidate splits
##   maxprob  - stop splitting once the dominant class probability exceeds this
##   minsplit - minimum weighted instance count required to split a node
##   maxdepth - maximum tree depth (children of node n are numbered 2n, 2n+1)
## Value: a data frame of class "dectree.frac", one row per tree node, holding
## the split attribute/value, majority class, weighted count, and class
## probabilities of each node.
grow.dectree.frac <- function(formula, data,
                              imp=entropy.p, maxprob=0.999, minsplit=2, maxdepth=8)
{
  nmn <- function(node) { nodemap[,"node"]==node }  # nodemap entries for node node
  inn <- function(node)
  { nodemap[nodemap[,"node"]==node,"instance"] }    # instances at node node
  wgn <- function(node) { nodemap[nodemap[,"node"]==node,"weight"] }  # weights at node node

  # Create the root node, the class-probability columns, and the
  # instance->node map (all instances start at the root with weight 1).
  init <- function()
  {
    clabs <<- factor(levels(data[[class]]),
                     levels=levels(data[[class]]))  # class labels
    tree <<- data.frame(node=1, attribute=NA, value=NA, class=NA, count=NA,
                        `names<-`(rep(list(NA), length(clabs)),
                                  paste("p", clabs, sep=".")))
    cprobs <<- (ncol(tree)-length(clabs)+1):ncol(tree)  # class probability columns
    nodemap <<- cbind(instance=1:nrow(data), node=rep(1, nrow(data)),
                      weight=rep(1, nrow(data)))
    node <<- 1
  }

  # Smallest node number greater than node, or Inf when none remain.
  next.node <- function(node)
  {
    if (any(opn <- tree$node>node))
      min(tree$node[opn])
    else Inf
  }

  # Record the weighted instance count and class distribution of node.
  class.distribution <- function(node)
  {
    tree[tree$node==node,"count"] <<- sum(wgn(node))
    tree[tree$node==node,cprobs] <<- weighted.pdisc(data[inn(node),class], w=wgn(node))
  }

  # Assign the majority class label (index into cprobs) to node.
  class.label <- function(node)
  {
    tree$class[tree$node==node] <<- which.max(tree[tree$node==node,cprobs])
  }

  # TRUE when node must not be split further (depth, size, or purity limit).
  stop.criteria <- function(node)
  {
    node>=2^maxdepth || tree[tree$node==node,"count"]<minsplit ||
      max(tree[tree$node==node,cprobs])>maxprob
  }

  # Weighted impurity of splitting attribute values av at threshold/level sv;
  # instances with missing av contribute to both branches in proportion to
  # the branch sizes.
  split.eval <- function(av, sv, cl, w)
  {
    cond <- if (is.numeric(av)) av<=as.numeric(sv) else av==sv
    cond1 <- !is.na(av) & cond   # true split outcome
    cond0 <- !is.na(av) & !cond  # false split outcome
    pd1 <- weighted.pdisc(cl[cond1], w=w[cond1])
    n1 <- sum(w[cond1])
    pd0 <- weighted.pdisc(cl[cond0], w=w[cond0])
    n0 <- sum(w[cond0])
    pdm <- weighted.pdisc(cl[is.na(av)], w=w[is.na(av)])
    nm <- sum(w[is.na(av)])
    if (nm>0)
    {
      p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
      p0 <- 1-p1
      pd1 <- (n1*pd1 + p1*nm*pdm)/(n1+p1*nm)
      n1 <- n1 + p1*nm
      pd0 <- (n0*pd0 + p0*nm*pdm)/(n0+p0*nm)
      n0 <- n0 + nm*p0
    }
    if (n1>0 && n0>0)
      weighted.impurity(pd1, n1, pd0, n0, imp)
    else
      Inf
  }

  # Evaluate all candidate splits at node, store the best one in the tree,
  # and return its impurity (Inf when no usable split exists).
  split.select <- function(node)
  {
    splits <- data.frame()
    for (attribute in attributes)
    {
      uav <- sort(unique(data[inn(node),attribute]))
      if (length(uav)>1)
        splits <- rbind(splits,
                        data.frame(attribute=attribute,
                                   value=if (is.numeric(uav))
                                     midbrk(uav)
                                   else as.character(uav),
                                   stringsAsFactors=FALSE))
    }
    # No attribute offers more than one value at this node: nothing to split
    # on.  (Previously min() was applied to a zero-length vector, yielding
    # Inf only via a warning.)
    if (nrow(splits)==0)
      return(Inf)
    splits$eval <- sapply(1:nrow(splits),
                          function(s)
                            split.eval(data[inn(node),splits$attribute[s]],
                                       splits$value[s],
                                       data[inn(node),class], wgn(node)))
    if ((best.eval <- min(splits$eval))<Inf)
      tree[tree$node==node,2:3] <<- splits[which.min(splits$eval),1:2]
    return(best.eval)
  }

  # Apply the split stored for node: append its two children to the tree and
  # route the instances (fractionally, for missing values) to them.
  split.apply <- function(node)
  {
    tree <<- rbind(tree,
                   data.frame(node=(2*node):(2*node+1),
                              attribute=NA, value=NA, class=NA, count=NA,
                              `names<-`(rep(list(NA), length(clabs)),
                                        paste("p", clabs, sep="."))))
    av <- data[nodemap[,"instance"],tree$attribute[tree$node==node]]
    cond <- if (is.numeric(av)) av<=as.numeric(tree$value[tree$node==node])
    else av==tree$value[tree$node==node]
    cond1 <- !is.na(av) & cond   # true split outcome
    cond0 <- !is.na(av) & !cond  # false split outcome
    n1 <- sum(nodemap[nmn(node) & cond1,"weight"])
    n0 <- sum(nodemap[nmn(node) & cond0,"weight"])
    nm <- sum(nodemap[nmn(node) & is.na(av),"weight"])
    nodemap[nmn(node) & cond1,"node"] <<- 2*node
    nodemap[nmn(node) & cond0,"node"] <<- 2*node+1
    if (nm>0)
    {
      # Send missing-value instances to both children, with their weights
      # split in proportion to the sizes of the two branches.
      p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
      p0 <- 1-p1
      newnn <- nodemap[nmn(node) & is.na(av),,drop=FALSE]
      nodemap[nmn(node) & is.na(av),"weight"] <<-
        p1*nodemap[nmn(node) & is.na(av),"weight"]
      nodemap[nmn(node) & is.na(av),"node"] <<- 2*node
      newnn[,"weight"] <- p0*newnn[,"weight"]
      newnn[,"node"] <- 2*node+1
      nodemap <<- rbind(nodemap, newnn)
    }
  }

  # State shared by the closures above.
  tree <- nodemap <- node <- NULL
  clabs <- cprobs <- NULL
  class <- y.var(formula)
  attributes <- x.vars(formula, data)

  init()
  # Traverse nodes in increasing node-number order: describe each node and
  # split it unless a stopping criterion holds or no useful split exists.
  # (The original loop body ended with three no-op expressions - `tree`,
  # `nodemap`, `is.finite(node)` - which have been removed.)
  while (is.finite(node))
  {
    class.distribution(node)
    class.label(node)
    if (!stop.criteria(node))
      if (split.select(node)<Inf)
        split.apply(node)
    node <- next.node(node)
  }
  tree$class <- clabs[tree$class]
  `class<-`(tree, "dectree.frac")
}
## S3 method converting a dectree.frac object to a plain data frame:
## strip the class attribute, then delegate to the list method.
as.data.frame.dectree.frac <- function(x, row.names=NULL, optional=FALSE, ...)
{
  plain <- unclass(x)
  as.data.frame(plain, row.names=row.names, optional=optional)
}
# Grow a fractional-instance decision tree on the voting training set.
treef <- grow.dectree.frac(Class~., hv.train)
# data frame conversion
as.data.frame.dectree.frac(treef)

##############
#3-7-2.R
##############
## Decision tree prediction with missing value support using fractional
## instances: an instance with a missing split-attribute value descends both
## subtrees with weights proportional to the children's training counts, and
## the prediction is the weighted vote over all leaves it reaches.
## Arguments: tree - a "dectree.frac" tree as grown by grow.dectree.frac;
##            data - data frame of instances to classify.
## Value: factor of predicted class labels, one per instance of data.
predict.dectree.frac <- function(tree, data)
{
  nmn <- function(node) { nodemap[,"node"]==node }  # nodemap entries for node node

  # Recursively push the instances currently assigned to node down the tree,
  # splitting the weight of missing-value instances between both children.
  descend <- function(node)
  {
    if (!is.na(tree$attribute[tree$node==node]))  # unless reached a leaf
    {
      av <- data[nodemap[,"instance"],tree$attribute[tree$node==node]]
      cond <- if (is.numeric(av)) av<=as.numeric(tree$value[tree$node==node])
      else av==tree$value[tree$node==node]
      cond1 <- !is.na(av) & cond   # true split outcome
      cond0 <- !is.na(av) & !cond  # false split outcome
      nodemap[nmn(node) & cond1, "node"] <<- 2*node
      nodemap[nmn(node) & cond0, "node"] <<- 2*node+1
      if (sum(nodemap[nmn(node) & is.na(av), "weight"])>0)
      {
        # Branch proportions come from the children's stored training counts.
        n1 <- tree$count[tree$node==2*node]
        n0 <- tree$count[tree$node==2*node+1]
        p1 <- if (n1+n0>0) n1/(n1+n0) else 0.5
        p0 <- 1-p1
        newnn <- nodemap[nmn(node) & is.na(av),,drop=FALSE]
        nodemap[nmn(node) & is.na(av),"weight"] <<-
          p1*nodemap[nmn(node) & is.na(av),"weight"]
        nodemap[nmn(node) & is.na(av), "node"] <<- 2*node
        newnn[,"weight"] <- p0*newnn[,"weight"]
        newnn[,"node"] <- 2*node+1
        nodemap <<- rbind(nodemap, newnn)
      }
      descend(2*node)
      descend(2*node+1)
    }
  }

  # All instances start at the root with weight 1.
  nodemap <- cbind(instance=1:nrow(data), node=rep(1, nrow(data)),
                   weight=rep(1, nrow(data)))
  descend(1)

  # Join each reached node with its class distribution, then return per
  # instance the class maximizing the weight-summed probabilities.
  clabs <- factor(levels(tree$class), levels=levels(tree$class))
  votes <- merge(nodemap, as.data.frame(tree)[,c("node", "class",
                                                 paste("p", clabs, sep="."))])
  cprobs <- (ncol(votes)-length(clabs)+1):ncol(votes)
  clabs[by(votes, votes$instance,
           function(v) which.max(colSums(v$weight*v[,cprobs])))]
}
# decision tree prediction for the given data with missing attribute values
hv.test.pred<-predict.dectree.frac(treef, hv.test)
# Misclassification error on the training and test sets.
err(predict.dectree.frac(treef, hv.train), hv.train$Class)
err(hv.test.pred, hv.test$Class)
|
testlist <- list(hi = 1.34140986504503e+199, lo = 3.0982936601515e+227, mu = 7.35603736200443e+223, sig = 4.87620583420803e-153)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044766-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 190 | r | testlist <- list(hi = 1.34140986504503e+199, lo = 3.0982936601515e+227, mu = 7.35603736200443e+223, sig = 4.87620583420803e-153)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for the application: a title, a sidebar with a chart-type
# selector, and a main panel holding the interactive plotly output that the
# server renders into "plot1".
shinyUI(fluidPage(

    # Application title
    titlePanel("Old Faithful Geyser Data"),

    # Sidebar with a drop-down choosing which chart the server should draw
    sidebarLayout(
        sidebarPanel(
            # `choices` is the documented argument name of selectInput();
            # the original `choice=` only worked through R's partial
            # argument matching.
            selectInput("graficaType",
                        "tipo de grafica:",
                        choices = list("all row regions", "all fable countries"))
        ),

        # Show a plot of the generated distribution
        mainPanel(
            plotlyOutput("plot1")
        )
    )
))
| /ui.R | no_license | jigbadouin/Fableson | R | false | false | 791 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for the application: a title, a sidebar with a chart-type
# selector, and a main panel holding the interactive plotly output that the
# server renders into "plot1".
shinyUI(fluidPage(

    # Application title
    titlePanel("Old Faithful Geyser Data"),

    # Sidebar with a drop-down choosing which chart the server should draw
    sidebarLayout(
        sidebarPanel(
            # `choices` is the documented argument name of selectInput();
            # the original `choice=` only worked through R's partial
            # argument matching.
            selectInput("graficaType",
                        "tipo de grafica:",
                        choices = list("all row regions", "all fable countries"))
        ),

        # Show a plot of the generated distribution
        mainPanel(
            plotlyOutput("plot1")
        )
    )
))
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545953e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615838160-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 826 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545953e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
\name{do.test}
\alias{do.test}
\title{
Test Functions and Expressions - for automated testing
}
\description{
Expressions are parsed and evaluated from \code{file}.
Each expression should evaluate to a logical \code{TRUE}. Otherwise,
\code{do.test()} prints the expression and its value.
}
\usage{
do.test(file, verbose=FALSE, strict=FALSE, local=FALSE, check)
}
\arguments{
\item{file}{
a file or connection containing code to test.
}
\item{verbose}{
logical flag. If \code{TRUE}, all expressions are printed, not just
those that fail. Regardless of this flag, the value is also printed
for failures.
}
\item{strict}{
logical flag. If \code{TRUE}, any validity
failures cause an error; that is, you get to debug after the first failed
assertion.
}
\item{local}{
logical flag controlling where the evaluation
takes place: by default (\code{local=FALSE}),
in the environment that called \code{do.test}, typically
the global environment, (objects created remain there after
\code{do.test} is finished).
\code{local=TRUE}, causes \code{do.test} to create and work
in a new environment.
}
\item{check}{
an unevaluated expression. If \code{check}is supplied,
\code{do.test}evaluates
this expression (it should be given via \code{Quote()}) between each
parse and evaluation. (This is for when you need to check some global
information.)
}
}
\details{
A test file typically contains a sequence of expressions to test
different aspects of a function or set of functions, including
testing that each input argument is handled appropriately,
that the output has the expected structure,
correct output under a number of combinations of inputs,
and error handling (warning and stop invoked when appropriate and
with appropriate messages). Each expression may contain multiple
lines grouped using \code{\{\}},
where early lines may do computations and the last line
checks for expected results, usually using \code{\link{all.equal}}.
Some expressions may be included that aren't intended to test anything
by finishing them with \code{TRUE}, e.g. to read data:
\code{\{read.table("data.txt"); TRUE\}}
or to remove objects at the end of a test file:
\code{\{rm(a, b, x, y); TRUE\}}.
We recommend including comments inside expressions to indicate
the purpose of each test; then if errors occur the comments are
printed too.
To compare just numbers, not names or matrix dimensions, functions
\code{\link{unname}} and \code{\link{drop}} are useful.
To exclude
certain components or attributes from the comparison the function
\code{all.equal.excluding} is useful. This is defined in the examples
below.
Each test should run silently if everything is working correctly;
there should be nothing printed. \code{\link{expectWarnings}} can
be used to intercept \code{\link{warning}} statements.
}
\value{
NULL
}
\seealso{
\code{\link{all.equal}},
\code{\link{allTrue}},
\code{\link{drop}},
\code{\link{expectStop}},
\code{\link{expectWarnings}},
\code{\link{identical}},
\code{\link{Quote}},
\code{\link{unname}}
}
\examples{
\dontrun{
# Create a toy test file, and run it
cat('{all.equal(24/8, 3)}',
'{all.equal(5, 6)}', # this one will fail
'expectWarnings( { # Test subscript replacement ',
' x <- data.frame(a=1:3,b=2:4)',
' x[,3] <- x',
' all.equal(ncol(x), 3)',
'}, expected = "provided 2 variables to replace 1 var")',
'expectStop(lm(5), expected = "invalid formula")',
'{ rm(x) ; TRUE }', # cleanup at end of test
sep="\n", file = "testfile.t")
do.test("testfile.t")
## ------- Test file: testfile.t ---------
## {all.equal(5, 6)}
## [1] "Mean relative difference: 0.2"
#
# The test that fails is printed, with the results of the test.
# In R 2.6.1 the subscript replacement test above also fails
# (bug reported 14 Jan 2008), resulting in the additional printout:
## expectWarnings( {
## x <- data.frame(a=1:3,b=2:4)
## x[,3] <- x
## all.equal(ncol(x), 3)
## }, expected = "provided 2 variables to replace 1 var")
## $`Test result`
## [1] "Mean relative difference: 0.25"
}
# This function is useful in some tests:
all.equal.excluding <- function(x, y, ..., excluding=NULL, attrs=NULL){
# Like all.equal, but exclude components in `excluding',
# and excluding attributes named in `attrs'.
#
# `excluding' and `attrs' should be character, names of components
# and attributes.
#
# For example:
# all.equal.excluding(obj1, obj2, excluding = c("call", "x"))
for(i in intersect(names(x), excluding)) x[[i]] <- NULL
for(i in intersect(names(y), excluding)) y[[i]] <- NULL
for(i in intersect(names(attributes(x)), attrs)) attr(x,i) <- NULL
for(i in intersect(names(attributes(y)), attrs)) attr(y,i) <- NULL
all.equal(x,y, ...)
}
# Test if two objects are the same except for "call" and "x":
data <- data.frame(x = 1:20, y = exp(1:20/20))
fit1 <- lm(y ~ x, data = data, x=TRUE)
fit2 <- update(fit1, x=)
all.equal.excluding(fit1, fit2, excluding = c("call", "x"))
}
\keyword{utilities}
% docclass is function
| /man/do.test.Rd | no_license | xdwang1991/splus2r | R | false | false | 5,120 | rd | \name{do.test}
\alias{do.test}
\title{
Test Functions and Expressions - for automated testing
}
\description{
Expressions are parsed and evaluated from \code{file}.
Each expression should evaluate to a logical \code{TRUE}. Otherwise,
\code{do.test()} prints the expression and its value.
}
\usage{
do.test(file, verbose=FALSE, strict=FALSE, local=FALSE, check)
}
\arguments{
\item{file}{
a file or connection containing code to test.
}
\item{verbose}{
logical flag. If \code{TRUE}, all expressions are printed, not just
those that fail. Regardless of this flag, the value is also printed
for failures.
}
\item{strict}{
logical flag. If \code{TRUE}, any validity
failures cause an error; that is, you get to debug after the first failed
assertion.
}
\item{local}{
logical flag controlling where the evaluation
takes place: by default (\code{local=FALSE}),
in the environment that called \code{do.test}, typically
the global environment, (objects created remain there after
\code{do.test} is finished).
\code{local=TRUE}, causes \code{do.test} to create and work
in a new environment.
}
\item{check}{
an unevaluated expression. If \code{check}is supplied,
\code{do.test}evaluates
this expression (it should be given via \code{Quote()}) between each
parse and evaluation. (This is for when you need to check some global
information.)
}
}
\details{
A test file typically contains a sequence of expressions to test
different aspects of a function or set of functions, including
testing that each input argument is handled appropriately,
that the output has the expected structure,
correct output under a number of combinations of inputs,
and error handling (warning and stop invoked when appropriate and
with appropriate messages). Each expression may contain multiple
lines grouped using \code{\{\}},
where early lines may do computations and the last line
checks for expected results, usually using \code{\link{all.equal}}.
Some expressions may be included that aren't intended to test anything
by finishing them with \code{TRUE}, e.g. to read data:
\code{\{read.table("data.txt"); TRUE\}}
or to remove objects at the end of a test file:
\code{\{rm(a, b, x, y); TRUE\}}.
We recommend including comments inside expressions to indicate
the purpose of each test; then if errors occur the comments are
printed too.
To compare just numbers, not names or matrix dimensions, functions
\code{\link{unname}} and \code{\link{drop}} are useful.
To exclude
certain components or attributes from the comparison the function
\code{all.equal.excluding} is useful. This is defined in the examples
below.
Each test should run silently if everything is working correctly;
there should be nothing printed. \code{\link{expectWarnings}} can
be used to intercept \code{\link{warning}} statements.
}
\value{
NULL
}
\seealso{
\code{\link{all.equal}},
\code{\link{allTrue}},
\code{\link{drop}},
\code{\link{expectStop}},
\code{\link{expectWarnings}},
\code{\link{identical}},
\code{\link{Quote}},
\code{\link{unname}}
}
\examples{
\dontrun{
# Create a toy test file, and run it
cat('{all.equal(24/8, 3)}',
'{all.equal(5, 6)}', # this one will fail
'expectWarnings( { # Test subscript replacement ',
' x <- data.frame(a=1:3,b=2:4)',
' x[,3] <- x',
' all.equal(ncol(x), 3)',
'}, expected = "provided 2 variables to replace 1 var")',
'expectStop(lm(5), expected = "invalid formula")',
'{ rm(x) ; TRUE }', # cleanup at end of test
sep="\n", file = "testfile.t")
do.test("testfile.t")
## ------- Test file: testfile.t ---------
## {all.equal(5, 6)}
## [1] "Mean relative difference: 0.2"
#
# The test that fails is printed, with the results of the test.
# In R 2.6.1 the subscript replacement test above also fails
# (bug reported 14 Jan 2008), resulting in the additional printout:
## expectWarnings( {
## x <- data.frame(a=1:3,b=2:4)
## x[,3] <- x
## all.equal(ncol(x), 3)
## }, expected = "provided 2 variables to replace 1 var")
## $`Test result`
## [1] "Mean relative difference: 0.25"
}
# This function is useful in some tests:
all.equal.excluding <- function(x, y, ..., excluding=NULL, attrs=NULL){
# Like all.equal, but exclude components in `excluding',
# and excluding attributes named in `attrs'.
#
# `excluding' and `attrs' should be character, names of components
# and attributes.
#
# For example:
# all.equal.excluding(obj1, obj2, excluding = c("call", "x"))
for(i in intersect(names(x), excluding)) x[[i]] <- NULL
for(i in intersect(names(y), excluding)) y[[i]] <- NULL
for(i in intersect(names(attributes(x)), attrs)) attr(x,i) <- NULL
for(i in intersect(names(attributes(y)), attrs)) attr(y,i) <- NULL
all.equal(x,y, ...)
}
# Test if two objects are the same except for "call" and "x":
data <- data.frame(x = 1:20, y = exp(1:20/20))
fit1 <- lm(y ~ x, data = data, x=TRUE)
fit2 <- update(fit1, x=)
all.equal.excluding(fit1, fit2, excluding = c("call", "x"))
}
\keyword{utilities}
% docclass is function
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExFun.R
\name{normality.df}
\alias{normality.df}
\title{Normality test for a data frame.}
\usage{
normality.df(df, output = c("none", "figure", "message", "all"))
}
\arguments{
\item{df}{data.frame to be tested}
\item{output}{if 'none', nothing is displayed, if 'figure', a figure is displayed, if 'message' a message is displayed in the console and if 'all' a message and a figure are displayed.}
}
\value{
A list with the following elements:
\itemize{
\item \code{pvalues} a vector of the p-values obtained with the Shapiro Wilk test for all numerical variables in the data frame \code{df}.
\item \code{results} a text (printed onto the console, if wanted) describing the results of the performed tests.
}
}
\description{
This function performs the Shapiro-Wilk test for all the numerical variables of a given data frame. Each test can be accompanied by a graphical presentation (density and qqplot) providing visual support for interpreting the result.
}
\examples{
normality.df(iris)
## install.packages("kohonen")
data(wines, package = "kohonen")
normality.df(wines, "all")
}
| /NormalityTest/man/normality.df.Rd | permissive | Ubey-learning/NormalityTest | R | false | true | 1,148 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExFun.R
\name{normality.df}
\alias{normality.df}
\title{Normality test for a data frame.}
\usage{
normality.df(df, output = c("none", "figure", "message", "all"))
}
\arguments{
\item{df}{data.frame to be tested}
\item{output}{if 'none', nothing is displayed, if 'figure', a figure is displayed, if 'message' a message is displayed in the console and if 'all' a message and a figure are displayed.}
}
\value{
A list with the following elements:
\itemize{
\item \code{pvalues} a vector of the p-values obtained with the Shapiro Wilk test for all numerical variables in the data frame \code{df}.
\item \code{results} a text (printed onto the console, if wanted) describing the results of the performed tests.
}
}
\description{
This function performs the Shapiro-Wilk test for all the numerical variables of a given data frame. Each test can be accompanied by a graphical presentation (density plot and Q-Q plot) providing visual support for the result.
}
\examples{
normality.df(iris)
## install.packages("kohonen")
data(wines, package = "kohonen")
normality.df(wines, "all")
}
|
library(testthat)
library(schedulr)
test_check("schedulr")
| /tests/testthat.R | no_license | niranjv/schedulr | R | false | false | 60 | r | library(testthat)
library(schedulr)
test_check("schedulr")
|
waterfall_setwork <- function(data, elements, sets) {
# Prepare data ===============================================================
df <- data %>% dplyr::select(-!!elements)
df <- df %>% dplyr::select(!!!sets)
set_names <- names(df)
set_n <- length(set_names)
names(df) <- as.character(1:set_n)
# Transform data =============================================================
# Add intersections to the matrix
cols <- 1:set_n
combs <- unlist(sapply(cols[-1], function(x) {
asplit(combn(cols, m = x), 2)
}), recursive = FALSE)
# Find all intersections
lapply(combs, function(x) {
df <<- df %>% dplyr::mutate(!!paste0(x, collapse = " ") :=
as.numeric(rowMeans(df[, x]) == 1))
})
# From wide data frame to long (intersection size) data frame
intersect_min <- 1
df_nodes <- df %>%
dplyr::summarize(across(everything(), sum)) %>%
tidyr::pivot_longer(everything()) %>%
dplyr::rename(size = value)
# Keep intersection >= intersect_min (and always keep full sets)
keep_ind <- c(1:set_n,
which(df_nodes$size >= intersect_min)[
which(df_nodes$size >= intersect_min) > set_n])
df_nodes <- df_nodes[keep_ind, ]
# From intersection size data frame to node and edge data frames
tmp_list <- unlist(mapply(combn,
list(1:set_n),
seq_along(1:set_n),
simplify = FALSE),
recursive = FALSE)
tmp_list <- tmp_list[keep_ind]
df_edges <- which(t(sapply(tmp_list, function(x) sapply(tmp_list, function(y) {
all(x %in% y) & length(y) == length(x) + 1
}))), arr.ind = TRUE)
df_edges <- setNames(as.data.frame(df_edges),
c("from", "to"))[order(df_edges[,1]),]
df_edges <- df_edges %>%
dplyr::left_join(df_nodes %>% tibble::rowid_to_column(),
by = c("to" = "rowid"))
# Create graph ===============================================================
graph <- tidygraph::tbl_graph(nodes = df_nodes,
edges = df_edges)
layout <- ggraph::create_layout(graph,
layout = "igraph",
algorithm = "sugiyama")
layout %>% ggraph::ggraph() +
ggplot2::scale_size(range = c(1, 14),
breaks = c(5, 10, 50, 100)) +
ggraph::scale_edge_size(range = c(1, 10)) +
ggraph::geom_edge_link(aes(width = size),
color = "dimgrey") +
ggraph::geom_node_point(aes(size = size),
shape = 21,
color = "dimgrey",
fill = "white") +
ggraph::geom_node_label(aes(label = name),
nudge_y = -.3,
size = 3,
color = "white",
fill = "grey") +
ggplot2::theme_void() +
ggplot2::labs(size = "Set/Intersection Size", edge_width = "Overlap Size") +
ggplot2::theme(legend.position = "bottom",
plot.margin = unit(c(1, 1, 1, 1), "pt")) +
ggplot2::guides(size = guide_legend(title.position = "top"),
edge_width = guide_legend(title.position = "top")) +
ggplot2::scale_x_continuous(expand = expansion(c(.075, .075))) +
ggplot2::scale_y_continuous(expand = expansion(c(.10, .10)))
}
| /shiny/R/waterfall_setwork.R | no_license | aosavi/setworks | R | false | false | 3,439 | r | waterfall_setwork <- function(data, elements, sets) {
# Prepare data ===============================================================
df <- data %>% dplyr::select(-!!elements)
df <- df %>% dplyr::select(!!!sets)
set_names <- names(df)
set_n <- length(set_names)
names(df) <- as.character(1:set_n)
# Transform data =============================================================
# Add intersections to the matrix
cols <- 1:set_n
combs <- unlist(sapply(cols[-1], function(x) {
asplit(combn(cols, m = x), 2)
}), recursive = FALSE)
# Find all intersections
lapply(combs, function(x) {
df <<- df %>% dplyr::mutate(!!paste0(x, collapse = " ") :=
as.numeric(rowMeans(df[, x]) == 1))
})
# From wide data frame to long (intersection size) data frame
intersect_min <- 1
df_nodes <- df %>%
dplyr::summarize(across(everything(), sum)) %>%
tidyr::pivot_longer(everything()) %>%
dplyr::rename(size = value)
# Keep intersection >= intersect_min (and always keep full sets)
keep_ind <- c(1:set_n,
which(df_nodes$size >= intersect_min)[
which(df_nodes$size >= intersect_min) > set_n])
df_nodes <- df_nodes[keep_ind, ]
# From intersection size data frame to node and edge data frames
tmp_list <- unlist(mapply(combn,
list(1:set_n),
seq_along(1:set_n),
simplify = FALSE),
recursive = FALSE)
tmp_list <- tmp_list[keep_ind]
df_edges <- which(t(sapply(tmp_list, function(x) sapply(tmp_list, function(y) {
all(x %in% y) & length(y) == length(x) + 1
}))), arr.ind = TRUE)
df_edges <- setNames(as.data.frame(df_edges),
c("from", "to"))[order(df_edges[,1]),]
df_edges <- df_edges %>%
dplyr::left_join(df_nodes %>% tibble::rowid_to_column(),
by = c("to" = "rowid"))
# Create graph ===============================================================
graph <- tidygraph::tbl_graph(nodes = df_nodes,
edges = df_edges)
layout <- ggraph::create_layout(graph,
layout = "igraph",
algorithm = "sugiyama")
layout %>% ggraph::ggraph() +
ggplot2::scale_size(range = c(1, 14),
breaks = c(5, 10, 50, 100)) +
ggraph::scale_edge_size(range = c(1, 10)) +
ggraph::geom_edge_link(aes(width = size),
color = "dimgrey") +
ggraph::geom_node_point(aes(size = size),
shape = 21,
color = "dimgrey",
fill = "white") +
ggraph::geom_node_label(aes(label = name),
nudge_y = -.3,
size = 3,
color = "white",
fill = "grey") +
ggplot2::theme_void() +
ggplot2::labs(size = "Set/Intersection Size", edge_width = "Overlap Size") +
ggplot2::theme(legend.position = "bottom",
plot.margin = unit(c(1, 1, 1, 1), "pt")) +
ggplot2::guides(size = guide_legend(title.position = "top"),
edge_width = guide_legend(title.position = "top")) +
ggplot2::scale_x_continuous(expand = expansion(c(.075, .075))) +
ggplot2::scale_y_continuous(expand = expansion(c(.10, .10)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/riskr.R
\docType{data}
\name{predictions}
\alias{predictions}
\title{simulated dataset: target and predictions}
\format{A data frame with 10000 rows and 2 variables}
\usage{
data(predictions)
}
\description{
A simulated dataset containing a numeric score and the corresponding binary
target for 10000 observations. The variables are as follows:
}
\details{
\itemize{
\item score Numeric variable
\item target. A binary numeric vector
}
}
\keyword{datasets}
| /man/predictions.Rd | permissive | zabbeta/riskr | R | false | true | 515 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/riskr.R
\docType{data}
\name{predictions}
\alias{predictions}
\title{simulated dataset: target and predictions}
\format{A data frame with 10000 rows and 2 variables}
\usage{
data(predictions)
}
\description{
A simulated dataset containing a numeric score and the corresponding binary
target for 10000 observations. The variables are as follows:
}
\details{
\itemize{
\item score Numeric variable
\item target. A binary numeric vector
}
}
\keyword{datasets}
|
# HUNGARIAN NOUN DECLENSION
# 1. STEMS #
define ASTEM [{ház} | {toll} | {híd} | {úr}];
define NOUN [{nap} | {ember} | {kert} | {hajó} | {gyümölcs}];
define STEM [ASTEM | NOUN];
## 2. VOWELS & CONSONANTS ##
define HIGHVOWEL [ e | é | ö | ő | ü | ű | i | í ];
define LOWVOWEL [ a | á | o | ó | u | ú ];
define HRVOWEL [ ö | ő | ü | ű ]; #high rounded vowels
define HURVOWEL [ e | é | i | í ]; #high unrounded vowels
define VOWEL [HIGHVOWEL | LOWVOWEL];
define CONSONANT [b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z];
### 3. NUMBER ###
define NUMBER [%+PL:{<AEÖO>k} | %+SG:0];
#### 4. MORPHOTACTICS #####
regex STEM NUMBER;
##### 5. VOWEL HARMONY #####
regex {<AEÖO>} -> a || ASTEM _;
regex {<AEÖO>} -> e || HURVOWEL CONSONANT+ _;
regex {<AEÖO>} -> ö || HRVOWEL CONSONANT+ _;
regex {<AEÖO>} -> o || LOWVOWEL CONSONANT+ _;
regex {<AEÖO>} -> 0 || VOWEL _;
turn stack
compose net
print lower-words
| /lib/1-1-number.r | no_license | homuhe/hunoun-FST | R | false | false | 970 | r | # HUNGARIAN NOUN DECLENSION
# 1. STEMS #
define ASTEM [{ház} | {toll} | {híd} | {úr}];
define NOUN [{nap} | {ember} | {kert} | {hajó} | {gyümölcs}];
define STEM [ASTEM | NOUN];
## 2. VOWELS & CONSONANTS ##
define HIGHVOWEL [ e | é | ö | ő | ü | ű | i | í ];
define LOWVOWEL [ a | á | o | ó | u | ú ];
define HRVOWEL [ ö | ő | ü | ű ]; #high rounded vowels
define HURVOWEL [ e | é | i | í ]; #high unrounded vowels
define VOWEL [HIGHVOWEL | LOWVOWEL];
define CONSONANT [b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|y|z];
### 3. NUMBER ###
define NUMBER [%+PL:{<AEÖO>k} | %+SG:0];
#### 4. MORPHOTACTICS #####
regex STEM NUMBER;
##### 5. VOWEL HARMONY #####
regex {<AEÖO>} -> a || ASTEM _;
regex {<AEÖO>} -> e || HURVOWEL CONSONANT+ _;
regex {<AEÖO>} -> ö || HRVOWEL CONSONANT+ _;
regex {<AEÖO>} -> o || LOWVOWEL CONSONANT+ _;
regex {<AEÖO>} -> 0 || VOWEL _;
turn stack
compose net
print lower-words
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{gamlassoChecks}
\alias{gamlassoChecks}
\title{Checking data before fitting gamlasso}
\usage{
gamlassoChecks(
data,
response.name,
linear.name,
smooth.name,
family,
linear.penalty,
smooth.penalty,
offset.name,
weights.name,
num.knots,
num.iter,
tolerance,
seed,
prompts
)
}
\arguments{
\item{data}{The training data for fitting the model}
\item{response.name}{The name of the response variable. Vector of two if
\code{family = "binomial"}}
\item{linear.name}{The names of the variables to be used as linear predictors}
\item{smooth.name}{The names of the variables to be used as smoothers}
\item{family}{The family describing the error distribution and link function
to be used in the model. A character string which can only be
\code{"gaussian"} (default), \code{"binomial"}, \code{"poisson"} or
\code{"cox"}. For \code{family = "binomial"}, \code{response} can be
a vector of two and for \code{family="cox"}, \code{weights} must
be provided (see details below).}
\item{linear.penalty}{The penalty used on the linear predictors. Can be 0, 1 or 2}
\item{smooth.penalty}{The penalty used on the smoothers. Can be 1 or 2}
\item{offset.name}{The name of the offset variable. \code{NULL} (default) if not provided}
\item{weights.name}{The name of the weights variable. \code{NULL} (default)
if not provided. See \code{Details} of \code{\link{gamlasso}}.}
\item{num.knots}{Number of knots for each smoothers. Can be a single integer
(recycled for each smoother variable) or a vector of integers the same length
as the number of smoothers.}
\item{num.iter}{Number of iterations for the gamlasso loop}
\item{tolerance}{Tolerance for convergence of the gamlasso loop}
\item{seed}{The random seed can be specified for reproducibility. This is used
for fitting the gam and lasso models, or fixed before each loop of gamlasso.}
\item{prompts}{logical. Should \code{gamlassoChecks} provide interactive
user prompts for corrective action when needed.}
}
\value{
\code{gamlassoChecks} produces a series of logical values:
\code{allcheck} indicating if the arguments passed all the checks,
\code{fit.smoothgam} indicating if there aren't any linear predictors and
a model with only smoothers should be fitted, \code{fit.glmnet}
is the counterpart for smooth predictors. It also returns the cleaned
(if needed) arguments as a list named \code{cleandata} whose elements are:
\tabular{ll}{
\code{train.data} \tab The training data with unnecessary columns deleted \cr
\code{linear.name}, \code{smooth.name}, \code{num.knots} \tab The changed
variable names and number of knots \cr
\code{linear.penalty}, \code{smooth.penalty} \tab The changed penalties for linear and smooth
terms. Reset to their default values only
in the rare case of too few predictors
}
}
\description{
This function checks if the arguments entered for fitting a gamlasso model
are compatible with each other. Not recommended to call directly. Only use
if cleaning data prior to fitting \code{\link{gamlassoFit}}
}
\note{
The arguments \code{offset.name}, \code{num.iter}, \code{tolerance}
and \code{seed} are currently not used in testing.
}
\examples{
## Usage similar to gamlassoFit
}
| /man/gamlassoChecks.Rd | no_license | cran/plsmselect | R | false | true | 3,324 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{gamlassoChecks}
\alias{gamlassoChecks}
\title{Checking data before fitting gamlasso}
\usage{
gamlassoChecks(
data,
response.name,
linear.name,
smooth.name,
family,
linear.penalty,
smooth.penalty,
offset.name,
weights.name,
num.knots,
num.iter,
tolerance,
seed,
prompts
)
}
\arguments{
\item{data}{The training data for fitting the model}
\item{response.name}{The name of the response variable. Vector of two if
\code{family = "binomial"}}
\item{linear.name}{The names of the variables to be used as linear predictors}
\item{smooth.name}{The names of the variables to be used as smoothers}
\item{family}{The family describing the error distribution and link function
to be used in the model. A character string which can only be
\code{"gaussian"} (default), \code{"binomial"}, \code{"poisson"} or
\code{"cox"}. For \code{family = "binomial"}, \code{response} can be
a vector of two and for \code{family="cox"}, \code{weights} must
be provided (see details below).}
\item{linear.penalty}{The penalty used on the linear predictors. Can be 0, 1 or 2}
\item{smooth.penalty}{The penalty used on the smoothers. Can be 1 or 2}
\item{offset.name}{The name of the offset variable. \code{NULL} (default) if not provided}
\item{weights.name}{The name of the weights variable. \code{NULL} (default)
if not provided. See \code{Details} of \code{\link{gamlasso}}.}
\item{num.knots}{Number of knots for each smoothers. Can be a single integer
(recycled for each smoother variable) or a vector of integers the same length
as the number of smoothers.}
\item{num.iter}{Number of iterations for the gamlasso loop}
\item{tolerance}{Tolerance for convergence of the gamlasso loop}
\item{seed}{The random seed can be specified for reproducibility. This is used
for fitting the gam and lasso models, or fixed before each loop of gamlasso.}
\item{prompts}{logical. Should \code{gamlassoChecks} provide interactive
user prompts for corrective action when needed.}
}
\value{
\code{gamlassoChecks} produces a series of logical values:
\code{allcheck} indicating if the arguments passed all the checks,
\code{fit.smoothgam} indicating if there aren't any linear predictors and
a model with only smoothers should be fitted, \code{fit.glmnet}
is the counterpart for smooth predictors. It also returns the cleaned
(if needed) arguments as a list named \code{cleandata} whose elements are:
\tabular{ll}{
\code{train.data} \tab The training data with unnecessary columns deleted \cr
\code{linear.name}, \code{smooth.name}, \code{num.knots} \tab The changed
variable names and number of knots \cr
\code{linear.penalty}, \code{smooth.penalty} \tab The changed penalties for linear and smooth
terms. Reset to their default values only
in the rare case of too few predictors
}
}
\description{
This function checks if the arguments entered for fitting a gamlasso model
are compatible with each other. Not recommended to call directly. Only use
if cleaning data prior to fitting \code{\link{gamlassoFit}}
}
\note{
The arguments \code{offset.name}, \code{num.iter}, \code{tolerance}
and \code{seed} are currently not used in testing.
}
\examples{
## Usage similar to gamlassoFit
}
|
/02_study/00_BioInfo/03_Sequence Databases.R | no_license | braveji18/mystudy | R | false | false | 1,772 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze_results.R
\name{plot_beta}
\alias{plot_beta}
\title{Plot the coefficients for each variable for each iteration of MCMC}
\usage{
plot_beta(result, title = "")
}
\arguments{
\item{result}{Output object from mmvbvs function}
\item{title}{A string object for the title of the resulting plot}
}
\value{
ggplot object
}
\description{
Plot the coefficients for each variable for each iteration of MCMC
}
| /MMVBVS/man/plot_beta.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 484 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze_results.R
\name{plot_beta}
\alias{plot_beta}
\title{Plot the coefficients for each variable for each iteration of MCMC}
\usage{
plot_beta(result, title = "")
}
\arguments{
\item{result}{Output object from mmvbvs function}
\item{title}{A string object for the title of the resulting plot}
}
\value{
ggplot object
}
\description{
Plot the coefficients for each variable for each iteration of MCMC
}
|
---
title: "Machine learning: 5 components 4 properties after PCA.rmd"
author: "Mikhail Orlov"
output: pdf_document
---
Clearing workspace, setting working directory and loading R packages
```{r}
rm(list=ls())
setwd('/home/mikhail/Documents/Script_2016_all/PCA/')
library(R.matlab)
library(seqinr)
library(factoextra)
library(data.table)
library(caret)
library(doMC)
library(Biostrings)
library(reldna)
```
Reading in the E. coli K12 genome (GenBank Accession U00096.2), creating the reverse-complement genome, and transforming both genomes into character form
```{r}
e.coli_U00096.2<-unlist(read.fasta('e.coli_U00096.2.fasta', seqonly = T))
reverseComplement_e.coli_U00096.2<-as.character(reverseComplement(DNAString(e.coli_U00096.2)))
e.coli_U00096.2_char<-unlist(strsplit(e.coli_U00096.2, ''))
reverseComplement_e.coli_U00096.2_char<-unlist(strsplit(reverseComplement_e.coli_U00096.2, ''))
```
Dynamical properties calculation from within R using octave script translated according to Grinevich et al., 2013
Creating function for dynamical properties calculation
```{r}
dynchars<-function(seq, interval_size) {
if (missing(seq))
stop("Need to specify sequence (as a vector of chars)")
if (missing(interval_size))
stop("Need to specify interval size")
if(!is.character(seq))
stop("Sequence must be a character vector containing A, C, G, T letters only")
seq<-toupper(seq)
seq<-c(seq, seq[2:(interval_size)])
a<-3.4*10^(-10)
I<-c(7.6, 4.8, 8.2, 4.1)*10^(-44)
K<-c(227, 155, 220, 149)*10^(-20)
V<-c(2.09, 1.43, 3.12, 2.12)*10^(-20)
tau<-c(127, 99, 140, 84)
csA<-cumsum(seq=='A')
csT<-cumsum(seq=='T')
csG<-cumsum(seq=='G')
csC<-cumsum(seq=='C')
countA = csA[interval_size:length(csA)]-c(0, csA[1:(length(csA)-interval_size)])
countT = csT[interval_size:length(csT)]-c(0, csT[1:(length(csT)-interval_size)])
countG = csG[interval_size:length(csG)]-c(0, csG[1:(length(csG)-interval_size)])
countC = csC[interval_size:length(csC)]-c(0, csC[1:(length(csC)-interval_size)])
M<-cbind(countA, countT, countG, countC)/interval_size
M_comp<-cbind(countT, countA, countC, countG)/interval_size
M_comp<-apply(t(M_comp),1,rev)
Is<-as.numeric(M%*%I)#! numeric conversion
Ks<-as.numeric(M%*%K)
Vs<-as.numeric(M%*%V)
E01<-(8*(Ks*Vs)^0.5)* 6E23 / 4184
d1<-((Ks*a^2)/Vs)^(0.5)/a;
c1<-(Ks*a^2/Is)^0.5
m1<-E01/c1/6.011E-26
taus1<-as.numeric(M%*%tau) #!as.numeric conversion
gc1 = M[,3] + M[,4]
Is<-as.numeric(M_comp%*%I)#! numeric conversion
Ks<-as.numeric(M_comp%*%K)
Vs<-as.numeric(M_comp%*%V)
E02<- 8*(Ks*Vs)^0.5 * 6E23 / 4184;
d2<-((Ks*a^2)/Vs)^(0.5)/a;
c2<-(Ks*a^2/Is)^0.5;
m2<-E02/c2/6.011E-26;
taus2<-as.numeric(M_comp%*%tau)
gc2 = M_comp[,3] + M_comp[,4]
dynchars_return<-list(E01=E01, d1=d1, c1=c1, m1=m1, taus1=taus1, gc1=gc1, E02=E02, d2=d2, c2=c2, m2=m2, taus2=taus2, gc2=gc2)
return(dynchars_return)
}
```
Calculation the properties for a given genome using sliding window 200 nt
```{r}
dynchars_output<-dynchars(e.coli_U00096.2_char, 200)
E01<-dynchars_output$E01
E02<-dynchars_output$E02
d1<-dynchars_output$d1
d2<-dynchars_output$d2
gc200matlab1<-dynchars_output$gc1 #name of the variable is from before
gc200matlab2<-dynchars_output$gc2 #name of the variable is from before
```
Loading data on sequences of different types (promoters, non-promoters, genes, islands, and lowscore) from .Rdata files (must be copied separately)
```{r}
load('spline_dataset_pro.Rdata')
load('spline_dataset_notpro.Rdata')
load('spline_dataset_gen.Rdata')
load('spline_dataset_isl.Rdata')
load('dataset_lowscore.Rdata')
##dataset_notpro[[1488]]<-NULL # to close to the left flank
```
Check-up for reverse strand
```{r, echo=FALSE}
gregexpr(toupper(dataset_isl$isl2228$seq), reverseComplement_e.coli_U00096.2)
nchar(e.coli_U00096.2)-dataset_isl$isl2228$tss-270
```
Extracting data on all promoters and on experimentally found ones - including previously calculated electrostatic potential profiles
```{r}
pro_names<-names(dataset_pro)
tsss<-c()
seqs<-c()
exp_strands<-c()
exp_tsss<-c()
exp_mpots_check<-c() #for experimentally found one
exp_names<-c()
for (i in 1:length(dataset_pro)){
tsss<-c(tsss, dataset_pro[[i]]$tss)
seqs<-c(seqs, dataset_pro[[i]]$seq)
if (dataset_pro[[i]]$evidence=='experimental'){
#exp_proms<-rbind(exp_proms, c(pro_names[i], strands[i], tsss[i], seqs[i]))
exp_names<-c(exp_names, pro_names[i])
exp_mpots_check<-rbind(exp_mpots_check, dataset_pro[[i]]$mpot)
#exp_promoters<-rbind(exp_promoters, c(pro_names[i], strands[i], tsss[i], seqs[i]))
exp_tsss<-c(exp_tsss, dataset_pro[[i]]$tss)
exp_strands<-c(exp_strands, dataset_pro[[i]]$strand)}
}
```
# # # Changing genomic coordinate used for dataset_... - TSS is according to 5'-end for both strands
```{r}
exp_tsss[which(exp_strands=='reverse')]<-nchar(e.coli_U00096.2)-exp_tsss[which(exp_strands=='reverse')]
#exp_tsss<--exp_tsss
```
Creating matrices for data on activation energy ('aeos1') and size ('aeos3'). The matrices is 699*201 - 699 promoters sequences, 201 value for a physical property profile
to 5'-end for both strands
Creating matrices of dynamical properties and GC-content for intervals [-150; 50] nts
```{r}
for (i in c('aeos1forward', 'aeos1reverse', 'aeos3forward', 'aeos3reverse', 'gc200forward', 'gc200reverse', 'mpotsforward', 'mpotsreverse')) {
assign(i, c())
}
zout<- -480:239
for (i in 1:length(exp_tsss)) {
if (exp_strands[i]=='forward') {
aeos1forward<-rbind(aeos1forward, E01[as.numeric(exp_tsss[i]-150):(exp_tsss[i]+50)])
#aeos2forward<-rbind(aeos2forward, matr1[(as.numericexp_tsss[i]-150):(as.numericexp_tsss[i]+50),2])
aeos3forward<-rbind(aeos3forward, d1[(as.numeric(exp_tsss[i])-150):(exp_tsss[i]+50)])
#aeos4forward<-rbind(aeos4forward, matr1[(as.numeric(exp_proms[i,3])-150):(as.numeric(exp_proms[i,3])+50),4])
gc200forward<-rbind(gc200forward, gc200matlab1[(as.numeric(exp_tsss[i])-150):(as.numeric(exp_tsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, exp_tsss[i]-250, exp_tsss[i]+150), bound=c(50, 350), ref=251 )
mpotsforward<-rbind(mpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
aeos1reverse<-rbind(aeos1reverse, E02[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)])
#aeos2reverse<-rbind(aeos2reverse, matr1[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150),2])
aeos3reverse<-rbind(aeos3reverse, d2[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)])
#aeos4reverse<-rbind(aeos4reverse, matr1[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150),4])
gc200reverse<-rbind(gc200reverse, (gc200matlab2[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, exp_tsss[i]-250, exp_tsss[i]+150), bound=c(50, 350), ref=251 )
mpotsreverse<-rbind(mpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
#merging matrices for forward and reverse strands together
aeos1<-rbind(aeos1forward, aeos1reverse)
#aeos2<-rbind(aeos2forward, aeos2reverse)
aeos3<-rbind(aeos3forward, aeos3reverse)
#aeos4<-rbind(aeos4forward, aeos4reverse)
gc200<-rbind(gc200forward, gc200reverse)
mpots<-rbind(mpotsforward, mpotsreverse)
```
Extracting data for non-promoters. The 1488th sequence is removed since it is only 188 nt from the flank and the electrostatic potential cannot be calculated.
```{r}
notpro_names<-names(dataset_notpro)
nottsss<-rep(0, length(dataset_notpro))
notseqs<-c()
notstrands<-c()
#notmpots<-c()
for (i in 1:length(dataset_notpro)){
nottsss[i]<-dataset_notpro[[i]]$tss
notseqs<-toupper(c(notseqs, dataset_notpro[[i]]$seq))
notstrands<-c(notstrands, dataset_notpro[[i]]$strand)
# notmpots<-rbind(notmpots, dataset_notpro[[i]]$mpot)
}
# # # no reverse strand!
#nottsss[which(notstrands=='reverse')]<-nchar(e.coli_U00096.2)-nottsss[which(notstrands=='reverse')]
#nottsss<-nchar(e.coli_U00096.2)-nottsss
```
Matrices for physical properties profiles - creating matrices for data on activation energy ('aeos1') and size ('aeos3')
```{r}
for (i in c('notaeos1forward', 'notaeos1reverse', 'notaeos3forward', 'notaeos3reverse', 'notgc200forward', 'notgc200reverse', 'notmpotsforward', 'notmpotsreverse')) {
assign(i, c())
}
for (i in 1:length(nottsss)) {
if (notstrands[i]=='forward') {
if (nottsss[i]<250) {
notaeos1forward<-rbind(notaeos1forward, E01[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos2forward<-rbind(notaeos2forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),2])
notaeos3forward<-rbind(notaeos3forward, d1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos4forward<-rbind(notaeos4forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),4])
notgc200forward<-rbind(notgc200forward, gc200matlab1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
spec_for1488<-substr(
paste0(substr(e.coli_U00096.2, nchar(e.coli_U00096.2)-199, nchar(e.coli_U00096.2)), e.coli_U00096.2),
nottsss[i]-250+200, nottsss[i]+150+200)
p<-lseqspline1D(spec_for1488, bound=c(50, 350), ref=251 )
notmpotsforward<-rbind(notmpotsforward,
p$mpot[p$x %in% zout])
} else {
notaeos1forward<-rbind(notaeos1forward, E01[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos2forward<-rbind(notaeos2forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),2])
notaeos3forward<-rbind(notaeos3forward, d1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos4forward<-rbind(notaeos4forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),4])
notgc200forward<-rbind(notgc200forward, gc200matlab1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, nottsss[i]-250, nottsss[i]+150), bound=c(50, 350), ref=251 )
notmpotsforward<-rbind(notmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
# } else {
# notaeos1reverse<-rbind(notaeos1reverse, E02[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)])
#notaeos2reverse<-rbind(notaeos2reverse, matr1[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150),2])
# notaeos3reverse<-rbind(notaeos3reverse, d2[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)])
#notaeos4reverse<-rbind(notaeos4reverse, matr1[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150),4])
# notgc200reverse<-rbind(notgc200reverse, (gc200matlab2[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)]))
# p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, nottsss[i]-250, nottsss[i]+150), bound=c(50, 350), ref=251 )
# notmpotsreverse-rbind(notmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
# p$mpot[p$x %in% zout])
}
}
#mergind data for strands is not neaded - reverse strand is empty
notaeos1<-notaeos1forward
#notaeos2<-notaeos2forward
notaeos3<-notaeos3forward
#notaeos4<-notaeos4forward
notgc200<-notgc200forward
notmpots<-notmpotsforward
```
Data extracion for genes
```{r}
gen_names<-names(dataset_gen)
gentsss<-c()
genseqs<-c
genstrands<-c()
#genmpots<-c()
for (i in 1:length(dataset_gen)){
gentsss<-c(gentsss, dataset_gen[[i]]$tss)
genseqs<-toupper(c(genseqs, dataset_gen[[i]]$seq))
genstrands<-c(genstrands, dataset_gen[[i]]$strand)
# genmpots<-rbind(genmpots, dataset_gen[[i]]$mpot)
}
```
```{r}
# # #
gentsss[which(genstrands=='reverse')]<-nchar(e.coli_U00096.2)-gentsss[which(genstrands=='reverse')]
for (i in c('genaeos1forward', 'genaeos1reverse', 'genaeos3forward', 'genaeos3reverse', 'gengc200forward', 'gengc200reverse', 'genmpotsforward', 'genmpotsreverse' )) {
assign(i, c())
}
for (i in 1:length(dataset_gen)) {
if (genstrands[i]=='forward') {
genaeos1forward<-rbind(genaeos1forward, E01[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
# genaeos2forward<-rbind(genaeos2forward, matr1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50),2])
genaeos3forward<-rbind(genaeos3forward, d1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
# genaeos4forward<-rbind(genaeos4forward, matr1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50),4])
gengc200forward<-rbind(gengc200forward, gc200matlab1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, gentsss[i]-250, gentsss[i]+150), bound=c(50, 350), ref=251 )
genmpotsforward<-rbind(genmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
genaeos1reverse<-rbind(genaeos1reverse, E02[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)])
# genaeos2reverse<-rbind(genaeos2reverse, matr1[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150),2])
genaeos3reverse<-rbind(genaeos3reverse, d2[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)])
#genaeos4reverse<-rbind(genaeos4reverse, matr1[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150),4])
gengc200reverse<-rbind(gengc200reverse, (gc200matlab2[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, gentsss[i]-250, gentsss[i]+150), bound=c(50, 350), ref=251 )
genmpotsreverse<-rbind(genmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
genaeos1<-rbind(genaeos1forward, genaeos1reverse)
#genaeos2<-rbind(genaeos2forward, genaeos2reverse)
genaeos3<-rbind(genaeos3forward, genaeos3reverse)
#genaeos4<-rbind(genaeos4forward, genaeos4reverse)
gengc200<-rbind(gengc200forward, gengc200reverse)
genmpots<-rbind(genmpotsforward, genmpotsreverse)
```
Islands. data extracion
```{r}
isl_names<-names(dataset_isl)
isltsss<-c()
islseqs<-c
islstrands<-c()
#islmpots<-c()
for (i in 1:length(dataset_isl)){
isltsss<-c(isltsss, dataset_isl[[i]]$tss)
islseqs<-toupper(c(islseqs, dataset_isl[[i]]$seq))
islstrands<-c(islstrands, dataset_isl[[i]]$strand)
# islmpots<-rbind(islmpots, dataset_isl[[i]]$mpot)
}
# # #
isltsss[which(islstrands=='reverse')]<-nchar(e.coli_U00096.2)-isltsss[which(islstrands=='reverse')]
```
```{r}
# Initialise per-strand accumulators; c() returns NULL, so each starts empty
# and the rbind() calls below grow them one row per sequence.
for (i in c('islaeos1forward', 'islaeos1reverse', 'islaeos3forward', 'islaeos3reverse', 'islgc200forward', 'islgc200reverse', 'islmpotsforward', 'islmpotsreverse')) {
assign(i, c())
}
# For each island: extract a window around the TSS from precomputed genome-wide
# profiles (E01/E02 = activation energy, d1/d2 = open-state size,
# gc200matlab1/2 = GC content), and compute the electrostatic potential (mpot)
# with reldna::lseqspline1D. Forward strand: window -150..+50 around the TSS;
# reverse strand uses the mirrored window on the reverse-complement profiles.
for (i in 1:length(dataset_isl)) {
if (islstrands[i]=='forward') {
islaeos1forward<-rbind(islaeos1forward, E01[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
# islaeos2forward<-rbind(islaeos2forward, matr1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50),2])
islaeos3forward<-rbind(islaeos3forward, d1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
# islaeos4forward<-rbind(islaeos4forward, matr1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50),4])
islgc200forward<-rbind(islgc200forward, gc200matlab1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
# Spline-smoothed electrostatic profile on a -250..+150 window; ref=251 marks the TSS.
p<-lseqspline1D(substr(e.coli_U00096.2, isltsss[i]-250, isltsss[i]+150), bound=c(50, 350), ref=251 )
# `zout` (defined in an earlier chunk) selects the x-grid points to keep,
# so every row of islmpotsforward has the same length — TODO confirm.
islmpotsforward<-rbind(islmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
islaeos1reverse<-rbind(islaeos1reverse, E02[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)])
# islaeos2reverse<-rbind(islaeos2reverse, matr1[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150),2])
islaeos3reverse<-rbind(islaeos3reverse, d2[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)])
#islaeos4reverse<-rbind(islaeos4reverse, matr1[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150),4])
islgc200reverse<-rbind(islgc200reverse, (gc200matlab2[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, isltsss[i]-250, isltsss[i]+150), bound=c(50, 350), ref=251 )
islmpotsreverse<-rbind(islmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
# Combine strands: forward rows first, then reverse rows.
islaeos1<-rbind(islaeos1forward, islaeos1reverse)
#islaeos2<-rbind(islaeos2forward, islaeos2reverse)
islaeos3<-rbind(islaeos3forward, islaeos3reverse)
#islaeos4<-rbind(islaeos4forward, islaeos4reverse)
islgc200<-rbind(islgc200forward, islgc200reverse)
islmpots<-rbind(islmpotsforward, islmpotsreverse)
```
Lowscore. data extraction
only first 2000 among lowscore sequences are used!
```{r}
# Initialise metadata containers for the lowscore dataset (only the first
# 2000 sequences are used downstream).
lowscore_names<-names(dataset_lowscore)
lowscoretsss<-c()
lowscoreseqs<-c()  # FIX: was `lowscoreseqs<-c`, which assigned the *function* c
lowscorestrands<-c()
#lowscorempots<-c()
```
```{r}
# Extract TSS, sequence and strand for the first 2000 lowscore sequences only.
for (i in 1:2000){
lowscoretsss<-c(lowscoretsss, dataset_lowscore[[i]]$tss)
lowscoreseqs<-toupper(c(lowscoreseqs, dataset_lowscore[[i]]$seq))
lowscorestrands<-c(lowscorestrands, dataset_lowscore[[i]]$strand)
# lowscorempots<-rbind(lowscorempots, dataset_lowscore[[i]]$mpot)
}
# # #
# Re-express reverse-strand TSS coordinates on the reverse complement.
lowscoretsss[which(lowscorestrands=='reverse')]<-nchar(e.coli_U00096.2)-lowscoretsss[which(lowscorestrands=='reverse')]
```
```{r}
# Initialise each per-strand accumulator as an empty (NULL) object so the
# rbind() calls in the next chunk can grow them row by row.
# (assign(nm, NULL) is equivalent to the original assign(i, c()):
# c() with no arguments returns NULL.)
accumulator_names <- c(
  'lowscoreaeos1forward', 'lowscoreaeos1reverse',
  'lowscoreaeos3forward', 'lowscoreaeos3reverse',
  'lowscoregc200forward', 'lowscoregc200reverse',
  'lowscorempotsforward', 'lowscorempotsreverse'
)
for (nm in accumulator_names) {
  assign(nm, NULL)
}
```
only first 2000 among lowscore sequences are used!
```{r}
# Same window extraction as for islands, applied to the first 2000 lowscore
# sequences: -150..+50 around the TSS on the forward strand, mirrored window
# on the reverse-complement profiles otherwise.
for (i in 1:2000) {
if (lowscorestrands[i]=='forward') {
lowscoreaeos1forward<-rbind(lowscoreaeos1forward, E01[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
# lowscoreaeos2forward<-rbind(lowscoreaeos2forward, matr1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50),2])
lowscoreaeos3forward<-rbind(lowscoreaeos3forward, d1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
# lowscoreaeos4forward<-rbind(lowscoreaeos4forward, matr1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50),4])
lowscoregc200forward<-rbind(lowscoregc200forward, gc200matlab1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
# Electrostatic profile via reldna; ref=251 marks the TSS inside the
# -250..+150 substring. `zout` (earlier chunk) fixes the kept grid points.
p<-lseqspline1D(substr(e.coli_U00096.2, lowscoretsss[i]-250, lowscoretsss[i]+150), bound=c(50, 350), ref=251 )
lowscorempotsforward<-rbind(lowscorempotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
lowscoreaeos1reverse<-rbind(lowscoreaeos1reverse, E02[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)])
# lowscoreaeos2reverse<-rbind(lowscoreaeos2reverse, matr1[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150),2])
lowscoreaeos3reverse<-rbind(lowscoreaeos3reverse, d2[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)])
#lowscoreaeos4reverse<-rbind(lowscoreaeos4reverse, matr1[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150),4])
lowscoregc200reverse<-rbind(lowscoregc200reverse, (gc200matlab2[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, lowscoretsss[i]-250, lowscoretsss[i]+150), bound=c(50, 350), ref=251 )
lowscorempotsreverse<-rbind(lowscorempotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
# Combine strands: forward rows first, then reverse rows.
lowscoreaeos1<-rbind(lowscoreaeos1forward, lowscoreaeos1reverse)
#lowscoreaeos2<-rbind(lowscoreaeos2forward, lowscoreaeos2reverse)
lowscoreaeos3<-rbind(lowscoreaeos3forward, lowscoreaeos3reverse)
#lowscoreaeos4<-rbind(lowscoreaeos4forward, lowscoreaeos4reverse)
lowscoregc200<-rbind(lowscoregc200forward, lowscoregc200reverse)
lowscorempots<-rbind(lowscorempotsforward, lowscorempotsreverse)
```
Setting names for created datasets
```{r}
# Label the rows of every property matrix so plots and tables are readable.
rownames(aeos1)<-exp_names
rownames(aeos3)<-exp_names
rownames(mpots)<-exp_names
rownames(gc200)<-exp_names
# FIX: each matrix now uses its own row count. The original reused
# nrow(notaeos1)/nrow(genaeos1)/nrow(islaeos1)/nrow(lowscoreaeos1) for its
# siblings, which errors or silently mislabels if the counts ever differ.
rownames(notaeos1)<-paste0('Non_promoter_', seq_len(nrow(notaeos1)))
rownames(notaeos3)<-paste0('Non_promoter_', seq_len(nrow(notaeos3)))
rownames(notmpots)<-paste0('Non_promoter_', seq_len(nrow(notmpots)))
rownames(notgc200)<-paste0('Non_promoter_', seq_len(nrow(notgc200)))
rownames(genaeos1)<-paste0('Gene_', seq_len(nrow(genaeos1)))
rownames(genaeos3)<-paste0('Gene_', seq_len(nrow(genaeos3)))
rownames(genmpots)<-paste0('Gene_', seq_len(nrow(genmpots)))
rownames(gengc200)<-paste0('Gene_', seq_len(nrow(gengc200)))
rownames(islaeos1)<-paste0('Islands_', seq_len(nrow(islaeos1)))
rownames(islaeos3)<-paste0('Islands_', seq_len(nrow(islaeos3)))
rownames(islmpots)<-paste0('Islands_', seq_len(nrow(islmpots)))
rownames(islgc200)<-paste0('Islands_', seq_len(nrow(islgc200)))
rownames(lowscoreaeos1)<-paste0('Lowscore_', seq_len(nrow(lowscoreaeos1)))
rownames(lowscoreaeos3)<-paste0('Lowscore_', seq_len(nrow(lowscoreaeos3)))
rownames(lowscorempots)<-paste0('Lowscore_', seq_len(nrow(lowscorempots)))
rownames(lowscoregc200)<-paste0('Lowscore_', seq_len(nrow(lowscoregc200)))
```
#Merging datasets. Scaling data
```{r}
# Build the full feature matrix: one row per sequence, columns are the four
# property profiles concatenated (aeos1 | aeos3 | mpots | gc200).
# Row blocks, in order: promoters, non-promoters, genes, islands, lowscore —
# downstream row-range subsetting relies on this order.
to_pca_5components_4props<-rbind(cbind((aeos1), (aeos3), (mpots), (gc200)),
cbind((notaeos1), (notaeos3), (notmpots), (notgc200)),
cbind((genaeos1), (genaeos3), (genmpots), (gengc200)),
cbind((islaeos1), (islaeos3), (islmpots), (islgc200)),
cbind((lowscoreaeos1), (lowscoreaeos3), (lowscorempots), (lowscoregc200))
)
```
# saving the initial matrix for 5 components, 4 properties
```{r}
# Persist the combined (unscaled) feature matrix for reuse in later sessions.
save(to_pca_5components_4props, file='to_pca_5components_4props.Rdata')
```
#setting sequences groups and variables groups
```{r}
# Group labels/colours per row of the feature matrix (same block order as the
# rbind above), and per-property colours for the variable arrows.
habillage_5components_4props<-c(rep('Promoters', length(exp_names)), rep('Non-promoters', length(dataset_notpro)), rep('Genes', length(dataset_gen)), rep('Islands', length(dataset_isl)), rep('Lowscore', 2000))
habillage_5components_4props_cols<-c(rep('red', length(exp_names)), rep('blue', length(dataset_notpro)), rep('green', length(dataset_gen)), rep('yellow', length(dataset_isl)), rep('magenta', 2000))
# Column names are dropped so PCA variable labels do not clutter the biplots.
colnames(to_pca_5components_4props)<-NULL
groups_variables <- c(rep('cyan', ncol(aeos1)), rep('navy', ncol(aeos3)), rep('darkgreen', ncol(mpots)), rep('darkred', ncol(gc200)))
```
#PRINCIPAL COMPONENTS ANALYSIS ITSELF
```{r}
# PCA on the combined feature matrix; centred and scaled to unit variance
# (the four properties are on different scales/units).
princ.return.5comps.4props <- prcomp(to_pca_5components_4props, scale=T, center = T)
```
``` {r PCA variables plot using base graphics}
# Helper function :
# Correlation between variables and principal components
# (standard PCA identity: var coordinates = loadings * component sdev).
var_cor_func <- function(var.loadings, comp.sdev){
var.loadings*comp.sdev
}
# Variable correlation/coordinates
loadings <- princ.return.5comps.4props$rotation
sdev <- princ.return.5comps.4props$sdev
var.coord <- t(apply(loadings, 1, var_cor_func, sdev))
#Graph of variables using R base graph
#INDIVIDUALS
# Contributions of individuals
ind.coord <- princ.return.5comps.4props$x
# Percentage contribution of each individual to each component.
contrib <- function(ind.coord, comp.sdev, n.ind){
100*(1/n.ind)*ind.coord^2/comp.sdev^2
}
ind.contrib <- t(apply(ind.coord,1, contrib,
princ.return.5comps.4props$sdev, nrow(ind.coord)))
# Custom biplot: individuals as points, correlation circle and variable
# arrows scaled by 50 so they are visible at the individuals' scale.
svg('/home/mikhail/Documents/Script_2016_all/PCA/custom_biplot_pca_5_comps.svg', height = 10, width = 10)
plot(ind.coord[,1], ind.coord[,2], cex=0.8, pch=1,
xlab="PC1",ylab="PC2", col=habillage_5components_4props_cols)
abline(h=0, v=0, lty = 2)
#text(ind.coord[,1], ind.coord[,2], labels=rownames(ind.coord),
# cex=0.7, pos = 3)
# Plot the correlation circle
a <- seq(0, 2*pi, length = 100)
lines( cos(a)*50, sin(a)*50, type = 'l', col="gray", #*50 !
xlab = "PC1", ylab = "PC2")
abline(h = 0, v = 0, lty = 2)
# Add active variables
arrows(0, 0, var.coord[, 1]*50, var.coord[, 2]*50, #*50 so the size of arrows be proper
length = 0.1, angle = 15, code = 2, col = groups_variables)
# Add labels
#text(var.coord, labels=rownames(var.coord), cex = 1, adj=1)
legend('bottomright', legend = as.character(c('E0', 'd', 'EP', 'GC')), fill = unique(groups_variables), cex = 1.5)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage_5components_4props_cols))
dev.off()
```
```{r 3D variables and individuals visualization}
library(rgl)
# NOTE(review): `variance` is only computed in a later chunk (from the prcomp
# sdev) — this chunk assumes that chunk has already been run; confirm order.
plot3d(princ.return.5comps.4props$x[,1:3], col=habillage_5components_4props_cols, size=2, xlab = paste0('PC1, approx. ', ceiling(variance[1]), '% of variance'), ylab = paste0('PC2, approx. ', ceiling(variance[2]), '% of variance'), zlab = paste0('PC3, approx. ', ceiling(variance[3]), '% of variance'))
coords <- NULL
# Build origin->loading segments (first 3 PCs) for every variable.
for (i in 1:nrow(princ.return.5comps.4props$rotation)) {
coords <- rbind(coords, rbind(c(0,0,0),princ.return.5comps.4props$rotation[i,1:3]))
}
# *1000 rescales loadings to the individuals' coordinate range.
lines3d(coords*1000, col=groups_variables, lwd=0.2)
M <- par3d("userMatrix") # #pca3d
movie3d( spin3d(), duration=15, dir="/home/mikhail/Documents/Script_2016_all/PCA/pca_movie3d/", clean = T, convert = T )
```
```{r Variables visualization}
# Variables plot: arrow per variable, coloured by contribution.
svg('/home/mikhail/Documents/Script_2016_all/PCA/fviz_pca_var_princ.return.5comps.4props.svg', height = 10, width = 10)
fviz_pca_var(princ.return.5comps.4props, geom = c('arrow', 'text'), col.var="contrib")+
scale_color_gradient2(low="white", mid="white",
high="black", midpoint=0.1) + theme_minimal()
dev.off()
# FIX: the biplot previously reused the variables-plot filename above,
# silently overwriting it; it now gets its own file.
svg('/home/mikhail/Documents/Script_2016_all/PCA/fviz_pca_biplot_princ.return.5comps.4props.svg', height = 10, width = 10)
fviz_pca_biplot(princ.return.5comps.4props,
habillage = as.factor(habillage_5components_4props), addEllipses = TRUE,
col.var = rainbow(ncol(to_pca_5components_4props)), alpha.var = "contrib",
label = "var"
) +
scale_color_brewer(palette="Dark2")+
theme_minimal()
dev.off()
library(ggbiplot)
#print(ggbiplot(princ.return.5comps.4props, obs.scale = 1, var.scale = 1, groups = (habillage_5components_4props), ellipse = TRUE, circle = TRUE))
#biplot(princ.return.5comps.4props, choices = 1:2, scale = 1, pc.biplot = TRUE, col=habillage_5components_4props_cols)
```
#saving initial prcomp output
```{r}
# Persist the raw prcomp object for later sessions.
save(princ.return.5comps.4props, file='princ.return.5comps.4props.Rdata')
```
prcomp output transformation
```{r}
# Eigenvalue/variance bookkeeping from the prcomp result.
eig.val <- get_eigenvalue(princ.return.5comps.4props)
# Eigenvalues
eig <- (princ.return.5comps.4props$sdev)^2
# Variances in percentage
variance <- eig*100/sum(eig)
# Cumulative variances
cumvar <- cumsum(variance)
#png('cumvar_5components_4props.png', height=1250, width=1250, res=130)
plot(cumvar, type='l', main='Cumulative variance for principal components \n on 4 properties for 5 components mixture', ylab='Cumulative variance (%)', xlab='Principal components')
# Cut-off chosen by the author: first 100 PCs retain ~98% of the variance.
v.5comps.4props=100 # 65
h.5comps.4props=98
abline(h=h.5comps.4props, v=v.5comps.4props, col=12)
text(v.5comps.4props-15, h.5comps.4props ,paste('Number of \nPCs=', v.5comps.4props,'\n' ,h.5comps.4props, '% of \nvariance \nretained'),srt=0.2,pos=3)
#dev.off()
```
Further transformation — the chosen number of 'rotation' columns (i.e. eigenvectors) is multiplied by the initial large matrix of variables
```{r}
#pcs.from.var<-c()
#for (i in 1:v.5comps.4props) {
# load <- princ.return.5comps.4props$rotation[,i]
# pr.cp <- to_pca_5components_4props %*% load
# pr <- as.numeric(pr.cp)
# pcs.from.var<-cbind(pcs.from.var, pr)
#}
```
#saving PCA data: variables are converted to PCs
```{r}
#rownames(pcs.from.var)<-rownames(to_pca_5components_4props)
#colnames(pcs.from.var)<-1:ncol(pcs.from.var)
```
#renaming the variable, saving the transformed output
```{r}
#mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200<- pcs.from.var
#save(mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200, file='new_no_factor_8XII_mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200.Rdata')
```
#general PCA results visualization
habillage i.e. partitions into groups
```{r}
# Individuals plots for PC pairs (1,2), (1,3), (2,3) with 99% group ellipses.
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_1_2.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_1_3.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, axes = c(1,3), label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_2_3.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, axes = c(2,3), label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
library(rgl)
# Per-row colours in the same block order as the feature matrix.
habillage.pro.not.isl.gen<-c(rep('red', length(exp_names)), rep ('blue', length(dataset_notpro)), rep('green', length(dataset_gen)), rep('orange', length(dataset_isl)), rep('magenta', 2000))
#4 components correspond to colors 1-4
open3d()
plot3d(princ.return.5comps.4props$x[,1:3], col=(habillage.pro.not.isl.gen), size=2.5)
M <- par3d("userMatrix") # #pca3d
movie3d( spin3d(), duration=15, dir="/home/mikhail/Documents/Script_2016_all/PCA/pca_movie3d/", clean = T, convert = T )
# #
library(pca3d)
pca3d(princ.return.5comps.4props, group = habillage.pro.not.isl.gen, col = habillage.pro.not.isl.gen, radius = 0.5)
#makeMoviePCA()
```
# # # SUPERVISED MACHINE LEARNING
Creating subsets for separate sequence types
```{r}
# Parallel backend for caret (3 workers).
registerDoMC(cores = 3)
# Keep the first 100 PCs as features.
df <- as.data.frame(((princ.return.5comps.4props$x)[,1:100]))
#df<-cbind(habillage_5components_4props, df)
# NOTE(review): these row ranges are hard-coded to the dataset sizes
# (699 promoters, 1880 non-promoters, 3427 genes, 2228 islands,
# 2000 lowscore) and silently break if any input set changes — verify.
promoters <- df[1:699,]
non_promoters <- df[700:2579,]
genes <- df[2580:6006,]
islands <- df[6007:8234,]
lowscore <- df[8235:10234,]
```
Adding factor columns
```{r}
# Build one two-class data frame per comparison. The cbind() uses the factor
# *variable name* as the resulting column name, which the createDataPartition
# and train() calls below rely on — do not rename these variables.
factor_to_promoters_vs_lowscore<-as.factor(c(rep('Promoter', nrow(promoters)), rep('Lowscore', nrow(lowscore))))
promoters_vs_lowscore <- cbind(factor_to_promoters_vs_lowscore, rbind(promoters, lowscore))
factor_to_promoters_vs_non_promoters<-as.factor(c(rep('Promoter', nrow(promoters)), rep('Non_promoter', nrow(non_promoters))))
promoters_vs_non_promoters <- cbind(factor_to_promoters_vs_non_promoters, rbind(promoters, non_promoters))
factor_to_promoters_vs_islands<-as.factor(c(rep('Promoter', nrow(promoters)), rep('Island', nrow(islands))))
promoters_vs_islands <- cbind(factor_to_promoters_vs_islands, rbind(promoters, islands))
factor_to_promoters_vs_genes<-as.factor(c(rep('Promoter', nrow(promoters)), rep('Gene', nrow(genes))))
promoters_vs_genes <- cbind(factor_to_promoters_vs_genes, rbind(promoters, genes))
```
#for promoters_vs_lowscore model training and evaluation
```{r}
# 70/30 stratified split and CV setup for the promoters-vs-lowscore model.
set.seed(999)
inTraining <- createDataPartition(promoters_vs_lowscore$factor_to_promoters_vs_lowscore, p = 0.7, list = F)
training <- promoters_vs_lowscore[inTraining,]
testing <- promoters_vs_lowscore[-inTraining,]
# 10-fold CV repeated 15 times; class probabilities + twoClassSummary are
# required because train() below optimises ROC.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 15,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
```
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
```{r}
# Random forest classifier, tuned over 15 mtry values, selected by ROC AUC.
fit_promoters_vs_lowscore <- train(factor_to_promoters_vs_lowscore ~ .,
data = training,
method = "rf",#nb, lda, nbDiscrete, nbSearch
# #preProcess=c("center", "scale"),
tuneLength = 15,
trControl = fitControl,
metric = "ROC"
)
# Evaluate on the held-out 30%.
predictionClasses_promoters_vs_lowscore <- predict(fit_promoters_vs_lowscore, newdata = testing)
predictionProb_promoters_vs_lowscore <- predict(fit_promoters_vs_lowscore, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_lowscore <- confusionMatrix(data = predictionClasses_promoters_vs_lowscore, testing$factor_to_promoters_vs_lowscore)
```
For promoters_vs_non_promoters
```{r}
# Same pipeline for promoters vs non-promoters: 70/30 split, 10-fold CV
# repeated 3 times, naive Bayes model selected by ROC AUC.
set.seed(999)
inTraining <- createDataPartition(promoters_vs_non_promoters$factor_to_promoters_vs_non_promoters, p = 0.7, list = F)
training <- promoters_vs_non_promoters[inTraining,]
testing <- promoters_vs_non_promoters[-inTraining,]
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
fit_promoters_vs_non_promoters <- train(factor_to_promoters_vs_non_promoters ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
##preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
predictionClasses_promoters_vs_non_promoters <- predict(fit_promoters_vs_non_promoters, newdata = testing)
predictionProb_promoters_vs_non_promoters <- predict(fit_promoters_vs_non_promoters, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_non_promoters <- confusionMatrix(data = predictionClasses_promoters_vs_non_promoters, testing$factor_to_promoters_vs_non_promoters)
```
#for promoters_vs_islands
```{r}
# Same pipeline for promoters vs promoter islands (naive Bayes, ROC metric).
set.seed(999)
inTraining <- createDataPartition(promoters_vs_islands$factor_to_promoters_vs_islands, p = 0.7, list = F)
training <- promoters_vs_islands[inTraining,]
testing <- promoters_vs_islands[-inTraining,]
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
fit_promoters_vs_islands <- train(factor_to_promoters_vs_islands ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
##preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
predictionClasses_promoters_vs_islands <- predict(fit_promoters_vs_islands, newdata = testing)
predictionProb_promoters_vs_islands <- predict(fit_promoters_vs_islands, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_islands <- confusionMatrix(data = predictionClasses_promoters_vs_islands, testing$factor_to_promoters_vs_islands)
```
#for promoters_vs_genes
```{r}
# Same pipeline for promoters vs genes (naive Bayes, ROC metric).
set.seed(999)
inTraining <- createDataPartition(promoters_vs_genes$factor_to_promoters_vs_genes, p = 0.7, list = F)
training <- promoters_vs_genes[inTraining,]
testing <- promoters_vs_genes[-inTraining,]
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
fit_promoters_vs_genes <- train(factor_to_promoters_vs_genes ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
#preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
predictionClasses_promoters_vs_genes <- predict(fit_promoters_vs_genes, newdata = testing)
predictionProb_promoters_vs_genes <- predict(fit_promoters_vs_genes, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_genes <- confusionMatrix(data = predictionClasses_promoters_vs_genes, testing$factor_to_promoters_vs_genes)
```
Mean values for used properties for sequences sets
```{r}
# Mean (and median) property values per sequence class.
# NOTE(review): the grep()+ls() lookup is fragile — it depends on which
# variables exist at this point and on ls()'s alphabetical order; the
# c(1,5,2,3,4) reindex maps that order to the display order
# Promoters, Non-promoters, Genes, Islands, Lowscore.
# FIX: the stray leading '*' (a glob, not regex, idiom) is dropped from all
# four patterns; the anchored suffix match is unchanged.
aes<-grep('aeos1$', ls(), value = T)[c(1,5,2,3,4)]
meanaes<-c()
medianaes<-c()  # FIX: medianaes was never computed, so names(medianaes)<-... errored
for (i in aes) {
meanaes<-c(meanaes, mean(get(i)))
medianaes<-c(medianaes, median(get(i)))
}
names(meanaes)<-c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
names(medianaes)<-c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
ds<-grep('aeos3$', ls(), value = T)[c(1,5,2,3,4)]
meands<-c()
for (i in ds) {
meands<-c(meands, mean(get(i)))
}
names(meands)<-c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
eps<-grep('mpots$', ls(), value = T)[c(1,5,2,3,4)]
meaneps<-c()
for (i in eps) {
meaneps<-c(meaneps, mean(get(i)))
}
names(meaneps)<-c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
gcs<-grep('gc200$', ls(), value = T)[c(1,5,2,3,4)]
meangcs<-c()
for (i in gcs) {
meangcs<-c(meangcs, mean(get(i)))
}
names(meangcs)<-c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
```
Islands cluster analysis
```{r}
library(fastcluster)
# Ward clustering of the island rows only (rows 6007:8234 of the PC scores),
# then cut into 2 clusters and compare their property distributions.
islclust<-hclust.vector(princ.return.5comps.4props$x[6007:8234,], method = 'ward')
plot(islclust, labels=F)
cutisl<-cutree(islclust, k=2)
# Colour only the two island clusters; every other sequence class gets 1 (black).
habillage2isl.clusts<-c(rep(1, length(exp_names)), rep (1, length(dataset_notpro)), rep(1, length(dataset_gen)), rep('red', length(which(cutisl==1))), rep('blue', length(which(cutisl==2))), rep(1, 2000))
# NOTE(review): this assumes cutree's cluster labels are contiguous in row
# order, which is generally NOT true — the red/blue blocks may not line up
# with the actual cluster members; verify before interpreting these plots.
fviz_pca_ind(princ.return.5comps.4props, label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
fviz_pca_ind(princ.return.5comps.4props, axes = c(1,3), label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
fviz_pca_ind(princ.return.5comps.4props, axes = c(2,3), label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
plot3d(princ.return.5comps.4props$x[,1:3], col=(habillage2isl.clusts), size=2, cex.lab=1.8)
# Per-cluster summaries of each property matrix.
summary(as.vector(islaeos1[which(cutisl==1),]))
summary(as.vector(islaeos1[which(cutisl==2),]))
isl_props<-grep('isl', ls(), value=T)
# Boxplots per cluster on a common y range.
lmts <- range(as.vector(islaeos1[which(cutisl==1),]),
as.vector(islaeos1[which(cutisl==2),]))
par(mfrow = c(1, 2))
boxplot(as.vector(islaeos1[which(cutisl==1),]),ylim=lmts)
boxplot(as.vector(islaeos1[which(cutisl==2),]),ylim=lmts)
lmts <- range(as.vector(islaeos3[which(cutisl==1),]),
as.vector(islaeos3[which(cutisl==2),]))
par(mfrow = c(1, 2))
boxplot(as.vector(islaeos3[which(cutisl==1),]),ylim=lmts)
boxplot(as.vector(islaeos3[which(cutisl==2),]),ylim=lmts)
summary(as.vector(islaeos3[which(cutisl==1),]))
summary(as.vector(islaeos3[which(cutisl==2),]))
summary(as.vector(islmpots[which(cutisl==1),]))
summary(as.vector(islmpots[which(cutisl==2),]))
summary(as.vector(islgc200[which(cutisl==1),]))
summary(as.vector(islgc200[which(cutisl==2),]))
summary(isltsss[which(cutisl==1)])
summary(isltsss[which(cutisl==2)])
table(islstrands[which(cutisl==1)])
table(islstrands[which(cutisl==2)])
# Per-cluster profile traces on a common y range.
lmts <- range(as.vector(islaeos1[which(cutisl==1),]),
as.vector(islaeos1[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islaeos1[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islaeos1[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islaeos3[which(cutisl==1),]),
as.vector(islaeos3[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islaeos3[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islaeos3[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islmpots[which(cutisl==1),]),
as.vector(islmpots[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islmpots[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islmpots[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islgc200[which(cutisl==1),]),
as.vector(islgc200[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islgc200[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islgc200[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
```
# # # UNSUPERVISED MACHINE LEARNING - CLUSTER ANALYSIS
```{r}
library(fastcluster)
# Ward clustering of all sequences in PC space (first 100 PCs).
ward.princ.return.5comps.4props<-hclust.vector(princ.return.5comps.4props$x[,1:100], method = 'ward')
library(NbClust)
# Cluster-count indices known to run on this data; each proposes a best k.
indices_that_works<-c("kl", "ch", "hartigan", "cindex", "db", "silhouette", "ratkowsky", "ball", "ptbiserial", "frey", "mcclain", "dunn", "sdbw")
all_for_5comp<-c()
for (i in indices_that_works) {
print(i)
siddres<-NbClust(data = princ.return.5comps.4props$x[,1:100], diss = NULL, distance = "euclidean", min.nc = 2, max.nc = 22, method = 'ward.D2', index = i)
assign(paste0('sidd_nbres_',i), siddres)
all_for_5comp<-c(all_for_5comp, siddres)
print(siddres$Best.nc[1])
}
library(dendextend)
# Colour every leaf branch (k = number of rows) by its sequence-class colour,
# reordered to the dendrogram's leaf order.
d1=color_branches(as.dendrogram(ward.princ.return.5comps.4props), k=nrow(princ.return.5comps.4props$x[,1:100]), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))])
d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
png('ward_on_pca_4_prop_5_components_circlized.png', height=3000, width=3000, res=300)
# Raise the dendrogram so short branches remain visible in the circular layout.
raised_d1<-raise.dendrogram(d1, 7000)
#raised_d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
dev.off()
png('ward_on_pca_5_prop_4_components_circlized_raised.png', height=3000, width=3000, res=300)
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
##plot(raised_d1, main="Ward's method clustering for PCA results \n(promoters, non-promoters, genes, promoter islands, and lowscore sequences", labels=F)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage.pro.not.isl.gen))
dev.off()
```
separately for properties
```{r}
library(fastcluster)
# NOTE(review): `aeos` is not defined anywhere in this file — this line looks
# like an unfinished copy of the previous chunk that was meant to cluster a
# single property matrix; confirm the intended input before running.
ward.princ.return.5comps.4props<-hclust.vector(aeos$x[,1:100], method = 'ward')
library(NbClust)
indices_that_works<-c("kl", "ch", "hartigan", "cindex", "db", "silhouette", "ratkowsky", "ball", "ptbiserial", "frey", "mcclain", "dunn", "sdbw")
all_for_5comp<-c()
for (i in indices_that_works) {
print(i)
siddres<-NbClust(data = princ.return.5comps.4props$x[,1:100], diss = NULL, distance = "euclidean", min.nc = 2, max.nc = 22, method = 'ward.D2', index = i)
assign(paste0('sidd_nbres_',i), siddres)
all_for_5comp<-c(all_for_5comp, siddres)
print(siddres$Best.nc[1])
}
library(dendextend)
d1=color_branches(as.dendrogram(ward.princ.return.5comps.4props), k=nrow(princ.return.5comps.4props$x[,1:100]), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))])
d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
# NOTE(review): these output filenames repeat the previous chunk's names and
# will overwrite its images.
png('ward_on_pca_4_prop_5_components_circlized.png', height=3000, width=3000, res=300)
raised_d1<-raise.dendrogram(d1, 14000)
##plot(hang.dendrogram(raised_d1), horiz = TRUE)
#raised_d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
dev.off()
png('ward_on_pca_5_prop_4_components_circlized_raised.png', height=3000, width=3000, res=300)
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
##plot(raised_d1, main="Ward's method clustering for PCA results \n(promoters, non-promoters, genes, promoter islands, and lowscore sequences", labels=F)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage.pro.not.isl.gen))
dev.off()
```
```{r}
#poster
XX<-(-540):(179)
plot(XX, mpots[1,], ty='l', lwd=1.5, ylab='EP value', xlab='Sequence (angstrom)', cex.lab=1.4 )
abline(v=0, lty=2)
X<--150:50
plot(X, aeos1[1,], ty='l', lwd=1.5, ylab='Activation energy (kcal/mol)', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
plot(X, aeos3[1,], ty='l', lwd=1.5, ylab='Size of open state (nts)', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
plot(X, gc200[1,], ty='l', lwd=1.5, ylab=' GC-content for 200 b.p.', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
plot(1:100, princ.return.5comps.4props$x[1,1:100], ty='l', lwd=2, xlab='PCs most contibuting to variance', ylab='Relative units', cex.lab=1.4, col='red' )
#plot 3d for 3 main PCs colored according to cluster analysis
plot3d(princ.return.5comps.4props$x[,1:3], col=habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))], size=2.5)
#d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
#confusionMatrix visualization
par(mfrow=c(2,2))
fourfoldplot(confusionMatrix_promoters_vs_lowscore$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs lowscore")
fourfoldplot(confusionMatrix_promoters_vs_non_promoters$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs non-promoters")
fourfoldplot(confusionMatrix_promoters_vs_genes$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs genes")
fourfoldplot(confusionMatrix_promoters_vs_islands$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs islands")
ward_all_aeos1<-hclust.vector(to_pca_5components_4props[, 1:ncol(aeos1)], method = 'ward')
ward_all_aeos3<-hclust.vector(to_pca_5components_4props[, 202:(202+201)], method = 'ward')
ward_all_mpots<-hclust.vector(to_pca_5components_4props[, 403:(403+720)], method = 'ward')
ward_all_gc200<-hclust.vector(to_pca_5components_4props[,(402+721):ncol(to_pca_5components_4props)], method = 'ward')
library(dendextend)
d1aeos1=color_branches(as.dendrogram(ward_all_aeos1), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_aeos1))])
raised_d1aeos1<-raise.dendrogram(d1aeos1, 8000)
d1aeos3=color_branches(as.dendrogram(ward_all_aeos3), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_aeos3))])
raised_d1aeos3<-raise.dendrogram(d1aeos3, 700)
d1mpots=color_branches(as.dendrogram(ward_all_mpots), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_mpots))])
raised_d1mpots<-raise.dendrogram(d1mpots, 700)
d1gc200=color_branches(as.dendrogram(ward_all_gc200), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_gc200))])
raised_d1gc200<-raise.dendrogram(d1gc200, 700)
svg('ward_on_pca_4_prop_5_components_circlized_raised_all_4.png', height=15, width=15)
par(mfrow=c(2,2))
circlize_dendrogram(raised_d1aeos1, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1aeos3, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1mpots, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1gc200, labels=F, dend_track_height = 0.95)
dev.off()
png('ward_on_pca_4_prop_4_components_circlized.png', height=3000, width=3000, res=300)
```
# # # GC3 calculation
#function to calculate GC-3
library(seqinr)
# GC content computed over first codon positions only (positions 1, 4, 7, ...).
# `s` is a character vector of single nucleotides; relies on seqinr::GC.
GC1 <- function(s) {
  first_positions <- seq(1, length(s), 3)
  GC(s[first_positions])
}
# GC content computed over second codon positions only (positions 2, 5, 8, ...).
GC2 <- function(s) {
  second_positions <- seq(2, length(s), 3)
  GC(s[second_positions])
}
# GC content computed over third codon positions only (positions 3, 6, 9, ...).
GC3 <- function(s) {
  third_positions <- seq(3, length(s), 3)
  GC(s[third_positions])
}
# GC content at each of the three codon positions; returns an unnamed
# 3-element list (position 1, 2, 3), matching the original return shape.
GC1_2_3 <- function(s) {
  codon_position <- function(start) s[seq(start, length(s), 3)]
  list(GC(codon_position(1)), GC(codon_position(2)), GC(codon_position(3)))
}
#GC by codons spread calculation
# Spread (max - min) of GC content across the three codon positions.
# FIX: the original returned range[1] - range[2], i.e. min - max, which is
# always <= 0; the inline computations elsewhere in this script use
# range[2] - range[1], so the positive convention is restored here.
# `seq` is a character vector of single nucleotides.
GC_spread <- function (seq) {
library(zoo)
library(seqinr)
rng <- range(GC1(seq), GC2(seq), GC3(seq))
return(rng[2] - rng[1])
}
# Rolling-window GC spread: applies GC_spread over windows of width
# `interval` along the nucleotide vector `seq` via zoo::rollapply.
rollGCspread <- function (seq, interval) {
library(zoo)
library(seqinr)
rollapply(seq, interval, GC_spread)
}
#sapply(dataset_gen, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
# Per-class GC spread (max - min over the three codon positions) for genes,
# promoters, islands, lowscore and non-promoter sequences, plus the whole
# genome, then side-by-side boxplots.
# NOTE(review): range() is applied to the *list* returned by GC1_2_3 —
# confirm this coerces as intended on the R version in use.
#sapply(dataset_gen, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
gen_allseqs<-sapply(dataset_gen, function(x) {return((strsplit(x$seq, '')))})
#gc1_gen_allseqs<-sapply(gen_allseqs, GC1)
#gc2_gen_allseqs<-sapply(gen_allseqs, GC2)
#gc3_gen_allseqs<-sapply(gen_allseqs, GC3)
spread_gc1_2_3_gen<-c()
for (i in seq_along(gen_allseqs)){
spread_gc1_2_3_gen<-c(spread_gc1_2_3_gen, (range(GC1_2_3(gen_allseqs[[i]]))[2] - range(GC1_2_3(gen_allseqs[[i]]))[1]))
}
#for seqs - ALL promoters
#sapply(dataset_pro, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
pro_allseqs<-sapply(dataset_pro, function(x) {return((strsplit(x$seq, '')))})
spread_gc1_2_3_pro<-c()
for (i in seq_along(pro_allseqs)){
spread_gc1_2_3_pro<-c(spread_gc1_2_3_pro, (range(GC1_2_3(pro_allseqs[[i]]))[2] - range(GC1_2_3(pro_allseqs[[i]]))[1]))
}
#for isl_seqs - islands
#sapply(dataset_pro, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
isl_allseqs<-sapply(dataset_isl, function(x) {return((strsplit(x$seq, '')))})
spread_gc1_2_3_isl<-c()
for (i in seq_along(isl_allseqs)){
spread_gc1_2_3_isl<-c(spread_gc1_2_3_isl, (range(GC1_2_3(isl_allseqs[[i]]))[2] - range(GC1_2_3(isl_allseqs[[i]]))[1]))
}
#for lowscore_seqs - lowscore
#sapply(dataset_pro, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
lowscore_allseqs<-sapply(dataset_lowscore, function(x) {return((strsplit(x$seq, '')))})
spread_gc1_2_3_lowscore<-c()
for (i in seq_along(lowscore_allseqs)){
spread_gc1_2_3_lowscore<-c(spread_gc1_2_3_lowscore, (range(GC1_2_3(lowscore_allseqs[[i]]))[2] - range(GC1_2_3(lowscore_allseqs[[i]]))[1]))
}
# NOTE(review): the islands block below is an exact duplicate of the one
# above — it recomputes isl_allseqs/spread_gc1_2_3_isl with no effect and
# can be removed.
#for isl_seqs - islands
#sapply(dataset_pro, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
isl_allseqs<-sapply(dataset_isl, function(x) {return((strsplit(x$seq, '')))})
spread_gc1_2_3_isl<-c()
for (i in seq_along(isl_allseqs)){
spread_gc1_2_3_isl<-c(spread_gc1_2_3_isl, (range(GC1_2_3(isl_allseqs[[i]]))[2] - range(GC1_2_3(isl_allseqs[[i]]))[1]))
}
#for notpro
#sapply(dataset_pro, function(x) {return(GC1_2_3(strsplit(x$seq, '')))})
not_allseqs<-sapply(dataset_notpro , function(x) {return((strsplit(x$seq, '')))})
spread_gc1_2_3_not<-c()
for (i in seq_along(not_allseqs)){
spread_gc1_2_3_not<-c(spread_gc1_2_3_not, (range(GC1_2_3(not_allseqs[[i]]))[2] - range(GC1_2_3(not_allseqs[[i]]))[1]))
}
#for complete genome
GC3_total_genome<-range(GC1_2_3(e.coli_U00096.2_char))[2] - range(GC1_2_3(e.coli_U00096.2_char))[1]
# Boxplots on a common y range, one panel per class.
lmts<-range(spread_gc1_2_3_pro, spread_gc1_2_3_isl, spread_gc1_2_3_gen, spread_gc1_2_3_lowscore, spread_gc1_2_3_not )
#lmts<-c(0,0.45)
par(mfrow=c(1,5))
boxplot(spread_gc1_2_3_pro, ylim = lmts)
boxplot(spread_gc1_2_3_isl, ylim = lmts)
boxplot(spread_gc1_2_3_gen, ylim = lmts)
boxplot(spread_gc1_2_3_lowscore, ylim = lmts)
boxplot(spread_gc1_2_3_not, ylim = lmts)
mtext('GC spread by triplets for promoters, islands, genes, and lowscore sequences', at=-1.5, line=1, cex=1.5 )
---
title: "Machine learning: 5 components 4 properties after PCA.rmd"
author: "Mikhail Orlov"
output: pdf_document
---
Clearing workspace, setting working directory and loading R packages
```{r}
# Fresh session state: clear the workspace and move to the analysis folder.
# NOTE(review): rm(list = ls()) / setwd() are discouraged in shared scripts;
# kept because the whole notebook assumes an empty global environment.
rm(list=ls())
setwd('/home/mikhail/Documents/Script_2016_all/PCA/')
library(R.matlab)    # MATLAB file I/O
library(seqinr)      # read.fasta
library(factoextra)  # PCA visualisation helpers (fviz_*, get_eigenvalue)
library(data.table)
library(caret)       # supervised learning (train, confusionMatrix)
library(doMC)        # parallel backend used by caret (registerDoMC)
library(Biostrings)  # DNAString, reverseComplement
library(reldna)      # lseqspline1D electrostatic potential profiles
```
Reading in the E. coli K-12 genome (GenBank Accession U00096.2), creating the reverse-complement genome, and transforming both into character-vector form
```{r}
# Load the E. coli K-12 genome (single FASTA record) as one string, build
# its reverse complement, and keep per-character vectors of both for the
# windowed property calculations below.
e.coli_U00096.2<-unlist(read.fasta('e.coli_U00096.2.fasta', seqonly = T))
reverseComplement_e.coli_U00096.2<-as.character(reverseComplement(DNAString(e.coli_U00096.2)))
e.coli_U00096.2_char<-unlist(strsplit(e.coli_U00096.2, ''))
reverseComplement_e.coli_U00096.2_char<-unlist(strsplit(reverseComplement_e.coli_U00096.2, ''))
```
Dynamical properties calculation from within R using octave script translated according to Grinevich et al., 2013
Creating function for dynamical properties calculation
```{r}
dynchars <- function(seq, interval_size) {
  # Windowed DNA dynamical properties (after Grinevich et al., 2013) for
  # both strands.  For every window of `interval_size` bases the base
  # composition is mapped, through per-nucleotide constants (order A,T,G,C),
  # to: E0 (activation energy), d (size), c, m, taus and gc (GC share).
  # Returns a list of forward-strand (*1) and complementary-strand (*2)
  # profiles, one value per genomic position.
  if (missing(seq))
    stop("Need to specify sequence (as a vector of chars)")
  if (missing(interval_size))
    stop("Need to specify interval size")
  if (!is.character(seq))
    stop("Sequence must be a character vector containing A, C, G, T letters only")

  seq <- toupper(seq)
  # Circular wrap-around: append the first window minus its first base so
  # that every genomic position gets a value.
  seq <- c(seq, seq[2:(interval_size)])

  # Per-nucleotide physical constants, ordered A, T, G, C (kept in the
  # original arithmetic form so the values are bit-identical).
  a <- 3.4*10^(-10)
  I <- c(7.6, 4.8, 8.2, 4.1)*10^(-44)
  K <- c(227, 155, 220, 149)*10^(-20)
  V <- c(2.09, 1.43, 3.12, 2.12)*10^(-20)
  tau <- c(127, 99, 140, 84)

  # Sliding-window count of one base over the (extended) sequence,
  # computed from cumulative sums.
  windowed <- function(base) {
    cs <- cumsum(seq == base)
    cs[interval_size:length(cs)] - c(0, cs[1:(length(cs) - interval_size)])
  }

  # Rows = windows; columns = A/T/G/C fractions.
  M <- cbind(countA = windowed('A'), countT = windowed('T'),
             countG = windowed('G'), countC = windowed('C')) / interval_size
  # Complementary strand: swap A<->T and G<->C, then reverse the window
  # order (equivalent to the original apply(t(.), 1, rev)).
  M_comp <- cbind(countT = M[, 2], countA = M[, 1],
                  countC = M[, 4], countG = M[, 3])
  M_comp <- M_comp[nrow(M_comp):1, , drop = FALSE]

  # Derive the physical profiles from a composition matrix.
  derive <- function(comp) {
    Is <- as.numeric(comp %*% I)
    Ks <- as.numeric(comp %*% K)
    Vs <- as.numeric(comp %*% V)
    E0 <- (8*(Ks*Vs)^0.5)* 6E23 / 4184
    d  <- ((Ks*a^2)/Vs)^(0.5)/a
    cc <- (Ks*a^2/Is)^0.5
    m  <- E0/cc/6.011E-26
    taus <- as.numeric(comp %*% tau)
    gc <- comp[, 3] + comp[, 4]
    list(E0 = E0, d = d, c = cc, m = m, taus = taus, gc = gc)
  }

  fwd <- derive(M)
  rcp <- derive(M_comp)
  list(E01 = fwd$E0, d1 = fwd$d, c1 = fwd$c, m1 = fwd$m, taus1 = fwd$taus, gc1 = fwd$gc,
       E02 = rcp$E0, d2 = rcp$d, c2 = rcp$c, m2 = rcp$m, taus2 = rcp$taus, gc2 = rcp$gc)
}
```
Calculation the properties for a given genome using sliding window 200 nt
```{r}
# Genome-wide dynamical property profiles, sliding window of 200 nt.
dynchars_output<-dynchars(e.coli_U00096.2_char, 200)
# Suffix 1 = forward strand, suffix 2 = complementary strand.
E01<-dynchars_output$E01
E02<-dynchars_output$E02
d1<-dynchars_output$d1
d2<-dynchars_output$d2
gc200matlab1<-dynchars_output$gc1 #name of the variable is from before (MATLAB version)
gc200matlab2<-dynchars_output$gc2 #name of the variable is from before (MATLAB version)
```
Loading data on sequences of different types (promoters, non-promoters, genes, islands, and lowscore) from .Rdata files (which must be copied separately)
```{r}
# Datasets of the five sequence classes: lists of records with $tss, $seq,
# $strand (and, where applicable, $evidence / $mpot) fields, prepared
# elsewhere.
load('spline_dataset_pro.Rdata')
load('spline_dataset_notpro.Rdata')
load('spline_dataset_gen.Rdata')
load('spline_dataset_isl.Rdata')
load('dataset_lowscore.Rdata')
##dataset_notpro[[1488]]<-NULL # too close to the left flank
```
Check-up for reverse strand
```{r, echo=FALSE}
# Sanity check for the reverse strand: island isl2228's sequence should be
# found in the reverse complement at the position implied by its TSS.
gregexpr(toupper(dataset_isl$isl2228$seq), reverseComplement_e.coli_U00096.2)
nchar(e.coli_U00096.2)-dataset_isl$isl2228$tss-270
```
Extracting data on all promoters and on experimentally found ones - including previously calculated electrostatic potential profiles
```{r}
pro_names<-names(dataset_pro)
# All promoters: TSS coordinates and sequences.  Vectorised with vapply;
# the original grew these vectors with c()/rbind inside a loop (O(n^2)).
tsss <- unname(vapply(dataset_pro, function(p) p$tss, numeric(1)))
seqs <- unname(vapply(dataset_pro, function(p) p$seq, character(1)))
# Subset with experimental evidence only.
is_exp <- unname(vapply(dataset_pro, function(p) p$evidence == 'experimental', logical(1)))
exp_names <- pro_names[is_exp]
exp_tsss <- tsss[is_exp]
exp_strands <- unname(vapply(dataset_pro[is_exp], function(p) p$strand, character(1)))
# Previously computed electrostatic potential profiles, one row per
# experimental promoter (for cross-checking the recomputed ones below).
exp_mpots_check <- do.call(rbind, lapply(unname(dataset_pro[is_exp]), function(p) p$mpot))
```
# # # Changing genomic coordinate used for dataset_... - TSS is according to 5'-end for both strands
```{r}
# Reverse-strand promoters: express the TSS coordinate from the 5' end of
# the reverse-complement sequence.
rev_pro <- which(exp_strands == 'reverse')
exp_tsss[rev_pro] <- nchar(e.coli_U00096.2) - exp_tsss[rev_pro]
```
Creating matrices for data on activation energy ('aeos1') and size ('aeos3'). Each matrix is 699*201 - 699 promoter sequences, with 201 values per physical property profile. The TSS coordinate refers to the 5'-end for both strands.
Creating matrices of dynamical properties and GC-content for intervals [-150; 50] nts
Creating matrices of dynamical properties and GC-content for intervals [-150; 50] nts
```{r}
# Initialise per-strand accumulator matrices (NULL, so the first rbind()
# simply keeps its row).
promoter_accumulators <- c('aeos1forward', 'aeos1reverse', 'aeos3forward', 'aeos3reverse',
                           'gc200forward', 'gc200reverse', 'mpotsforward', 'mpotsreverse')
for (nm in promoter_accumulators) {
  assign(nm, c())
}
# Spline-grid positions at which lseqspline1D() reports the potential.
zout <- -480:239
# For every experimental promoter, collect four property profiles over the
# window [-150; +50] around its TSS (201 values per row):
#   aeos1* - activation energy (E01/E02), aeos3* - size (d1/d2),
#   gc200* - GC content, mpots* - electrostatic potential (lseqspline1D,
#   sampled at the zout grid positions).
# Forward promoters read forward-strand profiles; reverse promoters read
# the complementary-strand profiles on the reverse complement genome.
for (i in 1:length(exp_tsss)) {
if (exp_strands[i]=='forward') {
# NOTE(review): here as.numeric() wraps the whole left index expression,
# unlike the neighbouring lines - numerically equivalent.
aeos1forward<-rbind(aeos1forward, E01[as.numeric(exp_tsss[i]-150):(exp_tsss[i]+50)])
#aeos2forward<-rbind(aeos2forward, matr1[(as.numericexp_tsss[i]-150):(as.numericexp_tsss[i]+50),2])
aeos3forward<-rbind(aeos3forward, d1[(as.numeric(exp_tsss[i])-150):(exp_tsss[i]+50)])
#aeos4forward<-rbind(aeos4forward, matr1[(as.numeric(exp_proms[i,3])-150):(as.numeric(exp_proms[i,3])+50),4])
gc200forward<-rbind(gc200forward, gc200matlab1[(as.numeric(exp_tsss[i])-150):(as.numeric(exp_tsss[i])+50)])
# Potential: spline over [-250; +150] around the TSS, keep zout points.
p<-lseqspline1D(substr(e.coli_U00096.2, exp_tsss[i]-250, exp_tsss[i]+150), bound=c(50, 350), ref=251 )
mpotsforward<-rbind(mpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
aeos1reverse<-rbind(aeos1reverse, E02[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)])
#aeos2reverse<-rbind(aeos2reverse, matr1[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150),2])
aeos3reverse<-rbind(aeos3reverse, d2[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)])
#aeos4reverse<-rbind(aeos4reverse, matr1[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150),4])
gc200reverse<-rbind(gc200reverse, (gc200matlab2[(as.numeric(exp_tsss[i])-50):(as.numeric(exp_tsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, exp_tsss[i]-250, exp_tsss[i]+150), bound=c(50, 350), ref=251 )
mpotsreverse<-rbind(mpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
#merging matrices for forward and reverse strands together
aeos1<-rbind(aeos1forward, aeos1reverse)
#aeos2<-rbind(aeos2forward, aeos2reverse)
aeos3<-rbind(aeos3forward, aeos3reverse)
#aeos4<-rbind(aeos4forward, aeos4reverse)
gc200<-rbind(gc200forward, gc200reverse)
mpots<-rbind(mpotsforward, mpotsreverse)
```
Extracting data for non-promoters. The 1488th sequence is removed since it lies only 188 nt from the flank, so its electrostatic potential cannot be calculated.
```{r}
notpro_names<-names(dataset_notpro)
# Non-promoters: TSS, sequence (upper-cased) and strand.  Vectorised; the
# original loop grew the vectors with c() and re-ran toupper() over the
# whole accumulated vector on every iteration (O(n^2)).
nottsss <- unname(vapply(dataset_notpro, function(p) p$tss, numeric(1)))
notseqs <- toupper(unname(vapply(dataset_notpro, function(p) p$seq, character(1))))
notstrands <- unname(vapply(dataset_notpro, function(p) p$strand, character(1)))
# # # no reverse strand in this set, so no coordinate flip is needed.
```
Matrices for physical properties profiles - creating matrices for data on activation energy ('aeos1') and size ('aeos3')
```{r}
# Empty per-strand accumulators for the non-promoter property matrices.
notpro_accumulators <- c('notaeos1forward', 'notaeos1reverse', 'notaeos3forward', 'notaeos3reverse',
                         'notgc200forward', 'notgc200reverse', 'notmpotsforward', 'notmpotsreverse')
for (nm in notpro_accumulators) {
  assign(nm, c())
}
# Same four property profiles for non-promoters (forward strand only).
# Special case: if the TSS is closer than 250 nt to the genome start, the
# [-250; +150] potential window would run off the left flank; the genome is
# therefore circularly extended with its last 200 nt (spec_for1488) and all
# coordinates shifted by +200.
for (i in 1:length(nottsss)) {
if (notstrands[i]=='forward') {
if (nottsss[i]<250) {
notaeos1forward<-rbind(notaeos1forward, E01[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos2forward<-rbind(notaeos2forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),2])
notaeos3forward<-rbind(notaeos3forward, d1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos4forward<-rbind(notaeos4forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),4])
notgc200forward<-rbind(notgc200forward, gc200matlab1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
spec_for1488<-substr(
paste0(substr(e.coli_U00096.2, nchar(e.coli_U00096.2)-199, nchar(e.coli_U00096.2)), e.coli_U00096.2),
nottsss[i]-250+200, nottsss[i]+150+200)
p<-lseqspline1D(spec_for1488, bound=c(50, 350), ref=251 )
notmpotsforward<-rbind(notmpotsforward,
p$mpot[p$x %in% zout])
} else {
notaeos1forward<-rbind(notaeos1forward, E01[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos2forward<-rbind(notaeos2forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),2])
notaeos3forward<-rbind(notaeos3forward, d1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
#notaeos4forward<-rbind(notaeos4forward, matr1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50),4])
notgc200forward<-rbind(notgc200forward, gc200matlab1[(as.numeric(nottsss[i])-150):(as.numeric(nottsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, nottsss[i]-250, nottsss[i]+150), bound=c(50, 350), ref=251 )
notmpotsforward<-rbind(notmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
# } else {
# notaeos1reverse<-rbind(notaeos1reverse, E02[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)])
#notaeos2reverse<-rbind(notaeos2reverse, matr1[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150),2])
# notaeos3reverse<-rbind(notaeos3reverse, d2[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)])
#notaeos4reverse<-rbind(notaeos4reverse, matr1[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150),4])
# notgc200reverse<-rbind(notgc200reverse, (gc200matlab2[(as.numeric(nottsss[i])-50):(as.numeric(nottsss[i])+150)]))
# p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, nottsss[i]-250, nottsss[i]+150), bound=c(50, 350), ref=251 )
# notmpotsreverse-rbind(notmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
# p$mpot[p$x %in% zout])
}
}
# merging data for the two strands is not needed - the reverse-strand
# accumulators stay empty (no reverse-strand non-promoters).
notaeos1<-notaeos1forward
#notaeos2<-notaeos2forward
notaeos3<-notaeos3forward
#notaeos4<-notaeos4forward
notgc200<-notgc200forward
notmpots<-notmpotsforward
```
Data extraction for genes
```{r}
gen_names<-names(dataset_gen)
# Genes: TSS, sequence (upper-cased) and strand.
# Fixes: the original initialised genseqs with `c` (the function itself)
# instead of c(), which made toupper(c(genseqs, ...)) fail; it also grew
# all three vectors with c() inside a loop (O(n^2)).
gentsss <- unname(vapply(dataset_gen, function(p) p$tss, numeric(1)))
genseqs <- toupper(unname(vapply(dataset_gen, function(p) p$seq, character(1))))
genstrands <- unname(vapply(dataset_gen, function(p) p$strand, character(1)))
```
```{r}
# Flip reverse-strand gene TSS coordinates to the 5'-end convention.
rev_gen <- which(genstrands == 'reverse')
gentsss[rev_gen] <- nchar(e.coli_U00096.2) - gentsss[rev_gen]
# Empty per-strand accumulators for the gene property matrices.
for (nm in c('genaeos1forward', 'genaeos1reverse', 'genaeos3forward', 'genaeos3reverse',
             'gengc200forward', 'gengc200reverse', 'genmpotsforward', 'genmpotsreverse')) {
  assign(nm, c())
}
# Same four property profiles for genes, window [-150; +50] around the TSS,
# with strand-specific sources as in the promoter loop above.
for (i in 1:length(dataset_gen)) {
if (genstrands[i]=='forward') {
genaeos1forward<-rbind(genaeos1forward, E01[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
# genaeos2forward<-rbind(genaeos2forward, matr1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50),2])
genaeos3forward<-rbind(genaeos3forward, d1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
# genaeos4forward<-rbind(genaeos4forward, matr1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50),4])
gengc200forward<-rbind(gengc200forward, gc200matlab1[(as.numeric(gentsss[i])-150):(as.numeric(gentsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, gentsss[i]-250, gentsss[i]+150), bound=c(50, 350), ref=251 )
genmpotsforward<-rbind(genmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
genaeos1reverse<-rbind(genaeos1reverse, E02[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)])
# genaeos2reverse<-rbind(genaeos2reverse, matr1[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150),2])
genaeos3reverse<-rbind(genaeos3reverse, d2[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)])
#genaeos4reverse<-rbind(genaeos4reverse, matr1[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150),4])
gengc200reverse<-rbind(gengc200reverse, (gc200matlab2[(as.numeric(gentsss[i])-50):(as.numeric(gentsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, gentsss[i]-250, gentsss[i]+150), bound=c(50, 350), ref=251 )
genmpotsreverse<-rbind(genmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
# Merge forward and reverse strand rows.
genaeos1<-rbind(genaeos1forward, genaeos1reverse)
#genaeos2<-rbind(genaeos2forward, genaeos2reverse)
genaeos3<-rbind(genaeos3forward, genaeos3reverse)
#genaeos4<-rbind(genaeos4forward, genaeos4reverse)
gengc200<-rbind(gengc200forward, gengc200reverse)
genmpots<-rbind(genmpotsforward, genmpotsreverse)
```
Islands: data extraction
```{r}
isl_names<-names(dataset_isl)
# Promoter islands: TSS, sequence (upper-cased) and strand.
# Fixes: islseqs was initialised with `c` (the function itself) instead of
# c(), which made toupper(c(islseqs, ...)) fail; vectors were grown with
# c() inside a loop (O(n^2)).
isltsss <- unname(vapply(dataset_isl, function(p) p$tss, numeric(1)))
islseqs <- toupper(unname(vapply(dataset_isl, function(p) p$seq, character(1))))
islstrands <- unname(vapply(dataset_isl, function(p) p$strand, character(1)))
# # #
# Flip reverse-strand island TSS coordinates to the 5'-end convention.
isltsss[which(islstrands=='reverse')]<-nchar(e.coli_U00096.2)-isltsss[which(islstrands=='reverse')]
```
```{r}
# Empty per-strand accumulators for the island property matrices.
island_accumulators <- c('islaeos1forward', 'islaeos1reverse', 'islaeos3forward', 'islaeos3reverse',
                         'islgc200forward', 'islgc200reverse', 'islmpotsforward', 'islmpotsreverse')
for (nm in island_accumulators) {
  assign(nm, c())
}
# Same four property profiles for promoter islands, window [-150; +50]
# around the TSS, strand-specific sources as in the promoter loop above.
for (i in 1:length(dataset_isl)) {
if (islstrands[i]=='forward') {
islaeos1forward<-rbind(islaeos1forward, E01[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
# islaeos2forward<-rbind(islaeos2forward, matr1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50),2])
islaeos3forward<-rbind(islaeos3forward, d1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
# islaeos4forward<-rbind(islaeos4forward, matr1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50),4])
islgc200forward<-rbind(islgc200forward, gc200matlab1[(as.numeric(isltsss[i])-150):(as.numeric(isltsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, isltsss[i]-250, isltsss[i]+150), bound=c(50, 350), ref=251 )
islmpotsforward<-rbind(islmpotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
islaeos1reverse<-rbind(islaeos1reverse, E02[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)])
# islaeos2reverse<-rbind(islaeos2reverse, matr1[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150),2])
islaeos3reverse<-rbind(islaeos3reverse, d2[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)])
#islaeos4reverse<-rbind(islaeos4reverse, matr1[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150),4])
islgc200reverse<-rbind(islgc200reverse, (gc200matlab2[(as.numeric(isltsss[i])-50):(as.numeric(isltsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, isltsss[i]-250, isltsss[i]+150), bound=c(50, 350), ref=251 )
islmpotsreverse<-rbind(islmpotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
# Merge forward and reverse strand rows.
islaeos1<-rbind(islaeos1forward, islaeos1reverse)
#islaeos2<-rbind(islaeos2forward, islaeos2reverse)
islaeos3<-rbind(islaeos3forward, islaeos3reverse)
#islaeos4<-rbind(islaeos4forward, islaeos4reverse)
islgc200<-rbind(islgc200forward, islgc200reverse)
islmpots<-rbind(islmpotsforward, islmpotsreverse)
```
Lowscore. data extraction
only first 2000 among lowscore sequences are used!
```{r}
lowscore_names<-names(dataset_lowscore)
# Accumulators for the lowscore set (filled in the next chunk).
lowscoretsss<-c()
# Fix: was `lowscoreseqs <- c`, which stored the *function* c() itself and
# made the subsequent toupper(c(lowscoreseqs, ...)) call fail.
lowscoreseqs<-c()
lowscorestrands<-c()
#lowscorempots<-c()
```
```{r}
# Only the first 2000 lowscore sequences are used.  Vectorised: the
# original loop grew the vectors with c() and re-ran toupper() over the
# whole accumulated vector on every iteration (O(n^2)).
lowscore_subset <- dataset_lowscore[1:2000]
lowscoretsss <- c(lowscoretsss, unname(vapply(lowscore_subset, function(p) p$tss, numeric(1))))
lowscoreseqs <- toupper(c(lowscoreseqs, unname(vapply(lowscore_subset, function(p) p$seq, character(1)))))
lowscorestrands <- c(lowscorestrands, unname(vapply(lowscore_subset, function(p) p$strand, character(1))))
# # #
# Flip reverse-strand TSS coordinates to the 5'-end convention.
lowscoretsss[which(lowscorestrands=='reverse')]<-nchar(e.coli_U00096.2)-lowscoretsss[which(lowscorestrands=='reverse')]
```
```{r}
# Empty per-strand accumulators for the lowscore property matrices.
lowscore_accumulators <- c('lowscoreaeos1forward', 'lowscoreaeos1reverse',
                           'lowscoreaeos3forward', 'lowscoreaeos3reverse',
                           'lowscoregc200forward', 'lowscoregc200reverse',
                           'lowscorempotsforward', 'lowscorempotsreverse')
for (nm in lowscore_accumulators) {
  assign(nm, c())
}
```
only first 2000 among lowscore sequences are used!
```{r}
# Same four property profiles for the first 2000 lowscore sequences,
# window [-150; +50] around the TSS, strand-specific sources as above.
for (i in 1:2000) {
if (lowscorestrands[i]=='forward') {
lowscoreaeos1forward<-rbind(lowscoreaeos1forward, E01[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
# lowscoreaeos2forward<-rbind(lowscoreaeos2forward, matr1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50),2])
lowscoreaeos3forward<-rbind(lowscoreaeos3forward, d1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
# lowscoreaeos4forward<-rbind(lowscoreaeos4forward, matr1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50),4])
lowscoregc200forward<-rbind(lowscoregc200forward, gc200matlab1[(as.numeric(lowscoretsss[i])-150):(as.numeric(lowscoretsss[i])+50)])
p<-lseqspline1D(substr(e.coli_U00096.2, lowscoretsss[i]-250, lowscoretsss[i]+150), bound=c(50, 350), ref=251 )
lowscorempotsforward<-rbind(lowscorempotsforward,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
} else {
lowscoreaeos1reverse<-rbind(lowscoreaeos1reverse, E02[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)])
# lowscoreaeos2reverse<-rbind(lowscoreaeos2reverse, matr1[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150),2])
lowscoreaeos3reverse<-rbind(lowscoreaeos3reverse, d2[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)])
#lowscoreaeos4reverse<-rbind(lowscoreaeos4reverse, matr1[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150),4])
lowscoregc200reverse<-rbind(lowscoregc200reverse, (gc200matlab2[(as.numeric(lowscoretsss[i])-50):(as.numeric(lowscoretsss[i])+150)]))
p<-lseqspline1D(substr(reverseComplement_e.coli_U00096.2, lowscoretsss[i]-250, lowscoretsss[i]+150), bound=c(50, 350), ref=251 )
lowscorempotsreverse<-rbind(lowscorempotsreverse,
##mpot_whole_genome_forward$mpot[mpot_whole_genome_forward$x %in% (exp_tsss[i]-150):(exp_tsss[i]+50)])
p$mpot[p$x %in% zout])
}
}
# Merge forward and reverse strand rows.
lowscoreaeos1<-rbind(lowscoreaeos1forward, lowscoreaeos1reverse)
#lowscoreaeos2<-rbind(lowscoreaeos2forward, lowscoreaeos2reverse)
lowscoreaeos3<-rbind(lowscoreaeos3forward, lowscoreaeos3reverse)
#lowscoreaeos4<-rbind(lowscoreaeos4forward, lowscoreaeos4reverse)
lowscoregc200<-rbind(lowscoregc200forward, lowscoregc200reverse)
lowscorempots<-rbind(lowscorempotsforward, lowscorempotsreverse)
```
Setting names for created datasets
```{r}
# Label the rows of every property matrix.  Consistency fix: each matrix is
# now labelled using its OWN row count (the original reused nrow(notaeos1),
# nrow(genaeos1), etc. for all matrices of a group, which would silently
# mislabel rows if the dimensions ever diverged).
label_rows <- function(m, prefix) {
  rownames(m) <- paste0(prefix, seq_len(nrow(m)))
  m
}
rownames(aeos1)<-exp_names
rownames(aeos3)<-exp_names
rownames(mpots)<-exp_names
rownames(gc200)<-exp_names
notaeos1 <- label_rows(notaeos1, 'Non_promoter_')
notaeos3 <- label_rows(notaeos3, 'Non_promoter_')
notmpots <- label_rows(notmpots, 'Non_promoter_')
notgc200 <- label_rows(notgc200, 'Non_promoter_')
genaeos1 <- label_rows(genaeos1, 'Gene_')
genaeos3 <- label_rows(genaeos3, 'Gene_')
genmpots <- label_rows(genmpots, 'Gene_')
gengc200 <- label_rows(gengc200, 'Gene_')
islaeos1 <- label_rows(islaeos1, 'Islands_')
islaeos3 <- label_rows(islaeos3, 'Islands_')
islmpots <- label_rows(islmpots, 'Islands_')
islgc200 <- label_rows(islgc200, 'Islands_')
lowscoreaeos1 <- label_rows(lowscoreaeos1, 'Lowscore_')
lowscoreaeos3 <- label_rows(lowscoreaeos3, 'Lowscore_')
lowscorempots <- label_rows(lowscorempots, 'Lowscore_')
lowscoregc200 <- label_rows(lowscoregc200, 'Lowscore_')
```
#Merging datasets. Scaling data
```{r}
# Final feature matrix: one row per sequence, columns are the four property
# profiles (E0, d, EP, GC) concatenated; row blocks in the order promoters,
# non-promoters, genes, islands, lowscore.
to_pca_5components_4props <- rbind(
  cbind(aeos1, aeos3, mpots, gc200),
  cbind(notaeos1, notaeos3, notmpots, notgc200),
  cbind(genaeos1, genaeos3, genmpots, gengc200),
  cbind(islaeos1, islaeos3, islmpots, islgc200),
  cbind(lowscoreaeos1, lowscoreaeos3, lowscorempots, lowscoregc200)
)
```
# saving the initial matrix for 5 components, 4 properties
```{r}
# Persist the raw (unscaled) 4-property feature matrix.
save(to_pca_5components_4props, file='to_pca_5components_4props.Rdata')
```
#setting sequences groups and variables groups
```{r}
# Class labels (and matching plot colours) for the rows of the feature
# matrix, in the same order the blocks were rbind-ed above.
habillage_5components_4props<-c(rep('Promoters', length(exp_names)), rep('Non-promoters', length(dataset_notpro)), rep('Genes', length(dataset_gen)), rep('Islands', length(dataset_isl)), rep('Lowscore', 2000))
habillage_5components_4props_cols<-c(rep('red', length(exp_names)), rep('blue', length(dataset_notpro)), rep('green', length(dataset_gen)), rep('yellow', length(dataset_isl)), rep('magenta', 2000))
# Drop column names so plots are not cluttered.
colnames(to_pca_5components_4props)<-NULL
# One colour per variable group: E0, d, EP (mpots), GC.
groups_variables <- c(rep('cyan', ncol(aeos1)), rep('navy', ncol(aeos3)), rep('darkgreen', ncol(mpots)), rep('darkred', ncol(gc200)))
```
#PRINCIPAL COMPONENTS ANALYSIS ITSELF
```{r}
# PCA on the feature matrix; variables are centred and scaled to unit
# variance before rotation.
princ.return.5comps.4props <- prcomp(to_pca_5components_4props, scale=T, center = T)
```
``` {r PCA variables plot using base graphics}
# Hand-rolled PCA biplot with base graphics: individuals coloured by class,
# variable loadings drawn as arrows on a correlation circle (scaled by 50
# so they are visible on the score scale).
# Helper function :
# Correlation between variables and principal components
var_cor_func <- function(var.loadings, comp.sdev){
var.loadings*comp.sdev
}
# Variable correlation/coordinates
loadings <- princ.return.5comps.4props$rotation
sdev <- princ.return.5comps.4props$sdev
var.coord <- t(apply(loadings, 1, var_cor_func, sdev))
#Graph of variables using R base graph
#INDIVIDUALS
# Contributions of individuals (computed for reference; not used in the plot)
ind.coord <- princ.return.5comps.4props$x
contrib <- function(ind.coord, comp.sdev, n.ind){
100*(1/n.ind)*ind.coord^2/comp.sdev^2
}
ind.contrib <- t(apply(ind.coord,1, contrib,
princ.return.5comps.4props$sdev, nrow(ind.coord)))
svg('/home/mikhail/Documents/Script_2016_all/PCA/custom_biplot_pca_5_comps.svg', height = 10, width = 10)
plot(ind.coord[,1], ind.coord[,2], cex=0.8, pch=1,
xlab="PC1",ylab="PC2", col=habillage_5components_4props_cols)
abline(h=0, v=0, lty = 2)
#text(ind.coord[,1], ind.coord[,2], labels=rownames(ind.coord),
#     cex=0.7, pos = 3)
# Plot the correlation circle
a <- seq(0, 2*pi, length = 100)
lines( cos(a)*50, sin(a)*50, type = 'l', col="gray", #*50 !
xlab = "PC1", ylab = "PC2")
abline(h = 0, v = 0, lty = 2)
# Add active variables
arrows(0, 0, var.coord[, 1]*50, var.coord[, 2]*50, #*50 so the size of arrows be proper
length = 0.1, angle = 15, code = 2, col = groups_variables)
# Add labels
#text(var.coord, labels=rownames(var.coord), cex = 1, adj=1)
legend('bottomright', legend = as.character(c('E0', 'd', 'EP', 'GC')), fill = unique(groups_variables), cex = 1.5)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage_5components_4props_cols))
dev.off()
```
```{r 3D variables and individuals vusualization}
# Interactive 3D scatter of the first three PCs plus loading segments.
# NOTE(review): `variance` is computed in a LATER chunk - run that chunk
# first, otherwise the axis labels fail with "object 'variance' not found".
library(rgl)
plot3d(princ.return.5comps.4props$x[,1:3], col=habillage_5components_4props_cols, size=2, xlab = paste0('PC1, approx. ', ceiling(variance[1]), '% of variance'), ylab = paste0('PC2, approx. ', ceiling(variance[2]), '% of variance'), zlab = paste0('PC3, approx. ', ceiling(variance[3]), '% of variance'))
# Loading segments: one (origin -> loading) pair of points per variable.
coords <- NULL
for (i in 1:nrow(princ.return.5comps.4props$rotation)) {
coords <- rbind(coords, rbind(c(0,0,0),princ.return.5comps.4props$rotation[i,1:3]))
}
lines3d(coords*1000, col=groups_variables, lwd=0.2)
M <- par3d("userMatrix") # #pca3d
movie3d( spin3d(), duration=15, dir="/home/mikhail/Documents/Script_2016_all/PCA/pca_movie3d/", clean = T, convert = T )
```
```{r Variables visualization}
# Correlation-circle plot of variable contributions.
svg('/home/mikhail/Documents/Script_2016_all/PCA/fviz_pca_var_princ.return.5comps.4props.svg', height = 10, width = 10)
fviz_pca_var(princ.return.5comps.4props, geom = c('arrow', 'text'), col.var="contrib")+
scale_color_gradient2(low="white", mid="white",
high="black", midpoint=0.1) + theme_minimal()
dev.off()
# Biplot (individuals + variables) with class ellipses.
# Fix: this previously wrote to the SAME file name as the variables plot
# above, silently overwriting it; it now gets its own file.
svg('/home/mikhail/Documents/Script_2016_all/PCA/fviz_pca_biplot_princ.return.5comps.4props.svg', height = 10, width = 10)
fviz_pca_biplot(princ.return.5comps.4props,
habillage = as.factor(habillage_5components_4props), addEllipses = TRUE,
col.var = rainbow(ncol(to_pca_5components_4props)), alpha.var = "contrib",
label = "var"
) +
scale_color_brewer(palette="Dark2")+
theme_minimal()
dev.off()
library(ggbiplot)
#print(ggbiplot(princ.return.5comps.4props, obs.scale = 1, var.scale = 1, groups = (habillage_5components_4props), ellipse = TRUE, circle = TRUE))
#biplot(princ.return.5comps.4props, choices = 1:2, scale = 1, pc.biplot = TRUE, col=habillage_5components_4props_cols)
```
#saving initial prcomp output
```{r}
# Persist the fitted PCA object.
save(princ.return.5comps.4props, file='princ.return.5comps.4props.Rdata')
```
prcomp output transformation
```{r}
# Explained-variance summary and cumulative variance curve, used to pick
# how many PCs to keep (100 PCs ~ 98% of the variance).
eig.val <- get_eigenvalue(princ.return.5comps.4props)  # kept for inspection
# Eigenvalues
eig <- (princ.return.5comps.4props$sdev)^2
# Variances in percentage
variance <- eig*100/sum(eig)
# Cumulative variances
cumvar <- cumsum(variance)
#png('cumvar_5components_4props.png', height=1250, width=1250, res=130)
plot(cumvar, type='l', main='Cumulative variance for principal components \n on 4 properties for 5 components mixture', ylab='Cumulative variance (%)', xlab='Principal components')
# Chosen cut-off: 100 PCs retain ~98% of the variance.
v.5comps.4props=100 # 65
h.5comps.4props=98
abline(h=h.5comps.4props, v=v.5comps.4props, col=12)
text(v.5comps.4props-15, h.5comps.4props ,paste('Number of \nPCs=', v.5comps.4props,'\n' ,h.5comps.4props, '% of \nvariance \nretained'),srt=0.2,pos=3)
#dev.off()
```
Further transformation - the chosen number of 'rotation' columns (i.e. eigenvectors) is multiplied by the initial large matrix of variables
```{r}
#pcs.from.var<-c()
#for (i in 1:v.5comps.4props) {
# load <- princ.return.5comps.4props$rotation[,i]
# pr.cp <- to_pca_5components_4props %*% load
# pr <- as.numeric(pr.cp)
# pcs.from.var<-cbind(pcs.from.var, pr)
#}
```
#saving PCA data: variable are conversed to PCs
```{r}
#rownames(pcs.from.var)<-rownames(to_pca_5components_4props)
#colnames(pcs.from.var)<-1:ncol(pcs.from.var)
```
#renaming the variable, saving the transformated output
```{r}
#mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200<- pcs.from.var
#save(mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200, file='new_no_factor_8XII_mixt_5comps_after_pca_on_4_props_ae_size_mpots_gc200.Rdata')
```
#general PCA results visualization
habillage i.e. partitions into groups
```{r}
# PCA individual plots for PC pairs (1,2), (1,3), (2,3) with 99% ellipses,
# plus interactive/animated 3D views (rgl, pca3d).
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_1_2.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_1_3.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, axes = c(1,3), label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
svg(filename = 'PCA_5_components_4_properties_not_conversion_into_variables_axes_2_3.svg', width = 10, height = 7)
fviz_pca_ind(princ.return.5comps.4props, axes = c(2,3), label="none", habillage=habillage_5components_4props, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
dev.off()
library(rgl)
# Per-row colours in rbind order (promoters, non-promoters, genes, islands,
# lowscore).
habillage.pro.not.isl.gen<-c(rep('red', length(exp_names)), rep ('blue', length(dataset_notpro)), rep('green', length(dataset_gen)), rep('orange', length(dataset_isl)), rep('magenta', 2000))
#4 components correspond to colors 1-4
open3d()
plot3d(princ.return.5comps.4props$x[,1:3], col=(habillage.pro.not.isl.gen), size=2.5)
M <- par3d("userMatrix") # #pca3d
movie3d( spin3d(), duration=15, dir="/home/mikhail/Documents/Script_2016_all/PCA/pca_movie3d/", clean = T, convert = T )
# #
library(pca3d)
pca3d(princ.return.5comps.4props, group = habillage.pro.not.isl.gen, col = habillage.pro.not.isl.gen, radius = 0.5)
#makeMoviePCA()
```
# # # SUPERVISED MACHINE LEARNING
Creating subsets for separate sequence types
```{r}
registerDoMC(cores = 3)  # parallel workers for caret resampling
# Keep the first 100 PCs as features.
df <- as.data.frame(((princ.return.5comps.4props$x)[,1:100]))
#df<-cbind(habillage_5components_4props, df)
# Row blocks follow the rbind order of the feature matrix.
# NOTE(review): the ranges hard-code the class sizes (699 promoters, 1880
# non-promoters, 3427 genes, 2228 islands, 2000 lowscore) - verify they
# match the loaded datasets.
promoters <- df[1:699,]
non_promoters <- df[700:2579,]
genes <- df[2580:6006,]
islands <- df[6007:8234,]
lowscore <- df[8235:10234,]
```
Adding factor columns
```{r}
# Two-class data sets: a response factor column followed by the PC scores.
# The factor-column names are relied on downstream, so the cbind() calls
# keep the exact variable names; only the label vectors are built with
# rep(times = ...) instead of c(rep(), rep()).
factor_to_promoters_vs_lowscore <- as.factor(rep(c('Promoter', 'Lowscore'),
                                                 c(nrow(promoters), nrow(lowscore))))
promoters_vs_lowscore <- cbind(factor_to_promoters_vs_lowscore, rbind(promoters, lowscore))
factor_to_promoters_vs_non_promoters <- as.factor(rep(c('Promoter', 'Non_promoter'),
                                                      c(nrow(promoters), nrow(non_promoters))))
promoters_vs_non_promoters <- cbind(factor_to_promoters_vs_non_promoters, rbind(promoters, non_promoters))
factor_to_promoters_vs_islands <- as.factor(rep(c('Promoter', 'Island'),
                                                c(nrow(promoters), nrow(islands))))
promoters_vs_islands <- cbind(factor_to_promoters_vs_islands, rbind(promoters, islands))
factor_to_promoters_vs_genes <- as.factor(rep(c('Promoter', 'Gene'),
                                              c(nrow(promoters), nrow(genes))))
promoters_vs_genes <- cbind(factor_to_promoters_vs_genes, rbind(promoters, genes))
```
#for promoters_vs_lowscore model training and evaluation
```{r}
# Promoters vs lowscore: 70/30 train/test split and repeated 10-fold CV
# (15 repeats), optimising ROC AUC (classProbs required for ROC).
set.seed(999)
inTraining <- createDataPartition(promoters_vs_lowscore$factor_to_promoters_vs_lowscore, p = 0.7, list = F)
training <- promoters_vs_lowscore[inTraining,]
testing <- promoters_vs_lowscore[-inTraining,]
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 15,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
```
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
```{r}
# Random forest, promoters vs. lowscore, tuned over 15 mtry values and
# selected on cross-validated ROC AUC.
# NOTE(review): this chunk uses method "rf" / tuneLength 15 while the three
# analogous chunks below use "nb" / tuneLength 7 — confirm the asymmetry is
# intentional.
fit_promoters_vs_lowscore <- train(factor_to_promoters_vs_lowscore ~ .,
data = training,
method = "rf",#nb, lda, nbDiscrete, nbSearch
# #preProcess=c("center", "scale"),
tuneLength = 15,
trControl = fitControl,
metric = "ROC"
)
# Hold-out evaluation: predicted classes, class probabilities, confusion matrix.
predictionClasses_promoters_vs_lowscore <- predict(fit_promoters_vs_lowscore, newdata = testing)
predictionProb_promoters_vs_lowscore <- predict(fit_promoters_vs_lowscore, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_lowscore <- confusionMatrix(data = predictionClasses_promoters_vs_lowscore, testing$factor_to_promoters_vs_lowscore)
```
For promoters_vs_non_promoters
```{r}
set.seed(999)   # reproducible train/test split
# 70/30 split, stratified on the class label.
inTraining <- createDataPartition(promoters_vs_non_promoters$factor_to_promoters_vs_non_promoters, p = 0.7, list = F)
training <- promoters_vs_non_promoters[inTraining,]
testing <- promoters_vs_non_promoters[-inTraining,]
# 10-fold CV repeated 3 times; ROC-based model selection.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
# Naive Bayes classifier, promoters vs. non-promoters.
fit_promoters_vs_non_promoters <- train(factor_to_promoters_vs_non_promoters ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
##preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
# Hold-out evaluation: predicted classes, class probabilities, confusion matrix.
predictionClasses_promoters_vs_non_promoters <- predict(fit_promoters_vs_non_promoters, newdata = testing)
predictionProb_promoters_vs_non_promoters <- predict(fit_promoters_vs_non_promoters, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_non_promoters <- confusionMatrix(data = predictionClasses_promoters_vs_non_promoters, testing$factor_to_promoters_vs_non_promoters)
```
#for promoters_vs_islands
```{r}
set.seed(999)   # reproducible train/test split
# 70/30 split, stratified on the class label.
inTraining <- createDataPartition(promoters_vs_islands$factor_to_promoters_vs_islands, p = 0.7, list = F)
training <- promoters_vs_islands[inTraining,]
testing <- promoters_vs_islands[-inTraining,]
# 10-fold CV repeated 3 times; ROC-based model selection.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
# Naive Bayes classifier, promoters vs. promoter islands.
fit_promoters_vs_islands <- train(factor_to_promoters_vs_islands ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
##preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
# Hold-out evaluation: predicted classes, class probabilities, confusion matrix.
predictionClasses_promoters_vs_islands <- predict(fit_promoters_vs_islands, newdata = testing)
predictionProb_promoters_vs_islands <- predict(fit_promoters_vs_islands, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_islands <- confusionMatrix(data = predictionClasses_promoters_vs_islands, testing$factor_to_promoters_vs_islands)
```
#for promoters_vs_genes
```{r}
set.seed(999)   # reproducible train/test split
# 70/30 split, stratified on the class label.
inTraining <- createDataPartition(promoters_vs_genes$factor_to_promoters_vs_genes, p = 0.7, list = F)
training <- promoters_vs_genes[inTraining,]
testing <- promoters_vs_genes[-inTraining,]
# 10-fold CV repeated 3 times; ROC-based model selection.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary
)
#train_control <- trainControl(method="repeatedcv", number=10, repeats=3)
# Naive Bayes classifier, promoters vs. genes.
fit_promoters_vs_genes <- train(factor_to_promoters_vs_genes ~ .,
data = training,
method = "nb",#nb, lda, nbDiscrete, nbSearch
#preProcess=c("center", "scale"),
tuneLength = 7,
trControl = fitControl,
metric = "ROC"
)
# Hold-out evaluation: predicted classes, class probabilities, confusion matrix.
predictionClasses_promoters_vs_genes <- predict(fit_promoters_vs_genes, newdata = testing)
predictionProb_promoters_vs_genes <- predict(fit_promoters_vs_genes, newdata = testing, type ="prob")
confusionMatrix_promoters_vs_genes <- confusionMatrix(data = predictionClasses_promoters_vs_genes, testing$factor_to_promoters_vs_genes)
```
Mean values for used properties for sequences sets
```{r}
# Mean (and median) of each physical property over the five sequence sets.
# Objects are looked up by name: every object whose name ends in "aeos1"
# holds activation-energy profiles for one set, etc.  The index vector
# c(1,5,2,3,4) reorders the alphabetical ls() hits into the fixed
# Promoters / Non-promoters / Genes / Islands / Lowscore order.
set_labels <- c('Promoters', 'Non-promoters', 'Genes', 'Islands', 'Lowscore')
# Helper: summarise (e.g. mean) each named object and label the result.
# vapply with a preallocated result replaces the original grow-by-c() loops.
summarise_sets <- function(obj_names, fun) {
  out <- vapply(obj_names, function(nm) fun(get(nm)), numeric(1), USE.NAMES = FALSE)
  names(out) <- set_labels
  out
}
# NOTE: the original patterns began with a dangling '*' (e.g. '*aeos1$'),
# a glob/regex mix-up; a plain anchored suffix matches the same names.
aes <- grep('aeos1$', ls(), value = TRUE)[c(1,5,2,3,4)]
meanaes <- summarise_sets(aes, mean)
# Bug fix: the original assigned names(medianaes) without ever computing
# medianaes; compute the per-set medians here.
medianaes <- summarise_sets(aes, median)
ds <- grep('aeos3$', ls(), value = TRUE)[c(1,5,2,3,4)]
meands <- summarise_sets(ds, mean)
eps <- grep('mpots$', ls(), value = TRUE)[c(1,5,2,3,4)]
meaneps <- summarise_sets(eps, mean)
gcs <- grep('gc200$', ls(), value = TRUE)[c(1,5,2,3,4)]
meangcs <- summarise_sets(gcs, mean)
```
Islands cluster analisys
```{r}
library(fastcluster)
# Ward clustering of the island rows (6007-8234 in the stacked PCA matrix);
# hclust.vector avoids materialising the full distance matrix.
islclust<-hclust.vector(princ.return.5comps.4props$x[6007:8234,], method = 'ward')
plot(islclust, labels=F)
# Split the island dendrogram into two clusters.
cutisl<-cutree(islclust, k=2)
# Colour vector over ALL rows: non-island rows get "1", islands are
# red/blue by cluster.  NOTE(review): mixing numeric 1 with colour-name
# strings makes c() coerce everything to character — confirm the plotting
# calls below accept "1" as a colour/group code.
habillage2isl.clusts<-c(rep(1, length(exp_names)), rep (1, length(dataset_notpro)), rep(1, length(dataset_gen)), rep('red', length(which(cutisl==1))), rep('blue', length(which(cutisl==2))), rep(1, 2000))
# PCA individual plots (PC1/PC2, PC1/PC3, PC2/PC3) highlighting the two
# island clusters with 99% ellipses.
fviz_pca_ind(princ.return.5comps.4props, label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
fviz_pca_ind(princ.return.5comps.4props, axes = c(1,3), label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
fviz_pca_ind(princ.return.5comps.4props, axes = c(2,3), label="none", habillage=habillage2isl.clusts, pointsize = 0.5, addEllipses = T, ellipse.level = 0.99)
plot3d(princ.return.5comps.4props$x[,1:3], col=(habillage2isl.clusts), size=2, cex.lab=1.8)
# Compare the two island clusters on each raw property matrix
# (activation energy, open-state size, electrostatic potential, GC content).
summary(as.vector(islaeos1[which(cutisl==1),]))
summary(as.vector(islaeos1[which(cutisl==2),]))
isl_props<-grep('isl', ls(), value=T)
# Shared y-limits so the two clusters' boxplots are directly comparable.
lmts <- range(as.vector(islaeos1[which(cutisl==1),]),
as.vector(islaeos1[which(cutisl==2),]))
par(mfrow = c(1, 2))
boxplot(as.vector(islaeos1[which(cutisl==1),]),ylim=lmts)
boxplot(as.vector(islaeos1[which(cutisl==2),]),ylim=lmts)
lmts <- range(as.vector(islaeos3[which(cutisl==1),]),
as.vector(islaeos3[which(cutisl==2),]))
par(mfrow = c(1, 2))
boxplot(as.vector(islaeos3[which(cutisl==1),]),ylim=lmts)
boxplot(as.vector(islaeos3[which(cutisl==2),]),ylim=lmts)
summary(as.vector(islaeos3[which(cutisl==1),]))
summary(as.vector(islaeos3[which(cutisl==2),]))
summary(as.vector(islmpots[which(cutisl==1),]))
summary(as.vector(islmpots[which(cutisl==2),]))
summary(as.vector(islgc200[which(cutisl==1),]))
summary(as.vector(islgc200[which(cutisl==2),]))
# TSS counts and strand distribution per cluster.
summary(isltsss[which(cutisl==1)])
summary(isltsss[which(cutisl==2)])
table(islstrands[which(cutisl==1)])
table(islstrands[which(cutisl==2)])
# Per-cluster profile overlays (one line per sequence) for each property.
lmts <- range(as.vector(islaeos1[which(cutisl==1),]),
as.vector(islaeos1[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islaeos1[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islaeos1[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islaeos3[which(cutisl==1),]),
as.vector(islaeos3[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islaeos3[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islaeos3[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islmpots[which(cutisl==1),]),
as.vector(islmpots[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islmpots[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islmpots[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
lmts <- range(as.vector(islgc200[which(cutisl==1),]),
as.vector(islgc200[which(cutisl==2),]))
par(mfrow = c(1, 2))
matplot(islgc200[which(cutisl==1),], type='l', lwd=0.03, col='red', ylim = lmts)
matplot(islgc200[which(cutisl==2),], type='l', lwd=0.03, col='blue', ylim = lmts)
```
# # # UNSUPERVISED MACHINE LEARNING - CLUSTER ANALYSIS
```{r}
library(fastcluster)
# Ward clustering of all sequences on the first 100 PCs (hclust.vector
# avoids building the full distance matrix).
ward.princ.return.5comps.4props<-hclust.vector(princ.return.5comps.4props$x[,1:100], method = 'ward')
library(NbClust)
# NbClust indices that run without errors on this data set.
indices_that_works<-c("kl", "ch", "hartigan", "cindex", "db", "silhouette", "ratkowsky", "ball", "ptbiserial", "frey", "mcclain", "dunn", "sdbw")
all_for_5comp<-c()
# Estimate the optimal cluster count with each index separately (running
# index-by-index keeps a single failing index from aborting the rest);
# each full result is also stashed under sidd_nbres_<index>.
for (i in indices_that_works) {
print(i)
siddres<-NbClust(data = princ.return.5comps.4props$x[,1:100], diss = NULL, distance = "euclidean", min.nc = 2, max.nc = 22, method = 'ward.D2', index = i)
assign(paste0('sidd_nbres_',i), siddres)
all_for_5comp<-c(all_for_5comp, siddres)
print(siddres$Best.nc[1])
}
library(dendextend)
# Colour every leaf individually (k = number of leaves) by its known group,
# so the known classes are visible on the dendrogram.
d1=color_branches(as.dendrogram(ward.princ.return.5comps.4props), k=nrow(princ.return.5comps.4props$x[,1:100]), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))])
d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
png('ward_on_pca_4_prop_5_components_circlized.png', height=3000, width=3000, res=300)
# Raise the dendrogram so branch colours remain visible in the circular layout.
raised_d1<-raise.dendrogram(d1, 7000)
#raised_d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
dev.off()
png('ward_on_pca_5_prop_4_components_circlized_raised.png', height=3000, width=3000, res=300)
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
##plot(raised_d1, main="Ward's method clustering for PCA results \n(promoters, non-promoters, genes, promoter islands, and lowscore sequences", labels=F)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage.pro.not.isl.gen))
dev.off()
```
separately for properties
```{r}
library(fastcluster)
# NOTE(review): despite the "separately for properties" heading, only this
# hclust call uses aeos$x — every call below (NbClust, color_branches)
# still uses princ.return.5comps.4props, and this assignment overwrites the
# ward.princ.return.5comps.4props object from the previous chunk.  This
# looks like an incomplete copy-paste of that chunk; confirm which input
# matrix was intended before relying on these results.
ward.princ.return.5comps.4props<-hclust.vector(aeos$x[,1:100], method = 'ward')
library(NbClust)
# NbClust indices that run without errors on this data set.
indices_that_works<-c("kl", "ch", "hartigan", "cindex", "db", "silhouette", "ratkowsky", "ball", "ptbiserial", "frey", "mcclain", "dunn", "sdbw")
all_for_5comp<-c()
# Estimate the optimal cluster count with each index separately.
for (i in indices_that_works) {
print(i)
siddres<-NbClust(data = princ.return.5comps.4props$x[,1:100], diss = NULL, distance = "euclidean", min.nc = 2, max.nc = 22, method = 'ward.D2', index = i)
assign(paste0('sidd_nbres_',i), siddres)
all_for_5comp<-c(all_for_5comp, siddres)
print(siddres$Best.nc[1])
}
library(dendextend)
# One leaf per cluster (k = number of rows) coloured by the known groups.
d1=color_branches(as.dendrogram(ward.princ.return.5comps.4props), k=nrow(princ.return.5comps.4props$x[,1:100]), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))])
d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
png('ward_on_pca_4_prop_5_components_circlized.png', height=3000, width=3000, res=300)
raised_d1<-raise.dendrogram(d1, 14000)
##plot(hang.dendrogram(raised_d1), horiz = TRUE)
#raised_d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
dev.off()
png('ward_on_pca_5_prop_4_components_circlized_raised.png', height=3000, width=3000, res=300)
circlize_dendrogram(raised_d1, labels=F, dend_track_height = 0.95)
##plot(raised_d1, main="Ward's method clustering for PCA results \n(promoters, non-promoters, genes, promoter islands, and lowscore sequences", labels=F)
legend("topright", legend=c('Promoters', 'Non-promoters', 'Genes', 'Promoter islands', 'Lowscore'), fill=unique(habillage.pro.not.isl.gen))
dev.off()
```
```{r}
#poster
# Example single-sequence profiles of each physical property.  x axes match
# each property's window: electrostatic potential over -540..179, the other
# three properties over -150..50.
XX<-(-540):(179)
plot(XX, mpots[1,], ty='l', lwd=1.5, ylab='EP value', xlab='Sequence (angstrom)', cex.lab=1.4 )
abline(v=0, lty=2)
X<--150:50
plot(X, aeos1[1,], ty='l', lwd=1.5, ylab='Activation energy (kcal/mol)', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
plot(X, aeos3[1,], ty='l', lwd=1.5, ylab='Size of open state (nts)', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
plot(X, gc200[1,], ty='l', lwd=1.5, ylab=' GC-content for 200 b.p.', xlab='Sequence (nts)', cex.lab=1.4 )
abline(v=0, lty=2)
# First sequence's scores on the 100 retained PCs.
plot(1:100, princ.return.5comps.4props$x[1,1:100], ty='l', lwd=2, xlab='PCs most contibuting to variance', ylab='Relative units', cex.lab=1.4, col='red' )
#plot 3d for 3 main PCs colored according to cluster analysis
plot3d(princ.return.5comps.4props$x[,1:3], col=habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward.princ.return.5comps.4props))], size=2.5)
#d1 %>% rect.dendrogram(k = 4, horiz = TRUE, border = 8, lty = 5, lwd = 2, col='red')
#confusionMatrix visualization
# 2x2 panel of fourfold plots for the four caret classifiers.
par(mfrow=c(2,2))
fourfoldplot(confusionMatrix_promoters_vs_lowscore$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs lowscore")
fourfoldplot(confusionMatrix_promoters_vs_non_promoters$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs non-promoters")
fourfoldplot(confusionMatrix_promoters_vs_genes$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs genes")
fourfoldplot(confusionMatrix_promoters_vs_islands$table, color = c("#CC6666", "#99CC99"),
conf.level = 0, margin = 1, main = "Promoters vs islands")
# Ward clustering on each property's own column block of the stacked
# feature matrix.  Column layout (201 + 201 + 720 + 201 columns):
# aeos1 = 1..201, aeos3 = 202..402, mpots = 403..1122, gc200 = 1123..end.
# Bug fix: the original slices 202:(202+201) and 403:(403+720) were one
# column too long ("start + length" instead of "start + length - 1"), so
# the aeos3 block swallowed the first mpots column and the mpots block the
# first gc200 column.
ward_all_aeos1<-hclust.vector(to_pca_5components_4props[, 1:ncol(aeos1)], method = 'ward')
ward_all_aeos3<-hclust.vector(to_pca_5components_4props[, 202:(202+200)], method = 'ward')
ward_all_mpots<-hclust.vector(to_pca_5components_4props[, 403:(403+719)], method = 'ward')
ward_all_gc200<-hclust.vector(to_pca_5components_4props[,(402+721):ncol(to_pca_5components_4props)], method = 'ward')
library(dendextend)
# For each per-property dendrogram: colour every leaf by its known group,
# then raise it so colours stay visible in the circular layout.
d1aeos1=color_branches(as.dendrogram(ward_all_aeos1), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_aeos1))])
raised_d1aeos1<-raise.dendrogram(d1aeos1, 8000)
d1aeos3=color_branches(as.dendrogram(ward_all_aeos3), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_aeos3))])
raised_d1aeos3<-raise.dendrogram(d1aeos3, 700)
d1mpots=color_branches(as.dendrogram(ward_all_mpots), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_mpots))])
raised_d1mpots<-raise.dendrogram(d1mpots, 700)
d1gc200=color_branches(as.dendrogram(ward_all_gc200), k=nrow(to_pca_5components_4props), col = habillage.pro.not.isl.gen[order.dendrogram(as.dendrogram(ward_all_gc200))])
raised_d1gc200<-raise.dendrogram(d1gc200, 700)
# Bug fix: the svg() device was writing an SVG file under a .png name.
svg('ward_on_pca_4_prop_5_components_circlized_raised_all_4.svg', height=15, width=15)
# NOTE(review): circlize-based plots do not honour par(mfrow); confirm the
# four dendrograms actually land on one page.
par(mfrow=c(2,2))
circlize_dendrogram(raised_d1aeos1, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1aeos3, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1mpots, labels=F, dend_track_height = 0.95)
circlize_dendrogram(raised_d1gc200, labels=F, dend_track_height = 0.95)
dev.off()
# NOTE(review): this png device is opened but nothing is drawn into it and
# it is never closed in this chunk — likely a leftover; confirm before
# removing.
png('ward_on_pca_4_prop_4_components_circlized.png', height=3000, width=3000, res=300)
```
# # # GC3 calculation
#function to calculate GC-3
library(seqinr)
GC1 <- function(s) {
  # GC content of the first codon position (bases 1, 4, 7, ...) of the
  # single-character vector s, via seqinr::GC.
  GC(s[seq(1, length(s), 3)])
}
GC2 <- function(s) {
  # GC content of the second codon position (bases 2, 5, 8, ...) of the
  # single-character vector s, via seqinr::GC.
  GC(s[seq(2, length(s), 3)])
}
GC3 <- function(s) {
  # GC content of the third codon position (bases 3, 6, 9, ...) of the
  # single-character vector s, via seqinr::GC.
  GC(s[seq(3, length(s), 3)])
}
GC1_2_3 <- function(s) {
  # GC content at each of the three codon positions of the
  # single-character vector s, returned as an unnamed 3-element list
  # (positions 1, 2, 3 in that order).
  by_position <- lapply(1:3, function(p) s[seq(p, length(s), 3)])
  lapply(by_position, GC)
}
#GC by codons spread calculation
# Spread (max - min) of GC content across the three codon positions of one
# sequence (a single-character vector).
# Bug fix: the original returned range(...)[1] - range(...)[2], i.e.
# min - max, which is always <= 0; the spread is max - min, matching the
# range(...)[2] - range(...)[1] form used everywhere else in this file.
# Also evaluates GC1/GC2/GC3 once each instead of twice.
GC_spread <- function (seq) {
  library(zoo)
  library(seqinr)
  vals <- c(GC1(seq), GC2(seq), GC3(seq))
  max(vals) - min(vals)
}
rollGCspread <- function(seq, interval) {
  # Rolling GC-by-codon-position spread: GC_spread() evaluated over a
  # sliding window of `interval` bases along the sequence.
  library(zoo)
  library(seqinr)
  rollapply(seq, interval, GC_spread)
}
# Genes: per-sequence spread of GC content across the three codon positions.
# Split each gene sequence into a vector of single characters.
gen_allseqs<-sapply(dataset_gen, function(x) {return((strsplit(x$seq, '')))})
# vapply with a preallocated result replaces the original grow-by-c() loop,
# and GC1_2_3() is evaluated once per sequence instead of twice.
spread_gc1_2_3_gen <- vapply(gen_allseqs, function(s) {
  gc_rng <- range(GC1_2_3(s))   # range() flattens the 3-element list
  gc_rng[2] - gc_rng[1]
}, numeric(1), USE.NAMES = FALSE)
#for seqs - ALL promoters
# Promoters: per-sequence spread of GC content across the three codon positions.
pro_allseqs<-sapply(dataset_pro, function(x) {return((strsplit(x$seq, '')))})
# vapply with a preallocated result replaces the original grow-by-c() loop,
# and GC1_2_3() is evaluated once per sequence instead of twice.
spread_gc1_2_3_pro <- vapply(pro_allseqs, function(s) {
  gc_rng <- range(GC1_2_3(s))   # range() flattens the 3-element list
  gc_rng[2] - gc_rng[1]
}, numeric(1), USE.NAMES = FALSE)
#for isl_seqs - islands
# Islands: per-sequence spread of GC content across the three codon positions.
isl_allseqs<-sapply(dataset_isl, function(x) {return((strsplit(x$seq, '')))})
# vapply with a preallocated result replaces the original grow-by-c() loop,
# and GC1_2_3() is evaluated once per sequence instead of twice.
spread_gc1_2_3_isl <- vapply(isl_allseqs, function(s) {
  gc_rng <- range(GC1_2_3(s))   # range() flattens the 3-element list
  gc_rng[2] - gc_rng[1]
}, numeric(1), USE.NAMES = FALSE)
#for lowscore_seqs - lowscore
# Lowscore: per-sequence spread of GC content across the three codon positions.
lowscore_allseqs<-sapply(dataset_lowscore, function(x) {return((strsplit(x$seq, '')))})
# vapply with a preallocated result replaces the original grow-by-c() loop,
# and GC1_2_3() is evaluated once per sequence instead of twice.
spread_gc1_2_3_lowscore <- vapply(lowscore_allseqs, function(s) {
  gc_rng <- range(GC1_2_3(s))   # range() flattens the 3-element list
  gc_rng[2] - gc_rng[1]
}, numeric(1), USE.NAMES = FALSE)
#for isl_seqs - islands
# (Removed: these lines were an exact byte-for-byte duplicate of the islands
# computation above; spread_gc1_2_3_isl and isl_allseqs are already defined
# there, so re-running the identical loop only repeated work without
# changing any result.)
#for notpro
# Non-promoters: per-sequence spread of GC content across the three codon positions.
not_allseqs<-sapply(dataset_notpro , function(x) {return((strsplit(x$seq, '')))})
# vapply with a preallocated result replaces the original grow-by-c() loop,
# and GC1_2_3() is evaluated once per sequence instead of twice.
spread_gc1_2_3_not <- vapply(not_allseqs, function(s) {
  gc_rng <- range(GC1_2_3(s))   # range() flattens the 3-element list
  gc_rng[2] - gc_rng[1]
}, numeric(1), USE.NAMES = FALSE)
#for complete genome
# Whole-genome spread (max - min) of GC content across codon positions.
GC3_total_genome<-range(GC1_2_3(e.coli_U00096.2_char))[2] - range(GC1_2_3(e.coli_U00096.2_char))[1]
# Shared y-limits so the five sets' boxplots are directly comparable.
lmts<-range(spread_gc1_2_3_pro, spread_gc1_2_3_isl, spread_gc1_2_3_gen, spread_gc1_2_3_lowscore, spread_gc1_2_3_not )
#lmts<-c(0,0.45)
par(mfrow=c(1,5))
boxplot(spread_gc1_2_3_pro, ylim = lmts)
boxplot(spread_gc1_2_3_isl, ylim = lmts)
boxplot(spread_gc1_2_3_gen, ylim = lmts)
boxplot(spread_gc1_2_3_lowscore, ylim = lmts)
boxplot(spread_gc1_2_3_not, ylim = lmts)
mtext('GC spread by triplets for promoters, islands, genes, and lowscore sequences', at=-1.5, line=1, cex=1.5 )
|
Inventatori de cirnati facuti din iarba , dovleci si trandafiri , de aparate care decodeaza visele , de rachete cu apa , telepati cu frunzele din tara vecina , autori de solutii miraculoase pentru scos Romania din criza economica , tot felul de oameni ciudati si interesati , cu idei minunate si imposibile trec pe la redactie .
au un farmec aparte , un umor involuntar , o doza de utopie , iar unii chiar arata ca niste zaltati .
au scris si versuri , au facut si proiecte stil Michelangelo . Si daca nu crezi nici in astea , atunci macar sa le faci o vizita ca sa - ti dea un zaibar sau o plasa cu cartofi netratati cu insecticid .
cu aceeasi prietenoasa si amuzanta reactie am primit si un comunicat de presa de la Ministerul Apararii din Republica Mioritica a Romaniei .
in primul moment am trait sentimentul ca avem de - a face cu un banc .
sau cu o pagina - meteorit desprinsa din celulele nervoase ale unui martian vesel .
biroul de Presa al Ministerului Apararii transmite pe Internet , fax si alte cai de comunicare ca nu - i mai ramine de facut " decit sa le multumim celor care mor de grija altora , amintindu - le ca viata lor este scurta , iar sanatatea este si pentru ei un bun prea de pret care nu trebuie pus in pericol prin lansarea unor dezbateri cu un inalt consum emotional , stresant " .
daca n - ar fi fost semnat de Biroul de Presa al MApN , am fi putut crede ca e unul dintre panseurile multilateralului ministru Ioan Mircea Pascu , cel mindru si intelept , scris pe unul dintre traseele sale " istorice " , intre celebrul lider comunist Kim Ir Sen si ministerul propriu din Dealul Spirii .
textul are un stil cam mistocaresc , ca si plimbarile de rocker in geaca din piele ale ministrului prin fata trupelor aliniate .
dar daca citim mai atent , dincolo de aerul de mistocareala ( adeseori reusita de ministru prin emisiuni tv ) gasim o amenintare plus semnatura Biroului de Presa .
s - o fi apucat respectivul departament sa cugete pentru a - i intrece pe antici la dictoane ? " Viata lor - a ziaristilor ( n . n . ) - este scurta " ? !
precizam pentru cititori ca acest comunicat al MApN a fost trimis ca reactie ( de fapt se vrea un drept la replica in stilul visat de ministru ! ) la discutiile provocate de articolul aparut in " Wall Street Journal " .
Jurnalistul respectiv se referea la prezenta unor fosti ofiteri de Securitate in structurile armatei si serviciilor secrete .
armata , prin vocile ei adunate sub titulatura de " biroul de presa " , transmite ziaristilor ca mor ca prostii de grija altora ( a fostilor ofiteri de Securitate ) .
am fi putut trece chiar si peste mirlania din acest comunicat daca , la sfirsit , echipa MApN n - ar fi tinut sa ne reaminteasca perspectiva ei asupra vietii si sa ne avertizeze ca " viata este scurta , iar sanatatea este un bun prea de pret care nu trebuie pus in pericol prin lansarea unor dezbateri cu un inalt consum emotional , stresant " .
cu alte cuvinte , cel mai fortos si inarmat minister din Romania ne reaminteste ca , prin lansarea unor dezbateri cum e cea despre fostii ofiteri de Securitate , ne punem sanatatea in pericol .
poate ne loveste o masina sau chiar un tanc , poate ne inteapa o umbrela !
si e pacat de noi ca sintem tineri !
culmea e ca aceasta amenintare stravezie , facuta exact in stilul Securitatii , vine taman in momentul in care se poarta aceasta dezbatere despre securisti .
s - or fi temind cei de la Biroul de Presa ca ajungem la ceva dosare ? ! Se teme chiar ministrul ? !
dincolo de aerul mistocaresc , se simt o anume nervozitate si chiar amenintare !
nu ne ramine decit sa asteptam ziua in care aceeasi armata va difuza din campusurile NATO un comunicat care sa inceapa cam asa : " Bai smecherilor , cu cine v - ati gasit sa va luati voi de curea ?
stiti ce gust are plumbul de 7,62 ? ! " .
| /data/Newspapers/2002.05.11.editorial.85604.0888.r | no_license | narcis96/decrypting-alpha | R | false | false | 3,831 | r | Inventatori de cirnati facuti din iarba , dovleci si trandafiri , de aparate care decodeaza visele , de rachete cu apa , telepati cu frunzele din tara vecina , autori de solutii miraculoase pentru scos Romania din criza economica , tot felul de oameni ciudati si interesati , cu idei minunate si imposibile trec pe la redactie .
au un farmec aparte , un umor involuntar , o doza de utopie , iar unii chiar arata ca niste zaltati .
au scris si versuri , au facut si proiecte stil Michelangelo . Si daca nu crezi nici in astea , atunci macar sa le faci o vizita ca sa - ti dea un zaibar sau o plasa cu cartofi netratati cu insecticid .
cu aceeasi prietenoasa si amuzanta reactie am primit si un comunicat de presa de la Ministerul Apararii din Republica Mioritica a Romaniei .
in primul moment am trait sentimentul ca avem de - a face cu un banc .
sau cu o pagina - meteorit desprinsa din celulele nervoase ale unui martian vesel .
biroul de Presa al Ministerului Apararii transmite pe Internet , fax si alte cai de comunicare ca nu - i mai ramine de facut " decit sa le multumim celor care mor de grija altora , amintindu - le ca viata lor este scurta , iar sanatatea este si pentru ei un bun prea de pret care nu trebuie pus in pericol prin lansarea unor dezbateri cu un inalt consum emotional , stresant " .
daca n - ar fi fost semnat de Biroul de Presa al MApN , am fi putut crede ca e unul dintre panseurile multilateralului ministru Ioan Mircea Pascu , cel mindru si intelept , scris pe unul dintre traseele sale " istorice " , intre celebrul lider comunist Kim Ir Sen si ministerul propriu din Dealul Spirii .
textul are un stil cam mistocaresc , ca si plimbarile de rocker in geaca din piele ale ministrului prin fata trupelor aliniate .
dar daca citim mai atent , dincolo de aerul de mistocareala ( adeseori reusita de ministru prin emisiuni tv ) gasim o amenintare plus semnatura Biroului de Presa .
s - o fi apucat respectivul departament sa cugete pentru a - i intrece pe antici la dictoane ? " Viata lor - a ziaristilor ( n . n . ) - este scurta " ? !
precizam pentru cititori ca acest comunicat al MApN a fost trimis ca reactie ( de fapt se vrea un drept la replica in stilul visat de ministru ! ) la discutiile provocate de articolul aparut in " Wall Street Journal " .
Jurnalistul respectiv se referea la prezenta unor fosti ofiteri de Securitate in structurile armatei si serviciilor secrete .
armata , prin vocile ei adunate sub titulatura de " biroul de presa " , transmite ziaristilor ca mor ca prostii de grija altora ( a fostilor ofiteri de Securitate ) .
am fi putut trece chiar si peste mirlania din acest comunicat daca , la sfirsit , echipa MApN n - ar fi tinut sa ne reaminteasca perspectiva ei asupra vietii si sa ne avertizeze ca " viata este scurta , iar sanatatea este un bun prea de pret care nu trebuie pus in pericol prin lansarea unor dezbateri cu un inalt consum emotional , stresant " .
cu alte cuvinte , cel mai fortos si inarmat minister din Romania ne reaminteste ca , prin lansarea unor dezbateri cum e cea despre fostii ofiteri de Securitate , ne punem sanatatea in pericol .
poate ne loveste o masina sau chiar un tanc , poate ne inteapa o umbrela !
si e pacat de noi ca sintem tineri !
culmea e ca aceasta amenintare stravezie , facuta exact in stilul Securitatii , vine taman in momentul in care se poarta aceasta dezbatere despre securisti .
s - or fi temind cei de la Biroul de Presa ca ajungem la ceva dosare ? ! Se teme chiar ministrul ? !
dincolo de aerul mistocaresc , se simt o anume nervozitate si chiar amenintare !
nu ne ramine decit sa asteptam ziua in care aceeasi armata va difuza din campusurile NATO un comunicat care sa inceapa cam asa : " Bai smecherilor , cu cine v - ati gasit sa va luati voi de curea ?
stiti ce gust are plumbul de 7,62 ? ! " .
|
# Attach MCMC-estimated log-likelihoods to previously fitted ERGMs, save the
# updated fits, and send a completion email.
rm(list=ls())
# Install Rglpk only when missing (the original unconditionally reinstalled
# it on every run).
if (!requireNamespace('Rglpk', quietly = TRUE)) install.packages('Rglpk')
library(ergm)
library(coda)
library(mail)
setwd('/homes/tscott1/win/user/elwha/PSJ_Submission/Version3/min_versions/')
# Each .RData file provides one fitted model object (mod_base, mod_allpart, ...).
load('result_allpartmod.RData')
load('result_allpartmodquad.RData')
load('result_base.RData')
load('result_dirpart.RData')
load('result_indpart.RData')
load('result_shapart.RData')
load('result_pasttie.RData')
# Reduced MCMC settings so the (expensive) log-likelihood estimation stays
# tractable.
small.control <- control.logLik.ergm(
  nsteps = 4,
  MCMC.burnin = 100,
  MCMC.interval = 100,
  MCMC.samplesize = 1000,
  obs.MCMC.samplesize = 1000,
  obs.MCMC.interval = 100,
  obs.MCMC.burnin = 100,
  MCMC.prop.args = list(p0 = 0.5),
  warn.dyads = TRUE,
  MCMC.init.maxedges = NULL,
  MCMC.packagenames = NULL,
  seed = 24)
# Estimate and attach (add = TRUE) the log-likelihood to every model.  The
# generic logLik() dispatches to logLik.ergm(), so all models are handled
# uniformly (the original mixed logLik() and direct logLik.ergm() calls),
# and the seven copy-pasted call blocks collapse into one loop.
model_names <- c('mod_base', 'mod_allpart', 'mod_allpartquad', 'mod_dirpart',
                 'mod_indpart', 'mod_shapart', 'modpast_pasttie')
for (nm in model_names) {
  assign(nm, logLik(object = get(nm), add = TRUE, verbose = T, control = small.control))
}
save.image('small_logliks.RData')   # persist the models with log-likelihoods attached
sendmail(recipient = 'tyler.andrew.scott@gmail.com','test loglik values added','add loglik finished')
# test <- lapply(1:100,function(x) rnorm(10000))
# system.time(x <- lapply(test,function(x) loess.smooth(x,x)))
# system.time(x <- mclapply(test,function(x) loess.smooth(x,x), mc.cores=7))
| /code/proj2/add_loglik_values.R | no_license | tylerandrewscott/elwha | R | false | false | 1,867 | r | rm(list=ls())
install.packages('Rglpk')
library(ergm)
library(coda)
library(mail)
setwd('/homes/tscott1/win/user/elwha/PSJ_Submission/Version3/min_versions/')
load('result_allpartmod.RData')
load('result_allpartmodquad.RData')
load('result_base.RData')
load('result_dirpart.RData')
load('result_indpart.RData')
load('result_shapart.RData')
load('result_pasttie.RData')
small.control = control.logLik.ergm(
nsteps=4,
MCMC.burnin=100,
MCMC.interval=100,
MCMC.samplesize=1000,
obs.MCMC.samplesize=1000,
obs.MCMC.interval=100,
obs.MCMC.burnin=100,
MCMC.prop.args=list(p0=0.5),
warn.dyads=TRUE,
MCMC.init.maxedges=NULL,
MCMC.packagenames=NULL,
seed=24)
mod_base = logLik(object = mod_base,
add = TRUE,verbose=T,control=small.control)
mod_allpart = logLik.ergm(object = mod_allpart,
add = TRUE,verbose=T,
control=small.control)
mod_allpartquad = logLik.ergm(object = mod_allpartquad,
add = TRUE,verbose=T,control=small.control)
mod_dirpart = logLik.ergm(object = mod_dirpart,
add = TRUE,verbose=T,control=small.control)
mod_indpart = logLik.ergm(object = mod_indpart,
add = TRUE,verbose=T,control=small.control)
mod_shapart = logLik.ergm(object = mod_shapart,
add = TRUE,verbose=T,control=small.control)
modpast_pasttie = logLik.ergm(object = modpast_pasttie,
add = TRUE,verbose=T,control=small.control)
save.image('small_logliks.RData')
sendmail(recipient = 'tyler.andrew.scott@gmail.com','test loglik values added','add loglik finished')
# test <- lapply(1:100,function(x) rnorm(10000))
# system.time(x <- lapply(test,function(x) loess.smooth(x,x)))
# system.time(x <- mclapply(test,function(x) loess.smooth(x,x), mc.cores=7))
|
setwd("/Users/aszostek/Projects/Kaggle/Amazon")
train <- read.csv(file="./Data/train.csv")
test <- read.csv(file="./Data/test.csv")
source("../Utils/submission_utils.R")
iteration = 20   # submission number used in the output file name
library("e1071")
library("hash")
library("verification")
# Concatenate columns 2-10 in training set
# NOTE(review): the paste below actually uses x[3]..x[10], i.e. columns
# 3-10, not 2-10 as the comment above says — confirm which was intended.
new_train <- as.data.frame(matrix(NA, ncol=2,nrow=nrow(train)))
new_train[[1]] <- train[[1]]
# Build one "-"-joined key per row out of the feature columns.
new_train[[2]]<-apply(train,1,function(x) paste(x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],sep="-"))
#new_train[[2]] <- as.factor(new_train[[2]])
# Make the same concatenation for test set
new_test <- as.data.frame(matrix(NA, ncol=2,nrow=nrow(test)))
new_test[[1]] <- test[[1]]
new_test[[2]] <- apply(test,1,function(x) paste(x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],sep="-"))
# Now do a pivot
# Lookup table: mean ACTION per unique feature-combination key.
mean_action <- aggregate(new_train[[1]],list(new_train[[2]]),mean)
m <- mean(mean_action[[2]])   # NOTE(review): 'm' is never used below
# ----------------------------------
# Make a prediction on a test set!
# Training-set AUC of the key-lookup predictor (optimistic: same data it
# was built from).
prediction_train <- mean_action[match(new_train[[2]],mean_action[[1]]),2]
roc.area(train[[1]], prediction_train)$A
# Use Submission 6 to replace missing values
# NOTE(review): the file read is submission19.csv, not submission 6 as the
# comment above says.
submission <- read.csv(file="./Submissions/submission19.csv")
# Keys unseen in training yield NA; those rows fall back to the previous
# submission's predictions.
prediction_test <- mean_action[match(new_test[[2]],mean_action[[1]]),2]
sum(is.na(prediction_test))   # how many test rows needed the fallback
prediction_test[is.na(prediction_test)] <- submission[is.na(prediction_test),2]
# -----------------------
# Submission file
id <- test[[1]]
prediction <- prediction_test
test_submission<-as.data.frame(matrix(data = NA, nrow = length(prediction),ncol=2))
test_submission[[1]] <- id
test_submission[[2]] <- prediction
names(test_submission)<-c("Id","Action")
# write file
submission_file_name = paste("./Submissions/submission",as.character(iteration),".csv",sep="")
submission_file_name
write.csv(test_submission,file=submission_file_name,row.names=FALSE,quote=FALSE)
# Diff this submission (20) against the previous one (19).
diffsub(19,20,2,"Amazon")
| /Code/Iteration_20.R | no_license | astronerma/Amazon | R | false | false | 1,873 | r | setwd("/Users/aszostek/Projects/Kaggle/Amazon")
train <- read.csv(file="./Data/train.csv")
test <- read.csv(file="./Data/test.csv")
source("../Utils/submission_utils.R")
iteration = 20
library("e1071")
library("hash")
library("verification")
# Concatenate columns 2-10 in training set
new_train <- as.data.frame(matrix(NA, ncol=2,nrow=nrow(train)))
new_train[[1]] <- train[[1]]
new_train[[2]]<-apply(train,1,function(x) paste(x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],sep="-"))
#new_train[[2]] <- as.factor(new_train[[2]])
# Make the same concatenation for test set
new_test <- as.data.frame(matrix(NA, ncol=2,nrow=nrow(test)))
new_test[[1]] <- test[[1]]
new_test[[2]] <- apply(test,1,function(x) paste(x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],sep="-"))
# Now do a pivot
mean_action <- aggregate(new_train[[1]],list(new_train[[2]]),mean)
m <- mean(mean_action[[2]])
# ----------------------------------
# Make a prediction on a test set!
prediction_train <- mean_action[match(new_train[[2]],mean_action[[1]]),2]
roc.area(train[[1]], prediction_train)$A
# Use Submission 6 to replace missing values
submission <- read.csv(file="./Submissions/submission19.csv")
prediction_test <- mean_action[match(new_test[[2]],mean_action[[1]]),2]
sum(is.na(prediction_test))
prediction_test[is.na(prediction_test)] <- submission[is.na(prediction_test),2]
# -----------------------
# Submission file
id <- test[[1]]
prediction <- prediction_test
test_submission<-as.data.frame(matrix(data = NA, nrow = length(prediction),ncol=2))
test_submission[[1]] <- id
test_submission[[2]] <- prediction
names(test_submission)<-c("Id","Action")
# write file
submission_file_name = paste("./Submissions/submission",as.character(iteration),".csv",sep="")
submission_file_name
write.csv(test_submission,file=submission_file_name,row.names=FALSE,quote=FALSE)
diffsub(19,20,2,"Amazon")
|
#### ---- Function to obtain date, time, and datetime variables from character datetime string ---- ####
# Function
#' Derive date, time, and datetime variables from a character datetime string.
#'
#' Each value of `datetmchar` is classified by string length:
#' 16 characters ("YYYY-MM-DDTHH:MM") = full datetime (FDT),
#' 10 characters ("YYYY-MM-DD") = full date (FD), anything else = partial
#' date (PD). Derived columns: `dt` (Date), `dtm` (POSIXct), and `tm`
#' (chron times, full datetimes only).
#'
#' @param dsetin Input data frame.
#' @param datetmchar Unquoted name of the character datetime column
#'   (ISO 8601, with "T" separating date and time).
#' @return `dsetin` with `dt`, `dtm` (and `tm`) appended. NOTE: rows come
#'   back grouped by completeness (FDT, then FD, then PD), not in the
#'   original row order -- callers relying on order should re-sort.
dtvars <- function(dsetin, datetmchar) {
  # Install any missing packages, then attach them.
  ipak <- function(pkg) {
    new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
    if (length(new.pkg)) {
      install.packages(new.pkg, dependencies = TRUE)
    }
    sapply(pkg, require, character.only = TRUE)
  }
  # FIX: dropped "gsktable" -- it is not on CRAN and nothing below uses it;
  # only tidyverse (dplyr/stringr) and chron (times()) are required.
  ipak(c("tidyverse", "chron"))
  # Tag each row with its completeness token.
  # FIX: plain `dsetin` instead of `{{ dsetin }}` -- embracing is only
  # meaningful inside data-masking verbs; the column argument below keeps it.
  one <- dsetin %>%
    mutate(
      .len_ = str_length({{ datetmchar }}),
      .token_ = if_else(.len_ == 16, "FDT",
        if_else(.len_ == 10, "FD", "PD")
      ),
      chardate1 = str_replace({{ datetmchar }}, "T", " "),
      chardate2 = str_sub(chardate1, start = 1L, end = 10L),
      # "HH:MM" -> "HH:MM:00" so chron::times() can parse it.
      chartime = ifelse(.token_ == "FDT", paste0(str_sub(chardate1, start = 12L, end = 16L), ":00"), NA)
    )
  # FIX: warn on genuinely partial values, determined from the token.
  # The old check, any(is.na(final)), also fired for full-date rows whose
  # dtm/tm are NA by construction.
  if (any(one$.token_ == "PD")) {
    warning("Input datetime character variable contains partial datetime values.")
  }
  # Full datetimes: derive datetime, date, and time-of-day.
  two <- one %>%
    filter(.token_ == "FDT") %>%
    mutate(
      # FIX: wrap strptime() in as.POSIXct() -- strptime() returns POSIXlt,
      # which is unsafe as a data-frame column and does not bind_rows()
      # cleanly with the as.POSIXct(NA) used in the other branches.
      dtm = as.POSIXct(strptime(chardate1, format = "%Y-%m-%d %H:%M")),
      dt = as.Date(chardate2, "%Y-%m-%d"),
      tm = times(chartime)
    )
  # Full dates: date only; the datetime is missing.
  three <- one %>%
    filter(.token_ == "FD") %>%
    mutate(
      dt = as.Date(chardate2, "%Y-%m-%d"),
      dtm = as.POSIXct(NA)
    )
  # Partial dates: neither a date nor a datetime can be derived.
  four <- one %>%
    filter(.token_ == "PD") %>%
    mutate(
      dt = as.Date(NA),
      dtm = as.POSIXct(NA)
    )
  # Recombine and drop the helper columns.
  final <- bind_rows(two, three, four) %>%
    select(-.len_, -.token_, -chardate1, -chardate2, -chartime)
  return(final)
}
# Example
# Six values exercising all three token classes handled by dtvars():
# full datetimes (16 chars), a full date (10 chars), and partial dates
# ("2020-11", "1986"), which should trigger the partial-value warning.
birthdates <- data.frame(
  BIRTHDTC = c("2018-12-31", "1997-07-15T07:55", "2007-09-19T00:02", "1995-12-09T13:30", "2020-11", "1986"),
  NAME = c("Stefan", "Ritu", "Neil", "Sneha", "Damon", "Caroline")
)
# datetmchar is passed unquoted (tidy-eval column reference).
example <- dtvars(dsetin = birthdates, datetmchar = BIRTHDTC)
| /function_dtvars.R | no_license | sneha-lahorani/IASCT_poster2020 | R | false | false | 1,822 | r | #### ---- Function to obtain date, time, and datetime variables from character datetime string ---- ####
# Function
# Derive date (dt), datetime (dtm) and, for full datetimes, time-of-day (tm)
# columns from a character ISO-8601 datetime column.
# Rows are classified by string length: 16 = full datetime ("FDT"),
# 10 = full date ("FD"), anything else = partial date ("PD").
# NOTE(review): "gsktable" is not a CRAN package and is never used below;
# presumably a typo -- confirm the intended package.
# NOTE(review): `{{ dsetin }}` outside a data-masking verb evaluates to
# plain `dsetin`; the embracing is only needed for `datetmchar`.
# NOTE(review): the any(is.na(final)) warning below also fires for
# full-date rows, whose dtm/tm are NA by construction -- not only for
# genuinely partial values.
# NOTE(review): bind_rows() groups rows by token, so the output row order
# differs from the input order.
dtvars <- function(dsetin, datetmchar) {
  # Install any missing packages, then attach them.
  ipak <- function(pkg) {
    new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
    if (length(new.pkg)) {
      install.packages(new.pkg, dependencies = TRUE)
    }
    sapply(pkg, require, character.only = TRUE)
  }
  ipak(c("tidyverse", "gsktable", "chron"))
  # Tag each row with its completeness token and split the string into
  # date ("YYYY-MM-DD") and time ("HH:MM" -> "HH:MM:00") parts.
  one <- {{ dsetin }} %>%
    mutate(
      .len_ = str_length({{ datetmchar }}),
      .token_ = if_else(.len_ == 16, "FDT",
        if_else(.len_ == 10, "FD", "PD")
      ),
      chardate1 = str_replace({{ datetmchar }}, "T", " "),
      chardate2 = str_sub(chardate1, start = 1L, end = 10L),
      chartime = ifelse(.token_ == "FDT", paste0(str_sub(chardate1, start = 12L, end = 16L), ":00"), NA)
    )
  # Full datetimes: datetime, date and time-of-day.
  # NOTE(review): strptime() returns POSIXlt, unlike the as.POSIXct(NA)
  # used in the other branches -- confirm bind_rows() handles the mix.
  two <- one %>%
    filter(.token_ == "FDT") %>%
    mutate(
      dtm = strptime(chardate1, format = "%Y-%m-%d %H:%M"),
      dt = as.Date(chardate2, "%Y-%m-%d"),
      tm = times(chartime)
    )
  # Full dates: date only; datetime is missing by construction.
  three <- one %>%
    filter(.token_ == "FD") %>%
    mutate(
      dt = as.Date(chardate2, "%Y-%m-%d"),
      dtm = as.POSIXct(NA)
    )
  # Partial dates: nothing can be derived.
  four <- one %>%
    filter(.token_ == "PD") %>%
    mutate(
      dt = as.Date(NA),
      dtm = as.POSIXct(NA)
    )
  # Recombine and drop the helper columns.
  final <- bind_rows(two, three, four) %>%
    select(-.len_, -.token_, -chardate1, -chardate2, -chartime)
  if (any(is.na(final))) {
    warning("Input datetime character variable contains partial datetime values.")
  }
  return(final)
}
# Example
# Covers all three token classes: full datetimes, one full date, and two
# partial dates that should trigger the warning inside dtvars().
birthdates <- data.frame(
  BIRTHDTC = c("2018-12-31", "1997-07-15T07:55", "2007-09-19T00:02", "1995-12-09T13:30", "2020-11", "1986"),
  NAME = c("Stefan", "Ritu", "Neil", "Sneha", "Damon", "Caroline")
)
# The datetime column is passed as an unquoted name (tidy evaluation).
example <- dtvars(dsetin = birthdates, datetmchar = BIRTHDTC)
|
testlist <- list(x1 = numeric(0), x2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y1 = 2.81776900841821e-202, y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) | /palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968796-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 336 | r | testlist <- list(x1 = numeric(0), x2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y1 = 2.81776900841821e-202, y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) |
library(epanet2toolkit)
### Name: ENgetnodeindex
### Title: Retrieve the index of a node
### Aliases: ENgetnodeindex
### ** Examples
# path to Net1.inp example file included with this package
inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp")
# Open an EPANET toolkit session on the example network; the report file
# "Net1.rpt" is written to the current working directory.
ENopen( inp, "Net1.rpt")
# Look up node indices by their ID strings.
ENgetnodeindex("10")
ENgetnodeindex("23")
# Close the session to release the toolkit's project state.
ENclose()
| /data/genthat_extracted_code/epanet2toolkit/examples/ENgetnodeindex.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 350 | r | library(epanet2toolkit)
### Name: ENgetnodeindex
### Title: Retrieve the index of a node
### Aliases: ENgetnodeindex
### ** Examples
# path to Net1.inp example file included with this package
inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp")
ENopen( inp, "Net1.rpt")
ENgetnodeindex("10")
ENgetnodeindex("23")
ENclose()
|
## Semantic similarity between two genes.
##
## Each gene is mapped to its GO annotations (annotations with the evidence
## codes in `drop` are excluded), the two GO term sets are scored pairwise
## with the chosen `measure`, and the pairwise scores are collapsed into one
## gene-level score with `combine`.
## Returns list(geneSim = <score rounded to 3 digits, or NA>,
##              GO1 = <terms of gene1>, GO2 = <terms of gene2>);
## geneSim is NA when either gene has no usable GO annotation.
geneSim <-
function(gene1, gene2, ont="MF", organism="human", measure="Wang", drop="IEA", combine="rcmax.avg"){
    ## Validate the enumerated arguments up front.
    ontology <- match.arg(ont, c("MF", "BP", "CC"))
    metric   <- match.arg(measure, c("Resnik", "Jiang", "Lin", "Rel", "Wang"))
    combiner <- match.arg(combine, c("max", "average", "rcmax", "rcmax.avg"))
    ## The package environment must exist before the species list is read.
    if (!exists("GOSemSimEnv")) .initial()
    species <- match.arg(organism, get("SupportedSpecies", envir=GOSemSimEnv))
    ## Look up GO annotations for both genes (IDs coerced to character).
    terms1 <- ygcGetOnt(as.character(gene1), organism=species, ontology=ontology, dropCodes=drop)
    terms2 <- ygcGetOnt(as.character(gene2), organism=species, ontology=ontology, dropCodes=drop)
    ## No usable annotation on either side -> similarity is undefined.
    if (all(is.na(terms1)) || all(is.na(terms2))) {
        return(list(geneSim=NA, GO1=terms1, GO2=terms2))
    }
    score <- round(mgoSim(terms1, terms2, ontology, species, metric, combiner), digits=3)
    list(geneSim=score, GO1=terms1, GO2=terms2)
}
| /2X/2.7/GOSemSim/R/geneSim.R | no_license | GuangchuangYu/bioc-release | R | false | false | 910 | r | geneSim <-
function(gene1, gene2, ont="MF", organism="human", measure="Wang", drop="IEA", combine="rcmax.avg"){
gene1 <- as.character(gene1)
gene2 <- as.character(gene2)
wh_ont <- match.arg(ont, c("MF", "BP", "CC"))
wh_measure <- match.arg(measure, c("Resnik", "Jiang", "Lin", "Rel", "Wang"))
if(!exists("GOSemSimEnv")) .initial()
wh_organism <- match.arg(organism, get("SupportedSpecies",envir=GOSemSimEnv))
wh_combine <- match.arg(combine, c("max", "average", "rcmax", "rcmax.avg"))
go1 <- ygcGetOnt(gene1, organism= wh_organism, ontology= wh_ont, dropCodes=drop)
go2 <- ygcGetOnt(gene2, organism= wh_organism, ontology= wh_ont, dropCodes=drop)
if (sum(!is.na(go1)) == 0 || sum(!is.na(go2)) == 0) {
return (list(geneSim=NA, GO1=go1, GO2=go2))
}
sim <- mgoSim(go1,go2, wh_ont, wh_organism, wh_measure, wh_combine)
sim <- round(sim, digits=3)
return (list(geneSim=sim, GO1=go1, GO2=go2))
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
adamFitterWrap <- function(matrixVt, matrixWt, matrixF, vectorG, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, vectorYt, vectorOt, backcast) {
.Call('_smooth_adamFitterWrap', PACKAGE = 'smooth', matrixVt, matrixWt, matrixF, vectorG, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, vectorYt, vectorOt, backcast)
}
adamForecasterWrap <- function(matrixWt, matrixF, lags, profilesObserved, profilesRecent, E, T, S, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon) {
.Call('_smooth_adamForecasterWrap', PACKAGE = 'smooth', matrixWt, matrixF, lags, profilesObserved, profilesRecent, E, T, S, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon)
}
adamErrorerWrap <- function(matrixVt, matrixWt, matrixF, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon, vectorYt, vectorOt) {
.Call('_smooth_adamErrorerWrap', PACKAGE = 'smooth', matrixVt, matrixWt, matrixF, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon, vectorYt, vectorOt)
}
adamRefitterWrap <- function(matrixYt, matrixOt, arrayVt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfilesRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant) {
.Call('_smooth_adamRefitterWrap', PACKAGE = 'smooth', matrixYt, matrixOt, arrayVt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfilesRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant)
}
adamReforecasterWrap <- function(arrayErrors, arrayOt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfileRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant) {
.Call('_smooth_adamReforecasterWrap', PACKAGE = 'smooth', arrayErrors, arrayOt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfileRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant)
}
adamSimulatorWrap <- function(arrayVt, matrixErrors, matrixOt, arrayF, matrixWt, matrixG, E, T, S, lags, profilesObserved, profilesRecent, nSeasonal, componentsNumber, nArima, nXreg, constant) {
.Call('_smooth_adamSimulatorWrap', PACKAGE = 'smooth', arrayVt, matrixErrors, matrixOt, arrayF, matrixWt, matrixG, E, T, S, lags, profilesObserved, profilesRecent, nSeasonal, componentsNumber, nArima, nXreg, constant)
}
matrixPowerWrap <- function(matA, power) {
.Call('_smooth_matrixPowerWrap', PACKAGE = 'smooth', matA, power)
}
initparams <- function(Etype, Ttype, Stype, datafreq, obsR, obsallR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs) {
.Call('_smooth_initparams', PACKAGE = 'smooth', Etype, Ttype, Stype, datafreq, obsR, obsallR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs)
}
etsmatrices <- function(matvt, vecg, phi, Cvalues, ncomponentsR, lagsModel, fittertype, Ttype, Stype, nexovars, matat, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg, matFX, vecgX, gowild, estimFX, estimgX, estiminitX) {
.Call('_smooth_etsmatrices', PACKAGE = 'smooth', matvt, vecg, phi, Cvalues, ncomponentsR, lagsModel, fittertype, Ttype, Stype, nexovars, matat, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg, matFX, vecgX, gowild, estimFX, estimgX, estiminitX)
}
polysoswrap <- function(ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, vecg, matF, fittertype, nexovars, matat, matFX, vecgX, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, ssarimaOld, lagsModelR, nonZeroARI, nonZeroMA) {
.Call('_smooth_polysoswrap', PACKAGE = 'smooth', ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, vecg, matF, fittertype, nexovars, matat, matFX, vecgX, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, ssarimaOld, lagsModelR, nonZeroARI, nonZeroMA)
}
fitterwrap <- function(matvt, matF, matw, yt, vecg, lagsModel, Etype, Ttype, Stype, fittertype, matxt, matat, matFX, vecgX, ot) {
.Call('_smooth_fitterwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, lagsModel, Etype, Ttype, Stype, fittertype, matxt, matat, matFX, vecgX, ot)
}
forecasterwrap <- function(matvt, matF, matw, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX) {
.Call('_smooth_forecasterwrap', PACKAGE = 'smooth', matvt, matF, matw, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX)
}
errorerwrap <- function(matvt, matF, matw, yt, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX, ot) {
.Call('_smooth_errorerwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX, ot)
}
optimizerwrap <- function(matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, SDerror) {
.Call('_smooth_optimizerwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, SDerror)
}
costfunc <- function(matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, bounds, SDerror) {
.Call('_smooth_costfunc', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, bounds, SDerror)
}
costfuncARIMA <- function(ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, matF, matw, yt, vecg, h, lagsModelR, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, nexovars, matxt, matat, matFX, vecgX, ot, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, bounds, ssarimaOld, nonZeroARI, nonZeroMA, SDerror) {
.Call('_smooth_costfuncARIMA', PACKAGE = 'smooth', ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, matF, matw, yt, vecg, h, lagsModelR, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, nexovars, matxt, matat, matFX, vecgX, ot, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, bounds, ssarimaOld, nonZeroARI, nonZeroMA, SDerror)
}
occurenceFitterWrap <- function(matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX) {
.Call('_smooth_occurenceFitterWrap', PACKAGE = 'smooth', matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX)
}
occurrenceOptimizerWrap <- function(matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX, bounds) {
.Call('_smooth_occurrenceOptimizerWrap', PACKAGE = 'smooth', matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX, bounds)
}
occurenceGeneralFitterWrap <- function(ot, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB) {
.Call('_smooth_occurenceGeneralFitterWrap', PACKAGE = 'smooth', ot, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB)
}
occurrenceGeneralOptimizerWrap <- function(ot, bounds, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB) {
.Call('_smooth_occurrenceGeneralOptimizerWrap', PACKAGE = 'smooth', ot, bounds, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB)
}
simulatorwrap <- function(arrvt, matErrors, matot, matF, matw, matg, Etype, Ttype, Stype, modellags) {
.Call('_smooth_simulatorwrap', PACKAGE = 'smooth', arrvt, matErrors, matot, matF, matw, matg, Etype, Ttype, Stype, modellags)
}
| /R/RcppExports.R | no_license | config-i1/smooth | R | false | false | 8,459 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
adamFitterWrap <- function(matrixVt, matrixWt, matrixF, vectorG, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, vectorYt, vectorOt, backcast) {
.Call('_smooth_adamFitterWrap', PACKAGE = 'smooth', matrixVt, matrixWt, matrixF, vectorG, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, vectorYt, vectorOt, backcast)
}
adamForecasterWrap <- function(matrixWt, matrixF, lags, profilesObserved, profilesRecent, E, T, S, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon) {
.Call('_smooth_adamForecasterWrap', PACKAGE = 'smooth', matrixWt, matrixF, lags, profilesObserved, profilesRecent, E, T, S, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon)
}
adamErrorerWrap <- function(matrixVt, matrixWt, matrixF, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon, vectorYt, vectorOt) {
.Call('_smooth_adamErrorerWrap', PACKAGE = 'smooth', matrixVt, matrixWt, matrixF, lags, profilesObserved, profilesRecent, Etype, Ttype, Stype, componentsNumberETS, nSeasonal, nArima, nXreg, constant, horizon, vectorYt, vectorOt)
}
adamRefitterWrap <- function(matrixYt, matrixOt, arrayVt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfilesRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant) {
.Call('_smooth_adamRefitterWrap', PACKAGE = 'smooth', matrixYt, matrixOt, arrayVt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfilesRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant)
}
adamReforecasterWrap <- function(arrayErrors, arrayOt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfileRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant) {
.Call('_smooth_adamReforecasterWrap', PACKAGE = 'smooth', arrayErrors, arrayOt, arrayF, arrayWt, matrixG, E, T, S, lags, profilesObserved, arrayProfileRecent, nSeasonal, componentsNumberETS, nArima, nXreg, constant)
}
adamSimulatorWrap <- function(arrayVt, matrixErrors, matrixOt, arrayF, matrixWt, matrixG, E, T, S, lags, profilesObserved, profilesRecent, nSeasonal, componentsNumber, nArima, nXreg, constant) {
.Call('_smooth_adamSimulatorWrap', PACKAGE = 'smooth', arrayVt, matrixErrors, matrixOt, arrayF, matrixWt, matrixG, E, T, S, lags, profilesObserved, profilesRecent, nSeasonal, componentsNumber, nArima, nXreg, constant)
}
matrixPowerWrap <- function(matA, power) {
.Call('_smooth_matrixPowerWrap', PACKAGE = 'smooth', matA, power)
}
initparams <- function(Etype, Ttype, Stype, datafreq, obsR, obsallR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs) {
.Call('_smooth_initparams', PACKAGE = 'smooth', Etype, Ttype, Stype, datafreq, obsR, obsallR, yt, damped, phi, smoothingparameters, initialstates, seasonalcoefs)
}
etsmatrices <- function(matvt, vecg, phi, Cvalues, ncomponentsR, lagsModel, fittertype, Ttype, Stype, nexovars, matat, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg, matFX, vecgX, gowild, estimFX, estimgX, estiminitX) {
.Call('_smooth_etsmatrices', PACKAGE = 'smooth', matvt, vecg, phi, Cvalues, ncomponentsR, lagsModel, fittertype, Ttype, Stype, nexovars, matat, estimpersistence, estimphi, estiminit, estiminitseason, estimxreg, matFX, vecgX, gowild, estimFX, estimgX, estiminitX)
}
polysoswrap <- function(ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, vecg, matF, fittertype, nexovars, matat, matFX, vecgX, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, ssarimaOld, lagsModelR, nonZeroARI, nonZeroMA) {
.Call('_smooth_polysoswrap', PACKAGE = 'smooth', ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, vecg, matF, fittertype, nexovars, matat, matFX, vecgX, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, ssarimaOld, lagsModelR, nonZeroARI, nonZeroMA)
}
fitterwrap <- function(matvt, matF, matw, yt, vecg, lagsModel, Etype, Ttype, Stype, fittertype, matxt, matat, matFX, vecgX, ot) {
.Call('_smooth_fitterwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, lagsModel, Etype, Ttype, Stype, fittertype, matxt, matat, matFX, vecgX, ot)
}
forecasterwrap <- function(matvt, matF, matw, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX) {
.Call('_smooth_forecasterwrap', PACKAGE = 'smooth', matvt, matF, matw, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX)
}
errorerwrap <- function(matvt, matF, matw, yt, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX, ot) {
.Call('_smooth_errorerwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, h, Etype, Ttype, Stype, lagsModel, matxt, matat, matFX, ot)
}
optimizerwrap <- function(matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, SDerror) {
.Call('_smooth_optimizerwrap', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, SDerror)
}
costfunc <- function(matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, bounds, SDerror) {
.Call('_smooth_costfunc', PACKAGE = 'smooth', matvt, matF, matw, yt, vecg, h, lagsModel, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, matxt, matat, matFX, vecgX, ot, bounds, SDerror)
}
costfuncARIMA <- function(ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, matF, matw, yt, vecg, h, lagsModelR, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, nexovars, matxt, matat, matFX, vecgX, ot, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, bounds, ssarimaOld, nonZeroARI, nonZeroMA, SDerror) {
.Call('_smooth_costfuncARIMA', PACKAGE = 'smooth', ARorders, MAorders, Iorders, ARIMAlags, nComp, AR, MA, constant, Cvalues, matvt, matF, matw, yt, vecg, h, lagsModelR, Etype, Ttype, Stype, multisteps, CFt, normalizer, fittertype, nexovars, matxt, matat, matFX, vecgX, ot, estimAR, estimMA, requireConst, estimConst, estimxreg, gowild, estimFX, estimgX, estiminitX, bounds, ssarimaOld, nonZeroARI, nonZeroMA, SDerror)
}
occurenceFitterWrap <- function(matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX) {
.Call('_smooth_occurenceFitterWrap', PACKAGE = 'smooth', matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX)
}
occurrenceOptimizerWrap <- function(matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX, bounds) {
.Call('_smooth_occurrenceOptimizerWrap', PACKAGE = 'smooth', matvt, matF, matw, vecg, ot, modellags, Etype, Ttype, Stype, Otype, matxt, matat, matFX, vecgX, bounds)
}
occurenceGeneralFitterWrap <- function(ot, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB) {
.Call('_smooth_occurenceGeneralFitterWrap', PACKAGE = 'smooth', ot, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB)
}
occurrenceGeneralOptimizerWrap <- function(ot, bounds, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB) {
.Call('_smooth_occurrenceGeneralOptimizerWrap', PACKAGE = 'smooth', ot, bounds, modellagsA, EtypeA, TtypeA, StypeA, matvtA, matFA, matwA, vecgA, matxtA, matatA, matFXA, vecgXA, modellagsB, EtypeB, TtypeB, StypeB, matvtB, matFB, matwB, vecgB, matxtB, matatB, matFXB, vecgXB)
}
simulatorwrap <- function(arrvt, matErrors, matot, matF, matw, matg, Etype, Ttype, Stype, modellags) {
.Call('_smooth_simulatorwrap', PACKAGE = 'smooth', arrvt, matErrors, matot, matF, matw, matg, Etype, Ttype, Stype, modellags)
}
|
#' Plot aneuploidy status along the genome
#'
#' Draws each segment of `dat` as a horizontal bar at y = +1 (amplified),
#' 0 (neutral) or -1 (deleted), with centromere boundaries as dashed
#' vertical lines and, when several chromosomes are plotted, alternating
#' shaded bands marking the chromosomes.
#'
#' @param dat data.frame of segments that has been run through
#'   getAneuploidy; must contain `chromosome`, `start.pos`, `end.pos` and
#'   the aneuploidy call `a.stat`. May hold one chromosome or many.
#' @param ref reference genome: "grch37" (default) or "grch38".
#' @param X.include if TRUE include chrX, otherwise chr1-chr22 only.
#' @return a ggplot object; it is plotted by the caller so image
#'   parameters can be adjusted.
plotAneuploidy <- function( dat, ref = "grch37", X.include = FALSE )
{
  # i.int holds the integer index of the chromosome last processed in the
  # offset loop below; it stays 1 when a single chromosome is supplied,
  # which selects the single-chromosome axis branch at the end.
  i.int <- 1
  # Pick the reference table (chromosome sizes + centromere coordinates).
  if( ref == "grch37" )
  {
    ref.dat <- grch37.ref.dat
  } else
  if( ref == "grch38" )
  {
    ref.dat <- grch38.ref.dat
  } else
  {
    # FIX: single informative error instead of print() followed by stop().
    stop(paste(ref, "is not a valid reference genome. Select one of: grch37, grch38"), call. = FALSE)
  }
  chr.labs <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
                "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19",
                "chr20", "chr21", "chr22", "chrX")
  if( X.include == FALSE)
  {
    # Drop chrX from both the reference table and the axis labels.
    ref.dat <- ref.dat[1:22,]
    chr.labs <- chr.labs[1:22]
  }
  # NOTE(review): wholesale levels<- assignment relabels positionally; it
  # assumes dat's factor levels already match the reference order (and the
  # Amplified/Neutral/Deleted order for a.stat) -- confirm upstream.
  levels(dat$chromosome) <- levels(ref.dat$chromosome)
  # BUG FIX: this level previously read "Amplifed" (typo). The comparisons
  # below test for "Amplified", so amplified segments were silently drawn
  # at y = 0 and the legend was misspelled.
  levels(dat$a.stat) <- c("Amplified", "Neutral", "Deleted")
  # If there is more than one chromosome, start/end positions of the
  # segments and the centromere reference points must be shifted from
  # per-chromosome coordinates to genome-wide coordinates.
  ref.tmp <- ref.dat
  chr.size <- ref.dat$chr.size
  if(length(unique(dat$chromosome)) > 1)
  {
    chr.lab.pos <- rep(0, dim(ref.dat)[1])
    chr.lab.pos[1] <- chr.size[1] / 2
    for(i in ref.tmp$chromosome)
    {
      if( i == "chrX" )
      {
        i.int <- 23
      } else
      {
        i.int <- as.integer(gsub("chr", "", i))
      }
      # Shift chromosomes 2..n by the cumulative size of the chromosomes
      # before them; chr.size becomes cumulative end positions and
      # chr.lab.pos the genome-wide midpoint of each chromosome.
      if(i.int > 1)
      {
        offset <- sum(ref.tmp$chr.size[1:(i.int - 1)])
        dat$start.pos[dat$chromosome == i] <- dat$start.pos[dat$chromosome == i] + offset
        dat$end.pos[dat$chromosome == i] <- dat$end.pos[dat$chromosome == i] + offset
        ref.tmp$centromere.start[ref.tmp$chromosome == i] <- ref.tmp$centromere.start[ref.tmp$chromosome == i] + offset
        ref.tmp$centromere.end[ref.tmp$chromosome == i] <- ref.tmp$centromere.end[ref.tmp$chromosome == i] + offset
        chr.size[i.int] <- sum(ref.tmp$chr.size[1:i.int])
        chr.lab.pos[i.int] <- offset + (ref.tmp$chr.size[i.int] / 2)
      }
    }
  }
  # Vertical position of each segment: +1 amplified, -1 deleted, 0 neutral.
  y <- rep(0, dim(dat)[1])
  y[ dat$a.stat == "Amplified" ] <- 1
  y[ dat$a.stat == "Deleted" ] <- -1
  dat$y <- y
  if( length(unique(dat$chromosome)) > 1)
  {
    chr <- ref.tmp$chromosome
  } else
  {
    chr <- unique(dat$chromosome)
  }
  # FIX: bare column names inside aes() instead of dat$... (the dat$ form
  # bypasses ggplot's data masking and is fragile).
  p1 <- ggplot( ) +
    geom_segment(data = dat,
                 aes( x = start.pos, y = y, xend = end.pos, yend = y, colour = a.stat), size = 3) +
    scale_y_continuous(breaks = seq(from = -4, to = 4, by = 0.25), limits = c(-4,4)) +
    scale_color_manual(values = c("green", "blue", "red")) +
    # Dashed lines at the (offset-adjusted) centromere boundaries.
    geom_vline(xintercept = ref.tmp$centromere.start[ref.tmp$chromosome %in% chr], linetype = "dashed") +
    geom_vline(xintercept = ref.tmp$centromere.end[ref.tmp$chromosome %in% chr], linetype = "dashed") +
    xlab("Genomic Position") +
    ylab("Status") +
    ggtitle( "Aneuploidy") +
    labs(color = "Alteration Status", drop = FALSE) +
    theme_classic( ) +
    theme( axis.text.y = element_blank(), axis.ticks.y = element_blank() )
  # Multi-chromosome plot: shade every odd chromosome (1, 3, ..., 21) as a
  # light band and label chromosomes at their genome-wide midpoints.
  # FIX: one data-driven geom_rect layer replaces eleven copy-pasted ones.
  if( i.int > 1)
  {
    bands <- data.frame(
      xmin = c(0, chr.size[seq(2, 20, by = 2)]),
      xmax = chr.size[seq(1, 21, by = 2)]
    )
    p1 <- p1 + geom_rect(data = bands,
                         aes(xmin = xmin, xmax = xmax, ymin = -Inf, ymax = Inf),
                         alpha = 0.1, fill = "blue", inherit.aes = FALSE)
    p1 <- p1 + scale_x_discrete(drop = FALSE, limits = chr.lab.pos[ ref.tmp$chromosome %in% chr ],
                                labels = chr.labs )
  }
  if( X.include == TRUE)
  {
    # Shaded band for chrX (its cumulative start/end are rows 22 and 23).
    p1 <- p1 + geom_rect(aes(xmin = chr.size[22], xmax = chr.size[23], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue")
  }
  if( i.int == 1)
  {
    # Single chromosome: limit the x axis to that chromosome's length.
    p1 <- p1 + xlim(0, ref.tmp$chr.size[ref.tmp$chromosome == chr])
  }
  return(p1)
}
# ------------------------------------------------------------------------------- #
| /R/plotAneuploidy.R | no_license | maxwell-lab/HRDex | R | false | false | 5,892 | r |
#' plot aneuploidy
# -------------------------- plotAneuploidy -------------------------------------- #
# function to make a plot of the aneuploid data
# input: dat (data.frame), the seq data that has been run through getAneuploidy, so it
# contains aneuploid status. this can be either a single chromosome, or multiple
#
# output: p1 (ggplot.object), a ggplot object, plotted outside of this function so image parameters
# can be adjusted
plotAneuploidy <- function( dat, ref = "grch37", X.include = FALSE )
{
  # Plot per-segment aneuploidy status along the genome.
  #
  # dat:       data.frame produced by getAneuploidy, with columns
  #            chromosome, start.pos, end.pos and a.stat (alteration status).
  #            May contain a single chromosome or several.
  # ref:       reference genome, one of "grch37" or "grch38" (uses the
  #            package-level grch37.ref.dat / grch38.ref.dat tables).
  # X.include: if TRUE, chrX is included in the plot.
  #
  # Returns a ggplot object; plotting is done by the caller so image
  # parameters can be adjusted.
  i.int = 1
  if( ref == "grch37" )
  {
    ref.dat <- grch37.ref.dat
  } else
  if( ref == "grch38" )
  {
    ref.dat <- grch38.ref.dat
  } else
  {
    print(paste(ref, "is not a valid reference genome.", sep = " "))
    stop("select one of: grch37, grch38")
  }
  chr.labs <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
                "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19",
                "chr20", "chr21", "chr22", "chrX")
  if( X.include == FALSE)
  {
    ref.dat <- ref.dat[1:22,]
    chr.labs <- chr.labs[1:22]
  }
  levels(dat$chromosome) <- levels(ref.dat$chromosome)
  # BUGFIX: this previously read c("Amplifed", ...). Because of the typo,
  # the comparison dat$a.stat == "Amplified" below never matched, so
  # amplified segments were silently drawn at y = 0 (neutral).
  levels(dat$a.stat) <- c("Amplified", "Neutral", "Deleted")
  # If there is more than one chromosome, start/end positions of the segments
  # and reference points (centromeres, chromosome sizes, label positions) are
  # shifted from chromosome-relative to genome-relative coordinates by adding
  # the cumulative size of all preceding chromosomes.
  ref.tmp <- ref.dat
  chr.size = ref.dat$chr.size
  if(length(unique(dat$chromosome)) > 1)
  {
    chr.lab.pos <- rep(0, dim(ref.dat)[1])
    chr.lab.pos[1] <- chr.size[1] / 2
    for(i in ref.tmp$chromosome)
    {
      if( i == "chrX" )
      {
        i.int <- 23
      } else
      {
        i.int <- as.integer(gsub("chr", "", i))
      }
      # chr1 needs no offset; every later chromosome is shifted by the
      # summed sizes of the chromosomes before it
      if(i.int > 1)
      {
        dat$start.pos[dat$chromosome == i] <- dat$start.pos[dat$chromosome == i] + sum(ref.tmp$chr.size[1:(i.int-1)])
        dat$end.pos[dat$chromosome == i] <- dat$end.pos[dat$chromosome == i] + sum(ref.tmp$chr.size[1:(i.int-1)])
        ref.tmp$centromere.start[ref.tmp$chromosome == i] <- ref.tmp$centromere.start[ref.tmp$chromosome == i] + sum(ref.tmp$chr.size[1:(i.int-1)])
        ref.tmp$centromere.end[ref.tmp$chromosome == i] <- ref.tmp$centromere.end[ref.tmp$chromosome == i] + sum(ref.tmp$chr.size[1:(i.int-1)])
        chr.size[i.int] <- sum(ref.tmp$chr.size[1:i.int])
        chr.lab.pos[i.int] <- sum(ref.tmp$chr.size[1:(i.int - 1)]) + (ref.tmp$chr.size[i.int] / 2)
      }
    }
  }
  # y position encodes alteration status: +1 amplified, -1 deleted, 0 neutral
  y <- rep(0, dim(dat)[1])
  y[ dat$a.stat == "Amplified" ] <- 1
  y[ dat$a.stat == "Deleted" ] <- -1
  if( length(unique(dat$chromosome)) > 1)
  {
    chr = ref.tmp$chromosome
  } else
  {
    chr = unique(dat$chromosome)
  }
  p1 <- ggplot( ) +
    geom_segment(data = dat,
                 aes( x = dat$start.pos, y = y, xend = dat$end.pos, yend = y, colour = a.stat), size = 3) +
    scale_y_continuous(breaks = seq(from = -4, to = 4, by = 0.25), limits = c(-4,4)) +
    scale_color_manual(values = c("green", "blue", "red")) +
    # dashed vertical lines mark centromere boundaries
    geom_vline(xintercept = ref.tmp$centromere.start[ref.tmp$chromosome %in% chr], linetype = "dashed") +
    geom_vline(xintercept = ref.tmp$centromere.end[ref.tmp$chromosome %in% chr], linetype = "dashed") +
    xlab("Genomic Position") +
    ylab("Status") +
    ggtitle( "Aneuploidy") +
    labs(color = "Alteration Status", drop = FALSE) +
    theme_classic( ) +
    theme( axis.text.y = element_blank(), axis.ticks.y = element_blank() )
  # Multi-chromosome plot: shade every other chromosome so boundaries are
  # visible, and label the x axis at each chromosome midpoint.
  if( i.int > 1)
  {
    p1 <- p1 + geom_rect(aes(xmin = 0, xmax = chr.size[1], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[2], xmax = chr.size[3], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[4], xmax = chr.size[5], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[6], xmax = chr.size[7], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[8], xmax = chr.size[9], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[10], xmax = chr.size[11], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[12], xmax = chr.size[13], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[14], xmax = chr.size[15], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[16], xmax = chr.size[17], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[18], xmax = chr.size[19], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue") +
      geom_rect(aes(xmin = chr.size[20], xmax = chr.size[21], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue")
    p1 <- p1 + scale_x_discrete(drop = FALSE, limits = chr.lab.pos[ ref.tmp$chromosome %in% chr ],
                                labels = chr.labs )
  }
  if( X.include == TRUE)
  {
    p1 <- p1 + geom_rect(aes(xmin = chr.size[22], xmax = chr.size[23], ymin = -Inf, ymax = Inf), alpha = 0.1, fill = "blue")
  }
  # Single-chromosome plot: x axis spans just that chromosome.
  if( i.int == 1)
  {
    p1 <- p1 + xlim(0, ref.tmp$chr.size[ref.tmp$chromosome == chr])
  }
  return(p1)
}
# ------------------------------------------------------------------------------- #
|
# Expected PPM mixture predictions used as test reference data:
# one probability distribution over the alphabet (a, c, g) per position.
x <- list(
  list(
    c(a = 0.33333334, c = 0.33333334, g = 0.33333334),
    c(a = 0.6666667, c = 0.16666667, g = 0.16666667),
    c(a = 0.4, c = 0.2, g = 0.4),
    c(a = 0.33333334, c = 0.33333334, g = 0.33333334),
    c(a = 0.21052633, c = 0.5263158, g = 0.26315793),
    c(a = 0.25, c = 0.19999999, g = 0.54999995),
    c(a = 0.22727273, c = 0.22727273, g = 0.5454545),
    c(a = 0.5483871, c = 0.25806454, g = 0.1935484),
    c(a = 0.18750001, c = 0.53125006, g = 0.28125)))
# Expected PPM mixture predictions used as test reference data:
# one probability distribution over the alphabet (a, c, g) per position.
x <- list(
  list(
    c(a = 0.33333334, c = 0.33333334, g = 0.33333334),
    c(a = 0.6666667, c = 0.16666667, g = 0.16666667),
    c(a = 0.4, c = 0.2, g = 0.4),
    c(a = 0.33333334, c = 0.33333334, g = 0.33333334),
    c(a = 0.21052633, c = 0.5263158, g = 0.26315793),
    c(a = 0.25, c = 0.19999999, g = 0.54999995),
    c(a = 0.22727273, c = 0.22727273, g = 0.5454545),
    c(a = 0.5483871, c = 0.25806454, g = 0.1935484),
    c(a = 0.18750001, c = 0.53125006, g = 0.28125)))
# Problem 7.24 (Montgomery, Design and Analysis of Experiments):
# 2^3 factorial on tool life (hours), three replicates, run in two blocks.
# The three factors are coded -1 / +1; the regular factorial structure is
# expressed with rep() instead of spelling out all 24 values per column.
Problem7.24 <- data.frame(
  "Block" = paste("Block", c(1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 2,
                             2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 1)),
  "CuttingSpeed" = rep(c(-1, -1, -1, 1, 1, 1), times = 4),
  "ToolGeometry" = rep(rep(c(-1, 1), each = 6), times = 2),
  "CuttingAngle" = rep(c(-1, 1), each = 12),
  "LifeHours" = c(22, 31, 25, 32, 43, 29, 35, 34, 50, 55, 47, 46,
                  44, 45, 38, 40, 37, 36, 60, 50, 54, 39, 41, 47))
# Problem 7.24 (Montgomery, Design and Analysis of Experiments):
# 2^3 factorial on tool life (hours), three replicates, run in two blocks.
# The three factors are coded -1 / +1; the regular factorial structure is
# expressed with rep() instead of spelling out all 24 values per column.
Problem7.24 <- data.frame(
  "Block" = paste("Block", c(1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 2,
                             2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 1)),
  "CuttingSpeed" = rep(c(-1, -1, -1, 1, 1, 1), times = 4),
  "ToolGeometry" = rep(rep(c(-1, 1), each = 6), times = 2),
  "CuttingAngle" = rep(c(-1, 1), each = 12),
  "LifeHours" = c(22, 31, 25, 32, 43, 29, 35, 34, 50, 55, 47, 46,
                  44, 45, 38, 40, 37, 36, 60, 50, 54, 39, 41, 47))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_inclusionLevels.R
\name{inclusionLevelsUI}
\alias{inclusionLevelsUI}
\title{Interface of the alternative splicing event quantification module}
\usage{
inclusionLevelsUI(id, panel)
}
\arguments{
\item{id}{Character: identifier}
\item{panel}{Function to process HTML elements}
}
\value{
HTML elements
}
\description{
Interface of the alternative splicing event quantification module
}
| /man/inclusionLevelsUI.Rd | no_license | IlyaFinkelshteyn/psichomics | R | false | true | 468 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_inclusionLevels.R
\name{inclusionLevelsUI}
\alias{inclusionLevelsUI}
\title{Interface of the alternative splicing event quantification module}
\usage{
inclusionLevelsUI(id, panel)
}
\arguments{
\item{id}{Character: identifier}
\item{panel}{Function to process HTML elements}
}
\value{
HTML elements
}
\description{
Interface of the alternative splicing event quantification module
}
|
runEdgeR <- function(e) {
  # Standard edgeR analysis: TMM normalization, dispersion estimation and a
  # likelihood-ratio test on the condition coefficient.
  # e: ExpressionSet with a 'condition' factor in pData.
  # Returns list(pvals, padj); NA adjusted p-values are set to 1.
  mm <- model.matrix(~ pData(e)$condition)
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  dge <- estimateDisp(dge, mm)
  lrt <- glmLRT(glmFit(dge, mm))
  p <- lrt$table$PValue
  q <- p.adjust(p, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = p, padj = q)
}
runEdgeRRobust <- function(e) {
  # edgeR with robust dispersion estimation (observation weights downweight
  # outlier counts). Returns list(pvals, padj); NA adjusted p-values become 1.
  mm <- model.matrix(~ pData(e)$condition)
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  # robust settings follow robinson_lab/edgeR_robust/robust_simulation.R
  dge <- estimateGLMRobustDisp(dge, mm, maxit = 6)
  lrt <- glmLRT(glmFit(dge, mm))
  predbeta <- predFC(exprs(e), mm, offset = getOffset(dge), dispersion = dge$tagwise.dispersion)
  p <- lrt$table$PValue
  q <- p.adjust(p, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = p, padj = q)
}
runVoom <- function(e) {
  # limma-voom pipeline: precision weights from the mean-variance trend,
  # then moderated t-tests on the condition coefficient.
  # Returns list(pvals, padj, beta = log2 fold changes).
  mm <- model.matrix(~ condition, pData(e))
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  vm <- voom(dge, mm, plot = FALSE)
  fit <- eBayes(lmFit(vm, mm))
  tab <- topTable(fit, coef = ncol(mm), n = nrow(dge), sort.by = "none")
  q <- p.adjust(tab$P.Value, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = tab$P.Value, padj = q, beta = tab$logFC)
}
runDESeq2 <- function(e, retDDS=FALSE) {
  # DESeq2 Wald test with shrunken fold changes (betaPrior = TRUE).
  # Returns list(pvals, padj, beta); NA p-values / adjusted p-values become 1.
  library(DESeq2)
  dds <- DESeqDataSetFromMatrix(exprs(e), DataFrame(pData(e)), ~ condition)
  dds <- DESeq(dds, betaPrior = TRUE, quiet = TRUE)
  res <- results(dds)
  out <- list(pvals = res$pvalue, padj = res$padj, beta = res$log2FoldChange)
  out$pvals[is.na(out$pvals)] <- 1
  out$padj[is.na(out$padj)] <- 1
  out
}
runDESeq2_poscounts <- function(e, retDDS=FALSE) {
  # DESeq2 Wald test using "poscounts" size factors (geometric mean over
  # positive counts only — robust when every gene contains zeros).
  # Returns list(pvals, padj, beta); NA p-values / adjusted p-values become 1.
  library(DESeq2)
  dds <- DESeqDataSetFromMatrix(exprs(e), DataFrame(pData(e)), ~ condition)
  dds <- estimateSizeFactors(dds, type = "poscounts")
  dds <- estimateDispersions(dds)
  dds <- nbinomWaldTest(dds, betaPrior = TRUE)
  res <- results(dds)
  out <- list(pvals = res$pvalue, padj = res$padj, beta = res$log2FoldChange)
  out$pvals[is.na(out$pvals)] <- 1
  out$padj[is.na(out$padj)] <- 1
  out
}
.comboGroups <- function(truths)
# Function that returns a list of vectors of indices,
# where each vector refers to the rows with the same
# combination of TRUE/FALSE values in 'truths'.
#
# written by Aaron Lun
# Created 24 October 2014
{
# Integer packing will only work for 31 libraries at a time.
# Each chunk of up to 31 columns is packed into one integer key per row
# (bit i is set when truths[row, i] is TRUE); rows with identical
# TRUE/FALSE patterns therefore get identical keys in every chunk.
assembly <- list()
collected <- 0L
step <- 31L
bits <- as.integer(2^(1:step-1L))
while (collected < ncol(truths)) {
upper <- pmin(ncol(truths) - collected, step)
keys <- t(truths[,collected+1:upper,drop=FALSE]) * bits[1:upper]
assembly[[length(assembly)+1L]] <- as.integer(colSums(keys))
collected <- collected + step
}
# Figuring out the unique components.
# Rows are ordered by their key tuple; a row starts a new group whenever
# any chunk's key differs from the previous row's.
o <- do.call(order, assembly)
nr <- nrow(truths)
is.different <- logical(nr)
for (i in 1:length(assembly)) {
is.different <- is.different | c(TRUE, diff(assembly[[i]][o])!=0L)
}
first.of.each <- which(is.different)
last.of.each <- c(first.of.each[-1]-1L, nr)
# Returning the groups.
# Each element holds the original row indices sharing one pattern.
output <- list()
for (u in 1:length(first.of.each)) {
output[[u]] <- o[first.of.each[u]:last.of.each[u]]
}
return(output)
}
.residDF <- function(zero, design)
# Effective residual degrees of freedom after adjusting for exact zeros
# Gordon Smyth and Aaron Lun
# Created 6 Jan 2014. Last modified 2 Sep 2014
#
# zero:   logical matrix (genes x libraries), TRUE where the observation is
#         an exact (structural) zero that contributes no information.
# design: design matrix for the fitted GLM.
# Returns a per-gene vector of residual degrees of freedom.
{
nlib <- ncol(zero)
ncoef <- ncol(design)
nzero <- as.integer(rowSums(zero))
# Default is no zero
DF <- rep(nlib-ncoef,length(nzero))
# All zero case
DF[nzero==nlib] <- 0L
# Anything in between?
somezero <- nzero>0L & nzero<nlib
if(any(somezero)) {
zero2 <- zero[somezero,,drop=FALSE]
# group genes by identical zero pattern so the design rank is computed
# once per pattern rather than once per gene
groupings <- .comboGroups(zero2)
# Identifying the true residual d.f. for each of these rows.
DF2 <- nlib-nzero[somezero]
for (u in 1:length(groupings)) {
i <- groupings[[u]]
zeroi <- zero2[i[1],]
# subtract the rank of the design restricted to the non-zero libraries
DF2[i] <- DF2[i]-qr(design[!zeroi,,drop=FALSE])$rank
}
DF2 <- pmax(DF2, 0L)
DF[somezero] <- DF2
}
DF
}
estimateDispWeighted = function (y, design = NULL, prior.df = NULL, trend.method = "locfit", tagwise = TRUE, span = NULL, min.row.sum = 5, grid.length = 21,
grid.range = c(-10, 10), robust = FALSE, winsor.tail.p = c(0.05,
0.1), tol = 1e-06, weights=NULL)
{
#adjusted by Koen VdB on 04 March 2016
# edgeR's estimateDisp() extended with per-observation 'weights' (e.g. the
# zero-inflation weights from zeroWeightsLibSizeDispFast). The lines marked
# ###www are the only places where the weights enter, via glmFit /
# adjustedProfileLik. Everything else mirrors edgeR::estimateDisp:
# profile likelihoods are evaluated on a grid of dispersions
# (0.1 * 2^spline.pts) and combined by weighted likelihood empirical
# Bayes (WLEB) into common, trended and tagwise dispersions stored on 'y'.
if (!is(y, "DGEList"))
stop("y must be a DGEList")
trend.method <- match.arg(trend.method, c("none", "loess",
"locfit", "movingave"))
ntags <- nrow(y$counts)
nlibs <- ncol(y$counts)
offset <- getOffset(y)
AveLogCPM <- aveLogCPM(y)
offset <- expandAsMatrix(offset, dim(y))
# only genes passing this row-sum filter contribute to estimation
sel <- rowSums(y$counts) >= min.row.sum
spline.pts <- seq(from = grid.range[1], to = grid.range[2],
length = grid.length)
spline.disp <- 0.1 * 2^spline.pts
grid.vals <- spline.disp/(1 + spline.disp)
# l0: per-gene profile log-likelihood at each grid dispersion
l0 <- matrix(0, sum(sel), grid.length)
if (is.null(design)) {
# classic (group-based) mode; note: weights are NOT used on this path
cat("Design matrix not provided. Switch to the classic mode.\n")
group <- y$samples$group <- as.factor(y$samples$group)
if (length(levels(group)) == 1)
design <- matrix(1, nlibs, 1)
else design <- model.matrix(~group)
if (all(tabulate(group) <= 1)) {
warning("There is no replication, setting dispersion to NA.")
y$common.dispersion <- NA
return(y)
}
pseudo.obj <- y[sel, ]
q2q.out <- equalizeLibSizes(y[sel, ], dispersion = 0.01)
pseudo.obj$counts <- q2q.out$pseudo
ysplit <- splitIntoGroups(pseudo.obj)
delta <- optimize(commonCondLogLikDerDelta, interval = c(1e-04,
100/(100 + 1)), tol = tol, maximum = TRUE, y = ysplit,
der = 0)
delta <- delta$maximum
disp <- delta/(1 - delta)
q2q.out <- equalizeLibSizes(y[sel, ], dispersion = disp)
pseudo.obj$counts <- q2q.out$pseudo
ysplit <- splitIntoGroups(pseudo.obj)
for (j in 1:grid.length) for (i in 1:length(ysplit)) l0[,
j] <- condLogLikDerDelta(ysplit[[i]], grid.vals[j],
der = 0) + l0[, j]
}
else {
# GLM mode: adjusted profile likelihood per grid point, with weights
design <- as.matrix(design)
if (ncol(design) >= ncol(y$counts)) {
warning("No residual df: setting dispersion to NA")
y$common.dispersion <- NA
return(y)
}
glmfit <- glmFit(y$counts[sel, ], design, offset = offset[sel,
], dispersion = 0.05, prior.count = 0, weights=weights[sel,]) ###www
# observations fitted (and observed) as exact zeros carry no
# information; handle genes in groups sharing the same zero pattern
zerofit <- (glmfit$fitted.values < 1e-04) & (glmfit$counts <
1e-04)
by.group <- .comboGroups(zerofit)
for (subg in by.group) {
cur.nzero <- !zerofit[subg[1], ]
if (!any(cur.nzero)) {
next
}
if (all(cur.nzero)) {
redesign <- design
}
else {
# drop zero libraries and any design columns made redundant
redesign <- design[cur.nzero, , drop = FALSE]
QR <- qr(redesign)
redesign <- redesign[, QR$pivot[1:QR$rank], drop = FALSE]
if (nrow(redesign) == ncol(redesign)) {
next
}
}
last.beta <- NULL
for (i in 1:grid.length) {
out <- adjustedProfileLik(spline.disp[i], y = y$counts[sel,
][subg, cur.nzero, drop = FALSE], design = redesign,
offset = offset[sel, ][subg, cur.nzero, drop = FALSE],
start = last.beta, get.coef = TRUE, weights=weights[sel,][subg, cur.nzero, drop = FALSE]) ###www
l0[subg, i] <- out$apl
last.beta <- out$beta
}
}
}
# common + trended dispersion via weighted likelihood empirical Bayes
out.1 <- WLEB(theta = spline.pts, loglik = l0, covariate = AveLogCPM[sel],
trend.method = trend.method, span = span, individual = FALSE,
m0.out = TRUE)
y$common.dispersion <- 0.1 * 2^out.1$overall
disp.trend <- 0.1 * 2^out.1$trend
# filtered genes inherit the trend value at the lowest abundance
y$trended.dispersion <- rep(disp.trend[which.min(AveLogCPM[sel])],
ntags)
y$trended.dispersion[sel] <- disp.trend
y$trend.method <- trend.method
y$AveLogCPM <- AveLogCPM
y$span <- out.1$span
if (!tagwise)
return(y)
# estimate prior.df (amount of squeezing toward the trend) if not given
if (is.null(prior.df)) {
glmfit <- glmFit(y$counts[sel, ], design, offset = offset[sel,
], dispersion = disp.trend, prior.count = 0, weights=weights[sel,]) ###www
df.residual <- glmfit$df.residual
zerofit <- (glmfit$fitted.values < 1e-04) & (glmfit$counts <
1e-04)
# residual df corrected for uninformative exact zeros
df.residual <- .residDF(zerofit, design)
s2 <- glmfit$deviance/df.residual
s2[df.residual == 0] <- 0
s2 <- pmax(s2, 0)
s2.fit <- squeezeVar(s2, df = df.residual, covariate = AveLogCPM[sel],
robust = robust, winsor.tail.p = winsor.tail.p)
prior.df <- s2.fit$df.prior
}
ncoefs <- ncol(design)
prior.n <- prior.df/(nlibs - ncoefs)
if (trend.method != "none") {
y$tagwise.dispersion <- y$trended.dispersion
}
else {
y$tagwise.dispersion <- rep(y$common.dispersion, ntags)
}
# huge prior.n means "use the trend as-is"; cap it for numerical stability
too.large <- prior.n > 1e+06
if (!all(too.large)) {
temp.n <- prior.n
if (any(too.large)) {
temp.n[too.large] <- 1e+06
}
out.2 <- WLEB(theta = spline.pts, loglik = l0, prior.n = temp.n,
covariate = AveLogCPM[sel], trend.method = trend.method,
span = span, overall = FALSE, trend = FALSE, m0 = out.1$shared.loglik)
if (!robust) {
y$tagwise.dispersion[sel] <- 0.1 * 2^out.2$individual
}
else {
y$tagwise.dispersion[sel][!too.large] <- 0.1 * 2^out.2$individual[!too.large]
}
}
if (!robust) {
y$prior.df <- prior.df
y$prior.n <- prior.n
}
else {
y$prior.df <- y$prior.n <- rep(Inf, ntags)
y$prior.df[sel] <- prior.df
y$prior.n[sel] <- prior.n
}
y
}
zeroWeightsLibSizeDispFast <- function(counts, design, colData=NULL, initialWeightAt0=TRUE, maxit=100, plot=FALSE, plotW=FALSE, designZI=NULL, llTol=1e-4, normalization="TMM"){
# EM algorithm for a zero-inflated negative binomial mixture: returns a
# matrix of posterior weights w (genes x cells) giving, for each observed
# zero, the probability that it comes from the NB count component rather
# than the excess-zero component (non-zero observations get weight 1).
#
# counts:        count matrix or DGEList
# design:        design matrix for the NB (count) component
# colData:       sample annotation, required for the DESeq2 normalizations
# maxit:         maximum number of EM iterations
# designZI:      optional design matrix for the zero (logistic) component;
#                if NULL, the log effective library size is used
# llTol:         per-gene relative log-likelihood tolerance for convergence
# normalization: "TMM", "DESeq2" or "DESeq2_pos"
# NOTE(review): initialWeightAt0 is accepted but never used below.
require(edgeR) ; require(DESeq2)
if(plot | plotW) par(mfrow=c(1,plot+plotW))
if(normalization=="TMM"){
counts <- DGEList(counts)
counts = edgeR::calcNormFactors(counts)
} else if(normalization=="DESeq2"){
designFormula=as.formula(paste0("~",paste(names(attr(design,"contrasts")),collapse="+")))
dse = DESeqDataSetFromMatrix(counts, colData=colData, design=designFormula)
dse = DESeq2::estimateSizeFactors(dse)
counts <- DGEList(counts)
# edgeR norm factors are the reciprocal of DESeq2 size factors
counts$samples$norm.factors = 1/dse$sizeFactor
} else if(normalization=="DESeq2_pos"){
designFormula=as.formula(paste0("~",paste(names(attr(design,"contrasts")),collapse="+")))
dse = DESeqDataSetFromMatrix(counts, colData=colData, design=designFormula)
dse = DESeq2::estimateSizeFactors(dse, type="poscounts")
counts <- DGEList(counts)
counts$samples$norm.factors = 1/dse$sizeFactor
}
effLibSize <- counts$samples$lib.size*counts$samples$norm.factors
logEffLibSize <- log(effLibSize)
zeroId <- counts$counts==0
w <- matrix(1,nrow=nrow(counts),ncol=ncol(counts), dimnames=list(c(1:nrow(counts)), NULL))
## starting values based on P(zero) in the library
for(k in 1:ncol(w)) w[counts$counts[,k]==0,k] <- 1-mean(counts$counts[,k]==0)
llOld <- matrix(-1e4,nrow=nrow(counts),ncol=ncol(counts))
likCOld <- matrix(0,nrow=nrow(counts),ncol=ncol(counts))
converged=FALSE
j=0
for(i in 1:maxit){
j=j+1
zeroId <- counts$counts==0
counts$weights <- w
### M-step counts
#only estimate dispersions every 5 iterations
#if(i==1 | is.wholenumber(i/10)){
# dispersions are re-estimated only on the first iteration and after the
# convergence criterion is first met (speed optimization)
if(i==1 | converged){
counts <- estimateGLMCommonDisp(counts, design, interval=c(0,10))
counts <- estimateGLMTagwiseDisp(counts, design, prior.df=0, min.row.sum=1)
}
if(plot) plotBCV(counts)
fit <- glmFit(counts, design)
# NB likelihood of each observation under the count component
likC <- dnbinom(counts$counts, mu=fit$fitted.values, size=1/counts$tagwise.dispersion)
### M-step mixture parameter: model zero probability
successes <- colSums(1-w) #P(zero)
failures <- colSums(w) #1-P(zero)
if(is.null(designZI)){
zeroFit <- glm(cbind(successes,failures) ~ logEffLibSize, family="binomial")} else{
zeroFit <- glm(cbind(successes,failures) ~-1+designZI, family="binomial")}
pi0Hat <- predict(zeroFit,type="response")
## E-step: Given estimated parameters, calculate expected value of weights
pi0HatMat <- expandAsMatrix(pi0Hat,dim=dim(counts),byrow=TRUE)
# 1e-15 guards against division by zero
w <- 1-pi0HatMat*zeroId/(pi0HatMat*zeroId+(1-pi0HatMat)*likC*zeroId+1e-15)
## data log-likelihood
if(i>1) llOld=ll
ll <- log(pi0HatMat*zeroId + (1-pi0HatMat)*likC)
delta <- (rowSums(ll)-rowSums(llOld))/(rowSums(llOld)+llTol)
# converged when 99.9% of genes moved less than llTol; requires the
# criterion to hold again immediately after a dispersion update (j==1)
if(mean(abs(delta) < llTol)>.999){ #if 99.9% has converged
if(j==1 & mean(abs(delta) < llTol)>.999){ #final convergence?
cat(paste0("converged. \n")) ; return(w)}
j=0
converged=TRUE} else {converged=FALSE}
cat(paste0("iteration: ",i,". mean conv.: ",mean(abs(delta) < llTol),"\n"))
if(plotW) hist(w[zeroId],main=paste0("iteration: ",i,". mean conv.: ",mean(abs(delta) < llTol)))
}
return(w)
}
pvalueAdjustment_kvdb <- function(baseMean, filter, pValue,
theta, alpha=0.05, pAdjustMethod="BH") {
# BH adjustment with independent filtering, following DESeq2's internal
# pvalueAdjustment: choose the filter quantile that (nearly) maximizes the
# number of rejections at level alpha, then report the adjusted p-values
# from that filtering threshold.
#
# baseMean: default filter statistic (mean of normalized counts)
# filter:   filter statistic; defaults to baseMean when missing
# pValue:   raw p-values
# theta:    candidate filter quantiles; derived from 'filter' when missing
# perform independent filtering
if (missing(filter)) {
filter <- baseMean
}
if (missing(theta)) {
lowerQuantile <- mean(filter == 0)
if (lowerQuantile < .95) upperQuantile <- .95 else upperQuantile <- 1
theta <- seq(lowerQuantile, upperQuantile, length=50)
}
# do filtering using genefilter
stopifnot(length(theta) > 1)
filtPadj <- filtered_p(filter=filter, test=pValue,
theta=theta, method=pAdjustMethod)
numRej <- colSums(filtPadj < alpha, na.rm = TRUE)
# prevent over-aggressive filtering when all genes are null,
# by requiring the max number of rejections is above a fitted curve.
# If the max number of rejection is not greater than 10, then don't
# perform independent filtering at all.
lo.fit <- lowess(numRej ~ theta, f=1/5)
if (max(numRej) <= 10) {
j <- 1
} else {
residual <- if (all(numRej==0)) {
0
} else {
numRej[numRej > 0] - lo.fit$y[numRej > 0]
}
thresh <- max(lo.fit$y) - sqrt(mean(residual^2))
# first quantile whose rejection count exceeds the fitted curve minus
# one RMS residual; fall back to no filtering (j = 1)
j <- if (any(numRej > thresh)) {
which(numRej > thresh)[1]
} else {
1
}
}
padj <- filtPadj[, j, drop=TRUE]
cutoffs <- quantile(filter, theta)
filterThreshold <- cutoffs[j]
filterNumRej <- data.frame(theta=theta, numRej=numRej)
filterTheta <- theta[j]
return(list(padj=padj, filterThreshold=filterThreshold, filterTheta=filterTheta, filterNumRej = filterNumRej, lo.fit=lo.fit, alpha=alpha))
}
# Loads glmLRTOld(), used by runEdgeREMLibSize below.
# NOTE(review): hard-coded absolute path into a personal Dropbox -- this
# breaks on any other machine; consider a relative path or here::here().
source("~/Dropbox/phdKoen/singleCell/githubPaper/singleCellPaper/method/glmLRTOld.R")
runEdgeREMLibSize = function(e){
  # zingeR-style edgeR analysis: EM-estimated zero-inflation weights (from
  # effective library size + picking session), weighted dispersion
  # estimation, F-test via glmLRTOld with zero-adjusted residual df, and
  # independent filtering for multiple-testing adjustment.
  # e: ExpressionSet with 'condition' and "Picking sessions" in pData.
  # Returns list(pvals, padj) with NA entries replaced by 1.
  design <- model.matrix(~ pData(e)$condition)
  library(edgeR) ; library(genefilter)
  d <- DGEList(exprs(e))
  d <- edgeR::calcNormFactors(d)
  # zero model covariates: log effective library size and picking session
  effLogLibSize = log(d$samples$lib.size * d$samples$norm.factors)
  pickingSession = pData(e)[, "Picking sessions"]
  designZI = model.matrix(~effLogLibSize + pickingSession)
  zeroWeights = zeroWeightsLibSizeDispFast(d, design, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, designZI=designZI)
  d$weights = zeroWeights
  d = estimateDispWeighted(d, design, weights=zeroWeights, grid.range=c(-15,15))
  edger.fit <- glmFit(d, design) # uses the observation weights
  # residual df reduced by the downweighted (excess) zeros
  edger.fit$df.residual <- rowSums(edger.fit$weights) - ncol(design)
  edger.lrt <- glmLRTOld(edger.fit, coef=2, test="F")
  pvals <- edger.lrt$table$PValue
  baseMean = unname(rowMeans(sweep(d$counts, 2, d$samples$norm.factors, FUN="*")))
  hlp <- pvalueAdjustment_kvdb(baseMean=baseMean, pValue=pvals)
  padj <- hlp$padj
  padj[is.na(padj)] <- 1
  # BUGFIX: the original ran out[is.na(out)] <- 1 on the result *list*;
  # is.na() on a list is FALSE for vector elements, so NA p-values were
  # never replaced. Replace them directly in the vector instead (after
  # adjustment, so padj is unaffected).
  pvals[is.na(pvals)] <- 1
  list(pvals = pvals, padj = padj)
}
runScde <- function(e){
  # SCDE differential expression: fit per-cell error models, build an
  # expression-magnitude prior, then test for expression differences
  # between the two condition groups. Returns list(Z, pvals, padj) with
  # two-sided normal p-values from the Z scores.
  require(scde)
  err.models <- scde.error.models(counts = exprs(e), groups = pData(e)$condition, n.cores = 1,
                                  threshold.segmentation = TRUE, save.crossfit.plots = FALSE,
                                  save.model.plots = FALSE, verbose = 0)
  expr.prior <- scde.expression.prior(models = err.models, counts = exprs(e),
                                      length.out = 400, show.plot = FALSE)
  de <- scde.expression.difference(err.models, exprs(e), expr.prior,
                                   groups = pData(e)$condition,
                                   n.randomizations = 100, n.cores = 1, verbose = 0)
  p <- 2 * (1 - pnorm(abs(de$Z)))
  q <- p.adjust(p, method = "BH")
  list(Z = de$Z, pvals = p, padj = q)
}
runMAST <- function(e){
  # MAST hurdle model on log-like TPM values: logistic (detection) plus
  # Gaussian (expression) components, adjusted for the centered cellular
  # detection rate (cngeneson). Tests the group effect and returns the
  # combined ('hurdle') likelihood-ratio p-values.
  # Returns list(pvals, padj).
  require(MAST)
  counts = exprs(e)
  # BUGFIX: the original computed counts*1e6/colSums(counts). Dividing a
  # matrix by a vector recycles along ROWS (genes), not columns (cells),
  # so each count was divided by the wrong cell's total. sweep() divides
  # each column by its own library size.
  tpm <- sweep(counts, 2, colSums(counts), "/") * 1e6
  sca <- FromMatrix('SingleCellAssay', t(tpm), cData = data.frame(group = pData(e)$condition))
  # cellular detection rate: fraction of genes detected in each cell
  ngeneson <- apply(exprs(sca), 1, function(x) mean(x > 0))
  CD <- cData(sca)
  CD$ngeneson <- ngeneson
  CD$cngeneson <- CD$ngeneson - mean(ngeneson)
  cData(sca) <- CD
  ## differential expression
  fit <- zlm.SingleCellAssay(~cngeneson + group, sca = sca, method = "bayesglm", ebayes = TRUE)
  # contrast matrix selecting the group coefficient
  L = matrix(0, nrow = ncol(coef(fit, "D")))
  rownames(L) = colnames(coef(fit, "D"))
  L["groupB", ] = 1
  lrFit <- lrTest(fit, hypothesis = L)
  pval = lrFit[, 'hurdle', 'Pr(>Chisq)']
  padj = p.adjust(pval, method = "BH")
  list(pvals = pval, padj = padj)
}
runMAST_count <- function(e){
  # Same MAST fit as runMAST, but returns p-values for the continuous
  # ('cont', Gaussian expression) component only instead of the combined
  # hurdle test. Returns list(pvals, padj).
  require(MAST)
  counts = exprs(e)
  # BUGFIX: the original computed counts*1e6/colSums(counts). Dividing a
  # matrix by a vector recycles along ROWS (genes), not columns (cells),
  # so each count was divided by the wrong cell's total. sweep() divides
  # each column by its own library size.
  tpm <- sweep(counts, 2, colSums(counts), "/") * 1e6
  sca <- FromMatrix('SingleCellAssay', t(tpm), cData = data.frame(group = pData(e)$condition))
  # cellular detection rate: fraction of genes detected in each cell
  ngeneson <- apply(exprs(sca), 1, function(x) mean(x > 0))
  CD <- cData(sca)
  CD$ngeneson <- ngeneson
  CD$cngeneson <- CD$ngeneson - mean(ngeneson)
  cData(sca) <- CD
  ## differential expression
  fit <- zlm.SingleCellAssay(~cngeneson + group, sca = sca, method = "bayesglm", ebayes = TRUE)
  # contrast matrix selecting the group coefficient
  L = matrix(0, nrow = ncol(coef(fit, "D")))
  rownames(L) = colnames(coef(fit, "D"))
  L["groupB", ] = 1
  lrFit <- lrTest(fit, hypothesis = L)
  pval = lrFit[, 'cont', 'Pr(>Chisq)']
  padj = p.adjust(pval, method = "BH")
  list(pvals = pval, padj = padj)
}
runLimmaHurdle <- function(e){
  ## limma voom pipeline with hurdle-style weights ##
  # Zero observations receive weight zero (both inside voom and by zeroing
  # the resulting precision weights), so only the positive counts drive the
  # fit. Tests the second (group) coefficient. Returns list(pvals, padj).
  library(limma)
  cnt <- exprs(e)
  grp <- pData(e)$condition
  is.zero <- cnt == 0
  mm <- model.matrix(~grp)
  nf <- edgeR::calcNormFactors(cnt)
  v <- voom(cnt, mm, plot = FALSE, lib.size = colSums(cnt) * nf, weights = 1 - is.zero)
  # force zero observations to contribute nothing to the linear fit
  v$weights <- (1 - is.zero) * v$weights
  fit <- eBayes(lmFit(v, mm))
  tab <- topTable(fit, coef = 2, n = nrow(cnt), sort.by = "none")
  list(pvals = tab$P.Value, padj = tab$adj.P.Val)
}
runEdgeRHurdle <- function(e) {
  # edgeR LRT with binary hurdle weights: every zero count gets observation
  # weight 0 and is effectively excluded from the NB fit.
  # Returns list(pvals, padj); NA adjusted p-values become 1.
  mm <- model.matrix(~ pData(e)$condition)
  dge <- DGEList(exprs(e))
  dge$weights <- 1 - (exprs(e) == 0)
  dge <- edgeR::calcNormFactors(dge)
  dge <- estimateDisp(dge, mm)
  lrt <- glmLRT(glmFit(dge, mm))
  p <- lrt$table$PValue
  q <- p.adjust(p, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = p, padj = q)
}
runMetagenomeSeq <- function(e){
  # metagenomeSeq zero-inflated Gaussian model (fitZig) with cumulative-sum
  # scaling normalization. Extracts the moderated p-values for the group
  # coefficient. Returns list(pvals, padj).
  require(metagenomeSeq)
  mm <- model.matrix(~pData(e)$condition)
  pheno <- AnnotatedDataFrame(data.frame(group = pData(e)$condition))
  rownames(pheno) <- colnames(exprs(e))
  qtl <- cumNormStatFast(exprs(e))
  mre <- newMRexperiment(counts = exprs(e), phenoData = pheno, featureData = NULL,
                         libSize = colSums(exprs(e)),
                         normFactors = metagenomeSeq::calcNormFactors(exprs(e), p = qtl))
  zig <- fitZig(mre, mm)
  # coefficient name follows the design formula above
  p <- zig$eb$p.value[, "pData(e)$conditionB"]
  q <- p.adjust(p, method = "BH")
  list(pvals = p, padj = q)
}
runDESeq2Zero <- function(e){
## implement DESeq2 ##
# DESeq2 with zingeR-style zero-inflation weights: EM weights are estimated
# from the effective log library size plus picking session, installed as
# observation weights on the DESeqDataSet, and Wald statistics are
# re-evaluated against a t distribution with zero-adjusted df. Independent
# filtering is applied via pvalueAdjustment_kvdb.
# Returns list(pvals, padj).
library(DESeq2) ; library(genefilter)
condition=pData(e)$condition
colData=DataFrame(pData(e))
dse <- DESeqDataSetFromMatrix(exprs(e), colData, ~ condition)
dse <- estimateSizeFactors(dse, type="poscounts")
# covariates for the zero (logistic) component of the EM
effLogLibSize <- log(colSums(counts(dse))*(1/sizeFactors(dse)))
pickingSession = pData(e)[,"Picking sessions"]
designZI=model.matrix(~effLogLibSize + pickingSession)
zeroWeights = zeroWeightsLibSizeDispFast(counts(dse), design=model.matrix(~condition), colData=colData, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, normalization="DESeq2_pos", designZI=designZI)
dimnames(zeroWeights) = NULL
# DESeq2 picks up assay "weights" as observation weights during fitting
assays(dse)[["weights"]] = zeroWeights
dse <- estimateDispersions(dse)
dse <- nbinomWaldTest(dse, betaPrior=TRUE)
#dse <- DESeq(dse, betaPrior=TRUE)
res <- results(dse)
baseMean=unname(rowMeans(sweep(counts(dse),2,1/sizeFactors(dse),FUN="*")))
# two-sided t-test on the Wald statistic with df = sum of weights - 2,
# i.e. the effective sample size after downweighting excess zeros
pvalDesZero = 2*(1-pt(abs(res$stat),df=rowSums(zeroWeights)-2))
padjusted = pvalueAdjustment_kvdb(pValue=pvalDesZero, filter=baseMean, alpha=0.05)
list(pvals=pvalDesZero, padj=padjusted$padj)
}
# try betaPrior=TRUE and betaPrior=FALSE
# for both poscounts in dse and zeroWEights
# poscounts in zeroWEeigts but tmm in dse
# tmm in zeroweights and dse
# runDESeq2ZeroTest <- function(e){
# ## implement DESeq2 ##
# library(DESeq2) ; library(genefilter)
# condition=pData(e)$condition
# colData=DataFrame(pData(e))
# pickingSession = pData(e)[,"Picking sessions"]
# dse <- DESeqDataSetFromMatrix(exprs(e), colData, ~ condition)
# #dse <- estimateSizeFactors(dse, type="poscounts")
# #dse = estimateSizeFactors(dse)
# sizeFactors(dse) = rep(1,120)
# #d=DGEList(exprs(e))
# #d=edgeR::calcNormFactors(d)
# #sizeFactors(dse)=1/d$samples$norm.factors
# effLogLibSize <- log(colSums(counts(dse))*(1/sizeFactors(dse)))
# designZI=model.matrix(~effLogLibSize + pickingSession)
# zeroWeights = zeroWeightsLibSizeDispFast(counts(dse), design=model.matrix(~condition), colData=colData, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, normalization="DESeq2_pos", designZI=designZI)
# dimnames(zeroWeights) = NULL
# assays(dse)[["weights"]] = zeroWeights
# dse <- estimateDispersions(dse)
# dse <- nbinomWaldTest(dse, betaPrior=FALSE)
# #dse <- DESeq(dse, betaPrior=TRUE)
# res <- results(dse, cooksCutoff=Inf)
# baseMean=unname(rowMeans(sweep(counts(dse),2,1/sizeFactors(dse),FUN="*")))
# pvalDesZero = 2*(1-pt(abs(res$stat),df=rowSums(zeroWeights)-2))
# padjusted = pvalueAdjustment_kvdb(pValue=pvalDesZero, filter=baseMean, alpha=0.05)
# list(pvals=pvalDesZero, padj=padjusted$padj)
# }
| /singleCellPaper/case/fpr/runScriptsUsoskin.R | no_license | statOmics/zingeRPaper | R | false | false | 23,360 | r |
runEdgeR <- function(e) {
  # Standard edgeR analysis: TMM normalization, dispersion estimation and a
  # likelihood-ratio test on the condition coefficient.
  # e: ExpressionSet with a 'condition' factor in pData.
  # Returns list(pvals, padj); NA adjusted p-values are set to 1.
  mm <- model.matrix(~ pData(e)$condition)
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  dge <- estimateDisp(dge, mm)
  lrt <- glmLRT(glmFit(dge, mm))
  p <- lrt$table$PValue
  q <- p.adjust(p, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = p, padj = q)
}
runEdgeRRobust <- function(e) {
  # edgeR with robust dispersion estimation (observation weights downweight
  # outlier counts). Returns list(pvals, padj); NA adjusted p-values become 1.
  mm <- model.matrix(~ pData(e)$condition)
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  # robust settings follow robinson_lab/edgeR_robust/robust_simulation.R
  dge <- estimateGLMRobustDisp(dge, mm, maxit = 6)
  lrt <- glmLRT(glmFit(dge, mm))
  predbeta <- predFC(exprs(e), mm, offset = getOffset(dge), dispersion = dge$tagwise.dispersion)
  p <- lrt$table$PValue
  q <- p.adjust(p, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = p, padj = q)
}
runVoom <- function(e) {
  # limma-voom pipeline: precision weights from the mean-variance trend,
  # then moderated t-tests on the condition coefficient.
  # Returns list(pvals, padj, beta = log2 fold changes).
  mm <- model.matrix(~ condition, pData(e))
  dge <- DGEList(exprs(e))
  dge <- edgeR::calcNormFactors(dge)
  vm <- voom(dge, mm, plot = FALSE)
  fit <- eBayes(lmFit(vm, mm))
  tab <- topTable(fit, coef = ncol(mm), n = nrow(dge), sort.by = "none")
  q <- p.adjust(tab$P.Value, method = "BH")
  q[is.na(q)] <- 1
  list(pvals = tab$P.Value, padj = q, beta = tab$logFC)
}
runDESeq2 <- function(e, retDDS=FALSE) {
  # DESeq2 Wald test with shrunken fold changes (betaPrior = TRUE).
  # Returns list(pvals, padj, beta); NA p-values / adjusted p-values become 1.
  library(DESeq2)
  dds <- DESeqDataSetFromMatrix(exprs(e), DataFrame(pData(e)), ~ condition)
  dds <- DESeq(dds, betaPrior = TRUE, quiet = TRUE)
  res <- results(dds)
  out <- list(pvals = res$pvalue, padj = res$padj, beta = res$log2FoldChange)
  out$pvals[is.na(out$pvals)] <- 1
  out$padj[is.na(out$padj)] <- 1
  out
}
runDESeq2_poscounts <- function(e, retDDS=FALSE) {
  # DESeq2 Wald test using "poscounts" size factors (geometric mean over
  # positive counts only — robust when every gene contains zeros).
  # Returns list(pvals, padj, beta); NA p-values / adjusted p-values become 1.
  library(DESeq2)
  dds <- DESeqDataSetFromMatrix(exprs(e), DataFrame(pData(e)), ~ condition)
  dds <- estimateSizeFactors(dds, type = "poscounts")
  dds <- estimateDispersions(dds)
  dds <- nbinomWaldTest(dds, betaPrior = TRUE)
  res <- results(dds)
  out <- list(pvals = res$pvalue, padj = res$padj, beta = res$log2FoldChange)
  out$pvals[is.na(out$pvals)] <- 1
  out$padj[is.na(out$padj)] <- 1
  out
}
.comboGroups <- function(truths)
# Function that returns a list of vectors of indices,
# where each vector refers to the rows with the same
# combination of TRUE/FALSE values in 'truths'.
#
# written by Aaron Lun
# Created 24 October 2014
{
# Integer packing will only work for 31 libraries at a time.
# Each chunk of up to 31 columns is packed into one integer key per row
# (bit i is set when truths[row, i] is TRUE); rows with identical
# TRUE/FALSE patterns therefore get identical keys in every chunk.
assembly <- list()
collected <- 0L
step <- 31L
bits <- as.integer(2^(1:step-1L))
while (collected < ncol(truths)) {
upper <- pmin(ncol(truths) - collected, step)
keys <- t(truths[,collected+1:upper,drop=FALSE]) * bits[1:upper]
assembly[[length(assembly)+1L]] <- as.integer(colSums(keys))
collected <- collected + step
}
# Figuring out the unique components.
# Rows are ordered by their key tuple; a row starts a new group whenever
# any chunk's key differs from the previous row's.
o <- do.call(order, assembly)
nr <- nrow(truths)
is.different <- logical(nr)
for (i in 1:length(assembly)) {
is.different <- is.different | c(TRUE, diff(assembly[[i]][o])!=0L)
}
first.of.each <- which(is.different)
last.of.each <- c(first.of.each[-1]-1L, nr)
# Returning the groups.
# Each element holds the original row indices sharing one pattern.
output <- list()
for (u in 1:length(first.of.each)) {
output[[u]] <- o[first.of.each[u]:last.of.each[u]]
}
return(output)
}
.residDF <- function(zero, design)
# Effective residual degrees of freedom after adjusting for exact zeros
# Gordon Smyth and Aaron Lun
# Created 6 Jan 2014. Last modified 2 Sep 2014
#
# zero:   logical matrix (genes x libraries), TRUE where the observation is
#         an exact (structural) zero that contributes no information.
# design: design matrix for the fitted GLM.
# Returns a per-gene vector of residual degrees of freedom.
{
nlib <- ncol(zero)
ncoef <- ncol(design)
nzero <- as.integer(rowSums(zero))
# Default is no zero
DF <- rep(nlib-ncoef,length(nzero))
# All zero case
DF[nzero==nlib] <- 0L
# Anything in between?
somezero <- nzero>0L & nzero<nlib
if(any(somezero)) {
zero2 <- zero[somezero,,drop=FALSE]
# group genes by identical zero pattern so the design rank is computed
# once per pattern rather than once per gene
groupings <- .comboGroups(zero2)
# Identifying the true residual d.f. for each of these rows.
DF2 <- nlib-nzero[somezero]
for (u in 1:length(groupings)) {
i <- groupings[[u]]
zeroi <- zero2[i[1],]
# subtract the rank of the design restricted to the non-zero libraries
DF2[i] <- DF2[i]-qr(design[!zeroi,,drop=FALSE])$rank
}
DF2 <- pmax(DF2, 0L)
DF[somezero] <- DF2
}
DF
}
# Weighted re-implementation of edgeR's estimateDisp().
#
# Estimates common, trended and tagwise negative-binomial dispersions for a
# DGEList, additionally propagating an observation-level weight matrix into
# the internal GLM fits and adjusted-profile-likelihood evaluations (the
# lines tagged ###www mark the weighted calls that differ from stock edgeR).
#
# y            DGEList to annotate with dispersion estimates.
# design       design matrix; if NULL, falls back to edgeR's "classic"
#              qCML mode on pseudo-counts.
# weights      observation weight matrix (same dimensions as y$counts),
#              or NULL for unweighted behavior.
# Remaining arguments mirror edgeR::estimateDisp() (prior.df, trend.method,
# tagwise, span, min.row.sum, grid.length/grid.range for the dispersion
# grid, robust/winsor.tail.p for squeezeVar, tol for optimize).
#
# Returns y with common.dispersion, trended.dispersion, tagwise.dispersion,
# trend.method, AveLogCPM, span, prior.df and prior.n filled in.
#
# NOTE(review): relies on edgeR internals (WLEB, adjustedProfileLik,
# .comboGroups, .residDF, expandAsMatrix, equalizeLibSizes, ...); behavior
# tracks the edgeR version this was copied from — confirm against the
# installed edgeR release before reuse.
estimateDispWeighted = function (y, design = NULL, prior.df = NULL, trend.method = "locfit", tagwise = TRUE, span = NULL, min.row.sum = 5, grid.length = 21,
grid.range = c(-10, 10), robust = FALSE, winsor.tail.p = c(0.05,
0.1), tol = 1e-06, weights=NULL)
{
#adjusted by Koen VdB on 04 March 2016
if (!is(y, "DGEList"))
stop("y must be a DGEList")
trend.method <- match.arg(trend.method, c("none", "loess",
"locfit", "movingave"))
ntags <- nrow(y$counts)
nlibs <- ncol(y$counts)
offset <- getOffset(y)
AveLogCPM <- aveLogCPM(y)
offset <- expandAsMatrix(offset, dim(y))
# Only tags with enough total counts contribute to dispersion estimation.
sel <- rowSums(y$counts) >= min.row.sum
# Dispersion grid: grid.length points spanning 0.1 * 2^grid.range.
spline.pts <- seq(from = grid.range[1], to = grid.range[2],
length = grid.length)
spline.disp <- 0.1 * 2^spline.pts
grid.vals <- spline.disp/(1 + spline.disp)
# l0 accumulates the per-tag log-likelihood at every grid point.
l0 <- matrix(0, sum(sel), grid.length)
if (is.null(design)) {
# Classic mode: qCML on pseudo-counts equalized to a common library size.
cat("Design matrix not provided. Switch to the classic mode.\n")
group <- y$samples$group <- as.factor(y$samples$group)
if (length(levels(group)) == 1)
design <- matrix(1, nlibs, 1)
else design <- model.matrix(~group)
if (all(tabulate(group) <= 1)) {
warning("There is no replication, setting dispersion to NA.")
y$common.dispersion <- NA
return(y)
}
pseudo.obj <- y[sel, ]
# First pass with a rough dispersion to get pseudo-counts ...
q2q.out <- equalizeLibSizes(y[sel, ], dispersion = 0.01)
pseudo.obj$counts <- q2q.out$pseudo
ysplit <- splitIntoGroups(pseudo.obj)
# ... then maximize the common conditional log-likelihood in delta space,
delta <- optimize(commonCondLogLikDerDelta, interval = c(1e-04,
100/(100 + 1)), tol = tol, maximum = TRUE, y = ysplit,
der = 0)
delta <- delta$maximum
disp <- delta/(1 - delta)
# ... and redo the equalization at the fitted common dispersion.
q2q.out <- equalizeLibSizes(y[sel, ], dispersion = disp)
pseudo.obj$counts <- q2q.out$pseudo
ysplit <- splitIntoGroups(pseudo.obj)
for (j in 1:grid.length) for (i in 1:length(ysplit)) l0[,
j] <- condLogLikDerDelta(ysplit[[i]], grid.vals[j],
der = 0) + l0[, j]
}
else {
# GLM mode: adjusted profile likelihood evaluated on the grid.
design <- as.matrix(design)
if (ncol(design) >= ncol(y$counts)) {
warning("No residual df: setting dispersion to NA")
y$common.dispersion <- NA
return(y)
}
glmfit <- glmFit(y$counts[sel, ], design, offset = offset[sel,
], dispersion = 0.05, prior.count = 0, weights=weights[sel,]) ###www
# Flag structural zeros: fitted and observed both effectively zero.
zerofit <- (glmfit$fitted.values < 1e-04) & (glmfit$counts <
1e-04)
# Group rows sharing a zero pattern so the design is re-reduced once
# per distinct pattern rather than once per tag.
by.group <- .comboGroups(zerofit)
for (subg in by.group) {
cur.nzero <- !zerofit[subg[1], ]
if (!any(cur.nzero)) {
next
}
if (all(cur.nzero)) {
redesign <- design
}
else {
# Drop zeroed libraries and any now-redundant columns.
redesign <- design[cur.nzero, , drop = FALSE]
QR <- qr(redesign)
redesign <- redesign[, QR$pivot[1:QR$rank], drop = FALSE]
if (nrow(redesign) == ncol(redesign)) {
next
}
}
# Warm-start each grid point from the previous point's coefficients.
last.beta <- NULL
for (i in 1:grid.length) {
out <- adjustedProfileLik(spline.disp[i], y = y$counts[sel,
][subg, cur.nzero, drop = FALSE], design = redesign,
offset = offset[sel, ][subg, cur.nzero, drop = FALSE],
start = last.beta, get.coef = TRUE, weights=weights[sel,][subg, cur.nzero, drop = FALSE]) ###www
l0[subg, i] <- out$apl
last.beta <- out$beta
}
}
}
# Weighted likelihood empirical Bayes: overall (common) and trended fits.
out.1 <- WLEB(theta = spline.pts, loglik = l0, covariate = AveLogCPM[sel],
trend.method = trend.method, span = span, individual = FALSE,
m0.out = TRUE)
y$common.dispersion <- 0.1 * 2^out.1$overall
disp.trend <- 0.1 * 2^out.1$trend
# Unselected tags inherit the trend value of the lowest-abundance selected tag.
y$trended.dispersion <- rep(disp.trend[which.min(AveLogCPM[sel])],
ntags)
y$trended.dispersion[sel] <- disp.trend
y$trend.method <- trend.method
y$AveLogCPM <- AveLogCPM
y$span <- out.1$span
if (!tagwise)
return(y)
# Estimate the prior d.f. from deviance variability unless supplied.
if (is.null(prior.df)) {
glmfit <- glmFit(y$counts[sel, ], design, offset = offset[sel,
], dispersion = disp.trend, prior.count = 0, weights=weights[sel,]) ###www
df.residual <- glmfit$df.residual
zerofit <- (glmfit$fitted.values < 1e-04) & (glmfit$counts <
1e-04)
# Correct residual d.f. for structural zeros (see .residDF above).
df.residual <- .residDF(zerofit, design)
s2 <- glmfit$deviance/df.residual
s2[df.residual == 0] <- 0
s2 <- pmax(s2, 0)
s2.fit <- squeezeVar(s2, df = df.residual, covariate = AveLogCPM[sel],
robust = robust, winsor.tail.p = winsor.tail.p)
prior.df <- s2.fit$df.prior
}
ncoefs <- ncol(design)
prior.n <- prior.df/(nlibs - ncoefs)
if (trend.method != "none") {
y$tagwise.dispersion <- y$trended.dispersion
}
else {
y$tagwise.dispersion <- rep(y$common.dispersion, ntags)
}
# A huge prior.n means tagwise estimates collapse to the trend; skip the
# per-tag WLEB entirely only if *every* tag is in that regime.
too.large <- prior.n > 1e+06
if (!all(too.large)) {
temp.n <- prior.n
if (any(too.large)) {
temp.n[too.large] <- 1e+06
}
out.2 <- WLEB(theta = spline.pts, loglik = l0, prior.n = temp.n,
covariate = AveLogCPM[sel], trend.method = trend.method,
span = span, overall = FALSE, trend = FALSE, m0 = out.1$shared.loglik)
if (!robust) {
y$tagwise.dispersion[sel] <- 0.1 * 2^out.2$individual
}
else {
y$tagwise.dispersion[sel][!too.large] <- 0.1 * 2^out.2$individual[!too.large]
}
}
if (!robust) {
y$prior.df <- prior.df
y$prior.n <- prior.n
}
else {
y$prior.df <- y$prior.n <- rep(Inf, ntags)
y$prior.df[sel] <- prior.df
y$prior.n[sel] <- prior.n
}
y
}
# EM algorithm estimating posterior observation weights for a zero-inflated
# negative binomial model. Each zero count is assigned the posterior
# probability that it comes from the NB count component (weight near 1) as
# opposed to the excess-zero component (weight near 0); non-zero counts keep
# weight 1. The zero probability is modeled by binomial regression on the
# log effective library size (or a user-supplied design, designZI).
#
# counts           count matrix (genes x samples) or compatible object.
# design           design matrix for the NB mean model.
# colData          sample annotation, required for the DESeq2 normalizations.
# initialWeightAt0 NOTE(review): accepted but never referenced in the body —
#                  the starting weights below are always based on the
#                  per-library zero fraction.
# maxit            maximum number of EM iterations.
# plot, plotW      diagnostic plots (BCV per iteration / weight histograms).
# designZI         optional design matrix for the zero-model; if NULL, an
#                  intercept + logEffLibSize model is used.
# llTol            per-row relative log-likelihood change declaring convergence.
# normalization    "TMM", "DESeq2" or "DESeq2_pos" (poscounts size factors).
#
# Returns the weight matrix w (same dimensions as the counts).
zeroWeightsLibSizeDispFast <- function(counts, design, colData=NULL, initialWeightAt0=TRUE, maxit=100, plot=FALSE, plotW=FALSE, designZI=NULL, llTol=1e-4, normalization="TMM"){
require(edgeR) ; require(DESeq2)
if(plot | plotW) par(mfrow=c(1,plot+plotW))
# Normalization: wrap counts in a DGEList and set norm factors so that
# lib.size * norm.factors is the effective library size in all three modes.
if(normalization=="TMM"){
counts <- DGEList(counts)
counts = edgeR::calcNormFactors(counts)
} else if(normalization=="DESeq2"){
designFormula=as.formula(paste0("~",paste(names(attr(design,"contrasts")),collapse="+")))
dse = DESeqDataSetFromMatrix(counts, colData=colData, design=designFormula)
dse = DESeq2::estimateSizeFactors(dse)
counts <- DGEList(counts)
counts$samples$norm.factors = 1/dse$sizeFactor
} else if(normalization=="DESeq2_pos"){
designFormula=as.formula(paste0("~",paste(names(attr(design,"contrasts")),collapse="+")))
dse = DESeqDataSetFromMatrix(counts, colData=colData, design=designFormula)
# poscounts size factors tolerate genes with a zero in every sample
dse = DESeq2::estimateSizeFactors(dse, type="poscounts")
counts <- DGEList(counts)
counts$samples$norm.factors = 1/dse$sizeFactor
}
effLibSize <- counts$samples$lib.size*counts$samples$norm.factors
logEffLibSize <- log(effLibSize)
zeroId <- counts$counts==0
w <- matrix(1,nrow=nrow(counts),ncol=ncol(counts), dimnames=list(c(1:nrow(counts)), NULL))
## starting values based on P(zero) in the library
for(k in 1:ncol(w)) w[counts$counts[,k]==0,k] <- 1-mean(counts$counts[,k]==0)
llOld <- matrix(-1e4,nrow=nrow(counts),ncol=ncol(counts))
# NOTE(review): likCOld is assigned here but never read afterwards (dead).
likCOld <- matrix(0,nrow=nrow(counts),ncol=ncol(counts))
# 'converged' / 'j' implement a two-stage stop: dispersions are refit only
# after a convergence pass, and the EM stops on two consecutive passes.
converged=FALSE
j=0
for(i in 1:maxit){
j=j+1
zeroId <- counts$counts==0
counts$weights <- w
### M-step counts
#only estimate dispersions every 5 iterations
#if(i==1 | is.wholenumber(i/10)){
if(i==1 | converged){
counts <- estimateGLMCommonDisp(counts, design, interval=c(0,10))
counts <- estimateGLMTagwiseDisp(counts, design, prior.df=0, min.row.sum=1)
}
if(plot) plotBCV(counts)
fit <- glmFit(counts, design)
# NB density of every observation under the current fit.
likC <- dnbinom(counts$counts, mu=fit$fitted.values, size=1/counts$tagwise.dispersion)
### M-step mixture parameter: model zero probability
successes <- colSums(1-w) #P(zero)
failures <- colSums(w) #1-P(zero)
if(is.null(designZI)){
zeroFit <- glm(cbind(successes,failures) ~ logEffLibSize, family="binomial")} else{
zeroFit <- glm(cbind(successes,failures) ~-1+designZI, family="binomial")}
pi0Hat <- predict(zeroFit,type="response")
## E-step: Given estimated parameters, calculate expected value of weights
pi0HatMat <- expandAsMatrix(pi0Hat,dim=dim(counts),byrow=TRUE)
# Posterior P(count component | data); the 1e-15 guards against 0/0.
w <- 1-pi0HatMat*zeroId/(pi0HatMat*zeroId+(1-pi0HatMat)*likC*zeroId+1e-15)
## data log-likelihood
if(i>1) llOld=ll
ll <- log(pi0HatMat*zeroId + (1-pi0HatMat)*likC)
# Per-row relative change in log-likelihood since the previous iteration.
delta <- (rowSums(ll)-rowSums(llOld))/(rowSums(llOld)+llTol)
if(mean(abs(delta) < llTol)>.999){ #if 99.9% has converged
if(j==1 & mean(abs(delta) < llTol)>.999){ #final convergence?
cat(paste0("converged. \n")) ; return(w)}
j=0
converged=TRUE} else {converged=FALSE}
cat(paste0("iteration: ",i,". mean conv.: ",mean(abs(delta) < llTol),"\n"))
if(plotW) hist(w[zeroId],main=paste0("iteration: ",i,". mean conv.: ",mean(abs(delta) < llTol)))
}
# maxit reached without final convergence: return the latest weights.
return(w)
}
pvalueAdjustment_kvdb <- function(baseMean, filter, pValue,
theta, alpha=0.05, pAdjustMethod="BH") {
  # Independent filtering in the style of DESeq2's results(): scan a grid of
  # filter quantiles (theta), adjust p-values within each surviving subset
  # (genefilter::filtered_p), and keep the smallest quantile whose rejection
  # count comes close to the lowess-smoothed maximum. Filtering is skipped
  # (first grid point kept) when at most 10 rejections are ever achieved.
  #
  # baseMean       default filter statistic (e.g. mean normalized count).
  # filter         filter statistic; falls back to baseMean when missing.
  # pValue         raw p-values to adjust.
  # theta          quantile grid; auto-built over [frac(filter==0), .95 or 1].
  # alpha          significance level used to count rejections.
  # pAdjustMethod  method passed to p.adjust via filtered_p.
  #
  # Returns a list: padj, filterThreshold, filterTheta, filterNumRej
  # (theta vs. rejection counts), lo.fit (the lowess fit) and alpha.
  if (missing(filter)) {
    filter <- baseMean
  }
  if (missing(theta)) {
    zero.frac <- mean(filter == 0)
    upper <- if (zero.frac < .95) .95 else 1
    theta <- seq(zero.frac, upper, length=50)
  }
  stopifnot(length(theta) > 1)
  # One column of adjusted p-values per filtering quantile.
  filtPadj <- filtered_p(filter=filter, test=pValue,
                         theta=theta, method=pAdjustMethod)
  numRej <- colSums(filtPadj < alpha, na.rm = TRUE)
  lo.fit <- lowess(numRej ~ theta, f=1/5)
  j <- 1
  if (max(numRej) > 10) {
    # Require the chosen quantile's rejections to exceed the smoothed
    # maximum minus one RMS residual, guarding against over-filtering.
    resid.vals <- 0
    if (!all(numRej == 0)) {
      keep <- numRej > 0
      resid.vals <- numRej[keep] - lo.fit$y[keep]
    }
    thresh <- max(lo.fit$y) - sqrt(mean(resid.vals^2))
    above <- which(numRej > thresh)
    if (length(above) > 0) {
      j <- above[1]
    }
  }
  list(padj = filtPadj[, j, drop=TRUE],
       filterThreshold = quantile(filter, theta)[j],
       filterTheta = theta[j],
       filterNumRej = data.frame(theta=theta, numRej=numRej),
       lo.fit = lo.fit,
       alpha = alpha)
}
source("~/Dropbox/phdKoen/singleCell/githubPaper/singleCellPaper/method/glmLRTOld.R")
# edgeR differential expression with EM-estimated zero-inflation weights,
# where the zero-model covariates are the effective log library size plus the
# picking session.
#
# e: ExpressionSet with pData columns 'condition' and "Picking sessions".
# Returns list(pvals, padj); NA adjusted p-values are set to 1.
runEdgeREMLibSize=function(e){
#function(counts, group, design=NULL, mc.cores=2, niter=50){
design <- model.matrix(~ pData(e)$condition)
library(edgeR) ; library(genefilter)
d <- DGEList(exprs(e))
d <- edgeR::calcNormFactors(d)
#not adding a design matrix models the zeroes with the library size automatically
effLogLibSize = log(d$samples$lib.size*d$samples$norm.factors)
pickingSession = pData(e)[,"Picking sessions"]
designZI = model.matrix(~effLogLibSize + pickingSession)
# Posterior weights for the NB count component (EM; defined in this file).
zeroWeights = zeroWeightsLibSizeDispFast(d, design, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, designZI=designZI)
d$weights = zeroWeights
d=estimateDispWeighted(d,design,weights=zeroWeights, grid.range=c(-15,15))
#plotBCV(d)
edger.fit <- glmFit(d, design) #uses weights
# Effective residual d.f. = total observation weight minus model coefficients.
edger.fit$df.residual <- rowSums(edger.fit$weights)-ncol(design)
# glmLRTOld comes from the source() call preceding this function; the F-test
# uses the adjusted residual d.f. above.
edger.lrt <- glmLRTOld(edger.fit,coef=2,test="F")
pvals <- edger.lrt$table$PValue
baseMean = unname(rowMeans(sweep(d$counts,2,d$samples$norm.factors,FUN="*")))
# Independent filtering (helper defined earlier in this file).
hlp <- pvalueAdjustment_kvdb(baseMean=baseMean, pValue=pvals)
padj <- hlp$padj
#padj <- p.adjust(pval,method="BH")
padj[is.na(padj)] <- 1
out=list(pvals=pvals,padj=padj)
# NOTE(review): is.na() on a list only flags length-1 NA elements, so this
# replacement is almost certainly a no-op here (padj NAs were already
# handled); left unchanged to preserve behavior.
out[is.na(out)] <- 1
return(out)
}
runScde <- function(e){
  # Differential expression with scde: fit per-cell error models, build the
  # expression-magnitude prior, then test all genes between the two groups.
  # e: ExpressionSet with a pData 'condition' factor.
  # Returns list(Z = scde Z-scores, pvals = two-sided normal p-values,
  #              padj = BH-adjusted p-values).
  require(scde)
  cnts <- exprs(e)
  grp <- pData(e)$condition
  # Per-cell error models (single core; no diagnostic plots).
  o.ifm <- scde.error.models(counts = cnts, groups = grp, n.cores = 1,
                             threshold.segmentation = TRUE,
                             save.crossfit.plots = FALSE,
                             save.model.plots = FALSE, verbose = 0)
  # Gene expression prior over 400 grid points.
  o.prior <- scde.expression.prior(models = o.ifm, counts = cnts,
                                   length.out = 400, show.plot = FALSE)
  # Expression-difference test with 100 randomizations.
  ediff <- scde.expression.difference(o.ifm, cnts, o.prior, groups = grp,
                                      n.randomizations = 100, n.cores = 1,
                                      verbose = 0)
  pvals <- (1 - pnorm(abs(ediff$Z))) * 2
  list(Z = ediff$Z, pvals = pvals, padj = p.adjust(pvals, method = "BH"))
}
runMAST <- function(e){
  # MAST hurdle-model differential expression on TPM-like values, with the
  # centered cellular detection rate (cngeneson) as a covariate.
  # e: ExpressionSet (genes x cells) with a pData 'condition' factor whose
  #    second level is "B" (the contrast row is "groupB").
  # Returns list(pvals = hurdle-test p-values, padj = BH-adjusted p-values).
  require(MAST)
  counts=exprs(e)
  # Per-cell normalization to counts-per-million. BUG FIX: the original
  # 'counts*1e6/colSums(counts)' recycles the per-cell totals column-major
  # down the rows, so each gene was divided by the wrong cell's total;
  # sweep() applies each column's total to that column as intended.
  tpm <- sweep(counts, 2, colSums(counts), FUN = "/") * 1e6
  sca <- FromMatrix('SingleCellAssay', t(tpm), cData=data.frame(group=pData(e)$condition))
  # Cellular detection rate: fraction of genes detected in each cell.
  ngeneson <- apply(exprs(sca),1,function(x)mean(x>0))
  CD <- cData(sca)
  CD$ngeneson <- ngeneson
  CD$cngeneson <- CD$ngeneson-mean(ngeneson)   # centered CDR covariate
  cData(sca) <- CD
  ## differential expression: zero-inflated regression (hurdle model)
  fit <- zlm.SingleCellAssay(~cngeneson+group,sca=sca,method="bayesglm",ebayes=TRUE)
  # Contrast matrix selecting the group coefficient only.
  L=matrix(0,nrow=ncol(coef(fit,"D")))
  rownames(L)=colnames(coef(fit,"D"))
  L["groupB",]=1
  lrFit <- lrTest(fit, hypothesis=L)
  # Combined (discrete + continuous) hurdle-test p-values.
  pval=lrFit[,'hurdle','Pr(>Chisq)']
  padj=p.adjust(pval,method="BH")
  list(pvals=pval,padj=padj)
}
runMAST_count <- function(e){
  # Same MAST pipeline as runMAST, but reports the p-values of the
  # continuous ('cont') component of the hurdle model instead of the
  # combined hurdle test.
  # e: ExpressionSet (genes x cells) with a pData 'condition' factor whose
  #    second level is "B" (the contrast row is "groupB").
  # Returns list(pvals, padj).
  require(MAST)
  counts=exprs(e)
  # Per-cell normalization to counts-per-million. BUG FIX: the original
  # 'counts*1e6/colSums(counts)' recycles the per-cell totals column-major
  # down the rows, so each gene was divided by the wrong cell's total;
  # sweep() applies each column's total to that column as intended.
  tpm <- sweep(counts, 2, colSums(counts), FUN = "/") * 1e6
  sca <- FromMatrix('SingleCellAssay', t(tpm), cData=data.frame(group=pData(e)$condition))
  # Cellular detection rate: fraction of genes detected in each cell.
  ngeneson <- apply(exprs(sca),1,function(x)mean(x>0))
  CD <- cData(sca)
  CD$ngeneson <- ngeneson
  CD$cngeneson <- CD$ngeneson-mean(ngeneson)   # centered CDR covariate
  cData(sca) <- CD
  ## differential expression: zero-inflated regression (hurdle model)
  fit <- zlm.SingleCellAssay(~cngeneson+group,sca=sca,method="bayesglm",ebayes=TRUE)
  # Contrast matrix selecting the group coefficient only.
  L=matrix(0,nrow=ncol(coef(fit,"D")))
  rownames(L)=colnames(coef(fit,"D"))
  L["groupB",]=1
  lrFit <- lrTest(fit, hypothesis=L)
  # Continuous-component p-values only (cf. 'hurdle' in runMAST).
  pval=lrFit[,'cont','Pr(>Chisq)']
  padj=p.adjust(pval,method="BH")
  list(pvals=pval,padj=padj)
}
runLimmaHurdle <- function(e){
  ## limma-voom with hurdle-style observation weights: exact zeros get
  ## weight 0 both when estimating the mean-variance trend and in the
  ## final fit, so they are effectively excluded from the linear model.
  # e: ExpressionSet with a pData 'condition' factor.
  # Returns list(pvals, padj) in the original gene order (sort.by = "none").
  library(limma)
  counts=exprs(e)
  group=pData(e)$condition
  zeroId=counts==0
  design = model.matrix(~group)
  nf <- edgeR::calcNormFactors(counts)
  # voom with a 0/1 zero mask as starting observation weights.
  y <- voom(counts, design, plot=FALSE, lib.size = colSums(counts)*nf, weights=1-zeroId)
  # Re-apply the mask so zeros stay excluded after voom's precision weights.
  y$weights=(1-zeroId)*y$weights
  fit <- lmFit(y, design)
  fit <- eBayes(fit)
  tt <- topTable(fit,coef=2,n=nrow(counts), sort.by = "none")
  # (Removed the original's unused 'lfc <- tt$logFC' assignment.)
  pval <- tt$P.Value
  padj <- tt$adj.P.Val
  list(pvals = pval, padj = padj)
}
runEdgeRHurdle <- function(e) {
  # edgeR likelihood-ratio test with hurdle-style weights: exact zeros are
  # given observation weight 0 and therefore ignored by the NB GLM.
  # e: ExpressionSet with a pData 'condition' factor.
  # Returns list(pvals, padj); NA adjusted p-values are set to 1.
  mm <- model.matrix(~ pData(e)$condition)
  y <- DGEList(exprs(e))
  y$weights <- 1 - (exprs(e) == 0)   # mask out exact zeros
  y <- edgeR::calcNormFactors(y)
  y <- estimateDisp(y, mm)
  fit <- glmFit(y, mm)
  lrt <- glmLRT(fit)
  p.raw <- lrt$table$PValue
  p.adj <- p.adjust(p.raw, method = "BH")
  p.adj[is.na(p.adj)] <- 1
  list(pvals = p.raw, padj = p.adj)
}
runMetagenomeSeq <- function(e){
  # metagenomeSeq zero-inflated Gaussian model (fitZig) with cumulative-sum
  # scaling normalization.
  # e: ExpressionSet with a pData 'condition' factor whose second level is
  #    "B" (the tested coefficient column is "pData(e)$conditionB").
  # Returns list(pvals, padj).
  require(metagenomeSeq)
  mat <- exprs(e)
  mm <- model.matrix(~pData(e)$condition)
  pheno <- AnnotatedDataFrame(data.frame(group = pData(e)$condition))
  rownames(pheno) <- colnames(mat)
  # Percentile for cumulative-sum scaling, then the MRexperiment container.
  pctl <- cumNormStatFast(mat)
  mre <- newMRexperiment(counts = mat, phenoData = pheno, featureData = NULL,
                         libSize = colSums(mat),
                         normFactors = metagenomeSeq::calcNormFactors(mat, p = pctl))
  zig <- fitZig(mre, mm)
  # Moderated p-values for the condition coefficient.
  p.raw <- zig$eb$p.value[, "pData(e)$conditionB"]
  list(pvals = p.raw, padj = p.adjust(p.raw, method = "BH"))
}
# DESeq2 analysis with EM-estimated zero-inflation observation weights
# (zeroWeightsLibSizeDispFast, defined in this file). The Wald statistics
# are re-evaluated against a t distribution whose degrees of freedom equal
# the summed observation weights minus the two estimated coefficients.
#
# e: ExpressionSet with pData columns 'condition' and "Picking sessions".
# Returns list(pvals, padj).
runDESeq2Zero <- function(e){
## implement DESeq2 ##
library(DESeq2) ; library(genefilter)
condition=pData(e)$condition
colData=DataFrame(pData(e))
dse <- DESeqDataSetFromMatrix(exprs(e), colData, ~ condition)
# poscounts size factors tolerate genes with zeros in every sample.
dse <- estimateSizeFactors(dse, type="poscounts")
effLogLibSize <- log(colSums(counts(dse))*(1/sizeFactors(dse)))
pickingSession = pData(e)[,"Picking sessions"]
# Zero-model covariates: effective log library size + picking session.
designZI=model.matrix(~effLogLibSize + pickingSession)
zeroWeights = zeroWeightsLibSizeDispFast(counts(dse), design=model.matrix(~condition), colData=colData, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, normalization="DESeq2_pos", designZI=designZI)
dimnames(zeroWeights) = NULL
# DESeq2 picks these up as observation weights in dispersion/GLM fitting.
assays(dse)[["weights"]] = zeroWeights
dse <- estimateDispersions(dse)
dse <- nbinomWaldTest(dse, betaPrior=TRUE)
#dse <- DESeq(dse, betaPrior=TRUE)
res <- results(dse)
baseMean=unname(rowMeans(sweep(counts(dse),2,1/sizeFactors(dse),FUN="*")))
# Two-sided t p-values with effective df = sum(weights) - 2 coefficients
# (intercept + condition), replacing DESeq2's normal-based p-values.
pvalDesZero = 2*(1-pt(abs(res$stat),df=rowSums(zeroWeights)-2))
# Independent filtering on the weighted base mean (helper in this file).
padjusted = pvalueAdjustment_kvdb(pValue=pvalDesZero, filter=baseMean, alpha=0.05)
list(pvals=pvalDesZero, padj=padjusted$padj)
}
# try betaPrior=TRUE and betaPrior=FALSE
# for both poscounts in dse and zeroWEights
# poscounts in zeroWEeigts but tmm in dse
# tmm in zeroweights and dse
# runDESeq2ZeroTest <- function(e){
# ## implement DESeq2 ##
# library(DESeq2) ; library(genefilter)
# condition=pData(e)$condition
# colData=DataFrame(pData(e))
# pickingSession = pData(e)[,"Picking sessions"]
# dse <- DESeqDataSetFromMatrix(exprs(e), colData, ~ condition)
# #dse <- estimateSizeFactors(dse, type="poscounts")
# #dse = estimateSizeFactors(dse)
# sizeFactors(dse) = rep(1,120)
# #d=DGEList(exprs(e))
# #d=edgeR::calcNormFactors(d)
# #sizeFactors(dse)=1/d$samples$norm.factors
# effLogLibSize <- log(colSums(counts(dse))*(1/sizeFactors(dse)))
# designZI=model.matrix(~effLogLibSize + pickingSession)
# zeroWeights = zeroWeightsLibSizeDispFast(counts(dse), design=model.matrix(~condition), colData=colData, plot=FALSE, maxit=200, initialWeightAt0=TRUE, plotW=FALSE, normalization="DESeq2_pos", designZI=designZI)
# dimnames(zeroWeights) = NULL
# assays(dse)[["weights"]] = zeroWeights
# dse <- estimateDispersions(dse)
# dse <- nbinomWaldTest(dse, betaPrior=FALSE)
# #dse <- DESeq(dse, betaPrior=TRUE)
# res <- results(dse, cooksCutoff=Inf)
# baseMean=unname(rowMeans(sweep(counts(dse),2,1/sizeFactors(dse),FUN="*")))
# pvalDesZero = 2*(1-pt(abs(res$stat),df=rowSums(zeroWeights)-2))
# padjusted = pvalueAdjustment_kvdb(pValue=pvalDesZero, filter=baseMean, alpha=0.05)
# list(pvals=pvalDesZero, padj=padjusted$padj)
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_init_model_function.R
\name{random.init.model}
\alias{random.init.model}
\title{Generate Random Starting Model}
\usage{
random.init.model(number.par, crit.parms = NULL, no.fit = NULL)
}
\arguments{
\item{number.par}{The number of total parameters available.}
\item{crit.parms}{A list containing vectors which specify the critical parameter sets. Needs to be given by index and not by name (for conversion see \code{\link{set.crit.parms}}).}
\item{no.fit}{A vector containing the indices of the parameters which are not to be fitted.}
}
\value{
A vector containing the parameter indices of the random model.
}
\description{
Generates a random starting model (if specified by the user in \code{\link{famos}}), taking into account the critical conditions and the parameters which should not be fitted.
}
\examples{
#set critical conditions
crits <- list(c(1,2,3), c(4,5))
#generate random model
random.init.model(number.par = 20)
random.init.model(number.par = 20, crit.parms = crits)
}
| /man/random.init.model.Rd | no_license | cran/FAMoS | R | false | true | 1,068 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_init_model_function.R
\name{random.init.model}
\alias{random.init.model}
\title{Generate Random Starting Model}
\usage{
random.init.model(number.par, crit.parms = NULL, no.fit = NULL)
}
\arguments{
\item{number.par}{The number of total parameters available.}
\item{crit.parms}{A list containing vectors which specify the critical parameter sets. Needs to be given by index and not by name (for conversion see \code{\link{set.crit.parms}}).}
\item{no.fit}{A vector containing the indices of the parameters which are not to be fitted.}
}
\value{
A vector containing the parameter indices of the random model.
}
\description{
Generates a random starting model (if specified by the user in \code{\link{famos}}), taking into account the critical conditions and the parameters which should not be fitted.
}
\examples{
#set critical conditions
crits <- list(c(1,2,3), c(4,5))
#generate random model
random.init.model(number.par = 20)
random.init.model(number.par = 20, crit.parms = crits)
}
|
# Synthetic 100-row fraud-detection toy dataset: a 10% positive 'Fraud'
# label plus a mix of categorical, amount-style, hour-of-day and indicator
# covariates drawn with sample()/rep_len().
# NOTE(review): the sample() calls are not seeded, so the contents differ
# on every evaluation.
data= tibble::tibble(Fraud= sample(rep(0:1,c(90,10))),
payeeCity=rep_len(x=1:10, length.out=100),
requestedAmountNormalizedCurrency=
rep_len(c(100,1000,10000,500,2500), length.out=100),
ki_FirstTransactionByMainEntitySes_WebCommercial_AcpSession_V1=
sample(c(100,200,5000,400),100,replace = TRUE),
aisVar_actimizeCumulativeAmtForMEInRcntPrd=
sample(c(100,200,5000,400),100,replace = TRUE),
ki_BurstInNewPayeeActivity_WebCommercial_ExternalInternationalTransfer_V1=
rep_len(c(0,1,2,3),length.out = 100),
cd4_hour = sample(0:23,100,replace = TRUE),
ki_ActivityWithSuspiciousForeignCountryForParentEntitySusFor_WebCommercial_ExternalInternationalTransfer_V2
=sample(0:1,100,replace = TRUE),
aisVar_actimizeAvgSingleAmtForPEInYr=
sample(c(20.1,105.2,200,5000,4200),100,replace = TRUE))
| /R/data_prep.R | permissive | NanaAkwasiAbayieBoateng/LiftUtilities | R | false | false | 1,098 | r |
# NOTE(review): verbatim duplicate of the preceding 'data' tibble block
# (dataset-concatenation artifact); this later assignment overwrites the
# earlier one when the file is sourced. Contents are unseeded random draws.
data= tibble::tibble(Fraud= sample(rep(0:1,c(90,10))),
payeeCity=rep_len(x=1:10, length.out=100),
requestedAmountNormalizedCurrency=
rep_len(c(100,1000,10000,500,2500), length.out=100),
ki_FirstTransactionByMainEntitySes_WebCommercial_AcpSession_V1=
sample(c(100,200,5000,400),100,replace = TRUE),
aisVar_actimizeCumulativeAmtForMEInRcntPrd=
sample(c(100,200,5000,400),100,replace = TRUE),
ki_BurstInNewPayeeActivity_WebCommercial_ExternalInternationalTransfer_V1=
rep_len(c(0,1,2,3),length.out = 100),
cd4_hour = sample(0:23,100,replace = TRUE),
ki_ActivityWithSuspiciousForeignCountryForParentEntitySusFor_WebCommercial_ExternalInternationalTransfer_V2
=sample(0:1,100,replace = TRUE),
aisVar_actimizeAvgSingleAmtForPEInYr=
sample(c(20.1,105.2,200,5000,4200),100,replace = TRUE))
|
library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0025) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 4
r.sim <- b.var
# One replicate of the functional mixed-model power simulation.
#
# Simulates longitudinal functional data for nSubj subjects x nRep repeats,
# recovers functional PC scores by FPCA (fpca.face), rotates each subject's
# block by SVD so the random-slope variance can be tested with a single
# scalar random effect, and runs an exact restricted likelihood-ratio test
# (RLRsim::exactRLRT) for that variance component.
#
# iter: replicate index; only used to seed the RNG (iter + 15000).
# NOTE(review): reads 'r.sim' (true random-slope variance) and 'smooth'
# (0 = add measurement error to M, 1 = none) from the global environment;
# the driver exports them to the workers via clusterExport.
# Returns a list: realTau, pvalues.bonf (RLRT p-value), Merror.Var,
# smooth, npc (number of PCs retained), tests2 (the exactRLRT object).
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
set.seed(iter+15000)
# --- simulation design constants ---
D <- 80 # grid number total
nSubj <- 200 # 200 # I the number of curves
nRep <- 50 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal noise ratio'
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# Subject-specific random intercepts per condition, expanded to rows.
# hot
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
# Fourier eigenfunctions for the first three PCs.
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
# True PC scores and functional covariate M(t).
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
# Subject-specific random slopes on the PC scores; r.sim is the variance
# component under test.
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
# Response: intercept + condition-specific random intercepts + functional
# effect + noise.
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
# Optionally contaminate M with measurement error (smooth == 0).
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
# FPCA on the observed curves; npc PCs retained at 95% explained variance.
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
# Per-subject rotation: diagonalize A_i A_i' so the npc correlated random
# slopes collapse into independent scalar effects with design z.sim.uni.
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' model
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# Confusion of modifying
# Full model: fixed PC effects + random intercepts per condition + the
# scalar random slope under test (z.sim.uni).
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
# Reduced models for exactRLRT: m.slope keeps only the tested effect,
# m0 drops it from the full model.
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
# Exact restricted LRT for the z.sim.uni variance component.
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# --- Parallel driver ---------------------------------------------------------
# Runs run_one_sample() simRep times on a PSOCK cluster with reproducible
# parallel RNG streams, estimates the empirical power (fraction of replicates
# rejecting at level pvalue.true), and saves the whole workspace to fileName.
# 'r.sim' and 'smooth' are exported so workers can read them as globals
# inside run_one_sample.
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_power_", smooth, "_",b.var,"_seed4_grp200-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# One list element per replicate, as returned by run_one_sample().
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# TRUE per replicate when the RLRT p-value rejects at pvalue.true.
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
# Empirical power = rejection fraction across replicates.
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) | /full simulation/03.13.2018/power test/variance0.0025/seed4/power_0.0025_pca_s_seed4_200_50.R | no_license | wma9/FMRI-project | R | false | false | 9,166 | r | library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0025) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 4
r.sim <- b.var
# NOTE(review): verbatim duplicate of the run_one_sample() definition that
# appears earlier in this file (dataset-concatenation artifact); when the
# file is sourced, this later definition overwrites the earlier one. One
# replicate of the functional mixed-model power simulation: simulates
# curves, runs FPCA, SVD-rotates per subject, and applies exactRLRT to the
# random-slope variance component. Reads globals 'r.sim' and 'smooth'.
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
set.seed(iter+15000)
D <- 80 # grid number total
nSubj <- 200 # 200 # I the number of curves
nRep <- 50 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal noise ratio'
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' model
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# Confusion of modifying
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# ---------------------------------------------------------------------------
# Parallel power-simulation driver.
# Runs `simRep` independent replicates of `run_one_sample` on a socket
# cluster and records the empirical power of the Bonferroni-adjusted test.
# NOTE(review): relies on objects defined earlier in this file -- `cores`,
# `simRep`, `r.sim`, `smooth`, `b.var`, `pvalue.true` and the worker
# `run_one_sample` -- confirm they are defined before sourcing this chunk.
# ---------------------------------------------------------------------------
# Setup parallel
#cores <- detectCores()
# Reproducible parallel RNG: every worker gets its own L'Ecuyer stream
# derived from this seed.
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_power_", smooth, "_",b.var,"_seed4_grp200-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# One list element per replicate; each element is the list returned by
# run_one_sample (realTau, pvalues.bonf, Merror.Var, smooth, npc, tests2).
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# TRUE for a replicate when its Bonferroni-adjusted p-value falls at or
# below the nominal threshold `pvalue.true` (i.e. the test rejects).
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
# Empirical power = proportion of rejecting replicates.
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
# Persist the whole workspace (results plus inputs) for later analysis;
# the cluster is shut down with stopCluster() immediately after this chunk.
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) |
library(shiny)
library(ggplot2)
# UI half of the linked-scatter Shiny module: two side-by-side plots that
# share a single brush id, so brushing either plot highlights the same
# observations in both.
#
# @param id Module namespace id, as passed to callModule() on the server side.
# @return A fluidRow tag containing the two plot outputs.
linkedScatterUI <- function(id) {
  scoped <- NS(id)
  shared_brush <- scoped("brush")
  fluidRow(
    column(width = 6, plotOutput(scoped("plot1"), brush = shared_brush)),
    column(width = 6, plotOutput(scoped("plot2"), brush = shared_brush))
  )
}
# Server half of the linked-scatter module (old-style callModule signature).
# `data`, `left` and `right` are reactives: `data()` returns the data frame
# to plot; `left()` / `right()` each supply the column specification passed
# to scatterPlot() -- presumably a length-2 character vector of (x, y)
# column names, given how scatterPlot() indexes it; confirm with callers.
linkedScatter <- function(input, output, session, data, left, right) {
# Yields the data frame with an additional column "selected_"
# that indicates whether that observation is brushed
dataWithSelection <- reactive({
brushedPoints(data(), input$brush, allRows = TRUE)
})
# Both plots render from the same brushed data, which is what links them.
output$plot1 <- renderPlot({
scatterPlot(dataWithSelection(), left())
})
output$plot2 <- renderPlot({
scatterPlot(dataWithSelection(), right())
})
# Return the reactive so the calling module can consume the brushed rows.
return(dataWithSelection)
}
scatterPlot <- function(data, cols) {
ggplot(data, aes_string(x = cols[1], y = cols[2])) +
geom_point(aes(color = selected_)) +
scale_color_manual(values = c("black", "#66D65C"), guide = FALSE)
} | /module/linked_scatter.R | no_license | duocang/RShiny | R | false | false | 979 | r | library(shiny)
library(ggplot2)
linkedScatterUI <- function(id) {
ns <- NS(id)
fluidRow(
column(6, plotOutput(ns("plot1"), brush = ns("brush"))),
column(6, plotOutput(ns("plot2"), brush = ns("brush")))
)
}
linkedScatter <- function(input, output, session, data, left, right) {
# Yields the data frame with an additional column "selected_"
# that indicates whether that observation is brushed
dataWithSelection <- reactive({
brushedPoints(data(), input$brush, allRows = TRUE)
})
output$plot1 <- renderPlot({
scatterPlot(dataWithSelection(), left())
})
output$plot2 <- renderPlot({
scatterPlot(dataWithSelection(), right())
})
return(dataWithSelection)
}
scatterPlot <- function(data, cols) {
ggplot(data, aes_string(x = cols[1], y = cols[2])) +
geom_point(aes(color = selected_)) +
scale_color_manual(values = c("black", "#66D65C"), guide = FALSE)
} |
# Test-suite label; typo fixed ("mergeCLusters" -> "mergeClusters") so the
# reported context matches the function under test.
context("mergeClusters")
# Shared fixtures (smSimData, smSimSE, ceSim, ...) are created by this helper.
source("create_objects.R")
# Smoke-tests mergeClusters() on both a raw matrix and a ClusterExperiment
# object, sweeping the mergeMethod / plotInfo / leafType / labelType
# options, and checks bookkeeping of clusterTypes after repeated merging.
test_that("`mergeClusters` works with matrix and ClusterExperiment objects", {
cl1 <- clusterSingle(smSimData, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=6),isCount=FALSE)
# Give the six clusters readable names ("a".."f") via the cluster legend.
leg<-clusterLegend(cl1)[[primaryClusterIndex(cl1)]]
leg[,"name"]<-letters[1:6]
clusterLegend(cl1)[[primaryClusterIndex(cl1)]]<-leg
clustWithDendro <- makeDendrogram(cl1)
#matrix version
mergedList <- mergeClusters(x=transform(cl1), isCount=FALSE,
cl=primaryCluster(cl1),
dendro=clustWithDendro@dendro_clusters,
mergeMethod="adjP", plotInfo="mergeMethod")
# ClusterExperiment version: every supported mergeMethod / plotInfo combo
# should run without error.
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none",plotInfo="all")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="adjP")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="locfdr")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="locfdr", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="MB", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="JC", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod")
# plotInfo="mergeMethod" is only meaningful when an actual method was run.
expect_error(clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="mergeMethod"),"can only plot 'mergeMethod' results if one method is selected")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="none")
# A successful merge must register itself in the cluster bookkeeping.
expect_true("mergeClusters" %in% clusterTypes(clustMerged))
expect_true("mergeClusters" %in% colnames(clusterMatrix(clustMerged)))
# Plotting variants: all leafType x labelType combinations should work.
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
# Passing the sample dendrogram where the cluster dendrogram is expected
# must be rejected.
expect_error(mergeClusters(x=transform(clustWithDendro), isCount=FALSE,
cl=primaryCluster(clustWithDendro),plot="none",
mergeMethod="adjP",
dendro=clustWithDendro@dendro_samples),
"Not a valid input dendrogram")
#test if already exists
# Re-merging an already merged object should version the type name
# ("mergeClusters.1") without touching unrelated type names.
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP")
primaryClusterIndex(clustMerged)<-2
clustMerged<- makeDendrogram(clustMerged)
clustMerged2<-mergeClusters(clustMerged,mergeMethod="adjP")
expect_true("mergeClusters.1" %in% clusterTypes(clustMerged2))
expect_true(!"combineMany.1" %in% clusterTypes(clustMerged2))
expect_true(!"clusterMany.1" %in% clusterTypes(clustMerged2))
removeClusters(clustMerged, whichRemove = "mergeClusters")
})
# mergeClusters() must pass colData/rowData/metadata and the dim names of
# the underlying SummarizedExperiment through unchanged.
test_that("`mergeClusters` preserves the colData and rowData of SE", {
cl <- clusterSingle(smSimSE, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=6),isCount=FALSE)
cl <- makeDendrogram(cl)
cl <- mergeClusters(cl, mergeMethod = "adjP")
# Compare every piece of SE metadata against the original input object.
expect_equal(colData(cl),colData(smSimSE))
expect_equal(rownames(cl),rownames(smSimSE))
expect_equal(colnames(cl),colnames(smSimSE))
expect_equal(metadata(cl),metadata(smSimSE))
expect_equal(rowData(cl),rowData(smSimSE))
})
# Behavior when the dendrogram was built with unassigned samples: the
# "outgroup" placement should plot cleanly for every leafType/labelType,
# while "cluster" placement is expected to warn for sample-leaf plots.
test_that("`mergeClusters` works with unassignedSamples", {
clustWithDendro <- makeDendrogram(ceSim,unassignedSamples = c("outgroup"))
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
# Unassigned samples attached to the nearest cluster: sample-leaf plots warn.
clustWithDendro <- makeDendrogram(ceSim,unassignedSamples = c("cluster"))
expect_warning(mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock"))
expect_warning(mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name"))
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
})
| /tests/testthat/test_mergeClusters.R | no_license | 12379Monty/clusterExperiment | R | false | false | 4,954 | r | context("mergeCLusters")
source("create_objects.R")
test_that("`mergeClusters` works with matrix and ClusterExperiment objects", {
cl1 <- clusterSingle(smSimData, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=6),isCount=FALSE)
leg<-clusterLegend(cl1)[[primaryClusterIndex(cl1)]]
leg[,"name"]<-letters[1:6]
clusterLegend(cl1)[[primaryClusterIndex(cl1)]]<-leg
clustWithDendro <- makeDendrogram(cl1)
#matrix version
mergedList <- mergeClusters(x=transform(cl1), isCount=FALSE,
cl=primaryCluster(cl1),
dendro=clustWithDendro@dendro_clusters,
mergeMethod="adjP", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none",plotInfo="all")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="adjP")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="locfdr")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="locfdr", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="MB", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="JC", plotInfo="mergeMethod")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod")
expect_error(clustMerged <- mergeClusters(clustWithDendro, mergeMethod="none", plotInfo="mergeMethod"),"can only plot 'mergeMethod' results if one method is selected")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="none")
expect_true("mergeClusters" %in% clusterTypes(clustMerged))
expect_true("mergeClusters" %in% colnames(clusterMatrix(clustMerged)))
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
expect_error(mergeClusters(x=transform(clustWithDendro), isCount=FALSE,
cl=primaryCluster(clustWithDendro),plot="none",
mergeMethod="adjP",
dendro=clustWithDendro@dendro_samples),
"Not a valid input dendrogram")
#test if already exists
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP")
primaryClusterIndex(clustMerged)<-2
clustMerged<- makeDendrogram(clustMerged)
clustMerged2<-mergeClusters(clustMerged,mergeMethod="adjP")
expect_true("mergeClusters.1" %in% clusterTypes(clustMerged2))
expect_true(!"combineMany.1" %in% clusterTypes(clustMerged2))
expect_true(!"clusterMany.1" %in% clusterTypes(clustMerged2))
removeClusters(clustMerged, whichRemove = "mergeClusters")
})
test_that("`mergeClusters` preserves the colData and rowData of SE", {
cl <- clusterSingle(smSimSE, clusterFunction="pam",
subsample=FALSE, sequential=FALSE,
clusterDArgs=list(k=6),isCount=FALSE)
cl <- makeDendrogram(cl)
cl <- mergeClusters(cl, mergeMethod = "adjP")
expect_equal(colData(cl),colData(smSimSE))
expect_equal(rownames(cl),rownames(smSimSE))
expect_equal(colnames(cl),colnames(smSimSE))
expect_equal(metadata(cl),metadata(smSimSE))
expect_equal(rowData(cl),rowData(smSimSE))
})
test_that("`mergeClusters` works with unassignedSamples", {
clustWithDendro <- makeDendrogram(ceSim,unassignedSamples = c("outgroup"))
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
clustWithDendro <- makeDendrogram(ceSim,unassignedSamples = c("cluster"))
expect_warning(mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="colorblock"))
expect_warning(mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="samples",labelType="name"))
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="colorblock")
clustMerged <- mergeClusters(clustWithDendro, mergeMethod="adjP", plotInfo="mergeMethod",leafType="clusters",labelType="name")
})
|
library(shiny)
library(readxl)
library(shinyjs)
library(dplyr)
library(stringr)
source("functions.R")
# Single-page recipe-app UI: three stacked "screens" inside #mobile
# (button menu, recipe builder, recipe search) that the server toggles with
# shinyjs show/hide. Only #buttonScreen is visible at startup.
ui <- bootstrapPage(
theme = "theme.css",
# iframe-resizer client script so the app can be embedded in an iframe.
tags$head(
includeScript("js/iframeSizer.contentWindow.min.js")
),
tags$head(tags$script(src="scripts.js")),
useShinyjs(),
div(id = "mobile",
# Landing screen: two image buttons that route to search ("buscar") or
# to the recipe builder ("crear"). The ids are read as action buttons.
div(id = "buttonScreen",
style = "display: flex; width: 100%; text-align: center; justify-content: center;",
tags$button(
id = "buscar",
class = "btn btn-default action-button shiny-bound-input",
width = "100%",
img(src = "img/botones ceular-12.png")
),
hr(),
tags$button(
id = "crear",
style = "display: flex; width: 100%; text-align: center; justify-content: center;",
class = "btn btn-default action-button shiny-bound-input",
width = "100%",
img(src = "img/botones ceular-13.png")
)
),
# Recipe-builder screen: ingredient/price/region filters on the left,
# matching recipe list on the right. Most widgets are server-rendered
# via uiOutput so choices can come from the data.
div(id = "crearScreen", class = "crearScreen",
div(id="heading", style = "width: 100%; display: none;",
img(src = "img/botones ceular-13.png", style = "display: block; margin-left: auto; margin-right: auto;"),
p(id = "ref", '"Tomado de: Gran Libro de la Cocina Colombiana"')
),
div(id = "left",
uiOutput("select_ingUI"),
uiOutput("ing_count"),
uiOutput("selected_ing_list"),
br(),
br(),
uiOutput("priceUI", style = "display: none;"),
br(),
uiOutput("select_regionUI"),
br(),
# Back button returning to the landing screen.
tags$button(id = "volver1",
class = "btn btn-default action-button shiny-bound-input",
style = "border: none;border-radius: unset;background: transparent;display: none;",
img(src="img/back.svg", style="width:30px; height:30px;"))
),
div(id = "right", style = "display: none;",
div(id = "recetas_title",
div(id = "recetas", "Recetas"),
br(),
# Toggles ascending/descending sort by preparation time.
tags$button(
id = "orderTiempo",
class = "btn btn-default action-button shiny-bound-input",
img(src = "img/iconos especial cocina 50-04.png")
),
br()
),
uiOutput('results')
)
),
# Search screen: free-text recipe search plus result list.
div(id = "buscarScreen", style = "display: none;",
div(id = "search",
tags$img(src = "img/Iconos especial cocina-01.png"),
uiOutput("searchNameUI")
),
br(),
uiOutput("show_receta"),
tags$button(id = "volver2",
class = "btn btn-default action-button shiny-bound-input",
style = "border: none;border-radius: unset;background: transparent;",
img(src="img/back.svg", style="width:30px; height:30px;"))
)
)
)
# Recipe table loaded once at app startup and shared by all sessions.
# One row per (uid, ingredient) pair -- presumably, given the per-uid
# deduplication done everywhere below; confirm against data/recetas.Rda.
recetas <- readRDS("data/recetas.Rda")
# Server logic for the recipe app. Depends on helpers sourced from
# functions.R (search_table, firstup, noResults, getDifcultadImage/Text,
# get*Link, createIngredientesText) -- not visible here; confirm there.
server <- function(input, output, session) {
# Navigation state: which screen is shown and current time-sort direction.
rv <- reactiveValues(
lastClick = "volver",
lastClickTiempo = "asc"
)
# Search-screen data: recipes matching the free-text query (top 5).
# When there is no query but the app was opened with a deep-link URL of
# the form ?id=recetas_prohibidas, show the "forbidden" recipes instead.
dataBuscar <- reactive({
# Deduplicate to one row per recipe uid.
d <- recetas %>%
group_by(uid) %>%
filter(row_number() == 1) %>%
ungroup()
tmp <- search_table(input$searchName, d, "name") %>%
head(5)
hasSearchTerm <- !is.null(input$searchName) && input$searchName != ""
if (!hasSearchTerm && session$clientData$url_search != "") {
url <- parseQueryString(session$clientData$url_search)
if (url$id == "recetas_prohibidas") {
tmp <- d %>%
filter(prohibida == TRUE)
}
}
tmp
})
# Builder-screen data: recipes filtered by selected ingredients (a recipe
# must contain ALL selected ingredients), max price and region.
dataCrear <- reactive({
d <- recetas %>%
group_by(uid) %>%
filter(row_number() == 1) %>%
ungroup()
if (!is.null(input$select_ing)) {
# Keep uids whose ingredient rows cover every selected ingredient.
uids_to_show <- recetas %>%
filter(ing %in% input$select_ing) %>%
count(uid) %>%
filter(n == length(input$select_ing))
d <- d %>%
filter(uid %in% uids_to_show$uid)
}
if (!is.null(input$price)) {
d <- d %>%
filter(price <= input$price)
}
if (!is.null(input$region) && input$region != "Todos") {
d <- d %>%
filter(region == input$region)
}
d
})
# --- Navigation buttons just record which screen was requested ---
observeEvent(input$buscar, {
rv$lastClick <- "buscar"
})
observeEvent(input$crear, {
rv$lastClick <- "crear"
})
observeEvent(input$volver1, {
rv$lastClick <- "volver"
})
observeEvent(input$volver2, {
rv$lastClick <- "volver"
})
# Toggle the sort direction used by output$results.
observeEvent(input$orderTiempo, {
if (rv$lastClickTiempo == "desc") {
rv$lastClickTiempo <- "asc"
} else {
rv$lastClickTiempo <- "desc"
}
})
# Hide everything, then reveal only the screen matching rv$lastClick.
observe({
hide("buttonScreen")
hide("crearScreen")
hide("buscarScreen")
hide("heading")
hide("right")
hide("volver1")
if (rv$lastClick == "buscar") {
showElement("buscarScreen")
} else if (rv$lastClick == "volver") {
showElement("buttonScreen")
} else {
showElement("crearScreen")
showElement("heading")
showElement("right")
showElement("volver1")
}
})
# Deep-linking: a ?id=<uid> query string opens the search screen and, for
# a concrete recipe id, pops the detail modal. Runs once per session.
observeEvent(session$clientData$url_search, once = TRUE, {
if (session$clientData$url_search != "") {
url <- parseQueryString(session$clientData$url_search)
rv$lastClick <- "buscar"
if (url$id != "recetas_prohibidas") {
showRecetaModal(url$id)
}
}
})
# input$last_btn is set from custom JS (scripts.js) with the uid of the
# clicked list item -- presumably; confirm against www/scripts.js.
observeEvent(input$last_btn, {
showRecetaModal(input$last_btn[[1]])
})
# Ingredient picker, built from the distinct non-NA ingredients.
output$select_ingUI <- renderUI({
d <- recetas %>%
filter(!is.na(ing))
choices <- setNames(unique(d$ing), purrr::map(unique(d$ing), firstup))
selectizeInput("select_ing",
label = NULL,
choices = choices,
width = "100%",
multiple = TRUE,
options = list(plugins = list("remove_button"),
placeholder = "Escribe los ingredientes")
)
})
# Checkbox mirror of the current ingredient selection (all checked).
output$selected_ing_list <- renderUI({
choices <- NULL
if (!is.null(input$select_ing)) {
choices <- setNames(input$select_ing, purrr::map(input$select_ing, firstup))
}
checkboxGroupInput("selected_ing_checkbox_group", label = NULL,
choices = choices,
selected = input$select_ing
)
})
# Unchecking a box removes that ingredient from the selectize input,
# keeping the two widgets in sync (high priority to run before re-render).
observeEvent(input$selected_ing_checkbox_group, {
selectedOptions <- list()
if (!is.null(input$selected_ing_checkbox_group))
selectedOptions <- input$selected_ing_checkbox_group
if (length(selectedOptions) < length(input$select_ing))
updateSelectizeInput(session, "select_ing", selected = selectedOptions)
}, ignoreNULL = FALSE, priority = 10)
# Small badge showing how many ingredients are currently selected.
output$ing_count <- renderUI({
n <- 0
if (!is.null(input$selected_ing_checkbox_group)) {
n <- length(input$selected_ing_checkbox_group)
}
htmlTemplate("templates/ing_count.html",
n = n
)
})
# Region filter, "Todos" (all) plus the distinct regions in the data.
output$select_regionUI <- renderUI({
regiones <- recetas %>%
count(region) %>%
na.omit()
regiones_list <- append("Todos", regiones$region)
radioButtons("region",
"Filtre por región",
choices = regiones_list)
})
# Max-price slider (hidden in the UI; range 0-100, in thousands).
output$priceUI <- renderUI({
div(id = "price",
sliderInput("price", min = 0, max = 100,
htmlTemplate("templates/price_label.html"),
value = 100, width = "100%", pre = "$ ", post = " mil")
)
})
output$searchNameUI <- renderUI({
textInput("searchName", placeholder = "BUSCA TU RECETA",
label = NULL, width = "100%")
})
# Opens the recipe-detail modal for one uid: splits the "·"-separated
# ingredient string into lines and registers a PDF download button.
showRecetaModal <- function(uidInput) {
receta <- recetas %>%
filter(uid == uidInput) %>%
group_by(uid) %>%
filter(row_number() == 1)
ingsListNew <- ""
if (!is.na(receta$ings)) {
ingsList <- receta$ings %>%
str_split("·")
ingsListNew <- ingsList[[1]] %>%
str_trim() %>%
purrr::map(function(ingLine) {
div(style = "font-size: 10pt; font-weight: 300;", ingLine)
})
}
# Register the "Modal"-namespaced download UI + handler for this uid.
fillDownloadData(uidInput, "Modal")
showModal(modalDialog(
title = tags$span(receta$name, id = "modal_title"),
htmlTemplate("templates/receta_detail.html",
instructions = receta$instruc,
dificultadImage = getDifcultadImage(receta$dificultad),
dificultadText = getDifcultadText(receta$dificultad),
twitter = getTwitterLink(uidInput),
facebook = getFacebookLink(uidInput),
pinterest = getPinterestLink(uidInput),
whatsapp = getWhatsAppLink(uidInput),
tiempo = ifelse(is.na(receta$tiempo_mins), "", paste(receta$tiempo_mins, " mins")),
hiddenTiempo = ifelse(is.na(receta$tiempo_mins), "hidden", ""),
hiddenDificultad = ifelse(is.na(receta$dificultad), "hidden", ""),
download = uiOutput(paste0("downloadButtonModal", uidInput)),
ings = ingsListNew
),
footer = modalButton("Cerrar")
))
}
# Search-result list: one templated row per matching recipe.
observeEvent(dataBuscar(), {
output$show_receta <- renderUI({
d <- dataBuscar()
if (nrow(d) > 0 && rv$lastClick == "buscar") {
purrr::map(1:nrow(d), function(i) {
html <- htmlTemplate("templates/receta_list.html",
id = d$uid[i],
name = d$name[i],
tiempo = ifelse(is.na(d$tiempo_mins[i]), "", paste(d$tiempo_mins[i], " mins")),
hiddenTiempo = ifelse(is.na(d$tiempo_mins[i]), "hidden", "")
)
html
})
} else {
noResults()
}
})
})
# Registers, for one recipe id and UI namespace ("List" or "Modal"), both
# the download link UI and the downloadHandler that renders the recipe to
# PDF via a generated R Markdown file. Ingredients are split into two
# roughly equal bullet columns for the PDF layout.
fillDownloadData <- function (id, namespace = "List") {
output[[paste0("downloadButton", namespace, id)]] <- renderUI({
downloadLink(paste0("downloadData", namespace, id),
div(style="display:flex; font-weight: 300; font-size: 8pt;",
img(src="img/Iconos especial cocina-05.png", class="image_smaller", style="margin-top: 2px;font-weight: 300;"),
p("Descargar", style="margin-top: 3px;")
)
)
})
receta <- recetas %>%
filter(uid == id) %>%
filter(row_number() == 1)
ings_lines <- str_split(receta$ings, "·")
column1 <- "- "
column2 <- "- "
if (!is.na(ings_lines)) {
ings_lines_length <- length(ings_lines[[1]])
n_column1 <- ings_lines_length - round(ings_lines_length / 2)
column1 <- ings_lines[[1]][1:n_column1] %>%
str_replace("\n", "") %>%
str_trim() %>%
paste(collapse = "\n- ") %>%
paste('-', .)
column2 <- ings_lines[[1]][(n_column1+1):ings_lines_length] %>%
str_replace("\n", "") %>%
str_trim() %>%
paste(collapse = "\n- ") %>%
paste('-', .)
}
output[[paste0("downloadData", namespace, id)]] <- downloadHandler(
paste0('receta_', id, '.pdf'),
content = function(file) {
params <- list(
name = receta$name,
instruc = receta$instruc
)
# Write an Rmd whose YAML front matter carries the recipe fields,
# render it to PDF, then stream the bytes into the download file.
fileConn <- file(paste0("download_template", ".Rmd"))
writeLines(c("---\nparams:\noutput:\n  pdf_document:\n    latex_engine: xelatex\n    template: download.tex\n    keep_tex: true\nname: \"`r params$name`\"\ninstruc: \"`r params$instruc`\"\ncolumn1:", column1,"column2:", column2, "---"), fileConn)
close(fileConn)
rmarkdown::render(paste0("download_template", ".Rmd"),
params = params,
output_file = paste0("built_report", ".pdf"))
readBin(con = paste0("built_report", ".pdf"),
what = "raw",
n = file.info(paste0("built_report", ".pdf"))[, "size"]) %>%
writeBin(con = file)
# NOTE(review): this assigns a local variable and has no effect;
# contentType should be an argument of downloadHandler() instead.
contentType = paste0("built_report", ".pdf")
}
)
}
# Builder-result list: detailed templated rows, sorted by prep time in
# the direction last toggled by input$orderTiempo.
output$results <- renderUI({
if (!is.null(dataCrear()) && nrow(dataCrear()) > 0 && rv$lastClick == "crear") {
if (rv$lastClickTiempo == "desc") {
d <- dataCrear() %>%
arrange(desc(tiempo_mins))
} else {
d <- dataCrear() %>%
arrange(tiempo_mins)
}
withProgress(message = 'Leyendo las recetas', value = 0, {
purrr::map(1:nrow(d), function(i) {
incProgress(1/nrow(d), detail = paste("receta ", i))
recetaId <- d$uid[i]
receta <- recetas %>%
filter(uid == recetaId)
fillDownloadData(recetaId)
html <- htmlTemplate("templates/receta_list_detailed.html",
id = recetaId,
name = d$name[i],
dificultadImage = getDifcultadImage(d$dificultad[i]),
dificultadText = getDifcultadText(d$dificultad[i]),
tiempo = ifelse(is.na(d$tiempo_mins[i]), "", paste(d$tiempo_mins[i], " mins")),
ingredientes = createIngredientesText(receta$ing) ,
twitter = getTwitterLink(recetaId),
facebook = getFacebookLink(recetaId),
pinterest = getPinterestLink(recetaId),
whatsapp = getWhatsAppLink(recetaId),
hiddenTiempo = ifelse(is.na(d$tiempo_mins[i]), "hidden", ""),
hiddenDificultad = ifelse(is.na(d$dificultad[i]), "hidden", ""),
download = uiOutput(paste0("downloadButtonList", recetaId))
)
html
})
})
} else {
noResults()
}
})
}
# App entry point: wire the UI and server defined above.
shinyApp(ui = ui, server = server)
| /app.R | no_license | randommonkey/cocina-colombiana | R | false | false | 13,998 | r | library(shiny)
library(readxl)
library(shinyjs)
library(dplyr)
library(stringr)
source("functions.R")
ui <- bootstrapPage(
theme = "theme.css",
tags$head(
includeScript("js/iframeSizer.contentWindow.min.js")
),
tags$head(tags$script(src="scripts.js")),
useShinyjs(),
div(id = "mobile",
div(id = "buttonScreen",
style = "display: flex; width: 100%; text-align: center; justify-content: center;",
tags$button(
id = "buscar",
class = "btn btn-default action-button shiny-bound-input",
width = "100%",
img(src = "img/botones ceular-12.png")
),
hr(),
tags$button(
id = "crear",
style = "display: flex; width: 100%; text-align: center; justify-content: center;",
class = "btn btn-default action-button shiny-bound-input",
width = "100%",
img(src = "img/botones ceular-13.png")
)
),
div(id = "crearScreen", class = "crearScreen",
div(id="heading", style = "width: 100%; display: none;",
img(src = "img/botones ceular-13.png", style = "display: block; margin-left: auto; margin-right: auto;"),
p(id = "ref", '"Tomado de: Gran Libro de la Cocina Colombiana"')
),
div(id = "left",
uiOutput("select_ingUI"),
uiOutput("ing_count"),
uiOutput("selected_ing_list"),
br(),
br(),
uiOutput("priceUI", style = "display: none;"),
br(),
uiOutput("select_regionUI"),
br(),
tags$button(id = "volver1",
class = "btn btn-default action-button shiny-bound-input",
style = "border: none;border-radius: unset;background: transparent;display: none;",
img(src="img/back.svg", style="width:30px; height:30px;"))
),
div(id = "right", style = "display: none;",
div(id = "recetas_title",
div(id = "recetas", "Recetas"),
br(),
tags$button(
id = "orderTiempo",
class = "btn btn-default action-button shiny-bound-input",
img(src = "img/iconos especial cocina 50-04.png")
),
br()
),
uiOutput('results')
)
),
div(id = "buscarScreen", style = "display: none;",
div(id = "search",
tags$img(src = "img/Iconos especial cocina-01.png"),
uiOutput("searchNameUI")
),
br(),
uiOutput("show_receta"),
tags$button(id = "volver2",
class = "btn btn-default action-button shiny-bound-input",
style = "border: none;border-radius: unset;background: transparent;",
img(src="img/back.svg", style="width:30px; height:30px;"))
)
)
)
recetas <- readRDS("data/recetas.Rda")
server <- function(input, output, session) {
rv <- reactiveValues(
lastClick = "volver",
lastClickTiempo = "asc"
)
dataBuscar <- reactive({
d <- recetas %>%
group_by(uid) %>%
filter(row_number() == 1) %>%
ungroup()
tmp <- search_table(input$searchName, d, "name") %>%
head(5)
hasSearchTerm <- !is.null(input$searchName) && input$searchName != ""
if (!hasSearchTerm && session$clientData$url_search != "") {
url <- parseQueryString(session$clientData$url_search)
if (url$id == "recetas_prohibidas") {
tmp <- d %>%
filter(prohibida == TRUE)
}
}
tmp
})
dataCrear <- reactive({
d <- recetas %>%
group_by(uid) %>%
filter(row_number() == 1) %>%
ungroup()
if (!is.null(input$select_ing)) {
uids_to_show <- recetas %>%
filter(ing %in% input$select_ing) %>%
count(uid) %>%
filter(n == length(input$select_ing))
d <- d %>%
filter(uid %in% uids_to_show$uid)
}
if (!is.null(input$price)) {
d <- d %>%
filter(price <= input$price)
}
if (!is.null(input$region) && input$region != "Todos") {
d <- d %>%
filter(region == input$region)
}
d
})
observeEvent(input$buscar, {
rv$lastClick <- "buscar"
})
observeEvent(input$crear, {
rv$lastClick <- "crear"
})
observeEvent(input$volver1, {
rv$lastClick <- "volver"
})
observeEvent(input$volver2, {
rv$lastClick <- "volver"
})
observeEvent(input$orderTiempo, {
if (rv$lastClickTiempo == "desc") {
rv$lastClickTiempo <- "asc"
} else {
rv$lastClickTiempo <- "desc"
}
})
observe({
hide("buttonScreen")
hide("crearScreen")
hide("buscarScreen")
hide("heading")
hide("right")
hide("volver1")
if (rv$lastClick == "buscar") {
showElement("buscarScreen")
} else if (rv$lastClick == "volver") {
showElement("buttonScreen")
} else {
showElement("crearScreen")
showElement("heading")
showElement("right")
showElement("volver1")
}
})
observeEvent(session$clientData$url_search, once = TRUE, {
if (session$clientData$url_search != "") {
url <- parseQueryString(session$clientData$url_search)
rv$lastClick <- "buscar"
if (url$id != "recetas_prohibidas") {
showRecetaModal(url$id)
}
}
})
observeEvent(input$last_btn, {
showRecetaModal(input$last_btn[[1]])
})
output$select_ingUI <- renderUI({
d <- recetas %>%
filter(!is.na(ing))
choices <- setNames(unique(d$ing), purrr::map(unique(d$ing), firstup))
selectizeInput("select_ing",
label = NULL,
choices = choices,
width = "100%",
multiple = TRUE,
options = list(plugins = list("remove_button"),
placeholder = "Escribe los ingredientes")
)
})
output$selected_ing_list <- renderUI({
choices <- NULL
if (!is.null(input$select_ing)) {
choices <- setNames(input$select_ing, purrr::map(input$select_ing, firstup))
}
checkboxGroupInput("selected_ing_checkbox_group", label = NULL,
choices = choices,
selected = input$select_ing
)
})
observeEvent(input$selected_ing_checkbox_group, {
selectedOptions <- list()
if (!is.null(input$selected_ing_checkbox_group))
selectedOptions <- input$selected_ing_checkbox_group
if (length(selectedOptions) < length(input$select_ing))
updateSelectizeInput(session, "select_ing", selected = selectedOptions)
}, ignoreNULL = FALSE, priority = 10)
output$ing_count <- renderUI({
n <- 0
if (!is.null(input$selected_ing_checkbox_group)) {
n <- length(input$selected_ing_checkbox_group)
}
htmlTemplate("templates/ing_count.html",
n = n
)
})
output$select_regionUI <- renderUI({
regiones <- recetas %>%
count(region) %>%
na.omit()
regiones_list <- append("Todos", regiones$region)
radioButtons("region",
"Filtre por región",
choices = regiones_list)
})
output$priceUI <- renderUI({
div(id = "price",
sliderInput("price", min = 0, max = 100,
htmlTemplate("templates/price_label.html"),
value = 100, width = "100%", pre = "$ ", post = " mil")
)
})
output$searchNameUI <- renderUI({
textInput("searchName", placeholder = "BUSCA TU RECETA",
label = NULL, width = "100%")
})
showRecetaModal <- function(uidInput) {
receta <- recetas %>%
filter(uid == uidInput) %>%
group_by(uid) %>%
filter(row_number() == 1)
ingsListNew <- ""
if (!is.na(receta$ings)) {
ingsList <- receta$ings %>%
str_split("·")
ingsListNew <- ingsList[[1]] %>%
str_trim() %>%
purrr::map(function(ingLine) {
div(style = "font-size: 10pt; font-weight: 300;", ingLine)
})
}
fillDownloadData(uidInput, "Modal")
showModal(modalDialog(
title = tags$span(receta$name, id = "modal_title"),
htmlTemplate("templates/receta_detail.html",
instructions = receta$instruc,
dificultadImage = getDifcultadImage(receta$dificultad),
dificultadText = getDifcultadText(receta$dificultad),
twitter = getTwitterLink(uidInput),
facebook = getFacebookLink(uidInput),
pinterest = getPinterestLink(uidInput),
whatsapp = getWhatsAppLink(uidInput),
tiempo = ifelse(is.na(receta$tiempo_mins), "", paste(receta$tiempo_mins, " mins")),
hiddenTiempo = ifelse(is.na(receta$tiempo_mins), "hidden", ""),
hiddenDificultad = ifelse(is.na(receta$dificultad), "hidden", ""),
download = uiOutput(paste0("downloadButtonModal", uidInput)),
ings = ingsListNew
),
footer = modalButton("Cerrar")
))
}
observeEvent(dataBuscar(), {
output$show_receta <- renderUI({
d <- dataBuscar()
if (nrow(d) > 0 && rv$lastClick == "buscar") {
purrr::map(1:nrow(d), function(i) {
html <- htmlTemplate("templates/receta_list.html",
id = d$uid[i],
name = d$name[i],
tiempo = ifelse(is.na(d$tiempo_mins[i]), "", paste(d$tiempo_mins[i], " mins")),
hiddenTiempo = ifelse(is.na(d$tiempo_mins[i]), "hidden", "")
)
html
})
} else {
noResults()
}
})
})
# Register the per-recipe download UI and its PDF download handler.
# id:        the recipe uid.
# namespace: distinguishes the list view ("List") from the modal view
#            ("Modal") so output ids do not collide.
fillDownloadData <- function(id, namespace = "List") {
  # Download link rendered into uiOutput("downloadButton<namespace><id>").
  output[[paste0("downloadButton", namespace, id)]] <- renderUI({
    downloadLink(paste0("downloadData", namespace, id),
      div(style="display:flex; font-weight: 300; font-size: 8pt;",
        img(src="img/Iconos especial cocina-05.png", class="image_smaller", style="margin-top: 2px;font-weight: 300;"),
        p("Descargar", style="margin-top: 3px;")
      )
    )
  })
  # Look up the recipe row (first match wins if uid is duplicated).
  receta <- recetas %>%
    filter(uid == id) %>%
    filter(row_number() == 1)
  # Split the "·"-separated ingredient string into two bullet columns for
  # the PDF layout; default to a single dash when there are no ingredients.
  ings_lines <- str_split(receta$ings, "·")
  column1 <- "- "
  column2 <- "- "
  # Fix: test the source field, not the list returned by str_split();
  # is.na() on a list only worked by accident and matches the check used
  # in showRecetaModal().
  if (!is.na(receta$ings)) {
    ings_lines_length <- length(ings_lines[[1]])
    n_column1 <- ings_lines_length - round(ings_lines_length / 2)
    column1 <- ings_lines[[1]][1:n_column1] %>%
      str_replace("\n", "") %>%
      str_trim() %>%
      paste(collapse = "\n- ") %>%
      paste('-', .)
    column2 <- ings_lines[[1]][(n_column1+1):ings_lines_length] %>%
      str_replace("\n", "") %>%
      str_trim() %>%
      paste(collapse = "\n- ") %>%
      paste('-', .)
  }
  output[[paste0("downloadData", namespace, id)]] <- downloadHandler(
    paste0('receta_', id, '.pdf'),
    content = function(file) {
      params <- list(
        name = receta$name,
        instruc = receta$instruc
      )
      # Write the Rmd front matter, render it to PDF, then stream the
      # resulting bytes into the file Shiny hands us.
      fileConn <- file(paste0("download_template", ".Rmd"))
      writeLines(c("---\nparams:\noutput:\n pdf_document:\n latex_engine: xelatex\n template: download.tex\n keep_tex: true\nname: \"`r params$name`\"\ninstruc: \"`r params$instruc`\"\ncolumn1:", column1,"column2:", column2, "---"), fileConn)
      close(fileConn)
      rmarkdown::render(paste0("download_template", ".Rmd"),
        params = params,
        output_file = paste0("built_report", ".pdf"))
      readBin(con = paste0("built_report", ".pdf"),
        what = "raw",
        n = file.info(paste0("built_report", ".pdf"))[, "size"]) %>%
        writeBin(con = file)
    },
    # Fix: this was a dead local assignment inside `content`; the MIME type
    # must be passed as the contentType argument of downloadHandler().
    contentType = "application/pdf"
  )
}
# Render the detailed recipe list for the "crear" (create-from-ingredients)
# flow, sorted by preparation time in the direction last chosen by the user.
output$results <- renderUI({
if (!is.null(dataCrear()) && nrow(dataCrear()) > 0 && rv$lastClick == "crear") {
# Sort ascending/descending by tiempo_mins per the toggle state.
if (rv$lastClickTiempo == "desc") {
d <- dataCrear() %>%
arrange(desc(tiempo_mins))
} else {
d <- dataCrear() %>%
arrange(tiempo_mins)
}
# Progress bar while one template per recipe is instantiated.
withProgress(message = 'Leyendo las recetas', value = 0, {
purrr::map(1:nrow(d), function(i) {
incProgress(1/nrow(d), detail = paste("receta ", i))
recetaId <- d$uid[i]
receta <- recetas %>%
filter(uid == recetaId)
# Register the download handler under the "List" namespace (default).
fillDownloadData(recetaId)
# NOTE(review): `receta$ing` relies on data.frame $ partial matching
# (the column elsewhere is `ings`) -- confirm and spell out the name.
html <- htmlTemplate("templates/receta_list_detailed.html",
id = recetaId,
name = d$name[i],
dificultadImage = getDifcultadImage(d$dificultad[i]),
dificultadText = getDifcultadText(d$dificultad[i]),
tiempo = ifelse(is.na(d$tiempo_mins[i]), "", paste(d$tiempo_mins[i], " mins")),
ingredientes = createIngredientesText(receta$ing) ,
twitter = getTwitterLink(recetaId),
facebook = getFacebookLink(recetaId),
pinterest = getPinterestLink(recetaId),
whatsapp = getWhatsAppLink(recetaId),
hiddenTiempo = ifelse(is.na(d$tiempo_mins[i]), "hidden", ""),
hiddenDificultad = ifelse(is.na(d$dificultad[i]), "hidden", ""),
download = uiOutput(paste0("downloadButtonList", recetaId))
)
html
})
})
} else {
noResults()
}
})
}
shinyApp(ui = ui, server = server)
|
##----------------------------------------------------------------------------##
#' no_retweets
#'
#' Mutes retweets from most recent n users
#'
#' @param n Number of most recent users to mute retweets.
#' @param refresh Logical indicating whether to refresh friends list.
#' @return API response object.
#' @export
#' @importFrom rtweet post_follow
#' @importFrom httr warn_for_status
# Thin public wrapper: simply forwards its arguments to the worker
# implementation no_retweets_().
no_retweets <- function(n = 1000L, refresh = TRUE) {
  no_retweets_(n = n, refresh = refresh)
}
#' no_retweets_
#'
#' Mutes retweets from most recent n users
#'
#' @param n Number of most recent users to mute retweets.
#' @param refresh Logical indicating whether to refresh friends list.
#' @return API response object.
#' @importFrom rtweet post_follow
#' @importFrom httr warn_for_status
# Worker: mute retweets from up to n of the authenticated user's most
# recent friends by re-following each with retweets = FALSE.
# n:       maximum number of friends to process.
# refresh: refetch the friends list instead of using the cached ".fds".
no_retweets_ <- function(n = 1000L, refresh = TRUE) {
  user <- home_user()
  # Reuse the cached friends list unless a refresh is requested or the
  # cache (".fds" in .trickrtweet) has never been populated.
  if (refresh || !".fds" %in% ls(envir = .trickrtweet, all.names = TRUE)) {
    fds <- as.character(rtweet::get_friends(user)[["user_id"]])
    assign(".fds", fds, envir = .trickrtweet)
  } else {
    fds <- get(".fds", envir = .trickrtweet)
  }
  ## modify to follow without retweets, warning on any failed API call
  f <- function(x) {
    for (i in seq_along(x)) {
      r <- rtweet::post_follow(x[i], retweets = FALSE)
      httr::warn_for_status(r)
    }
  }
  # Fix: head() clamps to length(fds); the old fds[seq_len(n)] padded the
  # vector with NA user ids when fewer than n friends exist, firing bogus
  # API requests.
  f(head(fds, n))
}
| /R/no_retweets.R | no_license | mkearney/trickrtweet | R | false | false | 1,329 | r | ##----------------------------------------------------------------------------##
#' no_retweets
#'
#' Mutes retweets from most recent n users
#'
#' @param n Number of most recent users to mute retweets.
#' @param refresh Logical indicating whether to refresh friends list.
#' @return API response object.
#' @export
#' @importFrom rtweet post_follow
#' @importFrom httr warn_for_status
# Thin public wrapper: simply forwards its arguments to the worker
# implementation no_retweets_().
no_retweets <- function(n = 1000L, refresh = TRUE) {
  no_retweets_(n = n, refresh = refresh)
}
#' no_retweets_
#'
#' Mutes retweets from most recent n users
#'
#' @param n Number of most recent users to mute retweets.
#' @param refresh Logical indicating whether to refresh friends list.
#' @return API response object.
#' @importFrom rtweet post_follow
#' @importFrom httr warn_for_status
# Worker: mute retweets from up to n of the authenticated user's most
# recent friends by re-following each with retweets = FALSE.
# n:       maximum number of friends to process.
# refresh: refetch the friends list instead of using the cached ".fds".
no_retweets_ <- function(n = 1000L, refresh = TRUE) {
  user <- home_user()
  # Reuse the cached friends list unless a refresh is requested or the
  # cache (".fds" in .trickrtweet) has never been populated.
  if (refresh || !".fds" %in% ls(envir = .trickrtweet, all.names = TRUE)) {
    fds <- as.character(rtweet::get_friends(user)[["user_id"]])
    assign(".fds", fds, envir = .trickrtweet)
  } else {
    fds <- get(".fds", envir = .trickrtweet)
  }
  ## modify to follow without retweets, warning on any failed API call
  f <- function(x) {
    for (i in seq_along(x)) {
      r <- rtweet::post_follow(x[i], retweets = FALSE)
      httr::warn_for_status(r)
    }
  }
  # Fix: head() clamps to length(fds); the old fds[seq_len(n)] padded the
  # vector with NA user ids when fewer than n friends exist, firing bogus
  # API requests.
  f(head(fds, n))
}
|
# data_1990.R - hard-coded 1990 UCR crime counts for the five study cities,
# joined to place identifiers and folded into the running per-crime tables
# via subset_tables(). Expects `places`, the *Table objects and dplyr in scope.
# create index of included cities
index <- c("Kansas City", "St. Joseph", "St. Louis", "Springfield", "University City")
year <- 1990
# use index to parse places
places %>%
filter(NAME %in% index) %>%
rename(name = NAME, placefp = PLACEFP) %>%
mutate(year = rep(year, n())) %>%
select(year, name, placefp) %>%
arrange(name) -> placesSub
# create kansas city object
kansas_city <- tibble(
name = "Kansas City",
pop = 434711,
homicide = 121,
rape = 517,
robbery = 4492,
ag_assault = 5957,
burglary = 11640,
larceny = 23229,
mv_larceny = 10352,
arson = 509
)
# create st. jo object
st_jo <- tibble(
name = "St. Joseph",
pop = 71852,
homicide = 4,
rape = 16,
robbery = 26,
ag_assault = 514,
burglary = 908,
larceny = 2696,
mv_larceny = 203,
arson = 30
)
# create st. louis object
st_louis <- tibble(
name = "St. Louis",
pop = 396685,
homicide = 177,
rape = 331,
robbery = 4708,
ag_assault = 8466,
burglary = 11531,
larceny = 24564,
mv_larceny = 8422,
arson = 687
)
# create springfield object
springfield <- tibble(
name = "Springfield",
pop = 140494,
homicide = 7,
rape = 58,
robbery = 139,
ag_assault = 367,
burglary = 2354,
larceny = 9193,
mv_larceny = 431,
arson = 91
)
# create university city object
u_city <- tibble(
name = "University City",
pop = 40087,
homicide = 2,
rape = 15,
robbery = 110,
ag_assault = 147,
burglary = 637,
larceny = 1445,
mv_larceny = 369,
arson = 18
)
# combine
data <- bind_rows(kansas_city, st_jo, st_louis, springfield, u_city)
# add place data
data <- left_join(placesSub, data, by = "name")
# clean-up environment
rm(placesSub, kansas_city, st_jo, st_louis, springfield, u_city)
# update population object
popTable <- subset_tables(input = data, update = popTable, table = "population")
# update homicide object
homicideTable <- subset_tables(input = data, update = homicideTable, table = "homicide")
# update robbery object
robberyTable <- subset_tables(input = data, update = robberyTable, table = "robbery")
# update aggravated assault object
agAssaultTable <- subset_tables(input = data, update = agAssaultTable, table = "aggravated assault")
# update rape object
rapeTable <- subset_tables(input = data, update = rapeTable, table = "rape")
# update burglary object
burglaryTable <- subset_tables(input = data, update = burglaryTable, table = "burglary")
# update larceny object
larcenyTable <- subset_tables(input = data, update = larcenyTable, table = "larceny")
# update auto theft object
autoTheftTable <- subset_tables(input = data, update = autoTheftTable, table = "auto theft")
# update arson object
arsonTable <- subset_tables(input = data, update = arsonTable, table = "arson")
# clean-up environment
rm(index, data, year)
| /source/data_1990.R | permissive | slu-openGIS/MO_CRIME_Database | R | false | false | 2,798 | r |
# create index of included cities
index <- c("Kansas City", "St. Joseph", "St. Louis", "Springfield", "University City")
year <- 1990
# use index to parse places
places %>%
filter(NAME %in% index) %>%
rename(name = NAME, placefp = PLACEFP) %>%
mutate(year = rep(year, n())) %>%
select(year, name, placefp) %>%
arrange(name) -> placesSub
# create kansas city object
kansas_city <- tibble(
name = "Kansas City",
pop = 434711,
homicide = 121,
rape = 517,
robbery = 4492,
ag_assault = 5957,
burglary = 11640,
larceny = 23229,
mv_larceny = 10352,
arson = 509
)
# create st. jo object
st_jo <- tibble(
name = "St. Joseph",
pop = 71852,
homicide = 4,
rape = 16,
robbery = 26,
ag_assault = 514,
burglary = 908,
larceny = 2696,
mv_larceny = 203,
arson = 30
)
# create st. louis object
st_louis <- tibble(
name = "St. Louis",
pop = 396685,
homicide = 177,
rape = 331,
robbery = 4708,
ag_assault = 8466,
burglary = 11531,
larceny = 24564,
mv_larceny = 8422,
arson = 687
)
# create springfield object
springfield <- tibble(
name = "Springfield",
pop = 140494,
homicide = 7,
rape = 58,
robbery = 139,
ag_assault = 367,
burglary = 2354,
larceny = 9193,
mv_larceny = 431,
arson = 91
)
# create university city object
u_city <- tibble(
name = "University City",
pop = 40087,
homicide = 2,
rape = 15,
robbery = 110,
ag_assault = 147,
burglary = 637,
larceny = 1445,
mv_larceny = 369,
arson = 18
)
# combine
data <- bind_rows(kansas_city, st_jo, st_louis, springfield, u_city)
# add place data
data <- left_join(placesSub, data, by = "name")
# clean-up enviornment
rm(placesSub, kansas_city, st_jo, st_louis, springfield, u_city)
# update population object
popTable <- subset_tables(input = data, update = popTable, table = "population")
# update homicide object
homicideTable <- subset_tables(input = data, update = homicideTable, table = "homicide")
# update robbery object
robberyTable <- subset_tables(input = data, update = robberyTable, table = "robbery")
# update aggrevated assault object
agAssaultTable <- subset_tables(input = data, update = agAssaultTable, table = "aggravated assault")
# create rape object
rapeTable <- subset_tables(input = data, update = rapeTable, table = "rape")
# create burlary object
burglaryTable <- subset_tables(input = data, update = burglaryTable, table = "burglary")
# create larceny object
larcenyTable <- subset_tables(input = data, update = larcenyTable, table = "larceny")
# create auto theft object
autoTheftTable <- subset_tables(input = data, update = autoTheftTable, table = "auto theft")
# create arson object
arsonTable <- subset_tables(input = data, update = arsonTable, table = "arson")
# clean-up enviornment
rm(index, data, year)
|
library(psych)
library(readxl)
library(ggplot2)
# import the data (interactive file picker)
mydata <- read.table(file.choose(), header = T)
View(mydata)
# Regress band-1 surface reflectance on band-2 reflectance, its
# interactions with NDVI and solar angle, and the main effects.
model<-lm(mydata$band1_sur0.67 ~ I(mydata$band2_RAC0.86*mydata$NDVI)
+I(mydata$S_angle*mydata$band2_RAC0.86)
+mydata$band2_RAC0.86+mydata$S_angle)
summary(model)
pred.b1 <- predict(model)
head(pred.b1)
v<-data.frame(x=mydata$band1_sur0.67,pred.b1)
head(v)
# Fix: geom_smooth()'s formula must be written in terms of the aes()
# variables (y ~ x); the old `formula = pred.b1~x` made the smoother look
# up the full-length pred.b1 vector in the calling environment instead of
# the per-panel plot data.
g <- ggplot(v, aes(x, pred.b1,label=pred.b1)) +geom_smooth(method = "lm",se=FALSE,color="black",formula = y ~ x)+
geom_point()
print(g)
###########################################
# Predicted (x) vs derived (y) reflectance with a 1:1 reference line.
# Fix: xlim/ylim now match the variables actually plotted on each axis;
# they were swapped in the original call.
plot(pred.b1,mydata$band1_sur0.67, pch = 20, col = 2, xlim = c(min(pred.b1),max(pred.b1)), ylim = c(min(mydata$band1_sur0.67),max(mydata$band1_sur0.67))
,xlab = 'Estimated Surface Reflectance', ylab = 'Derived Surface Reflectance')
abline(0,1, col = 4)
#####################################
# keep the first 5000 rows / 7 columns for inspection
m<-mydata[c(1:5000),c(1:7)]
head(m)
m | /DB_NG_SIR.R | no_license | BijoyKrGayen/DB | R | false | false | 945 | r | library(psych)
library(readxl)
library(ggplot2)
# import the data (interactive file picker)
mydata <- read.table(file.choose(), header = T)
View(mydata)
# Regress band-1 surface reflectance on band-2 reflectance, its
# interactions with NDVI and solar angle, and the main effects.
model<-lm(mydata$band1_sur0.67 ~ I(mydata$band2_RAC0.86*mydata$NDVI)
+I(mydata$S_angle*mydata$band2_RAC0.86)
+mydata$band2_RAC0.86+mydata$S_angle)
summary(model)
pred.b1 <- predict(model)
head(pred.b1)
v<-data.frame(x=mydata$band1_sur0.67,pred.b1)
head(v)
# Fix: geom_smooth()'s formula must be written in terms of the aes()
# variables (y ~ x); the old `formula = pred.b1~x` made the smoother look
# up the full-length pred.b1 vector in the calling environment instead of
# the per-panel plot data.
g <- ggplot(v, aes(x, pred.b1,label=pred.b1)) +geom_smooth(method = "lm",se=FALSE,color="black",formula = y ~ x)+
geom_point()
print(g)
###########################################
# Predicted (x) vs derived (y) reflectance with a 1:1 reference line.
# Fix: xlim/ylim now match the variables actually plotted on each axis;
# they were swapped in the original call.
plot(pred.b1,mydata$band1_sur0.67, pch = 20, col = 2, xlim = c(min(pred.b1),max(pred.b1)), ylim = c(min(mydata$band1_sur0.67),max(mydata$band1_sur0.67))
,xlab = 'Estimated Surface Reflectance', ylab = 'Derived Surface Reflectance')
abline(0,1, col = 4)
#####################################
# keep the first 5000 rows / 7 columns for inspection
m<-mydata[c(1:5000),c(1:7)]
head(m)
m |
context("Check logistic confidence intervals")
test_that("Intervals between 0 and 1",{
# Seed the RNG: without it rbinom() can return an all-0/all-1 response,
# making the glm fit degenerate and the test flaky.
set.seed(42)
x <- rnorm(10)
y <- rbinom(10, 1, .5)
dat <- data.frame(x, y)
o <- glm(y ~ x, data = dat, family = binomial)
j <- predict_CI(o)
expect_true(all(j$low.ci > 0 & j$up.ci < 1))
})
| /inst/tests/test-logistic.R | no_license | arturochian/glmExtras | R | false | false | 292 | r | context("Check logistic confidence intervals")
test_that("Intervals between 0 and 1",{
# Seed the RNG: without it rbinom() can return an all-0/all-1 response,
# making the glm fit degenerate and the test flaky.
set.seed(42)
x <- rnorm(10)
y <- rbinom(10, 1, .5)
dat <- data.frame(x, y)
o <- glm(y ~ x, data = dat, family = binomial)
j <- predict_CI(o)
expect_true(all(j$low.ci > 0 & j$up.ci < 1))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{filter_image}
\alias{filter_image}
\title{Filter: display an image}
\usage{
filter_image(image)
}
\arguments{
\item{image}{A link or path to an image resource.}
}
\description{
Display an image using a URL or a relative path to an on-disk resource.
}
\examples{
# Place an image (obtained via an image
# link) within a rectangle element using
# the `filter_image()` filter
SVG(width = 500, height = 500) \%>\%
svg_filter(
id = "image",
filters = list(
filter_image(
image = "https://www.r-project.org/logo/Rlogo.png"
)
)
) \%>\%
svg_rect(
x = 25, y = 25,
width = "50\%", height = "50\%",
attrs = svg_attrs_pres(filter = "image")
)
}
| /man/filter_image.Rd | permissive | aespar21/omsvg | R | false | true | 780 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{filter_image}
\alias{filter_image}
\title{Filter: display an image}
\usage{
filter_image(image)
}
\arguments{
\item{image}{A link or path to an image resource.}
}
\description{
Display an image using a URL or a relative path to an on-disk resource.
}
\examples{
# Place an image (obtained via an image
# link) within a rectangle element using
# the `filter_image()` filter
SVG(width = 500, height = 500) \%>\%
svg_filter(
id = "image",
filters = list(
filter_image(
image = "https://www.r-project.org/logo/Rlogo.png"
)
)
) \%>\%
svg_rect(
x = 25, y = 25,
width = "50\%", height = "50\%",
attrs = svg_attrs_pres(filter = "image")
)
}
|
#' Hawkeye Rhabdo Football Study
#'
#' @description Rhabdo dataset including None and Any
#'
#' @docType data
#'
#' @usage data(Rhabdo3)
#'
#' @format An object of class \code{"data.table"}
#'
#' @references BIOS:7410 class notes
#'
#' @examples
#' data(Rhabdo3)
#' print(Rhabdo3)
#' tabs <- proc_freq(Rhabdo3, "Freq", "Shakes*Rhabdo / ChiSq CellChi2")
"Rhabdo3" | /R/rhabdo3_data.R | no_license | collinn/sassyR | R | false | false | 369 | r | #' Hawkeye Rhabdo Football Study
#'
#' @description Rhabdo dataset including None and Any
#'
#' @docType data
#'
#' @usage data(Rhabdo3)
#'
#' @format An object of class \code{"data.table"}
#'
#' @references BIOS:7410 class notes
#'
#' @examples
#' data(Rhabdo3)
#' print(Rhabdo3)
#' tabs <- proc_freq(Rhabdo3, "Freq", "Shakes*Rhabdo / ChiSq CellChi2")
"Rhabdo3" |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{renderSvgPanZoom}
\alias{renderSvgPanZoom}
\title{Widget render function for use in Shiny}
\usage{
renderSvgPanZoom(expr, env = parent.frame(), quoted = FALSE)
}
\description{
Widget render function for use in Shiny
}
| /man/renderSvgPanZoom.Rd | permissive | dshen1/svgPanZoom | R | false | false | 278 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{renderSvgPanZoom}
\alias{renderSvgPanZoom}
\title{Widget render function for use in Shiny}
\usage{
renderSvgPanZoom(expr, env = parent.frame(), quoted = FALSE)
}
\description{
Widget render function for use in Shiny
}
|
/plot1.R | no_license | InigoMI/ExData_Plotting1 | R | false | false | 1,100 | r | ||
# Server: echoes the user's selections back into two text outputs.
shinyServer(
function(input, output) {
# Describes the currently selected variable (input$var).
output$text1 <- renderText({
paste("You have selected this",input$var)
})
# Describes the selected numeric range (input$range slider endpoints).
output$text2 <- renderText({
paste("You have selected that goes from",
input$range[1],"to",input$range[2])
})
}
) | /server.R | no_license | hhk123g/Rscript | R | false | false | 268 | r | shinyServer(
function(input, output) {
output$text1 <- renderText({
paste("You have selected this",input$var)
})
output$text2 <- renderText({
paste("You have selected that goes from",
input$range[1],"to",input$range[2])
})
}
) |
## Build a cache-aware wrapper around a matrix. The returned list of
## accessor functions lets cacheSolve() store the inverse the first time
## it is computed and reuse it afterwards; set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  ## Named list so callers can reach the closures with the $ operator.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing it at most once: a previously cached inverse is returned
## directly; otherwise solve() runs and the result is stored for reuse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
| /cachematrix.R | no_license | thulasitk/ProgrammingAssignment2 | R | false | false | 1,250 | r | ## This function creates a special "matrix" object that can cache its inverse
## Build a cache-aware wrapper around a matrix. The returned list of
## accessor functions lets cacheSolve() store the inverse the first time
## it is computed and reuse it afterwards; set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  ## Named list so callers can reach the closures with the $ operator.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing it at most once: a previously cached inverse is returned
## directly; otherwise solve() runs and the result is stored for reuse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
#' Reproduce the gene or exons used in the RangedSummarizedExperiment objects
#'
#' This function reproduces the gene or exon level information used for creating
#' the \link[SummarizedExperiment]{RangedSummarizedExperiment-class}
#' objects provided by recount. The annotation is based on
#' Gencode v25 with the gene-level
#' information extracted with \code{genes()} (see
#' \link[GenomicFeatures]{transcripts} with default arguments.
#'
#' @param level Either \code{gene} or \code{exon}. It specifies whether to
#' return Gene or exon level information as a
#' \link[GenomicRanges]{GRanges-class} or
#' \link[GenomicRanges]{GRangesList-class} object respectively. The gene level
#' information contains the width of the disjoint exons for that given gene
#' which can be used to normalize the counts provided by recount.
#' Can also be \code{both} in which case a 2 element list with the exon and the
#' gene output is returned.
#' @param db Either \code{Gencode.v25} (default) or
#' \code{EnsDb.Hsapiens.v79}. The default option reproduces the annotation
#' used when creating recount. EnsDb.Hsapiens.v79 can be used
#' for an alternative annotation as showcased in the recount vignette.
#'
#' @return Either a \link[GenomicRanges]{GRanges-class} object like
#' \link{recount_genes} or a \link[GenomicRanges]{GRangesList-class} object
#' like \link{recount_exons}.
#'
#' @details
#'
#' For Gencode.v25, we use the comprehensive gene annotation (regions:
#' \code{CHR}) from \url{https://www.gencodegenes.org/releases/25.html}
#' (GRCh38.p7).
#'
#' @author Leonardo Collado-Torres
#' @export
#'
#' @import GenomicRanges
#'
#' @seealso \link{recount_genes}, \link{recount_exons},
#' \url{https://github.com/nellore},
#' \url{https://jhubiostatistics.shinyapps.io/recount/}
#'
#' @examples
#'
#' \dontrun{
#' ## Reproduce gene level information
#' genes <- reproduce_ranges()
#'
#' ## Compare against recount_genes
#' length(genes)
#' length(recount_genes)
#' }
#'
reproduce_ranges <- function(level = 'gene', db = 'Gencode.v25') {
## Check input
level <- tolower(level)
stopifnot(level %in% c('gene', 'exon', 'both'))
stopifnot(db %in% c('Gencode.v25', 'EnsDb.Hsapiens.v79'))
## Load required packages
.load_install('GenomicFeatures')
# Build/obtain the transcript database for the chosen annotation source.
if (db == 'Gencode.v25') {
txdb <- GenomicFeatures::makeTxDbFromGFF('ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_25/gencode.v25.annotation.gff3.gz',
format = 'gff3', organism = 'Homo sapiens')
} else if(db == 'EnsDb.Hsapiens.v79') {
.load_install('EnsDb.Hsapiens.v79')
txdb <- EnsDb.Hsapiens.v79::EnsDb.Hsapiens.v79
}
## Get genes with default option single.strand.genes.only = TRUE
genes <- GenomicFeatures::genes(txdb)
## Get Exons
exons <- GenomicFeatures::exonsBy(txdb, by = 'gene')
## Keep only exons for gene ids that we selected previously
if(!all(names(exons) %in% names(genes))) {
warning('Dropping exons with gene ids not present in the gene list')
exons <- exons[names(exons) %in% names(genes)]
}
## Disjoin exons by gene so the exons won't be overlapping each other inside a gene
exons <- GenomicRanges::disjoin(exons)
if(level == 'exon') return(exons)
## For 'gene' or 'both', continue by:
## * adding length of disjoint exons by gene
## (bp_length is the per-gene sum of disjoint exon widths, usable for
## normalizing the counts provided by recount)
genes$bp_length <- sum(GenomicRanges::width(exons))
## * adding gene symbol
.load_install('org.Hs.eg.db')
if(db == 'Gencode.v25') {
# Gencode ids carry a version suffix (ENSG....N); strip it before the
# ENSEMBL -> SYMBOL lookup.
gene_info <- AnnotationDbi::mapIds(org.Hs.eg.db::org.Hs.eg.db,
gsub('\\..*', '', names(genes)), 'SYMBOL', 'ENSEMBL',
multiVals = 'CharacterList')
} else if(db == 'EnsDb.Hsapiens.v79') {
gene_info <- AnnotationDbi::mapIds(org.Hs.eg.db::org.Hs.eg.db,
names(genes), 'SYMBOL', 'ENSEMBL', multiVals = 'CharacterList')
}
genes$symbol <- gene_info
# 'gene' returns the annotated GRanges; 'both' returns exon + gene output.
if(level == 'gene') {
return(genes)
} else if (level == 'both') {
return(list('exon' = exons, 'gene' = genes))
}
}
| /R/reproduce_ranges.R | no_license | wenwenmin/Project-NBT2017-recount | R | false | false | 4,105 | r | #' Reproduce the gene or exons used in the RangedSummarizedExperiment objects
#'
#' This function reproduces the gene or exon level information used for creating
#' the \link[SummarizedExperiment]{RangedSummarizedExperiment-class}
#' objects provided by recount. The annotation is based on
#' Gencode v25 with the gene-level
#' information extracted with \code{genes()} (see
#' \link[GenomicFeatures]{transcripts} with default arguments.
#'
#' @param level Either \code{genes} or \code{exon}. It specifies whether to
#' return Gene or exon level information as a
#' \link[GenomicRanges]{GRanges-class} or
#' \link[GenomicRanges]{GRangesList-class} object respectively. The gene level
#' information contains the width of the disjoint exons for that given gene
#' which can be used to normalize the counts provided by recount.
#' Can also be \code{both} in which case a 2 element list with the exon and the
#' gene output is returned.
#' @param db Either \code{Gencode.v25} (default) or
#' \code{EnsDb.Hsapiens.v79}. The default option reproduces the annotation
#' used when creating recount. EnsDb.Hsapiens.v79 can be used
#' for an alternative annotation as showcased in the recount vignette.
#'
#' @return Either a \link[GenomicRanges]{GRanges-class} object like
#' \link{recount_genes} or a \link[GenomicRanges]{GRangesList-class} object
#' like \link{recount_exons}.
#'
#' @details
#'
#' For Gencode.v25, we use the comprehensive gene annotation (regions:
#' \code{CHR}) from \url{https://www.gencodegenes.org/releases/25.html}
#' (GRCh38.p7).
#'
#' @author Leonardo Collado-Torres
#' @export
#'
#' @import GenomicRanges
#'
#' @seealso \link{recount_genes}, \link{recount_exons},
#' \url{https://github.com/nellore},
#' \url{https://jhubiostatistics.shinyapps.io/recount/}
#'
#' @examples
#'
#' \dontrun{
#' ## Reproduce gene level information
#' genes <- reproduce_ranges()
#'
#' ## Compare against recount_genes
#' length(genes)
#' length(recount_genes)
#' }
#'
reproduce_ranges <- function(level = 'gene', db = 'Gencode.v25') {
## Check input
level <- tolower(level)
stopifnot(level %in% c('gene', 'exon', 'both'))
stopifnot(db %in% c('Gencode.v25', 'EnsDb.Hsapiens.v79'))
## Load required packages
.load_install('GenomicFeatures')
if (db == 'Gencode.v25') {
txdb <- GenomicFeatures::makeTxDbFromGFF('ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_25/gencode.v25.annotation.gff3.gz',
format = 'gff3', organism = 'Homo sapiens')
} else if(db == 'EnsDb.Hsapiens.v79') {
.load_install('EnsDb.Hsapiens.v79')
txdb <- EnsDb.Hsapiens.v79::EnsDb.Hsapiens.v79
}
## Get genes with default option single.strand.genes.only = TRUE
genes <- GenomicFeatures::genes(txdb)
## Get Exons
exons <- GenomicFeatures::exonsBy(txdb, by = 'gene')
## Keep only exons for gene ids that we selected previously
if(!all(names(exons) %in% names(genes))) {
warning('Dropping exons with gene ids not present in the gene list')
exons <- exons[names(exons) %in% names(genes)]
}
## Disjoin exons by gene so the exons won't be overlapping each other inside a gene
exons <- GenomicRanges::disjoin(exons)
if(level == 'exon') return(exons)
## For 'gene' or 'both', continue by:
## * adding length of disjoint exons by gene
genes$bp_length <- sum(GenomicRanges::width(exons))
## * adding gene symbol
.load_install('org.Hs.eg.db')
if(db == 'Gencode.v25') {
gene_info <- AnnotationDbi::mapIds(org.Hs.eg.db::org.Hs.eg.db,
gsub('\\..*', '', names(genes)), 'SYMBOL', 'ENSEMBL',
multiVals = 'CharacterList')
} else if(db == 'EnsDb.Hsapiens.v79') {
gene_info <- AnnotationDbi::mapIds(org.Hs.eg.db::org.Hs.eg.db,
names(genes), 'SYMBOL', 'ENSEMBL', multiVals = 'CharacterList')
}
genes$symbol <- gene_info
if(level == 'gene') {
return(genes)
} else if (level == 'both') {
return(list('exon' = exons, 'gene' = genes))
}
}
|
"accTrainTest" <-
function(x=matrix(rnorm(1000), ncol=20), cl=factor(rep(1:3,c(7,9,4))),
traintest=divideUp(cl, nset=2), nfeatures=NULL, print.acc=FALSE,
print.progress=TRUE){
traintest <- factor(traintest)
train <- traintest==levels(traintest)[1]
testset <- traintest==levels(traintest)[2]
cl1 <- cl[train]
cl2 <- cl[testset]
ng1 <- length(cl1)
ng2 <- length(cl2)
maxg <- max(c(ng1-length(unique(cl1))-2,
ng2-length(unique(cl2))-2))
if(is.null(nfeatures)){
max.features <- maxg
nfeatures <- 1:max.features
} else
{
if(max(nfeatures)>maxg)nfeatures <- nfeatures[nfeatures<=maxg]
max.features <- max(nfeatures)
}
ord1 <- orderFeatures(x, cl, subset=train)[1:max.features]
ord2 <- orderFeatures(x, cl, subset=testset)[1:max.features]
ord <- unique(c(ord1, ord2))
sub1 <- match(ord1, ord)
sub2 <- match(ord2, ord)
df1 <- data.frame(t(x[ord, train]))
df2 <- data.frame(t(x[ord, testset]))
acc1 <- acc2 <- numeric(max(nfeatures))
for(i in nfeatures){
if(print.progress)cat(paste(i, ":", sep=""))
df1.lda <- lda(df1[, sub1[1:i], drop=FALSE], cl1)
hat2 <- predict(df1.lda, newdata=df2[, sub1[1:i], drop=FALSE])$class
tab <- table(hat2, cl2)
acc1[i] <- sum(tab[row(tab)==col(tab)])/sum(tab)
df2.lda <- lda(df2[, sub2[1:i], drop=FALSE], cl2)
hat1 <- predict(df2.lda, newdata=df1[, sub2[1:i], drop=FALSE])$class
tab <- table(hat1, cl1)
acc2[i] <- sum(tab[row(tab)==col(tab)])/sum(tab)
}
cat("\n")
if(print.acc){
print(round(acc1,2))
print(round(acc2,2))
}
maxacc1 <- max(acc1)
maxacc2 <- max(acc2)
sub1 <- match(maxacc1, acc1)
sub2 <- match(maxacc2, acc2)
nextacc1 <- max(acc1[acc1<1])
nextacc2 <- max(acc1[acc1<2])
lower1 <- maxacc1-sqrt(nextacc1*(1-nextacc1)/ng1)
lower2 <- maxacc2-sqrt(nextacc2*(1-nextacc2)/ng2)
lsub1 <- min((1:ng1)[acc1>lower1])
lsub2 <- min((1:ng2)[acc2>lower2])
lower <- c("Best accuracy, less 1SD ",
paste(paste(round(c(lower1, lower2),2), c(lsub1, lsub2),
sep=" ("), " features) ", sep=""))
best <- c("Best accuracy",
paste(paste(round(c(maxacc1, maxacc2),2), c(sub1, sub2),
sep=" ("), " features)", sep=""))
acc.df <- cbind(lower, best)
dimnames(acc.df) <- list(c("Training/test split",
"I (training) / II (test) ",
"II (training) / I (test) "),c("",""))
print(acc.df, quote=FALSE)
invisible(list(sub1.2=ord1, acc1.2=acc1, sub2.1=ord2, acc2.1=acc2))
}
| /R/accTrainTest.R | no_license | cran/hddplot | R | false | false | 2,715 | r | "accTrainTest" <-
function(x=matrix(rnorm(1000), ncol=20), cl=factor(rep(1:3,c(7,9,4))),
traintest=divideUp(cl, nset=2), nfeatures=NULL, print.acc=FALSE,
print.progress=TRUE){
traintest <- factor(traintest)
train <- traintest==levels(traintest)[1]
testset <- traintest==levels(traintest)[2]
cl1 <- cl[train]
cl2 <- cl[testset]
ng1 <- length(cl1)
ng2 <- length(cl2)
maxg <- max(c(ng1-length(unique(cl1))-2,
ng2-length(unique(cl2))-2))
if(is.null(nfeatures)){
max.features <- maxg
nfeatures <- 1:max.features
} else
{
if(max(nfeatures)>maxg)nfeatures <- nfeatures[nfeatures<=maxg]
max.features <- max(nfeatures)
}
ord1 <- orderFeatures(x, cl, subset=train)[1:max.features]
ord2 <- orderFeatures(x, cl, subset=testset)[1:max.features]
ord <- unique(c(ord1, ord2))
sub1 <- match(ord1, ord)
sub2 <- match(ord2, ord)
df1 <- data.frame(t(x[ord, train]))
df2 <- data.frame(t(x[ord, testset]))
acc1 <- acc2 <- numeric(max(nfeatures))
for(i in nfeatures){
if(print.progress)cat(paste(i, ":", sep=""))
df1.lda <- lda(df1[, sub1[1:i], drop=FALSE], cl1)
hat2 <- predict(df1.lda, newdata=df2[, sub1[1:i], drop=FALSE])$class
tab <- table(hat2, cl2)
acc1[i] <- sum(tab[row(tab)==col(tab)])/sum(tab)
df2.lda <- lda(df2[, sub2[1:i], drop=FALSE], cl2)
hat1 <- predict(df2.lda, newdata=df1[, sub2[1:i], drop=FALSE])$class
tab <- table(hat1, cl1)
acc2[i] <- sum(tab[row(tab)==col(tab)])/sum(tab)
}
cat("\n")
if(print.acc){
print(round(acc1,2))
print(round(acc2,2))
}
maxacc1 <- max(acc1)
maxacc2 <- max(acc2)
sub1 <- match(maxacc1, acc1)
sub2 <- match(maxacc2, acc2)
nextacc1 <- max(acc1[acc1<1])
nextacc2 <- max(acc1[acc1<2])
lower1 <- maxacc1-sqrt(nextacc1*(1-nextacc1)/ng1)
lower2 <- maxacc2-sqrt(nextacc2*(1-nextacc2)/ng2)
lsub1 <- min((1:ng1)[acc1>lower1])
lsub2 <- min((1:ng2)[acc2>lower2])
lower <- c("Best accuracy, less 1SD ",
paste(paste(round(c(lower1, lower2),2), c(lsub1, lsub2),
sep=" ("), " features) ", sep=""))
best <- c("Best accuracy",
paste(paste(round(c(maxacc1, maxacc2),2), c(sub1, sub2),
sep=" ("), " features)", sep=""))
acc.df <- cbind(lower, best)
dimnames(acc.df) <- list(c("Training/test split",
"I (training) / II (test) ",
"II (training) / I (test) "),c("",""))
print(acc.df, quote=FALSE)
invisible(list(sub1.2=ord1, acc1.2=acc1, sub2.1=ord2, acc2.1=acc2))
}
|
## Exploratory Data Analysis - Course Project 1
##
## Creates 'Plot 3'.
##
## Data set information states that the source data contains 2075259 observations.
## Use lubridate library functions to simplify data and time handling
library(lubridate)
## Create data directory if it does not yet exist.
if(!file.exists("data")) {dir.create("data")}
## Download and uncompress data file.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/hpc.zip", mode = 'wb')
unzip(zipfile = "./data/hpc.zip", exdir = "./data")
## Extract column names from first row to set as col names in data frame.
varNames <- colnames(read.table("./data/household_power_consumption.txt", sep = ";", nrow = 1, header = TRUE))
## NOTE(review): columnClasses is defined but not passed to read.table below.
columnClasses <- c("char", "char", "num", "num", "num", "num", "int", "int", "int")
## Read the file into data frame 'hpc_data'; "?" marks missing values.
hpc_data <- read.table("./data/household_power_consumption.txt", sep=";", as.is = TRUE, col.names = varNames, skip=0, na.strings = "?", header=TRUE)
## Convert time and date fields
## Add new column "DateTime" at the beginning of the data frame.
hpc_data <- cbind(DateTime = 0, hpc_data)
## Use "lubridate" functions to populate new column with combined date/time of class POSIXct.
hpc_data$DateTime <- dmy(hpc_data$Date) + hms(hpc_data$Time)
## Delete unnecessary (old) 'Date' and 'Time' variables
hpc_data$Date <- NULL
hpc_data$Time <- NULL
## Subset hpc_data to observations on 2007-02-01 and 2007-02-02,
## i.e. the half-open window [2007-02-01, 2007-02-03).
minDate <- ymd("2007-02-01")
maxDate <- ymd("2007-02-03")
hpc_data <- hpc_data[which(hpc_data$DateTime >= minDate & hpc_data$DateTime < maxDate), ]
## Create Plot 3 on screen: the three sub-metering series over time.
par(mfrow = c(1, 1), cex.lab = 0.75, mar = c(3, 4, 3.5, 5.0))
with(hpc_data, {
plot(Sub_metering_1 ~ DateTime, type = "l",
xlab = "", ylab = "Energy sub metering")
lines(Sub_metering_2 ~ DateTime,
xlab = "", ylab = "Energy sub metering", col = "red")
lines(Sub_metering_3 ~ DateTime,
xlab = "", ylab = "Energy sub metering", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
cex = 0.7, title.adj = 1, xjust = 1,
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
## Do not copy the "screen" plot to the png device, because this seems
## to truncate the legend text on the right in the .png file. Instead,
## create the .png file directly (this solves the problem).
png(file = "plot3.png", width = 480, height = 480)
with(hpc_data, {
plot(Sub_metering_1 ~ DateTime, type = "l",
xlab = "", ylab = "Energy sub metering")
lines(Sub_metering_2 ~ DateTime,
xlab = "", ylab = "Energy sub metering", col = "red")
lines(Sub_metering_3 ~ DateTime,
xlab = "", ylab = "Energy sub metering", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
cex = 1.0, title.adj = 1, xjust = 1,
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
dev.off()
| /plot3.R | no_license | melancurion/ExData_Plotting1 | R | false | false | 3,134 | r | ## Exploratory Data Analysis - Course Project 1
##
## Creates 'Plot 3': the three energy sub-metering series for the household
## power consumption data, drawn once on screen and once directly to a PNG.
##
## Data set information states that the source data contains 2075259 observations.
## Use lubridate library functions to simplify date and time handling.
library(lubridate)

## Create data directory if it does not yet exist.
if (!file.exists("data")) {
  dir.create("data")
}

## Download and uncompress the data file.
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/hpc.zip", mode = "wb")
unzip(zipfile = "./data/hpc.zip", exdir = "./data")

## Read the file into data frame 'hpc_data'.
## header = TRUE already supplies the column names from the first row, so no
## separate header-extraction pass is needed; "?" marks missing values.
## (The original script also built an unused 'columnClasses' vector whose
## abbreviations were not valid colClasses values; it has been removed.)
hpc_data <- read.table("./data/household_power_consumption.txt", sep = ";",
                       as.is = TRUE, na.strings = "?", header = TRUE)

## Combine the 'Date' and 'Time' text columns into a single POSIXct
## 'DateTime' column at the front of the data frame, then drop the originals.
hpc_data <- cbind(DateTime = dmy(hpc_data$Date) + hms(hpc_data$Time), hpc_data)
hpc_data$Date <- NULL
hpc_data$Time <- NULL

## Subset hpc_data to observations on 2007-02-01 and 2007-02-02, i.e. at or
## after midnight on Feb 1 and strictly before midnight on Feb 3.
minDate <- ymd("2007-02-01")
maxDate <- ymd("2007-02-03")
hpc_data <- hpc_data[which(hpc_data$DateTime >= minDate & hpc_data$DateTime < maxDate), ]

## Draw Plot 3 on the currently active device.
## legend.cex: legend text size (smaller on screen, full size in the PNG).
drawPlot3 <- function(legend.cex = 1.0) {
  with(hpc_data, {
    plot(Sub_metering_1 ~ DateTime, type = "l",
         xlab = "", ylab = "Energy sub metering")
    ## lines() does not take axis-label arguments, so only the colour is set
    ## here (the original passed xlab/ylab, which lines() ignores with a
    ## warning).
    lines(Sub_metering_2 ~ DateTime, col = "red")
    lines(Sub_metering_3 ~ DateTime, col = "blue")
    legend("topright", lty = 1, col = c("black", "red", "blue"),
           cex = legend.cex, title.adj = 1, xjust = 1,
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  })
}

## Create Plot 3 on screen.
par(mfrow = c(1, 1), cex.lab = 0.75, mar = c(3, 4, 3.5, 5.0))
drawPlot3(legend.cex = 0.7)

## Do not copy the "screen" plot to the png device, because this seems
## to truncate the legend text on the right in the .png file. Instead,
## create the .png file directly (this solves the problem).
png(file = "plot3.png", width = 480, height = 480)
drawPlot3(legend.cex = 1.0)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fish.R
\docType{data}
\name{fish}
\alias{fish}
\title{Sample data set}
\description{
A sample data set, used in tests and some examples.
}
\keyword{data}
| /man/fish.Rd | no_license | cran/ggeffects | R | false | true | 242 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fish.R
\docType{data}
\name{fish}
\alias{fish}
\title{Sample data set}
\description{
A sample data set, used in tests and some examples.
}
\keyword{data}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeda-functions.R
\name{makeResponseMatrix}
\alias{makeResponseMatrix}
\title{Make response matrix}
\usage{
makeResponseMatrix(response, class.names = NULL)
}
\arguments{
\item{response}{A factor or character vector containing the
classes.}
\item{class.names}{A character vector giving the possible levels of
the factor. If NULL, it will be generated from the levels of
response.}
}
\value{
A dummy variable matrix with column names giving the class
names.
}
\description{
Create a dummy variable matrix for the response
}
\keyword{internal}
| /man/makeResponseMatrix.Rd | no_license | cran/treeDA | R | false | true | 622 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeda-functions.R
\name{makeResponseMatrix}
\alias{makeResponseMatrix}
\title{Make response matrix}
\usage{
makeResponseMatrix(response, class.names = NULL)
}
\arguments{
\item{response}{A factor or character vector containing the
classes.}
\item{class.names}{A character vector giving the possible levels of
the factor. If NULL, it will be generated from the levels of
response.}
}
\value{
A dummy variable matrix with column names giving the class
names.
}
\description{
Create a dummy variable matrix for the response
}
\keyword{internal}
|
\name{NAH}
\alias{NAH}
\docType{data}
\title{
National Accounts Historical Series 1970 to 1995
}
\description{
This dataset contains the px objects for all of the tables in the Historical National Accounts.
}
\usage{NAH01}
\format{
An object of class \code{px}.
\describe{
\item{\code{NAH01}}{An object of class \code{px}}
\item{\code{NAH02}}{An object of class \code{px}}
\item{\code{NAH03}}{An object of class \code{px}}
\item{\code{NAH04}}{An object of class \code{px}}
\item{\code{NAH05}}{An object of class \code{px}}
\item{\code{NAH06}}{An object of class \code{px}}
\item{\code{NAH07}}{An object of class \code{px}}
\item{\code{NAH09}}{An object of class \code{px}}
\item{\code{NAH10}}{An object of class \code{px}}
\item{\code{NAH11}}{An object of class \code{px}}
\item{\code{NAH12}}{An object of class \code{px}}
\item{\code{NAH13}}{An object of class \code{px}}
\item{\code{NAH14}}{An object of class \code{px}}
\item{\code{NAH15}}{An object of class \code{px}}
\item{\code{NAH16}}{An object of class \code{px}}
\item{\code{NAH17}}{An object of class \code{px}}
\item{\code{NAH18}}{An object of class \code{px}}
\item{\code{NAH19}}{An object of class \code{px}}
\item{\code{NAH20}}{An object of class \code{px}}
\item{\code{NAH21}}{An object of class \code{px}}
\item{\code{NAH22}}{An object of class \code{px}}
\item{\code{NAH23}}{An object of class \code{px}}
\item{\code{NAH24}}{An object of class \code{px}}
\item{\code{NAH25}}{An object of class \code{px}}
\item{\code{NAH26}}{An object of class \code{px}}
\item{\code{NAH27}}{An object of class \code{px}}
\item{\code{NAH28}}{An object of class \code{px}}
\item{\code{NAH29}}{An object of class \code{px}}
\item{\code{NAH30}}{An object of class \code{px}}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
}
\keyword{datasets}
| /man/NAH.Rd | no_license | xprimexinverse/statbanker | R | false | false | 2,114 | rd | \name{NAH}
\alias{NAH}
\docType{data}
\title{
National Accounts Historical Series 1970 to 1995
}
\description{
This dataset contains the px objects for all of the tables in the Historical National Accounts.
}
\usage{NAH01}
\format{
An object of class \code{px}.
\describe{
\item{\code{NAH01}}{An object of class \code{px}}
\item{\code{NAH02}}{An object of class \code{px}}
\item{\code{NAH03}}{An object of class \code{px}}
\item{\code{NAH04}}{An object of class \code{px}}
\item{\code{NAH05}}{An object of class \code{px}}
\item{\code{NAH06}}{An object of class \code{px}}
\item{\code{NAH07}}{An object of class \code{px}}
\item{\code{NAH09}}{An object of class \code{px}}
\item{\code{NAH10}}{An object of class \code{px}}
\item{\code{NAH11}}{An object of class \code{px}}
\item{\code{NAH12}}{An object of class \code{px}}
\item{\code{NAH13}}{An object of class \code{px}}
\item{\code{NAH14}}{An object of class \code{px}}
\item{\code{NAH15}}{An object of class \code{px}}
\item{\code{NAH16}}{An object of class \code{px}}
\item{\code{NAH17}}{An object of class \code{px}}
\item{\code{NAH18}}{An object of class \code{px}}
\item{\code{NAH19}}{An object of class \code{px}}
\item{\code{NAH20}}{An object of class \code{px}}
\item{\code{NAH21}}{An object of class \code{px}}
\item{\code{NAH22}}{An object of class \code{px}}
\item{\code{NAH23}}{An object of class \code{px}}
\item{\code{NAH24}}{An object of class \code{px}}
\item{\code{NAH25}}{An object of class \code{px}}
\item{\code{NAH26}}{An object of class \code{px}}
\item{\code{NAH27}}{An object of class \code{px}}
\item{\code{NAH28}}{An object of class \code{px}}
\item{\code{NAH29}}{An object of class \code{px}}
\item{\code{NAH30}}{An object of class \code{px}}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
}
\keyword{datasets}
|
## Example for the CureRate S4 class from the CR package: simulates a
## two-arm cure-rate trial design under a group-sequential G-rho test and
## summarises its operating characteristics.
library(CR)
### Name: CureRate-class
### Title: Class '"CureRate"'
### Aliases: CureRate-class CR-CureRate-method
### Keywords: classes
### ** Examples
showClass("CureRate")
## Input Data
###############
# rho - specifies the value of rho in the G-rho test (Harrington and Fleming, 1982).
# rho = 0 gives the logrank test, and rho = 1 the Peto-Peto Wilcoxon test
#(and rho = -1 the test discussed by Gray and Tsiatis, 1989).
rho = 0
## numreps - number of simulated trial replications
numreps = 500
##cureobs - probability of cure on the observation arm of the study
cureobs = .359
##curerx - probability of cure on the treatment arm of the study
curerx = .459
##medobs - median survival among the non-cured on observation
medobs = .747
##medrx - median survival among the non-cured on treatment
medrx = .859
##acrate - rate at which patients accrue, combined over both
##observation and treatment arms of the study
acrate = 232
##probrx - probability of assignment to the treatment arm
probrx = .5
##actime - accrual duration
## (a vector: each candidate accrual duration is evaluated)
actime = c(3.3, 3.5, 3.8);
##futime - followup duration
## (a vector: each candidate follow-up duration is evaluated)
futime = c(2.0, 2.5)
##info - vector of information times for the interim looks
##must be an increasing sequence in (0,1]
info = c( .35, .61, .86, 1.0)
##crits - vector of critical values corresponding to the
##interim analysis time points specified in info
crits = c(3.6128, 2.6506, 2.1894, 2.0536)
##############################################################
### Log-rank test: rho=0 (default)
mycr<-curerate(rho ,numreps,cureobs,curerx,medobs,medrx,acrate,
probrx,actime,futime,info,crits)
## Equivalent ways of printing the fitted object:
mycr ### (*) auto-prints via the show() method
show(mycr) ### same as above
showcr(mycr) ### same as above
unclass(mycr)
## Detailed output: all accrual/follow-up combinations, or a single
## combination selected by index (indac = accrual, indfu = follow-up).
showcr(mycr,full.results=TRUE)
showcr(mycr,indac=3,indfu=1)
| /data/genthat_extracted_code/CR/examples/CureRate-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,713 | r | library(CR)
## Example for the CureRate S4 class from the CR package: simulates a
## two-arm cure-rate trial design under a group-sequential G-rho test and
## summarises its operating characteristics.
### Name: CureRate-class
### Title: Class '"CureRate"'
### Aliases: CureRate-class CR-CureRate-method
### Keywords: classes
### ** Examples
showClass("CureRate")
## Input Data
###############
# rho - specifies the value of rho in the G-rho test (Harrington and Fleming, 1982).
# rho = 0 gives the logrank test, and rho = 1 the Peto-Peto Wilcoxon test
#(and rho = -1 the test discussed by Gray and Tsiatis, 1989).
rho = 0
## numreps - number of simulated trial replications
numreps = 500
##cureobs - probability of cure on the observation arm of the study
cureobs = .359
##curerx - probability of cure on the treatment arm of the study
curerx = .459
##medobs - median survival among the non-cured on observation
medobs = .747
##medrx - median survival among the non-cured on treatment
medrx = .859
##acrate - rate at which patients accrue, combined over both
##observation and treatment arms of the study
acrate = 232
##probrx - probability of assignment to the treatment arm
probrx = .5
##actime - accrual duration
## (a vector: each candidate accrual duration is evaluated)
actime = c(3.3, 3.5, 3.8);
##futime - followup duration
## (a vector: each candidate follow-up duration is evaluated)
futime = c(2.0, 2.5)
##info - vector of information times for the interim looks
##must be an increasing sequence in (0,1]
info = c( .35, .61, .86, 1.0)
##crits - vector of critical values corresponding to the
##interim analysis time points specified in info
crits = c(3.6128, 2.6506, 2.1894, 2.0536)
##############################################################
### Log-rank test: rho=0 (default)
mycr<-curerate(rho ,numreps,cureobs,curerx,medobs,medrx,acrate,
probrx,actime,futime,info,crits)
## Equivalent ways of printing the fitted object:
mycr ### (*) auto-prints via the show() method
show(mycr) ### same as above
showcr(mycr) ### same as above
unclass(mycr)
## Detailed output: all accrual/follow-up combinations, or a single
## combination selected by index (indac = accrual, indfu = follow-up).
showcr(mycr,full.results=TRUE)
showcr(mycr,indac=3,indfu=1)
|
# Shiny server module for the "Bayesian Model Averaging" (dga) tab of the
# capture-recapture app: builds a prior on total population size, runs
# dga::bma.cr asynchronously, and renders posterior summaries, plots, and
# a downloadable R Markdown report.
#
# input/output/session: standard Shiny server arguments.
# getData: reactive returning the capture data -- one row per individual
#   ("Individual") or capture patterns with counts in the last column
#   ("Aggregate"), per input$DataType.
#
# Returns a list exposing getMarkdownReport so the caller can embed this
# tab's section in a combined report.
#
# NOTE(review): depends on names supplied elsewhere in the app/package:
# graphs3/graphs4/graphs5, disaggregate(), make.strata(), bma.cr(),
# plotPosteriorN(), formatGraphs() (dga), and the future/promises helpers
# future(), value(), and %...>% -- confirm they are attached by the caller.
serverDga <- function(input, output, session, getData){
# Prior over total population size, as list(x = support, values =
# normalised probabilities). Either log-normal (parameterised by the
# median and 90th-percentile inputs) or the non-informative 1/(N - n)
# prior. Side effect: resets the "dgaPriorDelta" input to 1/2^ncap
# whenever the data change. Returns NULL on missing data or invalid
# log-normal inputs.
priorDist <- reactive({
if( !is.null(getData())){
dat <- getData()
if (input$DataType == "Aggregate"){
# Last column holds the pattern counts; the rest are capture indicators.
nobs <- sum(dat[[length(dat)]])
ncap <- ncol(dat) - 1
}else{
nobs <- nrow(dat)
ncap <- ncol(dat)
}
updateNumericInput(session, "dgaPriorDelta",value = 1 / 2^ncap)
}else
return(NULL)
if(input$dgaPriorType == "lnorm"){
mu <- log(input$dgaPriorMedian)
if(input$dgaPrior90 <= input$dgaPriorMedian){
showNotification("Prior 90th percentile must be larger than the median")
return(NULL)
}
# sd on the log scale chosen so that dgaPrior90 is the 90th percentile.
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
x <- 0:(input$dgaNMax - nobs) + nobs
values <- dlnorm(x,mu,ssd)
}else{
# Non-informative prior proportional to 1 / (population size - nobs).
x <- 1:(input$dgaNMax - nobs) + nobs
values <- 1 / (1:(input$dgaNMax - nobs))
}
values <- values / sum(values)
out <- list(x=x, values=values)
out
})
# Prior density plot with the 10th/90th percentiles marked in red; the
# ggplot object is built asynchronously via future/promises.
output$dgaPrior <- renderPlot({
if (is.null(getData())) {
return(NULL)
}
prior <- priorDist()
dgaPriorType <- input$dgaPriorType
future({
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm")
titl <- "Log-normal Prior"
else
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=values)) +
geom_vline(xintercept = lower90, color="red") +
geom_vline(xintercept = upper90, color="red") +
xlab("Population Size (red lines = 10th and 90th percentiles)") +
ylab("Prior Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
}) %...>% (function(p){
print(p)
})
})
# Cumulative version of the prior plot (no percentile markers drawn).
output$dgaCumPrior <- renderPlot({
if (is.null(getData())) {
return(NULL)
}
prior <- priorDist()
dgaPriorType <- input$dgaPriorType
future({
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm")
titl <- "Log-normal Prior"
else
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
# NOTE(review): lower90/upper90 are computed but not used in this plot;
# dead code carried over from the density plot above.
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=cumsum(values))) +
xlab("Population Size") +
ylab("Prior Cumulative Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
}) %...>% (function(p){
print(p)
})
})
# Validates the log-normal prior inputs; returns "" when valid, or an
# error message suitable for showNotification().
dgaPriorValid <- reactive({
if(input$dgaPriorType == "lnorm"){
if(input$dgaPrior90 <= input$dgaPriorMedian){
return("Prior 90th percentile must be larger than the median")
}
if(input$dgaNMax <= input$dgaPrior90){
return("Maximum population size percentile must be larger than the 90th percentile")
}
}
""
})
# Runs Bayesian model averaging (dga::bma.cr) over the decomposable
# graphical models for the observed capture patterns (3-5 sources only).
# Returns a future resolving to list(prior, post), where post is a
# models-by-support posterior weight matrix (last row = saturated model),
# or NULL on missing/invalid input.
dga <- reactive({
if (is.null(getData())) {
return(NULL)
}
if(dgaPriorValid() != ""){
showNotification(dgaPriorValid())
return(NULL)
}
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) > 5){
showNotification("Bayesian model averaging can only be performed on <= 5 sources")
return(NULL)
}
if(ncol(dat) == 3)
graphs <- graphs3
else if(ncol(dat) == 4)
graphs <- graphs4
else
graphs <- graphs5
nobs <- nrow(dat)
# Collapse the individual-level data into a 2^k capture-pattern array.
rec <- make.strata(dat, locations=rep("a",nrow(dat)))$overlap.counts
rec <- array(rec, dim=rep(2, ncol(dat)))
# NOTE(review): ssd and nmax below are never used afterwards (mu only
# feeds ssd) -- priorDist() already encodes the prior; dead code.
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
nmax <- input$dgaNMax - nobs
delta <- input$dgaPriorDelta
prior <- priorDist()
future({
x <- prior$x
post <- bma.cr(rec,
delta=delta,
Nmissing=x - nobs,
logprior = log(prior$values),
graphs = graphs)
list(prior=prior, post=post)
})
})
# Warns when the saturated model carries >= 15% posterior mass, since
# estimates may then be unreliable.
output$dgaSaturatedWarning <- renderText({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
post <- dga$post
# Posterior mass of the saturated model (last row of post).
psat <- sum(post[nrow(post), ])
if(psat >= .15){
inc <- ""
if(!input$dgaSaturated)
inc <- " (not included in estimates) "
return(paste0("Warning: The posterior probability of the saturated model",
inc,
" is ",
round(psat*100),
"%. Estimates may be unreliable."))
}
return(NULL)
})
# Posterior summary table: mean, median, and a 95% highest-density
# interval for the population size, optionally excluding the saturated
# model (per input$dgaSaturated).
output$dgaTable <- renderTable({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
post <- dga$post
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
}
# Marginal posterior over population size (summed over models).
postN <- colSums(post)
postN <- postN / sum(postN)
x <- dga$prior$x
mn <- sum(x * postN)
med <- x[which(cumsum(postN) > .5)[1]]
# HDI
# Find the density cutoff whose excluded tail mass is 5%; the interval
# is the range of support values with posterior mass above that cutoff.
opt <- optimize(
function(cut){
abs(.05 - sum(postN*(postN <= cut)))
},
interval = c(0,max(postN))
)
inInterval <- which(postN > opt$minimum)
lower <- x[inInterval[1]]
upper <- x[inInterval[length(inInterval)]]
#lower <- x[which(cumsum(postN) > .025)[1]]
#upper <- x[which(cumsum(postN) > .975)[1]]
result <- data.frame(mn, med, lower, upper)
names(result) <- c("Mean","Median","95% Lower","95% Upper")
round(result)
}, digits=0)
# Marginal posterior distribution plot; the upper 0.5% tail of the
# support is dropped for display.
output$dgaPlot <- renderPlot({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
x <- dga$prior$x
post <- dga$post
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
}
postN <- colSums(post)
postN <- postN / sum(postN)
ind <- cumsum(postN) < .995
plotPosteriorN(post[,ind], x[ind])
})
# Per-model summary table: posterior probability (%) of each decomposable
# model and its conditional expected population size.
output$dgaModelPost <- renderTable({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
x <- dga$prior$x
post <- dga$post
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) > 5){
showNotification("Bayesian model averaging can only be performed on <= 5 sources")
return(NULL)
}
if(ncol(dat) == 3)
graphs <- graphs3
else if(ncol(dat) == 4)
graphs <- graphs4
else
graphs <- graphs5
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
graphs <- graphs[-length(graphs)]
}
# mp: marginal model probabilities; means: E[N | model].
mp <- rowSums(post)
means <- apply(post, 1, function(p){
p <- p / sum(p)
sum(p * x)
})
means <- as.integer(round(means))
mp <- mp / sum(mp)
mp <- round(mp * 100, 3)
data.frame(Interaction=formatGraphs(graphs),
`Posterior Probability (%)` = mp,
`Expected Pop. Size` = means,
check.names=FALSE)
})
# Builds a self-contained R Markdown fragment reproducing the current
# analysis: the relevant inputs are inlined as R literals via dput(), and
# the Prior / Posterior / Model Summaries sections are included according
# to input$dgaReportCheckBox.
# includeDf: embed the input data (plus a getData() shim) in the report.
getMarkdownReport <- function(includeDf=TRUE){
# Serialise an R value to parseable source text via dput().
objToString <- function(expr){
paste(capture.output(dput(eval(expr))), collapse = "\n")
}
rmd <- paste0('
## Bayesian Model Averaging
```{r}
if(!exists("input")) input <- list()
input$DataType <- ',objToString(input$DataType),'
input$dgaPriorType <- ',objToString(input$dgaPriorType),'
input$dgaPriorMedian <- ',objToString(input$dgaPriorMedian),'
input$dgaPriorDelta <- ',objToString(input$dgaPriorDelta),'
input$dgaNMax <- ',objToString(input$dgaNMax),'
input$dgaPrior90 <- ',objToString(input$dgaPrior90),'
input$dgaSaturated <- ',objToString(input$dgaSaturated),'
library(dga)
library(ggplot2)
library(shinyrecap)
```
')
if(includeDf){
rmd <- paste0(rmd,
"
### Input Data
```{r}
df <- ",
objToString(getData()),
"
getData <- function(disag=FALSE){
if(disag && ",objToString(input$DataType) ,"== \"Aggregate\")
df <- disaggregate(df[-length(df)],df[[length(df)]])
df
}
knitr::kable(df)
```
")
}
if("Prior" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### Prior Distribution
```{r}
if( !is.null(getData())){
dat <- getData()
if (input$DataType == "Aggregate"){
nobs <- sum(dat[[length(dat)]])
ncap <- ncol(dat) - 1
}else{
nobs <- nrow(dat)
ncap <- ncol(dat)
}
}else
return(NULL)
if(input$dgaPriorType == "lnorm"){
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
x <- 0:(input$dgaNMax - nobs) + nobs
values <- dlnorm(x,mu,ssd)
}else{
x <- 1:(input$dgaNMax - nobs) + nobs
values <- 1 / (1:(input$dgaNMax - nobs))
}
values <- values / sum(values)
prior <- list(x=x, values=values)
priorDist <- function() prior
dgaPriorType <- input$dgaPriorType
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm"){
titl <- "Log-normal Prior"
}else{
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
}
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=values)) +
geom_vline(xintercept = lower90, color="red") +
geom_vline(xintercept = upper90, color="red") +
xlab("Population Size (red lines = 10th and 90th percentiles)") +
ylab("Prior Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
print(p)
p <- ggplot() +
geom_line(aes(x=x,y=cumsum(values))) +
xlab("Population Size") +
ylab("Prior Cumulative Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
print(p)
```
')
}
if("Posterior" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### Posterior Distribution
```{r}
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) == 3){
data(graphs3)
graphs <- graphs3
}else if(ncol(dat) == 4){
data(graphs4)
graphs <- graphs4
}else{
data(graphs5)
graphs <- graphs5
}
nobs <- nrow(dat)
rec <- make.strata(dat, locations=rep("a",nrow(dat)))$overlap.counts
rec <- array(rec, dim=rep(2, ncol(dat)))
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
nmax <- input$dgaNMax - nobs
delta <- input$dgaPriorDelta
prior <- priorDist()
x <- prior$x
post <- bma.cr(rec,
delta=delta,
Nmissing=x - nobs,
logprior = log(prior$values),
graphs = graphs)
dga <- list(prior=prior, post=post)
post <- dga$post
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
}
postN <- colSums(post)
postN <- postN / sum(postN)
x <- dga$prior$x
mn <- sum(x * postN)
med <- x[which(cumsum(postN) > .5)[1]]
# HDI
opt <- optimize(
function(cut){
abs(.05 - sum(postN*(postN <= cut)))
},
interval = c(0,max(postN))
)
inInterval <- which(postN > opt$minimum)
lower <- x[inInterval[1]]
upper <- x[inInterval[length(inInterval)]]
#lower <- x[which(cumsum(postN) > .025)[1]]
#upper <- x[which(cumsum(postN) > .975)[1]]
result <- data.frame(mn, med, lower, upper)
names(result) <- c("Mean","Median","95% Lower","95% Upper")
result %>% knitr::kable(digits=0)
postN <- colSums(post)
postN <- postN / sum(postN)
ind <- cumsum(postN) < .995
plotPosteriorN(post[,ind], x[ind])
```
')
}
if("Model Summaries" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### BMA Individual Model Summaries
```{r}
if(!input$dgaSaturated){
graphs <- graphs[-length(graphs)]
}
mp <- rowSums(post)
means <- apply(post, 1, function(p){
p <- p / sum(p)
sum(p * x)
})
means <- as.integer(round(means))
mp <- mp / sum(mp)
mp <- round(mp * 100, 3)
data.frame(Interaction=formatGraphs(graphs),
`Posterior Probability (%)` = mp,
`Expected Pop. Size` = means,
check.names=FALSE) %>% knitr::kable()
```
')
}
rmd
}
# Renders the report to HTML/Word/PDF (per input$dgaReportFormat) and
# serves it as a download, disabling the button and showing a
# notification while rendering.
output$dgaDownloadReport <- downloadHandler(
filename = function(){
ext <- if(input$dgaReportFormat == "html_document"){
"html"
}else if(input$dgaReportFormat == "word_document"){
"doc"
}else{
"pdf"
}
paste0("bma_report.", ext)
},
content = function(file){
# YAML front matter for the standalone report document.
fm <- paste0('
---
title: "Bayesian Model Averaging Report"
author: "shinyrecap"
output: ',input$dgaReportFormat,'
---
')
setup <- paste0(
'
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = ',input$dgaReportCode,')
```
'
)
rmd <- paste0(fm, setup, getMarkdownReport())
tempReport <- file.path(tempdir(), "report.Rmd")
cat(rmd, file=tempReport)
shinyjs::disable("dgaDownloadReport")
note <- showNotification("Generating Report...", duration=NULL)
# try() so a render failure re-enables the button instead of aborting.
rr <- try(rmarkdown::render(tempReport, output_file = file,
envir = new.env(parent = globalenv())
))
removeNotification(note)
shinyjs::enable("dgaDownloadReport")
rr
}
)
# Expose the report generator to the calling server.
list(
getMarkdownReport=getMarkdownReport
)
}
| /inst/apps/rcapture/server-dga.R | permissive | kdctran/shinyrecap | R | false | false | 13,480 | r | serverDga <- function(input, output, session, getData){
priorDist <- reactive({
if( !is.null(getData())){
dat <- getData()
if (input$DataType == "Aggregate"){
nobs <- sum(dat[[length(dat)]])
ncap <- ncol(dat) - 1
}else{
nobs <- nrow(dat)
ncap <- ncol(dat)
}
updateNumericInput(session, "dgaPriorDelta",value = 1 / 2^ncap)
}else
return(NULL)
if(input$dgaPriorType == "lnorm"){
mu <- log(input$dgaPriorMedian)
if(input$dgaPrior90 <= input$dgaPriorMedian){
showNotification("Prior 90th percentile must be larger than the median")
return(NULL)
}
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
x <- 0:(input$dgaNMax - nobs) + nobs
values <- dlnorm(x,mu,ssd)
}else{
x <- 1:(input$dgaNMax - nobs) + nobs
values <- 1 / (1:(input$dgaNMax - nobs))
}
values <- values / sum(values)
out <- list(x=x, values=values)
out
})
output$dgaPrior <- renderPlot({
if (is.null(getData())) {
return(NULL)
}
prior <- priorDist()
dgaPriorType <- input$dgaPriorType
future({
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm")
titl <- "Log-normal Prior"
else
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=values)) +
geom_vline(xintercept = lower90, color="red") +
geom_vline(xintercept = upper90, color="red") +
xlab("Population Size (red lines = 10th and 90th percentiles)") +
ylab("Prior Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
}) %...>% (function(p){
print(p)
})
})
output$dgaCumPrior <- renderPlot({
if (is.null(getData())) {
return(NULL)
}
prior <- priorDist()
dgaPriorType <- input$dgaPriorType
future({
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm")
titl <- "Log-normal Prior"
else
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=cumsum(values))) +
xlab("Population Size") +
ylab("Prior Cumulative Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
}) %...>% (function(p){
print(p)
})
})
dgaPriorValid <- reactive({
if(input$dgaPriorType == "lnorm"){
if(input$dgaPrior90 <= input$dgaPriorMedian){
return("Prior 90th percentile must be larger than the median")
}
if(input$dgaNMax <= input$dgaPrior90){
return("Maximum population size percentile must be larger than the 90th percentile")
}
}
""
})
dga <- reactive({
if (is.null(getData())) {
return(NULL)
}
if(dgaPriorValid() != ""){
showNotification(dgaPriorValid())
return(NULL)
}
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) > 5){
showNotification("Bayesian model averaging can only be performed on <= 5 sources")
return(NULL)
}
if(ncol(dat) == 3)
graphs <- graphs3
else if(ncol(dat) == 4)
graphs <- graphs4
else
graphs <- graphs5
nobs <- nrow(dat)
rec <- make.strata(dat, locations=rep("a",nrow(dat)))$overlap.counts
rec <- array(rec, dim=rep(2, ncol(dat)))
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
nmax <- input$dgaNMax - nobs
delta <- input$dgaPriorDelta
prior <- priorDist()
future({
x <- prior$x
post <- bma.cr(rec,
delta=delta,
Nmissing=x - nobs,
logprior = log(prior$values),
graphs = graphs)
list(prior=prior, post=post)
})
})
output$dgaSaturatedWarning <- renderText({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
post <- dga$post
psat <- sum(post[nrow(post), ])
if(psat >= .15){
inc <- ""
if(!input$dgaSaturated)
inc <- " (not included in estimates) "
return(paste0("Warning: The posterior probability of the saturated model",
inc,
" is ",
round(psat*100),
"%. Estimates may be unreliable."))
}
return(NULL)
})
# Summary table of the model-averaged posterior over population size:
# mean, median, and a 95% highest-density interval (HDI).
output$dgaTable <- renderTable({
dga <- dga()
if(is.null(dga))
return(NULL)
# dga() returns a future; block until it resolves.
dga <- value(dga)
post <- dga$post
if(!input$dgaSaturated){
# Drop the saturated model (last row) from the average.
post <- post[-nrow(post), , drop=FALSE]
}
# Model-averaged posterior over sizes, normalised to sum to 1.
postN <- colSums(post)
postN <- postN / sum(postN)
x <- dga$prior$x
mn <- sum(x * postN)
med <- x[which(cumsum(postN) > .5)[1]]
# HDI
# Find the density cutoff whose excluded mass is closest to 5%, then take
# the first and last sizes above that cutoff as the interval endpoints.
opt <- optimize(
function(cut){
abs(.05 - sum(postN*(postN <= cut)))
},
interval = c(0,max(postN))
)
inInterval <- which(postN > opt$minimum)
lower <- x[inInterval[1]]
upper <- x[inInterval[length(inInterval)]]
#lower <- x[which(cumsum(postN) > .025)[1]]
#upper <- x[which(cumsum(postN) > .975)[1]]
result <- data.frame(mn, med, lower, upper)
names(result) <- c("Mean","Median","95% Lower","95% Upper")
round(result)
}, digits=0)
# Plot the model-averaged posterior of the population size, truncated at
# the 99.5th percentile so the long right tail does not dominate the axis.
output$dgaPlot <- renderPlot({
fit <- dga()
if (is.null(fit)) {
return(NULL)
}
fit <- value(fit)
sizes <- fit$prior$x
posterior <- fit$post
if (!input$dgaSaturated) {
# Exclude the saturated model (last row) from the average.
posterior <- posterior[-nrow(posterior), , drop = FALSE]
}
size_probs <- colSums(posterior)
size_probs <- size_probs / sum(size_probs)
keep <- cumsum(size_probs) < .995
plotPosteriorN(posterior[, keep], sizes[keep])
})
# Table of per-model posterior summaries: each decomposable graph's
# posterior probability and its model-conditional expected population size.
output$dgaModelPost <- renderTable({
dga <- dga()
if(is.null(dga))
return(NULL)
dga <- value(dga)
x <- dga$prior$x
post <- dga$post
# Re-derive the graph list so rows can be labelled; this duplicates the
# selection logic in the dga() reactive above.
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) > 5){
showNotification("Bayesian model averaging can only be performed on <= 5 sources")
return(NULL)
}
if(ncol(dat) == 3)
graphs <- graphs3
else if(ncol(dat) == 4)
graphs <- graphs4
else
graphs <- graphs5
# Optionally drop the saturated model (last row / last graph).
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
graphs <- graphs[-length(graphs)]
}
# Posterior mass per model (row sums; normalised to percentages below).
mp <- rowSums(post)
# Expected population size under each model: mean of x weighted by that
# model's normalised posterior over sizes.
means <- apply(post, 1, function(p){
p <- p / sum(p)
sum(p * x)
})
means <- as.integer(round(means))
mp <- mp / sum(mp)
mp <- round(mp * 100, 3)
data.frame(Interaction=formatGraphs(graphs),
`Posterior Probability (%)` = mp,
`Expected Pop. Size` = means,
check.names=FALSE)
})
# Build the R Markdown source for the BMA report as a single string.
# The generated document re-embeds the current input settings (via dput)
# so it can be re-knit standalone. Sections are appended according to
# the checkboxes in input$dgaReportCheckBox.
#
# includeDf - if TRUE, embed the input data frame and a getData() shim
#             in the report so downstream chunks can run unmodified.
getMarkdownReport <- function(includeDf=TRUE){
# Serialise an R value to parseable source text (dput output).
objToString <- function(expr){
paste(capture.output(dput(eval(expr))), collapse = "\n")
}
# Header chunk: reconstruct the relevant input$ values and load packages.
rmd <- paste0('
## Bayesian Model Averaging
```{r}
if(!exists("input")) input <- list()
input$DataType <- ',objToString(input$DataType),'
input$dgaPriorType <- ',objToString(input$dgaPriorType),'
input$dgaPriorMedian <- ',objToString(input$dgaPriorMedian),'
input$dgaPriorDelta <- ',objToString(input$dgaPriorDelta),'
input$dgaNMax <- ',objToString(input$dgaNMax),'
input$dgaPrior90 <- ',objToString(input$dgaPrior90),'
input$dgaSaturated <- ',objToString(input$dgaSaturated),'
library(dga)
library(ggplot2)
library(shinyrecap)
```
')
# Optionally embed the raw data plus a getData() replacement.
if(includeDf){
rmd <- paste0(rmd,
"
### Input Data
```{r}
df <- ",
objToString(getData()),
"
getData <- function(disag=FALSE){
if(disag && ",objToString(input$DataType) ,"== \"Aggregate\")
df <- disaggregate(df[-length(df)],df[[length(df)]])
df
}
knitr::kable(df)
```
")
}
# Prior section: rebuilds the prior exactly as the app does and plots its
# density and CDF.
if("Prior" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### Prior Distribution
```{r}
if( !is.null(getData())){
dat <- getData()
if (input$DataType == "Aggregate"){
nobs <- sum(dat[[length(dat)]])
ncap <- ncol(dat) - 1
}else{
nobs <- nrow(dat)
ncap <- ncol(dat)
}
}else
return(NULL)
if(input$dgaPriorType == "lnorm"){
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
x <- 0:(input$dgaNMax - nobs) + nobs
values <- dlnorm(x,mu,ssd)
}else{
x <- 1:(input$dgaNMax - nobs) + nobs
values <- 1 / (1:(input$dgaNMax - nobs))
}
values <- values / sum(values)
prior <- list(x=x, values=values)
priorDist <- function() prior
dgaPriorType <- input$dgaPriorType
x <- prior$x
values <- prior$values
if(dgaPriorType == "lnorm"){
titl <- "Log-normal Prior"
}else{
titl <- "Non-informative Prior (p(x) ~ 1/ (Population Size - Sample Size))"
}
lower90 <- x[min(which(cumsum(values) >= .1))]
upper90 <- x[min(which(cumsum(values) >= .9))]
p <- ggplot() +
geom_line(aes(x=x,y=values)) +
geom_vline(xintercept = lower90, color="red") +
geom_vline(xintercept = upper90, color="red") +
xlab("Population Size (red lines = 10th and 90th percentiles)") +
ylab("Prior Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
print(p)
p <- ggplot() +
geom_line(aes(x=x,y=cumsum(values))) +
xlab("Population Size") +
ylab("Prior Cumulative Probability") +
ggtitle(titl) +
theme_bw() +
xlim(c(0,max(x)))
print(p)
```
')
}
# Posterior section: mirrors the dga()/dgaTable/dgaPlot logic in the app.
if("Posterior" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### Posterior Distribution
```{r}
dat <- getData()
if (input$DataType == "Aggregate") {
dat <- disaggregate(dat[,-ncol(dat)], dat[[ncol(dat)]])
}
if(ncol(dat) == 3){
data(graphs3)
graphs <- graphs3
}else if(ncol(dat) == 4){
data(graphs4)
graphs <- graphs4
}else{
data(graphs5)
graphs <- graphs5
}
nobs <- nrow(dat)
rec <- make.strata(dat, locations=rep("a",nrow(dat)))$overlap.counts
rec <- array(rec, dim=rep(2, ncol(dat)))
mu <- log(input$dgaPriorMedian)
ssd <- (log(input$dgaPrior90) - mu) / qnorm(.9)
nmax <- input$dgaNMax - nobs
delta <- input$dgaPriorDelta
prior <- priorDist()
x <- prior$x
post <- bma.cr(rec,
delta=delta,
Nmissing=x - nobs,
logprior = log(prior$values),
graphs = graphs)
dga <- list(prior=prior, post=post)
post <- dga$post
if(!input$dgaSaturated){
post <- post[-nrow(post), , drop=FALSE]
}
postN <- colSums(post)
postN <- postN / sum(postN)
x <- dga$prior$x
mn <- sum(x * postN)
med <- x[which(cumsum(postN) > .5)[1]]
# HDI
opt <- optimize(
function(cut){
abs(.05 - sum(postN*(postN <= cut)))
},
interval = c(0,max(postN))
)
inInterval <- which(postN > opt$minimum)
lower <- x[inInterval[1]]
upper <- x[inInterval[length(inInterval)]]
#lower <- x[which(cumsum(postN) > .025)[1]]
#upper <- x[which(cumsum(postN) > .975)[1]]
result <- data.frame(mn, med, lower, upper)
names(result) <- c("Mean","Median","95% Lower","95% Upper")
result %>% knitr::kable(digits=0)
postN <- colSums(post)
postN <- postN / sum(postN)
ind <- cumsum(postN) < .995
plotPosteriorN(post[,ind], x[ind])
```
')
}
# Per-model summaries: relies on graphs/post/x from the Posterior chunk,
# so this section only works when "Posterior" is also selected.
if("Model Summaries" %in% input$dgaReportCheckBox){
rmd <- paste0(rmd,'
### BMA Individual Model Summaries
```{r}
if(!input$dgaSaturated){
graphs <- graphs[-length(graphs)]
}
mp <- rowSums(post)
means <- apply(post, 1, function(p){
p <- p / sum(p)
sum(p * x)
})
means <- as.integer(round(means))
mp <- mp / sum(mp)
mp <- round(mp * 100, 3)
data.frame(Interaction=formatGraphs(graphs),
`Posterior Probability (%)` = mp,
`Expected Pop. Size` = means,
check.names=FALSE) %>% knitr::kable()
```
')
}
# Return the assembled R Markdown source.
rmd
}
# Render the BMA report to HTML/Word/PDF and serve it as a download.
# The button is disabled and a notification shown while knitting; render
# errors are caught by try() so the UI is always re-enabled.
output$dgaDownloadReport <- downloadHandler(
filename = function(){
# Map the rmarkdown output format to its file extension.
ext <- if(input$dgaReportFormat == "html_document"){
"html"
}else if(input$dgaReportFormat == "word_document"){
# word_document produces an OOXML file, so the correct extension is
# ".docx" -- ".doc" mislabels the output for Word.
"docx"
}else{
"pdf"
}
paste0("bma_report.", ext)
},
content = function(file){
# YAML front matter selecting the requested output format.
fm <- paste0('
---
title: "Bayesian Model Averaging Report"
author: "shinyrecap"
output: ',input$dgaReportFormat,'
---
')
# Setup chunk: echo report code only when the user asked for it.
setup <- paste0(
'
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = ',input$dgaReportCode,')
```
'
)
rmd <- paste0(fm, setup, getMarkdownReport())
tempReport <- file.path(tempdir(), "report.Rmd")
cat(rmd, file=tempReport)
shinyjs::disable("dgaDownloadReport")
note <- showNotification("Generating Report...", duration=NULL)
# Render in a clean environment so chunks cannot see app state.
rr <- try(rmarkdown::render(tempReport, output_file = file,
envir = new.env(parent = globalenv())
))
removeNotification(note)
shinyjs::enable("dgaDownloadReport")
rr
}
)
list(
getMarkdownReport=getMarkdownReport
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as-mcmcrs.R
\name{as.mcmcrs}
\alias{as.mcmcrs}
\alias{as.mcmcrs.list}
\alias{as.mcmcrs.mcmcr}
\title{Convert to an mcmcrs object}
\usage{
as.mcmcrs(x, ...)
\method{as.mcmcrs}{list}(x, ...)
\method{as.mcmcrs}{mcmcr}(x, name = "mcmcr1", ...)
}
\arguments{
\item{x}{An MCMC object.}
\item{...}{Unused.}
\item{name}{A string specifying the element name.}
}
\value{
An mcmcrs object.
}
\description{
Converts an MCMC object to an \code{\link{mcmcrs-object}}.
}
\section{Methods (by class)}{
\itemize{
\item \code{list}: Convert a list of \code{\link{mcmcr-object}s} to an mcmcrs object
\item \code{mcmcr}: Convert an \code{\link{mcmcr-object}} to an mcmcrs object
}}
\examples{
as.mcmcrs(mcmcr::mcmcr_example)
}
| /man/as.mcmcrs.Rd | permissive | krlmlr/mcmcr | R | false | true | 791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as-mcmcrs.R
\name{as.mcmcrs}
\alias{as.mcmcrs}
\alias{as.mcmcrs.list}
\alias{as.mcmcrs.mcmcr}
\title{Convert to an mcmcrs object}
\usage{
as.mcmcrs(x, ...)
\method{as.mcmcrs}{list}(x, ...)
\method{as.mcmcrs}{mcmcr}(x, name = "mcmcr1", ...)
}
\arguments{
\item{x}{An MCMC object.}
\item{...}{Unused.}
\item{name}{A string specifying the element name.}
}
\value{
An mcmcrs object.
}
\description{
Converts an MCMC object to an \code{\link{mcmcrs-object}}.
}
\section{Methods (by class)}{
\itemize{
\item \code{list}: Convert a list of \code{\link{mcmcr-object}s} to an mcmcrs object
\item \code{mcmcr}: Convert an \code{\link{mcmcr-object}} to an mcmcrs object
}}
\examples{
as.mcmcrs(mcmcr::mcmcr_example)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/422.p-Confidence_p-Bias_CC_All_Graph.R
\name{PlotpCOpBICLT}
\alias{PlotpCOpBICLT}
\title{Plots p-confidence and p-bias for continuity corrected Logit Wald method}
\usage{
PlotpCOpBICLT(n, alp, c)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{c}{- Continuity correction}
}
\description{
Plots p-confidence and p-bias for continuity corrected Logit Wald method
}
\details{
p-confidence and p-bias plots for continuity corrected Logit Wald method
}
\examples{
n=5; alp=0.05;c=1/(2*n)
PlotpCOpBICLT(n,alp,c)
}
\seealso{
Other p-confidence and p-bias of continuity corrected methods: \code{\link{PlotpCOpBICAS}},
\code{\link{PlotpCOpBICAll}},
\code{\link{PlotpCOpBICSC}}, \code{\link{PlotpCOpBICTW}},
\code{\link{PlotpCOpBICWD}}, \code{\link{pCOpBICAS}},
\code{\link{pCOpBICAll}}, \code{\link{pCOpBICLT}},
\code{\link{pCOpBICSC}}, \code{\link{pCOpBICTW}},
\code{\link{pCOpBICWD}}
}
| /man/PlotpCOpBICLT.Rd | no_license | cran/proportion | R | false | true | 1,064 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/422.p-Confidence_p-Bias_CC_All_Graph.R
\name{PlotpCOpBICLT}
\alias{PlotpCOpBICLT}
\title{Plots p-confidence and p-bias for continuity corrected Logit Wald method}
\usage{
PlotpCOpBICLT(n, alp, c)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{c}{- Continuity correction}
}
\description{
Plots p-confidence and p-bias for continuity corrected Logit Wald method
}
\details{
p-confidence and p-bias plots for continuity corrected Logit Wald method
}
\examples{
n=5; alp=0.05;c=1/(2*n)
PlotpCOpBICLT(n,alp,c)
}
\seealso{
Other p-confidence and p-bias of continuity corrected methods: \code{\link{PlotpCOpBICAS}},
\code{\link{PlotpCOpBICAll}},
\code{\link{PlotpCOpBICSC}}, \code{\link{PlotpCOpBICTW}},
\code{\link{PlotpCOpBICWD}}, \code{\link{pCOpBICAS}},
\code{\link{pCOpBICAll}}, \code{\link{pCOpBICLT}},
\code{\link{pCOpBICSC}}, \code{\link{pCOpBICTW}},
\code{\link{pCOpBICWD}}
}
|
#-*- R -*-
## Script from Fourth Edition of `Modern Applied Statistics with S'
# Chapter 7 Generalized Linear Models
library(MASS)
options(width=65, digits=5, height=9999)
pdf(file="ch07.pdf", width=8, height=6, pointsize=9)
options(contrasts = c("contr.treatment", "contr.poly"))
ax.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt),
family = gaussian, data = anorexia)
summary(ax.1)
# 7.2 Binomial data
options(contrasts = c("contr.treatment", "contr.poly"))
ldose <- rep(0:5, 2)
numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
sex <- factor(rep(c("M", "F"), c(6, 6)))
SF <- cbind(numdead, numalive = 20 - numdead)
budworm.lg <- glm(SF ~ sex*ldose, family = binomial)
summary(budworm.lg)
plot(c(1,32), c(0,1), type = "n", xlab = "dose",
ylab = "prob", log = "x")
text(2^ldose, numdead/20, labels = as.character(sex))
ld <- seq(0, 5, 0.1)
lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
sex = factor(rep("M", length(ld)), levels = levels(sex))),
type = "response"), col = 3)
lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
sex = factor(rep("F", length(ld)), levels = levels(sex))),
type = "response"), lty = 2, col = 2)
budworm.lgA <- update(budworm.lg, . ~ sex * I(ldose - 3))
summary(budworm.lgA, cor = F)$coefficients
anova(update(budworm.lg, . ~ . + sex * I(ldose^2)),
test = "Chisq")
budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial)
summary(budworm.lg0, cor = F)$coefficients
dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4)
dose.p(update(budworm.lg0, family = binomial(link = probit)),
cf = c(1, 3), p = 1:3/4)
options(contrasts = c("contr.treatment", "contr.poly"))
attach(birthwt)
race <- factor(race, labels = c("white", "black", "other"))
table(ptl)
ptd <- factor(ptl > 0)
table(ftv)
ftv <- factor(ftv)
levels(ftv)[-(1:2)] <- "2+"
table(ftv) # as a check
bwt <- data.frame(low = factor(low), age, lwt, race,
smoke = (smoke > 0), ptd, ht = (ht > 0), ui = (ui > 0), ftv)
detach(); rm(race, ptd, ftv)
birthwt.glm <- glm(low ~ ., family = binomial, data = bwt)
summary(birthwt.glm)
birthwt.step <- stepAIC(birthwt.glm, trace = FALSE)
birthwt.step$anova
birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2)
+ I(scale(lwt)^2), trace = FALSE)
birthwt.step2$anova
summary(birthwt.step2)$coef
table(bwt$low, predict(birthwt.step2) > 0)
## R has a similar gam() in package gam and a different gam() in package mgcv
if(require(gam)) {
attach(bwt)
age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+")
birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd +
ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial,
bwt, bf.maxit=25)
print(summary(birthwt.gam))
print(table(low, predict(birthwt.gam) > 0))
par(mfrow = c(2, 2))
if(interactive()) plot(birthwt.gam, ask = TRUE, se = TRUE)
par(mfrow = c(1, 1))
detach()
}
library(mgcv)
attach(bwt)
age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+")
(birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd +
ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial, bwt))
table(low, predict(birthwt.gam) > 0)
par(mfrow = c(2, 2))
plot(birthwt.gam, se = TRUE)
par(mfrow = c(1, 1))
detach()
# 7.3 Poisson models
names(housing)
house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat,
family = poisson, data = housing)
summary(house.glm0)
addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test = "Chisq")
house.glm1 <- update(house.glm0, . ~ . + Sat:(Infl+Type+Cont))
summary(house.glm1)
1 - pchisq(deviance(house.glm1), house.glm1$df.resid)
dropterm(house.glm1, test = "Chisq")
addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq")
hnames <- lapply(housing[, -5], levels) # omit Freq
house.pm <- predict(house.glm1, expand.grid(hnames),
type = "response") # poisson means
house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE,
dimnames = list(NULL, hnames[[1]]))
house.pr <- house.pm/drop(house.pm %*% rep(1, 3))
cbind(expand.grid(hnames[-1]), round(house.pr, 2))
loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont),
data = housing)
library(nnet)
(house.mult <- multinom(Sat ~ Infl + Type + Cont,
weights = Freq, data = housing))
house.mult2 <- multinom(Sat ~ Infl*Type*Cont,
weights = Freq, data = housing)
anova(house.mult, house.mult2, test = "none")
house.pm <- predict(house.mult, expand.grid(hnames[-1]),
type = "probs")
cbind(expand.grid(hnames[-1]), round(house.pm, 2))
house.cpr <- apply(house.pr, 1, cumsum)
logit <- function(x) log(x/(1-x))
house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ])
sort(drop(house.ld))
mean(.Last.value)
house.plr <- polr(Sat ~ Infl + Type + Cont,
data = housing, weights = Freq)
house.plr
house.pr1 <- predict(house.plr, expand.grid(hnames[-1]),
type = "probs")
cbind(expand.grid(hnames[-1]), round(house.pr1, 2))
Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE)
2 * sum(Fr * log(house.pr/house.pr1))
house.plr2 <- stepAIC(house.plr, ~.^2)
house.plr2$anova
# 7.4 A negative binomial family
glm(Days ~ .^4, family = poisson, data = quine)
quine.nb <- glm(Days ~ .^4, family = negative.binomial(2), data = quine)
quine.nb0 <- update(quine.nb, . ~ Sex/(Age + Eth*Lrn))
anova(quine.nb0, quine.nb, test = "Chisq")
quine.nb <- glm.nb(Days ~ .^4, data = quine)
quine.nb2 <- stepAIC(quine.nb)
quine.nb2$anova
dropterm(quine.nb2, test = "Chisq")
quine.nb3 <-
update(quine.nb2, . ~ . - Eth:Age:Lrn - Sex:Age:Lrn)
anova(quine.nb2, quine.nb3)
c(theta = quine.nb2$theta, SE = quine.nb2$SE)
par(mfrow = c(2,2), pty = "m")
rs <- resid(quine.nb2, type = "deviance")
plot(predict(quine.nb2), rs, xlab = "Linear predictors",
ylab = "Deviance residuals")
abline(h = 0, lty = 2)
qqnorm(rs, ylab = "Deviance residuals")
qqline(rs)
par(mfrow = c(1,1))
# End of ch07
| /R-Portable/library/MASS/scripts/ch07.R | permissive | ksasso/Electron_ShinyApp_Deployment | R | false | false | 5,901 | r | #-*- R -*-
## Script from Fourth Edition of `Modern Applied Statistics with S'
# Chapter 7 Generalized Linear Models
library(MASS)
options(width=65, digits=5, height=9999)
pdf(file="ch07.pdf", width=8, height=6, pointsize=9)
options(contrasts = c("contr.treatment", "contr.poly"))
ax.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt),
family = gaussian, data = anorexia)
summary(ax.1)
# 7.2 Binomial data
options(contrasts = c("contr.treatment", "contr.poly"))
ldose <- rep(0:5, 2)
numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
sex <- factor(rep(c("M", "F"), c(6, 6)))
SF <- cbind(numdead, numalive = 20 - numdead)
budworm.lg <- glm(SF ~ sex*ldose, family = binomial)
summary(budworm.lg)
plot(c(1,32), c(0,1), type = "n", xlab = "dose",
ylab = "prob", log = "x")
text(2^ldose, numdead/20, labels = as.character(sex))
ld <- seq(0, 5, 0.1)
lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
sex = factor(rep("M", length(ld)), levels = levels(sex))),
type = "response"), col = 3)
lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
sex = factor(rep("F", length(ld)), levels = levels(sex))),
type = "response"), lty = 2, col = 2)
budworm.lgA <- update(budworm.lg, . ~ sex * I(ldose - 3))
summary(budworm.lgA, cor = F)$coefficients
anova(update(budworm.lg, . ~ . + sex * I(ldose^2)),
test = "Chisq")
budworm.lg0 <- glm(SF ~ sex + ldose - 1, family = binomial)
summary(budworm.lg0, cor = F)$coefficients
dose.p(budworm.lg0, cf = c(1,3), p = 1:3/4)
dose.p(update(budworm.lg0, family = binomial(link = probit)),
cf = c(1, 3), p = 1:3/4)
options(contrasts = c("contr.treatment", "contr.poly"))
attach(birthwt)
race <- factor(race, labels = c("white", "black", "other"))
table(ptl)
ptd <- factor(ptl > 0)
table(ftv)
ftv <- factor(ftv)
levels(ftv)[-(1:2)] <- "2+"
table(ftv) # as a check
bwt <- data.frame(low = factor(low), age, lwt, race,
smoke = (smoke > 0), ptd, ht = (ht > 0), ui = (ui > 0), ftv)
detach(); rm(race, ptd, ftv)
birthwt.glm <- glm(low ~ ., family = binomial, data = bwt)
summary(birthwt.glm)
birthwt.step <- stepAIC(birthwt.glm, trace = FALSE)
birthwt.step$anova
birthwt.step2 <- stepAIC(birthwt.glm, ~ .^2 + I(scale(age)^2)
+ I(scale(lwt)^2), trace = FALSE)
birthwt.step2$anova
summary(birthwt.step2)$coef
table(bwt$low, predict(birthwt.step2) > 0)
## R has a similar gam() in package gam and a different gam() in package mgcv
if(require(gam)) {
attach(bwt)
age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+")
birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd +
ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial,
bwt, bf.maxit=25)
print(summary(birthwt.gam))
print(table(low, predict(birthwt.gam) > 0))
par(mfrow = c(2, 2))
if(interactive()) plot(birthwt.gam, ask = TRUE, se = TRUE)
par(mfrow = c(1, 1))
detach()
}
library(mgcv)
attach(bwt)
age1 <- age*(ftv=="1"); age2 <- age*(ftv=="2+")
(birthwt.gam <- gam(low ~ s(age) + s(lwt) + smoke + ptd +
ht + ui + ftv + s(age1) + s(age2) + smoke:ui, binomial, bwt))
table(low, predict(birthwt.gam) > 0)
par(mfrow = c(2, 2))
plot(birthwt.gam, se = TRUE)
par(mfrow = c(1, 1))
detach()
# 7.3 Poisson models
names(housing)
house.glm0 <- glm(Freq ~ Infl*Type*Cont + Sat,
family = poisson, data = housing)
summary(house.glm0)
addterm(house.glm0, ~. + Sat:(Infl+Type+Cont), test = "Chisq")
house.glm1 <- update(house.glm0, . ~ . + Sat:(Infl+Type+Cont))
summary(house.glm1)
1 - pchisq(deviance(house.glm1), house.glm1$df.resid)
dropterm(house.glm1, test = "Chisq")
addterm(house.glm1, ~. + Sat:(Infl+Type+Cont)^2, test = "Chisq")
hnames <- lapply(housing[, -5], levels) # omit Freq
house.pm <- predict(house.glm1, expand.grid(hnames),
type = "response") # poisson means
house.pm <- matrix(house.pm, ncol = 3, byrow = TRUE,
dimnames = list(NULL, hnames[[1]]))
house.pr <- house.pm/drop(house.pm %*% rep(1, 3))
cbind(expand.grid(hnames[-1]), round(house.pr, 2))
loglm(Freq ~ Infl*Type*Cont + Sat*(Infl+Type+Cont),
data = housing)
library(nnet)
(house.mult <- multinom(Sat ~ Infl + Type + Cont,
weights = Freq, data = housing))
house.mult2 <- multinom(Sat ~ Infl*Type*Cont,
weights = Freq, data = housing)
anova(house.mult, house.mult2, test = "none")
house.pm <- predict(house.mult, expand.grid(hnames[-1]),
type = "probs")
cbind(expand.grid(hnames[-1]), round(house.pm, 2))
house.cpr <- apply(house.pr, 1, cumsum)
logit <- function(x) log(x/(1-x))
house.ld <- logit(house.cpr[2, ]) - logit(house.cpr[1, ])
sort(drop(house.ld))
mean(.Last.value)
house.plr <- polr(Sat ~ Infl + Type + Cont,
data = housing, weights = Freq)
house.plr
house.pr1 <- predict(house.plr, expand.grid(hnames[-1]),
type = "probs")
cbind(expand.grid(hnames[-1]), round(house.pr1, 2))
Fr <- matrix(housing$Freq, ncol = 3, byrow = TRUE)
2 * sum(Fr * log(house.pr/house.pr1))
house.plr2 <- stepAIC(house.plr, ~.^2)
house.plr2$anova
# 7.4 A negative binomial family
glm(Days ~ .^4, family = poisson, data = quine)
quine.nb <- glm(Days ~ .^4, family = negative.binomial(2), data = quine)
quine.nb0 <- update(quine.nb, . ~ Sex/(Age + Eth*Lrn))
anova(quine.nb0, quine.nb, test = "Chisq")
quine.nb <- glm.nb(Days ~ .^4, data = quine)
quine.nb2 <- stepAIC(quine.nb)
quine.nb2$anova
dropterm(quine.nb2, test = "Chisq")
quine.nb3 <-
update(quine.nb2, . ~ . - Eth:Age:Lrn - Sex:Age:Lrn)
anova(quine.nb2, quine.nb3)
c(theta = quine.nb2$theta, SE = quine.nb2$SE)
par(mfrow = c(2,2), pty = "m")
rs <- resid(quine.nb2, type = "deviance")
plot(predict(quine.nb2), rs, xlab = "Linear predictors",
ylab = "Deviance residuals")
abline(h = 0, lty = 2)
qqnorm(rs, ylab = "Deviance residuals")
qqline(rs)
par(mfrow = c(1,1))
# End of ch07
|
library(devtools)
library(shinyapps)
#this.dir <- dirname(parent.frame(2)$ofile)
#setwd(this.dir)
devtools::load_all('../../../packages/bcPcaAnalysis')
devtools::document('../../../packages/bcPcaAnalysis')
pca_universe = '/Users/Albi/Dropbox/barcoded-PCA/2015-08-30/Additional.file.6.txt'
expression_file = '/Users/Albi/Dropbox/barcoded-PCA/2015-08-30/Additional.file.14.txt'
protein_abundance_file = "/Users/Albi/Dropbox/Roth Lab/projects/bc_pca_git/data/paxdb_abundance.tsv"
output_path <- '/Users/Albi/Dropbox/Roth Lab/projects/bc_pca_git/scripts/exploration/shiny_pca_ma/www'
my_predictions <- pca_ma_prediction(pca_universe
,protein_abundance_file,
expression_file,'ethanol',
expression_condition_regexp='Ethanol.4h')
# UI: a sidebar of sliders for plot aesthetics (label/point size, an RGBA
# point colour, and outline opacity) plus a main panel embedding the
# rendered PDF via an iframe.
ui <- fluidPage(
sidebarPanel(
sliderInput('label_size', 'Label Size', 0,5,1.23,step = 1/100),
sliderInput('xsize', 'Point Size', 0,10,0.8,step = 0.1),
sliderInput('point_red', 'Point colour red', 0,1,0.15,step = 1/255),
sliderInput('point_green', 'Point colour green', 0,1,0.22,step = 1/255),
sliderInput('point_blue', 'Point colour blue', 0,1,0.33,step = 1/255),
sliderInput('point_transparent', 'Point colour opacity', 0,1,0.37,step = 1/255),
sliderInput('outline_transparent', 'Outline opacity', 0,1,0,step = 1/255)
),
mainPanel(
htmlOutput('file1'))
)
# Server: re-renders the PCA MA prediction plot to a PDF in the app's
# www/ directory whenever an aesthetic slider changes, then serves the
# PDF to the browser through an iframe.
server <- function(input, output) {
generate_file <- reactive({
# Reading input$xsize here (and the inputs below) makes the reactive
# invalidate whenever any slider changes.
reactive_element <- input$xsize;
outfile <- paste(c(output_path,'plot1.pdf'),collapse='/')
print(outfile)
#tempfile(fileext = ".png")
CairoPDF(outfile, width=5, height=5)
pca_ma_prediction_plot(
my_predictions,output_path = '',draw = T,
point_size = input$xsize,
point_colours = rgb(
input$point_red,
input$point_green,
input$point_blue,
input$point_transparent
),
outline_colours = rgb(
0,0,0,input$outline_transparent
),
label_size = input$label_size
)
dev.off()
# Path relative to www/, used as the iframe src.
return('plot1.pdf')
}
)
output$file1 <- renderUI({tags$iframe(style="height:900px; width:100%; scrolling=yes",
src=generate_file())})
}
shinyApp(ui = ui, server = server)
| /scripts/exploration/shiny_pca_ma/app.R | no_license | a3cel2/bc_pca_git | R | false | false | 2,309 | r | library(devtools)
library(shinyapps)
#this.dir <- dirname(parent.frame(2)$ofile)
#setwd(this.dir)
devtools::load_all('../../../packages/bcPcaAnalysis')
devtools::document('../../../packages/bcPcaAnalysis')
pca_universe = '/Users/Albi/Dropbox/barcoded-PCA/2015-08-30/Additional.file.6.txt'
expression_file = '/Users/Albi/Dropbox/barcoded-PCA/2015-08-30/Additional.file.14.txt'
protein_abundance_file = "/Users/Albi/Dropbox/Roth Lab/projects/bc_pca_git/data/paxdb_abundance.tsv"
output_path <- '/Users/Albi/Dropbox/Roth Lab/projects/bc_pca_git/scripts/exploration/shiny_pca_ma/www'
my_predictions <- pca_ma_prediction(pca_universe
,protein_abundance_file,
expression_file,'ethanol',
expression_condition_regexp='Ethanol.4h')
ui <- fluidPage(
sidebarPanel(
sliderInput('label_size', 'Label Size', 0,5,1.23,step = 1/100),
sliderInput('xsize', 'Point Size', 0,10,0.8,step = 0.1),
sliderInput('point_red', 'Point colour red', 0,1,0.15,step = 1/255),
sliderInput('point_green', 'Point colour green', 0,1,0.22,step = 1/255),
sliderInput('point_blue', 'Point colour blue', 0,1,0.33,step = 1/255),
sliderInput('point_transparent', 'Point colour opacity', 0,1,0.37,step = 1/255),
sliderInput('outline_transparent', 'Outline opacity', 0,1,0,step = 1/255)
),
mainPanel(
htmlOutput('file1'))
)
server <- function(input, output) {
generate_file <- reactive({
reactive_element <- input$xsize;
outfile <- paste(c(output_path,'plot1.pdf'),collapse='/')
print(outfile)
#tempfile(fileext = ".png")
CairoPDF(outfile, width=5, height=5)
pca_ma_prediction_plot(
my_predictions,output_path = '',draw = T,
point_size = input$xsize,
point_colours = rgb(
input$point_red,
input$point_green,
input$point_blue,
input$point_transparent
),
outline_colours = rgb(
0,0,0,input$outline_transparent
),
label_size = input$label_size
)
dev.off()
return('plot1.pdf')
}
)
output$file1 <- renderUI({tags$iframe(style="height:900px; width:100%; scrolling=yes",
src=generate_file())})
}
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_sinan_malaria.R
\name{process_sinan_malaria}
\alias{process_sinan_malaria}
\title{Process SINAN Malaria variables from DataSUS}
\usage{
process_sinan_malaria(data, municipality_data = TRUE)
}
\arguments{
\item{data}{\code{data.frame} created by \code{fetch_datasus()}.}
\item{municipality_data}{optional logical. \code{TRUE} by default, creates new variables in the dataset informing the full name and other details about the municipality of residence.}
}
\description{
\code{process_sinan_malaria} processes SINAN Malaria variables retrieved by \code{fetch_datasus()}.
}
\details{
This function processes SINAN Malaria variables retrieved by \code{fetch_datasus()}, informing labels for categoric variables including NA values.
}
\examples{
\dontrun{
df <- fetch_datasus(year_start = 2016, year_end = 2016,
uf = "RJ", information_system = "SINAN-MALARIA-FINAL")
df_a <- process_sinan_malaria(df)
df_b <- process_sinan_malaria(df, municipality_data = FALSE)
}
}
| /man/process_sinan_malaria.Rd | permissive | rfsaldanha/microdatasus | R | false | true | 1,049 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_sinan_malaria.R
\name{process_sinan_malaria}
\alias{process_sinan_malaria}
\title{Process SINAN Malaria variables from DataSUS}
\usage{
process_sinan_malaria(data, municipality_data = TRUE)
}
\arguments{
\item{data}{\code{data.frame} created by \code{fetch_datasus()}.}
\item{municipality_data}{optional logical. \code{TRUE} by default, creates new variables in the dataset informing the full name and other details about the municipality of residence.}
}
\description{
\code{process_sinan_malaria} processes SINAN Malaria variables retrieved by \code{fetch_datasus()}.
}
\details{
This function processes SINAN Malaria variables retrieved by \code{fetch_datasus()}, informing labels for categoric variables including NA values.
}
\examples{
\dontrun{
df <- fetch_datasus(year_start = 2016, year_end = 2016,
uf = "RJ", information_system = "SINAN-MALARIA-FINAL")
df_a <- process_sinan_malaria(df)
df_b <- process_sinan_malaria(df, municipality_data = FALSE)
}
}
|
# Smoke tests for the Ops group generics defined for champion_tree
# objects, gated behind RUN_TESTS so the file is inert unless enabled.
if(RUN_TESTS)
{
ct1 <- champion_tree(20, 40, 25)
ct2 <- champion_tree(20, 30, 20)
test_that("Ops works with two trees", {
expect_true(ct1 >= ct2)
expect_true(ct1 > ct2)
expect_false(ct1 == ct2)
# Arithmetic between two trees is intentionally unsupported.
expect_error(ct1 + ct2)
})
test_that("Ops works with one tree and a number", {
expect_true(ct1 >= 60)
expect_true(ct1 > 60)
# 66 is presumably ct1's numeric comparison value -- TODO confirm
# against the champion_tree Ops implementation.
expect_true(ct1 == 66)
expect_error(ct1 + 66)
})
}
| /tests/testthat/test-ops.R | no_license | eheinzen/championtrees | R | false | false | 414 | r |
if(RUN_TESTS)
{
ct1 <- champion_tree(20, 40, 25)
ct2 <- champion_tree(20, 30, 20)
test_that("Ops works with two trees", {
expect_true(ct1 >= ct2)
expect_true(ct1 > ct2)
expect_false(ct1 == ct2)
expect_error(ct1 + ct2)
})
test_that("Ops works with one tree and a number", {
expect_true(ct1 >= 60)
expect_true(ct1 > 60)
expect_true(ct1 == 66)
expect_error(ct1 + 66)
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.yates.algo.R
\name{run.yates.algo}
\alias{run.yates.algo}
\title{A Function To implement Yates' Algorithm to compute the Sum Squares
of (2^n) - 1 Factorial Effects in a 2^n Factorial Experiment.
The Factorial Experiment can be carried out using any one of the designs, i.e.,
CRD, RBD or LSD.}
\usage{
run.yates.algo(trt.combo, trt.total, n, r)
}
\arguments{
\item{trt.combo}{A factor type character vector taking input for the treatment
combinations in a 2^n experiment considered in the standard
order.}
\item{trt.total}{A numeric vector storing the corresponding treatment (treatment combination)
totals, for instance in a 2^2 experiment we have :- [1],[a],[b],[ab] .}
\item{n}{The number of Factors under consideration in a 2^n Factorial Experiment.}
\item{r}{The number of replicates/blocks, for a CRD : the number of replicates,
for a RBD : the number of blocks and for a LSD : the number of treatments itself.}
}
\value{
The Sum Of Squares of the 2^n - 1 Factorial Effects in a 2^n Factorial
Experiment in the Standard Order, a numeric vector.
}
\description{
The Function implements Yates' Algorithm and returns the SS of the required
number of Factorial Effects in the given 2^n Factorial Experiment.
}
\details{
For example, in the case of a 2^2 experiment, the function returns
SS(A), SS(B) and SS(AB) by implementing Yates' Algorithm, i.e., the SS due to the 3 required Factorial Effects,
among which two are the Main Effects and one is the First Order Interaction Effect.
Note that, while entering the trt.combo or the trt vector as shown in the example
below, you have to maintain the same pattern and order of the assigned treatments
following which you have entered the response variable values y.
}
\examples{
# The Response Variable as provided in the given design layout.
y = c(90,74,81,83,77,81,88,73,
93,78,85,80,78,80,82,70,
98,85,88,84,82,85,88,79,
98,72,87,85,99,79,87,80,
95,76,83,86,90,75,84,80,
100,82,91,86,98,81,86,85)
# Number of Replicates or Blocks, whichever applicable in the considered Factorial
# Experiment.
r = 3
# Total number of factors in the 2^n Factorial Experiment under consideration.
n = 4
# The Treatment Allocation as mentioned in a factor type character vector.
trt = as.factor(c(rep(1:8,each=1,times=3),rep(9:16,each=1,times=3)))
# The Relevant Treatment Combinations in the 2^n Factorial Experiment in the order as mentioned.
trt.combo = as.factor(c('1','a','b','ab','c','ac','bc','abc',
'd','ad','bd','abd','cd','acd','bcd','abcd'))
# The Treatment Totals using the aggregate() function.
trt.total = aggregate(y,by = list(trt),sum)$x
# Finally calling the function run.yates.algo() to get the desired SS'.
SS.factorial.effects = run.yates.algo(trt.combo,trt.total,n,r)
}
\seealso{
A Special Mention : Prof. Debjit Sengupta who helped to visualize and develop the
concept eventually making it possible for me to implement it through coding.
}
\author{
Somjit Roy
}
| /man/run.yates.algo.Rd | permissive | Roy-SR-007/YatesAlgo.FactorialExp | R | false | true | 3,050 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.yates.algo.R
\name{run.yates.algo}
\alias{run.yates.algo}
\title{A Function To implement Yates' Algorithm to compute the Sum Squares
of (2^n) - 1 Factorial Effects in a 2^n Factorial Experiment.
The Factorial Experiment can be carried out using any one of the designs, i.e.,
CRD, RBD or LSD.}
\usage{
run.yates.algo(trt.combo, trt.total, n, r)
}
\arguments{
\item{trt.combo}{A factor type character vector taking input for the treatment
combinations in a 2^n experiment considered in the standard
order.}
\item{trt.total}{A numeric vector storing the corresponding treatment (treatment combination)
totals, for instance in a 2^2 experiment we have :- [1],[a],[b],[ab] .}
\item{n}{The number of Factors under consideration in a 2^n Factorial Experiment.}
\item{r}{The number of replicates/blocks, for a CRD : the number of replicates,
for a RBD : the number of blocks and for a LSD : the number of treatments itself.}
}
\value{
The Sum Of Squares of the 2^n - 1 Factorial Effects in a 2^n Factorial
Experiment in the Standard Order, a numeric vector.
}
\description{
The Function implements Yates' Algorithm and returns the SS of the required
number of Factorial Effects in the given 2^n Factorial Experiment.
}
\details{
For Example, in case of a 2^2 experiment, the function would return,
SS(A), SS(B) and SS(AB) by implementing the Yates' Algorithm, i.e., the SS due to the 3 required Factorial Effects,
among which two are the Main Effects and one is the First Order Interaction Effect.
Note that, while entering the trt.combo or the trt vector as shown in the example
below, you have to maintain the same pattern and order of the assigned treatments
following which you have entered the response variable values y.
}
\examples{
# The Response Variable as provided in the given design layout.
y = c(90,74,81,83,77,81,88,73,
93,78,85,80,78,80,82,70,
98,85,88,84,82,85,88,79,
98,72,87,85,99,79,87,80,
95,76,83,86,90,75,84,80,
100,82,91,86,98,81,86,85)
# Number of Replicates or Blocks, whichever applicable in the considered Factorial
# Experiment.
r = 3
# Total number of factors in the 2^n Factorial Experiment under consideration.
n = 4
# The Treatment Allocation as mentioned in a factor type character vector.
trt = as.factor(c(rep(1:8,each=1,times=3),rep(9:16,each=1,times=3)))
# The Relevant Treatment Combinations in the 2^n Factorial Experiment in the order as mentioned.
trt.combo = as.factor(c('1','a','b','ab','c','ac','bc','abc',
'd','ad','bd','abd','cd','acd','bcd','abcd'))
# The Treatment Totals using the aggregate() function.
trt.total = aggregate(y,by = list(trt),sum)$x
# Finally calling the function run.yates.algo() to get the desired SS'.
SS.factorial.effects = run.yates.algo(trt.combo,trt.total,n,r)
}
\seealso{
A Special Mention : Prof. Debjit Sengupta who helped to visualize and develop the
concept eventually making it possible for me to implement it through coding.
}
\author{
Somjit Roy
}
|
# Euclidean (L2) distance between two numeric vectors of equal length.
euclideanDistance <- function(u, v)
{
delta <- u - v
sqrt(sum(delta * delta))
}
# Find the training object nearest to the query point z.
#
# NOTE: despite the historical name, nothing is sorted -- the function
# scans all rows and returns the single closest one. The last column of
# xl is assumed to hold the class label and is excluded from the metric.
#
# xl             - data frame: features in columns 1..(ncol-1), label last.
# z              - numeric query point of length ncol(xl) - 1.
# metricFunction - distance between two points (default: Euclidean).
#
# Returns c(row_index, distance) for the nearest row. Ties keep the
# first (lowest-index) row, matching the original linear scan. Unlike
# the original `for (i in 2:l)` loop, this also handles a single-row xl
# correctly (2:1 used to index a nonexistent row 2).
sortObjectsByDist <- function(xl, z, metricFunction =
euclideanDistance)
{
n <- dim(xl)[2] - 1
# Distance from every training row to z.
dists <- vapply(seq_len(dim(xl)[1]),
function(i) metricFunction(xl[i, 1:n], z),
numeric(1))
best <- which.min(dists)
return (c(best, dists[best]))
}
# Demo: plot iris petal length/width coloured by species, then classify
# the query point z = (1, 2) with 1-NN and draw it as a square in its
# predicted species' colour.
colors <- c("setosa" = "red", "versicolor" = "green3",
"virginica" = "blue")
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species], col
= colors[iris$Species], asp = 1)
z <- c(1, 2)
xl <- iris[, 3:5]
n <- dim(xl)[2] - 1
# Species label of the nearest training point (column n+1 is Species).
class <- xl[sortObjectsByDist(xl,z)[1], n + 1]
points(z[1], z[2], pch = 22, bg = colors[class], asp = 1)
| /1nn.r | no_license | Ismailodabashi/SMPR | R | false | false | 724 | r | euclideanDistance <- function(u, v)
{
sqrt(sum((u - v)^2))
}
# Find the training object of xl nearest to point z.
# Returns c(row_index, distance); the last column of xl is the class label,
# the remaining columns are features.
sortObjectsByDist <- function(xl, z, metricFunction =
euclideanDistance)
{
l <- dim(xl)[1]
n <- dim(xl)[2] - 1
# first object initialises the running minimum: c(index, distance)
min <- c(1, metricFunction(xl[1, 1:n], z))
# NOTE(review): `2:l` assumes l >= 2; with a single-row xl this becomes
# c(2, 1) and indexes a non-existent row -- confirm callers always pass
# at least two rows.
for (i in 2:l)
{
dist <- c(i,metricFunction(xl[i, 1:n], z))
if (min[2]>dist[2]){
min <- dist
}
}
return (min);
}
# One colour per iris species for plotting.
colors <- c("setosa" = "red", "versicolor" = "green3",
"virginica" = "blue")
# Petal length vs petal width, filled by species.
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species], col
= colors[iris$Species], asp = 1)
# Classify z = (1, 2) by its single nearest neighbour over petal features.
z <- c(1, 2)
xl <- iris[, 3:5]
n <- dim(xl)[2] - 1
class <- xl[sortObjectsByDist(xl,z)[1], n + 1]
# Mark the query point with the predicted class colour.
points(z[1], z[2], pch = 22, bg = colors[class], asp = 1)
|
# NOTE(review): hard-coded absolute path -- the script only runs as-is on
# the author's machine.
setwd("/Users/jameshanley/Dropbox/Courses/statbook/")
# Car length data, split across two text files, stacked into one data frame.
d1 = read.table("First31.txt",as.is=TRUE)
d2 = read.table("Last3352.txt",as.is=TRUE)
d = rbind(d1,d2)
str(d)
summary(d)
head(d)
# UK vehicle registration counts (veh0126.csv); first 6 header lines skipped.
ds=read.csv("veh0126.csv",as.is=TRUE,skip=6)
head(ds[,1:10],6)
tail(ds[,1:10],15)
first = function(x) strsplit(x," ")[[1]][1]
# First word of the "Make" column = the manufacturer name.
ds$Main = unlist( lapply(ds$Make,first) )
sum(ds$Total)
# Registrations per manufacturer; keep only makes with > 10000 total.
fr = aggregate(ds$Total,by=list(Main=ds$Main),sum) ; str(fr)
TOP = fr$Main[fr$x>10000]
ds = ds[ds$Main %in% TOP,c(1,2,125,126)]
str(ds)
# Registrations per generic model, most popular first; keep >= 1000.
Fr = aggregate(ds$Total,by=list(Main=ds$Generic.model.1),sum) ;
Fr = Fr[order(-Fr$x),] ; str(Fr)
Fr = Fr[ Fr$x >= 1000, ] ; head(Fr)
Fr = Fr[ order(Fr$Main), ] ;
# VAUXHALL models are matched under the OPEL brand in the length data.
Fr$Main = gsub("VAUXHALL", "OPEL", Fr$Main)
head(Fr)
head(d)
# Median measured length (from d) for each model name that matches.
Fr$Length = NA
for(i in 1:length(Fr$Main)){
hits = grep(Fr$Main[i],d$Model,ignore.case=TRUE)
if(length(hits) > 0 ) Fr$Length[i] = round(median(d$Length[hits]))
}
Fr = Fr[!is.na(Fr$Length),]
names(Fr)[1:2] = c("Make.Model","No.Registered")
str(Fr)
head(Fr)
tail(Fr)
sum(Fr$No.Registered)
summary(Fr)
hist(Fr$Length,breaks=25)
###########
# One-off web-scraping pass over cars-data.com to collect model lengths.
# Guarded by ALREADY so the expensive crawl is skipped on re-runs.
ALREADY=TRUE
if(!ALREADY) {
library(RCurl)
url.0 = "https://www.cars-data.com/en/"
www.0 = "www.cars-data.com/en/"
# Landing page: extract the brand list from the "footerbrands" section.
TXT = getURL(url.0)
txt=strsplit(TXT,"\n")[[1]]; length(txt)
txt = txt[grep("footerbrands",txt)+1]
txt = strsplit(txt,"<a href=")[[1]]
company = txt[-1]
n = length(company)
CO=rep(NA,n)
for(i in 1:n){
CO[i]=strsplit(strsplit(company[i],">")[[1]][2],"<")[[1]][1]
}
# Match scraped brand names against the registration table Fr;
# m[i, j] = 1 when brand CO[j] appears in Fr$Main[i].
m = matrix(NA,length(Fr$Main),length(CO))
for(i in 1:length(Fr$Main)){
for( j in 1:length(CO)){
hit = grep(CO[j],Fr$Main[i],ignore.case =TRUE)
if(length(hit)==1) m[i,j] = 1
}
}
h = apply(m,1,sum,na.rm=TRUE)
Fr = Fr[h>0,] ; str(Fr)
H = apply(m,2,sum,na.rm=TRUE)
Make.Index = (1:length(CO))[H>0]
company = company[Make.Index]
n = length(company)
url1 =rep(NA,n)
# Preallocated result buffers; ii counts how many slots are filled.
MODEL= rep(NA,100000)
LENGTH=rep(NA,100000)
ii = 0
# NOTE(review): the loop starts at 33, presumably resuming an interrupted
# crawl -- confirm before re-running from scratch.
for( i in 33:n){
print(i)
Txt = strsplit(company[i], " rel=")[[1]][1]
nk = nchar(Txt)
url1[i] = substr(Txt,2,nk-1)
TXT2 = getURL(url1[i])
txt2=strsplit(TXT2,"\n")[[1]]; length(txt2)
txt2=txt2[grep("col-4",txt2)]
txt2 = txt2[ grep(url1[i],txt2) ]
n2 = length(txt2)
print(c("n2",n2))
url2 =rep(NA,n2)
# Level 2: one page per model family of the brand.
for( j in 1:n2){
#print(j)
Txt2 = strsplit(txt2[j], " title=")[[1]][1]
nk2 = nchar(Txt2)
url2[j] = substr(Txt2,38,nk2-1)
#print(url2[j])
#print(noquote(""))
TXT3 = getURL(url2[j])
txt3=strsplit(TXT3,"\n")[[1]]; length(txt3)
txt3 = txt3[grep("col-4",txt3)]
txt3 = txt3[grep("www.cars-data.com/en",txt3)]
#print(txt3)
#print(noquote(""))
n3 = length(txt3)
#print(n3)
url3 =rep(NA,n3)
# Level 3: only the "middle" generation is followed (median index).
for( k in ceiling( median(1:n3) ) ){
#print(k)
Txt3 = strsplit(txt3[k], " title=")[[1]][1]
Txt3 = strsplit(Txt3, "href=")[[1]][2]
nk = nchar(Txt3)
url3[k] = substr(Txt3,2,nk-1)
TXT4 = getURL(url3[k])
txt4=strsplit(TXT4,"\n")[[1]];
txt4 = txt4[grep("-specs/",txt4)]
n4 = length(txt4)
url4 =rep(NA,n4)
# Level 4: the "middle" trim's spec page.
# NOTE(review): the loop variable `m` shadows the brand-match matrix `m`
# defined above; harmless here only because the matrix is no longer used.
for( m in ceiling( median(1:n4) ) ){
Line = strsplit(txt4[m], " title=")[[1]]
model = Line[2]
model = strsplit(model,">")[[1]][2]
model = strsplit(model,"<")[[1]][1]
Txt4 = Line[1]
Txt4 = strsplit(Txt4, "href=")[[1]][2]
nk = nchar(Txt4)
url4[m] = substr(Txt4,2,nk-1)
#print(url4[m])
TXT5 = getURL(url4[m])
txt5=strsplit(TXT5,"\n")[[1]];
# Length is on the line after "EXTERIOR DIMENSIONS".
line.no = grep("EXTERIOR DIMENSIONS",txt5)+1
Txt = txt5[line.no]
Txt = strsplit(Txt, "length")[[1]][2]
Txt = strsplit(Txt, "</dd>")[[1]][1]
Txt = strsplit(Txt, ">")[[1]]
Txt = Txt[ length(Txt) ]
ii=ii+1
MODEL[ii] = model
LENGTH[ii] = Txt
#print(url4[m])
#print(txt)
if( (ii %% 10) == 0 ) print(ii)
} # m
} # k
#print(noquote(""))
} # j
} # i
# Assemble results; first 4 characters of the scraped text are the length.
ds = data.frame(Model=MODEL,
Length=as.numeric(substr(LENGTH,1,4)),
stringsAsFactors=FALSE)
str(ds)
ds = ds[!is.na(ds$Length),]
str(ds)
} # ALREADY
| /GetCarData.R | permissive | JamesHanley/statbook | R | false | false | 4,559 | r |
# Duplicate copy of GetCarData.R (dataset dump repeats each file).
# NOTE(review): hard-coded absolute path -- runs only on the author's machine.
setwd("/Users/jameshanley/Dropbox/Courses/statbook/")
# Car length data, split across two files, stacked into one data frame.
d1 = read.table("First31.txt",as.is=TRUE)
d2 = read.table("Last3352.txt",as.is=TRUE)
d = rbind(d1,d2)
str(d)
summary(d)
head(d)
# UK vehicle registration counts; header lines skipped.
ds=read.csv("veh0126.csv",as.is=TRUE,skip=6)
head(ds[,1:10],6)
tail(ds[,1:10],15)
# First space-delimited token = manufacturer name.
first = function(x) strsplit(x," ")[[1]][1]
ds$Main = unlist( lapply(ds$Make,first) )
sum(ds$Total)
# Registrations per manufacturer; keep makes with > 10000 total.
fr = aggregate(ds$Total,by=list(Main=ds$Main),sum) ; str(fr)
TOP = fr$Main[fr$x>10000]
ds = ds[ds$Main %in% TOP,c(1,2,125,126)]
str(ds)
# Registrations per generic model; keep >= 1000, sorted by name.
Fr = aggregate(ds$Total,by=list(Main=ds$Generic.model.1),sum) ;
Fr = Fr[order(-Fr$x),] ; str(Fr)
Fr = Fr[ Fr$x >= 1000, ] ; head(Fr)
Fr = Fr[ order(Fr$Main), ] ;
# VAUXHALL models are matched under the OPEL brand in the length data.
Fr$Main = gsub("VAUXHALL", "OPEL", Fr$Main)
head(Fr)
head(d)
# Median measured length for each model name that matches.
Fr$Length = NA
for(i in 1:length(Fr$Main)){
hits = grep(Fr$Main[i],d$Model,ignore.case=TRUE)
if(length(hits) > 0 ) Fr$Length[i] = round(median(d$Length[hits]))
}
Fr = Fr[!is.na(Fr$Length),]
names(Fr)[1:2] = c("Make.Model","No.Registered")
str(Fr)
head(Fr)
tail(Fr)
sum(Fr$No.Registered)
summary(Fr)
hist(Fr$Length,breaks=25)
###########
# One-off crawl of cars-data.com, guarded by ALREADY.
ALREADY=TRUE
if(!ALREADY) {
library(RCurl)
url.0 = "https://www.cars-data.com/en/"
www.0 = "www.cars-data.com/en/"
TXT = getURL(url.0)
txt=strsplit(TXT,"\n")[[1]]; length(txt)
txt = txt[grep("footerbrands",txt)+1]
txt = strsplit(txt,"<a href=")[[1]]
company = txt[-1]
n = length(company)
CO=rep(NA,n)
for(i in 1:n){
CO[i]=strsplit(strsplit(company[i],">")[[1]][2],"<")[[1]][1]
}
# m[i, j] = 1 when brand CO[j] appears in Fr$Main[i].
m = matrix(NA,length(Fr$Main),length(CO))
for(i in 1:length(Fr$Main)){
for( j in 1:length(CO)){
hit = grep(CO[j],Fr$Main[i],ignore.case =TRUE)
if(length(hit)==1) m[i,j] = 1
}
}
h = apply(m,1,sum,na.rm=TRUE)
Fr = Fr[h>0,] ; str(Fr)
H = apply(m,2,sum,na.rm=TRUE)
Make.Index = (1:length(CO))[H>0]
company = company[Make.Index]
n = length(company)
url1 =rep(NA,n)
# Preallocated result buffers; ii counts filled slots.
MODEL= rep(NA,100000)
LENGTH=rep(NA,100000)
ii = 0
# NOTE(review): starts at 33, presumably resuming an interrupted crawl.
for( i in 33:n){
print(i)
Txt = strsplit(company[i], " rel=")[[1]][1]
nk = nchar(Txt)
url1[i] = substr(Txt,2,nk-1)
TXT2 = getURL(url1[i])
txt2=strsplit(TXT2,"\n")[[1]]; length(txt2)
txt2=txt2[grep("col-4",txt2)]
txt2 = txt2[ grep(url1[i],txt2) ]
n2 = length(txt2)
print(c("n2",n2))
url2 =rep(NA,n2)
for( j in 1:n2){
#print(j)
Txt2 = strsplit(txt2[j], " title=")[[1]][1]
nk2 = nchar(Txt2)
url2[j] = substr(Txt2,38,nk2-1)
#print(url2[j])
#print(noquote(""))
TXT3 = getURL(url2[j])
txt3=strsplit(TXT3,"\n")[[1]]; length(txt3)
txt3 = txt3[grep("col-4",txt3)]
txt3 = txt3[grep("www.cars-data.com/en",txt3)]
#print(txt3)
#print(noquote(""))
n3 = length(txt3)
#print(n3)
url3 =rep(NA,n3)
# Only the "middle" generation (median index) is followed.
for( k in ceiling( median(1:n3) ) ){
#print(k)
Txt3 = strsplit(txt3[k], " title=")[[1]][1]
Txt3 = strsplit(Txt3, "href=")[[1]][2]
nk = nchar(Txt3)
url3[k] = substr(Txt3,2,nk-1)
TXT4 = getURL(url3[k])
txt4=strsplit(TXT4,"\n")[[1]];
txt4 = txt4[grep("-specs/",txt4)]
n4 = length(txt4)
url4 =rep(NA,n4)
# NOTE(review): loop variable `m` shadows the matrix `m` above.
for( m in ceiling( median(1:n4) ) ){
Line = strsplit(txt4[m], " title=")[[1]]
model = Line[2]
model = strsplit(model,">")[[1]][2]
model = strsplit(model,"<")[[1]][1]
Txt4 = Line[1]
Txt4 = strsplit(Txt4, "href=")[[1]][2]
nk = nchar(Txt4)
url4[m] = substr(Txt4,2,nk-1)
#print(url4[m])
TXT5 = getURL(url4[m])
txt5=strsplit(TXT5,"\n")[[1]];
# Length is on the line after "EXTERIOR DIMENSIONS".
line.no = grep("EXTERIOR DIMENSIONS",txt5)+1
Txt = txt5[line.no]
Txt = strsplit(Txt, "length")[[1]][2]
Txt = strsplit(Txt, "</dd>")[[1]][1]
Txt = strsplit(Txt, ">")[[1]]
Txt = Txt[ length(Txt) ]
ii=ii+1
MODEL[ii] = model
LENGTH[ii] = Txt
#print(url4[m])
#print(txt)
if( (ii %% 10) == 0 ) print(ii)
} # m
} # k
#print(noquote(""))
} # j
} # i
# First 4 characters of the scraped text are the numeric length.
ds = data.frame(Model=MODEL,
Length=as.numeric(substr(LENGTH,1,4)),
stringsAsFactors=FALSE)
str(ds)
ds = ds[!is.na(ds$Length),]
str(ds)
} # ALREADY
|
# Example data from the clinUtils package: CDISC pilot study ADaM datasets.
data(dataADaMCDISCP01)
labelVars <- attr(dataADaMCDISCP01, "labelVars")
# example of simple adverse event table
dataAE <- dataADaMCDISCP01$ADAE
subjectsSafety <- subset(dataADaMCDISCP01$ADSL, SAFFL == "Y")$USUBJID
# compute counts of subjects presenting each AE
# The formula is passed positionally: since R 4.2.0 the first formal of the
# formula method of aggregate() is named 'x', so 'formula =' is deprecated
# and triggers a warning.
tableAE <- stats::aggregate(
  USUBJID ~ AESOC:AEDECOD,
  data = dataAE,
  FUN = function(usubjid) length(unique(usubjid))
)
colnames(tableAE)[colnames(tableAE) == "USUBJID"] <- "N"
# and percentages (relative to the safety population)
tableAE$perc <- round(tableAE$N/length(subjectsSafety)*100, 3)
# sort records in decreasing percentage
tableAE <- tableAE[order(tableAE$perc, decreasing = TRUE), ]
# extract new variables labels
tableAELabels <- getLabelVar(
  var = colnames(tableAE),
  labelVars = labelVars,
  label = c(N = '# subjects', perc = "% subjects")
)
# 'colnames' for DT should be specified as c('new name' = 'old name', ...)
tableAELabelsDT <- setNames(names(tableAELabels), tableAELabels)
## create table with bar
# Three variants of the same table: default bar, fixed bar range, and a
# colour scale driven by percentage thresholds.
# default:
getClinDT(
data = tableAE,
barVar = "perc",
colnames = tableAELabelsDT
)
# specify range for the bar
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barRange = c(0, 100),
colnames = tableAELabelsDT
)
# change color according to threshold
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barColorThr = seq(from = 0, to = 100, by = 25),
colnames = tableAELabelsDT
)
## group per system organ class (and decreasing N):
# Total number of subjects with AEs per SOC. The formula is passed
# positionally: since R 4.2.0 'formula =' is a deprecated argument name for
# the formula method of aggregate().
tableAESOC <- aggregate(N ~ AESOC, data = tableAE, FUN = sum)
# Order the SOC factor by total count so the row groups appear sorted.
tableAE$AESOC <- factor(tableAE$AESOC,
  levels = tableAESOC[order(tableAESOC$N, decreasing = FALSE), "AESOC"]
)
tableAE <- tableAE[order(tableAE$AESOC, tableAE$perc, decreasing = TRUE), ]
# One collapsible row group per SOC; all rows on a single page.
getClinDT(
  data = tableAE,
  filter = "none",
  barVar = "perc",
  barRange = c(0, 100),
  colnames = tableAELabelsDT,
  rowGroupVar = "AESOC",
  pageLength = Inf
)
# expand the subject ID column, will
# be accessible when clicking on the '+' button
# Format URL correctly with: 'escape',
# please note that indexing starts at 0!
getClinDT(
data = tableAE,
barVar = "perc",
colnames = tableAELabelsDT,
expandVar = "USUBJID",
escape = grep("USUBJID", colnames(tableAE))-1
)
# fix size for columns
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
fixedColumns = list(leftColumns = 1),
columnsWidth = c(0.1, 0.7, 0.1, 0.1),
width = "350px" # change dimension table
)
# change default buttons
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# remove general filter
filter = "none",
# custom set of buttons
buttons = getClinDTButtons(type = c("csv", "excel", "pdf"))
)
# add button to select columns
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# custom set of buttons
buttons = getClinDTButtons(typeExtra = "colvis")
)
# export pdf in landscape format
buttons <- getClinDTButtons(
opts = list(pdf = list(orientation = "landscape"))
)
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# custom set of buttons
buttons = buttons
)
# hide the first column:
getClinDT(
data = tableAE,
nonVisibleVar = "AESOC"
)
# with specific caption
library(htmltools)
caption <- tags$caption(
"Number of subjects with adverse events grouped by system organ class.",
br(),
paste(
"Percentages are based on the total number of patients having",
"received a first study treatment."
)
)
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barRange = c(0, 100),
pageLength = Inf,
colnames = tableAELabelsDT,
rowGroupVar = "AESOC",
caption = caption
)
# Duplicate copy of the getClinDT example script (dataset dump repeats files).
labelVars <- attr(dataADaMCDISCP01, "labelVars")
# example of simple adverse event table
dataAE <- dataADaMCDISCP01$ADAE
subjectsSafety <- subset(dataADaMCDISCP01$ADSL, SAFFL == "Y")$USUBJID
# compute counts of subjects presenting each AE
# NOTE(review): 'formula =' is a deprecated argument name for the formula
# method of aggregate() since R 4.2.0 (first formal is 'x'); pass the
# formula positionally.
tableAE <- stats::aggregate(
formula = USUBJID ~ AESOC:AEDECOD,
data = dataAE,
FUN = function(usubjid) length(unique(usubjid))
)
colnames(tableAE)[colnames(tableAE) == "USUBJID"] <- "N"
# and percentages
tableAE$perc <- round(tableAE$N/length(subjectsSafety)*100, 3)
# sort records in decreasing percentage
tableAE <- tableAE[order(tableAE$perc, decreasing = TRUE), ]
# extract new variables labels
tableAELabels <- getLabelVar(
var = colnames(tableAE),
labelVars = labelVars,
label = c(N = '# subjects', perc = "% subjects")
)
# 'colnames' for DT should be specified as c('new name' = 'old name', ...)
tableAELabelsDT <- setNames(names(tableAELabels), tableAELabels)
## create table with bar
# default:
getClinDT(
data = tableAE,
barVar = "perc",
colnames = tableAELabelsDT
)
# specify range for the bar
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barRange = c(0, 100),
colnames = tableAELabelsDT
)
# change color according to threshold
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barColorThr = seq(from = 0, to = 100, by = 25),
colnames = tableAELabelsDT
)
## group per system organ class (and decreasing N):
# NOTE(review): same deprecated 'formula =' argument name as above.
tableAESOC <- aggregate(formula = N ~ AESOC, data = tableAE, FUN = sum)
tableAE$AESOC <- factor(tableAE$AESOC,
levels = tableAESOC[order(tableAESOC$N, decreasing = FALSE), "AESOC"]
)
tableAE <- tableAE[order(tableAE$AESOC, tableAE$perc, decreasing = TRUE), ]
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barRange = c(0, 100),
colnames = tableAELabelsDT,
rowGroupVar = "AESOC",
pageLength = Inf
)
# expand the subject ID column, will
# be accessible when clicking on the '+' button
# Format URL correctly with: 'escape',
# please note that indexing starts at 0!
getClinDT(
data = tableAE,
barVar = "perc",
colnames = tableAELabelsDT,
expandVar = "USUBJID",
escape = grep("USUBJID", colnames(tableAE))-1
)
# fix size for columns
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
fixedColumns = list(leftColumns = 1),
columnsWidth = c(0.1, 0.7, 0.1, 0.1),
width = "350px" # change dimension table
)
# change default buttons
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# remove general filter
filter = "none",
# custom set of buttons
buttons = getClinDTButtons(type = c("csv", "excel", "pdf"))
)
# add button to select columns
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# custom set of buttons
buttons = getClinDTButtons(typeExtra = "colvis")
)
# export pdf in landscape format
buttons <- getClinDTButtons(
opts = list(pdf = list(orientation = "landscape"))
)
getClinDT(
data = tableAE,
colnames = tableAELabelsDT,
# custom set of buttons
buttons = buttons
)
# hide the first column:
getClinDT(
data = tableAE,
nonVisibleVar = "AESOC"
)
# with specific caption
library(htmltools)
caption <- tags$caption(
"Number of subjects with adverse events grouped by system organ class.",
br(),
paste(
"Percentages are based on the total number of patients having",
"received a first study treatment."
)
)
getClinDT(
data = tableAE,
filter = "none",
barVar = "perc",
barRange = c(0, 100),
pageLength = Inf,
colnames = tableAELabelsDT,
rowGroupVar = "AESOC",
caption = caption
)
# Forecast-evaluation analysis script: loads helpers/data, then builds plots.
# NOTE(review): rm(list = ls()) wipes the caller's workspace and is generally
# discouraged in scripts -- confirm it is intentional here.
rm(list = ls())
source("lib.R")
source("fun.R")
load('data/stationary_data_ext.RData')
# investment level -----
# Time series of gross fixed capital formation (the forecast target).
invest_plot <-
ggplot(df %>% na.omit) +
geom_line(aes(y = investment, x = time(df %>% na.omit)))+
labs(title = "",
y = "Валовое накопление основного капитала",
x = "Дата") +
theme_bw()
cairo_pdf("plot/invest_plot.pdf", width = 10, height = 5)
print(invest_plot)
dev.off()
## all vars plot ----
# All standardised series on one panel; the investment series is drawn
# opaque, all others faded (alpha 0.3).
# NOTE(review): show.legend = F -- prefer FALSE over the reassignable F.
all_plot <-
df %>%
na.omit %>%
as.data.frame %>%
rownames_to_column('date') %>%
melt(id.vars = 'date') %>%
group_by(variable) %>%
mutate(value = scale(value),
date = as.Date(as.yearqtr(date))) %>%
ggplot() +
geom_line(aes(date, value, group=variable,
alpha = ifelse(variable %in% c('investment'), 1, 0.3)), show.legend = F)+
labs(title = "",
y = "",
x = "") +
scale_x_date(limits = c('1996-01-01', '2019-01-01') %>% as.Date)+
# dashed lines mark the two alternative training-sample start dates
geom_vline(xintercept =c('1996-01-01','2000-01-01') %>% as.Date, linetype='dashed')+
scale_y_continuous(limits = c(-5, 3))+
labs(x = 'Дата',
y='')+
theme_bw()
cairo_pdf("plot/allvars.pdf", width = 10, height = 5)
print(all_plot)
dev.off()
##### rmsfe table -----
# Average test RMSE per model and horizon, exported as LaTeX (xtable),
# separately for the two training-sample start dates.
load('shinydata.RData')
# Fixed model ordering for tables and plots.
scoredf$model <- factor(scoredf$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'
))
# 1996.I start.
scoredf %>%
filter(type == 'test') %>%
filter(startdt == '1996-01-01') %>%
filter(lag==0, h > 0) %>%
group_by(model, lag, h, startdt) %>%
summarise(rmse = mean(rmse)) %>%
dcast(model ~ h) %>%
xtable %>%
print(include.rownames = FALSE)
# 2000.I start.
scoredf %>%
filter(type == 'test') %>%
filter(startdt == '2000-01-01') %>%
filter(lag==0, h > 0) %>%
group_by(model, lag, h, startdt) %>%
summarise(rmse = mean(rmse)) %>%
dcast(model ~ h) %>%
xtable %>%
print(include.rownames = FALSE)
#### dm test ----
# Diebold-Mariano tests of forecast accuracy plus the RMSE difference
# between the two training starts; significance stars appended to diff.
dmdf <- get.dm(out_true %>%
filter(lag==0, h > 0) %>%
na.omit)
# diff = rmse(1996 start) - rmse(2000 start) per model/lag/horizon.
dmdiff <- scoredf %>%
filter(type == 'test') %>%
group_by(model, lag, h) %>%
arrange(startdt) %>%
summarise(diff = rmse[1]-rmse[2]) %>%
inner_join(dmdf, by =c('model', 'lag', 'h'))
# LaTeX table: diff with significance stars, p-value in parentheses below.
dmdiff %>%
filter(!model %in% c('Случайное блуждание')) %>%
mutate(diff = paste0(format(round(diff,3), nsmall = 2),
ifelse(pvalue <= 0.1,
ifelse(pvalue > 0.05, '.',
ifelse (pvalue > 0.01, "*",
ifelse(pvalue > 0.001, '**', '***'))),'')),
pvalue = paste0(' (',format(round(pvalue,3), nsmall = 2),')'),
lastrow = '') %>%
melt(id.vars = c('model', 'h'), measure.vars = c('diff', 'pvalue', 'lastrow')) %>%
mutate(model_id = model,
model = ifelse(variable == 'diff',model, ifelse(variable == 'pvalue', ' ', ''))) %>%
mutate(model = factor(model, levels = model %>% unique %>% sort %>% rev)) %>%
dcast(model_id+model~h) %>%
select(-model_id) %>%
# select(-model) %>%
# add_column('h =' = '', .after = 1) %>%
xtable %>%
print(include.rownames = FALSE)
# Heatmap: sign of significant DM statistics per model and horizon.
dmsd <- dmdf %>%
filter(!model %in% c('Случайное блуждание'), h>0) %>%
mutate(Изменение = ifelse(pvalue > 0.05,
'0',
ifelse(stat < 0,
'+',
'-')
)) %>%
ggplot(aes(factor(h), factor(model, levels = rev(unique(model))))) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(y = 'Модель',
x = 'Горизонт прогнозирования')+
theme(legend.position="bottom")
cairo_pdf('plot/dmsd.pdf')
print(dmsd)
dev.off()
#### dm test 2 (between different models) ----
# Pairwise Diebold-Mariano tests between every pair of models, per horizon
# and per training start; produces the long data frame `outmat`.
IMat <-out_true%>%
filter(h > 0) %>%
na.omit %>%
filter(date > as.Date(as.yearqtr( enddt)+h/4),
date <= as.Date(as.yearqtr( enddt)+(h+1)/4))
n_models <- IMat$model %>% unique %>% length
outmat <- expand.grid(i = 1:n_models,
j = 1:n_models,
startdt = c('1996-01-01','2000-01-01'),
h = 1L:8L) %>%
split(1:nrow(.)) %>%
map_dfr(function(x){
i <- x$i
j <- x$j
# wide matrix of forecasts: one column per model, one row per date
inmat <- IMat %>%
filter(h == x$h,
startdt == as.character(x$startdt)) %>%
dcast(date~ model, value.var = 'pred') %>%
select(-date) %>%
as.matrix
# realised values (identical across models; LASSO rows used as source)
realized <- IMat %>%
filter(h == x$h,
startdt == as.character(x$startdt),
model == 'LASSO') %>%
na.omit %>%
pull(true)
# NOTE(review): h1 is a vector (one element per date) but DM.test's H1
# argument expects a single alternative -- confirm intended behaviour.
h1 <- ifelse((inmat[,i]-realized)^2 <
(inmat[ ,j]-realized)^2, 'more', 'less')
if(i != j){
data.frame(model_column = colnames(inmat)[j],
model_row = colnames(inmat)[i],
h1 = h1,
pvalue =DM.test(inmat[, i],inmat[, j],
realized,loss.type="SE",
c=TRUE,H1=h1) %>%
.$p.value ,
h = x$h,
startdt = x$startdt)
} else{
# a model against itself: by convention p-value 1, label 'same'
data.frame(model_column = colnames(inmat)[j],
model_row = colnames(inmat)[i],
h1 = 'same',
pvalue =1,
h = x$h,
startdt = x$startdt)
}
}
)
# Facet labels for the forecast horizons.
h.labs <- c(#'h = 0',
"h = 1", 'h = 2', "h = 3", 'h = 4',"h = 5", 'h = 6', "h = 7", 'h = 8')
names(h.labs) <- c(#"0",
"1", '2','3', '4', '5', '6', '7', '8')
# Pairwise DM results for the 1996.I training start, reshaped for plotting:
# p-values are recoded into signed significance labels ('.': p in (0.05, 0.1],
# '*': (0.01, 0.05], '**': <= 0.01), and the model factors get an explicit
# ordering for the heatmap axes.
# FIX: the original factor() calls contained a stray leftover `levels =`
# vector (6 model names) nested inside the real 16-name vector, which made
# factor() fail with duplicated levels; the stray vector is removed.
dm_96_toplot <- outmat %>%
  mutate(pvalue = ifelse(is.nan(pvalue), 1, pvalue)) %>%
  filter(startdt == '1996-01-01'#,
         # model_column != 'Случайное блуждание',
         # model_row != 'Случайное блуждание'
  ) %>%
  mutate(
    Изменение = ifelse(pvalue > 0.1,
                       'не значимо',
                       ifelse(pvalue > 0.05,
                              ifelse(h1 == 'less', '-.','+.'),
                              ifelse(
                                pvalue > 0.01,
                                ifelse(h1 == 'less', '-*','+*'), ifelse(h1 == 'less', '-**','+**')))
    ),
    model_column=factor(model_column,
                        levels = c('Случайное блуждание',
                                   "AR","Adaptive LASSO",
                                   "Elastic Net","LASSO","Post-LASSO","Ridge",
                                   "Spike and Slab",
                                   "Бустинг (eta = 0,1)",
                                   "Бустинг (eta = 0,2)",
                                   "Бустинг (eta = 0,3)",
                                   "Бустинг (eta = 0,4)",
                                   "Случайный лес (N = 100)","Случайный лес (N = 500)"
                                   ,"Случайный лес (N = 1000)",
                                   "Случайный лес (N = 2000)")),
    # rows are ordered top-to-bottom, hence the reversed level vector
    model_row=factor(model_row,
                     levels = c('Случайное блуждание',
                                "AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
                                "Spike and Slab",
                                "Бустинг (eta = 0,1)",
                                "Бустинг (eta = 0,2)",
                                "Бустинг (eta = 0,3)",
                                "Бустинг (eta = 0,4)",
                                "Случайный лес (N = 100)","Случайный лес (N = 500)"
                                ,"Случайный лес (N = 1000)",
                                "Случайный лес (N = 2000)") %>% rev)) %>%
  mutate(Изменение = factor(Изменение, levels = c('+.', '-.', '+*','-*', '+**', '-**', 'не значимо')))
# Heatmap of the pairwise DM significance labels (1996.I start), faceted by
# forecast horizon; manual fill scale matches the label ordering.
dm_96 <- dm_96_toplot%>%
ggplot(aes(model_column, model_row)) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(x = '',
y = '')+
theme(legend.position="bottom",
legend.title=element_blank(),
axis.text.y = element_text(size=8),
axis.text.x = element_text(angle = 90, size=8))+
facet_wrap(~h,
labeller = labeller(h = h.labs))+
# greens = '+' labels, reds = '-' labels, white = not significant
scale_fill_manual(values = c("#add1a9",
'#db696f',
'#71d466',
'#d9454d',
"#2bd918",
'#d60f1a',
'white'))+
scale_x_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)"))+
scale_y_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,
"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)") %>% rev)
cairo_pdf('plot/dm96.pdf')
print(dm_96)
dev.off()
# Pairwise DM results for the 2000.I training start, reshaped for plotting
# (same recoding and factor ordering as for the 1996.I start).
# FIX: the original factor() calls contained a stray leftover `levels =`
# vector (6 model names) nested inside the real 16-name vector, which made
# factor() fail with duplicated levels; the stray vector is removed.
dm_00_toplot <- outmat %>%
  mutate(pvalue = ifelse(is.nan(pvalue), 1, pvalue)) %>%
  filter(startdt == '2000-01-01'#,
         # model_column != 'Случайное блуждание',
         # model_row != 'Случайное блуждание'
  ) %>%
  mutate(
    Изменение = ifelse(pvalue > 0.1,
                       'не значимо',
                       ifelse(pvalue > 0.05,
                              ifelse(h1 == 'less', '-.','+.'),
                              ifelse(
                                pvalue > 0.01,
                                ifelse(h1 == 'less', '-*','+*'), ifelse(h1 == 'less', '-**','+**')))
    ),
    model_column=factor(model_column,
                        levels = c('Случайное блуждание',
                                   "AR","Adaptive LASSO",
                                   "Elastic Net","LASSO","Post-LASSO","Ridge",
                                   "Spike and Slab",
                                   "Бустинг (eta = 0,1)",
                                   "Бустинг (eta = 0,2)",
                                   "Бустинг (eta = 0,3)",
                                   "Бустинг (eta = 0,4)",
                                   "Случайный лес (N = 100)","Случайный лес (N = 500)"
                                   ,"Случайный лес (N = 1000)",
                                   "Случайный лес (N = 2000)")),
    # rows are ordered top-to-bottom, hence the reversed level vector
    model_row=factor(model_row,
                     levels = c('Случайное блуждание',
                                "AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
                                "Spike and Slab",
                                "Бустинг (eta = 0,1)",
                                "Бустинг (eta = 0,2)",
                                "Бустинг (eta = 0,3)",
                                "Бустинг (eta = 0,4)",
                                "Случайный лес (N = 100)","Случайный лес (N = 500)"
                                ,"Случайный лес (N = 1000)",
                                "Случайный лес (N = 2000)") %>% rev)) %>%
  mutate(Изменение = factor(Изменение, levels = c('+.', '-.', '+*','-*', '+**', '-**', 'не значимо')))
# Heatmap of the pairwise DM significance labels (2000.I start), faceted by
# forecast horizon; same layout and fill scale as the 1996.I version.
dm_00 <- dm_00_toplot%>%
ggplot(aes(model_column, model_row)) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(x = '',
y = '')+
theme(legend.position="bottom",
legend.title=element_blank(),
axis.text.y = element_text(size=8),
axis.text.x = element_text(angle = 90, size=8))+
facet_wrap(~h,
labeller = labeller(h = h.labs))+
# greens = '+' labels, reds = '-' labels, white = not significant
scale_fill_manual(values = c("#add1a9",
'#db696f',
'#71d466',
'#d9454d',
"#2bd918",
'#d60f1a',
'white'))+
scale_x_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)"))+
scale_y_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,
"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)") %>% rev)
cairo_pdf('plot/dm00.pdf')
print(dm_00)
dev.off()
# lasso coefs ----
# For each training window (startdt, enddt) compute the standard deviation
# of every variable, so that LASSO coefficients fitted on standardised data
# can later be converted back to the original units.
load('data/stationary_data_ext.RData')
sddata <- expand.grid(startdt = c(as.Date('1996-01-01'), as.Date('2000-01-01')),
                      enddt = seq(as.Date('2012-10-01'), as.Date('2018-10-01'), by = 'quarter')
) %>%
  # one list element per (startdt, enddt) row; seq_len() replaces the
  # redundant seq(1:nrow(.)) idiom
  split(seq_len(nrow(.))) %>%
  map_dfr(function(x){
    df %>% na.omit %>% as.data.frame() %>%
      rownames_to_column('date')%>%
      mutate(date = as.yearqtr(date) %>% as.Date) %>%
      filter(date >= x$startdt, date <= x$enddt) %>%
      select(-date) %>%
      sapply( sd) %>%
      as.data.frame %>%
      t %>%
      as_tibble %>%
      mutate(enddt = x$enddt,
             startdt = x$startdt, .)
  })
# computing the lasso predictors ----
# Rescale the stored LASSO coefficients from the standardised scale back to
# the original variable units: beta_orig = beta_std / sd(x) * sd(y).
source('fun.R', encoding = 'utf-8')
source('lib.r')
load('out/full/out_lasso.RData')
load('out/full/out_zero.RData')
lasso_beta <-
c(#out_zero[151:200],
out_lasso[-c(1:50)]
) %>%
plyr::compact()%>%
map_dfr(
function(x, i){
# normalise the start date ('1996-04-01' runs belong to the 1996 sample)
x$startdt = as.character(x$startdt)
x$startdt = ifelse(x$startdt == '1996-04-01', '1996-01-01', x$startdt)
x$startdt = as.Date(x$startdt)
if(x$h == 0){
actsd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
select(-c(investment, startdt, enddt, invest2gdp, GDPEA_Q_DIRI))
# s.d. of y
ysd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
pull(investment) %>%
as.numeric
} else{
actsd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
select(-c(startdt, enddt, gdplag, investmentlag, invest2gdplag))
ysd <- actsd[1,1] %>%
as.numeric
}
betaval = x$model_fit$beta
# sanity check: coefficient names must line up with the s.d. columns
# NOTE(review): stop() is called without a message on failure.
if(!all((betaval%>% rownames) ==(actsd %>% colnames))){
print(actsd %>% colnames)
print(betaval%>% rownames)
stop()
}
# guard against empty components before building the result row
if(length(x$model) == 0|
length(x$h) == 0|
length(x$startdt) == 0|
length(x$enddt) == 0|
length(betaval%>% rownames)==0|
length((betaval%>% as.numeric)/(actsd[1,] %>% as.numeric)*(ysd)) == 0){
print(actsd[1,])
stop()
}
data.frame(model = x$model,
h = x$h,
startdt=x$startdt,
enddt = x$enddt,
predictor = betaval%>% rownames,
beta = (betaval%>% as.numeric)/(actsd[1,] %>% as.numeric)*(ysd)
)
}
)
# Facet labels for forecast horizons.
h.labs <- c(#'h = 0',
"h = 1", 'h = 2', "h = 3", 'h = 4',"h = 5", 'h = 6', "h = 7", 'h = 8')
names(h.labs) <- c(#"0",
"1", '2','3', '4', '5', '6', '7', '8')
# Number of non-zero LASSO coefficients per window, faceted by horizon;
# line type distinguishes the two training-sample start dates.
lasso_nonzero <- lasso_beta %>%
mutate(startdt = factor(startdt, c('2000-01-01','1996-01-01'),
labels = c('2000.I', '1996.I'))) %>%
group_by( h, startdt, enddt) %>%
summarise(nz = sum(beta != 0)) %>%
ggplot(aes(enddt, nz, linetype = startdt))+
geom_line()+
labs(title = "",
y = "Количество переменных",
x = "Дата",
color = '',
linetype = 'Левая граница\nвыборки')+
facet_wrap(~h, scales = 'free',
labeller = labeller(h = h.labs))+
theme_bw()+
theme(legend.position="bottom")
# number of selected variables
cairo_pdf('plot/lasso_nonzero.pdf')
print(lasso_nonzero)
dev.off()
# Interactive plot of selected coefficient paths over the rolling windows
# (2000.I start only), rendered with plotly for exploration.
lasso_p <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01'
,
predictor %in% c(
'investment',
'mkr_1d',
'mkr_7d',
'gov_6m',
'GKO',
'invest2gdp',
'oil',
'rts',
'GDPEA_Q_DIRI',
'gdplag', 'investmentlag', 'invest2gdplag'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation work
#'CNSTR_Q_DIRI'# index of construction output
)
) %>%
ungroup%>%
#mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta, color=interaction(predictor,startdt)))+
facet_wrap(vars(h))
plotly::ggplotly(lasso_p)
# lasso coefs h <=4
# Top-5 predictors (by mean absolute coefficient) per horizon, as LaTeX.
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h<=4) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# lasso coefs h >4
# Same table for the longer horizons; coefficients scaled by 1/100.
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h>4) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)/100) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# Combined table for selected horizons (0, 1, 2, 7, 8).
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h %in% c(0,1,2,7,8 )) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)/100) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# GDP -----
# Coefficient paths of the GDP-related predictors over the rolling windows,
# for short horizons (h < 2), faceted by horizon.
gdp <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h<2,
startdt== '2000-01-01',
predictor %in% c(
'GDPEA_Q_DIRI',
'gdplag'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation work
#'CNSTR_Q_DIRI'# index of construction output
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
cairo_pdf('plot/gdp.pdf')
print(gdp)
dev.off()
# Coefficient path of the oil-price predictor for short horizons (h < 3);
# the pipe result is printed directly (not assigned/exported).
lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h<3,
startdt== '2000-01-01',
predictor %in% c(
'oil'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation work
#'CNSTR_Q_DIRI'# index of construction output
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
# Coefficient paths of the investment predictors (level and lag) for short
# horizons (h < 2), exported to PDF.
invest <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h < 2,
startdt== '2000-01-01'
,
predictor %in% c(
'investment',
'investmentlag'
# 'mkr_1d',
# 'mkr_7d',
# 'gov_6m',
# 'GKO',
# 'invest2gdp',
# 'oil',
# 'rts',
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
cairo_pdf('plot/invest.pdf')
print(invest)
dev.off()
# Coefficient paths for the 7-day interbank rate (mkr_7d), horizons 1-3.
# Saved to plot/mkr_7d.pdf.
mkr_7d <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h>0,h<4,
startdt== '2000-01-01'
,
predictor %in% c(
'mkr_7d'
# 'mkr_7d',
# 'gov_6m',
# 'GKO',
# 'invest2gdp',
# 'oil',
# 'rts',
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
cairo_pdf('plot/mkr_7d.pdf')
print(mkr_7d)
dev.off()
# Coefficient paths for the investment-to-GDP ratio (level and lag), h > 6.
# Saved to plot/invest2gdp.pdf.
invest2gdp <-
lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h>6,
startdt== '2000-01-01'
,
predictor %in% c(
'invest2gdp',
'invest2gdplag'
# 'oil'
# 'rts'
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
cairo_pdf('plot/invest2gdp.pdf')
print(invest2gdp)
dev.off()
# Coefficient paths for the RTS stock index, horizons 1-3.
# Saved to plot/rts.pdf.
rts <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(h>0, h<4,
startdt== '2000-01-01'
,
predictor %in% c(
'rts'
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)/100) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta))+
facet_grid(h~., scales = 'free',
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()
cairo_pdf('plot/rts.pdf')
print(rts)
dev.off()
# in 9
# "Spaghetti" plot: ALL non-zero coefficient paths drawn faintly (alpha 0.1)
# with the GDP predictor's path overlaid in bold, asinh-scaled y axis.
# Saved to plot/gdp9.pdf.
# NOTE(review): the trailing comma after the startdt condition (before the
# commented-out clause) relies on tidyverse verbs tolerating an empty trailing
# argument — brittle; consider removing it.
gdp <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
#' predictor %in% c(
#' 'GDPEA_Q_DIRI'
#' #,
#' #'RTRD_Q_DIRI',
#' #'EMPLDEC_Q',
#' #'CONI_Q_CHI', # price index for construction and installation works
#' #'CNSTR_Q_DIRI'# construction output index
#' )
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
# keep only predictors that were selected (non-zero) at least once
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta, group = predictor),
alpha = 0.1)+
facet_wrap(h~.,
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
scale_y_continuous(trans= 'asinh')+
# bold overlay: the GDP predictor only (same pipeline, narrowed filter)
geom_line(data = lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
predictor %in% c(
'GDPEA_Q_DIRI'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction output index
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)),
mapping = aes(enddt, beta),
size =1)
cairo_pdf('plot/gdp9.pdf')
print(gdp)
dev.off()
# Spaghetti plot with the investment predictor overlaid in bold.
# Saved to plot/invest9.pdf.
invest <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
#' )
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
# keep only predictors that were selected (non-zero) at least once
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta, group = predictor),
alpha = 0.1)+
facet_wrap(h~.,
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
scale_y_continuous(trans= 'asinh')+
# bold overlay: the investment predictor only
geom_line(data = lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
predictor %in% c(
'investment'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction output index
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)),
mapping = aes(enddt, beta),
size =1)
cairo_pdf('plot/invest9.pdf')
print(invest)
dev.off()
# Spaghetti plot with the investment-to-GDP ratio overlaid in bold.
# Saved to plot/invest2gdp9.pdf.
invest2gdp <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
#' predictor %in% c(
#' 'GDPEA_Q_DIRI'
#' #,
#' #'RTRD_Q_DIRI',
#' #'EMPLDEC_Q',
#' #'CONI_Q_CHI', # price index for construction and installation works
#' #'CNSTR_Q_DIRI'# construction output index
#' )
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
# keep only predictors that were selected (non-zero) at least once
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta, group = predictor),
alpha = 0.1)+
facet_wrap(h~.,
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()
)+
scale_y_continuous(trans= 'asinh')+
# bold overlay: invest2gdp only
geom_line(data = lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
predictor %in% c(
'invest2gdp'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction output index
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)),
mapping = aes(enddt, beta),
size =1)
cairo_pdf('plot/invest2gdp9.pdf')
print(invest2gdp)
dev.off()
# Spaghetti plot with the RTS index overlaid in bold. Saved to plot/rts9.pdf.
rts <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
#' predictor %in% c(
#' 'GDPEA_Q_DIRI'
#' #,
#' #'RTRD_Q_DIRI',
#' #'EMPLDEC_Q',
#' #'CONI_Q_CHI', # price index for construction and installation works
#' #'CNSTR_Q_DIRI'# construction output index
#' )
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
# keep only predictors that were selected (non-zero) at least once
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta, group = predictor),
alpha = 0.1)+
facet_wrap(h~.,
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
scale_y_continuous(trans= 'asinh')+
# bold overlay: rts only
geom_line(data = lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
predictor %in% c(
'rts'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction output index
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)),
mapping = aes(enddt, beta),
size =1)
cairo_pdf('plot/rts9.pdf')
print(rts)
dev.off()
# Spaghetti plot with the 7-day interbank rate overlaid in bold.
# Saved to plot/mkr9.pdf.
mkr <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
#' predictor %in% c(
#' 'GDPEA_Q_DIRI'
#' #,
#' #'RTRD_Q_DIRI',
#' #'EMPLDEC_Q',
#' #'CONI_Q_CHI', # price index for construction and installation works
#' #'CNSTR_Q_DIRI'# construction output index
#' )
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
# keep only predictors that were selected (non-zero) at least once
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)) %>%
ggplot()+
geom_line(aes(enddt, beta, group = predictor),
alpha = 0.1)+
facet_wrap(h~.,
labeller = labeller(h = h.labs))+
labs(title = "",
y = "Коэффициент",
x = "Дата") +
theme_bw()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
scale_y_continuous(trans= 'asinh')+
# bold overlay: mkr_7d only
geom_line(data = lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01',
predictor %in% c(
'mkr_7d'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction output index
)
) %>%
ungroup%>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
ungroup() %>%
group_by(h, predictor, startdt) %>%
filter(mean(abs(beta))>0) %>%
ungroup %>%
mutate(h = as.factor(h)),
mapping = aes(enddt, beta),
size =1)
cairo_pdf('plot/mkr9.pdf')
print(mkr)
dev.off()
library(scales)
# Inverse-hyperbolic-sine transformation for ggplot2/scales axes.
# Defined so that scale_*_continuous(trans = 'asinh') can resolve it by name
# (scales looks up "<name>_trans" — presumably why this exact name is used).
# asinh behaves like log for large |x| but is defined at 0 and for negatives.
asinh_trans <- function() {
  trans_new(
    name      = "asinh",
    transform = asinh,
    inverse   = sinh
  )
}
# list of series ----
# Build a LaTeX table of all series used: display name, stationarity
# transformation code ('0'/'1'/'2') and data source; printed via xtable.
source('fun.R', encoding = 'utf-8')
load('data/stationary_data_ext.RData')
tibble(name = df %>% names()) %>%
mutate(Название = correct.names.pred(name),
Трансформация = ifelse(name %in% c('reer','neer','oil','rts'),
'1',
ifelse(name %in% c('investment', 'CPI_Q_CHI',
'invest2gdp',
# 'deflator', only from 1996
'GDPEA_Q_DIRI',
'EMPLDEC_Q',
'UNEMPL_Q_SH',
'CONSTR_Q_NAT',
###### 'TRP_Q_PASS_DIRI',
'WAG_Q',
'CONI_Q_CHI',
'CTI_Q_CHI',
'AGR_Q_DIRI',
'RTRD_Q_DIRI',
'HHI_Q_DIRI',
'M0_Q',
'M2_Q',
#### 'IR_Q',
#### 'ICR_Q',
'CBREV_Q',
'CBEX_Q',
'FBREV_Q',
'FBEX_Q',
'RDEXRO_Q',# official USD exchange rate
'RDEXRM_Q',# USD exchange rate on MICEX
'LIAB_T_Q',# accounts payable, period average
'LIAB_UNP_Q',# overdue accounts payable, period average
'LIAB_S_Q',# payables to suppliers, period average
'LIAB_B_Q',# payables to the budget, period average
'DBT_T_Q',# accounts receivable, period average
'DBT_UNP_Q',# overdue accounts receivable, period average
########## 'DBT_P_Q',# receivables from buyers, period average
'EX_T_Q',# exports
'IM_T_Q',# imports
'PPI_EA_Q' # (after 2004-01)
), '2', '0'
)),
Источник = ifelse(name %in% c('mkr_1d', 'mkr_7d'),
'Банк России',
ifelse(name %in% c('reer', 'neer',
'oil', 'rts'),
'Bloomberg',
ifelse(name == 'invest2gdp','Расчеты автора',
'Росстат'
))
)) %>% select(-name) %>%
arrange(Название) %>%
xtable %>%
print(include.rownames = FALSE)
# investment RTRD_Q_DIRI GDPEA_Q_DIRI UNEMPL_Q_SH CPI_Q_CHI
load('data/raw.RData')
# med forecast -----
# Fix the display order of the model factor, then shorten the boosting /
# random-forest labels (from/to lists are positionally aligned in mapvalues).
out_cumulative_med <- out_cumulative
out_cumulative_med$model <- factor(out_cumulative_med$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'))
out_cumulative_med$model <- plyr::mapvalues(out_cumulative_med$model, from = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'), to = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (0,1)',
'Бустинг (0,2)',
'Бустинг (0,3)',
'Бустинг (0,4)',
'Случайный лес (100)',
'Случайный лес (500)',
'Случайный лес (1000)',
'Случайный лес (2000)'))
# Ministry-of-Economy (МЭР) official forecasts, long format; keep only rows
# where the forecast was made before the target year.
med_forecast <- import('data/med_forecast.csv', encoding = 'UTF-8', header = TRUE) %>%
melt %>%
set_names(c('fctname', 'year', 'value')) %>%
mutate(year = as.character(year) %>% as.numeric) %>%
mutate(fctyear = substr(fctname, 1, 4) %>% as.numeric) %>%
filter(fctyear < year)
# Aggregate quarterly cumulative forecasts to annual growth rates:
# h in 2..5 ~ one year ahead, h > 4 coded as the second year (h_year).
my_forecast <-
out_cumulative_med %>%
dplyr::group_by(forecastdate, model, h) %>%
filter(enddt == forecastdate) %>%
ungroup() %>%
filter(h!=0) %>%
filter(h > 1, h < 6) %>%
mutate(year = year(date),
h_year = if_else(h<=4, 1, 2)) %>%
dplyr::group_by(model,h_year, year, startdt, forecastdate) %>%
summarise(pred = sum(pred_cumulative),
true_lag = sum(true_lag),
true = sum(true_cumulative)) %>%
mutate(pred = 100*(pred/ true_lag - 1),
true = 100*(true/ true_lag - 1)) %>%
ungroup %>% select(-forecastdate)
# Realised annual investment growth (%, year over year) from the raw series.
raw_y <- rawdata$investment %>%
as.data.frame() %>%
rownames_to_column('year') %>%
mutate(year = year(as.yearqtr(year))) %>%
group_by(year) %>%
summarise(investment = sum(investment)) %>%
mutate(investment = 100*(investment/lag(investment)-1))
# One-year-ahead model forecasts, most recent estimation window, with the
# weaker boosting / random-forest variants dropped.
forec_vs <- my_forecast %>%
select(-c(true_lag, true)) %>%
filter(h_year ==1, startdt == max(startdt)) %>%
filter(!is.na(pred)) %>%
filter(!model %in% c('Случайное блуждание', 'AR',
'Бустинг (0,1)',
'Бустинг (0,3)',
'Бустинг (0,4)',
'Случайный лес (100)',
'Случайный лес (500)',
'Случайный лес (1000)'
))
# plot1 is built only to harvest its legend later via g_legend().
plot1 <- forec_vs %>% ggplot()+
geom_bar(aes(year, pred, fill = model),
stat="identity",
# fill = 'white',
position = 'dodge',
#position = position_dodge2(width = 0.9, preserve = "single"),
color='black')+
scale_fill_discrete(name = "Модель")+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
# plot2: МЭР official forecasts as translucent blue bars (latest vintage per
# year, 2014-2018). Also built only to harvest its legend via g_legend().
plot2 <- ggplot()+
geom_bar(aes(year, value, group=fctname,
fill = 'Прогноз МЭР',
alpha ='Прогноз МЭР'), med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013 ) %>%
mutate(fctname = factor(fctname,
levels = c('2013 (консервативный)',
'2013(базовый) ',
'2014',
'2015',
'2016 (базовый +) ',
'2016 (базовый)',
'2017')))
,
stat="identity",
position = 'dodge'
)+
scale_alpha_manual(values = 0.4)+
scale_fill_manual(values = 'blue')+
guides(fill = guide_legend(" "),
alpha = guide_legend(" "))+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
# plot3: realised values as black point-line. Built only for its legend.
plot3 <- ggplot()+
geom_point(aes(year, investment, color = 'Наблюдаемые значения'),
data = raw_y %>% filter(year <2019,year >2013),
size = 2)+
geom_line(aes(year, investment, color = 'Наблюдаемые значения'),
data = raw_y %>% filter(year <2019,year >2013)
)+
scale_size_manual(values = 2)+
scale_color_manual(values = 'black')+
guides(size = guide_legend(" "),
color = guide_legend(" "))+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
# Combined chart: model forecasts (coloured bars) + МЭР forecasts (blue,
# translucent) + realised values (black line); legends attached separately.
# NOTE(review): aes(year, value, group=fctname, ) carries a trailing empty
# argument — tolerated by ggplot2 but fragile.
p <- forec_vs %>% ggplot()+
geom_bar(aes(year, pred, fill = model),
stat="identity",
# fill = 'white',
position = 'dodge',
#position = position_dodge2(width = 0.9, preserve = "single"),
color='black')+
geom_bar(aes(year, value, group=fctname,
),
fill = 'blue',
alpha =0.4,
med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013 )%>%
mutate(fctname = factor(fctname,
levels = c('2013 (консервативный)',
'2013(базовый) ',
'2014',
'2015',
'2016 (базовый +) ',
'2016 (базовый)',
'2017')))
,
stat="identity",
position = 'dodge'
)+geom_point(aes(year, investment),
data = raw_y %>% filter(year <2019,year >2013),
color = 'black', size = 2)+
geom_line(aes(year, investment),
data = raw_y %>% filter(year <2019,year >2013),
color = 'black')+
scale_fill_discrete(guide="none")+
labs(#title = 'Investment in Russia: МЭР forecasts vs author forecasts',
#subtitle = 'Forecast horizon: one year',
x = 'Дата',
y = 'Изменение валового накопления основного капитала,\n в % к прошлому году')+
theme_bw()
# Assemble the combined chart with the three harvested legends stacked in a
# right-hand column (g_legend comes from fun.R).
blank <- grid.rect(gp=gpar(col="white"))
grid.arrange(p,
arrangeGrob(g_legend(plot1),
g_legend(plot2),
g_legend(plot3),
blank,
nrow=4),
ncol=2,widths=c(7,3))
# Error comparison: МЭР forecasts vs model forecasts against realised values.
# NOTE(review): the column is named `rmse` but is sqrt(SUM of squared errors),
# not sqrt(MEAN) — confirm whether a true RMSE was intended.
all_for <- bind_rows(med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013) %>%
mutate(model = 'МЭР',
pred = value) %>%
filter(!fctname %in% c('2013(базовый)', '2016 (базовый)')) %>%
select(model, year, pred),
forec_vs %>% select(model, year, pred)) %>%
ungroup %>%
inner_join(raw_y, by = 'year') %>%
group_by(model) %>%
summarise(rmse = sqrt(sum((pred-investment)^2)))
all_for
#ggsave(file="plot/med_forecast.pdf", medfor)
# волосы для всех прогнозов
# вариант 2 сумма квадратов ошибок на каждую дату прогноза
# с ростом h растет и абсолютная ошибка,
# поэтому делим ошибку одной модели на среднюю ошибку для каждого h
# na.omit %>%
# filter(h<=2) %>%
# mutate(error = (pred - true)^2) %>%
# group_by(h) %>%
# mutate(error = (error-mean(error))/sd(error)) %>%
# ungroup %>%
# group_by(forecastdate, model, startdt) %>%
# summarise(sse = mean(error)) %>%
# ggplot()+
# geom_line(aes(forecastdate, sse,
# color = factor(startdt)))+
# facet_wrap(vars(model))
##### plot the in-sample (current) fit rather than the forecast
# For each date keep the latest estimation window, h == 0 only; grey thick
# line = realised series, dashed = fitted values, one panel per model.
# NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`).
out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
filter(#startdt == '2000-01-01',
forecastdate <='2019-01-01',
h==0,
model != 'Random Walk') %>%
# variant 1: simply plot the forecasts
ggplot()+
stat_summary(aes(x = date, y = true),
fun.y=mean, geom='line', alpha = 0.5, size = 4, color = 'grey')+
geom_line(aes(date, pred),
linetype = 'dashed')+
facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.15))+
labs(x = 'Дата',
y = 'Квартальное изменение валового накопления\nосновного капитала')
#### errors over time
# Standardised squared errors per forecast date, averaged over h <= 2,
# one panel per model, coloured by estimation-window start.
out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
mutate(pred = ifelse(h == 0, true, pred)) %>%
filter(#startdt == '1996-01-01',
model != 'AR',
forecastdate <='2019-01-01',
h>0,
model != 'Random Walk') %>%
# variant 2: sum of squared errors at each forecast date
# absolute error grows with h,
# so standardise each error within its horizon h
na.omit %>%
filter(h<=2) %>%
mutate(error = (pred - true)^2) %>%
group_by(h) %>%
mutate(error = (error-mean(error))/sd(error)) %>%
ungroup %>%
group_by(forecastdate, model, startdt) %>%
summarise(sse = mean(error)) %>%
ggplot()+
geom_line(aes(forecastdate, sse,
color = factor(startdt)))+
facet_wrap(vars(model))
### gif forecasts
# NOTE(review): gapminder and gganimate are loaded twice below — harmless
# but redundant.
library(gapminder)
# Charge libraries:
library(gganimate)
library(gapminder)
library(gganimate)
library(gifski)
library(png)
# "Hair" data: latest-window forecasts with a synthetic animation clock
# (giftime) so each forecast path is revealed after its origin date;
# realised values are masked after the forecast origin.
out_hair <- out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
mutate(pred = ifelse(h == 0, true, pred)) %>%
mutate(giftime =as.numeric(forecastdate)+0.2*((date -forecastdate) %>% as.numeric())) %>%
filter(forecastdate <='2019-01-01') %>%
mutate(true = ifelse(date <= forecastdate, true, NA))
# Subset for animation: 2000 window, h < 5, best model variants only; then
# fix factor order and shorten labels (positionally aligned from/to lists).
fordata <- out_hair %>%
filter(startdt ==
'2000-01-01',
h<5,
! model %in%
c('Случайное блуждание',
'Бустинг (eta = 0,1)','Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)'))
fordata$model <- factor(fordata$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
# 'Бустинг (eta = 0,3)',
# 'Бустинг (eta = 0,4)',
# 'Случайный лес (N = 100)',
# 'Случайный лес (N = 500)',
# 'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'))
fordata$model <- plyr::mapvalues(fordata$model, from = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
# 'Бустинг (eta = 0,3)',
# 'Бустинг (eta = 0,4)',
#
# 'Случайный лес (N = 100)',
# 'Случайный лес (N = 500)',
# 'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'),
to = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (0,1)',
'Бустинг (0,2)',
# 'Бустинг (0,3)',
# 'Бустинг (0,4)',
#
# 'Случайный лес (100)',
# 'Случайный лес (500)',
# 'Случайный лес (1000)',
'Случайный лес (2000)'))
# One animated gif per model: grey path = realised series, dashed coloured
# lines = forecast paths revealed over giftime. anim_save() with no animation
# argument saves the most recently rendered animation.
for(modeli in (fordata$model %>% unique)){
myplot <- ggplot(fordata %>%
filter(model == modeli) %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)))+
geom_path(data = fordata %>%
filter(model == modeli) %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
aes(date, true_na), alpha = 0.5, size = 2, color = 'grey')+
geom_line(aes(date, pred, color = forecastdate,
group = interaction(startdt,
forecastdate)),
#size = 1,
show.legend = FALSE,
linetype = 'dashed'
)+
#facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(x = 'Дата',
y = 'Квартальное изменение валового накопления\nосновного капитала (разность логарифмов)')+
transition_reveal(giftime) +
ease_aes('linear')+
theme_minimal()
animate(myplot, duration = 10, fps = 20, width = 1000, height = 1000, renderer = gifski_renderer())
anim_save(paste0("plot/gif/",modeli,".gif"))
}
# Combined animation: one faceted panel per model; grey path = realised
# series, dashed coloured lines = forecast paths revealed over giftime.
myplot <- ggplot(fordata %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)))+
geom_path(data = fordata %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
aes(date, true_na), alpha = 0.5, size = 2, color = 'grey')+
geom_line(aes(date, pred, color = forecastdate,
group = interaction(startdt,
forecastdate)),
#size = 1,
show.legend = FALSE,
linetype = 'dashed'
)+
facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(x = 'Дата',
y = 'Квартальное изменение валового накопления\nосновного капитала (разность логарифмов)')+
transition_reveal(giftime) +
ease_aes('linear')+
theme_minimal()
animate(myplot, duration = 10, fps = 5, width = 200, height = 200, renderer = gifski_renderer())
# BUG FIX: the original saved to paste0("plot/gif/", modeli, ".gif"), reusing
# the stale loop variable from the per-model loop above and silently
# overwriting the last model's individual gif. Save under a distinct name.
anim_save("plot/gif/all_models.gif")
# static hair plot ----
# Static version of the hair chart, with a two-entry manual legend (observed
# vs forecast); only the colour scale shows a legend, the other scales are
# suppressed but override linetype/alpha/size in the legend keys.
hair <- ggplot(fordata%>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>%
filter())+
geom_path(data = fordata %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
aes(date, true_na,
alpha = 'Наблюдаемые\nзначения',
color = 'Наблюдаемые\nзначения',
size = 'Наблюдаемые\nзначения',
linetype = 'Наблюдаемые\nзначения'))+
geom_line(aes(date, pred,
group = interaction(startdt,
forecastdate),
alpha = 'Прогноз',
color = 'Прогноз',
size = 'Прогноз',
linetype = 'Прогноз')
)+
facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.15))+
labs(x = 'Дата',
y = 'Квартальное изменение валового
накопления\nосновного капитала (разность логарифмов)')+
theme_bw()+
# scale_alpha_manual(values = c(0.5, 1))+
# scale_size_manual(values = c(2,0.7))+
# scale_color_manual(values = c('grey', 'black'))+
scale_colour_manual(name="",
values=c('grey', 'black'),
guide = guide_legend(override.aes=list(linetype=c(1,2),
alpha = c(0.5, 1),
size = c(2, 0.6)))) +
scale_size_manual(name="Size",values=c(2,0.6), guide="none") +
scale_alpha_manual(name="Alpha",values=c(0.5,1), guide="none") +
scale_linetype_manual(name="Type",values=c(1,2), guide="none") +
theme(legend.position="bottom")
cairo_pdf("plot/hair.pdf")
print(hair)
dev.off()
library(gapminder)
# gganimate demo on the gapminder data (GDP per capita vs life expectancy).
p <- ggplot(
gapminder,
aes(x = gdpPercap, y=lifeExp, size = pop, colour = country)
) +
geom_point(show.legend = FALSE, alpha = 0.7) +
scale_color_viridis_d() +
scale_size(range = c(2, 12)) +
scale_x_log10() +
labs(x = "GDP per capita", y = "Life expectancy")
p <- p + transition_time(year) +
labs(title = "Year: {frame_time}")
# BUG FIX: cairo_pdf() expects a filename as its first argument; the original
# called cairo_pdf(p), passing the plot object itself, which errors at
# runtime. (NOTE: printing a gganimate object to a PDF device renders the
# animation, not a static page — confirm this is the intended output.)
cairo_pdf("plot/gapminder.pdf")
print(p)
dev.off()
#####
# Plotly-based animation: accumulate_by (helper from fun.R, presumably
# duplicating rows per frame) feeds ggplotly's frame aesthetic.
# NOTE(review): `frame` is not a native ggplot2 aesthetic — ggplot2 warns,
# plotly consumes it.
p <- fordata %>%
accumulate_by(~giftime) %>%
ggplot()+
geom_line(aes(x = date,
y = true,
frame = frame),
color='grey',
size = 2,
linetype = 'solid',
alpha = 0.5)
ggplotly(p) %>%
animation_opts(
frame = 100,
transition = 0,
redraw = FALSE
)
### penalty norm for the regularization methods ----
load('out/full/out_adalasso.RData')
load('out/full/out_elnet.RData')
load('out/full/out_lasso.RData')
load('out/full/out_postlasso.RData')
load('out/full/out_ridge.RData')
load('out/full/out_zero.RData')
# For each fitted model compute its penalty norm: L1 for (ada)lasso and
# post-lasso, squared L2 for ridge, elastic-net mix for elnet.
# NOTE(review): the subsetting of out_zero and the [-c(1:50)] drops look like
# manual alignment of window indices — confirm against how the out_* lists
# were generated.
regular_norm <-
c(out_zero[c(1:50, 101:250, 301:350)],
out_adalasso[-c(1:50)],
out_elnet[-c(1:50)],
out_lasso[-c(1:50)],
out_postlasso[-c(1:50)],
out_ridge[-c(1:50)]
) %>%
plyr::compact()%>%
map_dfr(function(x){
if(x$model=='postlasso'){
norm <- x$model_fit$coefficients %>% abs %>% sum
} else if(x$model %in% c('lasso', 'adalasso')){
norm <- x$model_fit$beta %>% abs %>% sum
} else if(x$model == 'ridge'){
norm <- x$model_fit$beta^2 %>% sum
} else if(x$model == 'elnet'){
norm <- 0.5*(x$model_fit$beta^2 %>% sum +
x$model_fit$beta %>% abs %>% sum)
}
data.frame(
model = x$model,
startdt =x$startdt,
enddt =x$enddt,
h = x$h,
norm = norm
)
})
# Norm over time by model and horizon.
regular_norm %>%
ggplot()+
geom_line(aes(enddt, norm, color = factor(startdt)))+
facet_grid(model~h, scales = 'free_y')
# conclusion: not great
| /plotting.R | no_license | vishalbelsare/investment_forecasting | R | false | false | 71,263 | r | rm(list = ls())
# Load project libraries/helpers and the pre-processed stationary data set
# (provides `df` used throughout the plotting section below).
source("lib.R")
source("fun.R")
load('data/stationary_data_ext.RData')
# investment level ----
# Line chart of the (transformed) gross fixed capital formation series,
# written to plot/invest_plot.pdf. local() keeps the NA-free copy of `df`
# out of the global environment.
invest_plot <- local({
  df_clean <- na.omit(df)
  ggplot(df_clean) +
    geom_line(aes(x = time(df_clean), y = investment)) +
    labs(title = "",
         y = "Валовое накопление основного капитала",
         x = "Дата") +
    theme_bw()
})
cairo_pdf("plot/invest_plot.pdf", width = 10, height = 5)
print(invest_plot)
dev.off()
## all vars plot ----
# All standardised series in grey with the investment series emphasised
# (alpha mapped to an indicator); dashed guides at the two sample starts.
# Saved to plot/allvars.pdf.
all_plot <-
df %>%
na.omit %>%
as.data.frame %>%
rownames_to_column('date') %>%
melt(id.vars = 'date') %>%
group_by(variable) %>%
mutate(value = scale(value),
date = as.Date(as.yearqtr(date))) %>%
ggplot() +
geom_line(aes(date, value, group=variable,
alpha = ifelse(variable %in% c('investment'), 1, 0.3)), show.legend = F)+
labs(title = "",
y = "",
x = "") +
scale_x_date(limits = c('1996-01-01', '2019-01-01') %>% as.Date)+
geom_vline(xintercept =c('1996-01-01','2000-01-01') %>% as.Date, linetype='dashed')+
scale_y_continuous(limits = c(-5, 3))+
labs(x = 'Дата',
y='')+
theme_bw()
cairo_pdf("plot/allvars.pdf", width = 10, height = 5)
print(all_plot)
dev.off()
##### rmsfe table -----
# Average out-of-sample RMSE by model and horizon, one LaTeX table per
# estimation-window start (1996 and 2000).
load('shinydata.RData')
scoredf$model <- factor(scoredf$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'
))
scoredf %>%
filter(type == 'test') %>%
filter(startdt == '1996-01-01') %>%
filter(lag==0, h > 0) %>%
group_by(model, lag, h, startdt) %>%
summarise(rmse = mean(rmse)) %>%
dcast(model ~ h) %>%
xtable %>%
print(include.rownames = FALSE)
scoredf %>%
filter(type == 'test') %>%
filter(startdt == '2000-01-01') %>%
filter(lag==0, h > 0) %>%
group_by(model, lag, h, startdt) %>%
summarise(rmse = mean(rmse)) %>%
dcast(model ~ h) %>%
xtable %>%
print(include.rownames = FALSE)
#### dm test ----
# Diebold-Mariano tests (get.dm from fun.R) comparing the two estimation
# windows; the RMSE difference is annotated with significance stars and
# rendered as a LaTeX table, then as a +/-/0 tile heat map (plot/dmsd.pdf).
dmdf <- get.dm(out_true %>%
filter(lag==0, h > 0) %>%
na.omit)
dmdiff <- scoredf %>%
filter(type == 'test') %>%
group_by(model, lag, h) %>%
arrange(startdt) %>%
summarise(diff = rmse[1]-rmse[2]) %>%
inner_join(dmdf, by =c('model', 'lag', 'h'))
dmdiff %>%
filter(!model %in% c('Случайное блуждание')) %>%
mutate(diff = paste0(format(round(diff,3), nsmall = 2),
ifelse(pvalue <= 0.1,
ifelse(pvalue > 0.05, '.',
ifelse (pvalue > 0.01, "*",
ifelse(pvalue > 0.001, '**', '***'))),'')),
pvalue = paste0(' (',format(round(pvalue,3), nsmall = 2),')'),
lastrow = '') %>%
melt(id.vars = c('model', 'h'), measure.vars = c('diff', 'pvalue', 'lastrow')) %>%
mutate(model_id = model,
model = ifelse(variable == 'diff',model, ifelse(variable == 'pvalue', ' ', ''))) %>%
mutate(model = factor(model, levels = model %>% unique %>% sort %>% rev)) %>%
dcast(model_id+model~h) %>%
select(-model_id) %>%
# select(-model) %>%
# add_column('h =' = '', .after = 1) %>%
xtable %>%
print(include.rownames = FALSE)
dmsd <- dmdf %>%
filter(!model %in% c('Случайное блуждание'), h>0) %>%
mutate(Изменение = ifelse(pvalue > 0.05,
'0',
ifelse(stat < 0,
'+',
'-')
)) %>%
ggplot(aes(factor(h), factor(model, levels = rev(unique(model))))) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(y = 'Модель',
x = 'Горизонт прогнозирования')+
theme(legend.position="bottom")
cairo_pdf('plot/dmsd.pdf')
print(dmsd)
dev.off()
#### dm test 2 (between different models) ----
# Pairwise Diebold-Mariano tests between all model pairs, per estimation
# window and horizon.
# NOTE(review): `h1` is computed per forecast date and is therefore a VECTOR;
# it is passed both to DM.test(H1 = ...) — which presumably expects a single
# string — and into data.frame(), where the scalar columns are recycled to
# its length, producing multiple rows per (i, j, h, startdt) pair. Confirm
# this is intentional.
IMat <-out_true%>%
filter(h > 0) %>%
na.omit %>%
filter(date > as.Date(as.yearqtr( enddt)+h/4),
date <= as.Date(as.yearqtr( enddt)+(h+1)/4))
n_models <- IMat$model %>% unique %>% length
outmat <- expand.grid(i = 1:n_models,
j = 1:n_models,
startdt = c('1996-01-01','2000-01-01'),
h = 1L:8L) %>%
split(1:nrow(.)) %>%
map_dfr(function(x){
i <- x$i
j <- x$j
inmat <- IMat %>%
filter(h == x$h,
startdt == as.character(x$startdt)) %>%
dcast(date~ model, value.var = 'pred') %>%
select(-date) %>%
as.matrix
realized <- IMat %>%
filter(h == x$h,
startdt == as.character(x$startdt),
model == 'LASSO') %>%
na.omit %>%
pull(true)
h1 <- ifelse((inmat[,i]-realized)^2 <
(inmat[ ,j]-realized)^2, 'more', 'less')
if(i != j){
data.frame(model_column = colnames(inmat)[j],
model_row = colnames(inmat)[i],
h1 = h1,
pvalue =DM.test(inmat[, i],inmat[, j],
realized,loss.type="SE",
c=TRUE,H1=h1) %>%
.$p.value ,
h = x$h,
startdt = x$startdt)
} else{
data.frame(model_column = colnames(inmat)[j],
model_row = colnames(inmat)[i],
h1 = 'same',
pvalue =1,
h = x$h,
startdt = x$startdt)
}
}
)
# Facet labels for the forecast-horizon panels: "1" -> "h = 1", ..., "8" ->
# "h = 8". (h = 0 was dropped from the plots, hence no entry for "0".)
# setNames() replaces the original two-step c()/names() assignment.
h.labs <- setNames(paste("h =", 1:8), as.character(1:8))
# Shared display order of the models on both axes of the DM heat map.
dm_model_levels <- c('Случайное блуждание',
                     "AR", "Adaptive LASSO",
                     "Elastic Net", "LASSO", "Post-LASSO", "Ridge",
                     "Spike and Slab",
                     "Бустинг (eta = 0,1)",
                     "Бустинг (eta = 0,2)",
                     "Бустинг (eta = 0,3)",
                     "Бустинг (eta = 0,4)",
                     "Случайный лес (N = 100)", "Случайный лес (N = 500)",
                     "Случайный лес (N = 1000)", "Случайный лес (N = 2000)")
# DM results for the sample starting 1996-01-01; Изменение encodes the sign
# of the relative loss plus significance (. = 10%, * = 5%, ** = 1%).
dm_96_toplot <- outmat %>%
  mutate(pvalue = ifelse(is.nan(pvalue), 1, pvalue)) %>%
  filter(startdt == '1996-01-01') %>%
  mutate(
    Изменение = ifelse(pvalue > 0.1,
                       'не значимо',
                       ifelse(pvalue > 0.05,
                              ifelse(h1 == 'less', '-.', '+.'),
                              ifelse(pvalue > 0.01,
                                     ifelse(h1 == 'less', '-*', '+*'),
                                     ifelse(h1 == 'less', '-**', '+**')))),
    # BUG FIX: the original passed TWO `levels =` arguments to factor()
    # (a stray, unclosed first `levels = c("AR", ...` wrapped the intended
    # one), yielding a levels vector with duplicates — an error in modern R.
    # Only the full list including the random walk was intended.
    model_column = factor(model_column, levels = dm_model_levels),
    model_row = factor(model_row, levels = rev(dm_model_levels))) %>%
  mutate(Изменение = factor(Изменение,
                            levels = c('+.', '-.', '+*', '-*', '+**', '-**',
                                       'не значимо')))
# Heat map of pairwise DM-test outcomes for the 1996 sample: green tiles =
# row model significantly beats the column model, red = the opposite;
# saturation encodes the significance level, white = not significant.
dm_96 <- dm_96_toplot%>%
ggplot(aes(model_column, model_row)) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(x = '',
y = '')+
theme(legend.position="bottom",
legend.title=element_blank(),
axis.text.y = element_text(size=8),
axis.text.x = element_text(angle = 90, size=8))+
facet_wrap(~h,
labeller = labeller(h = h.labs))+
# Fill colours are paired with the Изменение factor levels
# ('+.', '-.', '+*', '-*', '+**', '-**', 'не значимо').
scale_fill_manual(values = c("#add1a9",
'#db696f',
'#71d466',
'#d9454d',
"#2bd918",
'#d60f1a',
'white'))+
scale_x_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)"))+
scale_y_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,
"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)") %>% rev)
cairo_pdf('plot/dm96.pdf')
print(dm_96)
dev.off()
# Model display order for the 2000-sample DM heat map (same as dm_96).
dm_model_levels <- c('Случайное блуждание',
                     "AR", "Adaptive LASSO",
                     "Elastic Net", "LASSO", "Post-LASSO", "Ridge",
                     "Spike and Slab",
                     "Бустинг (eta = 0,1)",
                     "Бустинг (eta = 0,2)",
                     "Бустинг (eta = 0,3)",
                     "Бустинг (eta = 0,4)",
                     "Случайный лес (N = 100)", "Случайный лес (N = 500)",
                     "Случайный лес (N = 1000)", "Случайный лес (N = 2000)")
# DM results for the sample starting 2000-01-01; Изменение encodes the sign
# of the relative loss plus significance (. = 10%, * = 5%, ** = 1%).
dm_00_toplot <- outmat %>%
  mutate(pvalue = ifelse(is.nan(pvalue), 1, pvalue)) %>%
  filter(startdt == '2000-01-01') %>%
  mutate(
    Изменение = ifelse(pvalue > 0.1,
                       'не значимо',
                       ifelse(pvalue > 0.05,
                              ifelse(h1 == 'less', '-.', '+.'),
                              ifelse(pvalue > 0.01,
                                     ifelse(h1 == 'less', '-*', '+*'),
                                     ifelse(h1 == 'less', '-**', '+**')))),
    # BUG FIX: as in dm_96_toplot, factor() originally received two
    # `levels =` arguments, producing duplicated levels (a runtime error);
    # only the full level list including the random walk was intended.
    model_column = factor(model_column, levels = dm_model_levels),
    model_row = factor(model_row, levels = rev(dm_model_levels))) %>%
  mutate(Изменение = factor(Изменение,
                            levels = c('+.', '-.', '+*', '-*', '+**', '-**',
                                       'не значимо')))
# Heat map of pairwise DM-test outcomes for the 2000 sample; identical
# layout to dm_96 (see plot/dm96.pdf) but for the shorter training sample.
dm_00 <- dm_00_toplot%>%
ggplot(aes(model_column, model_row)) +
geom_tile(aes(fill = Изменение),color='grey')+
theme_bw()+
labs(x = '',
y = '')+
theme(legend.position="bottom",
legend.title=element_blank(),
axis.text.y = element_text(size=8),
axis.text.x = element_text(angle = 90, size=8))+
facet_wrap(~h,
labeller = labeller(h = h.labs))+
# Fill colours paired with Изменение levels ('+.', '-.', ..., 'не значимо').
scale_fill_manual(values = c("#add1a9",
'#db696f',
'#71d466',
'#d9454d',
"#2bd918",
'#d60f1a',
'white'))+
scale_x_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)"))+
scale_y_discrete(labels = c('Случайное блуждание',"AR","Adaptive LASSO","Elastic Net","LASSO","Post-LASSO","Ridge",
"Spike and Slab",
"Бустинг (0.1)",
"Бустинг (0.2)",
"Бустинг (0.3)",
"Бустинг (0.4)",
"Случайный лес (100)" ,
"Случайный лес (500)"
,"Случайный лес (1000)",
"Случайный лес (2000)") %>% rev)
cairo_pdf('plot/dm00.pdf')
print(dm_00)
dev.off()
# lasso coefs ----
# First compute the standard deviation of every variable inside each
# training window (startdt..enddt); the LASSO coefficients are later
# divided by these s.d.'s to undo the standardization.
load('data/stationary_data_ext.RData')
# load('data/stationary_data_ext.RData')
sddata <- expand.grid(startdt = c(as.Date('1996-01-01'), as.Date('2000-01-01')),
enddt = seq(as.Date('2012-10-01'), as.Date('2018-10-01'), by = 'quarter')
) %>%
split(seq(1:nrow(.))) %>%
map_dfr(function(x){
df %>% na.omit %>% as.data.frame() %>%
rownames_to_column('date')%>%
mutate(date = as.yearqtr(date) %>% as.Date) %>%
filter(date >= x$startdt, date <= x$enddt) %>%
select(-date) %>%
sapply( sd) %>%
as.data.frame %>%
t %>%
as_tibble %>%
# The trailing `.` feeds the piped tibble to mutate() as its data
# argument (magrittr does not auto-insert when `.` appears explicitly).
mutate(enddt = x$enddt,
startdt = x$startdt, .)
})
# Computing LASSO predictor coefficients ----
# Rescale each stored glmnet coefficient back towards the units of the
# dependent variable: beta / sd(x) * sd(y) — TODO confirm this matches the
# standardization used at estimation time.
source('fun.R', encoding = 'utf-8')
source('lib.r')
load('out/full/out_lasso.RData')
load('out/full/out_zero.RData')
lasso_beta <-
c(#out_zero[151:200],
out_lasso[-c(1:50)]
) %>%
plyr::compact()%>%
map_dfr(
function(x, i){
# Normalize the one off-by-a-quarter start date before matching sddata.
x$startdt = as.character(x$startdt)
x$startdt = ifelse(x$startdt == '1996-04-01', '1996-01-01', x$startdt)
x$startdt = as.Date(x$startdt)
if(x$h == 0){
# Nowcast fits exclude the contemporaneous target-related columns.
actsd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
select(-c(investment, startdt, enddt, invest2gdp, GDPEA_Q_DIRI))
# s.d. of y
ysd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
pull(investment) %>%
as.numeric
} else{
actsd <- sddata %>% filter(startdt == x$startdt,
enddt == x$enddt) %>%
select(-c(startdt, enddt, gdplag, investmentlag, invest2gdplag))
ysd <- actsd[1,1] %>%
as.numeric
}
betaval = x$model_fit$beta
# Sanity check: coefficient rows must line up with the s.d. columns.
if(!all((betaval%>% rownames) ==(actsd %>% colnames))){
print(actsd %>% colnames)
print(betaval%>% rownames)
stop()
}
# Guard against silent zero-length recycling in the data.frame below.
if(length(x$model) == 0|
length(x$h) == 0|
length(x$startdt) == 0|
length(x$enddt) == 0|
length(betaval%>% rownames)==0|
length((betaval%>% as.numeric)/(actsd[1,] %>% as.numeric)*(ysd)) == 0){
print(actsd[1,])
stop()
}
# One row per predictor with the rescaled coefficient.
data.frame(model = x$model,
h = x$h,
startdt=x$startdt,
enddt = x$enddt,
predictor = betaval%>% rownames,
beta = (betaval%>% as.numeric)/(actsd[1,] %>% as.numeric)*(ysd)
)
}
)
# Re-create the facet labels for horizons 1-8 (identical to the earlier
# definition; h = 0 intentionally omitted).
h.labs <- setNames(paste("h =", 1:8), 1:8)
# Number of non-zero LASSO coefficients per vintage (enddt), faceted by
# horizon, one line per training-sample start date.
lasso_nonzero <- lasso_beta %>%
mutate(startdt = factor(startdt, c('2000-01-01','1996-01-01'),
labels = c('2000.I', '1996.I'))) %>%
group_by( h, startdt, enddt) %>%
summarise(nz = sum(beta != 0)) %>%
ggplot(aes(enddt, nz, linetype = startdt))+
geom_line()+
labs(title = "",
y = "Количество переменных",
x = "Дата",
color = '',
linetype = 'Левая граница\nвыборки')+
facet_wrap(~h, scales = 'free',
labeller = labeller(h = h.labs))+
theme_bw()+
theme(legend.position="bottom")
# Number of selected variables
cairo_pdf('plot/lasso_nonzero.pdf')
print(lasso_nonzero)
dev.off()
# Interactive inspection plot (plotly): coefficient paths of a hand-picked
# predictor subset for the 2000 sample, faceted by horizon. Not saved.
lasso_p <- lasso_beta %>%
group_by(predictor, h, startdt) %>%
filter(
startdt== '2000-01-01'
,
predictor %in% c(
'investment',
'mkr_1d',
'mkr_7d',
'gov_6m',
'GKO',
'invest2gdp',
'oil',
'rts',
'GDPEA_Q_DIRI',
'gdplag', 'investmentlag', 'invest2gdplag'
#,
#'RTRD_Q_DIRI',
#'EMPLDEC_Q',
#'CONI_Q_CHI', # price index for construction and installation works
#'CNSTR_Q_DIRI'# construction works index
)
) %>%
ungroup%>%
#mutate(predictor = correct.names.pred(predictor)) %>%
group_by(h, predictor) %>%
mutate(beta_mean = mean(beta)) %>%
ungroup %>%
group_by(h, startdt, enddt) %>%
arrange(desc(abs(beta_mean))) %>%
#filter(row_number()<=5) %>%
ungroup() %>%
ggplot()+
geom_line(aes(enddt, beta, color=interaction(predictor,startdt)))+
facet_wrap(vars(h))
plotly::ggplotly(lasso_p)
# lasso coefs h <=4
# LaTeX table: top-5 predictors by average absolute coefficient for each
# horizon h <= 4 (2000 sample), one column per horizon.
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h<=4) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# lasso coefs h >4
# Same top-5 table for horizons h > 4.
# NOTE(review): beta is divided by 100 here, unlike the h <= 4 table above —
# confirm the intended units are consistent between the two tables.
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h>4) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)/100) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# Top-5 table for a mixed horizon subset (h in {0, 1, 2, 7, 8}); beta
# divided by 100 as in the h > 4 table.
lasso_beta %>%
mutate(predictor = correct.names.pred(predictor)) %>%
group_by(predictor, h, startdt) %>%
filter(startdt== '2000-01-01') %>%
filter(h %in% c(0,1,2,7,8 )) %>%
group_by(predictor, h) %>%
summarise(beta = mean(beta)/100) %>%
ungroup %>%
group_by(h) %>%
arrange(desc(abs(beta))) %>%
mutate(rn = row_number(),
pred_beta = paste0(predictor,' ', round(beta,3))) %>%
filter(rn<=5) %>%
ungroup %>%
dcast(rn~h, value.var = 'pred_beta') %>%
xtable %>%
print(include.rownames = FALSE)
# GDP and other single-predictor coefficient paths ----
# The original code repeated the same ~30-line pipeline six times (gdp, oil,
# invest, mkr_7d, invest2gdp, rts); it is factored into one helper.
#
# Line plot of the rescaled LASSO coefficients of `predictors` across
# vintages (enddt) for the 2000 training sample, one facet row per horizon.
#   beta_df    - lasso_beta-shaped data (predictor, h, startdt, enddt, beta)
#   predictors - character vector of raw predictor codes to keep
#   hs         - horizons to keep (assumes h takes integer values 0..8,
#                matching the original h<2 / h>6 style filters)
#   h_factor   - the original gdp plot converted h to a factor before
#                faceting; kept as an option to reproduce it exactly
plot_coef_lines <- function(beta_df, predictors, hs, h_factor = FALSE) {
  d <- beta_df %>%
    group_by(predictor, h, startdt) %>%
    filter(h %in% hs,
           startdt == '2000-01-01',
           predictor %in% predictors) %>%
    ungroup %>%
    mutate(predictor = correct.names.pred(predictor)) %>%
    group_by(h, predictor) %>%
    mutate(beta_mean = mean(beta) / 100) %>%
    ungroup %>%
    group_by(h, startdt, enddt) %>%
    arrange(desc(abs(beta_mean))) %>%
    ungroup()
  if (h_factor) {
    d <- d %>% mutate(h = as.factor(h))
  }
  ggplot(d) +
    geom_line(aes(enddt, beta)) +
    facet_grid(h ~ ., scales = 'free',
               labeller = labeller(h = h.labs)) +
    labs(title = "",
         y = "Коэффициент",
         x = "Дата") +
    theme_bw()
}
# GDP and its lag, horizons 0-1.
gdp <- plot_coef_lines(lasso_beta, c('GDPEA_Q_DIRI', 'gdplag'), 0:1,
                       h_factor = TRUE)
cairo_pdf('plot/gdp.pdf')
print(gdp)
dev.off()
# Oil price, horizons 0-2; inspected interactively only, never saved.
plot_coef_lines(lasso_beta, 'oil', 0:2)
# Investment and its lag, horizons 0-1.
invest <- plot_coef_lines(lasso_beta, c('investment', 'investmentlag'), 0:1)
cairo_pdf('plot/invest.pdf')
print(invest)
dev.off()
# 7-day interbank rate, horizons 1-3.
mkr_7d <- plot_coef_lines(lasso_beta, 'mkr_7d', 1:3)
cairo_pdf('plot/mkr_7d.pdf')
print(mkr_7d)
dev.off()
# Investment-to-GDP ratio and its lag, horizons 7-8.
invest2gdp <- plot_coef_lines(lasso_beta, c('invest2gdp', 'invest2gdplag'),
                              7:8)
cairo_pdf('plot/invest2gdp.pdf')
print(invest2gdp)
dev.off()
# RTS index, horizons 1-3.
rts <- plot_coef_lines(lasso_beta, 'rts', 1:3)
cairo_pdf('plot/rts.pdf')
print(rts)
dev.off()
# in 9 — coefficient "hair" plots: every predictor's path in light grey
# with one predictor highlighted in bold. The original repeated the same
# ~65-line pipeline five times (gdp9, invest9, invest2gdp9, rts9, mkr9);
# it is factored into two helpers.
#
# Prepare the plotting frame exactly as the original pipeline did:
# keep the 2000 sample, translate predictor codes to display names,
# drop predictors whose coefficient is identically zero for a horizon,
# and convert h to a factor for faceting.
prep_coefs <- function(beta_df) {
  beta_df %>%
    group_by(predictor, h, startdt) %>%
    filter(startdt == '2000-01-01') %>%
    ungroup %>%
    mutate(predictor = correct.names.pred(predictor)) %>%
    group_by(h, predictor) %>%
    mutate(beta_mean = mean(beta)) %>%
    ungroup %>%
    group_by(h, startdt, enddt) %>%
    arrange(desc(abs(beta_mean))) %>%
    ungroup() %>%
    group_by(h, predictor, startdt) %>%
    filter(mean(abs(beta)) > 0) %>%
    ungroup %>%
    mutate(h = as.factor(h))
}
# All coefficient paths in grey (alpha 0.1) with `highlight` (raw predictor
# code) drawn on top in bold; y axis uses the asinh transform with tick
# labels suppressed.
# NOTE(review): trans = 'asinh' resolves via asinh_trans(), which is only
# defined further down this file — confirm it (or scales' built-in asinh
# transform) is available before these plots are printed.
plot_highlighted_coefs <- function(beta_df, highlight) {
  ggplot(prep_coefs(beta_df)) +
    geom_line(aes(enddt, beta, group = predictor),
              alpha = 0.1) +
    facet_wrap(h ~ .,
               labeller = labeller(h = h.labs)) +
    labs(title = "",
         y = "Коэффициент",
         x = "Дата") +
    theme_bw() +
    theme(axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    scale_y_continuous(trans = 'asinh') +
    geom_line(data = prep_coefs(beta_df %>%
                                  filter(predictor %in% highlight)),
              mapping = aes(enddt, beta),
              size = 1)
}
gdp <- plot_highlighted_coefs(lasso_beta, 'GDPEA_Q_DIRI')
cairo_pdf('plot/gdp9.pdf')
print(gdp)
dev.off()
invest <- plot_highlighted_coefs(lasso_beta, 'investment')
cairo_pdf('plot/invest9.pdf')
print(invest)
dev.off()
invest2gdp <- plot_highlighted_coefs(lasso_beta, 'invest2gdp')
cairo_pdf('plot/invest2gdp9.pdf')
print(invest2gdp)
dev.off()
rts <- plot_highlighted_coefs(lasso_beta, 'rts')
cairo_pdf('plot/rts9.pdf')
print(rts)
dev.off()
mkr <- plot_highlighted_coefs(lasso_beta, 'mkr_7d')
cairo_pdf('plot/mkr9.pdf')
print(mkr)
dev.off()
library(scales)
# Inverse-hyperbolic-sine transformation for ggplot2 scales: behaves like a
# symmetric log transform but is defined at zero and for negative values.
# Registered under the name 'asinh' so it can be referenced as
# scale_y_continuous(trans = 'asinh').
asinh_trans <- function() {
  trans_new(name = 'asinh', transform = asinh, inverse = sinh)
}
# List of series ----
# LaTeX table of all input series: display name, transformation code
# (1 = one treatment, 2 = another, 0 = default — the codes are consumed
# downstream) and data source.
source('fun.R', encoding = 'utf-8')
load('data/stationary_data_ext.RData')
tibble(name = df %>% names()) %>%
mutate(Название = correct.names.pred(name),
Трансформация = ifelse(name %in% c('reer','neer','oil','rts'),
'1',
ifelse(name %in% c('investment', 'CPI_Q_CHI',
'invest2gdp',
# 'deflator', only from 1996
'GDPEA_Q_DIRI',
'EMPLDEC_Q',
'UNEMPL_Q_SH',
'CONSTR_Q_NAT',
###### 'TRP_Q_PASS_DIRI',
'WAG_Q',
'CONI_Q_CHI',
'CTI_Q_CHI',
'AGR_Q_DIRI',
'RTRD_Q_DIRI',
'HHI_Q_DIRI',
'M0_Q',
'M2_Q',
#### 'IR_Q',
#### 'ICR_Q',
'CBREV_Q',
'CBEX_Q',
'FBREV_Q',
'FBEX_Q',
'RDEXRO_Q',# official USD exchange rate
'RDEXRM_Q',# USD exchange rate on MICEX
'LIAB_T_Q',# accounts payable, period average
'LIAB_UNP_Q',# overdue accounts payable, period average
'LIAB_S_Q',# payables to suppliers, period average
'LIAB_B_Q',# payables to the budget, period average
'DBT_T_Q',# accounts receivable, period average
'DBT_UNP_Q',# overdue accounts receivable, period average
########## 'DBT_P_Q',# receivables from customers, period average
'EX_T_Q',# exports
'IM_T_Q',# imports
'PPI_EA_Q' # (after 2004-01)
), '2', '0'
)),
Источник = ifelse(name %in% c('mkr_1d', 'mkr_7d'),
'Банк России',
ifelse(name %in% c('reer', 'neer',
'oil', 'rts'),
'Bloomberg',
ifelse(name == 'invest2gdp','Расчеты автора',
'Росстат'
))
)) %>% select(-name) %>%
arrange(Название) %>%
xtable %>%
print(include.rownames = FALSE)
# investment RTRD_Q_DIRI GDPEA_Q_DIRI UNEMPL_Q_SH CPI_Q_CHI
load('data/raw.RData')
# MED forecast comparison (Ministry of Economic Development) -----
# Fix the model display order, then shorten the labels for plotting.
out_cumulative_med <- out_cumulative
out_cumulative_med$model <- factor(out_cumulative_med$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'))
# Rename levels to the short display form ("eta = 0,1" -> "(0,1)" etc.).
out_cumulative_med$model <- plyr::mapvalues(out_cumulative_med$model, from = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
'Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'), to = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
'Бустинг (0,1)',
'Бустинг (0,2)',
'Бустинг (0,3)',
'Бустинг (0,4)',
'Случайный лес (100)',
'Случайный лес (500)',
'Случайный лес (1000)',
'Случайный лес (2000)'))
# Official MED forecasts in long form; keep only genuine forecasts
# (forecast vintage strictly before the target year).
med_forecast <- import('data/med_forecast.csv', encoding = 'UTF-8', header = TRUE) %>%
melt %>%
set_names(c('fctname', 'year', 'value')) %>%
mutate(year = as.character(year) %>% as.numeric) %>%
mutate(fctyear = substr(fctname, 1, 4) %>% as.numeric) %>%
filter(fctyear < year)
# Annualize the model forecasts: sum quarterly cumulative levels within a
# year (h 2-5 -> first forecast year) and convert to % growth.
my_forecast <-
out_cumulative_med %>%
dplyr::group_by(forecastdate, model, h) %>%
filter(enddt == forecastdate) %>%
ungroup() %>%
filter(h!=0) %>%
filter(h > 1, h < 6) %>%
mutate(year = year(date),
h_year = if_else(h<=4, 1, 2)) %>%
dplyr::group_by(model,h_year, year, startdt, forecastdate) %>%
summarise(pred = sum(pred_cumulative),
true_lag = sum(true_lag),
true = sum(true_cumulative)) %>%
mutate(pred = 100*(pred/ true_lag - 1),
true = 100*(true/ true_lag - 1)) %>%
ungroup %>% select(-forecastdate)
# Observed annual investment growth, % year-on-year.
raw_y <- rawdata$investment %>%
as.data.frame() %>%
rownames_to_column('year') %>%
mutate(year = year(as.yearqtr(year))) %>%
group_by(year) %>%
summarise(investment = sum(investment)) %>%
mutate(investment = 100*(investment/lag(investment)-1))
# One-year-ahead model forecasts from the longest training sample, with the
# weaker boosting/forest variants removed for readability.
forec_vs <- my_forecast %>%
select(-c(true_lag, true)) %>%
filter(h_year ==1, startdt == max(startdt)) %>%
filter(!is.na(pred)) %>%
filter(!model %in% c('Случайное блуждание', 'AR',
'Бустинг (0,1)',
'Бустинг (0,3)',
'Бустинг (0,4)',
'Случайный лес (100)',
'Случайный лес (500)',
'Случайный лес (1000)'
))
# plot1-plot3 exist only to harvest their legends (via g_legend below);
# the combined figure `p` suppresses its own legends.
plot1 <- forec_vs %>% ggplot()+
geom_bar(aes(year, pred, fill = model),
stat="identity",
# fill = 'white',
position = 'dodge',
#position = position_dodge2(width = 0.9, preserve = "single"),
color='black')+
scale_fill_discrete(name = "Модель")+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
plot2 <- ggplot()+
geom_bar(aes(year, value, group=fctname,
fill = 'Прогноз МЭР',
alpha ='Прогноз МЭР'), med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013 ) %>%
mutate(fctname = factor(fctname,
levels = c('2013 (консервативный)',
'2013(базовый) ',
'2014',
'2015',
'2016 (базовый +) ',
'2016 (базовый)',
'2017')))
,
stat="identity",
position = 'dodge'
)+
scale_alpha_manual(values = 0.4)+
scale_fill_manual(values = 'blue')+
guides(fill = guide_legend(" "),
alpha = guide_legend(" "))+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
plot3 <- ggplot()+
geom_point(aes(year, investment, color = 'Наблюдаемые значения'),
data = raw_y %>% filter(year <2019,year >2013),
size = 2)+
geom_line(aes(year, investment, color = 'Наблюдаемые значения'),
data = raw_y %>% filter(year <2019,year >2013)
)+
scale_size_manual(values = 2)+
scale_color_manual(values = 'black')+
guides(size = guide_legend(" "),
color = guide_legend(" "))+
theme(legend.position="right",
legend.justification="left",
legend.margin=ggplot2::margin(0,0,0,0),
legend.box.margin=ggplot2::margin(10,10,10,10))
# Combined figure: model forecasts (coloured bars), MED forecasts (blue,
# semi-transparent) and observed values (black line/points), 2014-2018.
p <- forec_vs %>% ggplot()+
geom_bar(aes(year, pred, fill = model),
stat="identity",
# fill = 'white',
position = 'dodge',
#position = position_dodge2(width = 0.9, preserve = "single"),
color='black')+
geom_bar(aes(year, value, group=fctname,
),
fill = 'blue',
alpha =0.4,
med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013 )%>%
mutate(fctname = factor(fctname,
levels = c('2013 (консервативный)',
'2013(базовый) ',
'2014',
'2015',
'2016 (базовый +) ',
'2016 (базовый)',
'2017')))
,
stat="identity",
position = 'dodge'
)+geom_point(aes(year, investment),
data = raw_y %>% filter(year <2019,year >2013),
color = 'black', size = 2)+
geom_line(aes(year, investment),
data = raw_y %>% filter(year <2019,year >2013),
color = 'black')+
scale_fill_discrete(guide="none")+
labs(#title = 'Инвестиции в России: прогнозы МЭР и прогнозы автора',
#subtitle = 'Горизонт прогнозирования - один год',
x = 'Дата',
y = 'Изменение валового накопления основного капитала,\n в % к прошлому году')+
theme_bw()
# Assemble the main panel with the three harvested legends stacked on the
# right (g_legend extracts a legend grob; blank fills the spare cell).
blank <- grid.rect(gp=gpar(col="white"))
grid.arrange(p,
arrangeGrob(g_legend(plot1),
g_legend(plot2),
g_legend(plot3),
blank,
nrow=4),
ncol=2,widths=c(7,3))
# RMSE of each forecaster (models + MED) against observed annual growth.
all_for <- bind_rows(med_forecast %>%
group_by(year) %>%
filter(fctyear== max(fctyear)) %>%
filter(year <2019, year > 2013) %>%
mutate(model = 'МЭР',
pred = value) %>%
filter(!fctname %in% c('2013(базовый)', '2016 (базовый)')) %>%
select(model, year, pred),
forec_vs %>% select(model, year, pred)) %>%
ungroup %>%
inner_join(raw_y, by = 'year') %>%
group_by(model) %>%
summarise(rmse = sqrt(sum((pred-investment)^2)))
all_for
#ggsave(file="plot/med_forecast.pdf", medfor)
# Hair plot for all forecasts
# Variant 2: sum of squared errors at each forecast date.
# The absolute error grows with h, so each model's error is divided by the
# mean error for that h.
# na.omit %>%
# filter(h<=2) %>%
# mutate(error = (pred - true)^2) %>%
# group_by(h) %>%
# mutate(error = (error-mean(error))/sd(error)) %>%
# ungroup %>%
# group_by(forecastdate, model, startdt) %>%
# summarise(sse = mean(error)) %>%
# ggplot()+
# geom_line(aes(forecastdate, sse,
# color = factor(startdt)))+
# facet_wrap(vars(model))
##### Plot the in-sample fit (h = 0 "nowcast") rather than the forecast:
# for every date keep only the latest vintage available before it.
out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
filter(#startdt == '2000-01-01',
forecastdate <='2019-01-01',
h==0,
model != 'Random Walk') %>%
# Variant 1: simply draw the fitted values against the realized series.
ggplot()+
stat_summary(aes(x = date, y = true),
fun.y=mean, geom='line', alpha = 0.5, size = 4, color = 'grey')+
geom_line(aes(date, pred),
linetype = 'dashed')+
facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.15))+
labs(x = 'Дата',
y = 'Квартальное изменение валового накопления\nосновного капитала')
#### Errors over time
# Standardized squared error per forecast date, averaged over h <= 2,
# faceted by model; lines compared across training-sample start dates.
out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
mutate(pred = ifelse(h == 0, true, pred)) %>%
filter(#startdt == '1996-01-01',
model != 'AR',
forecastdate <='2019-01-01',
h>0,
model != 'Random Walk') %>%
# Variant 2: sum of squared errors at each forecast date.
# The absolute error grows with h, so each model's error is standardized
# within each h before averaging.
na.omit %>%
filter(h<=2) %>%
mutate(error = (pred - true)^2) %>%
group_by(h) %>%
mutate(error = (error-mean(error))/sd(error)) %>%
ungroup %>%
group_by(forecastdate, model, startdt) %>%
summarise(sse = mean(error)) %>%
ggplot()+
geom_line(aes(forecastdate, sse,
color = factor(startdt)))+
facet_wrap(vars(model))
### Animated forecast GIFs
library(gapminder)
# Charge libraries:
library(gganimate)
library(gapminder)
library(gganimate)
library(gifski)
library(png)
# Latest-vintage forecasts with an artificial `giftime` axis so that each
# forecast path is revealed gradually after its forecast date.
out_hair <- out_true %>%
filter(enddt < as.Date(as.yearqtr(date)-h/4)) %>%
group_by(date, h, model, startdt) %>%
filter(enddt == max(enddt)) %>%
ungroup %>%
mutate(forecastdate = as.Date(as.yearqtr(date) -h/4)) %>%
mutate(pred = ifelse(h == 0, true, pred)) %>%
mutate(giftime =as.numeric(forecastdate)+0.2*((date -forecastdate) %>% as.numeric())) %>%
filter(forecastdate <='2019-01-01') %>%
mutate(true = ifelse(date <= forecastdate, true, NA))
# Keep the 2000 sample, short horizons and a readable subset of models.
fordata <- out_hair %>%
filter(startdt ==
'2000-01-01',
h<5,
! model %in%
c('Случайное блуждание',
'Бустинг (eta = 0,1)','Бустинг (eta = 0,3)',
'Бустинг (eta = 0,4)',
'Случайный лес (N = 100)',
'Случайный лес (N = 500)',
'Случайный лес (N = 1000)'))
# Fix panel order, then shorten the labels for display.
fordata$model <- factor(fordata$model,
levels = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
# 'Бустинг (eta = 0,3)',
# 'Бустинг (eta = 0,4)',
# 'Случайный лес (N = 100)',
# 'Случайный лес (N = 500)',
# 'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'))
fordata$model <- plyr::mapvalues(fordata$model, from = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (eta = 0,1)',
'Бустинг (eta = 0,2)',
# 'Бустинг (eta = 0,3)',
# 'Бустинг (eta = 0,4)',
#
# 'Случайный лес (N = 100)',
# 'Случайный лес (N = 500)',
# 'Случайный лес (N = 1000)',
'Случайный лес (N = 2000)'),
to = c("Случайное блуждание","AR",
"Adaptive LASSO",
"Elastic Net",
"LASSO",
"Post-LASSO",
"Ridge",
"Spike and Slab",
# 'Бустинг (0,1)',
'Бустинг (0,2)',
# 'Бустинг (0,3)',
# 'Бустинг (0,4)',
#
# 'Случайный лес (100)',
# 'Случайный лес (500)',
# 'Случайный лес (1000)',
'Случайный лес (2000)'))
# Render one animated GIF per model: grey path = realized series up to the
# forecast date, dashed lines = forecast paths coloured by forecast date.
# NOTE(review): modeli iterates over a factor column and is interpolated
# into the output file name — confirm it yields label strings (not integer
# codes) on the target R version.
for(modeli in (fordata$model %>% unique)){
myplot <- ggplot(fordata %>%
filter(model == modeli) %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)))+
geom_path(data = fordata %>%
filter(model == modeli) %>%
mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
aes(date, true_na), alpha = 0.5, size = 2, color = 'grey')+
geom_line(aes(date, pred, color = forecastdate,
group = interaction(startdt,
forecastdate)),
#size = 1,
show.legend = FALSE,
linetype = 'dashed'
)+
#facet_wrap(vars(model))+
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(x = 'Дата',
y = 'Квартальное изменение валового накопления\nосновного капитала (разность логарифмов)')+
transition_reveal(giftime) +
ease_aes('linear')+
theme_minimal()
animate(myplot, duration = 10, fps = 20, width = 1000, height = 1000, renderer = gifski_renderer())
anim_save(paste0("plot/gif/",modeli,".gif"))
}
# Small faceted animation with all models at once.
myplot <- ggplot(fordata %>%
                   mutate(true_na = ifelse(date <= forecastdate, true, NA)))+
  geom_path(data = fordata %>%
              mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
            aes(date, true_na), alpha = 0.5, size = 2, color = 'grey')+
  geom_line(aes(date, pred, color = forecastdate,
                group = interaction(startdt,
                                    forecastdate)),
            show.legend = FALSE,
            linetype = 'dashed'
  )+
  facet_wrap(vars(model))+
  scale_y_continuous(limits = c(-0.2, 0.3))+
  labs(x = 'Дата',
       y = 'Квартальное изменение валового накопления\nосновного капитала (разность логарифмов)')+
  transition_reveal(giftime) +
  ease_aes('linear')+
  theme_minimal()
animate(myplot, duration = 10, fps = 5, width = 200, height = 200, renderer = gifski_renderer())
# BUG FIX: this previously saved to paste0("plot/gif/", modeli, ".gif"),
# reusing the stale loop variable `modeli` from the per-model loop above
# and silently overwriting the last model's GIF with this faceted
# animation.  Save it under its own file name instead.
anim_save("plot/gif/all_models.gif")
# static hair plot ----
# Static version of the hair plot, written to plot/hair.pdf.
# (An argument-less, no-op filter() call was removed from the pipeline.)
hair <- ggplot(fordata %>%
                 mutate(true_na = ifelse(date <= forecastdate, true, NA)))+
  # Observed series; the aesthetic values are legend labels ("Observed"),
  # with the real visual values injected via the manual scales below.
  geom_path(data = fordata %>%
              mutate(true_na = ifelse(date <= forecastdate, true, NA)) %>% na.omit,
            aes(date, true_na,
                alpha = 'Наблюдаемые\nзначения',
                color = 'Наблюдаемые\nзначения',
                size = 'Наблюдаемые\nзначения',
                linetype = 'Наблюдаемые\nзначения'))+
  # Forecast trajectories ("Forecast"), one per (start, origin) pair.
  geom_line(aes(date, pred,
                group = interaction(startdt,
                                    forecastdate),
                alpha = 'Прогноз',
                color = 'Прогноз',
                size = 'Прогноз',
                linetype = 'Прогноз')
  )+
  facet_wrap(vars(model))+
  scale_y_continuous(limits = c(-0.2, 0.15))+
  # BUG FIX: the y-axis label string was previously broken across source
  # lines inside the literal, injecting a stray newline plus indentation
  # into the rendered axis title; use the same single-line label as the
  # animated plots above.
  labs(x = 'Дата',
       y = 'Квартальное изменение валового накопления\nосновного капитала (разность логарифмов)')+
  theme_bw()+
  # Single merged legend: colour carries it; the size/alpha/linetype
  # scales are hidden and their values supplied through override.aes.
  scale_colour_manual(name="",
                      values=c('grey', 'black'),
                      guide = guide_legend(override.aes=list(linetype=c(1,2),
                                                             alpha = c(0.5, 1),
                                                             size = c(2, 0.6)))) +
  scale_size_manual(name="Size",values=c(2,0.6), guide="none") +
  scale_alpha_manual(name="Alpha",values=c(0.5,1), guide="none") +
  scale_linetype_manual(name="Type",values=c(1,2), guide="none") +
  theme(legend.position="bottom")
cairo_pdf("plot/hair.pdf")
print(hair)
dev.off()
library(gapminder)
# Gapminder scatter: GDP per capita vs life expectancy, bubble = population,
# animated over years.
p <- ggplot(
  gapminder,
  aes(x = gdpPercap, y=lifeExp, size = pop, colour = country)
) +
  geom_point(show.legend = FALSE, alpha = 0.7) +
  scale_color_viridis_d() +
  scale_size(range = c(2, 12)) +
  scale_x_log10() +
  labs(x = "GDP per capita", y = "Life expectancy")
p <- p + transition_time(year) +
  labs(title = "Year: {frame_time}")
# BUG FIX: cairo_pdf() takes a file name as its first argument; it was
# previously called as cairo_pdf(p) with the ggplot object, which fails.
# NOTE(review): printing a gganimate object to a PDF device renders a
# single frame only -- use animate()/anim_save() for the animation itself.
cairo_pdf("plot/gapminder.pdf")
print(p)
dev.off()
#####
# Interactive (plotly) hair plot.  accumulate_by() is a helper defined
# elsewhere in this file that duplicates rows cumulatively over ~giftime
# so plotly can animate the reveal frame by frame.
# NOTE(review): `frame` is not a real ggplot2 aesthetic -- ggplot2 warns
# about it, but ggplotly() picks it up as the animation frame.
p <- fordata %>%
accumulate_by(~giftime) %>%
ggplot()+
geom_line(aes(x = date,
y = true,
frame = frame),
color='grey',
size = 2,
linetype = 'solid',
alpha = 0.5)
# Render as an interactive widget: 100 ms per frame, no tweening, no
# full redraw between frames.
ggplotly(p) %>%
animation_opts(
frame = 100,
transition = 0,
redraw = FALSE
)
### penalty term (coefficient norm) for the regularisation methods ----
load('out/full/out_adalasso.RData')
load('out/full/out_elnet.RData')
load('out/full/out_lasso.RData')
load('out/full/out_postlasso.RData')
load('out/full/out_ridge.RData')
load('out/full/out_zero.RData')
# For every stored fit compute the norm each method penalises:
# L1 for (ada-/post-)lasso, squared L2 for ridge, and the 50/50
# elastic-net mixture of the two for elnet.
regular_norm <-
  c(out_zero[c(1:50, 101:250, 301:350)],
    out_adalasso[-c(1:50)],
    out_elnet[-c(1:50)],
    out_lasso[-c(1:50)],
    out_postlasso[-c(1:50)],
    out_ridge[-c(1:50)]
  ) %>%
  plyr::compact()%>%
  map_dfr(function(x){
    if(x$model=='postlasso'){
      norm <- sum(abs(x$model_fit$coefficients))
    } else if(x$model %in% c('lasso', 'adalasso')){
      norm <- sum(abs(x$model_fit$beta))
    } else if(x$model == 'ridge'){
      norm <- sum(x$model_fit$beta^2)
    } else if(x$model == 'elnet'){
      # Explicit parentheses: the previous `a^2 %>% sum + b %>% abs %>% sum`
      # depended on %>% operator precedence and was hard to read.
      norm <- 0.5*(sum(x$model_fit$beta^2) + sum(abs(x$model_fit$beta)))
    } else {
      # BUG FIX: unpenalised models (e.g. the out_zero benchmark entries)
      # previously matched no branch, leaving `norm` undefined and making
      # map_dfr() abort with "object 'norm' not found".
      norm <- NA_real_
    }
    data.frame(
      model = x$model,
      startdt = x$startdt,
      enddt = x$enddt,
      h = x$h,
      norm = norm
    )
  })
# Penalty norm over time, one panel per model x horizon.
regular_norm %>%
  ggplot()+
  geom_line(aes(enddt, norm, color = factor(startdt)))+
  facet_grid(model~h, scales = 'free_y')
# (author's note: "вывод не оч" -- the output is not great)
|
## deviance ##
# Deviance method for fitted nominal response model ("nrm") objects:
# returns -2 * maximised log-likelihood, with the number of free
# parameters attached as the "df" attribute.
deviance.nrm <- function(object, ...) {
  # free mean parameters (one is fixed for identification)
  n_mean <- length(object$erg_distr$mean_est) - 1
  # free variance parameters, counted only when sigma was estimated
  n_var <- object$ctrl$sigmaest * (length(object$erg_distr$sig_est) - 1)
  # item parameters (columns of the design matrix Qmat) minus constraints
  n_free <- ncol(object$reshOBJ$Qmat) + n_mean + n_var - length(object$ctrl$Clist)
  structure(-2 * object$last_mstep$value, df = n_free)
}
## logLik ##
# logLik method for fitted "nrm" objects: the maximised log-likelihood
# with "df" (free parameter count) and "nobs" (total observations across
# the per-group data blocks in reshOBJ$d) attributes.
logLik.nrm <- function(object, ...) {
  # free mean / variance parameters, counted exactly as in deviance.nrm()
  n_mean <- length(object$erg_distr$mean_est) - 1
  n_var <- object$ctrl$sigmaest * (length(object$erg_distr$sig_est) - 1)
  n_free <- ncol(object$reshOBJ$Qmat) + n_mean + n_var - length(object$ctrl$Clist)
  # observations = summed row counts of the per-group data blocks
  n_obs <- sum(vapply(object$reshOBJ$d, nrow, integer(1)))
  structure(object$last_mstep$value, df = n_free, nobs = n_obs)
}
| /R/deviance.nrm.R | no_license | ferambot/mcIRT | R | false | false | 771 | r | ## deviance ##
# Deviance method for "nrm" fit objects (duplicate copy):
# -2 * maximised log-likelihood with the free-parameter count as "df".
deviance.nrm <-
function(object, ...)
{
### number of parameters
# nme: free mean parameters (one fixed for identification)
nme <- length(object$erg_distr$mean_est) - 1
#object$ctrl$sigmaest
# nva: free variance parameters, counted only if sigma was estimated
nva <- object$ctrl$sigmaest * (length(object$erg_distr$sig_est) -1)
# item parameters (columns of Qmat) minus the number of constraints
npar <- ncol(object$reshOBJ$Qmat) + nme + nva - length(object$ctrl$Clist)
structure((-2)*object$last_mstep$value, df=npar)
}
## logLik ##
# logLik method for "nrm" fit objects (duplicate copy): the maximised
# log-likelihood with "df" (free parameters) and "nobs" (total rows
# across the per-group data blocks in reshOBJ$d) attributes.
logLik.nrm <-
function(object,...)
{
### number of parameters
nme <- length(object$erg_distr$mean_est) - 1
nva <- object$ctrl$sigmaest * (length(object$erg_distr$sig_est) -1)
npar <- ncol(object$reshOBJ$Qmat) + nme + nva - length(object$ctrl$Clist)
# number of observations
nobs <- sum(sapply(object$reshOBJ$d,nrow))
return(structure(object$last_mstep$value, df=npar, nobs=nobs))
}
|
library(shiny)
# BMI calculator UI.  The sidebar collects height (cm) and weight (kg);
# the main panel renders outputs "BMI" and "GROUP", which are computed by
# the matching server script (not part of this file).
shinyUI(fluidPage(
titlePanel("BMI (Body Mass Index) Calculator"),
sidebarLayout(
sidebarPanel(
numericInput("height", "What is your height (cm)?", value = NULL, min = 10, max = 250, step = 5),
numericInput("weight", "What is your weight (kg)?", value = NULL, min = 10, max = 250, step = 5),
# submitButton defers all reactive updates until the user clicks
submitButton("Calculate BMI")
),
mainPanel(
p('The BMI (Body Mass Index) is used by the medical profession to quickly and simply determine a person weight in regard to their height.'),
p('From a straight forward calculation the BMI factor can be gained and gives a measure which can be used to determine if a person is underweight, of normal weight, overweight or obese.'),
h3("Your BMI is "), verbatimTextOutput("BMI"),
p('You are '), verbatimTextOutput("GROUP"))
)
)
) | /ui.R | no_license | AngeLinda/shinyapp | R | false | false | 831 | r | library(shiny)
# BMI calculator UI (duplicate copy of the ui.R above): height/weight
# inputs with a submit button; outputs "BMI" and "GROUP" are rendered by
# the matching server script.
shinyUI(fluidPage(
titlePanel("BMI (Body Mass Index) Calculator"),
sidebarLayout(
sidebarPanel(
numericInput("height", "What is your height (cm)?", value = NULL, min = 10, max = 250, step = 5),
numericInput("weight", "What is your weight (kg)?", value = NULL, min = 10, max = 250, step = 5),
submitButton("Calculate BMI")
),
mainPanel(
p('The BMI (Body Mass Index) is used by the medical profession to quickly and simply determine a person weight in regard to their height.'),
p('From a straight forward calculation the BMI factor can be gained and gives a measure which can be used to determine if a person is underweight, of normal weight, overweight or obese.'),
h3("Your BMI is "), verbatimTextOutput("BMI"),
p('You are '), verbatimTextOutput("GROUP"))
)
)
) |
# Attach every package the FlexDotPlot shiny app needs at startup.
# BUG FIX / best practice: these previously used require(), which only
# returns FALSE when a package is missing and defers the failure to a
# confusing error later; library() stops immediately with a clear message.
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(ggplot2)
library(ggforce)
library(reshape2)
library(colourpicker)
library(FlexDotPlot)
library(htmltools)
library(bsplus)
library(DT)
| /inst/app/global.R | no_license | Simon-Leonard/FlexDotPlot | R | false | false | 225 | r | require("shiny")
# Package attach block (duplicate of the global.R above; its leading
# require("shiny") line sits fused onto the preceding separator line).
# NOTE(review): library() is preferable to require() for mandatory
# dependencies -- require() only returns FALSE when a package is missing.
require("shinydashboard")
require("shinyWidgets")
require("ggplot2")
require("ggforce")
require("reshape2")
require("colourpicker")
require("FlexDotPlot")
require("htmltools")
require("bsplus")
require("DT")
|
# Linear-regression walkthrough on Galton's father/son height data
# (UsingR::father.son): plot the raw points, eyeball a candidate line,
# measure its squared residuals, then compare against the least-squares
# fit from lm().
# NOTE(review): x, y and line are reused and overwritten throughout, so
# this script must be run strictly top to bottom.
library(UsingR)
#plot actual points
#first col has father'sheight in inches and last one has son's height in inches
father.son
#plot data
ggplot() + geom_point(data=father.son,aes(x=fheight,y=sheight))
#try to guess a line and plot our guessed line
x<-c(60,75)
y<-c(63,78)
line<-data.frame(x,y)
ggplot() + geom_point(data=father.son,aes(x=fheight,y=sheight))+geom_line(data=line,aes(x=x,y=y))
#plot points as per our guessed line
#by our guess slope (78-63)/(75-60) = 1 hence (y-63)/(x-60)=1 therfore line is y-63 = x-60 => y = x+3
x<-father.son$fheight
y<-father.son$sheight
group<-1:1078 # 1078 father son pairs
frame<-data.frame(x,y,group)
y<-x+3 #our guessed equation
fitted<-data.frame(x,y,group)
head(frame)
head(fitted)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")
x<-c(50,80) #see range of x values
y<-x+3
line<-data.frame(x,y)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")
#plot the differences or residuals for guessed line
#now we have our guessed line, we take a straight line error from each data point to the line on both ends
differences<-rbind(frame,fitted) #remember variables shall have same names in dataframes to bind those togethre i.e.x,y and group in this case
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")
#to see how well the line fits lets square all differences/residuals, we need a line with minimum square errors
sum((fitted$y-frame$y)^2)
#to find the best fitting line R has built in function for linear model
fit <- lm(y~x,frame)
intercept<-fit$coefficients[1]
slope<-fit$coefficients[2]
x<-c(57,78)
y<-x*slope+intercept
best_fitting_line<-data.frame(x,y)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")
#plot points as pr this best line
x<-father.son$fheight
y<-x*slope+intercept #our best fitting equation
best_fitted<-data.frame(x,y,group)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")
#plot differences for ths best line
least_differences<-rbind(frame,best_fitted)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")+geom_line(data=least_differences,aes(x=x,y=y,group=group),color="firebrick4")
#plot finally without guess
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")+geom_line(data=least_differences,aes(x=x,y=y,group=group),color="firebrick4")
| /R Linear regression/LinearRegression.R | no_license | roopaliv/linear-regression-in-R | R | false | false | 3,470 | r |
library(UsingR)
#plot actual points
#first col has father'sheight in inches and last one has son's height in inches
father.son
#plot data
ggplot() + geom_point(data=father.son,aes(x=fheight,y=sheight))
#try to guess a line and plot our guessed line
x<-c(60,75)
y<-c(63,78)
line<-data.frame(x,y)
ggplot() + geom_point(data=father.son,aes(x=fheight,y=sheight))+geom_line(data=line,aes(x=x,y=y))
#plot points as per our guessed line
#by our guess slope (78-63)/(75-60) = 1 hence (y-63)/(x-60)=1 therfore line is y-63 = x-60 => y = x+3
x<-father.son$fheight
y<-father.son$sheight
group<-1:1078 # 1078 father son pairs
frame<-data.frame(x,y,group)
y<-x+3 #our guessed equation
fitted<-data.frame(x,y,group)
head(frame)
head(fitted)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")
x<-c(50,80) #see range of x values
y<-x+3
line<-data.frame(x,y)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")
#plot the differences or residuals for guessed line
#now we have our guessed line, we take a straight line error from each data point to the line on both ends
differences<-rbind(frame,fitted) #remember variables shall have same names in dataframes to bind those togethre i.e.x,y and group in this case
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")
#to see how well the line fits lets square all differences/residuals, we need a line with minimum square errors
sum((fitted$y-frame$y)^2)
#to find the best fitting line R has built in function for linear model
fit <- lm(y~x,frame)
intercept<-fit$coefficients[1]
slope<-fit$coefficients[2]
x<-c(57,78)
y<-x*slope+intercept
best_fitting_line<-data.frame(x,y)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")
#plot points as pr this best line
x<-father.son$fheight
y<-x*slope+intercept #our best fitting equation
best_fitted<-data.frame(x,y,group)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")
#plot differences for ths best line
least_differences<-rbind(frame,best_fitted)
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_point(data=fitted,aes(x=x,y=y),color="red")+geom_line(data=line,aes(x=x,y=y),color="blue")+geom_line(data=differences,aes(x=x,y=y,group=group),color="darkorchid2")+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")+geom_line(data=least_differences,aes(x=x,y=y,group=group),color="firebrick4")
#plot finally without guess
ggplot()+ geom_point(data=frame, aes(x=x,y=y))+geom_line(data=best_fitting_line,aes(x=x,y=y),color="dodgerblue1")+geom_point(data=best_fitted,aes(x=x,y=y),color="goldenrod1")+geom_line(data=least_differences,aes(x=x,y=y,group=group),color="firebrick4")
|
## Data Cleaning - Report 5 ##
# Load the CANS assessment extract, derive age and length-of-stay fields,
# then reshape to one row per client episode with intake and discharge
# scores side by side; `df` holds episodes with both LDF totals present.
odp <- read.csv("Data/CANS2.csv", na.strings = "NA")
names(odp)
odp$sex = factor(odp$sex, labels=c("Female", "Male"))
library(lubridate)
# Parse the raw date columns into Date objects.
odp$date.of.birth <- ymd(odp$dob)
odp$date.of.e <- ymd(odp$doe)
odp$start.date <- ymd(odp$start_date)
odp$end.date <- ymd(odp$end_date)
# Age at episode start, in three units.
# NOTE(review): new_interval() is deprecated in current lubridate;
# interval() is the modern equivalent -- confirm the installed version.
odp$age.years <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "years")
odp$age.months <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "months")
odp$age.days <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "days")
age.labels <- c("0-4 years old", "5-7 years old", "8-11 years old", "12-14 years old", "15-17 years old", "18+ years old")
odp$age.groups <- cut(odp$age.years, breaks = c(0, 5, 8, 12, 15, 18, 100), labels = age.labels, include.lowest = TRUE)
# Length of stay in months, binned into reporting categories.
odp$length.of.stay.months <- new_interval(odp$start.date, odp$end.date) / duration(num = 1, units = "months")
los.labels <- c("Under 1 month", "1 to 2 months", "3 to 6 months", "7 to 11 months", "1 to 2 years", "2+ years")
odp$los.months <- cut(odp$length.of.stay.months, breaks = c(0, 1, 3, 7, 12, 24, 100), labels = los.labels, include.lowest = FALSE)
# Keep flagged rows with at least a one-month stay; drop the test client.
my.data = subset(odp, v3=="X" & length.of.stay.months >= 1 & client_num != 500000, )
library(reshape)
mdata <- melt(my.data, id = c("client_FID","agency_program_FID", "intake"))
# Drop client/intake pairs that occur more than once (ambiguous records).
df2 <- my.data[!(duplicated(my.data[c("client_FID","intake")]) | duplicated(my.data[c("client_FID","intake")], fromLast = TRUE)), ]
# Wide format: one row per episode, columns suffixed .intake / .discharge.
w <- reshape(df2,
timevar = "intake",
idvar = c("client_FID", "agency_program_FID", "start_date", "end_date"),
direction = "wide")
library(dplyr)
w2 = w %>% select(client_FID, agency_program_FID, ldf_total.intake, ldf_total.discharge, everything())
# Keep episodes with both intake and discharge LDF totals, as numerics.
df.ldf = subset(w2, ldf_total.intake != "NA" & ldf_total.discharge != "NA", )
df.ldf$ldf_total.intake = as.numeric(as.character(df.ldf$ldf_total.intake))
df.ldf$ldf_total.discharge = as.numeric(as.character(df.ldf$ldf_total.discharge))
df <- df.ldf
## Map agency FIDs to external agency IDs and agency_program FIDs to
## human-readable program-type labels on `df`.
##
## Rewritten to index with `%in%` on ID vectors instead of the previous
## attach()/detach() plus long `x == a | x == b | ...` chains: attach()
## is an anti-pattern (silent name masking), and `==` chains propagate NA
## into the subassignment index, which errors, whereas `%in%` treats NA
## as FALSE.  Labels and ID sets are unchanged, so df$agencyID and
## df$program come out identical for non-missing FIDs.

# Agency FID -> external agency ID (df$agencyID is created on first assignment).
df$agencyID[df$agency_FID.intake == 2] <- 10010
df$agencyID[df$agency_FID.intake == 3] <- 10011
df$agencyID[df$agency_FID.intake == 11] <- 10024
df$agencyID[df$agency_FID.intake == 12] <- 10034
df$agencyID[df$agency_FID.intake == 24] <- 10096

# Program FID -> program-type label.
pf <- df$agency_program_FID
df$program[pf %in% c(154:159, 209)] <- "Day Treatment"
df$program[pf %in% c(8, 11, 15, 32, 33, 42, 53:66, 91, 93, 94, 96,
                     100, 102, 103, 104, 138, 168, 169, 171, 173, 175,
                     177, 179, 180, 182, 184, 187, 197, 205)] <- "Family Foster Care"
df$program[pf %in% c(1, 22, 23, 27, 40, 41, 101, 111, 151, 188:190)] <- "Group Home"
df$program[pf %in% c(99, 112, 152, 185)] <- "Independent Living"
df$program[pf %in% c(3, 10, 16:18, 20, 21, 24, 31, 51, 87:89, 106, 109,
                     114, 139, 140, 164, 198, 199, 203, 207)] <- "Open Residential Treatment"
df$program[pf %in% c(167, 191, 192, 202, 204)] <- "Other"
df$program[pf %in% c(215:217, 219, 220)] <- "Outpatient Mental Health"
df$program[pf %in% c(4:6, 28:30, 208, 212:214)] <- "Secure Residential Treatment"
df$program[pf %in% c(2, 7, 9, 12:14, 19, 25, 34:39, 43, 44, 46:48, 50,
                     52, 67:86, 90, 92, 105, 107, 108, 110, 113, 115,
                     135:137, 141:150, 153, 160:163, 165, 170, 172, 174,
                     176, 178, 181, 183, 186, 193:196, 200, 201, 210,
                     211, 218)] <- "Treatment Foster Care"
rm(pf)  # drop the temporary alias so no stray global remains
# Second extract (CANSb5): identical pipeline to the CANS2 block above,
# ending in `df2` (episodes with both intake and discharge LDF totals).
odp <- read.csv("Data/CANSb5.csv", na.strings = "NA")
names(odp)
odp$sex = factor(odp$sex, labels=c("Female", "Male"))
library(lubridate)
# Parse the raw date columns into Date objects.
odp$date.of.birth <- ymd(odp$dob)
odp$date.of.e <- ymd(odp$doe)
odp$start.date <- ymd(odp$start_date)
odp$end.date <- ymd(odp$end_date)
# Age at episode start, in three units.
# NOTE(review): new_interval() is deprecated in current lubridate;
# interval() is the modern equivalent -- confirm the installed version.
odp$age.years <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "years")
odp$age.months <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "months")
odp$age.days <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "days")
age.labels <- c("0-4 years old", "5-7 years old", "8-11 years old", "12-14 years old", "15-17 years old", "18+ years old")
odp$age.groups <- cut(odp$age.years, breaks = c(0, 5, 8, 12, 15, 18, 100), labels = age.labels, include.lowest = TRUE)
# Length of stay in months, binned into reporting categories.
odp$length.of.stay.months <- new_interval(odp$start.date, odp$end.date) / duration(num = 1, units = "months")
los.labels <- c("Under 1 month", "1 to 2 months", "3 to 6 months", "7 to 11 months", "1 to 2 years", "2+ years")
odp$los.months <- cut(odp$length.of.stay.months, breaks = c(0, 1, 3, 7, 12, 24, 100), labels = los.labels, include.lowest = FALSE)
# Keep flagged rows with at least a one-month stay; drop the test client.
my.data = subset(odp, v3=="X" & length.of.stay.months >= 1 & client_num != 500000, )
library(reshape)
mdata <- melt(my.data, id = c("client_FID","agency_program_FID", "intake"))
# Drop client/intake pairs that occur more than once (ambiguous records).
df22 <- my.data[!(duplicated(my.data[c("client_FID","intake")]) | duplicated(my.data[c("client_FID","intake")], fromLast = TRUE)), ]
# Wide format: one row per episode, columns suffixed .intake / .discharge.
w <- reshape(df22,
timevar = "intake",
idvar = c("client_FID", "agency_program_FID", "start_date", "end_date"),
direction = "wide")
library(dplyr)
w2 = w %>% select(client_FID, agency_program_FID, ldf_total.intake, ldf_total.discharge, everything())
# Keep episodes with both intake and discharge LDF totals, as numerics.
df2.ldf2 = subset(w2, ldf_total.intake != "NA" & ldf_total.discharge != "NA", )
df2.ldf2$ldf_total.intake = as.numeric(as.character(df2.ldf2$ldf_total.intake))
df2.ldf2$ldf_total.discharge = as.numeric(as.character(df2.ldf2$ldf_total.discharge))
df2 <- df2.ldf2
## Same agency / program recode as above, applied to the second extract
## (`df2`).
##
## Rewritten to index with `%in%` on ID vectors instead of the previous
## attach()/detach() plus long `x == a | x == b | ...` chains: attach()
## is an anti-pattern (silent name masking), and `==` chains propagate NA
## into the subassignment index, which errors, whereas `%in%` treats NA
## as FALSE.  Labels and ID sets are unchanged, so df2$agencyID and
## df2$program come out identical for non-missing FIDs.

# Agency FID -> external agency ID (df2$agencyID is created on first assignment).
df2$agencyID[df2$agency_FID.intake == 2] <- 10010
df2$agencyID[df2$agency_FID.intake == 3] <- 10011
df2$agencyID[df2$agency_FID.intake == 11] <- 10024
df2$agencyID[df2$agency_FID.intake == 12] <- 10034
df2$agencyID[df2$agency_FID.intake == 24] <- 10096

# Program FID -> program-type label.
pf2 <- df2$agency_program_FID
df2$program[pf2 %in% c(154:159, 209)] <- "Day Treatment"
df2$program[pf2 %in% c(8, 11, 15, 32, 33, 42, 53:66, 91, 93, 94, 96,
                       100, 102, 103, 104, 138, 168, 169, 171, 173, 175,
                       177, 179, 180, 182, 184, 187, 197, 205)] <- "Family Foster Care"
df2$program[pf2 %in% c(1, 22, 23, 27, 40, 41, 101, 111, 151, 188:190)] <- "Group Home"
df2$program[pf2 %in% c(99, 112, 152, 185)] <- "Independent Living"
df2$program[pf2 %in% c(3, 10, 16:18, 20, 21, 24, 31, 51, 87:89, 106, 109,
                       114, 139, 140, 164, 198, 199, 203, 207)] <- "Open Residential Treatment"
df2$program[pf2 %in% c(167, 191, 192, 202, 204)] <- "Other"
df2$program[pf2 %in% c(215:217, 219, 220)] <- "Outpatient Mental Health"
df2$program[pf2 %in% c(4:6, 28:30, 208, 212:214)] <- "Secure Residential Treatment"
df2$program[pf2 %in% c(2, 7, 9, 12:14, 19, 25, 34:39, 43, 44, 46:48, 50,
                       52, 67:86, 90, 92, 105, 107, 108, 110, 113, 115,
                       135:137, 141:150, 153, 160:163, 165, 170, 172, 174,
                       176, 178, 181, 183, 186, 193:196, 200, 201, 210,
                       211, 218)] <- "Treatment Foster Care"
rm(pf2)  # drop the temporary alias so no stray global remains
| /Scripts/DataCleaning6A.R | no_license | abdalah/OACCA | R | false | false | 21,369 | r | ## Data Cleaning - Report 5 ##
# Duplicate copy of the CANS2 cleaning pipeline (the remainder of this
# copy is truncated below): derive age / length-of-stay fields and
# reshape to one row per episode with intake and discharge scores.
odp <- read.csv("Data/CANS2.csv", na.strings = "NA")
names(odp)
odp$sex = factor(odp$sex, labels=c("Female", "Male"))
library(lubridate)
# Parse the raw date columns into Date objects.
odp$date.of.birth <- ymd(odp$dob)
odp$date.of.e <- ymd(odp$doe)
odp$start.date <- ymd(odp$start_date)
odp$end.date <- ymd(odp$end_date)
# Age at episode start in years / months / days.
# NOTE(review): new_interval() is deprecated in current lubridate.
odp$age.years <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "years")
odp$age.months <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "months")
odp$age.days <- new_interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "days")
age.labels <- c("0-4 years old", "5-7 years old", "8-11 years old", "12-14 years old", "15-17 years old", "18+ years old")
odp$age.groups <- cut(odp$age.years, breaks = c(0, 5, 8, 12, 15, 18, 100), labels = age.labels, include.lowest = TRUE)
odp$length.of.stay.months <- new_interval(odp$start.date, odp$end.date) / duration(num = 1, units = "months")
los.labels <- c("Under 1 month", "1 to 2 months", "3 to 6 months", "7 to 11 months", "1 to 2 years", "2+ years")
odp$los.months <- cut(odp$length.of.stay.months, breaks = c(0, 1, 3, 7, 12, 24, 100), labels = los.labels, include.lowest = FALSE)
# Keep flagged rows with >= 1 month stay; drop the test client id.
my.data = subset(odp, v3=="X" & length.of.stay.months >= 1 & client_num != 500000, )
library(reshape)
mdata <- melt(my.data, id = c("client_FID","agency_program_FID", "intake"))
# Drop client/intake pairs occurring more than once (ambiguous records).
df2 <- my.data[!(duplicated(my.data[c("client_FID","intake")]) | duplicated(my.data[c("client_FID","intake")], fromLast = TRUE)), ]
w <- reshape(df2,
timevar = "intake",
idvar = c("client_FID", "agency_program_FID", "start_date", "end_date"),
direction = "wide")
library(dplyr)
w2 = w %>% select(client_FID, agency_program_FID, ldf_total.intake, ldf_total.discharge, everything())
df.ldf = subset(w2, ldf_total.intake != "NA" & ldf_total.discharge != "NA", )
df.ldf$ldf_total.intake = as.numeric(as.character(df.ldf$ldf_total.intake))
df.ldf$ldf_total.discharge = as.numeric(as.character(df.ldf$ldf_total.discharge))
df <- df.ldf
# NOTE(review): attach() masks names and is error-prone; the recoding
# below relies on it to resolve agency_program_FID.
attach(df)
df$agencyID[df$agency_FID.intake == 2] <- 10010
df$agencyID[df$agency_FID.intake == 3] <- 10011
df$agencyID[df$agency_FID.intake == 11] <- 10024
df$agencyID[df$agency_FID.intake == 12] <- 10034
df$agencyID[df$agency_FID.intake == 24] <- 10096
df$program[agency_program_FID == 154 | agency_program_FID == 155 | agency_program_FID == 156 | agency_program_FID == 157 | agency_program_FID == 158 | agency_program_FID == 159 | agency_program_FID == 209] <- "Day Treatment"
df$program[agency_program_FID == 8 |
agency_program_FID == 11 |
agency_program_FID == 15 |
agency_program_FID == 32 |
agency_program_FID == 33 |
agency_program_FID == 42 |
agency_program_FID == 53 |
agency_program_FID == 54 |
agency_program_FID == 55 |
agency_program_FID == 56 |
agency_program_FID == 57 |
agency_program_FID == 58 |
agency_program_FID == 59 |
agency_program_FID == 60 |
agency_program_FID == 61 |
agency_program_FID == 62 |
agency_program_FID == 63 |
agency_program_FID == 64 |
agency_program_FID == 65 |
agency_program_FID == 66 |
agency_program_FID == 91 |
agency_program_FID == 93 |
agency_program_FID == 94 |
agency_program_FID == 96 |
agency_program_FID == 100 |
agency_program_FID == 102 |
agency_program_FID == 103 |
agency_program_FID == 104 |
agency_program_FID == 138 |
agency_program_FID == 168 |
agency_program_FID == 169 |
agency_program_FID == 171 |
agency_program_FID == 173 |
agency_program_FID == 175 |
agency_program_FID == 177 |
agency_program_FID == 179 |
agency_program_FID == 180 |
agency_program_FID == 182 |
agency_program_FID == 184 |
agency_program_FID == 187 |
agency_program_FID == 197 |
agency_program_FID == 205] <- "Family Foster Care"
df$program[agency_program_FID == 1 |
agency_program_FID == 22 |
agency_program_FID == 23 |
agency_program_FID == 27 |
agency_program_FID == 40 |
agency_program_FID == 41 |
agency_program_FID == 101 |
agency_program_FID == 111 |
agency_program_FID == 151 |
agency_program_FID == 188 |
agency_program_FID == 189 |
agency_program_FID == 190] <- "Group Home"
df$program[agency_program_FID == 99 | agency_program_FID == 112 | agency_program_FID == 152 | agency_program_FID == 185] <- "Independent Living"
df$program[agency_program_FID == 3 |
agency_program_FID == 10 |
agency_program_FID == 16 |
agency_program_FID == 17 |
agency_program_FID == 18 |
agency_program_FID == 20 |
agency_program_FID == 21 |
agency_program_FID == 24 |
agency_program_FID == 31 |
agency_program_FID == 51 |
agency_program_FID == 87 |
agency_program_FID == 88 |
agency_program_FID == 89 |
agency_program_FID == 106 |
agency_program_FID == 109 |
agency_program_FID == 114 |
agency_program_FID == 139 |
agency_program_FID == 140 |
agency_program_FID == 164 |
agency_program_FID == 198 |
agency_program_FID == 199 |
agency_program_FID == 203 |
agency_program_FID == 207] <- "Open Residential Treatment"
df$program[agency_program_FID == 167 |
agency_program_FID == 191 |
agency_program_FID == 192 |
agency_program_FID == 202 |
agency_program_FID == 204] <- "Other"
df$program[agency_program_FID == 215 |
agency_program_FID == 216 |
agency_program_FID == 217 |
agency_program_FID == 219 |
agency_program_FID == 220] <- "Outpatient Mental Health"
df$program[agency_program_FID == 4 |
agency_program_FID == 5 |
agency_program_FID == 6 |
agency_program_FID == 28 |
agency_program_FID == 29 |
agency_program_FID == 30 |
agency_program_FID == 208 |
agency_program_FID == 212 |
agency_program_FID == 213 |
agency_program_FID == 214] <- "Secure Residential Treatment"
df$program[agency_program_FID == 2 |
agency_program_FID == 7 |
agency_program_FID == 9 |
agency_program_FID == 12 |
agency_program_FID == 13 |
agency_program_FID == 14 |
agency_program_FID == 19 |
agency_program_FID == 25 |
agency_program_FID == 34 |
agency_program_FID == 35 |
agency_program_FID == 36 |
agency_program_FID == 37 |
agency_program_FID == 38 |
agency_program_FID == 39 |
agency_program_FID == 43 |
agency_program_FID == 44 |
agency_program_FID == 46 |
agency_program_FID == 47 |
agency_program_FID == 48 |
agency_program_FID == 50 |
agency_program_FID == 52 |
agency_program_FID == 67 |
agency_program_FID == 68 |
agency_program_FID == 69 |
agency_program_FID == 70 |
agency_program_FID == 71 |
agency_program_FID == 72 |
agency_program_FID == 73 |
agency_program_FID == 74 |
agency_program_FID == 75 |
agency_program_FID == 76 |
agency_program_FID == 77 |
agency_program_FID == 78 |
agency_program_FID == 79 |
agency_program_FID == 80 |
agency_program_FID == 81 |
agency_program_FID == 82 |
agency_program_FID == 83 |
agency_program_FID == 84 |
agency_program_FID == 85 |
agency_program_FID == 86 |
agency_program_FID == 90 |
agency_program_FID == 92 |
agency_program_FID == 105 |
agency_program_FID == 107 |
agency_program_FID == 108 |
agency_program_FID == 110 |
agency_program_FID == 113 |
agency_program_FID == 115 |
agency_program_FID == 135 |
agency_program_FID == 136 |
agency_program_FID == 137 |
agency_program_FID == 141 |
agency_program_FID == 142 |
agency_program_FID == 143 |
agency_program_FID == 144 |
agency_program_FID == 145 |
agency_program_FID == 146 |
agency_program_FID == 147 |
agency_program_FID == 148 |
agency_program_FID == 149 |
agency_program_FID == 150 |
agency_program_FID == 153 |
agency_program_FID == 160 |
agency_program_FID == 161 |
agency_program_FID == 162 |
agency_program_FID == 163 |
agency_program_FID == 165 |
agency_program_FID == 170 |
agency_program_FID == 172 |
agency_program_FID == 174 |
agency_program_FID == 176 |
agency_program_FID == 178 |
agency_program_FID == 181 |
agency_program_FID == 183 |
agency_program_FID == 186 |
agency_program_FID == 193 |
agency_program_FID == 194 |
agency_program_FID == 195 |
agency_program_FID == 196 |
agency_program_FID == 200 |
agency_program_FID == 201 |
agency_program_FID == 210 |
agency_program_FID == 211 |
agency_program_FID == 218] <- "Treatment Foster Care"
detach(df)
## Data Cleaning - Report 5 (second cohort, CANSb5) ##
## Same pipeline as the CANS2 pass: reshape to one row per placement and
## label each record with a public agency ID and a program category.
odp <- read.csv("Data/CANSb5.csv", na.strings = "NA")
names(odp)
odp$sex <- factor(odp$sex, labels = c("Female", "Male"))

library(lubridate)
# Parse the raw date strings into Date objects.
odp$date.of.birth <- ymd(odp$dob)
odp$date.of.e <- ymd(odp$doe)
odp$start.date <- ymd(odp$start_date)
odp$end.date <- ymd(odp$end_date)

# Client age at placement start, in several units.
# interval() replaces the deprecated lubridate::new_interval().
odp$age.years <- interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "years")
odp$age.months <- interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "months")
odp$age.days <- interval(odp$date.of.birth, odp$start.date) / duration(num = 1, units = "days")
age.labels <- c("0-4 years old", "5-7 years old", "8-11 years old",
                "12-14 years old", "15-17 years old", "18+ years old")
odp$age.groups <- cut(odp$age.years, breaks = c(0, 5, 8, 12, 15, 18, 100),
                      labels = age.labels, include.lowest = TRUE)

# Length of stay in months, binned into reporting categories.
odp$length.of.stay.months <- interval(odp$start.date, odp$end.date) / duration(num = 1, units = "months")
los.labels <- c("Under 1 month", "1 to 2 months", "3 to 6 months",
                "7 to 11 months", "1 to 2 years", "2+ years")
odp$los.months <- cut(odp$length.of.stay.months, breaks = c(0, 1, 3, 7, 12, 24, 100),
                      labels = los.labels, include.lowest = FALSE)

# Keep flagged rows ("X"), stays of at least one month, and drop the test client.
my.data <- subset(odp, v3 == "X" & length.of.stay.months >= 1 & client_num != 500000)

library(reshape)
mdata <- melt(my.data, id = c("client_FID", "agency_program_FID", "intake"))

# Drop any client/intake key that appears more than once -- keep only clients
# with exactly one record per assessment type.
dup.key <- my.data[c("client_FID", "intake")]
df22 <- my.data[!(duplicated(dup.key) | duplicated(dup.key, fromLast = TRUE)), ]

# Wide format: one row per placement, intake/discharge scores as columns.
w <- reshape(df22,
             timevar = "intake",
             idvar = c("client_FID", "agency_program_FID", "start_date", "end_date"),
             direction = "wide")

library(dplyr)
w2 <- w %>% select(client_FID, agency_program_FID, ldf_total.intake, ldf_total.discharge, everything())

# Require both an intake and a discharge LDF score. !is.na() replaces the
# original comparison against the string "NA", which only worked because NA
# conditions are dropped by subset().
df2.ldf2 <- subset(w2, !is.na(ldf_total.intake) & !is.na(ldf_total.discharge))
df2.ldf2$ldf_total.intake <- as.numeric(as.character(df2.ldf2$ldf_total.intake))
df2.ldf2$ldf_total.discharge <- as.numeric(as.character(df2.ldf2$ldf_total.discharge))
df2 <- df2.ldf2

# Map internal agency FIDs to public agency IDs. Unmatched FIDs stay NA,
# exactly as with the original element-wise assignments.
agency.map <- c(`2` = 10010, `3` = 10011, `11` = 10024, `12` = 10034, `24` = 10096)
df2$agencyID <- unname(agency.map[as.character(df2$agency_FID.intake)])

# Program category lookup. Each agency_program_FID belongs to exactly one
# category, so the assignment order is irrelevant. %in% against a vector
# replaces the original wall of chained == comparisons, and no
# attach()/detach() is required.
program.ids <- list(
  "Day Treatment" = c(154:159, 209),
  "Family Foster Care" = c(8, 11, 15, 32, 33, 42, 53:66, 91, 93, 94, 96, 100,
                           102:104, 138, 168, 169, 171, 173, 175, 177, 179,
                           180, 182, 184, 187, 197, 205),
  "Group Home" = c(1, 22, 23, 27, 40, 41, 101, 111, 151, 188, 189, 190),
  "Independent Living" = c(99, 112, 152, 185),
  "Open Residential Treatment" = c(3, 10, 16:18, 20, 21, 24, 31, 51, 87:89,
                                   106, 109, 114, 139, 140, 164, 198, 199,
                                   203, 207),
  "Other" = c(167, 191, 192, 202, 204),
  "Outpatient Mental Health" = c(215:217, 219, 220),
  "Secure Residential Treatment" = c(4:6, 28:30, 208, 212:214),
  "Treatment Foster Care" = c(2, 7, 9, 12:14, 19, 25, 34:39, 43, 44, 46:48, 50,
                              52, 67:86, 90, 92, 105, 107, 108, 110, 113, 115,
                              135:137, 141:150, 153, 160:163, 165, 170, 172,
                              174, 176, 178, 181, 183, 186, 193:196, 200, 201,
                              210, 211, 218)
)
for (prog in names(program.ids)) {
  df2$program[df2$agency_program_FID %in% program.ids[[prog]]] <- prog
}
|
#=====================================================================================#
# R script for Andrew's exercise during the Biodiversity Modelling 2021 Summer School #
#=====================================================================================#
# Estimate the success probability of a Binomial(size = 4) model for the
# observed side-hit counts: first by grid-search maximum likelihood, then by
# simulated annealing.
dataset <- readr::read_csv2("https://raw.githubusercontent.com/BiodiversityModelling2021/Elliot/main/sides.csv")
sides <- dataset$sides_hit

# Exploration: observed distribution with a naive Binomial(4, 0.5) overlay.
hist(sides, freq = FALSE)
curve(dbinom(x, size = 4, prob = 0.5), n = 5, type = "l", xlim = c(0, 4), add = TRUE)

# Likelihood estimation ----
## Log-likelihood profile of p over a coarse grid.
p_grid <- seq(0.01, 1, 0.05)
LL <- vapply(p_grid,
             function(p) sum(dbinom(x = sides, size = 4, prob = p, log = TRUE)),
             numeric(1))
P_estimated <- p_grid[which.max(LL)]

## Plotting likelihood profile
plot(x = p_grid, y = LL)
abline(v = P_estimated, col = "red")

## Sampling from the candidate distribution as a quick predictive check.
test1 <- rbinom(20, size = 4, prob = P_estimated)
hist(test1, freq = FALSE)

# Simulated annealing ----
## h:    log-likelihood of the data given parameter `pars`.
## c_x:  candidate generator -- uniform draw over the parameter bounds.
## T_fn: exponentially decaying temperature schedule.
h <- function(obs, pars) { sum(dbinom(x = obs, size = 4, prob = pars, log = TRUE)) }
c_x <- function(pars_lo, pars_hi) { runif(1, pars_lo, pars_hi) }
T_fn <- function(T0, alpha, step) { T0 * exp(alpha * step) }

## Initialization of parameters
T0 <- 10
alpha <- -0.001
nsteps <- 10000
pars0 <- 0.01
pars_lo <- 0
pars_hi <- 1
# Preallocate the trace AFTER nsteps is defined; the original created `res`
# before assigning nsteps, which errors in a fresh session.
res <- matrix(nrow = nsteps, ncol = 3)

## Main loop: propose a candidate, accept improvements greedily, otherwise
## accept with a Metropolis probability that shrinks as temperature drops.
for (step in seq_len(nsteps)) {
  pars1 <- c_x(pars_lo, pars_hi)  # the original's `pars1 <- pars0` was dead code
  diff <- h(sides, pars1) - h(sides, pars0)
  if (diff > 0) {
    pars0 <- pars1
  } else {
    p_accept <- exp(diff / T_fn(T0, alpha, step))
    if (runif(1) < p_accept) {
      pars0 <- pars1
    }
  }
  res[step, ] <- c(step, pars0, h(sides, pars0))
}

## Exploration of results
plot(seq_len(nsteps), res[, 3], type = "l", xlab = "Time step", ylab = "h(x)",
     cex = 2, log = "x")
hist(sides, freq = FALSE)
# Overlay the fitted Binomial pmf taken from a given annealing step.
add_model <- function(step) {
  p_est <- res[step, 2]
  curve(dbinom(x, size = 4, prob = p_est), n = 5, type = "l", xlim = c(0, 4), add = TRUE)
}
add_model(10)
add_model(100)
add_model(1000)
add_model(10000)

## Sampling from the final fitted distribution.
test2 <- rbinom(20, size = 4, prob = res[10000, 2])
hist(test2, freq = FALSE)
| /script.R | no_license | BiodiversityModelling2021/Elliot | R | false | false | 2,268 | r | #=====================================================================================#
# R script for Andrew's exercise during the Biodiversity Modelling 2021 Summer School #
#=====================================================================================#
# Estimate the success probability of a Binomial(size = 4) model for the
# observed side-hit counts: first by grid-search maximum likelihood, then by
# simulated annealing.
dataset <- readr::read_csv2("https://raw.githubusercontent.com/BiodiversityModelling2021/Elliot/main/sides.csv")
sides <- dataset$sides_hit

# Exploration: observed distribution with a naive Binomial(4, 0.5) overlay.
hist(sides, freq = FALSE)
curve(dbinom(x, size = 4, prob = 0.5), n = 5, type = "l", xlim = c(0, 4), add = TRUE)

# Likelihood estimation ----
## Log-likelihood profile of p over a coarse grid.
p_grid <- seq(0.01, 1, 0.05)
LL <- vapply(p_grid,
             function(p) sum(dbinom(x = sides, size = 4, prob = p, log = TRUE)),
             numeric(1))
P_estimated <- p_grid[which.max(LL)]

## Plotting likelihood profile
plot(x = p_grid, y = LL)
abline(v = P_estimated, col = "red")

## Sampling from the candidate distribution as a quick predictive check.
test1 <- rbinom(20, size = 4, prob = P_estimated)
hist(test1, freq = FALSE)

# Simulated annealing ----
## h:    log-likelihood of the data given parameter `pars`.
## c_x:  candidate generator -- uniform draw over the parameter bounds.
## T_fn: exponentially decaying temperature schedule.
h <- function(obs, pars) { sum(dbinom(x = obs, size = 4, prob = pars, log = TRUE)) }
c_x <- function(pars_lo, pars_hi) { runif(1, pars_lo, pars_hi) }
T_fn <- function(T0, alpha, step) { T0 * exp(alpha * step) }

## Initialization of parameters
T0 <- 10
alpha <- -0.001
nsteps <- 10000
pars0 <- 0.01
pars_lo <- 0
pars_hi <- 1
# Preallocate the trace AFTER nsteps is defined; the original created `res`
# before assigning nsteps, which errors in a fresh session.
res <- matrix(nrow = nsteps, ncol = 3)

## Main loop: propose a candidate, accept improvements greedily, otherwise
## accept with a Metropolis probability that shrinks as temperature drops.
for (step in seq_len(nsteps)) {
  pars1 <- c_x(pars_lo, pars_hi)  # the original's `pars1 <- pars0` was dead code
  diff <- h(sides, pars1) - h(sides, pars0)
  if (diff > 0) {
    pars0 <- pars1
  } else {
    p_accept <- exp(diff / T_fn(T0, alpha, step))
    if (runif(1) < p_accept) {
      pars0 <- pars1
    }
  }
  res[step, ] <- c(step, pars0, h(sides, pars0))
}

## Exploration of results
plot(seq_len(nsteps), res[, 3], type = "l", xlab = "Time step", ylab = "h(x)",
     cex = 2, log = "x")
hist(sides, freq = FALSE)
# Overlay the fitted Binomial pmf taken from a given annealing step.
add_model <- function(step) {
  p_est <- res[step, 2]
  curve(dbinom(x, size = 4, prob = p_est), n = 5, type = "l", xlim = c(0, 4), add = TRUE)
}
add_model(10)
add_model(100)
add_model(1000)
add_model(10000)

## Sampling from the final fitted distribution.
test2 <- rbinom(20, size = 4, prob = res[10000, 2])
hist(test2, freq = FALSE)
|
% File src/library/methods/man/initialize-methods.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2007 R Core Team
% Distributed under GPL 2 or later
\name{initialize-methods}
\docType{methods}
\title{Methods to Initialize New Objects from a Class}
\alias{initialize-methods}
\alias{initialize,ANY-method}
\alias{initialize,traceable-method}
\alias{initialize,signature-method}
\alias{initialize,environment-method}
\alias{initialize,.environment-method}
\description{
The arguments to function \code{\link{new}} to create an object from a
particular class can be interpreted specially for that class, by the
definition of a method for function \code{initialize} for the class.
This documentation describes some existing methods, and also outlines
how to write new ones.
}
\section{Methods}{
\describe{
\item{\code{signature(.Object = "ANY")}}{
The default method for \code{initialize} takes either named or
unnamed arguments. Argument names must be the names of slots in
this class definition, and the corresponding arguments must be
valid objects for the slot (that is, have the same class as
specified for the slot, or some superclass of that class). If the
object comes from a superclass, it is not coerced strictly, so
normally it will retain its current class (specifically,
\code{\link{as}(object, Class, strict = FALSE)}).
Unnamed arguments must be objects of this class, of one of its
superclasses, or one of its subclasses (from the class, from a
class this class extends, or from a class that extends this
class). If the object is from a superclass, this normally defines
some of the slots in the object. If the object is from a
subclass, the new object is that argument, coerced to the current
class.
Unnamed arguments are processed first, in the order they appear.
Then named arguments are processed. Therefore, explicit values
for slots always override any values inferred from superclass or
subclass arguments.
}
\item{\code{signature(.Object = "traceable")}}{
Objects of a class that extends \code{traceable} are used to
implement debug tracing (see class \linkS4class{traceable} and
\code{\link{trace}}).
The \code{initialize} method for these classes takes special
arguments \code{def, tracer, exit, at, print}. The first of these
is the object to use as the original definition (e.g., a
function). The others correspond to the arguments to
\code{\link{trace}}.
}
\item{\code{signature(.Object = "environment")}, \code{signature(.Object = ".environment")}}{
The \code{initialize} method for environments takes a named list
of objects to be used to initialize the environment. Subclasses
of \code{"environment"} inherit an initialize method through
\code{".environment"}, which has the additional effect of
allocating a new environment. If you define your own method for
such a subclass, be sure either to call the existing method via
\code{\link{callNextMethod}} or allocate an environment in your
method, since environments are references and are not duplicated
automatically.
}
\item{\code{signature(.Object = "signature")}}{
This is a method for internal use only.
It takes an optional \code{functionDef} argument to provide a
generic function with a \code{signature} slot to define the
argument names. See \link{Methods_Details} for details.
}
}
}
\section{Writing Initialization Methods}{
Initialization methods provide a general mechanism corresponding to
generator functions in other languages.
The arguments to \code{\link{initialize}} are \code{.Object} and
\dots. Nearly always, \code{initialize} is called from \code{new},
not directly. The \code{.Object} argument is then the
prototype object from the class.
Two techniques are often appropriate for \code{initialize} methods:
special argument names and \code{callNextMethod}.
You may want argument names that are more natural to your users than
the (default) slot names. These will be the formal arguments to
your method definition, in addition to \code{.Object} (always) and
\dots (optionally). For example, the method for class
\code{"traceable"} documented above would be created by a call to
\code{\link{setMethod}} of the form:
\preformatted{ setMethod("initialize", "traceable",
function(.Object, def, tracer, exit, at, print) \dots
)
}
In this example, no other arguments are meaningful, and the resulting
method will throw an error if other names are supplied.
When your new class extends another class, you may want to call the
initialize method for this superclass (either a special method or the
default). For example, suppose you want to define a method for your
class, with special argument \code{x}, but you also want users to be
able to set slots specifically. If you want \code{x} to override the
slot information, the beginning of your method definition might look
something like this:
\preformatted{ function(.Object, x, ...) \{
    .Object <- callNextMethod(.Object, ...)
if(!missing(x)) \{ # do something with x
}
You could also choose to have the inherited method override, by first
interpreting \code{x}, and then calling the next method.
}
\keyword{methods}
\keyword{programming}
| /bin/R-3.5.1/src/library/methods/man/initialize-methods.Rd | permissive | lifebit-ai/exomedepth | R | false | false | 5,496 | rd | % File src/library/methods/man/initialize-methods.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2007 R Core Team
% Distributed under GPL 2 or later
\name{initialize-methods}
\docType{methods}
\title{Methods to Initialize New Objects from a Class}
\alias{initialize-methods}
\alias{initialize,ANY-method}
\alias{initialize,traceable-method}
\alias{initialize,signature-method}
\alias{initialize,environment-method}
\alias{initialize,.environment-method}
\description{
The arguments to function \code{\link{new}} to create an object from a
particular class can be interpreted specially for that class, by the
definition of a method for function \code{initialize} for the class.
This documentation describes some existing methods, and also outlines
how to write new ones.
}
\section{Methods}{
\describe{
\item{\code{signature(.Object = "ANY")}}{
The default method for \code{initialize} takes either named or
unnamed arguments. Argument names must be the names of slots in
this class definition, and the corresponding arguments must be
valid objects for the slot (that is, have the same class as
specified for the slot, or some superclass of that class). If the
object comes from a superclass, it is not coerced strictly, so
normally it will retain its current class (specifically,
\code{\link{as}(object, Class, strict = FALSE)}).
Unnamed arguments must be objects of this class, of one of its
superclasses, or one of its subclasses (from the class, from a
class this class extends, or from a class that extends this
class). If the object is from a superclass, this normally defines
some of the slots in the object. If the object is from a
subclass, the new object is that argument, coerced to the current
class.
Unnamed arguments are processed first, in the order they appear.
Then named arguments are processed. Therefore, explicit values
for slots always override any values inferred from superclass or
subclass arguments.
}
\item{\code{signature(.Object = "traceable")}}{
Objects of a class that extends \code{traceable} are used to
implement debug tracing (see class \linkS4class{traceable} and
\code{\link{trace}}).
The \code{initialize} method for these classes takes special
arguments \code{def, tracer, exit, at, print}. The first of these
is the object to use as the original definition (e.g., a
function). The others correspond to the arguments to
\code{\link{trace}}.
}
\item{\code{signature(.Object = "environment")}, \code{signature(.Object = ".environment")}}{
The \code{initialize} method for environments takes a named list
of objects to be used to initialize the environment. Subclasses
of \code{"environment"} inherit an initialize method through
\code{".environment"}, which has the additional effect of
allocating a new environment. If you define your own method for
such a subclass, be sure either to call the existing method via
\code{\link{callNextMethod}} or allocate an environment in your
method, since environments are references and are not duplicated
automatically.
}
\item{\code{signature(.Object = "signature")}}{
This is a method for internal use only.
It takes an optional \code{functionDef} argument to provide a
generic function with a \code{signature} slot to define the
argument names. See \link{Methods_Details} for details.
}
}
}
\section{Writing Initialization Methods}{
Initialization methods provide a general mechanism corresponding to
generator functions in other languages.
The arguments to \code{\link{initialize}} are \code{.Object} and
\dots. Nearly always, \code{initialize} is called from \code{new},
not directly. The \code{.Object} argument is then the
prototype object from the class.
Two techniques are often appropriate for \code{initialize} methods:
special argument names and \code{callNextMethod}.
You may want argument names that are more natural to your users than
the (default) slot names. These will be the formal arguments to
your method definition, in addition to \code{.Object} (always) and
\dots (optionally). For example, the method for class
\code{"traceable"} documented above would be created by a call to
\code{\link{setMethod}} of the form:
\preformatted{ setMethod("initialize", "traceable",
function(.Object, def, tracer, exit, at, print) \dots
)
}
In this example, no other arguments are meaningful, and the resulting
method will throw an error if other names are supplied.
When your new class extends another class, you may want to call the
initialize method for this superclass (either a special method or the
default). For example, suppose you want to define a method for your
class, with special argument \code{x}, but you also want users to be
able to set slots specifically. If you want \code{x} to override the
slot information, the beginning of your method definition might look
something like this:
\preformatted{ function(.Object, x, ...) \{
    .Object <- callNextMethod(.Object, ...)
if(!missing(x)) \{ # do something with x
}
You could also choose to have the inherited method override, by first
interpreting \code{x}, and then calling the next method.
}
\keyword{methods}
\keyword{programming}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_fit_methods.R
\name{fit_method.rlm}
\alias{fit_method.rlm}
\title{Compute robust linear model fit of growth trajectory}
\usage{
\method{fit_method}{rlm}(dat, ...)
}
\arguments{
\item{dat}{data frame specifying x and y coordinates of data to fit}
\item{\ldots}{additional parameters passed to \code{\link[MASS]{rlm}}, also \code{p} which is the order of polynomial fit (default is quadratic, p=2)}
}
\description{
Compute robust linear model fit of growth trajectory
}
\note{
The trajectory fitting functions are most easily accessed through calling \code{\link{fit_trajectory}} with the \code{method} argument to specify the modeling approach to use.
These fitting functions can easily be replaced by simply calling the associated R methods, but are provided for convenience to standardize input/output to simplify swapping fitting methods.
}
| /man/fit_method.rlm.Rd | permissive | hafen/hbgd | R | false | true | 932 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_fit_methods.R
\name{fit_method.rlm}
\alias{fit_method.rlm}
\title{Compute robust linear model fit of growth trajectory}
\usage{
\method{fit_method}{rlm}(dat, ...)
}
\arguments{
\item{dat}{data frame specifying x and y coordinates of data to fit}
\item{\ldots}{additional parameters passed to \code{\link[MASS]{rlm}}, also \code{p} which is the order of polynomial fit (default is quadratic, p=2)}
}
\description{
Compute robust linear model fit of growth trajectory
}
\note{
The trajectory fitting functions are most easily accessed through calling \code{\link{fit_trajectory}} with the \code{method} argument to specify the modeling approach to use.
These fitting functions can easily be replaced by simply calling the associated R methods, but are provided for convenience to standardize input/output to simplify swapping fitting methods.
}
|
############################################################
###### topic modelling ######
############################################################
library(topicmodels)
library(NLP)
library(tm)
string_convert <- function(lyr) {
  # Expand a bag-of-words count table into one space-separated string per row.
  #
  # lyr: data frame whose first column is an identifier, whose LAST column is
  #      ignored, and whose middle columns (2:(ncol - 1)) hold per-word counts;
  #      column names are the words themselves.
  # Returns a one-column data frame (column "V1"): row i repeats each word of
  # row i as many times as its count, joined by single spaces. A leading space
  # is kept for byte-compatibility with the original paste(NULL, ...) output.
  #
  # Improvements over the original: the output vector is preallocated instead
  # of growing a data.frame cell by cell, the inner column loop is replaced by
  # a single rep(), and an all-zero (or all-NA) row now yields " " instead of
  # erroring on a NULL assignment.
  word.cols <- 2:(ncol(lyr) - 1)
  word.names <- colnames(lyr)[word.cols]
  out <- character(nrow(lyr))
  for (row in seq_len(nrow(lyr))) {
    counts <- as.numeric(unlist(lyr[row, word.cols]))
    hits <- which(!is.na(counts) & counts != 0)
    expanded <- rep(word.names[hits], times = counts[hits])
    out[row] <- paste("", paste(expanded, collapse = " "))
  }
  return(data.frame(V1 = out, stringsAsFactors = FALSE))
}
# Re-normalise raw word counts within each topic label.
#
# lyr:   data frame of per-song word counts; the hard-coded indices below
#        assume 5000 word columns sitting at positions 3:5002 after the
#        cbind -- TODO(review): confirm against the actual lyr layout.
# label: integer topic label per song (length nrow(lyr) - 1, because row 237
#        of lyr is dropped to match the corpus built elsewhere in this script).
# Returns a character matrix: word names in column 1, one per-topic
# probability column after it, with rows 1, 2 and 5:29 removed (presumably
# dataset-specific exclusions -- TODO(review): confirm why these rows drop).
adjust_prob<-function(lyr,label){
# Drop row 237 so lyr rows align with the label vector.
new_lyr<-lyr[-237,]
df<-cbind(label,new_lyr)
# One row per topic, 5000 word columns; seq(20) is only placeholder fill.
result<-data.frame(matrix(seq(20),nrow=max(as.numeric(label)),ncol=5000))
for(lb in 1:max(as.numeric(label))) {
# Songs assigned to topic lb (column 1 of df is the label).
holder<-subset(df,df[,1]==lb)
# Total count per word across those songs (columns 3:5002 hold the counts).
holder<-colSums(holder[,3:5002])
tot<-sum(holder)
# Share of the topic's total word mass carried by each word.
prop<-holder/tot
result[lb,]<-prop
}
# Transpose to words-in-rows and prepend the word names as column 1
# (cbind with a character column coerces the whole result to character).
result<-t(result)
result<-cbind(colnames(new_lyr)[2:5001],result)
# Drop rows 1, 2 and 5:29 -- data-specific exclusions.
result <- result[-c(1,2,5:29),]
return(result)
}
# Flatten each song's bag-of-words counts into one text string per song.
lyrics <- string_convert(lyric)
# The original element-by-element copy loop over 1:2350 only extracted
# column 1; take the column directly instead of hard-coding the row count.
Vlyrics <- lyrics[[1]]
# Row 237 is excluded throughout this analysis -- presumably a bad record;
# TODO(review): confirm why this row is dropped.
Vlyrics <- Vlyrics[-237]
# Build and clean the corpus.
Vlyrics <- VCorpus(VectorSource(Vlyrics))
Vlyrics <- tm_map(Vlyrics, stripWhitespace)
Vlyrics <- tm_map(Vlyrics, removeWords, stopwords("english"))
# Fix: the tm control option is "weighting", not "weight"; the original typo
# was silently ignored, so the DTM fell back to plain term-frequency weights.
dtm <- DocumentTermMatrix(Vlyrics, control = list(weighting = weightTfIdf))
# Fit a 20-topic LDA model with fixed seeds for reproducibility.
k3 <- 20
set.seed(123)
result20 <- LDA(dtm, k = k3, method = "VEM", control = list(seed = 2010))
label20 <- topics(result20, 1)   # most likely topic per document
rank20 <- terms(result20, 100)   # top 100 terms per topic
write.csv(label20, file = "label20.csv")
write.csv(rank20, file = "termsrank20.csv")
# Topic-conditional word probabilities, re-normalised over the raw counts.
adjustprob20 <- adjust_prob(lyr, label20)
prob_new <- adjustprob20[, -1]
# Convert the character matrix to numeric in one step. Sizing by the actual
# dimensions fixes the original 5000x20 preallocation, whose extra unfilled
# rows made the rownames<- assignment below fail on a length mismatch.
prob_num <- matrix(as.numeric(prob_new), nrow = nrow(prob_new), ncol = ncol(prob_new))
rownames(prob_num) <- adjustprob20[, 1]
############################################################ | /doc/training_topic_modelling.R | no_license | TZstatsADS/Fall2016-proj4-qy2166 | R | false | false | 1,858 | r | ############################################################
###### topic modelling ######
############################################################
library(topicmodels)
library(NLP)
library(tm)
string_convert <- function(lyr) {
  # Expand a bag-of-words count table into one space-separated string per row.
  #
  # lyr: data frame whose first column is an identifier, whose LAST column is
  #      ignored, and whose middle columns (2:(ncol - 1)) hold per-word counts;
  #      column names are the words themselves.
  # Returns a one-column data frame (column "V1"): row i repeats each word of
  # row i as many times as its count, joined by single spaces. A leading space
  # is kept for byte-compatibility with the original paste(NULL, ...) output.
  #
  # Improvements over the original: the output vector is preallocated instead
  # of growing a data.frame cell by cell, the inner column loop is replaced by
  # a single rep(), and an all-zero (or all-NA) row now yields " " instead of
  # erroring on a NULL assignment.
  word.cols <- 2:(ncol(lyr) - 1)
  word.names <- colnames(lyr)[word.cols]
  out <- character(nrow(lyr))
  for (row in seq_len(nrow(lyr))) {
    counts <- as.numeric(unlist(lyr[row, word.cols]))
    hits <- which(!is.na(counts) & counts != 0)
    expanded <- rep(word.names[hits], times = counts[hits])
    out[row] <- paste("", paste(expanded, collapse = " "))
  }
  return(data.frame(V1 = out, stringsAsFactors = FALSE))
}
# Re-normalise raw word counts within each topic label.
#
# lyr:   data frame of per-song word counts; the hard-coded indices below
#        assume 5000 word columns sitting at positions 3:5002 after the
#        cbind -- TODO(review): confirm against the actual lyr layout.
# label: integer topic label per song (length nrow(lyr) - 1, because row 237
#        of lyr is dropped to match the corpus built elsewhere in this script).
# Returns a character matrix: word names in column 1, one per-topic
# probability column after it, with rows 1, 2 and 5:29 removed (presumably
# dataset-specific exclusions -- TODO(review): confirm why these rows drop).
adjust_prob<-function(lyr,label){
# Drop row 237 so lyr rows align with the label vector.
new_lyr<-lyr[-237,]
df<-cbind(label,new_lyr)
# One row per topic, 5000 word columns; seq(20) is only placeholder fill.
result<-data.frame(matrix(seq(20),nrow=max(as.numeric(label)),ncol=5000))
for(lb in 1:max(as.numeric(label))) {
# Songs assigned to topic lb (column 1 of df is the label).
holder<-subset(df,df[,1]==lb)
# Total count per word across those songs (columns 3:5002 hold the counts).
holder<-colSums(holder[,3:5002])
tot<-sum(holder)
# Share of the topic's total word mass carried by each word.
prop<-holder/tot
result[lb,]<-prop
}
# Transpose to words-in-rows and prepend the word names as column 1
# (cbind with a character column coerces the whole result to character).
result<-t(result)
result<-cbind(colnames(new_lyr)[2:5001],result)
# Drop rows 1, 2 and 5:29 -- data-specific exclusions.
result <- result[-c(1,2,5:29),]
return(result)
}
# Flatten each song's bag-of-words counts into one text string per song.
lyrics <- string_convert(lyric)
# The original element-by-element copy loop over 1:2350 only extracted
# column 1; take the column directly instead of hard-coding the row count.
Vlyrics <- lyrics[[1]]
# Row 237 is excluded throughout this analysis -- presumably a bad record;
# TODO(review): confirm why this row is dropped.
Vlyrics <- Vlyrics[-237]
# Build and clean the corpus.
Vlyrics <- VCorpus(VectorSource(Vlyrics))
Vlyrics <- tm_map(Vlyrics, stripWhitespace)
Vlyrics <- tm_map(Vlyrics, removeWords, stopwords("english"))
# Fix: the tm control option is "weighting", not "weight"; the original typo
# was silently ignored, so the DTM fell back to plain term-frequency weights.
dtm <- DocumentTermMatrix(Vlyrics, control = list(weighting = weightTfIdf))
# Fit a 20-topic LDA model with fixed seeds for reproducibility.
k3 <- 20
set.seed(123)
result20 <- LDA(dtm, k = k3, method = "VEM", control = list(seed = 2010))
label20 <- topics(result20, 1)   # most likely topic per document
rank20 <- terms(result20, 100)   # top 100 terms per topic
write.csv(label20, file = "label20.csv")
write.csv(rank20, file = "termsrank20.csv")
# Topic-conditional word probabilities, re-normalised over the raw counts.
adjustprob20 <- adjust_prob(lyr, label20)
prob_new <- adjustprob20[, -1]
# Convert the character matrix to numeric in one step. Sizing by the actual
# dimensions fixes the original 5000x20 preallocation, whose extra unfilled
# rows made the rownames<- assignment below fail on a length mismatch.
prob_num <- matrix(as.numeric(prob_new), nrow = nrow(prob_new), ncol = ncol(prob_new))
rownames(prob_num) <- adjustprob20[, 1]
############################################################ |
# Factors from a character vector.
data <- c('east','west','east','west','north','east')
data
class(data)
is.factor(data)
f_data <- factor(data)
f_data
is.factor(f_data)

# Data Frame ----
# NOTE(review): heights are quoted, so df$height is character rather than
# numeric -- kept as in the original demo, which inspects it below.
height <- c('160','170','180','173')
weight <- c(48, 73, 90, 56)
gender <- c('male','female','male','female')
df <- data.frame(height, weight, gender, stringsAsFactors = FALSE)
#colnames(df) <- c('')
#df$gender
class(df)
is.factor(df$gender)
df$gender
is.factor(df$height)
df$height

# Factor Levels: 3 levels, each repeated 4 times.
f_data2 <- gl(3, 4, labels = c('seoul','tokyo','beijing'))
f_data2

# Data import
df2 <- read.csv("seoul.csv", fileEncoding = 'utf-8', header = TRUE, sep = ',', stringsAsFactors = FALSE)
#View(df2)
#str(df2$날짜)
class(df2)
df2

# Fix: the original misspelled stringsAsFactors as "stringAsFactors", which
# silently created a bogus logical column instead of setting the option.
emp_data <- data.frame(emp_id = c(1:3),
                       emp_name = c('홍길동','이순신','강감찬'),
                       salary = c(100, 200, 300),
                       start_data = c('2018-10-10','2018-10-12','2018-10-12'),
                       stringsAsFactors = FALSE)
summary(emp_data)
emp_data | /R Daily/0307/0307 Data Frame.R | no_license | athletejuan/notebook | R | false | false | 918 | r | data <- c('east','west','east','west','north','east')
# Factors: inspect the character vector defined above, then convert it.
data
class(data)
is.factor(data)
f_data <- factor(data)
f_data
is.factor(f_data)
# Data Frame
height <- c('160','170','180','173')
weight <- c(48, 73, 90, 56)
gender <- c('male','female','male','female')
# stringsAsFactors = FALSE keeps character columns as plain character vectors.
df <- data.frame(height, weight, gender, stringsAsFactors = FALSE)
#colnames(df) <- c('')
#df$gender
class(df)
is.factor(df$gender)
df$gender
is.factor(df$height)
df$height
# Factor Levels
# gl(): generate a factor with 3 levels, each repeated 4 times.
f_data2 <- gl(3,4, labels=c('seoul','tokyo','beijing'))
f_data2
# Data import
df2 <- read.csv("seoul.csv", fileEncoding = 'utf-8', header=TRUE, sep=',', stringsAsFactors = FALSE)
#View(df2)
#str(df2$날짜)
class(df2)
df2
# FIX: the original misspelled 'stringsAsFactors' as 'stringAsFactors', which
# silently added a bogus logical column named 'stringAsFactors' to emp_data
# instead of controlling factor conversion.
# NOTE(review): 'start_data' is presumably meant to be 'start_date' -- kept
# as-is to preserve the column name downstream code may rely on.
emp_data <- data.frame(emp_id=c(1:3),emp_name=c('홍길동','이순신','강감찬'), salary=c(100,200,300),start_data=c('2018-10-10','2018-10-12','2018-10-12'), stringsAsFactors=FALSE)
summary(emp_data)
emp_data |
# Twitter sentiment analysis of tweets about "Indira canteen".
library(twitteR)  # library() (not require()) so a missing package fails loudly
library(ROAuth)
library(plyr)
library(httr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
library(stringr)
library(syuzhet)
# SECURITY: hard-coded Twitter API credentials committed to source control.
# These should be read from environment variables (Sys.getenv) and the keys
# rotated; kept here only to preserve the script's behavior.
cKey = "kVD68TfuxhZOd25hzNhLEZe2n"
Skey = "nAvUUNt8SMSb3M9NuIkq0GHhlM2I8WPa9xZY1V9T5g0DyA1Vw3"
A_token="1698053497-Q88KMdihT6iA0nHMJQejDvjdCeClRMwgmLcPNHi"
A_tokenS="9i9QKfkI0s1y6xmgryIpIjDwcJUUQyYs0Txnnz8L4mALN"
setup_twitter_oauth(cKey,Skey,A_token,A_tokenS)
# Fetch up to 1000 English tweets and extract their text.
ML <- searchTwitter("Indira canteen",n=1000,lang = "en")
txt = sapply(ML,function(x) x$getText())
# remove retweet markers
txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", txt)
# remove at people
txt = gsub("@\\w+", "", txt)
# remove punctuation
txt = gsub("[[:punct:]]", "", txt)
# remove numbers
txt = gsub("[[:digit:]]", "", txt)
# remove html links
txt = gsub("http\\w+", "", txt)
# remove unnecessary spaces
txt = gsub("[ \t]{2,}", "", txt)
txt = gsub("^\\s+|\\s+$", "", txt)
txt = tolower(txt)
word.list = str_split(txt, '\\s+') # splits the tweets by word in a list
words = unlist(word.list) # turns the list into vector
wordcloud(words,random.order = FALSE,colors = rainbow(50))
# FIX: the original read 'k = get_sentiment(some_txt)(txt)' -- 'some_txt' is
# undefined here and the numeric result of get_sentiment() is not callable.
k = get_sentiment(txt)
h = get_nrc_sentiment(txt)  # per-tweet NRC emotion/sentiment counts
sentimentscore = data.frame(colSums(h[,]))
sentimentscore = cbind("sentiment"=rownames(sentimentscore),sentimentscore)
#ggplot(data=sentimentscore,aes(sentiment,y))+geom_bar(aes(fill=sentiment))
# NOTE(review): the bars show raw counts, so xlab="Percentage" is presumably
# mislabeled -- confirm intent before changing the plot.
barplot(
sort(colSums(h[, 1:10])),
horiz = TRUE,
cex.names = 0.7,
las = 1, col=blues9,
main = "Emotions about India canteen", xlab="Percentage"
)
| /sentiment_analysis.r | no_license | Pavithra25/sentiment-analysis | R | false | false | 1,491 | r | require(twitteR)
# Twitter sentiment analysis of tweets about "Indira canteen"
# (twitteR is loaded by the preceding require() call).
library(ROAuth)
library(plyr)
library(httr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
library(stringr)
library(syuzhet)
# SECURITY: hard-coded Twitter API credentials committed to source control.
# These should be read from environment variables (Sys.getenv) and the keys
# rotated; kept here only to preserve the script's behavior.
cKey = "kVD68TfuxhZOd25hzNhLEZe2n"
Skey = "nAvUUNt8SMSb3M9NuIkq0GHhlM2I8WPa9xZY1V9T5g0DyA1Vw3"
A_token="1698053497-Q88KMdihT6iA0nHMJQejDvjdCeClRMwgmLcPNHi"
A_tokenS="9i9QKfkI0s1y6xmgryIpIjDwcJUUQyYs0Txnnz8L4mALN"
setup_twitter_oauth(cKey,Skey,A_token,A_tokenS)
# Fetch up to 1000 English tweets and extract their text.
ML <- searchTwitter("Indira canteen",n=1000,lang = "en")
txt = sapply(ML,function(x) x$getText())
# remove retweet markers
txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", txt)
# remove at people
txt = gsub("@\\w+", "", txt)
# remove punctuation
txt = gsub("[[:punct:]]", "", txt)
# remove numbers
txt = gsub("[[:digit:]]", "", txt)
# remove html links
txt = gsub("http\\w+", "", txt)
# remove unnecessary spaces
txt = gsub("[ \t]{2,}", "", txt)
txt = gsub("^\\s+|\\s+$", "", txt)
txt = tolower(txt)
word.list = str_split(txt, '\\s+') # splits the tweets by word in a list
words = unlist(word.list) # turns the list into vector
wordcloud(words,random.order = FALSE,colors = rainbow(50))
# FIX: the original read 'k = get_sentiment(some_txt)(txt)' -- 'some_txt' is
# undefined here and the numeric result of get_sentiment() is not callable.
k = get_sentiment(txt)
h = get_nrc_sentiment(txt)  # per-tweet NRC emotion/sentiment counts
sentimentscore = data.frame(colSums(h[,]))
sentimentscore = cbind("sentiment"=rownames(sentimentscore),sentimentscore)
#ggplot(data=sentimentscore,aes(sentiment,y))+geom_bar(aes(fill=sentiment))
# NOTE(review): the bars show raw counts, so xlab="Percentage" is presumably
# mislabeled -- confirm intent before changing the plot.
barplot(
sort(colSums(h[, 1:10])),
horiz = TRUE,
cex.names = 0.7,
las = 1, col=blues9,
main = "Emotions about India canteen", xlab="Percentage"
)
|
# Phase 3: data visualization (comments translated from Slovenian).
# Import the municipality map shapefile.
# NOTE(review): uvozi.zemljevid() is a project helper defined elsewhere, and
# obcine / druzine are expected to exist already -- confirm load order.
zemljevid <- uvozi.zemljevid("http://baza.fmf.uni-lj.si/OB.zip", "OB",
pot.zemljevida="OB", encoding="UTF-8")
# Normalize municipality names so they match levels(obcine$obcina):
# abbreviate "Slovenskih" and pad hyphens with spaces.
levels(zemljevid$OB_UIME) <- levels(zemljevid$OB_UIME) %>%
{ gsub("Slovenskih", "Slov.", .) } %>% { gsub("-", " - ", .) }
zemljevid$OB_UIME <- factor(zemljevid$OB_UIME, levels=levels(obcine$obcina))
zemljevid <- fortify(zemljevid)
# Compute the weighted average family size per municipality.
povprecja <- druzine %>% group_by(obcina) %>%
summarise(povprecje=sum(velikost.druzine * stevilo.druzin) / sum(stevilo.druzin))
# My map: Slovenia outline from GADM, fortified for plotting.
Slovenija <- uvozi.zemljevid("http://biogeo.ucdavis.edu/data/gadm2.8/shp/SVN_adm_shp.zip",
"SVN_adm1") %>% fortify()
| /vizualizacija/uvozi.zemljevid.r | permissive | JanKolenc/APPR-2018-19 | R | false | false | 765 | r | # 3. faza: Vizualizacija podatkov
# Import the municipality map shapefile (comments translated from Slovenian).
# NOTE(review): uvozi.zemljevid() is a project helper defined elsewhere, and
# obcine / druzine are expected to exist already -- confirm load order.
zemljevid <- uvozi.zemljevid("http://baza.fmf.uni-lj.si/OB.zip", "OB",
pot.zemljevida="OB", encoding="UTF-8")
# Normalize municipality names so they match levels(obcine$obcina):
# abbreviate "Slovenskih" and pad hyphens with spaces.
levels(zemljevid$OB_UIME) <- levels(zemljevid$OB_UIME) %>%
{ gsub("Slovenskih", "Slov.", .) } %>% { gsub("-", " - ", .) }
zemljevid$OB_UIME <- factor(zemljevid$OB_UIME, levels=levels(obcine$obcina))
zemljevid <- fortify(zemljevid)
# Compute the weighted average family size per municipality.
povprecja <- druzine %>% group_by(obcina) %>%
summarise(povprecje=sum(velikost.druzine * stevilo.druzin) / sum(stevilo.druzin))
# My map: Slovenia outline from GADM, fortified for plotting.
Slovenija <- uvozi.zemljevid("http://biogeo.ucdavis.edu/data/gadm2.8/shp/SVN_adm_shp.zip",
"SVN_adm1") %>% fortify()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jamba.r
\name{printDebug}
\alias{printDebug}
\title{print colorized output to R console}
\usage{
printDebug(..., fgText = NULL, bgText = NULL, fgTime = "cyan",
timeStamp = TRUE, comment = TRUE, formatNumbers = TRUE,
trim = TRUE, digits = NULL, nsmall = 0L, justify = "left",
big.mark = "", small.mark = "", zero.print = NULL, width = NULL,
doColor = NULL, splitComments = FALSE, collapse = "", sep = ",",
detectColors = TRUE, darkFactor = c(1, 1.5), sFactor = c(1, 1.5),
lightMode = checkLightMode(), Crange = NULL, Lrange = NULL,
removeNA = FALSE, replaceNULL = NULL,
adjustRgb = getOption("jam.adjustRgb"), byLine = FALSE,
verbose = FALSE, indent = "", keepNA = TRUE, file = "",
append = TRUE, invert = FALSE, htmlOut = FALSE, x)
}
\arguments{
\item{...}{text to be printed to the R console.}
\item{fgText}{vector of R compatible colors, or a list of vectors of
R compatible colors, to define the foreground colors. In the latter
case, each vector is applied to each list item from '...'}
\item{bgText}{vector of R compatible colors, or a list of vectors,
to define the background color.}
\item{fgTime}{character R color to colorize the time}
\item{timeStamp}{logical whether to include a time stamp in output}
\item{comment}{logical whether to prefix output with '##' as a comment}
\item{formatNumbers}{logical whether to format numbers using
\code{\link[base]{format}} which controls the number of digits displayed.}
\item{trim, digits, nsmall, justify, big.mark, small.mark, zero.print, width}{parameters sent to the \code{\link[base]{format}} function.}
\item{doColor}{NULL or logical indicating whether to colorize output. If
NULL it detects whether the crayon package is available and console
color is enabled.}
\item{splitComments}{logical whether to color each element independently
without light-dark alternating pattern.}
\item{collapse}{character collapse string used to separate list items,
by default "" so text separation is expected in the input data.}
\item{sep}{character separator used to separate vector elements, when
a list items contains a vector.}
\item{detectColors}{logical whether to detect and potentially try to
correct console color capabilities.}
\item{darkFactor}{numeric darkness to apply to alternative vector values
when using alternating light-dark color shading.}
\item{sFactor}{numeric color saturation to apply to alternative vector
values when using alternating light-dark color shading.}
\item{lightMode}{boolean or NULL, indicating whether the text background
color is light, thus imposing a maximum brightness for colors displayed.
It uses lightMode if defined by the function caller, otherwise it will
use options("jam.lightMode") if defined, lastly it will attempt to detect
whether running inside Rstudio by checking the environment variable
"RSTUDIO", and if so it will assign lightMode TRUE.}
\item{Crange}{numeric range of chroma values, ranging
between 0 and 100. When NULL, default values will be
assigned to Crange by \code{setCLranges()}.}
\item{Lrange}{numeric range of luminance values, ranging
between 0 and 100. When NULL, default values will be
assigned to Lrange by \code{setCLranges()}.}
\item{removeNA}{logical whether to remove NA values and not print to
the console.}
\item{replaceNULL}{character or NULL, optionally replace NULL elements
with non-NULL character value.}
\item{adjustRgb}{numeric value adjustment used during the conversion of
RGB colors to ANSI colors, which is inherently lossy. If not defined,
it uses the default returned by \code{setCLranges()} which itself uses
\code{getOption("jam.adjustRgb")} with default=0. In order to boost
color contrast, an alternate value of -0.1 is suggested.}
\item{byLine}{logical whether to delimit lists by line instead of
using collapse to combine them onto one line.}
\item{verbose}{logical whether to print verbose output}
\item{indent}{character optional characters used as a prefix to indent
output.}
\item{file}{passed to \code{cat}, to allow sending output to
a specified file.}
\item{append}{logical whether to append output, relevant only when
\code{file} specifies a filename.}
\item{invert}{logical indicating whether foreground and background
colors should be switched.}
\item{htmlOut}{logical indicating whether to print HTML span
output, using format
\code{<span style="color:fg;background-color:bg">text</span>}.}
}
\value{
This function is called for the by-product of printing
debug output, it returns \code{invisible(NULL)}, no output.
}
\description{
print colorized output to R console
}
\details{
This function prints colorized output to the R console, with some
rules for colorizing the output to help visually distinguish items.
Its output also by default begins with comment '#' characters and a
datetimestamp, so that if the output is copied back into the R console it
will not cause a new command to be run.
The colorization uses a vector or list of colors for fgText (foreground)
and bgText (background), applied to each item in '...'. When an item in
'...' is a vector, each vector element is colored alternating light
and dark from that base color, to give visual indication of each element.
The next item in '...' receives the next color from fgText, and so on.
Colors in fgText are recycled to the length of '...'
}
\examples{
printDebug("Testing ", "default ", "printDebug().");
printDebug("List of vectors:", c("one", "two", "three"));
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse=" ");
# slightly different style, one entry per line, indented:
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse="\\n ");
# in an R console, or when writing to a log file, the
# following output text is colored
printDebug(c("red", "blue", "yellow"));
}
\seealso{
Other jam practical functions: \code{\link{applyCLrange}},
\code{\link{breakDensity}}, \code{\link{checkLightMode}},
\code{\link{colNum2excelName}}, \code{\link{exp2signed}},
\code{\link{fileInfo}}, \code{\link{fixYellowHue}},
\code{\link{fixYellow}}, \code{\link{getAxisLabel}},
\code{\link{handleArgsText}}, \code{\link{isFALSEV}},
\code{\link{isTRUEV}}, \code{\link{jamba}},
\code{\link{jargs}}, \code{\link{kable_coloring}},
\code{\link{log2signed}}, \code{\link{make_styles}},
\code{\link{mergeAllXY}}, \code{\link{minorLogTicks}},
\code{\link{newestFile}}, \code{\link{printDebugI}},
\code{\link{renameColumn}}, \code{\link{rmInfinite}},
\code{\link{rmNA}}, \code{\link{rmNULL}},
\code{\link{sclass}}, \code{\link{sdima}},
\code{\link{sdim}}, \code{\link{setCLranges}},
\code{\link{setPrompt}}, \code{\link{ssdima}},
\code{\link{ssdim}}
}
\concept{jam practical functions}
| /man/printDebug.Rd | no_license | hjanime/jamba | R | false | true | 6,824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jamba.r
\name{printDebug}
\alias{printDebug}
\title{print colorized output to R console}
\usage{
printDebug(..., fgText = NULL, bgText = NULL, fgTime = "cyan",
timeStamp = TRUE, comment = TRUE, formatNumbers = TRUE,
trim = TRUE, digits = NULL, nsmall = 0L, justify = "left",
big.mark = "", small.mark = "", zero.print = NULL, width = NULL,
doColor = NULL, splitComments = FALSE, collapse = "", sep = ",",
detectColors = TRUE, darkFactor = c(1, 1.5), sFactor = c(1, 1.5),
lightMode = checkLightMode(), Crange = NULL, Lrange = NULL,
removeNA = FALSE, replaceNULL = NULL,
adjustRgb = getOption("jam.adjustRgb"), byLine = FALSE,
verbose = FALSE, indent = "", keepNA = TRUE, file = "",
append = TRUE, invert = FALSE, htmlOut = FALSE, x)
}
\arguments{
\item{...}{text to be printed to the R console.}
\item{fgText}{vector of R compatible colors, or a list of vectors of
R compatible colors, to define the foreground colors. In the latter
case, each vector is applied to each list item from '...'}
\item{bgText}{vector of R compatible colors, or a list of vectors,
to define the background color.}
\item{fgTime}{character R color to colorize the time}
\item{timeStamp}{logical whether to include a time stamp in output}
\item{comment}{logical whether to prefix output with '##' as a comment}
\item{formatNumbers}{logical whether to format numbers using
\code{\link[base]{format}} which controls the number of digits displayed.}
\item{trim, digits, nsmall, justify, big.mark, small.mark, zero.print, width}{parameters sent to the \code{\link[base]{format}} function.}
\item{doColor}{NULL or logical indicating whether to colorize output. If
NULL it detects whether the crayon package is available and console
color is enabled.}
\item{splitComments}{logical whether to color each element independently
without light-dark alternating pattern.}
\item{collapse}{character collapse string used to separate list items,
by default "" so text separation is expected in the input data.}
\item{sep}{character separator used to separate vector elements, when
a list items contains a vector.}
\item{detectColors}{logical whether to detect and potentially try to
correct console color capabilities.}
\item{darkFactor}{numeric darkness to apply to alternative vector values
when using alternating light-dark color shading.}
\item{sFactor}{numeric color saturation to apply to alternative vector
values when using alternating light-dark color shading.}
\item{lightMode}{boolean or NULL, indicating whether the text background
color is light, thus imposing a maximum brightness for colors displayed.
It uses lightMode if defined by the function caller, otherwise it will
use options("jam.lightMode") if defined, lastly it will attempt to detect
whether running inside Rstudio by checking the environment variable
"RSTUDIO", and if so it will assign lightMode TRUE.}
\item{Crange}{numeric range of chroma values, ranging
between 0 and 100. When NULL, default values will be
assigned to Crange by \code{setCLranges()}.}
\item{Lrange}{numeric range of luminance values, ranging
between 0 and 100. When NULL, default values will be
assigned to Lrange by \code{setCLranges()}.}
\item{removeNA}{logical whether to remove NA values and not print to
the console.}
\item{replaceNULL}{character or NULL, optionally replace NULL elements
with non-NULL character value.}
\item{adjustRgb}{numeric value adjustment used during the conversion of
RGB colors to ANSI colors, which is inherently lossy. If not defined,
it uses the default returned by \code{setCLranges()} which itself uses
\code{getOption("jam.adjustRgb")} with default=0. In order to boost
color contrast, an alternate value of -0.1 is suggested.}
\item{byLine}{logical whether to delimit lists by line instead of
using collapse to combine them onto one line.}
\item{verbose}{logical whether to print verbose output}
\item{indent}{character optional characters used as a prefix to indent
output.}
\item{file}{passed to \code{cat}, to allow sending output to
a specified file.}
\item{append}{logical whether to append output, relevant only when
\code{file} specifies a filename.}
\item{invert}{logical indicating whether foreground and background
colors should be switched.}
\item{htmlOut}{logical indicating whether to print HTML span
output, using format
\code{<span style="color:fg;background-color:bg">text</span>}.}
}
\value{
This function is called for the by-product of printing
debug output, it returns \code{invisible(NULL)}, no output.
}
\description{
print colorized output to R console
}
\details{
This function prints colorized output to the R console, with some
rules for colorizing the output to help visually distinguish items.
Its output also by default begins with comment '#' characters and a
datetimestamp, so that if the output is copied back into the R console it
will not cause a new command to be run.
The colorization uses a vector or list of colors for fgText (foreground)
and bgText (background), applied to each item in '...'. When an item in
'...' is a vector, each vector element is colored alternating light
and dark from that base color, to give visual indication of each element.
The next item in '...' receives the next color from fgText, and so on.
Colors in fgText are recycled to the length of '...'
}
\examples{
printDebug("Testing ", "default ", "printDebug().");
printDebug("List of vectors:", c("one", "two", "three"));
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse=" ");
# slightly different style, one entry per line, indented:
printDebug("List of vectors:", c("one", "two", "three"),
c("four", "five", "six"), collapse="\\n ");
# in an R console, or when writing to a log file, the
# following output text is colored
printDebug(c("red", "blue", "yellow"));
}
\seealso{
Other jam practical functions: \code{\link{applyCLrange}},
\code{\link{breakDensity}}, \code{\link{checkLightMode}},
\code{\link{colNum2excelName}}, \code{\link{exp2signed}},
\code{\link{fileInfo}}, \code{\link{fixYellowHue}},
\code{\link{fixYellow}}, \code{\link{getAxisLabel}},
\code{\link{handleArgsText}}, \code{\link{isFALSEV}},
\code{\link{isTRUEV}}, \code{\link{jamba}},
\code{\link{jargs}}, \code{\link{kable_coloring}},
\code{\link{log2signed}}, \code{\link{make_styles}},
\code{\link{mergeAllXY}}, \code{\link{minorLogTicks}},
\code{\link{newestFile}}, \code{\link{printDebugI}},
\code{\link{renameColumn}}, \code{\link{rmInfinite}},
\code{\link{rmNA}}, \code{\link{rmNULL}},
\code{\link{sclass}}, \code{\link{sdima}},
\code{\link{sdim}}, \code{\link{setCLranges}},
\code{\link{setPrompt}}, \code{\link{ssdima}},
\code{\link{ssdim}}
}
\concept{jam practical functions}
|
#' Exception messages
#' @param code An exception code
#' @return A list containing the following elements:
#' \item{code}{the exception code}
#' \item{response}{the message}
#' \item{debugging}{suggestions for debugging}
#' @examples
#' exception("S.1")
#' @export
exception = function(code){
# Lookup table of known exception codes: one row per code with a short
# response message and a debugging hint; rbind() of named rows keeps the
# codes as rownames.
exceptions = data.frame(rbind(
S.1 = c("id invalid","id already exists, set a unique identifier"),
S.2 = c("saving failed","filename already exists and overwriting is forbidden, choose another id or allow overwriting"),
S.3 = c("path not found","path saved in controls not found, check spelling"),
C.1 = c("controls invalid","controls is not checked, supply controls to check_controls"),
C.2 = c("controls incomplete","some controls have to be specified, see Readme-file"),
C.3 = c("controls misspecified","some controls do not fulfill restrictions, see Readme-file"),
C.4 = c("controls contains unsupported elements","some controls are not supported and ignored, check spelling"),
C.5 = c("iterlim reached","selected estimation run reached the iteration limit, consider increasing iterlim"),
C.6 = c("possibly unidentified states","some states might be unidentified, consider reducing number of states"),
C.7 = c("gamma SDD not allowed","gamma SDD only allowed for non-negative data, select t SDD instead"),
D.1 = c("from invalid","selected values too low, from is set to lower bound of '1902-01-01'"),
D.2 = c("symbol unknown","symbol for name not saved, supply symbol to function download_data"),
D.3 = c("symbol invalid","symbol does not exist on finance.yahoo.com, check spelling"),
D.4 = c("data invalid","'data' must have a column named 'Date' and columns specified by data_col in controls"),
F.1 = c("stationary distribution set to uniform distribution","computation of stationary distribution failed, continue with uniform distribution"),
F.2 = c("initialisation failed","the likelihood function could not be computed at any of the selected start values, increase runs or adjust scale_par in controls"),
F.3 = c("bad start values","the likelihood function could not be computed at more than half of the selected start values, increase runs or adjust scale_par in controls"),
F.4 = c("estimation failed","none of the estimation runs ended successfully, adapt accept_codes or increase runs"),
F.5 = c("alpha invalid","value of alpha must be between 0 and 1"),
F.6 = c("some confidence intervals could not be computed","the corresponding estimates may lie close to the boundaries of their parameter space, the confidence intervals may be unreliable and are therefore replaced by NA."),
V.1 = c("events ignored","events is only used for empirical data"),
V.2 = c("events invalid","make sure that dates and names in events are of the same length")
))
colnames(exceptions) = c("response","debugging")
# Known code: return its row as a named list.
# Unknown code: emit a console message and return message()'s invisible NULL.
if(code %in% rownames(exceptions)){
return(list("code" = code, "response" = exceptions[code,"response"], "debugging" = exceptions[code,"debugging"]))
} else {
message("Error code unknown")
}
} | /R/exception.R | no_license | minghao2016/fHMM | R | false | false | 3,100 | r | #' Exception messages
#' @param code An exception code
#' @return A list containing the following elements:
#' \item{code}{the exception code}
#' \item{response}{the message}
#' \item{debugging}{suggestions for debugging}
#' @examples
#' exception("S.1")
#' @export
# Resolve an exception code against two parallel named lookup vectors
# (response message and debugging hint); behavior matches the original
# data.frame-based implementation exactly.
exception = function(code){
responses = c(
S.1 = "id invalid",
S.2 = "saving failed",
S.3 = "path not found",
C.1 = "controls invalid",
C.2 = "controls incomplete",
C.3 = "controls misspecified",
C.4 = "controls contains unsupported elements",
C.5 = "iterlim reached",
C.6 = "possibly unidentified states",
C.7 = "gamma SDD not allowed",
D.1 = "from invalid",
D.2 = "symbol unknown",
D.3 = "symbol invalid",
D.4 = "data invalid",
F.1 = "stationary distribution set to uniform distribution",
F.2 = "initialisation failed",
F.3 = "bad start values",
F.4 = "estimation failed",
F.5 = "alpha invalid",
F.6 = "some confidence intervals could not be computed",
V.1 = "events ignored",
V.2 = "events invalid"
)
debuggings = c(
S.1 = "id already exists, set a unique identifier",
S.2 = "filename already exists and overwriting is forbidden, choose another id or allow overwriting",
S.3 = "path saved in controls not found, check spelling",
C.1 = "controls is not checked, supply controls to check_controls",
C.2 = "some controls have to be specified, see Readme-file",
C.3 = "some controls do not fulfill restrictions, see Readme-file",
C.4 = "some controls are not supported and ignored, check spelling",
C.5 = "selected estimation run reached the iteration limit, consider increasing iterlim",
C.6 = "some states might be unidentified, consider reducing number of states",
C.7 = "gamma SDD only allowed for non-negative data, select t SDD instead",
D.1 = "selected values too low, from is set to lower bound of '1902-01-01'",
D.2 = "symbol for name not saved, supply symbol to function download_data",
D.3 = "symbol does not exist on finance.yahoo.com, check spelling",
D.4 = "'data' must have a column named 'Date' and columns specified by data_col in controls",
F.1 = "computation of stationary distribution failed, continue with uniform distribution",
F.2 = "the likelihood function could not be computed at any of the selected start values, increase runs or adjust scale_par in controls",
F.3 = "the likelihood function could not be computed at more than half of the selected start values, increase runs or adjust scale_par in controls",
F.4 = "none of the estimation runs ended successfully, adapt accept_codes or increase runs",
F.5 = "value of alpha must be between 0 and 1",
F.6 = "the corresponding estimates may lie close to the boundaries of their parameter space, the confidence intervals may be unreliable and are therefore replaced by NA.",
V.1 = "events is only used for empirical data",
V.2 = "make sure that dates and names in events are of the same length"
)
# Known code: return the matching entries as a named list; unknown code:
# emit a console message and return message()'s invisible NULL.
if(code %in% names(responses)){
list("code" = code, "response" = responses[[code]], "debugging" = debuggings[[code]])
} else {
message("Error code unknown")
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.