content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# ################################################################# #
#### Filtering Data ####
# ################################################################# #
# ----------------------------------------------------------------- #
#---- #1. Ways to select columns ----
# ----------------------------------------------------------------- #
# First let's create our data frame
Age <- c(30, 60, 15, 70, 25, 26)
Name <- c("Eric", "Seth", 'John', "Bonsu" ,'Elizabeth', 'Regina')
Sex <- c('Male', 'Male', 'Male', 'Male','Female', 'Female')
Money_in_Account <- c(30.00, 40.01, 600.04, 400.98, 4999.9, 300000)
# Create the data frame
df <- data.frame(age=Age, name= Name, sex = Sex, savings = Money_in_Account)
print(df)
# General way to subset in base R:
#   nameOfData[command to select rows , command to select columns]
# 1a. First way to select columns: positional numbering (starts from 1 ...)
# E.g. select the first column (age, pos 1) and last column (savings, pos 4).
# Template (pseudo-code, kept as a comment so the script runs):
#   nameOfdataset[ row condition to select rows, column condition to select column]
firstway <- df[ , c(1, 2, 4)] # or
print(firstway)
firstcol_filter <- c(1, 4)
firstway_copy <- df[ , firstcol_filter ]
print(firstway_copy)
# ...or by removing the columns you do not want
firstway_otherway <- df[ , -c(2, 3)] # to select columns 2 to 10 use c(2:10)
print(firstway_otherway)
# 1b. Using column names to do the subselection
secondway <- df[ , names(df) %in% c("age","name", "savings")]
# 1c. Use the subset() function; it takes:
#   argument 1: name of the data frame
#   argument 2: command to select rows (omitted here: keep all rows)
#   argument 3 (select): command to select columns: c(....)
thirdway <- subset(df, select = c("age", "savings"))
|
/Week2/LectureCode/saturday/part1b_columnfiltering.R
|
no_license
|
Fosu-Tony/Data_Smart_Science
|
R
| false
| false
| 1,795
|
r
|
# ################################################################# #
#### Filtering Data ####
# ################################################################# #
# ----------------------------------------------------------------- #
#---- #1. Ways to select columns ----
# ----------------------------------------------------------------- #
# First let's create our data frame
Age <- c(30, 60, 15, 70, 25, 26)
Name <- c("Eric", "Seth", 'John', "Bonsu" ,'Elizabeth', 'Regina')
Sex <- c('Male', 'Male', 'Male', 'Male','Female', 'Female')
Money_in_Account <- c(30.00, 40.01, 600.04, 400.98, 4999.9, 300000)
# Create the data frame
df <- data.frame(age=Age, name= Name, sex = Sex, savings = Money_in_Account)
print(df)
# General way to subset in base R:
#   nameOfData[command to select rows , command to select columns]
# 1a. First way to select columns: positional numbering (starts from 1 ...)
# E.g. select the first column (age, pos 1) and last column (savings, pos 4).
# Template (pseudo-code, kept as a comment so the script runs):
#   nameOfdataset[ row condition to select rows, column condition to select column]
firstway <- df[ , c(1, 2, 4)] # or
print(firstway)
firstcol_filter <- c(1, 4)
firstway_copy <- df[ , firstcol_filter ]
print(firstway_copy)
# ...or by removing the columns you do not want
firstway_otherway <- df[ , -c(2, 3)] # to select columns 2 to 10 use c(2:10)
print(firstway_otherway)
# 1b. Using column names to do the subselection
secondway <- df[ , names(df) %in% c("age","name", "savings")]
# 1c. Use the subset() function; it takes:
#   argument 1: name of the data frame
#   argument 2: command to select rows (omitted here: keep all rows)
#   argument 3 (select): command to select columns: c(....)
thirdway <- subset(df, select = c("age", "savings"))
|
# Valgrind/fuzzing regression input: a 5 x 7 matrix whose first four cells
# hold extreme double values; the remaining 31 cells are zero.
extremes <- c(2.31584307392677e+77, 6.55736417819481e+295,
1.22810536108214e+146, 4.12396251261199e-221)
testlist <- list(x = matrix(c(extremes, rep(0, 31)), nrow = 5, ncol = 7))
# Call the internal routine with the recorded argument list and print
# the structure of whatever it returns.
result <- multivariance:::doubleCenterBiasCorrected(x = testlist$x)
str(result)
|
/multivariance/inst/testfiles/doubleCenterBiasCorrected/AFL_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1613140799-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 321
|
r
|
# Valgrind/fuzzing regression input: a 5 x 7 matrix whose first four cells
# hold extreme double values; the remaining 31 cells are zero.
extremes <- c(2.31584307392677e+77, 6.55736417819481e+295,
1.22810536108214e+146, 4.12396251261199e-221)
testlist <- list(x = matrix(c(extremes, rep(0, 31)), nrow = 5, ncol = 7))
# Call the internal routine with the recorded argument list and print
# the structure of whatever it returns.
result <- multivariance:::doubleCenterBiasCorrected(x = testlist$x)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEstimator.R
\name{getEstimator}
\alias{getEstimator}
\title{extract the posterior mean of the parameters}
\usage{
getEstimator(object, estimator = "gamma", Pmax = 0, beta.type = "marginal")
}
\arguments{
\item{object}{an object of class \code{BayesSUR}}
\item{estimator}{the name of one estimator. Default is the latent indicator estimator "\code{gamma}". Other options are "\code{beta}", "\code{Gy}", "\code{CPO}" and "\code{logP}",
which correspond to the marginal (conditional) coefficient matrix if \code{beta.type="marginal"} (\code{"conditional"}), the response graph and the conditional predictive ordinate (CPO), respectively}
\item{Pmax}{threshold that truncate the estimator "\code{gamma}" or "\code{Gy}". Default is \code{0}}
\item{beta.type}{the type of output beta. Default is \code{marginal}, giving marginal beta estimation. If \code{beta.type="conditional"}, it gives conditional beta estimation}
}
\value{
Return the estimator from an object of class \code{BayesSUR}. It is a matrix if the length of argument \code{marginal} is greater than 1. Otherwise, it is a list
}
\description{
Extract the posterior mean of the parameters of a \code{BayesSUR} class object.
}
\examples{
data("exampleEQTL", package = "BayesSUR")
hyperpar <- list( a_w = 2 , b_w = 5 )
set.seed(9173)
fit <- BayesSUR(Y = exampleEQTL[["blockList"]][[1]],
X = exampleEQTL[["blockList"]][[2]],
data = exampleEQTL[["data"]], outFilePath = tempdir(),
nIter = 100, burnin = 50, nChains = 2, gammaPrior = "hotspot",
hyperpar = hyperpar, tmpFolder = "tmp/" )
## check output
# extract the posterior mean of the coefficients matrix
beta_hat <- getEstimator(fit, estimator="beta")
}
|
/BayesSUR/man/getEstimator.Rd
|
no_license
|
akhikolla/ClusterTests
|
R
| false
| true
| 1,790
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEstimator.R
\name{getEstimator}
\alias{getEstimator}
\title{extract the posterior mean of the parameters}
\usage{
getEstimator(object, estimator = "gamma", Pmax = 0, beta.type = "marginal")
}
\arguments{
\item{object}{an object of class \code{BayesSUR}}
\item{estimator}{the name of one estimator. Default is the latent indicator estimator "\code{gamma}". Other options are "\code{beta}", "\code{Gy}", "\code{CPO}" and "\code{logP}",
which correspond to the marginal (conditional) coefficient matrix if \code{beta.type="marginal"} (\code{"conditional"}), the response graph and the conditional predictive ordinate (CPO), respectively}
\item{Pmax}{threshold that truncate the estimator "\code{gamma}" or "\code{Gy}". Default is \code{0}}
\item{beta.type}{the type of output beta. Default is \code{marginal}, giving marginal beta estimation. If \code{beta.type="conditional"}, it gives conditional beta estimation}
}
\value{
Return the estimator from an object of class \code{BayesSUR}. It is a matrix if the length of argument \code{marginal} is greater than 1. Otherwise, it is a list
}
\description{
Extract the posterior mean of the parameters of a \code{BayesSUR} class object.
}
\examples{
data("exampleEQTL", package = "BayesSUR")
hyperpar <- list( a_w = 2 , b_w = 5 )
set.seed(9173)
fit <- BayesSUR(Y = exampleEQTL[["blockList"]][[1]],
X = exampleEQTL[["blockList"]][[2]],
data = exampleEQTL[["data"]], outFilePath = tempdir(),
nIter = 100, burnin = 50, nChains = 2, gammaPrior = "hotspot",
hyperpar = hyperpar, tmpFolder = "tmp/" )
## check output
# extract the posterior mean of the coefficients matrix
beta_hat <- getEstimator(fit, estimator="beta")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MapSchedule.R
\name{mapSchedule}
\alias{mapSchedule}
\title{Data Parallel Scheduler}
\usage{
mapSchedule(graph)
}
\arguments{
\item{graph}{\linkS4class{TaskGraph}}
}
\description{
DEPRECATED. Use scheduleDataParallel instead.
}
\details{
This function
detects parallelism through the use of top level calls to R's
apply family of functions and through analysis of \code{for} loops.
Currently supported apply style functions include
\code{\link[base]{lapply}} and \code{\link[base]{mapply}}. It doesn't
parallelize all for loops that can be parallelized, but it does do the
common ones listed in the example.
Consider using this if:
\itemize{
\item \code{code} is slow
\item \code{code} uses for loops or one of the apply functions mentioned above
\item You have access to machine with multiple cores that supports
\code{\link[parallel]{makeForkCluster}} (Any UNIX variant should work,
ie. Mac)
\item You're unfamiliar with parallel programming in R
}
Don't use this if:
\itemize{
\item \code{code} is fast enough for your application
\item \code{code} is already parallel, either explicitly with a package
such as parallel, or implicitly, say through a multi threaded BLAS
\item You need maximum performance at all costs. In this case you need
to carefully profile and interface appropriately with a high
performance library.
}
Currently this function supports \code{for} loops that update 0 or 1
global variables. For those that update a single variable the update
must be on the last line of the loop body, so the for loop should have
the following form:
\code{
for(i in ...){
...
x[i] <- ...
}
}
If the last line doesn't update the variable then it's not clear that
the loop can be parallelized.
Road map of features to implement:
\itemize{
\item Prevent from parallelizing calls that are themselves in the body
of a loop.
}
}
\examples{
# Each iteration of the for loop writes to a different file- good!
# If they write to the same file this will break.
pfile <- makeParallel(parse(text = "
fnames <- paste0(1:10, '.txt')
for(f in fnames){
writeLines('testing...', f)
}"))
# A couple examples in one script
serial_code <- parse(text = "
x1 <- lapply(1:10, exp)
n <- 10
x2 <- rep(NA, n)
for(i in seq(n)) x2[[i]] <- exp(i + 1)
")
p <- makeParallel(serial_code)
eval(serial_code)
x1
x2
rm(x1, x2)
# x1 and x2 should now be back and the same as they were for serial
eval(writeCode(p))
x1
x2
}
|
/man/mapSchedule.Rd
|
no_license
|
jfontestad/makeParallel
|
R
| false
| true
| 2,566
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MapSchedule.R
\name{mapSchedule}
\alias{mapSchedule}
\title{Data Parallel Scheduler}
\usage{
mapSchedule(graph)
}
\arguments{
\item{graph}{\linkS4class{TaskGraph}}
}
\description{
DEPRECATED. Use scheduleDataParallel instead.
}
\details{
This function
detects parallelism through the use of top level calls to R's
apply family of functions and through analysis of \code{for} loops.
Currently supported apply style functions include
\code{\link[base]{lapply}} and \code{\link[base]{mapply}}. It doesn't
parallelize all for loops that can be parallelized, but it does do the
common ones listed in the example.
Consider using this if:
\itemize{
\item \code{code} is slow
\item \code{code} uses for loops or one of the apply functions mentioned above
\item You have access to machine with multiple cores that supports
\code{\link[parallel]{makeForkCluster}} (Any UNIX variant should work,
ie. Mac)
\item You're unfamiliar with parallel programming in R
}
Don't use this if:
\itemize{
\item \code{code} is fast enough for your application
\item \code{code} is already parallel, either explicitly with a package
such as parallel, or implicitly, say through a multi threaded BLAS
\item You need maximum performance at all costs. In this case you need
to carefully profile and interface appropriately with a high
performance library.
}
Currently this function supports \code{for} loops that update 0 or 1
global variables. For those that update a single variable the update
must be on the last line of the loop body, so the for loop should have
the following form:
\code{
for(i in ...){
...
x[i] <- ...
}
}
If the last line doesn't update the variable then it's not clear that
the loop can be parallelized.
Road map of features to implement:
\itemize{
\item Prevent from parallelizing calls that are themselves in the body
of a loop.
}
}
\examples{
# Each iteration of the for loop writes to a different file- good!
# If they write to the same file this will break.
pfile <- makeParallel(parse(text = "
fnames <- paste0(1:10, '.txt')
for(f in fnames){
writeLines('testing...', f)
}"))
# A couple examples in one script
serial_code <- parse(text = "
x1 <- lapply(1:10, exp)
n <- 10
x2 <- rep(NA, n)
for(i in seq(n)) x2[[i]] <- exp(i + 1)
")
p <- makeParallel(serial_code)
eval(serial_code)
x1
x2
rm(x1, x2)
# x1 and x2 should now be back and the same as they were for serial
eval(writeCode(p))
x1
x2
}
|
###===============================###===============================###
### Guillaume Evin
### 19/05/2017, Grenoble
### IRSTEA
### guillaume.evin@irstea.fr
###
### Provide utilities for the estimation and simulation of the GWex
### multi-site temperature model.
###===============================###===============================###
#==============================================================================
# mySgedFit
#
# fit nu and xi parameters of the SGED distribution
#
# @param x vector of numerical values
# @export
# @return \item{estimated parameters}{Vector of two parameters: \eqn{nu} and \eqn{xi}}
#
# @author Guillaume Evin
mySgedFit = function(x){
# Negative log-likelihood of the SGED(mean = 0, sd = 1, nu, xi) density
# evaluated on the data vector x.
negLogLik = function(param, x) {
-sum(log(fGarch::dsged(x, 0, 1, param[1], param[2])))
}
# Minimise the negative log-likelihood, starting from nu = 2, xi = 1;
# both shape parameters are constrained to be positive.
opt = nlminb(start = c(nu = 2, xi = 1), objective = negLogLik,
lower = c(0, 0), upper = c(Inf, Inf), x = x)
return(opt$par)
}
#==============================================================================
# get.nonleap.dm
#
# Day/Month for a non-leap year
#
# @return \item{Vector of 365 dates for a non-leap years}{String with the format day/month}
#
# @author Guillaume Evin
get.nonleap.dm = function(){
# Every calendar day of 2001 (an arbitrary non-leap year), one Date per day.
days.2001 = seq(from=as.Date("2001/01/01"),to=as.Date("2001/12/31"),by=1)
# Return the 365 days encoded as "ddmm" strings (e.g. "0101" ... "3112").
return(format(days.2001,'%d%m'))
}
#==============================================================================
# get.seasonal
#
# Estimate the annual cycle of temperature. For each day, we apply the function \code{fun} to all temperature data for this day (ex. the mean
# of all January, 1st). This cycle is smoothed with the \code{\link{lowess}} function.
#
# @param x temperature data
# @param vec.Dates vector of Dates associated to x
# @param myfun function apply for each day
#
# @export
#
# @return A vector of length 365, the annual cycle of temperature
#
# @author Guillaume Evin
get.seasonal = function(x,vec.Dates,myfun='mean'){
# "ddmm" label of every observation, and of each day of a non-leap year
obs.dm = format(vec.Dates,'%d%m')
ref.dm = get.nonleap.dm()
# For each of the 365 calendar days, apply `myfun` (by name, via do.call)
# to all observations falling on that day, dropping missing values.
daily = vector(length=365)
for(day in seq_len(365)){
on.day = obs.dm == ref.dm[day]
daily[day] = do.call(myfun, list(x = x[on.day], na.rm = TRUE))
}
# Smooth the raw daily cycle before returning it (lowess, span 1/10).
return(lowess(daily, f = 1/10)$y)
}
#==============================================================================
# predictTempCycle
#
# Return the seasonal cycle for an arbitrary period. The seasonal trend \code{season.fit} is repeated to match the sequence given in \code{vec.Dates}. For leap years, the trend returns equivalent values for the 28/02 and 29/02 days.
#
# @param season.fit seasonal trend: vector 365
# @param vec.Dates vector of Dates to predict
#
# @export
#
# @return A vector giving the seasonal cycles for the time period in vec.Dates
#
# @author Guillaume Evin
predictTempCycle = function(season.fit,vec.Dates){
# "ddmm" labels of the target dates and of a reference non-leap year
target.dm = format(vec.Dates,'%d%m')
ref.dm = get.nonleap.dm()
# Broadcast the fitted 365-day cycle onto every matching calendar day
# of the requested period.
cycle.pred = vector(length=length(vec.Dates))
for(day in seq_len(365)){
cycle.pred[target.dm == ref.dm[day]] = season.fit[day]
}
# Leap years: 29 February reuses the value fitted for 28 February.
cycle.pred[target.dm == "2902"] = season.fit[which(ref.dm == "2802")]
return(cycle.pred)
}
#==============================================================================
# get.period.fitting.temp
#
# Get the moving window of month indices used to fit one month
#
# NOTE(review): the original comments describe a "3-month" window, but the
# code below returns 5 indices (i.m:(i.m+4)), i.e. a 5-month window centred
# two positions after the month in the extended sequence — confirm intent.
#
# @param m.char 3-letter name of a month (e.g. 'JAN')
#
# @return vector of month indices (1-12) forming the fitting window for this month
#
# @author Guillaume Evin
get.period.fitting.temp = function(m.char){
# m.char:
# list of months (labels as returned by get.list.month(), defined elsewhere)
list.m = get.list.month()
# index of this month within that list
i.m = which(list.m==m.char)
# months extended on both sides (Nov, Dec prepended; Jan, Feb appended) so
# the window can wrap around the year boundary; slice out 5 consecutive entries
vec.m.ext = c(11,12,1:12,1,2)
return(vec.m.ext[i.m:(i.m+4)])
}
#==============================================================================
# predict.trend
#
# Return the trend for an arbitrary period
#
# @param vec.slope slopes for the 12 months
# @param vec.Dates vector of Dates to predict
#
# @return return a vector giving the long-term trend for the time period in vec.Dates
#
# @author Guillaume Evin
predict.trend = function(vec.slope,vec.Dates){
# Month index (1-12) of every date to predict.
month.idx = as.numeric(format(vec.Dates,'%m'))
# Year offset: number of years elapsed since the first simulated year.
year.raw = as.numeric(format(vec.Dates,'%Y'))
year.off = year.raw - min(year.raw)
# Each day's trend is (years elapsed) * (slope of its month); this single
# vectorised expression replaces the original year-by-month double loop.
return(year.off * vec.slope[month.idx])
}
#==============================================================================
# fit.GWex.temp
#
# Estimate all the parameters for the G-Wex model of temperature
# @references Evin, G., A.-C. Favre, and B. Hingray. 2018. “Stochastic Generators of Multi-Site Daily Temperature: Comparison of Performances in Various Applications.”
# Theoretical and Applied Climatology, 1–14. doi.org/10.1007/s00704-018-2404-x.
#
# @param objGwexObs object of class \code{\linkS4class{GwexObs}}
# @param listOption list with the following fields:
# \itemize{
# \item \strong{hasTrend}: logical value, do we fit a linear trend for the long-term change, =FALSE by default
# \item \strong{objGwexPrec}: object of class \code{\linkS4class{GwexObs}} containing precipitation observations. If provided, we assume that temperature must be modelled and simulated according to the precipitation states 'dry' and 'wet'. For each state, a seasonal cycle is fitted (mean and sd).
# \item \strong{typeMargin}: 'SGED' (default) or 'Gaussian': type of marginal distribution.
# \item \strong{depStation}: character value: 'MAR1' (default) applies a Multivariate Autoregressive model of order 1 (temporal + spatial dependence); 'Gaussian' models spatial dependence only
# }
#
# @export
#
# @return a list containing the list of options \code{listOption} and the list of estimated parameters \code{listPar}:
#
# \itemize{
# \item \strong{list.trend}: Annual trend for the average temperature, for each month: Raw (in \code{lm}) and smoothed (in \code{smooth}).
# \item \strong{list.par.margin}: List with one element per station. Each element contains the seasonal cycle for the average temperature \code{season.mean}, the corresponding cycle for the standard deviation \code{season.std} and the parameters of the marginal distributions for each month \code{SkewNormal.par}.
# \item \strong{list.par.dep}: List with one element per month. Each element contains the matrix of correlations \eqn{M_0} for the spatial dependence.
# }
# @author Guillaume Evin
fit.GWex.temp = function(objGwexObs,listOption=NULL){
######### Check inputs and assign default values ##########
if(is.null(listOption)){
listOption = list()
}else{
if(!is.list(listOption)) stop('listOption must be a list')
}
# hasTrend: fit a long-term linear trend? (default FALSE)
if('hasTrend' %in% names(listOption)){
hasTrend = listOption[['hasTrend']]
if(!is.logical(hasTrend)) stop('hasTrend must be logical')
}else{
hasTrend = F
listOption[['hasTrend']] = hasTrend
}
# objGwexPrec: if objGwexPrec is present, we condition the temperature model to precipitation
# observations/simulations (see Wilks, 2009)
if('objGwexPrec' %in% names(listOption)){
condPrec = T
objGwexPrec = listOption[['objGwexPrec']]
th = 0.2 # threshold to separate dry/wet states
}else{
condPrec = F
}
listOption[['condPrec']] = condPrec
# isParallel: not for temperature
listOption[['isParallel']] = FALSE
# typeMargin: 'SGED' (default) or 'Gaussian'
if('typeMargin' %in% names(listOption)){
typeMargin = listOption[['typeMargin']]
if(!typeMargin%in%c('SGED','Gaussian')) stop('typeMargin must be equal to SGED or Gaussian')
}else{
typeMargin = 'SGED'
listOption[['typeMargin']] = typeMargin
}
# depStation:
# - 'MAR1': applies a Multivariate Autoregressive process to include temporal and spatial dependences.
# - 'Gaussian': Just include a spatial dependence.
if('depStation' %in% names(listOption)){
depStation = listOption[['depStation']]
if(!depStation%in%c('MAR1','Gaussian')) stop('depStation must be equal to MAR1 or Gaussian')
}else{
depStation = 'MAR1'
listOption[['depStation']] = depStation
}
######### Initialize some objects ##########
# Temperature matrix (days x stations), taken from the S4 'obs' slot
mat.T = objGwexObs@obs
# number of stations
p = ncol(mat.T)
# Dates
vec.dates = objGwexObs@date
n.day = length(vec.dates)
# Years
vec.y = strftime(vec.dates, "%Y")
n.y = length(unique(vec.y))
# Months
vec.month = as.numeric(strftime(vec.dates, "%m"))
# list of month labels (helper defined elsewhere in the package)
vec.month.char = get.list.month()
n.m = length(vec.month.char)
# initialise some objects
list.par.margin = list()
u = matrix(nrow = n.day, ncol=p)
######## NON-STATIONARITY TREND ###########
if(hasTrend){
# estimate the trend by season
lm.slope = vector(length=n.m)
for(m in 1:n.m){
# period for this month
# NOTE(review): calls get.period.fitting.month(), not the
# get.period.fitting.temp() defined above — confirm the intended helper.
per.m = get.period.fitting.month(vec.month.char[m])
is.per = vec.month%in%per.m
# regional mean (row-wise mean over stations; assumes p >= 2 so the
# subset stays a matrix — TODO confirm for single-station data)
mat.T.per = apply(mat.T[is.per,],1,mean,na.rm=T)
# annual averages
t.mean = aggregate(mat.T.per,by=list(y=vec.y[is.per]),FUN=mean)$x
# apply a linear regression of annual mean on year index
# (assumes every year appears in each monthly window — TODO confirm)
lm.model = lm(y~x,data = list(x=1:n.y,y=t.mean))
# retrieve slope
lm.slope[m] = lm.model$coefficients[2]
}
# smooth these slopes
smooth.slope = lowess(lm.slope)$y
# predicted trend
t.trend = predict.trend(smooth.slope,vec.dates)
# return list
list.trend = list(lm = lm.slope, smooth = smooth.slope)
}else{
t.trend = NULL
list.trend = list()
}
#====================== MARGINS ========================
# for the progress bar
pb <- txtProgressBar()
Tdetrend = matrix(nrow=n.day,ncol=p)
for(i.st in 1:p){
# vector of temperature for this station
t.st = mat.T[,i.st]
######## NON-STATIONARITY TREND ###########
if(hasTrend){
# remove the trend
t.detrend = t.st - t.trend
}else{
# otherwise we do not apply a trend
t.detrend = t.st
}
Tdetrend[,i.st] = t.detrend
######## SEASONALITY ###########
# empirical estimate of the seasonal cycle for the mean and sd of the temperature
# we first remove this seasonality for each station
# if we condition on precipitation values, we fit a seasonal cycles for two precipitation states:
# (wet / dry)
if(condPrec){
t.mean.season = t.std.season = list()
t.std = vector(length=n.day)
for(state in c('dry','wet')){
# filter temperature obs corresponding to this precipitation state
t.sel = t.detrend
if(state == 'dry'){
is.state = objGwexPrec@obs[,i.st]<=th
}else if(state == 'wet'){
is.state = objGwexPrec@obs[,i.st]>th
}
# days with missing precipitation are excluded from both states
is.state[is.na(is.state)] = F
t.sel[!is.state] = NA
# seasonal cycle of the daily mean: average for each day of the year
t.mean.season[[state]] = get.seasonal(t.sel,vec.dates,"mean")
t.mean.pred = predictTempCycle(t.mean.season[[state]],vec.dates)
t.sh = t.sel - t.mean.pred # remove mean
# seasonal cycle of the daily sd: smooth estimate of sd and average for each day of the year
t.std.season[[state]] = get.seasonal(t.sh,vec.dates,"sd")
t.std.pred = predictTempCycle(t.std.season[[state]],vec.dates)
t.std[is.state] = t.sh[is.state]/t.std.pred[is.state] # standardise
}
}else{
# seasonal cycle of the daily mean: average for each day of the year
t.mean.season = get.seasonal(t.detrend,vec.dates,"mean")
t.mean.pred = predictTempCycle(t.mean.season,vec.dates)
t.sh = t.detrend - t.mean.pred # remove mean
# seasonal cycle of the daily sd: smooth estimate of sd and average for each day of the year
t.std.season = get.seasonal(t.sh,vec.dates,"sd")
t.std.pred = predictTempCycle(t.std.season,vec.dates)
t.std = t.sh/t.std.pred # standardise
}
######## MARGINAL DISTRIBUTION ###########
# An SGED distribution is fitted to the standardized temperature
# initialize list
list.SkewNormal.par = list()
if(typeMargin=='SGED'){
# for each month
for(m in vec.month.char){
# fitting window of months for this month (helper defined elsewhere)
per.m = get.period.fitting.month(m)
# filtered data: standardized values within the window, NAs dropped
t.std.per = t.std[vec.month%in%per.m]
t.filt = t.std.per[!is.na(t.std.per)]
# fit SGED distribution (nu, xi)
list.SkewNormal.par[[m]] = mySgedFit(t.filt)
}
}
# PIT transform (u are the inputs for copula functions)
if(typeMargin=='Gaussian'){
u[,i.st] = pnorm(t.std,mean=0, sd=1)
}else if(typeMargin=='SGED'){
for(m in 1:12){
is.m = (vec.month==m)
par.m = list.SkewNormal.par[[vec.month.char[m]]]
u[is.m,i.st] = fGarch::psged(t.std[is.m],mean=0, sd=1, nu=par.m[1], xi=par.m[2])
}
}
######## ALL PARAMETERS FOR THE MARGINS ###########
list.par.margin[[i.st]] = list(season.mean = t.mean.season,
season.std = t.std.season,
SkewNormal.par = list.SkewNormal.par)
# progress bar
setTxtProgressBar(pb, i.st/(p+12))
}
# detrended temperature data, returned alongside the trend estimates
list.trend[['Tdetrend']] = Tdetrend
#========== TEMPORAL AND SPATIAL DEPENDENCE ============
list.par.dep = list()
# Gaussian quantiles
q.gau = qnorm(u)
# for each month
for(m in vec.month.char){
# fitting window of months for this month
per.m = get.period.fitting.month(m)
# filtered data: Gaussian quantiles within the window
q.gau.per = q.gau[vec.month%in%per.m,]
n.day = nrow(q.gau.per)
if(depStation=='MAR1'){
# inter-site correlations between all pairs of
# stations, at lag-0 and lag-1
q.lag = cbind(q.gau.per[2:n.day,],q.gau.per[1:(n.day-1),])
# pairwise estimation of the correlation matrix
# (Pearson, pairwise-complete observations)
corALL = cor(q.lag, method="pearson", use="pairwise.complete.obs")
# inter-site Pearson correlations + its inverse
M0 = corALL[1:p,1:p]
M0inv = MASS::ginv(M0)
# lag-1 correlations between pairs of stations
M1 = corALL[1:p,(p+1):(2*p)]
# covariance matrices of the MAR(1) process (Matalas, 1967)
A = M1%*%M0inv
covZ = M0 - M1%*%M0inv%*%t(M1)
# ALL PARAMETERS FOR THE MAR(1)
list.par.dep[[m]] = list(M0=M0,M1=M1,A=A,covZ=covZ)
}else if(depStation=='Gaussian'){
# spatial dependence only: Pearson correlation matrix
M0 = cor(q.gau.per, method="pearson", use="pairwise.complete.obs")
list.par.dep[[m]] = list(M0=M0)
}
# progress bar (i.st continues from the margins loop, reaching p+12)
i.st = i.st+1
setTxtProgressBar(pb, i.st/(p+12))
}
# close progress bar
close(pb)
# return options and estimated parameters
listPar=list(Xt = q.gau,
list.trend = list.trend,
list.par.margin=list.par.margin,
list.par.dep=list.par.dep,
p=p)
return(list(listOption=listOption,listPar=listPar))
}
#==============================================================================
# sim.GWex.temp.1it
#
# Simulate one scenario of temperatures from the GWex model
#
# @param objGwexFit object of class GwexFit
# @param vec.Dates vector of dates
# @param myseed seed of the random generation, to be fixed if the results need to be replicated
# @param matSimPrec optional: matrix of precipitation simulation if temperatures are generated conditionally to precipitation states "dry" and "wet"
#
# @export
# @return \item{matrix}{Temperature simulated for the dates contained in vec.Dates at the different stations}
#
# @author Guillaume Evin
sim.GWex.temp.1it = function(objGwexFit,vec.Dates,myseed,matSimPrec){
# set seed of random generation
set.seed(myseed)
# number of stations
p = getNbStations(objGwexFit)
# retrieve option (is the model conditional to precipitation?)
condPrec = objGwexFit@fit$listOption$condPrec
# dry/wet threshold; must match the value used at fitting time
if(condPrec) th = 0.2
# characteristics of the time series to generate
n = length(vec.Dates)
vec.month = as.numeric(strftime(vec.Dates, "%m"))
# list of month labels (helper defined elsewhere in the package)
vec.month.char = get.list.month()
n.m = length(vec.month.char)
# initialise quantities
Yt.Gau = Yt.Pr = Yt.std = Yt.detrend = Yt = matrix(nrow=n,ncol=p)
###____ Spatial and temporal dependence between the stations _____###
# type of dependence
depStation = objGwexFit@fit$listOption$depStation
# If we have an autoregressive process, we simulate one variate for each time step
if(depStation=='MAR1'){
# month label of each day; flag the days where the label changes
vec.per = get.list.month()[vec.month]
change.per = c(TRUE,vec.per[2:n]!=vec.per[1:(n-1)])
# Parameters for the first iteration
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[vec.per[1]]]
# iteration 1 for the spatial dependence
Yt.Gau[1,] = MASS::mvrnorm(n=1, mu=rep(0,p), Sigma=PAR.DEP[['M0']])
for(t in 2:n){
if(change.per[t]){
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[vec.per[t]]]
}
# generate from the corresponding multivariate distribution
# t-1: autoregressive contribution A %*% Y[t-1]
Yt.Gau.prev = t(PAR.DEP$A%*%Yt.Gau[t-1,])
# generate from a multivariate Gaussian
inno = MASS::mvrnorm(n=1, mu=rep(0,p), Sigma=PAR.DEP[['covZ']])
# MAR(1)
Yt.Gau[t,] = Yt.Gau.prev + inno
}
}else{
# Spatial dependence only, we simulate month by month
# NOTE(review): list.par.dep is keyed by month name but indexed here by the
# numeric m; this relies on get.list.month() being ordered JAN..DEC — confirm.
for(m in 1:n.m){
# retrieve parameters
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[m]]
# for all days concerned by this month
is.m = (vec.month==m)
n.sim = sum(is.m)
# simulate the spatial dependence
Yt.Gau[is.m,] = MASS::mvrnorm(n=n.sim, mu=rep(0,p), Sigma=PAR.DEP[['M0']])
}
}
# transformation in probability for Gaussian variates
Yt.Pr = pnorm(Yt.Gau)
###____________ Inverse-CDF ____________###
# parameters
typeMargin = objGwexFit@fit$listOption$typeMargin
PAR.margin = objGwexFit@fit$listPar$list.par.margin
for(i.st in 1:p){
if(typeMargin=='Gaussian'){
Yt.std[,i.st] = qnorm(p=Yt.Pr[,i.st], mean=0, sd=1)
}else if(typeMargin=='SGED'){
# NOTE(review): SkewNormal.par is keyed by month name but indexed by the
# numeric m — same positional-ordering assumption as above; confirm.
for(m in 1:n.m){
par.m = PAR.margin[[i.st]]$SkewNormal.par[[m]]
is.m = (vec.month==m)
Yt.std[is.m,i.st] = fGarch::qsged(p=Yt.Pr[is.m,i.st], mean=0, sd=1, nu=par.m[1], xi=par.m[2])
}
}
}
###___ Add cycle and un-standardize _____###
for(i.st in 1:p){
# retrieve seasonal cycles
season.mean = PAR.margin[[i.st]]$season.mean
season.std = PAR.margin[[i.st]]$season.std
# predict seasonal cycles (conditionally to precipitation sim or not)
if(condPrec){
x.mean.pred = x.std.pred = vector(length=n)
for(state in c("dry","wet")){
if(state == 'dry'){
is.state = matSimPrec[,i.st]<=th
}else if(state == 'wet'){
is.state = matSimPrec[,i.st]>th
}
x.mean.pred[is.state] = predictTempCycle(season.mean[[state]],vec.Dates[is.state])
x.std.pred[is.state] = predictTempCycle(season.std[[state]],vec.Dates[is.state])
}
}else{
x.mean.pred = predictTempCycle(season.mean,vec.Dates)
x.std.pred = predictTempCycle(season.std,vec.Dates)
}
# unstandardize: scale by the seasonal sd, then add back the seasonal mean
Yt.detrend[,i.st] = Yt.std[,i.st]*x.std.pred + x.mean.pred
}
###___ Add long-term trends _____###
hasTrend = objGwexFit@fit$listOption$hasTrend
if(hasTrend){
# trend for the simulated period
t.trend = predict.trend(objGwexFit@fit$listPar$list.trend$smooth,vec.Dates)
for(i.st in 1:p){
# add trend
Yt[,i.st] = Yt.detrend[,i.st] + t.trend
}
}else{
# otherwise, we simply return Yt.detrend
Yt = Yt.detrend
}
# return results
return(list(Yt=Yt,Xt=Yt.Gau,Zt=Yt.std,Tdetrend=Yt.detrend))
}
|
/R/GWexTemp_lib.r
|
no_license
|
guillaumeevin/GWEX
|
R
| false
| false
| 20,909
|
r
|
###===============================###===============================###
### Guillaume Evin
### 19/05/2017, Grenoble
### IRSTEA
### guillaume.evin@irstea.fr
###
### Provide utilities for the estimation and simulation of the GWex
### multi-site temperature model.
###===============================###===============================###
#==============================================================================
# mySgedFit
#
# fit nu and xi parameters of the SGED distribution
#
# @param x vector of numerical values
# @export
# @return \item{estimated parameters}{Vector of two parameters: \eqn{nu} and \eqn{xi}}
#
# @author Guillaume Evin
mySgedFit = function(x){
  # Negative log-likelihood of a standardized SGED (mean 0, sd 1)
  # with shape nu and skewness xi.
  negLogLik = function(par, x){
    -sum(log(fGarch::dsged(x, 0, 1, par[1], par[2])))
  }
  # Minimize the negative log-likelihood; both parameters must stay positive.
  opt = nlminb(start = c(nu = 2, xi = 1), objective = negLogLik,
               lower = c(0, 0), upper = c(Inf, Inf), x = x)
  # Named vector c(nu, xi) of estimated parameters.
  return(opt$par)
}
#==============================================================================
# get.nonleap.dm
#
# Day/Month for a non-leap year
#
# @return \item{Vector of 365 dates for a non-leap years}{String with the format day/month}
#
# @author Guillaume Evin
get.nonleap.dm = function(){
  # All 365 days of a non-leap year (2001 is used as reference),
  # each formatted as a "ddmm" string (e.g. "0101" for 1 January).
  all.days = seq(from = as.Date("2001-01-01"), to = as.Date("2001-12-31"), by = "day")
  format(all.days, "%d%m")
}
#==============================================================================
# get.seasonal
#
# Estimate the annual cycle of temperature. For each day, we apply the function \code{fun} to all temperature data for this day (ex. the mean
# of all January, 1st). This cycle is smoothed with the \code{\link{lowess}} function.
#
# @param x temperature data
# @param vec.Dates vector of Dates associated to x
# @param myfun function apply for each day
#
# @export
#
# @return A vector of length 365, the annual cycle of temperature
#
# @author Guillaume Evin
get.seasonal = function(x,vec.Dates,myfun='mean'){
  # Estimate the smoothed annual cycle of a daily statistic of x.
  #
  # Args:
  #   x: vector of temperature data
  #   vec.Dates: vector of Dates, same length as x
  #   myfun: name of the function applied for each calendar day ('mean' or 'sd')
  #
  # Returns: numeric vector of length 365 (smoothed cycle, one value per
  #   calendar day of a non-leap year).
  #
  # "ddmm" key of each observation
  vec.Dates.dm = format(vec.Dates,'%d%m')
  # "ddmm" keys of a non-leap year (length 365)
  nonleap.dm = get.nonleap.dm()
  # raw daily statistic: apply `myfun` to all observations sharing a calendar day
  # NOTE: 29 February observations are ignored here ("2902" is not in nonleap.dm)
  seas = vector(length=365)
  for(i.d in 1:365){
    is.d = vec.Dates.dm==nonleap.dm[i.d]
    # idiom fix: TRUE instead of the reassignable shorthand T
    seas[i.d] = do.call(myfun,list(x=x[is.d],na.rm=TRUE))
  }
  # smooth the raw cycle with a small lowess span
  seas.smooth = lowess(seas,f=1/10)$y
  return(seas.smooth)
}
#==============================================================================
# predictTempCycle
#
# Return the seasonal cycle for an arbitrary period. The seasonal trend \code{season.fit} is repeated to match the sequence given in \code{vec.Dates}. For leap years, the trend returns equivalent values for the 28/02 and 29/02 days.
#
# @param season.fit seasonal trend: vector 365
# @param vec.Dates vector of Dates to predict
#
# @export
#
# @return A vector giving the seasonal cycles for the time period in vec.Dates
#
# @author Guillaume Evin
predictTempCycle = function(season.fit,vec.Dates){
  # Repeat the fitted 365-day seasonal cycle over an arbitrary period.
  # Leap days (29 February) receive the value fitted for 28 February.
  #
  # season.fit: numeric vector of length 365 (one value per calendar day)
  # vec.Dates: vector of Dates to predict
  #
  # "ddmm" key of every target date and of the 365 reference days
  key.sim = format(vec.Dates, '%d%m')
  key.ref = get.nonleap.dm()
  # map each target day onto the corresponding entry of the fitted cycle
  idx = match(key.sim, key.ref)
  # "2902" has no match in a non-leap reference year: reuse 28 February
  idx[key.sim == "2902"] = which(key.ref == "2802")
  return(season.fit[idx])
}
#==============================================================================
# get.period.fitting.temp
#
# Get 3-month moving window for one month
#
# @param m.char 3-letter name of a month (e.g. 'JAN')
#
# @return return 3-month period corresponding to this month (ex c(1,2,3) for February)
#
# @author Guillaume Evin
get.period.fitting.temp = function(m.char){
# m.char: 3-letter month label (e.g. 'JAN'), as returned by get.list.month()
# list of month labels
list.m = get.list.month()
# position of this month in the list (1..12)
i.m = which(list.m==m.char)
# extended month sequence so that windows can wrap around the year boundary
# NOTE(review): the header above announces a 3-month window (e.g. c(1,2,3) for
# February), but i.m:(i.m+4) below selects FIVE consecutive months centred on
# m.char (e.g. c(12,1,2,3,4) for February) -- confirm which is intended
vec.m.ext = c(11,12,1:12,1,2)
return(vec.m.ext[i.m:(i.m+4)])
}
#==============================================================================
# predict.trend
#
# Return the trend for an arbitrary period
#
# @param vec.slope slopes for the 12 months
# @param vec.Dates vector of Dates to predict
#
# @return return a vector giving the long-term trend for the time period in vec.Dates
#
# @author Guillaume Evin
predict.trend = function(vec.slope,vec.Dates){
  # Long-term linear trend for an arbitrary period: for each date,
  # (year index relative to the first simulated year) x (slope of its month).
  #
  # vec.slope: 12 monthly slopes
  # vec.Dates: vector of Dates to predict
  #
  # month (1..12) of each date
  mon = as.numeric(format(vec.Dates,'%m'))
  # 0-based year index relative to the first year of the period
  yr.raw = as.numeric(format(vec.Dates,'%Y'))
  yr = yr.raw - min(yr.raw)
  # vectorized trend (replaces the original year/month double loop)
  return(yr * vec.slope[mon])
}
#==============================================================================
# fit.GWex.temp
#
# estimate all the parameters for the G-Wex model of temperature
# @references Evin, G., A.-C. Favre, and B. Hingray. 2018. “Stochastic Generators of Multi-Site Daily Temperature: Comparison of Performances in Various Applications.”
# Theoretical and Applied Climatology, 1–14. doi.org/10.1007/s00704-018-2404-x.
#
# @param objGwexObs object of class \code{\linkS4class{GwexObs}}
# @param listOption list with the following fields:
# \itemize{
# \item \strong{hasTrend}: logical value, do we fit a linear trend for the long-term change, =FALSE by default
# \item \strong{objGwexPrec}: object of class \code{\linkS4class{GwexObs}} containing precipitation observations. If provided, we assume that temperature must be modelled and simulated according to the precipitation states 'dry' and 'wet'. For each state, a seasonal cycle is fitted (mean and sd).
# \item \strong{typeMargin}: 'SGED' (default) or 'Gaussian': type of marginal distribution.
# \item \strong{depStation}: logical value, do we apply a Autoregressive Multivariate Autoregressive model (order 1) =TRUE by default
# }
#
# @export
#
# @return a list containing the list of options \code{listOption} and the list of estimated parameters \code{listPar}:
#
# \itemize{
# \item \strong{list.trend}: Annual trend for the average temperature, for each month: Raw (in \code{lm}) and smoothed (in \code{smooth}).
# \item \strong{list.par.margin}: List with one element per station. Each element contains the seasonal cycle for the average temperature \code{season.mean}, the corresponding cycle for the standard deviation \code{season.std} and the parameters of the marginal distributions for each month \code{SkewNormal.par}.
# \item \strong{list.par.dep}: List with one element per month. Each element contains the matrix of correlations \eqn{M_0} for the spatial dependence.
# }
# @author Guillaume Evin
fit.GWex.temp = function(objGwexObs,listOption=NULL){
  # Estimate all parameters of the G-Wex multi-site temperature model:
  # (optional) long-term trend, seasonal cycles, marginal distributions
  # by month, and inter-site (and lag-1) dependence.
  # Returns list(listOption=..., listPar=...); see the roxygen header above.

  ######### Check inputs and assign default values ##########
  if(is.null(listOption)){
    listOption = list()
  }else{
    if(!is.list(listOption)) stop('listOption must be a list')
  }

  # hasTrend: fit a linear long-term trend for each month? (default FALSE)
  # idiom fix throughout this function: TRUE/FALSE instead of T/F
  if('hasTrend' %in% names(listOption)){
    hasTrend = listOption[['hasTrend']]
    if(!is.logical(hasTrend)) stop('hasTrend must be logical')
  }else{
    hasTrend = FALSE
    listOption[['hasTrend']] = hasTrend
  }

  # objGwexPrec: if present, we condition the temperature model on precipitation
  # observations/simulations (see Wilks, 2009)
  if('objGwexPrec' %in% names(listOption)){
    condPrec = TRUE
    objGwexPrec = listOption[['objGwexPrec']]
    th = 0.2 # threshold separating dry/wet states
  }else{
    condPrec = FALSE
  }
  listOption[['condPrec']] = condPrec

  # isParallel: not for temperature
  listOption[['isParallel']] = FALSE

  # typeMargin: 'SGED' (default) or 'Gaussian'
  if('typeMargin' %in% names(listOption)){
    typeMargin = listOption[['typeMargin']]
    if(!typeMargin%in%c('SGED','Gaussian')) stop('typeMargin must be equal to SGED or Gaussian')
  }else{
    typeMargin = 'SGED'
    listOption[['typeMargin']] = typeMargin
  }

  # depStation:
  # - 'MAR1': Multivariate Autoregressive process (order 1): temporal + spatial dependence
  # - 'Gaussian': spatial dependence only
  if('depStation' %in% names(listOption)){
    depStation = listOption[['depStation']]
    if(!depStation%in%c('MAR1','Gaussian')) stop('depStation must be equal to MAR1 or Gaussian')
  }else{
    depStation = 'MAR1'
    listOption[['depStation']] = depStation
  }

  ######### Initialize some objects ##########
  # Temperature matrix (one column per station)
  mat.T = objGwexObs@obs
  # number of stations
  p = ncol(mat.T)
  # Dates
  vec.dates = objGwexObs@date
  n.day = length(vec.dates)
  # Years
  vec.y = strftime(vec.dates, "%Y")
  n.y = length(unique(vec.y))
  # Months
  vec.month = as.numeric(strftime(vec.dates, "%m"))
  # list of month labels
  vec.month.char = get.list.month()
  n.m = length(vec.month.char)
  # initialise some objects
  list.par.margin = list()
  u = matrix(nrow = n.day, ncol=p)

  ######## NON-STATIONARITY TREND ###########
  if(hasTrend){
    # estimate the trend by season
    lm.slope = vector(length=n.m)
    for(m in 1:n.m){
      # moving window of months centred on this month
      per.m = get.period.fitting.month(vec.month.char[m])
      is.per = vec.month%in%per.m
      # regional mean (daily mean over all stations)
      mat.T.per = apply(mat.T[is.per,],1,mean,na.rm=TRUE)
      # annual averages
      t.mean = aggregate(mat.T.per,by=list(y=vec.y[is.per]),FUN=mean)$x
      # apply a linear regression against the year index
      lm.model = lm(y~x,data = list(x=1:n.y,y=t.mean))
      # retrieve slope
      lm.slope[m] = lm.model$coefficients[2]
    }
    # smooth these slopes across months
    smooth.slope = lowess(lm.slope)$y
    # predicted trend for the observed period
    t.trend = predict.trend(smooth.slope,vec.dates)
    # raw and smoothed slopes are both returned
    list.trend = list(lm = lm.slope, smooth = smooth.slope)
  }else{
    t.trend = NULL
    list.trend = list()
  }

  #====================== MARGINS ========================
  # progress bar: p station iterations then 12 month iterations
  pb <- txtProgressBar()
  Tdetrend = matrix(nrow=n.day,ncol=p)
  for(i.st in 1:p){
    # vector of temperature for this station
    t.st = mat.T[,i.st]

    ######## NON-STATIONARITY TREND ###########
    if(hasTrend){
      # remove the trend
      t.detrend = t.st - t.trend
    }else{
      # otherwise we do not apply a trend
      t.detrend = t.st
    }
    Tdetrend[,i.st] = t.detrend

    ######## SEASONALITY ###########
    # empirical estimate of the seasonal cycle for the mean and sd of the
    # temperature; we first remove this seasonality for each station. If we
    # condition on precipitation, we fit seasonal cycles for the two
    # precipitation states (dry / wet)
    if(condPrec){
      t.mean.season = t.std.season = list()
      t.std = vector(length=n.day)
      for(state in c('dry','wet')){
        # filter temperature obs corresponding to this precipitation state
        t.sel = t.detrend
        if(state == 'dry'){
          is.state = objGwexPrec@obs[,i.st]<=th
        }else if(state == 'wet'){
          is.state = objGwexPrec@obs[,i.st]>th
        }
        # missing precipitation: excluded from both states
        is.state[is.na(is.state)] = FALSE
        t.sel[!is.state] = NA
        # seasonal cycle of the daily mean: average for each day of the year
        t.mean.season[[state]] = get.seasonal(t.sel,vec.dates,"mean")
        t.mean.pred = predictTempCycle(t.mean.season[[state]],vec.dates)
        t.sh = t.sel - t.mean.pred # remove mean
        # seasonal cycle of the daily sd
        t.std.season[[state]] = get.seasonal(t.sh,vec.dates,"sd")
        t.std.pred = predictTempCycle(t.std.season[[state]],vec.dates)
        t.std[is.state] = t.sh[is.state]/t.std.pred[is.state] # standardise
      }
    }else{
      # seasonal cycle of the daily mean: average for each day of the year
      t.mean.season = get.seasonal(t.detrend,vec.dates,"mean")
      t.mean.pred = predictTempCycle(t.mean.season,vec.dates)
      t.sh = t.detrend - t.mean.pred # remove mean
      # seasonal cycle of the daily sd
      t.std.season = get.seasonal(t.sh,vec.dates,"sd")
      t.std.pred = predictTempCycle(t.std.season,vec.dates)
      t.std = t.sh/t.std.pred # standardise
    }

    ######## MARGINAL DISTRIBUTION ###########
    # An SGED distribution is fitted to the standardized temperature, per month
    list.SkewNormal.par = list()
    if(typeMargin=='SGED'){
      # for each month
      for(m in vec.month.char){
        # moving window of months centred on this month
        per.m = get.period.fitting.month(m)
        # filtered data
        t.std.per = t.std[vec.month%in%per.m]
        t.filt = t.std.per[!is.na(t.std.per)]
        # fit SGED distribution
        list.SkewNormal.par[[m]] = mySgedFit(t.filt)
      }
    }

    # PIT transform (u are the inputs for copula functions)
    if(typeMargin=='Gaussian'){
      u[,i.st] = pnorm(t.std,mean=0, sd=1)
    }else if(typeMargin=='SGED'){
      for(m in 1:12){
        is.m = (vec.month==m)
        par.m = list.SkewNormal.par[[vec.month.char[m]]]
        u[is.m,i.st] = fGarch::psged(t.std[is.m],mean=0, sd=1, nu=par.m[1], xi=par.m[2])
      }
    }

    ######## ALL PARAMETERS FOR THE MARGINS ###########
    list.par.margin[[i.st]] = list(season.mean = t.mean.season,
                                   season.std = t.std.season,
                                   SkewNormal.par = list.SkewNormal.par)
    # progress bar
    setTxtProgressBar(pb, i.st/(p+12))
  }
  # detrended temperature data
  list.trend[['Tdetrend']] = Tdetrend

  #========== TEMPORAL AND SPATIAL DEPENDENCE ============
  list.par.dep = list()
  # Gaussian quantiles
  q.gau = qnorm(u)
  # for each month
  for(m in vec.month.char){
    # moving window of months centred on this month
    per.m = get.period.fitting.month(m)
    # filtered data
    q.gau.per = q.gau[vec.month%in%per.m,]
    # number of days in this window (renamed: no longer shadows the global n.day)
    n.day.m = nrow(q.gau.per)
    if(depStation=='MAR1'){
      # inter-site correlations between all pairs of
      # stations, at lag-0 and lag-1
      q.lag = cbind(q.gau.per[2:n.day.m,],q.gau.per[1:(n.day.m-1),])
      # pairwise estimation of a Pearson correlation matrix
      corALL = cor(q.lag, method="pearson", use="pairwise.complete.obs")
      # inter-site correlations + generalized inverse
      M0 = corALL[1:p,1:p]
      M0inv = MASS::ginv(M0)
      # lag-1 correlations between pairs of stations
      M1 = corALL[1:p,(p+1):(2*p)]
      # covariance matrices of the MAR(1) process (Matalas, 1967)
      A = M1%*%M0inv
      covZ = M0 - M1%*%M0inv%*%t(M1)
      # ALL PARAMETERS FOR THE MAR(1)
      list.par.dep[[m]] = list(M0=M0,M1=M1,A=A,covZ=covZ)
    }else if(depStation=='Gaussian'){
      # pairwise estimation of a Pearson correlation matrix
      M0 = cor(q.gau.per, method="pearson", use="pairwise.complete.obs")
      list.par.dep[[m]] = list(M0=M0)
    }
    # progress bar (i.st continues the counter after the station loop)
    i.st = i.st+1
    setTxtProgressBar(pb, i.st/(p+12))
  }
  # close progress bar
  close(pb)
  # return options and estimated parameters
  listPar=list(Xt = q.gau,
               list.trend = list.trend,
               list.par.margin=list.par.margin,
               list.par.dep=list.par.dep,
               p=p)
  return(list(listOption=listOption,listPar=listPar))
}
#==============================================================================
# sim.GWex.temp.1it
#
# Simulate one scenario of temperatures from the GWex model
#
# @param objGwexFit object of class GwexFit
# @param vec.Dates vector of dates
# @param myseed seed of the random generation, to be fixed if the results need to be replicated
# @param matSimPrec optional: matrix of precipitation simulation if temperature are generated conditionally to precipitation states "dry" and "wet"
#
# @export
# @return \item{matrix}{Temperature simulated for the dates contained in vec.Dates at the different stations}
#
# @author Guillaume Evin
sim.GWex.temp.1it = function(objGwexFit,vec.Dates,myseed,matSimPrec){
# fix the seed so that a scenario can be replicated
set.seed(myseed)
# number of stations
p = getNbStations(objGwexFit)
# retrieve option (is the model conditional to precipitation states?)
condPrec = objGwexFit@fit$listOption$condPrec
# dry/wet threshold; hard-coded to the same value used at fitting time
if(condPrec) th = 0.2
# characteristics of the time series to be generated
n = length(vec.Dates)
vec.month = as.numeric(strftime(vec.Dates, "%m"))
# list of month labels
vec.month.char = get.list.month()
n.m = length(vec.month.char)
# initialise quantities (n days x p stations): Gaussian variates, probabilities,
# standardized, detrended and final temperatures
Yt.Gau = Yt.Pr = Yt.std = Yt.detrend = Yt = matrix(nrow=n,ncol=p)
###____ Spatial and temporal dependence between the stations _____###
# type of dependence
depStation = objGwexFit@fit$listOption$depStation
# If we have an autoregressive process, we simulate one variate for each time step
if(depStation=='MAR1'){
# detect the time steps where the month (and hence the parameter set) changes
vec.per = get.list.month()[vec.month]
change.per = c(TRUE,vec.per[2:n]!=vec.per[1:(n-1)])
# Parameters for the first iteration
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[vec.per[1]]]
# iteration 1 for the spatial dependence
Yt.Gau[1,] = MASS::mvrnorm(n=1, mu=rep(0,p), Sigma=PAR.DEP[['M0']])
for(t in 2:n){
if(change.per[t]){
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[vec.per[t]]]
}
# generate from the corresponding multivariate distribution
# autoregressive part: A %*% Y(t-1)
Yt.Gau.prev = t(PAR.DEP$A%*%Yt.Gau[t-1,])
# innovation: multivariate Gaussian with covariance covZ
inno = MASS::mvrnorm(n=1, mu=rep(0,p), Sigma=PAR.DEP[['covZ']])
# MAR(1)
Yt.Gau[t,] = Yt.Gau.prev + inno
}
}else{
# Spatial dependence only, we simulate month by month
for(m in 1:n.m){
# retrieve parameters
PAR.DEP = objGwexFit@fit$listPar$list.par.dep[[m]]
# for all days concerned by this month
is.m = (vec.month==m)
n.sim = sum(is.m)
# simulate the spatial dependence
Yt.Gau[is.m,] = MASS::mvrnorm(n=n.sim, mu=rep(0,p), Sigma=PAR.DEP[['M0']])
}
}
# transformation in probability for Gaussian variates
Yt.Pr = pnorm(Yt.Gau)
###____________ Inverse-CDF ____________###
# parameters of the marginal distributions
typeMargin = objGwexFit@fit$listOption$typeMargin
PAR.margin = objGwexFit@fit$listPar$list.par.margin
for(i.st in 1:p){
if(typeMargin=='Gaussian'){
Yt.std[,i.st] = qnorm(p=Yt.Pr[,i.st], mean=0, sd=1)
}else if(typeMargin=='SGED'){
for(m in 1:n.m){
# SGED parameters (nu, xi) fitted for this station and month
par.m = PAR.margin[[i.st]]$SkewNormal.par[[m]]
is.m = (vec.month==m)
Yt.std[is.m,i.st] = fGarch::qsged(p=Yt.Pr[is.m,i.st], mean=0, sd=1, nu=par.m[1], xi=par.m[2])
}
}
}
###___ Add cycle and un-standardize _____###
for(i.st in 1:p){
# retrieve seasonal cycles
season.mean = PAR.margin[[i.st]]$season.mean
season.std = PAR.margin[[i.st]]$season.std
# predict seasonal cycles (conditionally to precipitation sim or not)
if(condPrec){
x.mean.pred = x.std.pred = vector(length=n)
for(state in c("dry","wet")){
if(state == 'dry'){
is.state = matSimPrec[,i.st]<=th
}else if(state == 'wet'){
is.state = matSimPrec[,i.st]>th
}
# NOTE(review): unlike the fitting code, NAs in matSimPrec are not reset to
# FALSE here -- assumes simulated precipitation contains no NA; confirm
x.mean.pred[is.state] = predictTempCycle(season.mean[[state]],vec.Dates[is.state])
x.std.pred[is.state] = predictTempCycle(season.std[[state]],vec.Dates[is.state])
}
}else{
x.mean.pred = predictTempCycle(season.mean,vec.Dates)
x.std.pred = predictTempCycle(season.std,vec.Dates)
}
# unstandardize: rescale by the seasonal sd and add the seasonal mean
Yt.detrend[,i.st] = Yt.std[,i.st]*x.std.pred + x.mean.pred
}
###___ Add long-term trends _____###
hasTrend = objGwexFit@fit$listOption$hasTrend
if(hasTrend){
# trend for the simulated period
t.trend = predict.trend(objGwexFit@fit$listPar$list.trend$smooth,vec.Dates)
for(i.st in 1:p){
# add trend
Yt[,i.st] = Yt.detrend[,i.st] + t.trend
}
}else{
# otherwise, we simply return Yt.detrend
Yt = Yt.detrend
}
# return results: Yt final temperature, Xt Gaussian variates,
# Zt standardized variates, Tdetrend detrended temperature
return(list(Yt=Yt,Xt=Yt.Gau,Zt=Yt.std,Tdetrend=Yt.detrend))
}
|
# read_questionnaires.R
################# libraries #########################
# auxiliar library for "smartbind" method
library("gtools", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.3")
################## functions ##########################
read_questionnaire <- function(route, className) {
  # Read one questionnaire export (.csv) and tag every row with its class.
  #
  # Args:
  #   route: complete path of the .csv file to read.
  #   className: class name added as a column to the data.
  #
  # Returns:
  #   Data frame with the file contents, bookkeeping columns removed,
  #   plus a `class` column.
  df <- read.csv(file = route, header = TRUE, sep = ",")
  # drop survey-platform bookkeeping columns (no-op if a column is absent)
  for (col in c("id", "lastpage", "startlanguage", "submitdate")) {
    df[[col]] <- NULL
  }
  # tag rows with the class name
  df$class <- className
  return(df)
}
read_data_from_class <- function(route, className, session) {
  # Read the pre and post questionnaires of one class session and join them.
  #
  # Args:
  #   route: directory holding the .csv files.
  #   className: class name (also used, lower-cased, in the file names).
  #   session: session identifier appearing in the file names.
  #
  # Returns:
  #   Data frame merging pre and post answers on their common columns.
  pre_file  <- paste0(route, "1-pre-",  tolower(className), "-", session, ".csv")
  post_file <- paste0(route, "2-post-", tolower(className), "-", session, ".csv")
  pre  <- read_questionnaire(pre_file,  className)
  post <- read_questionnaire(post_file, className)
  # inner join on all shared columns
  return(merge(pre, post))
}
read_data <- function(route, classes) {
  # Read pre/post questionnaires for every class and stack them together.
  #
  # Args:
  #   route: directory holding the .csv files.
  #   classes: named list mapping class name -> number of sessions.
  #
  # Returns:
  #   One data frame with the rows of every class/session.
  data <- NULL
  for (cls in names(classes)) {
    for (s in seq_len(classes[[cls]])) {
      chunk <- read_data_from_class(route, cls, s)
      # smartbind (gtools) aligns columns that differ between questionnaires
      data <- if (is.null(data)) chunk else smartbind(data, chunk)
    }
  }
  return(data)
}
list_of_classes <- function() {
  # Number of completed sessions per class, keyed by class name.
  list(
    "1ESO"  = 4,
    "2ESO"  = 4,
    "3ESO"  = 4,
    "4ESO"  = 4,
    "1BACH" = 1
  )
}
########### evaluacion formativa ###################
evaluacionFormativa <- function() {
  # Formative-evaluation run: load both groups, combine and export them.
  data3ESO <- read_data_from_class("/home/cristina/R/data/", "3ESO", "prueba")
  dataBachillerato <- read_data_from_class("/home/cristina/R/data/", "Bachillerato", "prueba")
  # join data from different groups together
  dataEvaluacionFormativa <- rbind(data3ESO, dataBachillerato)
  # export data
  # BUG FIX: previously exported `data` (undefined in this scope) instead of
  # the combined data frame built above
  write.table(dataEvaluacionFormativa, "/home/cristina/R/tdata/dataEvaluacionFormativa.csv", sep = ",")
  invisible(dataEvaluacionFormativa)
}
########### TV ###################
questionnairesTV <- function() {
  # Load the TV-session questionnaires and normalize column names.
  tv <- read_data_from_class("C:/Users/Cristina/Dropbox/TFM/Trazas Experimentos FirstAidGame/dia 6 - tele/", "tele", "")
  # rename Spanish survey columns to the English names used downstream
  renames <- c(token = "CODE", SEXO = "SEX", EDAD = "AGE")
  for (old in names(renames)) {
    colnames(tv)[which(names(tv) == old)] <- renames[[old]]
  }
  return(tv)
  # export data
  # write.table(data, "/home/cristina/R/tdata/dataQuestionnairesTV.csv", sep=",")
}
#################### main ############################
read_questionnaires <- function() {
  # Main entry point: read every class listed in list_of_classes(),
  # normalize column names, export the merged table and return it.
  data <- read_data("/home/cristina/R/data/", list_of_classes())
  # rename Spanish survey columns to the English names used downstream
  renames <- c(token = "CODE", SEXO = "SEX", EDAD = "AGE")
  for (old in names(renames)) {
    colnames(data)[which(names(data) == old)] <- renames[[old]]
  }
  # export data
  write.table(data, "/home/cristina/R/tdata/dataQuestionnaires.csv", sep = ",")
  return(data)
}
|
/src/read_questionnaires.R
|
no_license
|
crisal24/data-mining-gla
|
R
| false
| false
| 4,554
|
r
|
# read_questionnaires.R
################# libraries #########################
# auxiliar library for "smartbind" method
library("gtools", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.3")
################## functions ##########################
read_questionnaire <- function(route, className) {
  # Read one questionnaire export (.csv) and tag every row with its class.
  #
  # Args:
  #   route: complete path of the .csv file to read.
  #   className: class name added as a column to the data.
  #
  # Returns:
  #   Data frame with the file contents, bookkeeping columns removed,
  #   plus a `class` column.
  df <- read.csv(file = route, header = TRUE, sep = ",")
  # drop survey-platform bookkeeping columns (no-op if a column is absent)
  for (col in c("id", "lastpage", "startlanguage", "submitdate")) {
    df[[col]] <- NULL
  }
  # tag rows with the class name
  df$class <- className
  return(df)
}
read_data_from_class <- function(route, className, session) {
  # Read the pre and post questionnaires of one class session and join them.
  #
  # Args:
  #   route: directory holding the .csv files.
  #   className: class name (also used, lower-cased, in the file names).
  #   session: session identifier appearing in the file names.
  #
  # Returns:
  #   Data frame merging pre and post answers on their common columns.
  pre_file  <- paste0(route, "1-pre-",  tolower(className), "-", session, ".csv")
  post_file <- paste0(route, "2-post-", tolower(className), "-", session, ".csv")
  pre  <- read_questionnaire(pre_file,  className)
  post <- read_questionnaire(post_file, className)
  # inner join on all shared columns
  return(merge(pre, post))
}
read_data <- function(route, classes) {
  # Read pre/post questionnaires for every class and stack them together.
  #
  # Args:
  #   route: directory holding the .csv files.
  #   classes: named list mapping class name -> number of sessions.
  #
  # Returns:
  #   One data frame with the rows of every class/session.
  data <- NULL
  for (cls in names(classes)) {
    for (s in seq_len(classes[[cls]])) {
      chunk <- read_data_from_class(route, cls, s)
      # smartbind (gtools) aligns columns that differ between questionnaires
      data <- if (is.null(data)) chunk else smartbind(data, chunk)
    }
  }
  return(data)
}
list_of_classes <- function() {
  # Number of completed sessions per class, keyed by class name.
  list(
    "1ESO"  = 4,
    "2ESO"  = 4,
    "3ESO"  = 4,
    "4ESO"  = 4,
    "1BACH" = 1
  )
}
########### evaluacion formativa ###################
evaluacionFormativa <- function() {
  # Formative-evaluation run: load both groups, combine and export them.
  data3ESO <- read_data_from_class("/home/cristina/R/data/", "3ESO", "prueba")
  dataBachillerato <- read_data_from_class("/home/cristina/R/data/", "Bachillerato", "prueba")
  # join data from different groups together
  dataEvaluacionFormativa <- rbind(data3ESO, dataBachillerato)
  # export data
  # BUG FIX: previously exported `data` (undefined in this scope) instead of
  # the combined data frame built above
  write.table(dataEvaluacionFormativa, "/home/cristina/R/tdata/dataEvaluacionFormativa.csv", sep = ",")
  invisible(dataEvaluacionFormativa)
}
########### TV ###################
questionnairesTV <- function() {
  # Load the TV-session questionnaires and normalize column names.
  tv <- read_data_from_class("C:/Users/Cristina/Dropbox/TFM/Trazas Experimentos FirstAidGame/dia 6 - tele/", "tele", "")
  # rename Spanish survey columns to the English names used downstream
  renames <- c(token = "CODE", SEXO = "SEX", EDAD = "AGE")
  for (old in names(renames)) {
    colnames(tv)[which(names(tv) == old)] <- renames[[old]]
  }
  return(tv)
  # export data
  # write.table(data, "/home/cristina/R/tdata/dataQuestionnairesTV.csv", sep=",")
}
#################### main ############################
read_questionnaires <- function() {
  # Main entry point: read every class listed in list_of_classes(),
  # normalize column names, export the merged table and return it.
  data <- read_data("/home/cristina/R/data/", list_of_classes())
  # rename Spanish survey columns to the English names used downstream
  renames <- c(token = "CODE", SEXO = "SEX", EDAD = "AGE")
  for (old in names(renames)) {
    colnames(data)[which(names(data) == old)] <- renames[[old]]
  }
  # export data
  write.table(data, "/home/cristina/R/tdata/dataQuestionnaires.csv", sep = ",")
  return(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.globalaccelerator_operations.R
\name{create_listener}
\alias{create_listener}
\title{Create a listener to process inbound connections from clients to an accelerator}
\usage{
create_listener(AcceleratorArn, PortRanges, Protocol,
ClientAffinity = NULL, IdempotencyToken)
}
\arguments{
\item{AcceleratorArn}{[required] The Amazon Resource Name (ARN) of your accelerator.}
\item{PortRanges}{[required] The list of port ranges to support for connections from clients to your accelerator.}
\item{Protocol}{[required] The protocol for connections from clients to your accelerator.}
\item{ClientAffinity}{Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.
AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is \code{NONE}, Global Accelerator uses the "five-tuple" (5-tuple) properties---source IP address, source port, destination IP address, destination port, and protocol---to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.
If you want a given client to always be routed to the same endpoint, set client affinity to \code{SOURCE_IP} instead. When you use the \code{SOURCE_IP} setting, Global Accelerator uses the "two-tuple" (2-tuple) properties--- source (client) IP address and destination IP address---to select the hash value.
The default value is \code{NONE}.}
\item{IdempotencyToken}{[required] A unique, case-sensitive identifier that you provide to ensure the idempotency---that is, the uniqueness---of the request.}
}
\description{
Create a listener to process inbound connections from clients to an accelerator. Connections arrive to assigned static IP addresses on a port, port range, or list of port ranges that you specify. To see an AWS CLI example of creating a listener, scroll down to \strong{Example}.
}
\section{Accepted Parameters}{
\preformatted{create_listener(
AcceleratorArn = "string",
PortRanges = list(
list(
FromPort = 123,
ToPort = 123
)
),
Protocol = "TCP"|"UDP",
ClientAffinity = "NONE"|"SOURCE_IP",
IdempotencyToken = "string"
)
}
}
|
/service/paws.globalaccelerator/man/create_listener.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 2,598
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.globalaccelerator_operations.R
\name{create_listener}
\alias{create_listener}
\title{Create a listener to process inbound connections from clients to an accelerator}
\usage{
create_listener(AcceleratorArn, PortRanges, Protocol,
ClientAffinity = NULL, IdempotencyToken)
}
\arguments{
\item{AcceleratorArn}{[required] The Amazon Resource Name (ARN) of your accelerator.}
\item{PortRanges}{[required] The list of port ranges to support for connections from clients to your accelerator.}
\item{Protocol}{[required] The protocol for connections from clients to your accelerator.}
\item{ClientAffinity}{Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.
AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is \code{NONE}, Global Accelerator uses the "five-tuple" (5-tuple) properties---source IP address, source port, destination IP address, destination port, and protocol---to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.
If you want a given client to always be routed to the same endpoint, set client affinity to \code{SOURCE_IP} instead. When you use the \code{SOURCE_IP} setting, Global Accelerator uses the "two-tuple" (2-tuple) properties--- source (client) IP address and destination IP address---to select the hash value.
The default value is \code{NONE}.}
\item{IdempotencyToken}{[required] A unique, case-sensitive identifier that you provide to ensure the idempotency---that is, the uniqueness---of the request.}
}
\description{
Create a listener to process inbound connections from clients to an accelerator. Connections arrive to assigned static IP addresses on a port, port range, or list of port ranges that you specify. To see an AWS CLI example of creating a listener, scroll down to \strong{Example}.
}
\section{Accepted Parameters}{
\preformatted{create_listener(
AcceleratorArn = "string",
PortRanges = list(
list(
FromPort = 123,
ToPort = 123
)
),
Protocol = "TCP"|"UDP",
ClientAffinity = "NONE"|"SOURCE_IP",
IdempotencyToken = "string"
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mods.R
\name{post_abstain_mod}
\alias{post_abstain_mod}
\title{Abstain a mod}
\usage{
post_abstain_mod(game_domain_name, mod_id, version)
}
\arguments{
\item{game_domain_name}{A character. Game domain of the mod to abstain from endorsing.}
\item{mod_id}{A numeric. ID of the mod to abstain from endorsing.}
\item{version}{A character. Mod version.}
}
\value{
}
\description{
Abstain from endorsing a mod
}
\examples{
\dontrun{
library(nexusmodsr)
nexus_auth()
abstain <- post_abstain_mod("Stardew Valley", 2400, "3.11.0")
}
}
\references{
https://app.swaggerhub.com/apis-docs/NexusMods/nexus-mods_public_api_params_in_form_data/1.0#/Mods/post_v1_games_game_domain_name_mods_id_abstain.json
}
\concept{mod}
|
/man/post_abstain_mod.Rd
|
permissive
|
KoderKow/nexusmodsr
|
R
| false
| true
| 783
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mods.R
\name{post_abstain_mod}
\alias{post_abstain_mod}
\title{Abstain a mod}
\usage{
post_abstain_mod(game_domain_name, mod_id, version)
}
\arguments{
\item{game_domain_name}{A character. Game domain of the mod to abstain from endorsing.}
\item{mod_id}{A numeric. ID of the mod to abstain from endorsing.}
\item{version}{A character. Mod version.}
}
\value{
}
\description{
Abstain from endorsing a mod
}
\examples{
\dontrun{
library(nexusmodsr)
nexus_auth()
abstain <- post_abstain_mod("Stardew Valley", 2400, "3.11.0")
}
}
\references{
https://app.swaggerhub.com/apis-docs/NexusMods/nexus-mods_public_api_params_in_form_data/1.0#/Mods/post_v1_games_game_domain_name_mods_id_abstain.json
}
\concept{mod}
|
# Written by: Kelvin Abrokwa-Johnson
# Admissions Rate Line Chart
#
# Reads aggregated admissions data (agg_data_4.csv) and plots male vs.
# female acceptance rates by year as a googleVis line chart.
setwd('/users/kelvinabrokwa/documents/repositories/wm-stats-blog/admissions/kelvin/final post') # Make sure to change this working directory to fit your computer
library(googleVis)
# check.names = FALSE keeps the long, space-containing column names intact.
raw <- read.csv('agg_data_4.csv', header = TRUE, check.names = FALSE)
year <- c(raw$Year)
# Applications by sex
male_apps <- c(raw$'Total first-year men who applied')
fem_apps <- c(raw$'Total first-year women who applied')
df_apps <- data.frame(year, Male = male_apps, Female = fem_apps)
# Admissions by sex
male_adm <- c(raw$'Total first-year men who were admitted')
fem_adm <- c(raw$'Total first-year women who were admitted')
df_adm <- data.frame(year, Male = male_adm, Female = fem_adm)
# Enrollment by sex.
# BUG FIX: fem_enr previously read 'Total part-time first-year men who
# enrolled' (a copy-paste slip) even though it fills the Female column.
# NOTE(review): confirm the women's full-time column name exists in the CSV.
male_enr <- c(raw$'Total full-time first-year men who enrolled')
fem_enr <- c(raw$'Total full-time first-year women who enrolled')
df_enr <- data.frame(year, Male = male_enr, Female = fem_enr)
# Acceptance rate (%) = admitted / applied, kept to 4 significant digits.
male_adm_rate <- signif(((male_adm / male_apps) * 100), digits = 4)
fem_adm_rate <- signif(((fem_adm / fem_apps) * 100), digits = 4)
df_adm_rate <- data.frame(year, Male = male_adm_rate, Female = fem_adm_rate)
line_adm_rate <- gvisLineChart(df_adm_rate,
                options = list(title = "Acceptance Rate (number accepted / number of applicants)",
                vAxes = "[{title:'Percentage'}]",
                hAxes = "[{title:'Year'}]",
                fontSize = 10,
                width = 550))
plot(line_adm_rate)
|
/admissions/Kelvin/Final Post/adm_rate.R
|
no_license
|
kelvinabrokwa/wm-stats-blog
|
R
| false
| false
| 1,519
|
r
|
# Written by: Kelvin Abrokwa-Johnson
# Admissions Rate Line Chart
#
# Reads aggregated admissions data (agg_data_4.csv) and plots male vs.
# female acceptance rates by year as a googleVis line chart.
setwd('/users/kelvinabrokwa/documents/repositories/wm-stats-blog/admissions/kelvin/final post') # Make sure to change this working directory to fit your computer
library(googleVis)
# check.names = FALSE keeps the long, space-containing column names intact.
raw <- read.csv('agg_data_4.csv', header = TRUE, check.names = FALSE)
year <- c(raw$Year)
# Applications by sex
male_apps <- c(raw$'Total first-year men who applied')
fem_apps <- c(raw$'Total first-year women who applied')
df_apps <- data.frame(year, Male = male_apps, Female = fem_apps)
# Admissions by sex
male_adm <- c(raw$'Total first-year men who were admitted')
fem_adm <- c(raw$'Total first-year women who were admitted')
df_adm <- data.frame(year, Male = male_adm, Female = fem_adm)
# Enrollment by sex.
# BUG FIX: fem_enr previously read 'Total part-time first-year men who
# enrolled' (a copy-paste slip) even though it fills the Female column.
# NOTE(review): confirm the women's full-time column name exists in the CSV.
male_enr <- c(raw$'Total full-time first-year men who enrolled')
fem_enr <- c(raw$'Total full-time first-year women who enrolled')
df_enr <- data.frame(year, Male = male_enr, Female = fem_enr)
# Acceptance rate (%) = admitted / applied, kept to 4 significant digits.
male_adm_rate <- signif(((male_adm / male_apps) * 100), digits = 4)
fem_adm_rate <- signif(((fem_adm / fem_apps) * 100), digits = 4)
df_adm_rate <- data.frame(year, Male = male_adm_rate, Female = fem_adm_rate)
line_adm_rate <- gvisLineChart(df_adm_rate,
                options = list(title = "Acceptance Rate (number accepted / number of applicants)",
                vAxes = "[{title:'Percentage'}]",
                hAxes = "[{title:'Year'}]",
                fontSize = 10,
                width = 550))
plot(line_adm_rate)
|
# Extract the statewide aggregate row from a school-level data frame.
# The row with sch_id == 999 is the state total; its sch_name column is
# redundant (always "State Total"), so it is dropped, and dist_name is
# normalized to "State" for consistency.
select_state <- function(df) {
  state_rows <- filter(df, sch_id == 999)     # keep only the state id row
  state_rows <- select(state_rows, -sch_name) # drop redundant label column
  mutate(state_rows, dist_name = "State")     # make the district label consistent
}
# Extract district-level rows: exclude the statewide row (sch_id == 999)
# and keep only 3-character ids (district ids); the sch_name column is
# redundant for these rows (all values read "District Total").
select_dist <- function(df) {
  no_state <- filter(df, sch_id != 999)                  # drop the state row
  districts <- filter(no_state, str_length(sch_id) == 3) # 3-char ids are districts
  select(districts, -sch_name)                           # drop redundant column
}
# Extract school-level rows: school ids are exactly 6 characters long.
select_sch <- function(df) {
  filter(df, str_length(sch_id) == 6)
}
|
/raw_data/select_level.R
|
no_license
|
Kaustuv2809/kysrc
|
R
| false
| false
| 694
|
r
|
# Extract the statewide aggregate row from a school-level data frame.
# The row with sch_id == 999 is the state total; its sch_name column is
# redundant (always "State Total"), so it is dropped, and dist_name is
# normalized to "State" for consistency.
select_state <- function(df) {
  state_rows <- filter(df, sch_id == 999)     # keep only the state id row
  state_rows <- select(state_rows, -sch_name) # drop redundant label column
  mutate(state_rows, dist_name = "State")     # make the district label consistent
}
# Extract district-level rows: exclude the statewide row (sch_id == 999)
# and keep only 3-character ids (district ids); the sch_name column is
# redundant for these rows (all values read "District Total").
select_dist <- function(df) {
  no_state <- filter(df, sch_id != 999)                  # drop the state row
  districts <- filter(no_state, str_length(sch_id) == 3) # 3-char ids are districts
  select(districts, -sch_name)                           # drop redundant column
}
# Extract school-level rows: school ids are exactly 6 characters long.
select_sch <- function(df) {
  filter(df, str_length(sch_id) == 6)
}
|
# Simple smoke-test script.
print("This file created in R studio")
# BUG FIX: R is case-sensitive and has no `Print` function; the original
# `Print(...)` call would error with "could not find function".
print("Now lives on GitHub")
|
/Testing.R
|
no_license
|
BhavarthShah/Test-Repository
|
R
| false
| false
| 70
|
r
|
# Simple smoke-test script.
print("This file created in R studio")
# BUG FIX: R is case-sensitive and has no `Print` function; the original
# `Print(...)` call would error with "could not find function".
print("Now lives on GitHub")
|
\name{alpha}
\docType{data}
\alias{alpha}
\encoding{UTF-8}
\title{Genetic Components of Alcoholism}
\description{
Levels of expressed alpha synuclein mRNA in three groups of allele lengths of
NACP-REP1.
}
\usage{alpha}
\format{
A data frame with 97 observations on 2 variables.
\describe{
\item{\code{alength}}{
allele length, a factor with levels \code{"short"}, \code{"intermediate"}
and \code{"long"}.
}
\item{\code{elevel}}{
expression levels of alpha synuclein mRNA.
}
}
}
\details{
Various studies have linked alcohol dependence phenotypes to chromosome 4.
One candidate gene is NACP (non-amyloid component of plaques), coding for
alpha synuclein. \enc{Bönsch}{Boensch} \emph{et al.} (2005) found longer
alleles of NACP-REP1 in alcohol-dependent patients compared with healthy
controls and reported that the allele lengths show some association with
levels of expressed alpha synuclein mRNA.
}
\source{
\enc{Bönsch}{Boensch}, D., Lederer, T., Reulbach, U., Hothorn, T., Kornhuber,
J. and Bleich, S. (2005). Joint analysis of the \emph{NACP}-REP1 marker
within the alpha synuclein gene concludes association with alcohol dependence.
\emph{Human Molecular Genetics} \bold{14}(7), 967--971.
\doi{10.1093/hmg/ddi090}
}
\references{
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2006). A Lego
system for conditional inference. \emph{The American Statistician}
\bold{60}(3), 257--263. \doi{10.1198/000313006X118430}
Winell, H. and \enc{Lindbäck}{Lindbaeck}, J. (2018). A general
score-independent test for order-restricted inference. \emph{Statistics in
Medicine} \bold{37}(21), 3078--3090. \doi{10.1002/sim.7690}
}
\examples{
## Boxplots
boxplot(elevel ~ alength, data = alpha)
## Asymptotic Kruskal-Wallis test
kruskal_test(elevel ~ alength, data = alpha)
## Asymptotic Kruskal-Wallis test using midpoint scores
kruskal_test(elevel ~ alength, data = alpha,
scores = list(alength = c(2, 7, 11)))
## Asymptotic score-independent test
## Winell and Lindbaeck (2018)
(it <- independence_test(elevel ~ alength, data = alpha,
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo),
xtrafo = function(data)
trafo(data, factor_trafo = function(x)
zheng_trafo(as.ordered(x)))))
## Extract the "best" set of scores
ss <- statistic(it, type = "standardized")
idx <- which(abs(ss) == max(abs(ss)), arr.ind = TRUE)
ss[idx[1], idx[2], drop = FALSE]
}
\keyword{datasets}
|
/man/alpha.Rd
|
no_license
|
cran/coin
|
R
| false
| false
| 2,636
|
rd
|
\name{alpha}
\docType{data}
\alias{alpha}
\encoding{UTF-8}
\title{Genetic Components of Alcoholism}
\description{
Levels of expressed alpha synuclein mRNA in three groups of allele lengths of
NACP-REP1.
}
\usage{alpha}
\format{
A data frame with 97 observations on 2 variables.
\describe{
\item{\code{alength}}{
allele length, a factor with levels \code{"short"}, \code{"intermediate"}
and \code{"long"}.
}
\item{\code{elevel}}{
expression levels of alpha synuclein mRNA.
}
}
}
\details{
Various studies have linked alcohol dependence phenotypes to chromosome 4.
One candidate gene is NACP (non-amyloid component of plaques), coding for
alpha synuclein. \enc{Bönsch}{Boensch} \emph{et al.} (2005) found longer
alleles of NACP-REP1 in alcohol-dependent patients compared with healthy
controls and reported that the allele lengths show some association with
levels of expressed alpha synuclein mRNA.
}
\source{
\enc{Bönsch}{Boensch}, D., Lederer, T., Reulbach, U., Hothorn, T., Kornhuber,
J. and Bleich, S. (2005). Joint analysis of the \emph{NACP}-REP1 marker
within the alpha synuclein gene concludes association with alcohol dependence.
\emph{Human Molecular Genetics} \bold{14}(7), 967--971.
\doi{10.1093/hmg/ddi090}
}
\references{
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2006). A Lego
system for conditional inference. \emph{The American Statistician}
\bold{60}(3), 257--263. \doi{10.1198/000313006X118430}
Winell, H. and \enc{Lindbäck}{Lindbaeck}, J. (2018). A general
score-independent test for order-restricted inference. \emph{Statistics in
Medicine} \bold{37}(21), 3078--3090. \doi{10.1002/sim.7690}
}
\examples{
## Boxplots
boxplot(elevel ~ alength, data = alpha)
## Asymptotic Kruskal-Wallis test
kruskal_test(elevel ~ alength, data = alpha)
## Asymptotic Kruskal-Wallis test using midpoint scores
kruskal_test(elevel ~ alength, data = alpha,
scores = list(alength = c(2, 7, 11)))
## Asymptotic score-independent test
## Winell and Lindbaeck (2018)
(it <- independence_test(elevel ~ alength, data = alpha,
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo),
xtrafo = function(data)
trafo(data, factor_trafo = function(x)
zheng_trafo(as.ordered(x)))))
## Extract the "best" set of scores
ss <- statistic(it, type = "standardized")
idx <- which(abs(ss) == max(abs(ss)), arr.ind = TRUE)
ss[idx[1], idx[2], drop = FALSE]
}
\keyword{datasets}
|
# Pick one observation-time index per subject for CV prediction.
#
# Args:
#   tt:       n-row matrix of observation times, padded with NA.
#   n:        number of subjects (rows of tt).
#   isRandom: if 1, sample a random valid index for each subject that has
#             at least two time points (subjects with a single point keep
#             index 0); otherwise use a deterministic modular rule.
#
# Returns a list with:
#   tjID - chosen time-point index per subject
#   ni   - number of non-missing time points per subject
getTimeID <- function(tt, n, isRandom) {
  # Count non-missing time points for every subject up front.
  ni <- vapply(seq_len(n), function(i) sum(!is.na(tt[i, ])), numeric(1))
  tjID <- numeric(n)
  if (isRandom == 1) {
    for (i in seq_len(n)) {
      # Random index of the time point used in the CV prediction part;
      # only drawn when the subject has two or more time points.
      if (ni[i] > 1) {
        tjID[i] <- sample(1:ni[i], 1)
      }
    }
  } else {
    for (i in seq_len(n)) {
      # Deterministic, reproducible index in 1..ni[i].
      tjID[i] <- ((1000 + i) %% ni[i]) + 1
    }
  }
  list(tjID = tjID, ni = ni)
}
|
/R/getTimeID.R
|
no_license
|
cran/PACE
|
R
| false
| false
| 533
|
r
|
# Pick one observation-time index per subject for CV prediction.
#
# Args:
#   tt:       n-row matrix of observation times, padded with NA.
#   n:        number of subjects (rows of tt).
#   isRandom: if 1, sample a random valid index for each subject that has
#             at least two time points (subjects with a single point keep
#             index 0); otherwise use a deterministic modular rule.
#
# Returns a list with:
#   tjID - chosen time-point index per subject
#   ni   - number of non-missing time points per subject
getTimeID <- function(tt, n, isRandom) {
  # Count non-missing time points for every subject up front.
  ni <- vapply(seq_len(n), function(i) sum(!is.na(tt[i, ])), numeric(1))
  tjID <- numeric(n)
  if (isRandom == 1) {
    for (i in seq_len(n)) {
      # Random index of the time point used in the CV prediction part;
      # only drawn when the subject has two or more time points.
      if (ni[i] > 1) {
        tjID[i] <- sample(1:ni[i], 1)
      }
    }
  } else {
    for (i in seq_len(n)) {
      # Deterministic, reproducible index in 1..ni[i].
      tjID[i] <- ((1000 + i) %% ni[i]) + 1
    }
  }
  list(tjID = tjID, ni = ni)
}
|
#' Create a protocol template for the study
#'
#' @details
#' This function will create a template protocol
#'
#' @param outputLocation Directory location where you want the protocol written to
#' @export
createPlpProtocol <- function(outputLocation = getwd()){
predictionAnalysisListFile <- system.file("settings",
"predictionAnalysisList.json",
package = "ABCciprofloxacin")
#figure1 <- 'vignettes/Figure1.png'
figure1 <- system.file("doc",
"Figure1.png",
package = "PatientLevelPrediction")
#============== STYLES =======================================================
style_title <- officer::shortcuts$fp_bold(font.size = 28)
style_title_italic <- officer::shortcuts$fp_bold(font.size = 30, italic = TRUE)
style_toc <- officer::shortcuts$fp_bold(font.size = 16)
style_helper_text <- officer::shortcuts$fp_italic(color = "#FF8C00")
style_citation <- officer::shortcuts$fp_italic(shading.color = "grey")
style_table_title <- officer::shortcuts$fp_bold(font.size = 14, italic = TRUE)
style_hidden_text <- officer::shortcuts$fp_italic(color = "#FFFFFF")
#============== VARIABLES ====================================================
json <- tryCatch({ParallelLogger::loadSettingsFromJson(file=predictionAnalysisListFile)},
error=function(cond) {
stop('Issue with json file...')
})
#analysis information
analysisList <- PatientLevelPrediction::loadPredictionAnalysisList(predictionAnalysisListFile)
targetCohortNamesList <- paste(analysisList$cohortNames, collapse = ', ')
targetCohorts <- as.data.frame(cbind(analysisList$cohortIds,analysisList$cohortNames,rep("TBD",length(analysisList$cohortNames))), stringsAsFactors = FALSE)
names(targetCohorts) <- c("Cohort ID", "Cohort Name","Description")
targetCohorts <- targetCohorts[order(as.numeric(targetCohorts$`Cohort ID`)),]
outcomeCohortNamesList <- paste(analysisList$outcomeNames, collapse = ', ')
outcomeCohorts <- as.data.frame(cbind(analysisList$outcomeIds,analysisList$outcomeNames,rep("TBD",length(analysisList$outcomeNames))), stringsAsFactors = FALSE)
names(outcomeCohorts) <- c("Cohort ID", "Cohort Name","Description")
outcomeCohorts <- outcomeCohorts[order(as.numeric(outcomeCohorts$`Cohort ID`)),]
#time at risk
tar <- unique(
lapply(json$populationSettings, function(x)
paste0("Risk Window Start: ",x$riskWindowStart,
', Add Exposure Days to Start: ',x$addExposureDaysToStart,
', Risk Window End: ', x$riskWindowEnd,
', Add Exposure Days to End: ', x$addExposureDaysToEnd)))
tarDF <- as.data.frame(rep(times = length(tar),''), stringsAsFactors = FALSE)
names(tarDF) <- c("Time at Risk")
for(i in 1:length(tar)){
tarDF[i,1] <- paste0("[Time at Risk Settings #", i, '] ', tar[[i]])
}
tarList <- paste(tarDF$`Time at Risk`, collapse = ', ')
tarListDF <- as.data.frame(tarList)
covSettings <- lapply(json$covariateSettings, function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
popSettings <- lapply(json$populationSettings, function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
plpModelSettings <- PatientLevelPrediction::createPlpModelSettings(modelList = analysisList$modelAnalysisList$models,
covariateSettingList = json$covariateSettings,
populationSettingList = json$populationSettings)
m1 <-merge(targetCohorts$`Cohort Name`,outcomeCohorts$`Cohort Name`)
names(m1) <- c("Target Cohort Name","Outcome Cohort Name")
modelSettings <- unique(data.frame(plpModelSettings$settingLookupTable$modelSettingId,plpModelSettings$settingLookupTable$modelSettingName))
names(modelSettings) <- c("Model Settings Id", "Model Settings Description")
m2 <- merge(m1,modelSettings)
covSet <- unique(data.frame(plpModelSettings$settingLookupTable$covariateSettingId))
names(covSet) <- "Covariate Settings ID"
m3 <- merge(m2,covSet)
popSet <-unique(data.frame( plpModelSettings$settingLookupTable$populationSettingId))
names(popSet) <- c("Population Settings ID")
completeAnalysisList <- merge(m3,popSet)
completeAnalysisList$ID <- seq.int(nrow(completeAnalysisList))
concepts <- formatConcepts(json)
#-----------------------------------------------------------------------------
#============== CITATIONS =====================================================
plpCitation <- paste0("Citation: ", utils::citation("PatientLevelPrediction")$textVersion)
tripodCitation <- paste0("Citation: Collins, G., et al. (2017.02.01). 'Transparent reporting of a multivariable prediction model for individual prognosis or diagnosis (TRIPOD): The TRIPOD statement.' from https://www.equator-network.org/reporting-guidelines/tripod-statement/ ")
progressCitation <- paste0("Citation: Steyerberg EW, Moons KG, van der Windt DA, Hayden JA, Perel P, Schroter S, Riley RD, Hemingway H, Altman DG; PROGRESS Group. Prognosis Research Strategy (PROGRESS) 3: prognostic model research. PLoS Med. 2013;10(2):e1001381. doi: 10.1371/journal.pmed.1001381. Epub 2013 Feb 5. Review. PubMed PMID: 23393430; PubMed Central PMCID: PMC3564751.")
rCitation <- paste0("Citation: R Core Team (2013). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.")
#-----------------------------------------------------------------------------
#============== CREATE DOCUMENT ==============================================
# create new word document
doc = officer::read_docx()
#-----------------------------------------------------------------------------
#============ TITLE PAGE =====================================================
title <- officer::fpar(
officer::ftext("Patient-Level Prediction: ", prop = style_title),
officer::ftext(json$packageName, prop = style_title_italic)
)
doc <- doc %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(title) %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("Prepared on: ", Sys.Date()), style = "Normal") %>%
officer::body_add_par(paste0("Created by: ", json$createdBy$name, " (", json$createdBy$email,")"), style = "Normal") %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ TOC ============================================================
toc <- officer::fpar(
officer::ftext("Table of Contents", prop = style_toc)
)
doc <- doc %>%
officer::body_add_fpar(toc) %>%
officer::body_add_toc(level = 2) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ LIST OF ABBREVIATIONS ==========================================
abb <- data.frame(rbind(
c("AUC", "Area Under the Receiver Operating Characteristic Curve"),
c("CDM","Common Data Model"),
c("O","Outcome Cohort"),
c("OHDSI","Observational Health Data Sciences & Informatics"),
c("OMOP","Observational Medical Outcomes Partnership"),
c("T", "Target Cohort"),
c("TAR", "Time at Risk")
))
names(abb) <- c("Abbreviation","Phrase")
abb <- abb[order(abb$Abbreviation),]
doc <- doc %>%
officer::body_add_par("List of Abbreviations", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_table(abb, header = TRUE) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Rest to be completed outside of ATLAS >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ RESPONSIBLE PARTIES ============================================
doc <- doc %>%
officer::body_add_par("Responsible Parties", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS ", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Includes author, investigator, and reviewer names and sponsor information. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ Executive Summary ==============================================
doc <- doc %>%
officer::body_add_par("Executive Summary", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< A few statements about the rational and background for this study. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The objective of this study is to develop and validate patient-level prediction models for patients in ",
length(json$targetIds)," target cohort(s) (",
targetCohortNamesList,") to predict ",
length(json$outcomeIds)," outcome(s) (",
outcomeCohortNamesList,") for ",
length(tar)," time at risk(s) (",
tarList,")."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The prediction will be implemented using ",
length(json$modelSettings)," algorithms (",
paste(lapply(analysisList$modelAnalysisList$models, function(x) x$name), collapse = ', '),")."),
style = "Normal")
#-----------------------------------------------------------------------------
#============ RATIONAL & BACKGROUND ==========================================
doc <- doc %>%
officer::body_add_par("Rational & Background", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Provide a short description of the reason that led to the initiation of or need for the study and add a short critical review of available published and unpublished data to explain gaps in knowledge that the study is intended to fill. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ OBJECTIVE ======================================================
prep_objective <- merge(analysisList$cohortNames, analysisList$outcomeNames)
objective <- merge(prep_objective, tarListDF )
names(objective) <-c("Target Cohorts","Outcome Cohorts","Time at Risk")
doc <- doc %>%
officer::body_add_par("Objective", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The objective is to develop and validate patient-level prediction models for the following prediction problems:"),style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(objective, header = TRUE, style = "Table Professional")
#-----------------------------------------------------------------------------
#============ METHODS ======================================================
doc <- doc %>%
officer::body_add_par("Methods", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Design", style = "heading 2") %>%
officer::body_add_par("This study will follow a retrospective, observational, patient-level prediction design. We define 'retrospective' to mean the study will be conducted using data already collected prior to the start of the study. We define 'observational' to mean there is no intervention or treatment assignment imposed by the study. We define 'patient-level prediction' as a modeling process wherein an outcome is predicted within a time at risk relative to the target cohort start and/or end date. Prediction is performed using a set of covariates derived using data prior to the start of the target cohort.",style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("Figure 1, illustrates the prediction problem we will address. Among a population at risk, we aim to predict which patients at a defined moment in time (t = 0) will experience some outcome during a time-at-risk. Prediction is done using only information about the patients in an observation window prior to that moment in time.", style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_img(src = figure1, width = 6.5, height = 2.01, style = "centered") %>%
officer::body_add_par("Figure 1: The prediction problem", style="graphic title") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(plpCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par("We follow the PROGRESS best practice recommendations for model development and the TRIPOD guidance for transparent reporting of the model results.", style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(progressCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(tripodCitation, prop = style_citation)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Data Source(s)", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("For each database, provide database full name, version information (if applicable), the start and end dates of data capture, and a brief description of the data source. Also include information on data storage (e.g. software and IT environment, database maintenance and anti-fraud protection, archiving) and data protection.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Important Citations: OMOP Common Data Model: 'OMOP Common Data Model (CDM).' from https://github.com/OHDSI/CommonDataModel.", prop = style_helper_text)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Populations", style = "heading 2") %>%
officer::body_add_par("Target Cohort(s) [T]", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Currently cohort definitions need to be grabbed from ATLAS, in a Cohort Definition, Export Tab, from Text View. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_table(targetCohorts, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Outcome Cohorts(s) [O]", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Currently cohort definitions need to be grabbed from ATLAS, in a Cohort Definition, Export Tab, from Text View. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_table(outcomeCohorts, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Time at Risk", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_table(tarDF, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("Additional Population Settings", style = "heading 3") %>%
officer::body_add_par("")
for(i in 1:length(popSettings)){
onePopSettings <- as.data.frame(popSettings[i])
names(onePopSettings) <- c("Item","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Population Settings #",i), prop = style_table_title)
)) %>%
officer::body_add_table(onePopSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
algorithms <- data.frame(rbind(
c("Lasso Logistic Regression", "Lasso logistic regression belongs to the family of generalized linear models, where a linear combination of the variables is learned and finally a logistic function maps the linear combination to a value between 0 and 1. The lasso regularization adds a cost based on model complexity to the objective function when training the model. This cost is the sum of the absolute values of the linear combination of the coefficients. The model automatically performs feature selection by minimizing this cost. We use the Cyclic coordinate descent for logistic, Poisson and survival analysis (Cyclops) package to perform large-scale regularized logistic regression: https://github.com/OHDSI/Cyclops"),
c("Gradient boosting machine", "Gradient boosting machines is a boosting ensemble technique and in our framework it combines multiple decision trees. Boosting works by iteratively adding decision trees but adds more weight to the data-points that are misclassified by prior decision trees in the cost function when training the next tree. We use Extreme Gradient Boosting, which is an efficient implementation of the gradient boosting framework implemented in the xgboost R package available from CRAN."),
c("Random forest", "Random forest is a bagging ensemble technique that combines multiple decision trees. The idea behind bagging is to reduce the likelihood of overfitting, by using weak classifiers, but combining multiple diverse weak classifiers into a strong classifier. Random forest accomplishes this by training multiple decision trees but only using a subset of the variables in each tree and the subset of variables differ between trees. Our packages uses the sklearn learn implementation of Random Forest in python."),
c("KNN", "K-nearest neighbors (KNN) is an algorithm that uses some metric to find the K closest labelled data-points, given the specified metric, to a new unlabelled data-point. The prediction of the new data-points is then the most prevalent class of the K-nearest labelled data-points. There is a sharing limitation of KNN, as the model requires labelled data to perform the prediction on new data, and it is often not possible to share this data across data sites. We included the BigKnn classifier developed in OHDSI which is a large scale k-nearest neighbor classifier using the Lucene search engine: https://github.com/OHDSI/BigKnn"),
c("AdaBoost", "AdaBoost is a boosting ensemble technique. Boosting works by iteratively adding decision trees but adds more weight to the data-points that are misclassified by prior decision trees in the cost function when training the next tree. We use the sklearn 'AdaboostClassifier' implementation in Python."),
c("DecisionTree", "A decision tree is a classifier that partitions the variable space using individual tests selected using a greedy approach. It aims to find partitions that have the highest information gain to separate the classes. The decision tree can easily overfit by enabling a large number of partitions (tree depth) and often needs some regularization (e.g., pruning or specifying hyper-parameters that limit the complexity of the model). We use the sklearn 'DecisionTreeClassifier' implementation in Python."),
c("Neural network", "Neural networks contain multiple layers that weight their inputs using an non-linear function. The first layer is the input layer, the last layer is the output layer the between are the hidden layers. Neural networks are generally trained using feed forward back-propagation. This is when you go through the network with a data-point and calculate the error between the true label and predicted label, then go backwards through the network and update the linear function weights based on the error. This can also be performed as a batch, where multiple data-points are feed through the network before being updated. We use the sklearn 'MLPClassifier' implementation in Python."),
c("Naive Bayes","The Naive Bayes algorithm applies the Bayes' theorem with the 'naive' assumption of conditional independence between every pair of features given the value of the class variable. Based on the likelihood of the data belong to a class and the prior distribution of the class, a posterior distribution is obtained.")
))
names(algorithms) <- c("Algorithm","Description")
algorithms <- algorithms[order(algorithms$Algorithm),]
modelIDs <- as.data.frame(sapply(analysisList$modelAnalysisList$models, function(x) x$name))
names(modelIDs) <- c("ID")
algorithmsFiltered <- algorithms[algorithms$Algorithm %in% modelIDs$ID,]
modelEvaluation <- data.frame(rbind(
c("ROC Plot", "The ROC plot plots the sensitivity against 1-specificity on the test set. The plot shows how well the model is able to discriminate between the people with the outcome and those without. The dashed diagonal line is the performance of a model that randomly assigns predictions. The higher the area under the ROC plot the better the discrimination of the model."),
c("Calibration Plot", "The calibration plot shows how close the predicted risk is to the observed risk. The diagonal dashed line thus indicates a perfectly calibrated model. The ten (or fewer) dots represent the mean predicted values for each quantile plotted against the observed fraction of people in that quantile who had the outcome (observed fraction). The straight black line is the linear regression using these 10 plotted quantile mean predicted vs observed fraction points. The two blue straight lines represented the 95% lower and upper confidence intervals of the slope of the fitted line."),
c("Smooth Calibration Plot", "Similar to the traditional calibration shown above the Smooth Calibration plot shows the relationship between predicted and observed risk. the major difference is that the smooth fit allows for a more fine grained examination of this. Whereas the traditional plot will be heavily influenced by the areas with the highest density of data the smooth plot will provide the same information for this region as well as a more accurate interpretation of areas with lower density. the plot also contains information on the distribution of the outcomes relative to predicted risk. However the increased information game comes at a computational cost. It is recommended to use the traditional plot for examination and then to produce the smooth plot for final versions."),
c("Prediction Distribution Plots", "The preference distribution plots are the preference score distributions corresponding to i) people in the test set with the outcome (red) and ii) people in the test set without the outcome (blue)."),
c("Box Plots", "The prediction distribution boxplots are box plots for the predicted risks of the people in the test set with the outcome (class 1: blue) and without the outcome (class 0: red)."),
c("Test-Train Similarity Plot", "The test-train similarity is presented by plotting the mean covariate values in the train set against those in the test set for people with and without the outcome."),
c("Variable Scatter Plot", "The variable scatter plot shows the mean covariate value for the people with the outcome against the mean covariate value for the people without the outcome. The size and color of the dots correspond to the importance of the covariates in the trained model (size of beta) and its direction (sign of beta with green meaning positive and red meaning negative), respectively."),
c("Precision Recall Plot", "The precision-recall curve is valuable for dataset with a high imbalance between the size of the positive and negative class. It shows the tradeoff between precision and recall for different threshold. High precision relates to a low false positive rate, and high recall relates to a low false negative rate. High scores for both show that the classifier is returning accurate results (high precision), as well as returning a majority of all positive results (high recall). A high area under the curve represents both high recall and high precision."),
c("Demographic Summary Plot", "This plot shows for females and males the expected and observed risk in different age groups together with a confidence area.")
))
names(modelEvaluation) <- c("Evaluation","Description")
modelEvaluation <- modelEvaluation[order(modelEvaluation$Evaluation),]
doc <- doc %>%
officer::body_add_par("Statistical Analysis Method(s)", style = "heading 2") %>%
officer::body_add_par("Algorithms", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_table(algorithmsFiltered, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Model Evaluation", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_par("The following evaluations will be performed on the model:", style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(modelEvaluation, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Quality Control", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("The PatientLevelPrediction package itself, as well as other OHDSI packages on which PatientLevelPrediction depends, use unit tests for validation.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(plpCitation, prop = style_citation)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Tools", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("This study will be designed using OHDSI tools and run with R.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(rCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par("More information about the tools can be found in the Appendix 'Study Generation Version Information'.", style = "Normal")
#-----------------------------------------------------------------------------
#============ DIAGNOSTICS ====================================================
doc <- doc %>%
officer::body_add_par("Diagnostics", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_par("Reviewing the incidence rates of the outcomes in the target population prior to performing the analysis will allow us to assess its feasibility. The full table can be found in the 'Table and Figures' section under 'Incidence Rate of Target & Outcome'.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("Additionally, reviewing the characteristics of the cohorts provides insight into the cohorts being reviewed. The full table can be found below in the 'Table and Figures' section under 'Characterization'.",style="Normal")
#-----------------------------------------------------------------------------
#============ DATA ANALYSIS PLAN =============================================
doc <- doc %>%
officer::body_add_par("Data Analysis Plan", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Algorithm Settings", style = "heading 2") %>%
officer::body_add_par("")
for(i in 1:length(json$modelSettings)){
modelSettingsTitle <- names(json$modelSettings[[i]])
modelSettings <- lapply(json$modelSettings[[i]], function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
oneModelSettings <- as.data.frame(modelSettings)
names(oneModelSettings) <- c("Covariates","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Model Settings Settings #",i, " - ",modelSettingsTitle), prop = style_table_title)
)) %>%
officer::body_add_table(oneModelSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
covStatement1 <- paste0("The covariates (constructed using records on or prior to the target cohort start date) are used within this prediction mode include the following.")
covStatement2 <- paste0(" Each covariate needs to contain at least ",
json$runPlpArgs$minCovariateFraction,
" subjects to be considered for the model.")
if(json$runPlpArgs$minCovariateFraction == 0){
covStatement <- covStatement1
}else {
covStatement <- paste0(covStatement1,covStatement2)
}
doc <- doc %>%
officer::body_add_par("Covariate Settings", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(covStatement,
style="Normal") %>%
officer::body_add_par("")
for(i in 1:length(covSettings)){
oneCovSettings <- as.data.frame(covSettings[i])
names(oneCovSettings) <- c("Covariates","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Covariate Settings #",i), prop = style_table_title)
)) %>%
officer::body_add_table(oneCovSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
doc <- doc %>%
officer::body_add_par("Model Development & Evaluation", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("To build and internally validate the models, we will partition the labelled data into a train set (",
(1-analysisList$testFraction)*100,
"%) and a test set (",
analysisList$testFraction*100,
"%)."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The hyper-parameters for the models will be assessed using ",
analysisList$nfold,
"-fold cross validation on the train set and a final model will be trained using the full train set and optimal hyper-parameters."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("The internal validity of the models will be assessed on the test set. We will use the area under the receiver operating characteristic curve (AUC) to evaluate the discriminative performance of the models and plot the predicted risk against the observed fraction to visualize the calibration. See 'Model Evaluation' section for more detailed information about additional model evaluation metrics.") %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Analysis Execution Settings", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("There are ",
length(json$targetIds),
" target cohorts evaluated for ",
length(json$outcomeIds),
" outcomes over ",
length(json$modelSettings),
" models over ",
length(covSettings),
" covariates settings and over ",
length(popSettings),
" population settings. In total there are ",
length(json$targetIds) * length(json$outcomeIds) * length(json$modelSettings) * length(covSettings) * length(popSettings),
" analysis performed. For a full list refer to appendix 'Complete Analysis List'."),
style = "Normal") %>%
officer::body_add_par("")
#-----------------------------------------------------------------------------
#============ STRENGTHS & LIMITATIONS ========================================
doc <- doc %>%
officer::body_add_par("Strengths & Limitations", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Some limitations to consider:",
prop = style_helper_text),
officer::ftext("--It may not be possible to develop prediction models for rare outcomes. ",
prop = style_helper_text),
officer::ftext("--Not all medical events are recorded into the observational datasets and some recordings can be incorrect. This could potentially lead to outcome misclassification.",
prop = style_helper_text),
officer::ftext("--The prediction models are only applicable to the population of patients represented by the data used to train the model and may not be generalizable to the wider population. >>",
prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ PROTECTION OF HUMAN SUBJECTS ===================================
doc <- doc %>%
officer::body_add_par("Protection of Human Subjects", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Describe any additional safeguards that are appropriate for the data being used.",
prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Here is an example statement:", prop = style_helper_text),
officer::ftext("Confidentiality of patient records will be maintained always. All study reports will contain aggregate data only and will not identify individual patients or physicians. At no time during the study will the sponsor receive patient identifying information except when it is required by regulations in case of reporting adverse events.", prop = style_helper_text),
officer::ftext(">>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ DISSEMINATING & COMMUNICATING ==================================
doc <- doc %>%
officer::body_add_par("Plans for Disseminating & Communicating Study Results", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("List any plans for submission of progress reports, final reports, and publications.",
prop = style_helper_text),
officer::ftext(">>",
prop = style_helper_text)
)) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ TABLES & FIGURES ===============================================
doc <- doc %>%
officer::body_add_par("Tables & Figures", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Incidence Rate of Target & Outcome", style = "heading 2") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add incidence here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Characterization", style = "heading 2") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add characterization table here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add results here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ APPENDICES =====================================================
doc <- doc %>%
officer::body_add_par("Appendices", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Generation Version Information", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("Skeleton Version: ",json$skeletonType," - ", json$skeletonVersion),style="Normal") %>%
officer::body_add_par(paste0("Identifier / Organization: ",json$organizationName),style="Normal") %>%
officer::body_add_break() %>%
officer::body_end_section_continuous() %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Code List", style = "heading 2") %>%
officer::body_add_par("")
for(i in 1:length(concepts$uniqueConceptSets)){
conceptSetId <- paste0("Concept Set #",concepts$uniqueConceptSets[[i]]$conceptId,
" - ",concepts$uniqueConceptSets[[i]]$conceptName)
conceptSetTable <- as.data.frame(concepts$uniqueConceptSets[[i]]$conceptExpressionTable)
id <- as.data.frame(concepts$conceptTableSummary[which(concepts$conceptTableSummary$newConceptId == i),]$cohortDefinitionId)
names(id) <- c("ID")
outcomeCohortsForConceptSet <- outcomeCohorts[outcomeCohorts$`Cohort ID` %in% id$ID,]
targetCohortsForConceptSet <- targetCohorts[targetCohorts$`Cohort ID` %in% id$ID,]
cohortsForConceptSet <- rbind(outcomeCohortsForConceptSet,targetCohortsForConceptSet)
cohortsForConceptSet <- cohortsForConceptSet[,1:2]
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(conceptSetId, prop = style_table_title)
)) %>%
officer::body_add_table(conceptSetTable[,c(1,2,4,6,7,8,9,10,11,12)], header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Cohorts that use this Concept Set:", style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(cohortsForConceptSet, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
doc <- doc %>%
officer::body_add_break() %>%
officer::body_end_section_landscape() %>%
officer::body_add_par("Complete Analysis List", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("Below is a complete list of analysis that will be performed. Definitions for the column 'Covariate Settings ID' can be found above in the 'Covariate Settings' section. Definitions for the 'Population Settings Id' can be found above in the 'Additional Population Settings' section.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(completeAnalysisList[,c(7,1,2,3,4,5,6)], header = TRUE, style = "Table Professional") %>%
officer::body_add_break()
doc <- doc %>% officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add models here >>", prop = style_hidden_text)
)) %>% officer::body_add_par("")
#-----------------------------------------------------------------------------
#============ REFERNCES ======================================================
doc <- doc %>%
officer::body_add_par("References", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
if(!dir.exists(outputLocation)){
dir.create(outputLocation, recursive = T)
}
print(doc, target = file.path(outputLocation,'protocol.docx'))
}
#' createMultiPlpReport
#'
#' @description
#' Creates a word document report of the prediction
#' @details
#' The function creates a word document containing the analysis details, data summary and prediction model results.
#' The results are inserted into an existing protocol document (produced by \code{createPlpProtocol})
#' at the hidden placeholder strings "<< add results here. >>" and "<< add models here >>".
#' @param analysisLocation The location of the multiple patient-level prediction study
#' @param protocolLocation The location of the auto generated patient-level prediction protocol
#' @param includeModels Whether to include the models into the results document
#'
#' @return
#' A word document containing the results of the study is saved into the doc directory in the analysisLocation
#' @export
createMultiPlpReport <- function(analysisLocation,
                                 protocolLocation = file.path(analysisLocation,'doc','protocol.docx'),
                                 includeModels = F){
  if(!dir.exists(analysisLocation)){
    stop('Directory input for analysisLocation does not exists')
  }
  # This function creates a list, one entry per analysis, containing the
  # internal validation table/plots and external validation table/plots.
  # Entries may be NULL when an analysis has no plpResult folder on disk.
  modelsExtraction <- getModelInfo(analysisLocation)
  # add checks for suitable files expected - protocol/summary
  if(!file.exists(protocolLocation)){
    stop('Protocol location invalid')
  }
  #================ Check for protocol =========================
  # if exists load it and add results section - else return error
  doc = tryCatch(officer::read_docx(path=protocolLocation),
                 error = function(e) stop(e))
  # Word style names reused throughout the report body.
  heading1 <- 'heading 1'
  heading2 <- 'heading 2'
  heading3 <- 'heading 3'
  tableStyle <- "Table Professional"
  # Find the sections to add the results to (results + appendix).
  # NOTE(review): doc is not reassigned below — this appears to rely on the
  # officer rdocx object wrapping a mutable xml2 document (external pointer),
  # so body_add_* calls mutate it in place; confirm against the officer docs.
  doc %>%
    officer::cursor_reach(keyword = "<< add results here. >>") %>% officer::cursor_forward() %>%
    officer::body_add_par("Results", style = heading1)
  for(model in modelsExtraction){
    # NULL list elements (missing plpResult) are skipped: NULL$x is NULL in R,
    # so both is.null() guards below fail harmlessly for them.
    if(!is.null(model$internalPerformance)){
      doc %>% officer::body_add_par(paste('Analysis',model$analysisId), style = heading2) %>%
        officer::body_add_par('Description', style = heading3) %>%
        officer::body_add_par(paste0("The predicton model within ", model$T,
                                     " predict ", model$O, " during ", model$tar,
                                     " developed using database ", model$D),
                              style = "Normal") %>%
        officer::body_add_par("") %>%
        officer::body_add_par("Internal Performance", style = heading3) %>%
        officer::body_add_table(model$internalPerformance, style = tableStyle) %>%
        officer::body_add_gg(model$scatterPlot)
      # Plot 7 is the demographic summary, which may be NULL. With it present
      # all nine internal plots are arranged on one page; without it the NULL
      # element is dropped and an eight-slot layout is used instead.
      if(!is.null(model$internalPlots[[7]])){
        doc %>% rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, c(model$internalPlots, list(layout_matrix=rbind(c(1,2),
                                                                                                                         c(3,4),
                                                                                                                         c(5,6),
                                                                                                                         c(7,7),
                                                                                                                         c(7,7),
                                                                                                                         c(8,8),
                                                                                                                         c(9,9)
        )))))} else{
          # Assigning NULL removes element 7 from the list (standard R list
          # semantics), leaving eight plots for the smaller layout below.
          model$internalPlots[[7]] <- NULL
          doc %>% rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, c(model$internalPlots, list(layout_matrix=rbind(c(1,2),
                                                                                                                           c(3,4),
                                                                                                                           c(5,6),
                                                                                                                           c(7,7),
                                                                                                                           c(8,8)
          )))))
        }
    }
    # External validation results only exist when a 'Validation' directory
    # contained matching analysis output (see getModelFromSettings).
    if(!is.null(model$externalPerformance)){
      doc %>% officer::body_add_par("") %>%
        officer::body_add_par("External Performance", style = heading3) %>%
        officer::body_add_table(model$externalPerformance, style = tableStyle) %>%
        rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, model$externalRocPlots)) %>%
        rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, model$externalCalPlots))
    }
    doc %>% officer::body_add_break()
  }
  if(includeModels){
    # Move the cursor to the hidden appendix placeholder before appending the
    # per-analysis variable-importance tables.
    doc %>%
      officer::cursor_reach(keyword = "<< add models here >>") %>% officer::cursor_forward() %>%
      officer::body_add_par("Developed Models", style = heading2)
    for(model in modelsExtraction){
      if(!is.null(model$modelTable)){
        doc %>% officer::body_add_par(paste('Analysis',model$analysisId), style = heading3) %>%
          officer::body_add_table(model$modelTable, style = tableStyle) %>%
          officer::body_add_break()
      }
    }
  }
  # print the document to the doc directory:
  if(!dir.exists(file.path(analysisLocation,'doc'))){
    dir.create(file.path(analysisLocation,'doc'), recursive = T)
  }
  print(doc, target = file.path(analysisLocation,'doc','plpMultiReport.docx'))
  return(TRUE)
}
#' Collect the per-analysis model information for a multi-PLP study.
#'
#' Reads settings.csv from the analysis directory and, for each row (ordered
#' by analysisId), extracts the model/performance details via
#' getModelFromSettings(). Entries are NULL when an analysis has no saved
#' plpResult folder.
#'
#' @param analysisLocation Directory of the multiple prediction study.
#' @return A list (one element per settings row, ordered by analysisId) of
#'   results from getModelFromSettings(), possibly containing NULLs.
getModelInfo <- function(analysisLocation){
  settingsFile <- file.path(analysisLocation, "settings.csv")
  # Fail with a clear message rather than read.csv's cryptic connection error.
  if(!file.exists(settingsFile)){
    stop(paste0('settings.csv not found in: ', analysisLocation))
  }
  settings <- utils::read.csv(settingsFile)
  # seq_len() is safe for a zero-row settings file, where 1:nrow(settings)
  # would wrongly produce c(1, 0) and index non-existent rows.
  modelSettings <- lapply(seq_len(nrow(settings))[order(settings$analysisId)],
                          function(i) {getModelFromSettings(analysisLocation, settings[i,])})
  return(modelSettings)
}
#' Extract the report inputs for a single analysis settings row.
#'
#' Builds a list describing the analysis (cohorts, database, time-at-risk,
#' model name) and, when saved results exist on disk, adds the variable
#' importance table, internal performance table/plots and any external
#' validation performance tables/plots.
#'
#' @param analysisLocation Directory of the multiple prediction study.
#' @param x A single row of settings.csv (data.frame with one row).
#' @return A named list, or NULL when x$plpResultFolder has no 'plpResult'
#'   subdirectory (i.e. the analysis was never run or did not save results).
getModelFromSettings <- function(analysisLocation,x){
  # T/D/O follow the OHDSI shorthand: Target cohort, Database, Outcome.
  # (The list name 'T' is distinct from the logical shorthand T/TRUE.)
  result <- list(analysisId = x$analysisId, T = x$cohortName,
                 D = x$devDatabase, O = x$outcomeName,
                 # Human-readable time-at-risk description, e.g.
                 # "1 days after cohort start to 365 days after cohort end".
                 tar = paste0(x$riskWindowStart, ' days after ',
                              ifelse(x$addExposureDaysToStart==1, 'cohort end','cohort start'),
                              ' to ', x$riskWindowEnd, ' days after ',
                              ifelse(x$addExposureDaysToEnd==1, 'cohort end','cohort start')),
                 model = x$modelSettingName)
  # No saved result for this analysis -> nothing to report.
  if(!dir.exists(file.path(as.character(x$plpResultFolder),'plpResult'))){
    return(NULL)
  }
  plpResult <- PatientLevelPrediction::loadPlpResult(file.path(as.character(x$plpResultFolder),'plpResult'))
  # Keep only covariates the model actually used (non-zero coefficients).
  modelTable <- plpResult$model$varImp
  result$modelTable <- modelTable[modelTable$covariateValue!=0,]
  if(!is.null(plpResult$performanceEvaluation)){
    internalPerformance <- plpResult$performanceEvaluation$evaluationStatistics
    internalPerformance <- as.data.frame(internalPerformance)
    # Round each metric to ~2 significant digits for the report table.
    # NOTE(review): format() returns character; the class<- below tags that
    # character vector as 'double' without converting it — presumably to
    # influence downstream formatting. Verify this is intentional.
    internalPerformance$Value <- format(as.double(as.character(internalPerformance$Value)), digits = 2, nsmall = 0, scientific = F)
    class(internalPerformance$Value) <- 'double'
    # Pivot to one row per metric with a column per evaluation set
    # (e.g. train/test); mean aggregates any duplicate Metric/Eval pairs.
    result$internalPerformance <- reshape2::dcast(internalPerformance, Metric ~ Eval, value.var = 'Value', fun.aggregate = mean)
    # Plot order matters: createMultiPlpReport lays these out positionally
    # and treats element 7 (demographic summary, possibly NULL) specially.
    result$internalPlots <- list(
      PatientLevelPrediction::plotSparseRoc(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPrecisionRecall(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotF1Measure(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPredictionDistribution(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotSparseCalibration( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotSparseCalibration2( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotDemographicSummary( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPreferencePDF(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPredictedPDF(plpResult$performanceEvaluation)
    )} else{
      result$internalPlots <- NULL
    }
  result$scatterPlot <- PatientLevelPrediction::plotVariableScatterplot(plpResult$covariateSummary)
  # get external results if they exist: validation files live under
  # Validation/<databaseName>/Analysis_<id>/... and are matched by path.
  externalPerformance <- c()
  ind <- grep(paste0('Analysis_', x$analysisId,'/'),
              dir(file.path(analysisLocation,'Validation'), recursive = T))
  if(length(ind)>0){
    vals <- dir(file.path(analysisLocation,'Validation'), recursive = T)[ind]
    # Preallocate one plot slot per validation database.
    externalRocPlots <- list()
    externalCalPlots <- list()
    length(externalRocPlots) <- length(vals)
    length(externalCalPlots) <- length(vals)
    for(k in 1:length(vals)){
      val <- vals[k]
      # First path component is the validation database name.
      nameDat <- strsplit(val, '\\/')[[1]][1]
      val <- readRDS(file.path(analysisLocation,'Validation',val))
      # NOTE(review): 'sum' shadows base::sum within this loop body.
      sum <- as.data.frame(val[[1]]$performanceEvaluation$evaluationStatistics)
      sum$database <- nameDat
      externalPerformance <- rbind(externalPerformance, sum)
      externalCalPlots[[k]] <- PatientLevelPrediction::plotSparseCalibration2(val[[1]]$performanceEvaluation, type='validation') + ggplot2::labs(title=paste(nameDat))
      externalRocPlots[[k]] <- PatientLevelPrediction::plotSparseRoc(val[[1]]$performanceEvaluation, type='validation')+ ggplot2::labs(title=paste(nameDat))
    }
    externalPerformance <- as.data.frame(externalPerformance)
    # Same 2-significant-digit formatting as the internal performance table.
    externalPerformance$Value <- format(as.double(as.character(externalPerformance$Value)), digits = 2, nsmall = 0, scientific = F)
    class(externalPerformance$Value) <- 'double'
    # One row per metric, one column per validation database.
    result$externalPerformance <- reshape2::dcast(externalPerformance, Metric ~ database, value.var = 'Value', fun.aggregate = mean)
    result$externalCalPlots <- externalCalPlots
    result$externalRocPlots <- externalRocPlots
  }
  return(result)
}
|
/AbxBetterChoice/ABCciprofloxacin/R/createPlpProtocol.R
|
no_license
|
ABMI/AbxBetterChoice
|
R
| false
| false
| 52,104
|
r
|
#' Create a protocol template for the study
#'
#' @details
#' This function will create a template protocol
#'
#' @param outputLocation Directory location where you want the protocol written to
#' @export
createPlpProtocol <- function(outputLocation = getwd()){
predictionAnalysisListFile <- system.file("settings",
"predictionAnalysisList.json",
package = "ABCciprofloxacin")
#figure1 <- 'vignettes/Figure1.png'
figure1 <- system.file("doc",
"Figure1.png",
package = "PatientLevelPrediction")
#============== STYLES =======================================================
style_title <- officer::shortcuts$fp_bold(font.size = 28)
style_title_italic <- officer::shortcuts$fp_bold(font.size = 30, italic = TRUE)
style_toc <- officer::shortcuts$fp_bold(font.size = 16)
style_helper_text <- officer::shortcuts$fp_italic(color = "#FF8C00")
style_citation <- officer::shortcuts$fp_italic(shading.color = "grey")
style_table_title <- officer::shortcuts$fp_bold(font.size = 14, italic = TRUE)
style_hidden_text <- officer::shortcuts$fp_italic(color = "#FFFFFF")
#============== VARIABLES ====================================================
json <- tryCatch({ParallelLogger::loadSettingsFromJson(file=predictionAnalysisListFile)},
error=function(cond) {
stop('Issue with json file...')
})
#analysis information
analysisList <- PatientLevelPrediction::loadPredictionAnalysisList(predictionAnalysisListFile)
targetCohortNamesList <- paste(analysisList$cohortNames, collapse = ', ')
targetCohorts <- as.data.frame(cbind(analysisList$cohortIds,analysisList$cohortNames,rep("TBD",length(analysisList$cohortNames))), stringsAsFactors = FALSE)
names(targetCohorts) <- c("Cohort ID", "Cohort Name","Description")
targetCohorts <- targetCohorts[order(as.numeric(targetCohorts$`Cohort ID`)),]
outcomeCohortNamesList <- paste(analysisList$outcomeNames, collapse = ', ')
outcomeCohorts <- as.data.frame(cbind(analysisList$outcomeIds,analysisList$outcomeNames,rep("TBD",length(analysisList$outcomeNames))), stringsAsFactors = FALSE)
names(outcomeCohorts) <- c("Cohort ID", "Cohort Name","Description")
outcomeCohorts <- outcomeCohorts[order(as.numeric(outcomeCohorts$`Cohort ID`)),]
#time at risk
tar <- unique(
lapply(json$populationSettings, function(x)
paste0("Risk Window Start: ",x$riskWindowStart,
', Add Exposure Days to Start: ',x$addExposureDaysToStart,
', Risk Window End: ', x$riskWindowEnd,
', Add Exposure Days to End: ', x$addExposureDaysToEnd)))
tarDF <- as.data.frame(rep(times = length(tar),''), stringsAsFactors = FALSE)
names(tarDF) <- c("Time at Risk")
for(i in 1:length(tar)){
tarDF[i,1] <- paste0("[Time at Risk Settings #", i, '] ', tar[[i]])
}
tarList <- paste(tarDF$`Time at Risk`, collapse = ', ')
tarListDF <- as.data.frame(tarList)
covSettings <- lapply(json$covariateSettings, function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
popSettings <- lapply(json$populationSettings, function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
plpModelSettings <- PatientLevelPrediction::createPlpModelSettings(modelList = analysisList$modelAnalysisList$models,
covariateSettingList = json$covariateSettings,
populationSettingList = json$populationSettings)
m1 <-merge(targetCohorts$`Cohort Name`,outcomeCohorts$`Cohort Name`)
names(m1) <- c("Target Cohort Name","Outcome Cohort Name")
modelSettings <- unique(data.frame(plpModelSettings$settingLookupTable$modelSettingId,plpModelSettings$settingLookupTable$modelSettingName))
names(modelSettings) <- c("Model Settings Id", "Model Settings Description")
m2 <- merge(m1,modelSettings)
covSet <- unique(data.frame(plpModelSettings$settingLookupTable$covariateSettingId))
names(covSet) <- "Covariate Settings ID"
m3 <- merge(m2,covSet)
popSet <-unique(data.frame( plpModelSettings$settingLookupTable$populationSettingId))
names(popSet) <- c("Population Settings ID")
completeAnalysisList <- merge(m3,popSet)
completeAnalysisList$ID <- seq.int(nrow(completeAnalysisList))
concepts <- formatConcepts(json)
#-----------------------------------------------------------------------------
#============== CITATIONS =====================================================
plpCitation <- paste0("Citation: ", utils::citation("PatientLevelPrediction")$textVersion)
tripodCitation <- paste0("Citation: Collins, G., et al. (2017.02.01). 'Transparent reporting of a multivariable prediction model for individual prognosis or diagnosis (TRIPOD): The TRIPOD statement.' from https://www.equator-network.org/reporting-guidelines/tripod-statement/ ")
progressCitation <- paste0("Citation: Steyerberg EW, Moons KG, van der Windt DA, Hayden JA, Perel P, Schroter S, Riley RD, Hemingway H, Altman DG; PROGRESS Group. Prognosis Research Strategy (PROGRESS) 3: prognostic model research. PLoS Med. 2013;10(2):e1001381. doi: 10.1371/journal.pmed.1001381. Epub 2013 Feb 5. Review. PubMed PMID: 23393430; PubMed Central PMCID: PMC3564751.")
rCitation <- paste0("Citation: R Core Team (2013). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.")
#-----------------------------------------------------------------------------
#============== CREATE DOCUMENT ==============================================
# create new word document
doc = officer::read_docx()
#-----------------------------------------------------------------------------
#============ TITLE PAGE =====================================================
title <- officer::fpar(
officer::ftext("Patient-Level Prediction: ", prop = style_title),
officer::ftext(json$packageName, prop = style_title_italic)
)
doc <- doc %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(title) %>%
officer::body_add_par("") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("Prepared on: ", Sys.Date()), style = "Normal") %>%
officer::body_add_par(paste0("Created by: ", json$createdBy$name, " (", json$createdBy$email,")"), style = "Normal") %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ TOC ============================================================
toc <- officer::fpar(
officer::ftext("Table of Contents", prop = style_toc)
)
doc <- doc %>%
officer::body_add_fpar(toc) %>%
officer::body_add_toc(level = 2) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ LIST OF ABBREVIATIONS ==========================================
abb <- data.frame(rbind(
c("AUC", "Area Under the Receiver Operating Characteristic Curve"),
c("CDM","Common Data Model"),
c("O","Outcome Cohort"),
c("OHDSI","Observational Health Data Sciences & Informatics"),
c("OMOP","Observational Medical Outcomes Partnership"),
c("T", "Target Cohort"),
c("TAR", "Time at Risk")
))
names(abb) <- c("Abbreviation","Phrase")
abb <- abb[order(abb$Abbreviation),]
doc <- doc %>%
officer::body_add_par("List of Abbreviations", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_table(abb, header = TRUE) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Rest to be completed outside of ATLAS >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ RESPONSIBLE PARTIES ============================================
doc <- doc %>%
officer::body_add_par("Responsible Parties", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS ", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Includes author, investigator, and reviewer names and sponsor information. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ Executive Summary ==============================================
doc <- doc %>%
officer::body_add_par("Executive Summary", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< A few statements about the rational and background for this study. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The objective of this study is to develop and validate patient-level prediction models for patients in ",
length(json$targetIds)," target cohort(s) (",
targetCohortNamesList,") to predict ",
length(json$outcomeIds)," outcome(s) (",
outcomeCohortNamesList,") for ",
length(tar)," time at risk(s) (",
tarList,")."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The prediction will be implemented using ",
length(json$modelSettings)," algorithms (",
paste(lapply(analysisList$modelAnalysisList$models, function(x) x$name), collapse = ', '),")."),
style = "Normal")
#-----------------------------------------------------------------------------
#============ RATIONAL & BACKGROUND ==========================================
doc <- doc %>%
officer::body_add_par("Rational & Background", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Provide a short description of the reason that led to the initiation of or need for the study and add a short critical review of available published and unpublished data to explain gaps in knowledge that the study is intended to fill. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ OBJECTIVE ======================================================
prep_objective <- merge(analysisList$cohortNames, analysisList$outcomeNames)
objective <- merge(prep_objective, tarListDF )
names(objective) <-c("Target Cohorts","Outcome Cohorts","Time at Risk")
doc <- doc %>%
officer::body_add_par("Objective", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The objective is to develop and validate patient-level prediction models for the following prediction problems:"),style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(objective, header = TRUE, style = "Table Professional")
#-----------------------------------------------------------------------------
#============ METHODS ======================================================
doc <- doc %>%
officer::body_add_par("Methods", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Design", style = "heading 2") %>%
officer::body_add_par("This study will follow a retrospective, observational, patient-level prediction design. We define 'retrospective' to mean the study will be conducted using data already collected prior to the start of the study. We define 'observational' to mean there is no intervention or treatment assignment imposed by the study. We define 'patient-level prediction' as a modeling process wherein an outcome is predicted within a time at risk relative to the target cohort start and/or end date. Prediction is performed using a set of covariates derived using data prior to the start of the target cohort.",style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("Figure 1, illustrates the prediction problem we will address. Among a population at risk, we aim to predict which patients at a defined moment in time (t = 0) will experience some outcome during a time-at-risk. Prediction is done using only information about the patients in an observation window prior to that moment in time.", style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_img(src = figure1, width = 6.5, height = 2.01, style = "centered") %>%
officer::body_add_par("Figure 1: The prediction problem", style="graphic title") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(plpCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par("We follow the PROGRESS best practice recommendations for model development and the TRIPOD guidance for transparent reporting of the model results.", style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(progressCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(tripodCitation, prop = style_citation)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Data Source(s)", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("For each database, provide database full name, version information (if applicable), the start and end dates of data capture, and a brief description of the data source. Also include information on data storage (e.g. software and IT environment, database maintenance and anti-fraud protection, archiving) and data protection.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Important Citations: OMOP Common Data Model: 'OMOP Common Data Model (CDM).' from https://github.com/OHDSI/CommonDataModel.", prop = style_helper_text)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Populations", style = "heading 2") %>%
officer::body_add_par("Target Cohort(s) [T]", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Currently cohort definitions need to be grabbed from ATLAS, in a Cohort Definition, Export Tab, from Text View. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_table(targetCohorts, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Outcome Cohorts(s) [O]", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< Currently cohort definitions need to be grabbed from ATLAS, in a Cohort Definition, Export Tab, from Text View. >>", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_table(outcomeCohorts, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Time at Risk", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_table(tarDF, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("Additional Population Settings", style = "heading 3") %>%
officer::body_add_par("")
for(i in 1:length(popSettings)){
onePopSettings <- as.data.frame(popSettings[i])
names(onePopSettings) <- c("Item","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Population Settings #",i), prop = style_table_title)
)) %>%
officer::body_add_table(onePopSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
algorithms <- data.frame(rbind(
c("Lasso Logistic Regression", "Lasso logistic regression belongs to the family of generalized linear models, where a linear combination of the variables is learned and finally a logistic function maps the linear combination to a value between 0 and 1. The lasso regularization adds a cost based on model complexity to the objective function when training the model. This cost is the sum of the absolute values of the linear combination of the coefficients. The model automatically performs feature selection by minimizing this cost. We use the Cyclic coordinate descent for logistic, Poisson and survival analysis (Cyclops) package to perform large-scale regularized logistic regression: https://github.com/OHDSI/Cyclops"),
c("Gradient boosting machine", "Gradient boosting machines is a boosting ensemble technique and in our framework it combines multiple decision trees. Boosting works by iteratively adding decision trees but adds more weight to the data-points that are misclassified by prior decision trees in the cost function when training the next tree. We use Extreme Gradient Boosting, which is an efficient implementation of the gradient boosting framework implemented in the xgboost R package available from CRAN."),
c("Random forest", "Random forest is a bagging ensemble technique that combines multiple decision trees. The idea behind bagging is to reduce the likelihood of overfitting, by using weak classifiers, but combining multiple diverse weak classifiers into a strong classifier. Random forest accomplishes this by training multiple decision trees but only using a subset of the variables in each tree and the subset of variables differ between trees. Our packages uses the sklearn learn implementation of Random Forest in python."),
c("KNN", "K-nearest neighbors (KNN) is an algorithm that uses some metric to find the K closest labelled data-points, given the specified metric, to a new unlabelled data-point. The prediction of the new data-points is then the most prevalent class of the K-nearest labelled data-points. There is a sharing limitation of KNN, as the model requires labelled data to perform the prediction on new data, and it is often not possible to share this data across data sites. We included the BigKnn classifier developed in OHDSI which is a large scale k-nearest neighbor classifier using the Lucene search engine: https://github.com/OHDSI/BigKnn"),
c("AdaBoost", "AdaBoost is a boosting ensemble technique. Boosting works by iteratively adding decision trees but adds more weight to the data-points that are misclassified by prior decision trees in the cost function when training the next tree. We use the sklearn 'AdaboostClassifier' implementation in Python."),
c("DecisionTree", "A decision tree is a classifier that partitions the variable space using individual tests selected using a greedy approach. It aims to find partitions that have the highest information gain to separate the classes. The decision tree can easily overfit by enabling a large number of partitions (tree depth) and often needs some regularization (e.g., pruning or specifying hyper-parameters that limit the complexity of the model). We use the sklearn 'DecisionTreeClassifier' implementation in Python."),
c("Neural network", "Neural networks contain multiple layers that weight their inputs using an non-linear function. The first layer is the input layer, the last layer is the output layer the between are the hidden layers. Neural networks are generally trained using feed forward back-propagation. This is when you go through the network with a data-point and calculate the error between the true label and predicted label, then go backwards through the network and update the linear function weights based on the error. This can also be performed as a batch, where multiple data-points are feed through the network before being updated. We use the sklearn 'MLPClassifier' implementation in Python."),
c("Naive Bayes","The Naive Bayes algorithm applies the Bayes' theorem with the 'naive' assumption of conditional independence between every pair of features given the value of the class variable. Based on the likelihood of the data belong to a class and the prior distribution of the class, a posterior distribution is obtained.")
))
names(algorithms) <- c("Algorithm","Description")
algorithms <- algorithms[order(algorithms$Algorithm),]
modelIDs <- as.data.frame(sapply(analysisList$modelAnalysisList$models, function(x) x$name))
names(modelIDs) <- c("ID")
algorithmsFiltered <- algorithms[algorithms$Algorithm %in% modelIDs$ID,]
modelEvaluation <- data.frame(rbind(
c("ROC Plot", "The ROC plot plots the sensitivity against 1-specificity on the test set. The plot shows how well the model is able to discriminate between the people with the outcome and those without. The dashed diagonal line is the performance of a model that randomly assigns predictions. The higher the area under the ROC plot the better the discrimination of the model."),
c("Calibration Plot", "The calibration plot shows how close the predicted risk is to the observed risk. The diagonal dashed line thus indicates a perfectly calibrated model. The ten (or fewer) dots represent the mean predicted values for each quantile plotted against the observed fraction of people in that quantile who had the outcome (observed fraction). The straight black line is the linear regression using these 10 plotted quantile mean predicted vs observed fraction points. The two blue straight lines represented the 95% lower and upper confidence intervals of the slope of the fitted line."),
c("Smooth Calibration Plot", "Similar to the traditional calibration shown above the Smooth Calibration plot shows the relationship between predicted and observed risk. the major difference is that the smooth fit allows for a more fine grained examination of this. Whereas the traditional plot will be heavily influenced by the areas with the highest density of data the smooth plot will provide the same information for this region as well as a more accurate interpretation of areas with lower density. the plot also contains information on the distribution of the outcomes relative to predicted risk. However the increased information game comes at a computational cost. It is recommended to use the traditional plot for examination and then to produce the smooth plot for final versions."),
c("Prediction Distribution Plots", "The preference distribution plots are the preference score distributions corresponding to i) people in the test set with the outcome (red) and ii) people in the test set without the outcome (blue)."),
c("Box Plots", "The prediction distribution boxplots are box plots for the predicted risks of the people in the test set with the outcome (class 1: blue) and without the outcome (class 0: red)."),
c("Test-Train Similarity Plot", "The test-train similarity is presented by plotting the mean covariate values in the train set against those in the test set for people with and without the outcome."),
c("Variable Scatter Plot", "The variable scatter plot shows the mean covariate value for the people with the outcome against the mean covariate value for the people without the outcome. The size and color of the dots correspond to the importance of the covariates in the trained model (size of beta) and its direction (sign of beta with green meaning positive and red meaning negative), respectively."),
c("Precision Recall Plot", "The precision-recall curve is valuable for dataset with a high imbalance between the size of the positive and negative class. It shows the tradeoff between precision and recall for different threshold. High precision relates to a low false positive rate, and high recall relates to a low false negative rate. High scores for both show that the classifier is returning accurate results (high precision), as well as returning a majority of all positive results (high recall). A high area under the curve represents both high recall and high precision."),
c("Demographic Summary Plot", "This plot shows for females and males the expected and observed risk in different age groups together with a confidence area.")
))
names(modelEvaluation) <- c("Evaluation","Description")
modelEvaluation <- modelEvaluation[order(modelEvaluation$Evaluation),]
doc <- doc %>%
officer::body_add_par("Statistical Analysis Method(s)", style = "heading 2") %>%
officer::body_add_par("Algorithms", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_table(algorithmsFiltered, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Model Evaluation", style = "heading 3") %>%
officer::body_add_par("") %>%
officer::body_add_par("The following evaluations will be performed on the model:", style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(modelEvaluation, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Quality Control", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("The PatientLevelPrediction package itself, as well as other OHDSI packages on which PatientLevelPrediction depends, use unit tests for validation.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(plpCitation, prop = style_citation)
)) %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Tools", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("This study will be designed using OHDSI tools and run with R.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(rCitation, prop = style_citation)
)) %>%
officer::body_add_par("") %>%
officer::body_add_par("More information about the tools can be found in the Appendix 'Study Generation Version Information'.", style = "Normal")
#-----------------------------------------------------------------------------
#============ DIAGNOSTICS ====================================================
doc <- doc %>%
officer::body_add_par("Diagnostics", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_par("Reviewing the incidence rates of the outcomes in the target population prior to performing the analysis will allow us to assess its feasibility. The full table can be found in the 'Table and Figures' section under 'Incidence Rate of Target & Outcome'.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("Additionally, reviewing the characteristics of the cohorts provides insight into the cohorts being reviewed. The full table can be found below in the 'Table and Figures' section under 'Characterization'.",style="Normal")
#-----------------------------------------------------------------------------
#============ DATA ANALYSIS PLAN =============================================
doc <- doc %>%
officer::body_add_par("Data Analysis Plan", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Algorithm Settings", style = "heading 2") %>%
officer::body_add_par("")
for(i in 1:length(json$modelSettings)){
modelSettingsTitle <- names(json$modelSettings[[i]])
modelSettings <- lapply(json$modelSettings[[i]], function(x) cbind(names(x), unlist(lapply(x, function(x2) paste(x2, collapse=', ')))))
oneModelSettings <- as.data.frame(modelSettings)
names(oneModelSettings) <- c("Covariates","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Model Settings Settings #",i, " - ",modelSettingsTitle), prop = style_table_title)
)) %>%
officer::body_add_table(oneModelSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
covStatement1 <- paste0("The covariates (constructed using records on or prior to the target cohort start date) are used within this prediction mode include the following.")
covStatement2 <- paste0(" Each covariate needs to contain at least ",
json$runPlpArgs$minCovariateFraction,
" subjects to be considered for the model.")
if(json$runPlpArgs$minCovariateFraction == 0){
covStatement <- covStatement1
}else {
covStatement <- paste0(covStatement1,covStatement2)
}
doc <- doc %>%
officer::body_add_par("Covariate Settings", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(covStatement,
style="Normal") %>%
officer::body_add_par("")
for(i in 1:length(covSettings)){
oneCovSettings <- as.data.frame(covSettings[i])
names(oneCovSettings) <- c("Covariates","Settings")
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(paste0("Covariate Settings #",i), prop = style_table_title)
)) %>%
officer::body_add_table(oneCovSettings, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
doc <- doc %>%
officer::body_add_par("Model Development & Evaluation", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("To build and internally validate the models, we will partition the labelled data into a train set (",
(1-analysisList$testFraction)*100,
"%) and a test set (",
analysisList$testFraction*100,
"%)."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("The hyper-parameters for the models will be assessed using ",
analysisList$nfold,
"-fold cross validation on the train set and a final model will be trained using the full train set and optimal hyper-parameters."),
style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_par("The internal validity of the models will be assessed on the test set. We will use the area under the receiver operating characteristic curve (AUC) to evaluate the discriminative performance of the models and plot the predicted risk against the observed fraction to visualize the calibration. See 'Model Evaluation' section for more detailed information about additional model evaluation metrics.") %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Analysis Execution Settings", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("There are ",
length(json$targetIds),
" target cohorts evaluated for ",
length(json$outcomeIds),
" outcomes over ",
length(json$modelSettings),
" models over ",
length(covSettings),
" covariates settings and over ",
length(popSettings),
" population settings. In total there are ",
length(json$targetIds) * length(json$outcomeIds) * length(json$modelSettings) * length(covSettings) * length(popSettings),
" analysis performed. For a full list refer to appendix 'Complete Analysis List'."),
style = "Normal") %>%
officer::body_add_par("")
#-----------------------------------------------------------------------------
#============ STRENGTHS & LIMITATIONS ========================================
doc <- doc %>%
officer::body_add_par("Strengths & Limitations", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Some limitations to consider:",
prop = style_helper_text),
officer::ftext("--It may not be possible to develop prediction models for rare outcomes. ",
prop = style_helper_text),
officer::ftext("--Not all medical events are recorded into the observational datasets and some recordings can be incorrect. This could potentially lead to outcome misclassification.",
prop = style_helper_text),
officer::ftext("--The prediction models are only applicable to the population of patients represented by the data used to train the model and may not be generalizable to the wider population. >>",
prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ PROTECTION OF HUMAN SUBJECTS ===================================
doc <- doc %>%
officer::body_add_par("Protection of Human Subjects", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Describe any additional safeguards that are appropriate for the data being used.",
prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("Here is an example statement:", prop = style_helper_text),
officer::ftext("Confidentiality of patient records will be maintained always. All study reports will contain aggregate data only and will not identify individual patients or physicians. At no time during the study will the sponsor receive patient identifying information except when it is required by regulations in case of reporting adverse events.", prop = style_helper_text),
officer::ftext(">>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
#============ DISSEMINATING & COMMUNICATING ==================================
doc <- doc %>%
officer::body_add_par("Plans for Disseminating & Communicating Study Results", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS.", prop = style_helper_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("List any plans for submission of progress reports, final reports, and publications.",
prop = style_helper_text),
officer::ftext(">>",
prop = style_helper_text)
)) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ TABLES & FIGURES ===============================================
doc <- doc %>%
officer::body_add_par("Tables & Figures", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Incidence Rate of Target & Outcome", style = "heading 2") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add incidence here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_par("") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Characterization", style = "heading 2") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add characterization table here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add results here. >>", prop = style_hidden_text)
)) %>%
officer::body_add_break()
#-----------------------------------------------------------------------------
#============ APPENDICES =====================================================
doc <- doc %>%
officer::body_add_par("Appendices", style = "heading 1") %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Study Generation Version Information", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par(paste0("Skeleton Version: ",json$skeletonType," - ", json$skeletonVersion),style="Normal") %>%
officer::body_add_par(paste0("Identifier / Organization: ",json$organizationName),style="Normal") %>%
officer::body_add_break() %>%
officer::body_end_section_continuous() %>%
#```````````````````````````````````````````````````````````````````````````
officer::body_add_par("Code List", style = "heading 2") %>%
officer::body_add_par("")
for(i in 1:length(concepts$uniqueConceptSets)){
conceptSetId <- paste0("Concept Set #",concepts$uniqueConceptSets[[i]]$conceptId,
" - ",concepts$uniqueConceptSets[[i]]$conceptName)
conceptSetTable <- as.data.frame(concepts$uniqueConceptSets[[i]]$conceptExpressionTable)
id <- as.data.frame(concepts$conceptTableSummary[which(concepts$conceptTableSummary$newConceptId == i),]$cohortDefinitionId)
names(id) <- c("ID")
outcomeCohortsForConceptSet <- outcomeCohorts[outcomeCohorts$`Cohort ID` %in% id$ID,]
targetCohortsForConceptSet <- targetCohorts[targetCohorts$`Cohort ID` %in% id$ID,]
cohortsForConceptSet <- rbind(outcomeCohortsForConceptSet,targetCohortsForConceptSet)
cohortsForConceptSet <- cohortsForConceptSet[,1:2]
doc <- doc %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext(conceptSetId, prop = style_table_title)
)) %>%
officer::body_add_table(conceptSetTable[,c(1,2,4,6,7,8,9,10,11,12)], header = TRUE, style = "Table Professional") %>%
officer::body_add_par("") %>%
officer::body_add_par("Cohorts that use this Concept Set:", style = "Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(cohortsForConceptSet, header = TRUE, style = "Table Professional") %>%
officer::body_add_par("")
}
#```````````````````````````````````````````````````````````````````````````
doc <- doc %>%
officer::body_add_break() %>%
officer::body_end_section_landscape() %>%
officer::body_add_par("Complete Analysis List", style = "heading 2") %>%
officer::body_add_par("") %>%
officer::body_add_par("Below is a complete list of analysis that will be performed. Definitions for the column 'Covariate Settings ID' can be found above in the 'Covariate Settings' section. Definitions for the 'Population Settings Id' can be found above in the 'Additional Population Settings' section.",style="Normal") %>%
officer::body_add_par("") %>%
officer::body_add_table(completeAnalysisList[,c(7,1,2,3,4,5,6)], header = TRUE, style = "Table Professional") %>%
officer::body_add_break()
doc <- doc %>% officer::body_add_fpar(
officer::fpar(
officer::ftext("<< add models here >>", prop = style_hidden_text)
)) %>% officer::body_add_par("")
#-----------------------------------------------------------------------------
#============ REFERENCES =====================================================
doc <- doc %>%
officer::body_add_par("References", style = "heading 1") %>%
officer::body_add_par("") %>%
officer::body_add_fpar(
officer::fpar(
officer::ftext("<< To be completed outside of ATLAS. >>", prop = style_helper_text)
))
#-----------------------------------------------------------------------------
if(!dir.exists(outputLocation)){
dir.create(outputLocation, recursive = T)
}
print(doc, target = file.path(outputLocation,'protocol.docx'))
}
#' createMultiPlpReport
#'
#' @description
#' Creates a word document report of the prediction
#' @details
#' The function creates a word document containing the analysis details, data summary and prediction model results.
#' It loads the previously generated protocol document and inserts the results at the
#' "<< add results here. >>" placeholder (and the models at "<< add models here >>").
#' @param analysisLocation The location of the multiple patient-level prediction study
#' @param protocolLocation The location of the auto generated patient-level prediction protocol
#' @param includeModels Whether to include the models into the results document
#'
#' @return
#' A word document containing the results of the study is saved into the doc directory in the analysisLocation
#' @export
createMultiPlpReport <- function(analysisLocation,
                                 protocolLocation = file.path(analysisLocation,'doc','protocol.docx'),
                                 includeModels = F){
  # Fail fast when the study directory is missing.
  if(!dir.exists(analysisLocation)){
    stop('Directory input for analysisLocation does not exists')
  }
  # this function creates a list for each analysis with:
  # internal validation table, internal validation plots,
  # external validation table, external validation plots
  modelsExtraction <- getModelInfo(analysisLocation)
  # add checks for suitable files expected - protocol/summary
  if(!file.exists(protocolLocation)){
    stop('Protocol location invalid')
  }
  #================ Check for protocol =========================
  # if exists load it and add results section - else return error
  doc = tryCatch(officer::read_docx(path=protocolLocation),
                 error = function(e) stop(e))
  # Word style names used throughout the report body.
  heading1 <- 'heading 1'
  heading2 <- 'heading 2'
  heading3 <- 'heading 3'
  tableStyle <- "Table Professional"
  # Find the sections to add the results to (results + appendix)
  # NOTE(review): the pipelines below are not reassigned to `doc`; this appears
  # to rely on officer modifying the underlying document by reference via the
  # cursor — confirm against the officer version in use.
  doc %>%
    officer::cursor_reach(keyword = "<< add results here. >>") %>% officer::cursor_forward() %>%
    officer::body_add_par("Results", style = heading1)
  for(model in modelsExtraction){
    if(!is.null(model$internalPerformance)){
      # Analysis description built from target (T), outcome (O),
      # time-at-risk (tar) and development database (D).
      doc %>% officer::body_add_par(paste('Analysis',model$analysisId), style = heading2) %>%
        officer::body_add_par('Description', style = heading3) %>%
        officer::body_add_par(paste0("The predicton model within ", model$T,
                                     " predict ", model$O, " during ", model$tar,
                                     " developed using database ", model$D),
                              style = "Normal") %>%
        officer::body_add_par("") %>%
        officer::body_add_par("Internal Performance", style = heading3) %>%
        officer::body_add_table(model$internalPerformance, style = tableStyle) %>%
        officer::body_add_gg(model$scatterPlot)
      # The 7th internal plot (demographic summary) can be NULL; pick a grid
      # layout with or without it accordingly.
      if(!is.null(model$internalPlots[[7]])){
        doc %>% rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, c(model$internalPlots, list(layout_matrix=rbind(c(1,2),
                                                                                                                         c(3,4),
                                                                                                                         c(5,6),
                                                                                                                         c(7,7),
                                                                                                                         c(7,7),
                                                                                                                         c(8,8),
                                                                                                                         c(9,9)
        )))))} else{
          # Drop the NULL slot so grid.arrange only sees valid grobs.
          model$internalPlots[[7]] <- NULL
          doc %>% rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, c(model$internalPlots, list(layout_matrix=rbind(c(1,2),
                                                                                                                           c(3,4),
                                                                                                                           c(5,6),
                                                                                                                           c(7,7),
                                                                                                                           c(8,8)
          )))))
        }
    }
    if(!is.null(model$externalPerformance)){
      # External validation: combined ROC plots and calibration plots.
      doc %>% officer::body_add_par("") %>%
        officer::body_add_par("External Performance", style = heading3) %>%
        officer::body_add_table(model$externalPerformance, style = tableStyle) %>%
        rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, model$externalRocPlots)) %>%
        rvg::body_add_vg(code = do.call(gridExtra::grid.arrange, model$externalCalPlots))
    }
    doc %>% officer::body_add_break()
  }
  if(includeModels){
    # move the cursor at the end of the document
    doc %>%
      officer::cursor_reach(keyword = "<< add models here >>") %>% officer::cursor_forward() %>%
      officer::body_add_par("Developed Models", style = heading2)
    for(model in modelsExtraction){
      if(!is.null(model$modelTable)){
        # One coefficient table (non-zero covariates only) per analysis.
        doc %>% officer::body_add_par(paste('Analysis',model$analysisId), style = heading3) %>%
          officer::body_add_table(model$modelTable, style = tableStyle) %>%
          officer::body_add_break()
      }
    }
  }
  # print the document to the doc directory:
  if(!dir.exists(file.path(analysisLocation,'doc'))){
    dir.create(file.path(analysisLocation,'doc'), recursive = T)
  }
  print(doc, target = file.path(analysisLocation,'doc','plpMultiReport.docx'))
  return(TRUE)
}
#' Collect model information for every analysis in a multi-PLP study.
#'
#' Reads the settings table written by the study run and, for each analysis
#' (processed in increasing analysisId order), extracts the model plus its
#' internal/external validation tables and plots via getModelFromSettings().
#'
#' @param analysisLocation Directory containing settings.csv and the results.
#' @return A list with one entry per analysis row (entries are NULL where no
#'   plpResult folder exists), ordered by analysisId.
getModelInfo <- function(analysisLocation){
  settings <- utils::read.csv(file.path(analysisLocation, "settings.csv"))
  # seq_len() is safe when settings has zero rows; 1:nrow(settings) would
  # produce c(1, 0) and index out of bounds.
  orderedRows <- seq_len(nrow(settings))[order(settings$analysisId)]
  modelSettings <- lapply(orderedRows, function(i) {
    getModelFromSettings(analysisLocation, settings[i, ])
  })
  return(modelSettings)
}
#' Extract the model, performance tables and plots for a single analysis.
#'
#' @param analysisLocation Root directory of the multi-PLP study.
#' @param x One row of settings.csv describing the analysis.
#' @return A list with analysis metadata, the non-zero model coefficients,
#'   internal performance table/plots and (if present) external validation
#'   performance/plots; NULL when no saved plpResult folder exists.
getModelFromSettings <- function(analysisLocation,x){
  # T/O/D follow the OHDSI shorthand: target cohort, outcome, database.
  result <- list(analysisId = x$analysisId, T = x$cohortName,
                 D = x$devDatabase, O = x$outcomeName,
                 # Human-readable time-at-risk description built from the
                 # risk-window settings.
                 tar = paste0(x$riskWindowStart, ' days after ',
                              ifelse(x$addExposureDaysToStart==1, 'cohort end','cohort start'),
                              ' to ', x$riskWindowEnd, ' days after ',
                              ifelse(x$addExposureDaysToEnd==1, 'cohort end','cohort start')),
                 model = x$modelSettingName)
  # No saved result for this analysis -> signal the caller with NULL.
  if(!dir.exists(file.path(as.character(x$plpResultFolder),'plpResult'))){
    return(NULL)
  }
  plpResult <- PatientLevelPrediction::loadPlpResult(file.path(as.character(x$plpResultFolder),'plpResult'))
  # Keep only covariates that actually entered the model.
  modelTable <- plpResult$model$varImp
  result$modelTable <- modelTable[modelTable$covariateValue!=0,]
  if(!is.null(plpResult$performanceEvaluation)){
    internalPerformance <- plpResult$performanceEvaluation$evaluationStatistics
    internalPerformance <- as.data.frame(internalPerformance)
    # format() rounds for display but returns character.
    internalPerformance$Value <- format(as.double(as.character(internalPerformance$Value)), digits = 2, nsmall = 0, scientific = F)
    # NOTE(review): forcing class 'double' onto the formatted character
    # vector looks like a deliberate display trick ahead of dcast —
    # confirm before changing.
    class(internalPerformance$Value) <- 'double'
    # Wide table: one column per evaluation set (Eval, e.g. train/test).
    result$internalPerformance <- reshape2::dcast(internalPerformance, Metric ~ Eval, value.var = 'Value', fun.aggregate = mean)
    # Standard internal-validation plot set; slot 7 (demographic summary)
    # may be NULL and is handled by the report builder.
    result$internalPlots <- list(
      PatientLevelPrediction::plotSparseRoc(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPrecisionRecall(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotF1Measure(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPredictionDistribution(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotSparseCalibration( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotSparseCalibration2( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotDemographicSummary( plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPreferencePDF(plpResult$performanceEvaluation),
      PatientLevelPrediction::plotPredictedPDF(plpResult$performanceEvaluation)
    )} else{
      result$internalPlots <- NULL
    }
  result$scatterPlot <- PatientLevelPrediction::plotVariableScatterplot(plpResult$covariateSummary)
  # get external results if they exist
  externalPerformance <- c()
  # Validation results live under Validation/<database>/.../Analysis_<id>/...
  ind <- grep(paste0('Analysis_', x$analysisId,'/'),
              dir(file.path(analysisLocation,'Validation'), recursive = T))
  if(length(ind)>0){
    vals <- dir(file.path(analysisLocation,'Validation'), recursive = T)[ind]
    # Preallocate one plot slot per validation result file.
    externalRocPlots <- list()
    externalCalPlots <- list()
    length(externalRocPlots) <- length(vals)
    length(externalCalPlots) <- length(vals)
    for(k in 1:length(vals)){
      val <- vals[k]
      # First path component is the validation database name.
      nameDat <- strsplit(val, '\\/')[[1]][1]
      val <- readRDS(file.path(analysisLocation,'Validation',val))
      sum <- as.data.frame(val[[1]]$performanceEvaluation$evaluationStatistics)
      sum$database <- nameDat
      externalPerformance <- rbind(externalPerformance, sum)
      externalCalPlots[[k]] <- PatientLevelPrediction::plotSparseCalibration2(val[[1]]$performanceEvaluation, type='validation') + ggplot2::labs(title=paste(nameDat))
      externalRocPlots[[k]] <- PatientLevelPrediction::plotSparseRoc(val[[1]]$performanceEvaluation, type='validation')+ ggplot2::labs(title=paste(nameDat))
    }
    externalPerformance <- as.data.frame(externalPerformance)
    # Same display formatting/class trick as the internal table above.
    externalPerformance$Value <- format(as.double(as.character(externalPerformance$Value)), digits = 2, nsmall = 0, scientific = F)
    class(externalPerformance$Value) <- 'double'
    # Wide table: one column per validation database.
    result$externalPerformance <- reshape2::dcast(externalPerformance, Metric ~ database, value.var = 'Value', fun.aggregate = mean)
    result$externalCalPlots <- externalCalPlots
    result$externalRocPlots <- externalRocPlots
  }
  return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats-lm-tidiers.R
\name{lm_tidiers}
\alias{lm_tidiers}
\alias{tidy.lm}
\alias{tidy.summary.lm}
\alias{augment.lm}
\alias{glance.lm}
\alias{glance.summary.lm}
\title{Tidying methods for a linear model}
\usage{
\method{tidy}{lm}(x, conf.int = FALSE, conf.level = 0.95,
exponentiate = FALSE, quick = FALSE, ...)
\method{tidy}{summary.lm}(x, ...)
\method{augment}{lm}(x, data = stats::model.frame(x), newdata, type.predict,
type.residuals, ...)
\method{glance}{lm}(x, ...)
\method{glance}{summary.lm}(x, ...)
}
\arguments{
\item{x}{lm object}
\item{conf.int}{whether to include a confidence interval}
\item{conf.level}{confidence level of the interval, used only if
\code{conf.int=TRUE}}
\item{exponentiate}{whether to exponentiate the coefficient estimates
and confidence intervals (typical for logistic regression)}
\item{quick}{whether to compute a smaller and faster version, containing
only the \code{term} and \code{estimate} columns.}
\item{...}{extra arguments (not used)}
\item{data}{Original data, defaults to extracting it from the model}
\item{newdata}{If provided, performs predictions on the new data}
\item{type.predict}{Type of prediction to compute for a GLM; passed on to
\code{\link[=predict.glm]{predict.glm()}}}
\item{type.residuals}{Type of residuals to compute for a GLM; passed on to
\code{\link[=residuals.glm]{residuals.glm()}}}
}
\value{
All tidying methods return a \code{data.frame} without rownames.
The structure depends on the method chosen.
\code{tidy.lm} returns one row for each coefficient, with five columns:
\item{term}{The term in the linear model being estimated and tested}
\item{estimate}{The estimated coefficient}
\item{std.error}{The standard error from the linear model}
\item{statistic}{t-statistic}
\item{p.value}{two-sided p-value}
If the linear model is an "mlm" object (multiple linear model), there is an
additional column:
\item{response}{Which response column the coefficients correspond to
(typically Y1, Y2, etc)}
If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and
\code{conf.high}, computed with \code{\link[=confint]{confint()}}.
When \code{newdata} is not supplied \code{augment.lm} returns
one row for each observation, with seven columns added to the original
data:
\item{.hat}{Diagonal of the hat matrix}
\item{.sigma}{Estimate of residual standard deviation when
corresponding observation is dropped from model}
\item{.cooksd}{Cook's distance, \code{\link[=cooks.distance]{cooks.distance()}}}
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals}
\item{.std.resid}{Standardised residuals}
(Some unusual "lm" objects, such as "rlm" from MASS, may omit
\code{.cooksd} and \code{.std.resid}. "gam" from mgcv omits
\code{.sigma})
When \code{newdata} is supplied, \code{augment.lm} returns one row for each
observation, with three columns added to the new data:
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals of fitted values on the new data}
\code{glance.lm} returns a one-row data.frame with the columns
\item{r.squared}{The percent of variance explained by the model}
\item{adj.r.squared}{r.squared adjusted based on the degrees of freedom}
\item{sigma}{The square root of the estimated residual variance}
\item{statistic}{F-statistic}
\item{p.value}{p-value from the F test, describing whether the full
regression is significant}
\item{df}{Degrees of freedom used by the coefficients}
\item{logLik}{the data's log-likelihood under the model}
\item{AIC}{the Akaike Information Criterion}
\item{BIC}{the Bayesian Information Criterion}
\item{deviance}{deviance}
\item{df.residual}{residual degrees of freedom}
}
\description{
These methods tidy the coefficients of a linear model into a summary,
augment the original data with information on the fitted values and
residuals, and construct a one-row glance of the model's statistics.
}
\details{
If you have missing values in your model data, you may need to refit
the model with \code{na.action = na.exclude}.
If \code{conf.int=TRUE}, the confidence interval is computed with
the \code{\link[=confint]{confint()}} function.
While \code{tidy} is supported for "mlm" objects, \code{augment} and
\code{glance} are not.
When the modeling was performed with \code{na.action = "na.omit"}
(as is the typical default), rows with NA in the initial data are omitted
entirely from the augmented data frame. When the modeling was performed
with \code{na.action = "na.exclude"}, one should provide the original data
as a second argument, at which point the augmented data will contain those
rows (typically with NAs in place of the new columns). If the original data
is not provided to \code{augment} and \code{na.action = "na.exclude"}, a
warning is raised and the incomplete rows are dropped.
Code and documentation for \code{augment.lm} originated in the
ggplot2 package, where it was called \code{fortify.lm}
}
\examples{
library(ggplot2)
library(dplyr)
mod <- lm(mpg ~ wt + qsec, data = mtcars)
tidy(mod)
glance(mod)
# coefficient plot
d <- tidy(mod) \%>\% mutate(low = estimate - std.error,
high = estimate + std.error)
ggplot(d, aes(estimate, term, xmin = low, xmax = high, height = 0)) +
geom_point() +
geom_vline(xintercept = 0) +
geom_errorbarh()
head(augment(mod))
head(augment(mod, mtcars))
# predict on new data
newdata <- mtcars \%>\% head(6) \%>\% mutate(wt = wt + 1)
augment(mod, newdata = newdata)
au <- augment(mod, data = mtcars)
plot(mod, which = 1)
qplot(.fitted, .resid, data = au) +
geom_hline(yintercept = 0) +
geom_smooth(se = FALSE)
qplot(.fitted, .std.resid, data = au) +
geom_hline(yintercept = 0) +
geom_smooth(se = FALSE)
qplot(.fitted, .std.resid, data = au,
colour = factor(cyl))
qplot(mpg, .std.resid, data = au, colour = factor(cyl))
plot(mod, which = 2)
qplot(sample =.std.resid, data = au, stat = "qq") +
geom_abline()
plot(mod, which = 3)
qplot(.fitted, sqrt(abs(.std.resid)), data = au) + geom_smooth(se = FALSE)
plot(mod, which = 4)
qplot(seq_along(.cooksd), .cooksd, data = au)
plot(mod, which = 5)
qplot(.hat, .std.resid, data = au) + geom_smooth(se = FALSE)
ggplot(au, aes(.hat, .std.resid)) +
geom_vline(size = 2, colour = "white", xintercept = 0) +
geom_hline(size = 2, colour = "white", yintercept = 0) +
geom_point() + geom_smooth(se = FALSE)
qplot(.hat, .std.resid, data = au, size = .cooksd) +
geom_smooth(se = FALSE, size = 0.5)
plot(mod, which = 6)
ggplot(au, aes(.hat, .cooksd)) +
geom_vline(xintercept = 0, colour = NA) +
geom_abline(slope = seq(0, 3, by = 0.5), colour = "white") +
geom_smooth(se = FALSE) +
geom_point()
qplot(.hat, .cooksd, size = .cooksd / .hat, data = au) + scale_size_area()
# column-wise models
a <- matrix(rnorm(20), nrow = 10)
b <- a + rnorm(length(a))
result <- lm(b ~ a)
tidy(result)
}
\seealso{
\code{\link[=summary.lm]{summary.lm()}}
\link{na.action}
}
|
/man/lm_tidiers.Rd
|
no_license
|
Wandrys-dev/broom
|
R
| false
| true
| 7,071
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats-lm-tidiers.R
\name{lm_tidiers}
\alias{lm_tidiers}
\alias{tidy.lm}
\alias{tidy.summary.lm}
\alias{augment.lm}
\alias{glance.lm}
\alias{glance.summary.lm}
\title{Tidying methods for a linear model}
\usage{
\method{tidy}{lm}(x, conf.int = FALSE, conf.level = 0.95,
exponentiate = FALSE, quick = FALSE, ...)
\method{tidy}{summary.lm}(x, ...)
\method{augment}{lm}(x, data = stats::model.frame(x), newdata, type.predict,
type.residuals, ...)
\method{glance}{lm}(x, ...)
\method{glance}{summary.lm}(x, ...)
}
\arguments{
\item{x}{lm object}
\item{conf.int}{whether to include a confidence interval}
\item{conf.level}{confidence level of the interval, used only if
\code{conf.int=TRUE}}
\item{exponentiate}{whether to exponentiate the coefficient estimates
and confidence intervals (typical for logistic regression)}
\item{quick}{whether to compute a smaller and faster version, containing
only the \code{term} and \code{estimate} columns.}
\item{...}{extra arguments (not used)}
\item{data}{Original data, defaults to extracting it from the model}
\item{newdata}{If provided, performs predictions on the new data}
\item{type.predict}{Type of prediction to compute for a GLM; passed on to
\code{\link[=predict.glm]{predict.glm()}}}
\item{type.residuals}{Type of residuals to compute for a GLM; passed on to
\code{\link[=residuals.glm]{residuals.glm()}}}
}
\value{
All tidying methods return a \code{data.frame} without rownames.
The structure depends on the method chosen.
\code{tidy.lm} returns one row for each coefficient, with five columns:
\item{term}{The term in the linear model being estimated and tested}
\item{estimate}{The estimated coefficient}
\item{std.error}{The standard error from the linear model}
\item{statistic}{t-statistic}
\item{p.value}{two-sided p-value}
If the linear model is an "mlm" object (multiple linear model), there is an
additional column:
\item{response}{Which response column the coefficients correspond to
(typically Y1, Y2, etc)}
If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and
\code{conf.high}, computed with \code{\link[=confint]{confint()}}.
When \code{newdata} is not supplied \code{augment.lm} returns
one row for each observation, with seven columns added to the original
data:
\item{.hat}{Diagonal of the hat matrix}
\item{.sigma}{Estimate of residual standard deviation when
corresponding observation is dropped from model}
\item{.cooksd}{Cook's distance, \code{\link[=cooks.distance]{cooks.distance()}}}
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals}
\item{.std.resid}{Standardised residuals}
(Some unusual "lm" objects, such as "rlm" from MASS, may omit
\code{.cooksd} and \code{.std.resid}. "gam" from mgcv omits
\code{.sigma})
When \code{newdata} is supplied, \code{augment.lm} returns one row for each
observation, with three columns added to the new data:
\item{.fitted}{Fitted values of model}
\item{.se.fit}{Standard errors of fitted values}
\item{.resid}{Residuals of fitted values on the new data}
\code{glance.lm} returns a one-row data.frame with the columns
\item{r.squared}{The percent of variance explained by the model}
\item{adj.r.squared}{r.squared adjusted based on the degrees of freedom}
\item{sigma}{The square root of the estimated residual variance}
\item{statistic}{F-statistic}
\item{p.value}{p-value from the F test, describing whether the full
regression is significant}
\item{df}{Degrees of freedom used by the coefficients}
\item{logLik}{the data's log-likelihood under the model}
\item{AIC}{the Akaike Information Criterion}
\item{BIC}{the Bayesian Information Criterion}
\item{deviance}{deviance}
\item{df.residual}{residual degrees of freedom}
}
\description{
These methods tidy the coefficients of a linear model into a summary,
augment the original data with information on the fitted values and
residuals, and construct a one-row glance of the model's statistics.
}
\details{
If you have missing values in your model data, you may need to refit
the model with \code{na.action = na.exclude}.
If \code{conf.int=TRUE}, the confidence interval is computed with
the \code{\link[=confint]{confint()}} function.
While \code{tidy} is supported for "mlm" objects, \code{augment} and
\code{glance} are not.
When the modeling was performed with \code{na.action = "na.omit"}
(as is the typical default), rows with NA in the initial data are omitted
entirely from the augmented data frame. When the modeling was performed
with \code{na.action = "na.exclude"}, one should provide the original data
as a second argument, at which point the augmented data will contain those
rows (typically with NAs in place of the new columns). If the original data
is not provided to \code{augment} and \code{na.action = "na.exclude"}, a
warning is raised and the incomplete rows are dropped.
Code and documentation for \code{augment.lm} originated in the
ggplot2 package, where it was called \code{fortify.lm}
}
\examples{
library(ggplot2)
library(dplyr)
mod <- lm(mpg ~ wt + qsec, data = mtcars)
tidy(mod)
glance(mod)
# coefficient plot
d <- tidy(mod) \%>\% mutate(low = estimate - std.error,
high = estimate + std.error)
ggplot(d, aes(estimate, term, xmin = low, xmax = high, height = 0)) +
geom_point() +
geom_vline(xintercept = 0) +
geom_errorbarh()
head(augment(mod))
head(augment(mod, mtcars))
# predict on new data
newdata <- mtcars \%>\% head(6) \%>\% mutate(wt = wt + 1)
augment(mod, newdata = newdata)
au <- augment(mod, data = mtcars)
plot(mod, which = 1)
qplot(.fitted, .resid, data = au) +
geom_hline(yintercept = 0) +
geom_smooth(se = FALSE)
qplot(.fitted, .std.resid, data = au) +
geom_hline(yintercept = 0) +
geom_smooth(se = FALSE)
qplot(.fitted, .std.resid, data = au,
colour = factor(cyl))
qplot(mpg, .std.resid, data = au, colour = factor(cyl))
plot(mod, which = 2)
qplot(sample =.std.resid, data = au, stat = "qq") +
geom_abline()
plot(mod, which = 3)
qplot(.fitted, sqrt(abs(.std.resid)), data = au) + geom_smooth(se = FALSE)
plot(mod, which = 4)
qplot(seq_along(.cooksd), .cooksd, data = au)
plot(mod, which = 5)
qplot(.hat, .std.resid, data = au) + geom_smooth(se = FALSE)
ggplot(au, aes(.hat, .std.resid)) +
geom_vline(size = 2, colour = "white", xintercept = 0) +
geom_hline(size = 2, colour = "white", yintercept = 0) +
geom_point() + geom_smooth(se = FALSE)
qplot(.hat, .std.resid, data = au, size = .cooksd) +
geom_smooth(se = FALSE, size = 0.5)
plot(mod, which = 6)
ggplot(au, aes(.hat, .cooksd)) +
geom_vline(xintercept = 0, colour = NA) +
geom_abline(slope = seq(0, 3, by = 0.5), colour = "white") +
geom_smooth(se = FALSE) +
geom_point()
qplot(.hat, .cooksd, size = .cooksd / .hat, data = au) + scale_size_area()
# column-wise models
a <- matrix(rnorm(20), nrow = 10)
b <- a + rnorm(length(a))
result <- lm(b ~ a)
tidy(result)
}
\seealso{
\code{\link[=summary.lm]{summary.lm()}}
\link{na.action}
}
|
## ----setup, include=FALSE------------------------------------------------
# knitr chunk defaults for the rendered document:
# no caching (results always recomputed) and code reformatted by tidy.
knitr::opts_chunk$set(
  cache = FALSE,
  cache.lazy = FALSE,
  tidy = TRUE
)
## ----Libraries, echo=TRUE, message=FALSE, warning=FALSE------------------
library(tidyverse)
library(ggplot2)
library(Matrix)
library(Rmisc)
library(ggforce)
library(rjson)
library(cowplot)
library(RColorBrewer)
library(grid)
library(readbitmap)
library(Seurat)
## ------------------------------------------------------------------------
# geom_spatial: a custom ggplot2 layer that draws a pre-built grob (here a
# tissue image) at a fixed position in normalised panel coordinates.
# Mirrors the signature of a standard geom_* constructor and returns a layer.
geom_spatial <- function(mapping = NULL,
                         data = NULL,
                         stat = "identity",
                         position = "identity",
                         na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = FALSE,
                         ...) {
  # Delegate standard data preparation to the parent Geom unchanged.
  prepare_data <- function(self, data, params) {
    ggproto_parent(Geom, self)$setup_data(data, params)
  }
  # Place the supplied grob inside a viewport centred at (x, y).
  render_group <- function(data, panel_scales, coord) {
    placed <- grid::editGrob(
      data$grob[[1]],
      vp = grid::viewport(x = data$x, y = data$y)
    )
    ggplot2:::ggname("geom_spatial", placed)
  }
  GeomCustom <- ggproto(
    "GeomCustom",
    Geom,
    setup_data = prepare_data,
    draw_group = render_group,
    required_aes = c("grob", "x", "y")
  )
  layer(
    geom = GeomCustom,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
## ----eval=FALSE, include=TRUE--------------------------------------------
## sample_names <- read.delim("lenas.txt", as.is=TRUE, header=FALSE)$V1
## sample_names
## ----eval=FALSE, include=TRUE--------------------------------------------
# Root of the spaceranger output; note `=` assignment here (file otherwise
# uses `<-`). Each sample directory holds the image, scale factors,
# tissue positions, graph-based clusters and the filtered expression matrix.
path = "/dcl02/lieber/ajaffe/SpatialTranscriptomics/HumanPilot/10X/"
## output
image_paths <- paste0(path, sample_names, "/tissue_lowres_image.png")
scalefactor_paths <- paste0(path, sample_names, "/scalefactors_json.json")
tissue_paths <- paste0(path, sample_names, "/tissue_positions_list.txt")
cluster_paths <- paste0(path, sample_names, "/", sample_names, "_analysis__clustering_graphclust_clusters.csv")
matrix_paths <- paste0(path, sample_names, "/", sample_names, "_filtered_feature_bc_matrix.h5")
# Sanity check: every expected input file must exist before loading.
all(file.exists(c(image_paths, scalefactor_paths, tissue_paths, cluster_paths, matrix_paths)))
# TRUE
## ------------------------------------------------------------------------
# Read each low-resolution tissue image; read.bitmap returns an array with
# dimensions (height, width, channel).
images_cl <- lapply(image_paths, read.bitmap)
dims = t(sapply(images_cl, dim))
colnames(dims) = c("height", "width", "channel")
dims = as.data.frame(dims)
## ------------------------------------------------------------------------
# Wrap each image in a rasterGrob filling its panel, keyed by sample name,
# and attach the pixel dimensions for axis limits later.
grobs <- lapply(images_cl, rasterGrob, width=unit(1,"npc"), height=unit(1,"npc"))
images_tibble <- tibble(sample=sample_names, grob=grobs)
images_tibble$height = dims$height
images_tibble$width = dims$width
images_tibble
## ------------------------------------------------------------------------
# Per-sample scale factors (JSON) used to map spot coordinates onto the
# low-resolution image.
scales <- lapply(scalefactor_paths, function(x) fromJSON(file=x))
## ------------------------------------------------------------------------
# Graph-based cluster assignment per barcode, one data frame per sample.
clusters = lapply(cluster_paths, read.csv)
head(clusters[[1]])
## ------------------------------------------------------------------------
# Read each sample's barcode/tissue-position table, scale the pixel
# coordinates to the low-res image, and attach cluster labels plus the
# image dimensions used later for plot limits.
bcs <- list()
for (i in seq_along(sample_names)) {  # seq_along is safe for empty input
  bcs[[i]] <- read.csv(tissue_paths[i],col.names=c("barcode","tissue","row","col","imagerow","imagecol"), header = FALSE)
  bcs[[i]]$imagerow <- bcs[[i]]$imagerow * scales[[i]]$tissue_lowres_scalef # scale tissue coordinates for lowres image
  bcs[[i]]$imagecol <- bcs[[i]]$imagecol * scales[[i]]$tissue_lowres_scalef
  bcs[[i]]$tissue <- as.factor(bcs[[i]]$tissue)
  # all = TRUE keeps barcodes present in only one of the two tables (NAs added)
  bcs[[i]] <- merge(bcs[[i]], clusters[[i]], by.x = "barcode", by.y = "Barcode", all = TRUE)
  bcs[[i]]$height <- images_tibble$height[i]
  bcs[[i]]$width <- images_tibble$width[i]
}
names(bcs) <- sample_names
head(bcs[[1]])
## ------------------------------------------------------------------------
# Load the filtered feature-barcode matrices and transpose into barcode x
# gene data frames (dense; this can be memory-heavy for large runs).
matrix <- lapply(matrix_paths, Read10X_h5)
matrix = lapply(matrix, function(x) as.data.frame(t(x)))
head(matrix[[1]])
## ----message=FALSE, warning=FALSE----------------------------------------
# Total UMI count per barcode, per sample; combined into one long table.
umi_sum <- list()
for (i in seq_along(sample_names)) {  # seq_along is safe for empty input
  umi_sum[[i]] <- data.frame(barcode = row.names(matrix[[i]]),
                             sum_umi = Matrix::rowSums(matrix[[i]]))
}
names(umi_sum) <- sample_names
umi_sum <- bind_rows(umi_sum, .id = "sample")
head(umi_sum)
## ----message=FALSE, warning=FALSE----------------------------------------
# Number of detected genes (non-zero counts) per barcode, per sample.
gene_sum <- list()
for (i in seq_along(sample_names)) {  # seq_along is safe for empty input
  gene_sum[[i]] <- data.frame(barcode = row.names(matrix[[i]]),
                              sum_gene = Matrix::rowSums(matrix[[i]] != 0))
}
names(gene_sum) <- sample_names
gene_sum <- bind_rows(gene_sum, .id = "sample")
head(gene_sum)
## ------------------------------------------------------------------------
# Combine the per-sample barcode tables and attach UMI and gene counts.
bcs_merge <- bind_rows(bcs, .id = "sample")
bcs_merge <- merge(bcs_merge,umi_sum, by = c("barcode", "sample"))
bcs_merge <- merge(bcs_merge,gene_sum, by = c("barcode", "sample"))
head(bcs_merge)
## ------------------------------------------------------------------------
# Continuous colour palette (reversed Spectral) for the count overlays.
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
## ---- fig.width = 16, fig.height = 8-------------------------------------
# One spatial UMI plot per sample: tissue image underneath, spots coloured
# by total UMI count. All plots are collected and written to one PDF grid.
plots <- list()
for (i in seq_along(sample_names)) {  # seq_along is safe for empty input
  plots[[i]] <- bcs_merge %>%
    filter(sample ==sample_names[i]) %>%
    ggplot(aes(x=imagecol,y=imagerow,fill=sum_umi)) +
    geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
    geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
    coord_cartesian(expand=FALSE)+
    scale_fill_gradientn(colours = myPalette(100))+
    xlim(0,max(bcs_merge %>%
                 filter(sample ==sample_names[i]) %>%
                 select(width)))+
    # y axis reversed so image row 0 sits at the top, matching pixel space
    ylim(max(bcs_merge %>%
               filter(sample ==sample_names[i]) %>%
               select(height)),0)+
    xlab("") +
    ylab("") +
    ggtitle(sample_names[i])+
    labs(fill = "Total UMI")+
    # NOTE(review): theme_set() returns the PREVIOUS theme, which is what is
    # actually added to the plot here — confirm this is intended.
    theme_set(theme_bw(base_size = 10))+
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black"),
          axis.text = element_blank(),
          axis.ticks = element_blank())
}
pdf("example_umi.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# One spatial plot per sample, spots coloured by number of detected genes;
# same layout as the UMI plots above, written to a separate PDF.
plots <- list()
for (i in seq_along(sample_names)) {  # seq_along is safe for empty input
  plots[[i]] <- bcs_merge %>%
    filter(sample ==sample_names[i]) %>%
    ggplot(aes(x=imagecol,y=imagerow,fill=sum_gene)) +
    geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
    geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
    coord_cartesian(expand=FALSE)+
    scale_fill_gradientn(colours = myPalette(100))+
    xlim(0,max(bcs_merge %>%
                 filter(sample ==sample_names[i]) %>%
                 select(width)))+
    # y axis reversed so image row 0 sits at the top, matching pixel space
    ylim(max(bcs_merge %>%
               filter(sample ==sample_names[i]) %>%
               select(height)),0)+
    xlab("") +
    ylab("") +
    ggtitle(sample_names[i])+
    labs(fill = "Total Genes")+
    # NOTE(review): theme_set() returns the PREVIOUS theme, which is what is
    # actually added to the plot here — confirm this is intended.
    theme_set(theme_bw(base_size = 10))+
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black"),
          axis.text = element_blank(),
          axis.ticks = element_blank())
}
pdf("example_gene.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Cluster maps: only spots over tissue (tissue == "1"), coloured by their
# graph-based cluster assignment with a fixed 12-colour manual palette.
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
filter(tissue == "1") %>%
ggplot(aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold", "#a65628", "#999999", "black", "grey", "white", "purple"))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# Reversed y so plot coordinates match image pixel coordinates.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
labs(fill = "Cluster")+
# Enlarge legend keys so the cluster colours are readable at this size.
guides(fill = guide_legend(override.aes = list(size=3)))+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf("example_cluster.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Map mouse layer-marker genes to human HGNC symbols via an archived Ensembl
# mart, then build the list of genes to plot.
geneTab = read.csv("../mouse_layer_marker_info_cleaned.csv",as.is=TRUE)
library(biomaRt)
# The Feb 2014 archive pins the annotation release for reproducibility.
ensembl = useMart("ENSEMBL_MART_ENSEMBL",
dataset="hsapiens_gene_ensembl", host="feb2014.archive.ensembl.org")
sym = getBM(attributes = c("ensembl_gene_id","hgnc_symbol","entrezgene"),
mart=ensembl)
geneTab$hgnc_symbol = sym$hgnc_symbol[match(geneTab$HumanEnsID, sym$ensembl_gene_id)]
# Manual patch for an ID absent from the archive.
# NOTE(review): ENSG00000275700 -> "AATF" — confirm this is the intended symbol.
geneTab$hgnc_symbol[geneTab$HumanEnsID == "ENSG00000275700"] = "AATF"
# Hand-picked markers plus every mapped layer-marker symbol.
symbol = c("BDNF", "MBP", "MOBP", "GFAP", "MOG", "SNAP25", "GAD2", "CAMK2A",
"AQP4", "CD74", "FOXP2", "PDGFRA", "DLG4", geneTab$hgnc_symbol)
dir.create("pdfs")
# One PDF per marker gene: spatial expression panels for every sample,
# written to pdfs/<gene>.pdf. (seq_along() is the safe form of seq(along=...);
# the unused `ge = enquo(g)` from the original was dropped.)
for (j in seq_along(symbol)) {
g = symbol[j]
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
# Append this gene's per-barcode counts as a column named after the gene.
bind_cols(select(matrix[[i]], g)) %>%
# FIX: the original used aes(fill=g), which evaluates `g` to the gene *name*
# (a constant string), so every spot got the same discrete fill and the
# continuous scale_fill_gradientn() below fails. .data[[g]] maps the column
# named by g (added by bind_cols above) instead.
ggplot(aes(x=imagecol,y=imagerow,fill=.data[[g]])) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(paste(sample_names[i], g))+
# Legend titled with the gene symbol rather than the .data[[g]] expression.
labs(fill = g)+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf(paste0("pdfs/", g, ".pdf"),height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
}
## FEZF2
# Single-gene version of the marker-gene loop above, for FEZF2 only.
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
# Append the FEZF2 counts column; aes(fill=FEZF2) then maps it directly.
bind_cols(select(matrix[[i]], "FEZF2")) %>%
ggplot(aes(x=imagecol,y=imagerow,fill=FEZF2)) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# Reversed y so plot coordinates match image pixel coordinates.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf("example_FEZF2.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- out.width = "200px", echo=FALSE------------------------------------
# Embed a pre-rendered screenshot in the knitted document.
knitr::include_graphics("~/public_html/Odin/Beta/example_notebook/hpca.jpg")
|
/Analysis/test_dplyr.R
|
no_license
|
ghadaabdelhady/HumanPilot
|
R
| false
| false
| 13,412
|
r
|
## ----setup, include=FALSE------------------------------------------------
# Global knitr chunk options: disable caching, tidy the displayed code.
knitr::opts_chunk$set(
cache = FALSE,
cache.lazy = FALSE,
tidy = TRUE
)
## ----Libraries, echo=TRUE, message=FALSE, warning=FALSE------------------
library(tidyverse)
library(ggplot2)
library(Matrix)
library(Rmisc)
library(ggforce)
library(rjson)
library(cowplot)
library(RColorBrewer)
library(grid)
library(readbitmap)
library(Seurat)
## ------------------------------------------------------------------------
# Custom ggplot2 geom that draws a pre-rendered grob (here: the low-res
# tissue image) inside the panel. Used as the background layer of every
# spatial plot; the grob is supplied via aes(grob=...) and positioned with
# x/y in npc units.
geom_spatial <-  function(mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = NA,
inherit.aes = FALSE,
...) {
# Ad-hoc Geom built fresh on every call.
# NOTE(review): relies on the unexported ggplot2:::ggname helper, so this is
# tied to ggplot2 internals — confirm against the installed version.
GeomCustom <- ggproto(
"GeomCustom",
Geom,
setup_data = function(self, data, params) {
data <- ggproto_parent(Geom, self)$setup_data(data, params)
data
},
draw_group = function(data, panel_scales, coord) {
# Place the first grob of the group in a viewport centred at (x, y).
vp <- grid::viewport(x=data$x, y=data$y)
g <- grid::editGrob(data$grob[[1]], vp=vp)
ggplot2:::ggname("geom_spatial", g)
},
required_aes = c("grob","x","y")
)
layer(
geom = GeomCustom,
mapping = mapping,
data = data,
stat = stat,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(na.rm = na.rm, ...)
)
}
## ----eval=FALSE, include=TRUE--------------------------------------------
## sample_names <- read.delim("lenas.txt", as.is=TRUE, header=FALSE)$V1
## sample_names
## ----eval=FALSE, include=TRUE--------------------------------------------
# Root directory holding one sub-directory of 10x output per sample.
path <- "/dcl02/lieber/ajaffe/SpatialTranscriptomics/HumanPilot/10X/"
## output
# Expected per-sample input files: image, scale factors, spot positions,
# graph-based clusters, and the filtered count matrix.
image_paths <- sprintf("%s%s/tissue_lowres_image.png", path, sample_names)
scalefactor_paths <- sprintf("%s%s/scalefactors_json.json", path, sample_names)
tissue_paths <- sprintf("%s%s/tissue_positions_list.txt", path, sample_names)
cluster_paths <- sprintf("%s%s/%s_analysis__clustering_graphclust_clusters.csv", path, sample_names, sample_names)
matrix_paths <- sprintf("%s%s/%s_filtered_feature_bc_matrix.h5", path, sample_names, sample_names)
# Sanity check: every expected file must exist before proceeding.
all(file.exists(c(image_paths, scalefactor_paths, tissue_paths, cluster_paths, matrix_paths)))
# TRUE
## ------------------------------------------------------------------------
# Read each low-res tissue image and record its pixel dimensions
# (height, width, channel) in a data frame, one row per sample.
images_cl <- lapply(image_paths, read.bitmap)
dims <- as.data.frame(t(sapply(images_cl, dim)))
colnames(dims) <- c("height", "width", "channel")
## ------------------------------------------------------------------------
# Wrap each image in a raster grob so ggplot can draw it as a background,
# and keep everything together in one tibble per sample.
grobs <- lapply(images_cl, rasterGrob, width=unit(1,"npc"), height=unit(1,"npc"))
images_tibble <- tibble(sample=sample_names, grob=grobs)
images_tibble$height <- dims$height
images_tibble$width <- dims$width
images_tibble
## ------------------------------------------------------------------------
# Per-sample scale factors (JSON) that map full-resolution spot coordinates
# onto the low-res image.
scales <- map(scalefactor_paths, ~ fromJSON(file = .x))
## ------------------------------------------------------------------------
# Graph-based cluster assignments from the 10x secondary analysis.
clusters <- map(cluster_paths, read.csv)
head(clusters[[1]])
## ------------------------------------------------------------------------
# Per-sample barcode table: spot positions scaled to the low-res image,
# cluster labels joined in, and image dimensions attached for later axis
# limits.
bcs <- list()
for (i in 1:length(sample_names)) {
bcs[[i]] <- read.csv(tissue_paths[i],col.names=c("barcode","tissue","row","col","imagerow","imagecol"), header = FALSE)
bcs[[i]]$imagerow <- bcs[[i]]$imagerow * scales[[i]]$tissue_lowres_scalef    # scale tissue coordinates for lowres image
bcs[[i]]$imagecol <- bcs[[i]]$imagecol * scales[[i]]$tissue_lowres_scalef
bcs[[i]]$tissue <- as.factor(bcs[[i]]$tissue)
# all = TRUE keeps barcodes without a cluster assignment (Cluster becomes NA).
bcs[[i]] <- merge(bcs[[i]], clusters[[i]], by.x = "barcode", by.y = "Barcode", all = TRUE)
bcs[[i]]$height <- images_tibble$height[i]
bcs[[i]]$width <- images_tibble$width[i]
}
names(bcs) <- sample_names
head(bcs[[1]])
## ------------------------------------------------------------------------
# Load each filtered feature-barcode matrix and flip it to a barcodes x genes
# data frame in a single pass.
# NOTE(review): the name `matrix` shadows base::matrix from here on.
matrix <- lapply(matrix_paths, function(p) as.data.frame(t(Read10X_h5(p))))
head(matrix[[1]])
## ----message=FALSE, warning=FALSE----------------------------------------
# Total UMIs per barcode for each sample (row sums of the counts table).
umi_sum <- lapply(matrix, function(counts) {
  data.frame(barcode = row.names(counts),
             sum_umi = Matrix::rowSums(counts))
})
names(umi_sum) <- sample_names
umi_sum <- bind_rows(umi_sum, .id = "sample")
head(umi_sum)
## ----message=FALSE, warning=FALSE----------------------------------------
# Number of detected genes per barcode (count of non-zero entries per row).
gene_sum <- lapply(matrix, function(counts) {
  data.frame(barcode = row.names(counts),
             sum_gene = Matrix::rowSums(counts != 0))
})
names(gene_sum) <- sample_names
gene_sum <- bind_rows(gene_sum, .id = "sample")
head(gene_sum)
## ------------------------------------------------------------------------
# One long barcode table across all samples, with the per-barcode UMI and
# gene totals joined on (barcode + sample).
bcs_merge <- bind_rows(bcs, .id = "sample") %>%
  merge(umi_sum, by = c("barcode", "sample")) %>%
  merge(gene_sum, by = c("barcode", "sample"))
head(bcs_merge)
## ------------------------------------------------------------------------
# Continuous colour ramp: reversed 11-class Spectral brewer palette.
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
## ---- fig.width = 16, fig.height = 8-------------------------------------
# One panel per sample: each spot plotted at its low-res image position,
# filled by total UMI count, drawn over the tissue photo (geom_spatial).
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
ggplot(aes(x=imagecol,y=imagerow,fill=sum_umi)) +
# Tissue image as the plot background, filling the panel.
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# y runs from image height down to 0 so plot coords match pixel coords.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
labs(fill = "Total UMI")+
# NOTE(review): theme_set() here mutates the *global* theme and adds its
# return value (the previous theme) to this plot — unusual but functional.
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
# Write all panels onto a single PDF page grid.
pdf("example_umi.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Same layout as the UMI plots, but spots are filled by the number of
# detected genes (sum_gene) instead of total UMIs.
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
ggplot(aes(x=imagecol,y=imagerow,fill=sum_gene)) +
# Tissue image as the plot background.
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# Reversed y so plot coordinates match image pixel coordinates.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
labs(fill = "Total Genes")+
# NOTE(review): theme_set() side effect inside the chain (see UMI plot).
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf("example_gene.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Cluster maps: only spots over tissue (tissue == "1"), coloured by their
# graph-based cluster assignment with a fixed 12-colour manual palette.
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
filter(tissue == "1") %>%
ggplot(aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold", "#a65628", "#999999", "black", "grey", "white", "purple"))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# Reversed y so plot coordinates match image pixel coordinates.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
labs(fill = "Cluster")+
# Enlarge legend keys so the cluster colours are readable at this size.
guides(fill = guide_legend(override.aes = list(size=3)))+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf("example_cluster.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Map mouse layer-marker genes to human HGNC symbols via an archived Ensembl
# mart, then build the list of genes to plot.
geneTab = read.csv("../mouse_layer_marker_info_cleaned.csv",as.is=TRUE)
library(biomaRt)
# The Feb 2014 archive pins the annotation release for reproducibility.
ensembl = useMart("ENSEMBL_MART_ENSEMBL",
dataset="hsapiens_gene_ensembl", host="feb2014.archive.ensembl.org")
sym = getBM(attributes = c("ensembl_gene_id","hgnc_symbol","entrezgene"),
mart=ensembl)
geneTab$hgnc_symbol = sym$hgnc_symbol[match(geneTab$HumanEnsID, sym$ensembl_gene_id)]
# Manual patch for an ID absent from the archive.
# NOTE(review): ENSG00000275700 -> "AATF" — confirm this is the intended symbol.
geneTab$hgnc_symbol[geneTab$HumanEnsID == "ENSG00000275700"] = "AATF"
# Hand-picked markers plus every mapped layer-marker symbol.
symbol = c("BDNF", "MBP", "MOBP", "GFAP", "MOG", "SNAP25", "GAD2", "CAMK2A",
"AQP4", "CD74", "FOXP2", "PDGFRA", "DLG4", geneTab$hgnc_symbol)
dir.create("pdfs")
# One PDF per marker gene: spatial expression panels for every sample,
# written to pdfs/<gene>.pdf. (seq_along() is the safe form of seq(along=...);
# the unused `ge = enquo(g)` from the original was dropped.)
for (j in seq_along(symbol)) {
g = symbol[j]
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
# Append this gene's per-barcode counts as a column named after the gene.
bind_cols(select(matrix[[i]], g)) %>%
# FIX: the original used aes(fill=g), which evaluates `g` to the gene *name*
# (a constant string), so every spot got the same discrete fill and the
# continuous scale_fill_gradientn() below fails. .data[[g]] maps the column
# named by g (added by bind_cols above) instead.
ggplot(aes(x=imagecol,y=imagerow,fill=.data[[g]])) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(paste(sample_names[i], g))+
# Legend titled with the gene symbol rather than the .data[[g]] expression.
labs(fill = g)+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf(paste0("pdfs/", g, ".pdf"),height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
}
## FEZF2
# Single-gene version of the marker-gene loop above, for FEZF2 only.
plots <- list()
for (i in 1:length(sample_names)) {
plots[[i]] <- bcs_merge %>%
filter(sample ==sample_names[i]) %>%
# Append the FEZF2 counts column; aes(fill=FEZF2) then maps it directly.
bind_cols(select(matrix[[i]], "FEZF2")) %>%
ggplot(aes(x=imagecol,y=imagerow,fill=FEZF2)) +
geom_spatial(data=images_tibble[i,], aes(grob=grob), x=0.5, y=0.5)+
geom_point(shape = 21, colour = "black", size = 1.75, stroke = 0.5)+
coord_cartesian(expand=FALSE)+
scale_fill_gradientn(colours = myPalette(100))+
xlim(0,max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(width)))+
# Reversed y so plot coordinates match image pixel coordinates.
ylim(max(bcs_merge %>%
filter(sample ==sample_names[i]) %>%
select(height)),0)+
xlab("") +
ylab("") +
ggtitle(sample_names[i])+
theme_set(theme_bw(base_size = 10))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"),
axis.text = element_blank(),
axis.ticks = element_blank())
}
pdf("example_FEZF2.pdf",height=24, width=36)
print(plot_grid(plotlist = plots))
dev.off()
## ---- out.width = "200px", echo=FALSE------------------------------------
# Embed a pre-rendered screenshot in the knitted document.
knitr::include_graphics("~/public_html/Odin/Beta/example_notebook/hpca.jpg")
|
/projeto_04-AnaliseDeRiscoDeCredito/AnaliseRiscoCredito.R
|
no_license
|
3Thiago/DataScienceAcademy-FormacaoCientistaDeDados
|
R
| false
| false
| 3,407
|
r
| ||
library(shiny)
library(shinyjs)
library(tm)
library(dplyr)
library(ggplot2)
library(R.utils)
library(stringr)
# NOTE(review): hard-coded setwd() ties the app to one machine — confirm the
# deployment path.
setwd("~/RProg/DSSCapstone")
# Precomputed n-gram frequency tables (a list indexed by n-gram order,
# per predictNgram() below).
ngrams <- readRDS('ngrams.RDA')
# Profanity list consumed by predictNgram()'s removeWords() step.
# FIX: tm::removeWords() expects a character *vector* of words; the original
# kept everything in one comma-separated string, so removeWords() searched for
# the entire string as a single "word" and never matched — the filter was a
# no-op. Splitting on commas plus any following whitespace (including the
# newline embedded in the literal) restores the intended behaviour.
stopwordsProfane <- strsplit("2g1c, 2 girls 1 cup, acrotomophilia, alabama hot pocket, alaskan pipeline, anal, anilingus, anus, apeshit, arsehole, ass, asshole, assmunch, auto erotic, autoerotic, babeland, baby batter, baby juice, ball gag, ball gravy, ball kicking, ball licking, ball sack, ball sucking, bangbros, bareback, barely legal, barenaked, bastardo, bastinado, bbw, bdsm, beaner, beaners, beaver cleaver, beaver lips, bestiality, big black, big breasts, big knockers, big tits, bimbos, birdlock, bitch, bitches, black cock, blonde action, blonde on blonde action, blowjob, blow job, blow your load, blue waffle, blumpkin, bollocks, bondage, boner, boob, boobs, booty call, brown showers, brunette action, bukkake, bulldyke, bullet vibe, bullshit, bung hole, bunghole, busty, butt, buttcheeks, butthole, camel toe, camgirl, camslut, camwhore, carpet muncher, carpetmuncher, chocolate rosebuds, circlejerk, cleveland steamer, clit, clitoris, clover clamps, clusterfuck, cock, cocks, coprolagnia, coprophilia, cornhole, coon, coons, creampie, cum, cumming, cunnilingus, cunt, darkie, date rape, daterape, deep throat, deepthroat, dendrophilia, dick, dildo, dirty pillows, dirty sanchez, doggie style, doggiestyle, doggy style, doggystyle, dog style, dolcett, domination, dominatrix, dommes, donkey punch, double dong, double penetration, dp action, dry hump, dvda, eat my ass, ecchi, ejaculation, erotic, erotism, escort, ethical slut, eunuch, faggot, fecal, felch, fellatio, feltch, female squirting, femdom, figging, fingerbang, fingering, fisting, foot fetish, footjob, frotting, fuck, fuck buttons, fucking, fudge packer, fudgepacker, futanari, gang bang, gay sex, genitals, giant cock, girl on, girl on top, girls gone wild, goatcx, goatse, god damn, gokkun, golden shower, goodpoop, goo girl, goregasm, grope, group sex, g-spot, guro, hand job, handjob, hard core, hardcore, hentai, homoerotic, honkey, hooker, hot carl, hot chick, how to kill, how to murder, huge fat, humping, incest, 
intercourse, jack off, jail bait, jailbait, jelly donut, jerk off, jigaboo, jiggaboo, jiggerboo, jizz, juggs, kike, kinbaku, kinkster, kinky, knobbing, leather restraint, leather straight jacket, lemon party, lolita, lovemaking, make me come, male squirting, masturbate, menage a trois, milf, missionary position, motherfucker, mound of venus, mr hands, muff diver, muffdiving, nambla, nawashi, negro, neonazi, nigga, nigger, nig nog, nimphomania, nipple, nipples, nsfw images, nude, nudity, nympho, nymphomania, octopussy, omorashi, one cup two girls, one guy one jar, orgasm, orgy, paedophile, paki, panties, panty, pedobear, pedophile, pegging, penis, phone sex, piece of shit, pissing, piss pig, pisspig, playboy, pleasure chest, pole smoker, ponyplay, poof, poon, poontang, punany, poop chute, poopchute, porn, porno, pornography, prince albert piercing, pthc, pubes, pussy, queaf, queef, quim, raghead, raging boner, rape, raping, rapist, rectum, reverse cowgirl, rimjob, rimming, rosy palm, rosy palm and her 5 sisters, rusty trombone, sadism, santorum, scat, schlong, scissoring, semen, sex, sexo, sexy, shaved beaver, shaved pussy, shemale, shibari, shit, shitty, shota, shrimping, skeet, slanteye, slut, s&m, smut, snatch, snowballing, sodomize, sodomy, spic, splooge, splooge moose, spooge, spread legs, spunk, strap on, strapon, strappado, strip club, style doggy, suck, sucks, suicide girls, sultry women, swastika, swinger, tainted love, taste my, tea bagging, threesome, throating, tied up, tight white, tit, tits, titties, titty, tongue in a, topless, tosser, towelhead, tranny, tribadism, tub girl, tubgirl, tushy, twat, twink, twinkie, two girls one cup, undressing, upskirt, urethra play, urophilia, vagina, venus mound, vibrator, violet wand, vorarephilia, voyeur, vulva, wank, wetback, wet dream, white power, wrapping men, wrinkled starfish, xx, xxx, yaoi, yellow showers, yiffy, zoophilia, 🖕", ",\\s*")[[1]]
# Predict the next word from the trailing (up to 4-word) context of
# `sentence` using the precomputed `ngrams` tables, backing off to a shorter
# context when the longer one has no match. Returns a character vector of up
# to 3 candidates (padded with "the"/"be"/"to"), or prints 'no prediction'.
predictNgram <- function(sentence) {
maxLength <- 4
# Normalise: strip numbers/punctuation, lowercase, remove the profanity
# list, collapse whitespace (R.utils::trim drops leading/trailing space).
processed <- sentence %>% removeNumbers %>%
removePunctuation() %>%
tolower() %>%
removeWords(stopwordsProfane) %>%
stripWhitespace() %>%
trim()
strlength <- str_count(processed,"\\w+")
if (strlength>maxLength) strlength <- maxLength
# Regex capturing the last `strlength` words of the processed sentence.
# NOTE(review): if cleaning removed every word, strlength is 0 and
# rep(..., times = -1) below errors — unguarded edge case, confirm callers
# always pass at least one surviving word.
pattern <- paste(rep("\\w+\\s+",strlength-1),collapse = "",sep="")
pattern <- paste0(pattern,"\\w+$")
predictPhrase <- str_extract(processed,pattern)
# Top 3 most frequent continuations for this context in the n-gram table
# of matching order.
phraseMatch <- ngrams[[strlength]] %>%
filter(predictor==predictPhrase) %>%
select(count,prediction) %>%
top_n(3, count) %>%
select(prediction)
if (nrow(phraseMatch)>0) {
# Pad with common fallback words when fewer than 3 candidates exist.
result1 <- ifelse(!is.na(phraseMatch[[1]][1]), as.character(phraseMatch[[1]][1]), "the")
result2 <- ifelse(!is.na(phraseMatch[[1]][2]), as.character(phraseMatch[[1]][2]), "be")
result3 <- ifelse(!is.na(phraseMatch[[1]][3]), as.character(phraseMatch[[1]][3]), "to")
# Function value (returned invisibly, since the last expression is an
# assignment).
results <- c(result1,result2,result3)
}
else if (strlength>1){
# Back off: retry recursively with one fewer context word.
pattern <- paste(replicate(strlength-2, "\\w+\\s+"), collapse = "")
pattern <- paste0(pattern,"\\w+$")
sentence <- str_extract(processed,pattern)
predictNgram(sentence)
}
else {
# No match even for a single-word context.
print('no prediction')
}
}
# Shiny server: shows three predicted next words; clicking one appends it to
# the text box (via the hidden `event` output) and refreshes the predictions.
shinyServer(
function(input, output, session) {
# Word the user just clicked; NULL when nothing is pending.
nextWord <- reactiveValues(word=NULL)
# Recompute the 3 candidate words whenever the input text changes.
setWordOptions <- reactive({
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
})
# One observer per candidate button: record the chosen word and re-render.
observeEvent(input$result1, {
nextWord$word <- setWordOptions()[1]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result1 <- renderText({setWordOptions()[1]})
})
observeEvent(input$result2, {
nextWord$word <- setWordOptions()[2]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result2 <- renderText({setWordOptions()[2]})
})
observeEvent(input$result3, {
nextWord$word <- setWordOptions()[3]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result3 <- renderText({setWordOptions()[3]})
})
# Appends the clicked word to the text input, then clears the pending word.
output$event <- renderText({
if (is.null(nextWord$word)) return()
words <- paste(input$text,nextWord$word)
nextWord$word <- NULL
updateTextInput(session, "text",
value = words
)
words
})
# Initial render of the three candidates.
output$result1 <- renderText({setWordOptions()[1]})
output$result2 <- renderText({setWordOptions()[2]})
output$result3 <- renderText({setWordOptions()[3]})
# Keep `event` evaluating even though it is not displayed in the UI.
outputOptions(output,'event', suspendWhenHidden=FALSE)
}
)
|
/server.R
|
no_license
|
asadowns/DSSCapstone
|
R
| false
| false
| 6,675
|
r
|
library(shiny)
library(shinyjs)
library(tm)
library(dplyr)
library(ggplot2)
library(R.utils)
library(stringr)
# NOTE(review): hard-coded setwd() ties the app to one machine — confirm the
# deployment path.
setwd("~/RProg/DSSCapstone")
# Precomputed n-gram frequency tables (a list indexed by n-gram order,
# per predictNgram() below).
ngrams <- readRDS('ngrams.RDA')
# Profanity list consumed by predictNgram()'s removeWords() step.
# FIX: tm::removeWords() expects a character *vector* of words; the original
# kept everything in one comma-separated string, so removeWords() searched for
# the entire string as a single "word" and never matched — the filter was a
# no-op. Splitting on commas plus any following whitespace (including the
# newline embedded in the literal) restores the intended behaviour.
stopwordsProfane <- strsplit("2g1c, 2 girls 1 cup, acrotomophilia, alabama hot pocket, alaskan pipeline, anal, anilingus, anus, apeshit, arsehole, ass, asshole, assmunch, auto erotic, autoerotic, babeland, baby batter, baby juice, ball gag, ball gravy, ball kicking, ball licking, ball sack, ball sucking, bangbros, bareback, barely legal, barenaked, bastardo, bastinado, bbw, bdsm, beaner, beaners, beaver cleaver, beaver lips, bestiality, big black, big breasts, big knockers, big tits, bimbos, birdlock, bitch, bitches, black cock, blonde action, blonde on blonde action, blowjob, blow job, blow your load, blue waffle, blumpkin, bollocks, bondage, boner, boob, boobs, booty call, brown showers, brunette action, bukkake, bulldyke, bullet vibe, bullshit, bung hole, bunghole, busty, butt, buttcheeks, butthole, camel toe, camgirl, camslut, camwhore, carpet muncher, carpetmuncher, chocolate rosebuds, circlejerk, cleveland steamer, clit, clitoris, clover clamps, clusterfuck, cock, cocks, coprolagnia, coprophilia, cornhole, coon, coons, creampie, cum, cumming, cunnilingus, cunt, darkie, date rape, daterape, deep throat, deepthroat, dendrophilia, dick, dildo, dirty pillows, dirty sanchez, doggie style, doggiestyle, doggy style, doggystyle, dog style, dolcett, domination, dominatrix, dommes, donkey punch, double dong, double penetration, dp action, dry hump, dvda, eat my ass, ecchi, ejaculation, erotic, erotism, escort, ethical slut, eunuch, faggot, fecal, felch, fellatio, feltch, female squirting, femdom, figging, fingerbang, fingering, fisting, foot fetish, footjob, frotting, fuck, fuck buttons, fucking, fudge packer, fudgepacker, futanari, gang bang, gay sex, genitals, giant cock, girl on, girl on top, girls gone wild, goatcx, goatse, god damn, gokkun, golden shower, goodpoop, goo girl, goregasm, grope, group sex, g-spot, guro, hand job, handjob, hard core, hardcore, hentai, homoerotic, honkey, hooker, hot carl, hot chick, how to kill, how to murder, huge fat, humping, incest, 
intercourse, jack off, jail bait, jailbait, jelly donut, jerk off, jigaboo, jiggaboo, jiggerboo, jizz, juggs, kike, kinbaku, kinkster, kinky, knobbing, leather restraint, leather straight jacket, lemon party, lolita, lovemaking, make me come, male squirting, masturbate, menage a trois, milf, missionary position, motherfucker, mound of venus, mr hands, muff diver, muffdiving, nambla, nawashi, negro, neonazi, nigga, nigger, nig nog, nimphomania, nipple, nipples, nsfw images, nude, nudity, nympho, nymphomania, octopussy, omorashi, one cup two girls, one guy one jar, orgasm, orgy, paedophile, paki, panties, panty, pedobear, pedophile, pegging, penis, phone sex, piece of shit, pissing, piss pig, pisspig, playboy, pleasure chest, pole smoker, ponyplay, poof, poon, poontang, punany, poop chute, poopchute, porn, porno, pornography, prince albert piercing, pthc, pubes, pussy, queaf, queef, quim, raghead, raging boner, rape, raping, rapist, rectum, reverse cowgirl, rimjob, rimming, rosy palm, rosy palm and her 5 sisters, rusty trombone, sadism, santorum, scat, schlong, scissoring, semen, sex, sexo, sexy, shaved beaver, shaved pussy, shemale, shibari, shit, shitty, shota, shrimping, skeet, slanteye, slut, s&m, smut, snatch, snowballing, sodomize, sodomy, spic, splooge, splooge moose, spooge, spread legs, spunk, strap on, strapon, strappado, strip club, style doggy, suck, sucks, suicide girls, sultry women, swastika, swinger, tainted love, taste my, tea bagging, threesome, throating, tied up, tight white, tit, tits, titties, titty, tongue in a, topless, tosser, towelhead, tranny, tribadism, tub girl, tubgirl, tushy, twat, twink, twinkie, two girls one cup, undressing, upskirt, urethra play, urophilia, vagina, venus mound, vibrator, violet wand, vorarephilia, voyeur, vulva, wank, wetback, wet dream, white power, wrapping men, wrinkled starfish, xx, xxx, yaoi, yellow showers, yiffy, zoophilia, 🖕", ",\\s*")[[1]]
# Predict the next word from the trailing (up to 4-word) context of
# `sentence` using the precomputed `ngrams` tables, backing off to a shorter
# context when the longer one has no match. Returns a character vector of up
# to 3 candidates (padded with "the"/"be"/"to"), or prints 'no prediction'.
predictNgram <- function(sentence) {
maxLength <- 4
# Normalise: strip numbers/punctuation, lowercase, remove the profanity
# list, collapse whitespace (R.utils::trim drops leading/trailing space).
processed <- sentence %>% removeNumbers %>%
removePunctuation() %>%
tolower() %>%
removeWords(stopwordsProfane) %>%
stripWhitespace() %>%
trim()
strlength <- str_count(processed,"\\w+")
if (strlength>maxLength) strlength <- maxLength
# Regex capturing the last `strlength` words of the processed sentence.
# NOTE(review): if cleaning removed every word, strlength is 0 and
# rep(..., times = -1) below errors — unguarded edge case, confirm callers
# always pass at least one surviving word.
pattern <- paste(rep("\\w+\\s+",strlength-1),collapse = "",sep="")
pattern <- paste0(pattern,"\\w+$")
predictPhrase <- str_extract(processed,pattern)
# Top 3 most frequent continuations for this context in the n-gram table
# of matching order.
phraseMatch <- ngrams[[strlength]] %>%
filter(predictor==predictPhrase) %>%
select(count,prediction) %>%
top_n(3, count) %>%
select(prediction)
if (nrow(phraseMatch)>0) {
# Pad with common fallback words when fewer than 3 candidates exist.
result1 <- ifelse(!is.na(phraseMatch[[1]][1]), as.character(phraseMatch[[1]][1]), "the")
result2 <- ifelse(!is.na(phraseMatch[[1]][2]), as.character(phraseMatch[[1]][2]), "be")
result3 <- ifelse(!is.na(phraseMatch[[1]][3]), as.character(phraseMatch[[1]][3]), "to")
# Function value (returned invisibly, since the last expression is an
# assignment).
results <- c(result1,result2,result3)
}
else if (strlength>1){
# Back off: retry recursively with one fewer context word.
pattern <- paste(replicate(strlength-2, "\\w+\\s+"), collapse = "")
pattern <- paste0(pattern,"\\w+$")
sentence <- str_extract(processed,pattern)
predictNgram(sentence)
}
else {
# No match even for a single-word context.
print('no prediction')
}
}
# Shiny server: shows three predicted next words; clicking one appends it to
# the text box (via the hidden `event` output) and refreshes the predictions.
shinyServer(
function(input, output, session) {
# Word the user just clicked; NULL when nothing is pending.
nextWord <- reactiveValues(word=NULL)
# Recompute the 3 candidate words whenever the input text changes.
setWordOptions <- reactive({
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
})
# One observer per candidate button: record the chosen word and re-render.
observeEvent(input$result1, {
nextWord$word <- setWordOptions()[1]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result1 <- renderText({setWordOptions()[1]})
})
observeEvent(input$result2, {
nextWord$word <- setWordOptions()[2]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result2 <- renderText({setWordOptions()[2]})
})
observeEvent(input$result3, {
nextWord$word <- setWordOptions()[3]
if (str_count(input$text,"\\w+")>0) predictNgram(input$text)
output$result3 <- renderText({setWordOptions()[3]})
})
# Appends the clicked word to the text input, then clears the pending word.
output$event <- renderText({
if (is.null(nextWord$word)) return()
words <- paste(input$text,nextWord$word)
nextWord$word <- NULL
updateTextInput(session, "text",
value = words
)
words
})
# Initial render of the three candidates.
output$result1 <- renderText({setWordOptions()[1]})
output$result2 <- renderText({setWordOptions()[2]})
output$result3 <- renderText({setWordOptions()[3]})
# Keep `event` evaluating even though it is not displayed in the UI.
outputOptions(output,'event', suspendWhenHidden=FALSE)
}
)
|
library(tidyverse)
library(corrplot)
library(ggplot2)
# Red-wine quality dataset; the correlation matrix guides the variable
# choices in the sections below.
vinos <- read.csv("Datasets/0-Descargados/winequality-red.csv")
v <- cor(vinos)
corrplot(v)
view(vinos)
##########################################################
#################### Linear Regression ###################
##########################################################
# Simple linear regression of fixed acidity on density (in the shiny app the
# user chooses the variables). Using a formula with `data =` instead of the
# vinos$... form is the idiomatic call and yields identical coefficients.
R <- lm(fixed.acidity ~ density, data = vinos)
# Scatter plot of the two variables...
plot(vinos$density, vinos$fixed.acidity)
# ...with the fitted regression line drawn on top.
abline(R, col = "red", lwd = 2)
# Interpretation: lm() fits the linear regression of one variable on the
# other; we plot the raw points and overlay the fitted line. Denser wines
# tend to show higher fixed acidity; the regression models that trend, though
# individual wines naturally vary around it.
##########################################################
############### Regresion polinomial #####################
##########################################################
# NOTE(review): despite the "polynomial" heading, this fits a multiple
# *linear* regression of fixed.acidity on citric.acid, density and pH.
# 70/30 train/test split over the four columns of interest
# (1 = fixed.acidity, 3 = citric.acid, 8 = density, 9 = pH).
ids <- sample(1:nrow(vinos),size=nrow(vinos)*0.7,replace = FALSE)
entrenamiento <- vinos[ids, c(1,3,8,9)] # training rows (70%)
probar <- vinos[-ids, c(1,3,8,9)] # held-out rows (30%)
ft = lm(fixed.acidity~ citric.acid + density + pH, data=entrenamiento)
# Predict fixed.acidity for the held-out rows.
predict(ft, probar)
probar$prediccion <- predict(ft, probar)
probar
# Model accuracy as 100 minus the mean absolute percentage error.
# NOTE(review): the percentage error divides by the *prediction* rather than
# the observed value — confirm that is intended.
error <- mean(abs(100*((probar$prediccion - probar$fixed.acidity)/ probar$prediccion)))
accuracy <- 100 - error
accuracy
# Interpretation: split the data 70/30 into training and test sets, fit with
# lm(), predict on the held-out rows, and report accuracy as 100 - MAPE
# (about 92% here — a good predictive fit).
#############################################
################### KNN #####################
#############################################
library(ggplot2)
library(class)
# (FIX: the original called plot() with no arguments here, which errors with
# "argument 'x' is missing" and stops the script — removed.)
# Feature pair of interest (user-selectable in the shiny app).
# NOTE(review): vinosKnn is built but never used below — the model is fed all
# columns of `vinos` (including column 1, the label) as features; confirm
# which was intended.
vinosKnn <- data.frame(vinos$fixed.acidity, vinos$density)
dat <- sample(1:nrow(vinosKnn),size=nrow(vinosKnn)*0.7,replace = FALSE)
train <- vinos[dat,] # training rows (70%)
# FIX: the original used vinos[dat,] here too, so the "test" set was the
# training rows again instead of the held-out 30%.
test <- vinos[-dat,] # held-out rows (30%)
train.labels <- vinos[dat,1]
test.labels <- vinos[-dat,1]
# 10-nearest-neighbour classification of the held-out rows.
# NOTE(review): the result name shadows class::knn for the rest of the script.
knn <- knn(train=train, test=test, cl=train.labels, k = 10, prob=TRUE)
# Accuracy: fraction of held-out labels predicted correctly.
# FIX: the original compared train.labels (training-set length) against the
# test-set predictions and divided by the test-set length, mixing two
# different sample sizes.
accuracy.fin <- 100 * sum(test.labels == knn)/NROW(test.labels)
accuracy.fin
# Interpretation: train on 70% of the rows and evaluate on the held-out 30%;
# accuracy in the 80-95% range is usually considered acceptable.
##############################################
################### KMeans ###################
##############################################
corrplot(v)
plot(vinos$free.sulfur.dioxide, vinos$sulphates)
df <- data.frame(vinos$free.sulfur.dioxide, vinos$sulphates)
# 7-cluster k-means on the two chosen columns.
# NOTE(review): the result is named `kmeans`, shadowing stats::kmeans for the
# rest of the script.
kmeans <- kmeans(df, 7)
plot(df, col = kmeans$cluster)
# Mark the 7 centroids (col = 1:2 recycles over the 7 centers).
points(kmeans$centers, col = 1:2, pch = 8, cex = 2)
# Interpretation: with 7 clusters, kmeans() groups the points and each centre
# sits at the mean of the points in its cluster. To explore other groupings,
# change the columns used to build `df` or the k passed to kmeans().
#############################################
################### PCA #####################
#############################################
# Standardise the data ((value - mean) / sd) before PCA.
vinosPCA <- scale(vinos)
pca <- prcomp(vinosPCA)
str(pca)
pca[[1]] # component standard deviations (sdev)
pca[[2]] # variable loadings (rotation)
pca[[5]] # individual coordinates / scores (x)
# Keep loadings and scores for the first four components.
componentes <- cbind(pca[[2]][,1],pca[[2]][,2],pca[[2]][,3], pca[[2]][,4])
individuos <- pca[[5]][,c(1:4)]
#install.packages("ade4")
library(ade4)
# Correlation circles for pairs of components.
s.corcircle(componentes[,c(1,2)]) # all variables on components 1 and 2
s.corcircle(componentes[,c(1,3)])
s.corcircle(componentes[,c(1,4)])
# NOTE(review): c(1,1) plots component 1 against itself — likely a typo for
# another component pair.
s.corcircle(componentes[,c(1,1)])
# Interpretation: on components 1-2 the variables form four groups, three of
# them clearly separated:
# group 1: pH (west); group 2: alcohol, quality (south);
# group 3: sulphates, citric.acid, fixed.acidity (east);
# group 4: volatile.acidity, total.sulfur.dioxide, residual sugar (north).
# Group 4 is the least cohesive; groups 2 and 3 are the most closely related
# and the most separated from the rest.
#####################################################
################### Knn función #####################
#####################################################
library(ggplot2)
NROW(vinos)   # number of observations
view(vinos)
# Pick the two variables the custom classifier will work on
x <- vinos$citric.acid # change variables in the shiny app
y <- vinos$density # change variables in the shiny app
dataframe = data.frame(x, y)
# Label each row of `dataframe` with a group A/B/C from where its x value
# falls relative to the maximum of x:
#   A: min(x) <= x < 0.4 * max(x)
#   B: 0.4 * max(x) <= x < 0.6 * max(x)
#   C: otherwise
# Thresholds are derived from the data, so no manual cut points are needed.
#
# @param dataframe data.frame with numeric columns `x` and `y`
# @return the input data.frame with a character column `grupos` appended
etiquetar <- function(dataframe) {
  # Hoist the thresholds out of the per-row work: the original recomputed
  # min()/max() on every iteration and grew the vector with c() (O(n^2)).
  lo <- min(dataframe$x)
  cut.a <- max(dataframe$x) * 0.4
  cut.b <- max(dataframe$x) * 0.6
  grupos <- ifelse(dataframe$x >= lo & dataframe$x < cut.a, "A",
                   ifelse(dataframe$x >= cut.a & dataframe$x < cut.b, "B", "C"))
  dataframe <- cbind(dataframe, grupos)
  return(dataframe)
}
dataframe = etiquetar(dataframe)   # append the A/B/C group labels
head(dataframe)
# Scatter plot of the labelled data
ggplot(data = dataframe,aes(x=dataframe$x,y=dataframe$y,color=dataframe$grupos))+
geom_point()+xlab("X")+ylab("Y")+ggtitle("Clasificador KNN")
# Take 70% / 30% for training and testing respectively
ids=sample(1:nrow(dataframe),size=nrow(dataframe)*0.7,replace = FALSE)
Entrenamiento<-dataframe[ids,]
Test<-dataframe[-ids,]
ggplot(data = Entrenamiento ,aes(x=x,y=y,color=grupos))+
geom_point()+xlab("X")+ylab("Y")+ggtitle("Clasificador KNN")
dataframe.temporal = dataframe   # working copy passed to the custom knn()
# Hand-rolled k-nearest-neighbours lookup for one query point (nuevoX, nuevoY).
#
# @param dataframe.temporal data.frame whose first two columns are x/y and
#        whose third column holds the class labels (A/B/C from etiquetar)
# @param nuevoX,nuevoY coordinates of the point to classify
# @param k number of neighbours to return
# @param metodo 1 = Manhattan distance, anything else = Euclidean
# @return vector with the labels of the k nearest rows
knn <- function(dataframe.temporal, nuevoX, nuevoY, k, metodo) {
  if (metodo == 1) {
    # Manhattan distance is |dx| + |dy|. The original *subtracted* the two
    # terms, which can go negative and is not a distance — fixed here.
    d <- abs(nuevoX - dataframe.temporal$x) + abs(nuevoY - dataframe.temporal$y)
  } else {
    d <- sqrt((nuevoX - dataframe.temporal$x)^2 + (nuevoY - dataframe.temporal$y)^2)
  }
  dataframe.temporal <- cbind(dataframe.temporal, d)
  vOrden <- sort(dataframe.temporal$d)
  # Rows whose distance is among the k smallest (ties may match more rows,
  # hence the final [1:k] truncation, as in the original).
  vecinos <- dataframe.temporal[dataframe.temporal$d %in% vOrden[1:k], 3]
  return(vecinos[1:k])
}
v <- knn(dataframe, 7, 13, 1332, 1)   # labels of the 1332 nearest points to (7, 13)
# Count how many entries of `vector` equal `value`
porc<-function(vector,value) {
return (sum(as.integer(vector==value)))
}
a<-porc(v,"A")
b<-porc(v,"B")
c<-porc(v,"C")   # NOTE: shadows base::c from here on
total<-(a+b+c)
# Percentage of neighbours falling in each group
a*100/total
b*100/total
c*100/total
# Interpretacion: Esta función creada por nosotros (inspirada por la functión hecha en clase) crea 3 categorias (o grupos) de un dataframe
# además, ya que las categorias se crean con relacion a la variable 'x', se incorporó una manera de hacer las categorias de manera dinamica
# es decir, multiplicamos x * 0.4, y asi sucesivamente, para no tener que digitar hasta donde quieres cada categoria de manera manual.
|
/Proyecto/Codigo/5 - Modelados.R
|
no_license
|
Jireh01/AdminInfo
|
R
| false
| false
| 8,309
|
r
|
# Load libraries and the red-wine quality dataset
library(tidyverse)
library(corrplot)
library(ggplot2)
vinos <- read.csv("Datasets/0-Descargados/winequality-red.csv")
v <- cor(vinos)   # correlation matrix of all numeric columns
corrplot(v)
view(vinos)
##########################################################
#################### Regresion Lineal ####################
##########################################################
# Linear regression (in the shiny app the user chooses the variables)
R = lm(vinos$fixed.acidity~vinos$density)
# plot the raw points
plot(vinos$density, vinos$fixed.acidity)
# overlay the fitted regression line
abline(R, col="red", lwd=2)
# Interpretation: lm() fits a linear regression of fixed.acidity on density;
# we plot the points and draw the line from the fitted object R. Denser wines
# tend to have higher acidity, roughly in proportion; wine-to-wine variation
# remains, and the regression tries to predict that proportionality.
##########################################################
############### Regresion polinomial #####################
##########################################################
# Regression of fixed.acidity on three other variables:
# citric.acid, density, pH
ids <- sample(1:nrow(vinos),size=nrow(vinos)*0.7,replace = FALSE)
entrenamiento <- vinos[ids, c(1,3,8,9)] # columns to evaluate (70% of rows)
probar <- vinos[-ids, c(1,3,8,9)] # columns to evaluate (30% of rows)
ft = lm(fixed.acidity~ citric.acid + density + pH, data=entrenamiento)
# predict on the held-out rows
predict(ft, probar)
probar$prediccion <- predict(ft, probar)
probar
# Accuracy of the trained model (100 - mean absolute percentage error)
error <- mean(abs(100*((probar$prediccion - probar$fixed.acidity)/ probar$prediccion)))
accuracy <- 100 - error
accuracy
# Interpretation: split the data 70/30 into training and testing sets, select
# the columns to use, fit with lm() and predict with predict(); accuracy here
# is about 92%, a good predictive result.
#############################################
################### KNN #####################
#############################################
library(ggplot2)
library(class)
# (a bare plot() call with no arguments was removed here: it errors at runtime)
# Select the variables to feed the model
vinosKnn <- data.frame(vinos$fixed.acidity, vinos$density)
# 70/30 train/test split on row indices
dat <- sample(1:nrow(vinosKnn), size = nrow(vinosKnn) * 0.7, replace = FALSE)
train <- vinos[dat, ]   # 70%
test <- vinos[-dat, ]   # 30% (was vinos[dat, ]: identical to train — bug)
train.labels <- vinos[dat, 1]
test.labels <- vinos[-dat, 1]
knn <- knn(train = train, test = test, cl = train.labels, k = 10, prob = TRUE)
# Accuracy: compare predictions against the *test* labels (was train.labels)
accuracy.fin <- 100 * sum(test.labels == knn) / NROW(test.labels)
accuracy.fin
# Interpretacion: Para la primera version del knn (la segunda se encuentra al final) se uso la función por defecto de R. Con este modelo
# Se entrena un dataset (70%) y luego se testea ese mismo dataset (30%), se puede ver en el knn los resultados para cada uno. Luego calculamos
# el margen de error, que en este caso es alto, con 59.58%. Lo recomendado es entre 80% y 95%
##############################################
################### KMeans ###################
##############################################
corrplot(v)   # revisit the correlation matrix to pick two variables
plot(vinos$free.sulfur.dioxide, vinos$sulphates)
df <- data.frame(vinos$free.sulfur.dioxide, vinos$sulphates)
kmeans <- kmeans(df, 7)   # NOTE: the result object shadows stats::kmeans
plot(df, col = kmeans$cluster)   # colour each point by its assigned cluster
points(kmeans$centers, col = 1:2, pch = 8, cex = 2)   # mark the 7 centroids
# Interpretacion: Se puede ver como cuando tenemos 7 cluster, la funcion kmeans, automaticamente se visualizan los 7 cluster. Además se puede
# ver como los cluster no estan en la misma posicion, sino que estan de acuerdo a al promedio de las distancias de todos los puntos dentro de
# ese grupo o categoria. Para cambiar los valores y los clusters, solo modifique los x,y de df y el numero en la función kmeans
#############################################
################### PCA #####################
#############################################
# Normalisation of the data: standardisation (variable - mean) / sd
vinosPCA <- scale(vinos)
pca <- prcomp(vinosPCA)
str(pca)
pca[[1]] # standard deviations of the components
pca[[2]] # rotations (variable loadings)
pca[[5]] # individuals (per-observation scores)
# Keep as many components as needed below
componentes <- cbind(pca[[2]][,1],pca[[2]][,2],pca[[2]][,3], pca[[2]][,4])
individuos <- pca[[5]][,c(1:4)]
#install.packages("ade4")
library(ade4)
# correlation-circle plots for pairs of components
s.corcircle(componentes[,c(1,2)]) # all loadings on components 1 and 2
s.corcircle(componentes[,c(1,3)])
s.corcircle(componentes[,c(1,4)])
s.corcircle(componentes[,c(1,1)]) # NOTE(review): component 1 vs itself — possibly meant another pair; confirm
# Interpretacon: PCA o "Principal Component Analysis", en nuestro caso, en las componentes[,c(1,2)] se puede ver como hay 4 grupos, 3 de ellos
# bien marcados:
# Primer grupo: pH (Oeste)
# Segundo grupo: alcohol, quality (Sur)
# Tercer grupo: sulphates, citric.acid, fixed.acidity (Este)
# Cuarto grupo: Volatile.acidity, total.sulfur.dioxide, residual.sulfur (Norte)
# El grupo menos relacionado es el cuarto, donde no se ve muy bien y no se marca, ya que cubre mucho espacio y hay mucha diferencia entre si
# El grupo que esta mas relacionado entre si es el Segundo y tercero, ya que se ven que son los mas cercanos entre si, ademas de separados del
# resto.
#####################################################
################### Knn función #####################
#####################################################
library(ggplot2)
NROW(vinos)   # number of observations
view(vinos)
# Pick the two variables the custom classifier will work on
x <- vinos$citric.acid # change variables in the shiny app
y <- vinos$density # change variables in the shiny app
dataframe = data.frame(x, y)
# Label each row of `dataframe` with a group A/B/C from where its x value
# falls relative to the maximum of x:
#   A: min(x) <= x < 0.4 * max(x)
#   B: 0.4 * max(x) <= x < 0.6 * max(x)
#   C: otherwise
# Thresholds are derived from the data, so no manual cut points are needed.
#
# @param dataframe data.frame with numeric columns `x` and `y`
# @return the input data.frame with a character column `grupos` appended
etiquetar <- function(dataframe) {
  # Hoist the thresholds out of the per-row work: the original recomputed
  # min()/max() on every iteration and grew the vector with c() (O(n^2)).
  lo <- min(dataframe$x)
  cut.a <- max(dataframe$x) * 0.4
  cut.b <- max(dataframe$x) * 0.6
  grupos <- ifelse(dataframe$x >= lo & dataframe$x < cut.a, "A",
                   ifelse(dataframe$x >= cut.a & dataframe$x < cut.b, "B", "C"))
  dataframe <- cbind(dataframe, grupos)
  return(dataframe)
}
dataframe = etiquetar(dataframe)   # append the A/B/C group labels
head(dataframe)
# Scatter plot of the labelled data
ggplot(data = dataframe,aes(x=dataframe$x,y=dataframe$y,color=dataframe$grupos))+
geom_point()+xlab("X")+ylab("Y")+ggtitle("Clasificador KNN")
# Take 70% / 30% for training and testing respectively
ids=sample(1:nrow(dataframe),size=nrow(dataframe)*0.7,replace = FALSE)
Entrenamiento<-dataframe[ids,]
Test<-dataframe[-ids,]
ggplot(data = Entrenamiento ,aes(x=x,y=y,color=grupos))+
geom_point()+xlab("X")+ylab("Y")+ggtitle("Clasificador KNN")
dataframe.temporal = dataframe   # working copy passed to the custom knn()
# Hand-rolled k-nearest-neighbours lookup for one query point (nuevoX, nuevoY).
#
# @param dataframe.temporal data.frame whose first two columns are x/y and
#        whose third column holds the class labels (A/B/C from etiquetar)
# @param nuevoX,nuevoY coordinates of the point to classify
# @param k number of neighbours to return
# @param metodo 1 = Manhattan distance, anything else = Euclidean
# @return vector with the labels of the k nearest rows
knn <- function(dataframe.temporal, nuevoX, nuevoY, k, metodo) {
  if (metodo == 1) {
    # Manhattan distance is |dx| + |dy|. The original *subtracted* the two
    # terms, which can go negative and is not a distance — fixed here.
    d <- abs(nuevoX - dataframe.temporal$x) + abs(nuevoY - dataframe.temporal$y)
  } else {
    d <- sqrt((nuevoX - dataframe.temporal$x)^2 + (nuevoY - dataframe.temporal$y)^2)
  }
  dataframe.temporal <- cbind(dataframe.temporal, d)
  vOrden <- sort(dataframe.temporal$d)
  # Rows whose distance is among the k smallest (ties may match more rows,
  # hence the final [1:k] truncation, as in the original).
  vecinos <- dataframe.temporal[dataframe.temporal$d %in% vOrden[1:k], 3]
  return(vecinos[1:k])
}
v <- knn(dataframe, 7, 13, 1332, 1)   # labels of the 1332 nearest points to (7, 13)
# Count how many entries of `vector` equal `value`
porc<-function(vector,value) {
return (sum(as.integer(vector==value)))
}
a<-porc(v,"A")
b<-porc(v,"B")
c<-porc(v,"C")   # NOTE: shadows base::c from here on
total<-(a+b+c)
# Percentage of neighbours falling in each group
a*100/total
b*100/total
c*100/total
# Interpretacion: Esta función creada por nosotros (inspirada por la functión hecha en clase) crea 3 categorias (o grupos) de un dataframe
# además, ya que las categorias se crean con relacion a la variable 'x', se incorporó una manera de hacer las categorias de manera dinamica
# es decir, multiplicamos x * 0.4, y asi sucesivamente, para no tener que digitar hasta donde quieres cada categoria de manera manual.
|
# Do different relevant plots for temperature.
#
#
# Written by Adrian Dragulescu on 4-Jun-2004
plot.hist.temp <- function(save, options){
  # Draw the full set of historical-temperature diagnostic plots for one
  # airport: monthly mean +/- sd, interpolated daily mean +/- sd, this year
  # inside the historical envelope, monthly averages by year (one pdf per
  # month), and trellis plots of daily temperatures and daily changes.
  #
  # Args:
  #   save    - list; $plots flag and directories $dir$calib, $dir$plots
  #   options - list; $airport.name used in titles/filenames ($main for the
  #             trellis plot)
  #
  # Side effects: changes the working directory, writes several pdf files,
  # and relies on objects (temp.month, temp.day, temp.hist.scenarios, hdata)
  # loaded from the calibration file parms.<airport>.Rdata.
  #
  # Fixes vs. original: every pdf() call used the partially-matched argument
  # name `heigh=`; spelled out as `height=` throughout.
  save$plots <- 1
  setwd(save$dir$calib)
  options$filename <- paste("parms.", options$airport.name, ".Rdata", sep="")
  load(options$filename)
  #------------------------------------------------------
  # Plot the monthly temperature
  #------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name, "_hT-month.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  xLim <- c(1,12);
  yLim <- c(min(temp.month$mean-temp.month$sd),
            max(temp.month$mean+temp.month$sd))
  plot(xLim, yLim, type="n", main=options$airport.name,
       xlab="Month", ylab="Monthly temperature",
       xlim=xLim, ylim=yLim, xaxt="n", cex.main=1)
  lines(temp.month$mean, type="b", col="blue")
  lines(temp.month$mean-temp.month$sd, type="b", lty=2, col="red")
  lines(temp.month$mean+temp.month$sd, type="b", lty=2, col="red")
  axis(side=1, at=1:12, labels=month.abb)
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot the spline interpolated daily temperature
  #-------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name, "_hT-day.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  yLim <- c(min(temp.day$mean-temp.day$sd),
            max(temp.day$mean+temp.day$sd))
  plot(temp.day$mean, type="n", ylim=yLim,
       xlab="Day of the year", ylab="Daily temperature",
       main=options$airport.name, cex.main=1, xaxs="i",
       xlim=c(0,367))
  lines(temp.day$mean, col="blue")
  lines(temp.day$mean+temp.day$sd, lty=2, col="red")
  lines(temp.day$mean-temp.day$sd, lty=2, col="red")
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot this year in the bands
  #-------------------------------------------------------
  # Envelope = per-day min/max over all historical scenarios
  aux <- as.numeric(temp.hist.scenarios)
  ind <- rep(1:366, each=dim(temp.hist.scenarios)[1])
  day.max <- tapply(aux, ind, max)
  day.min <- tapply(aux, ind, min)
  thisYear <- hdata$year[nrow(hdata)]  # most recent year in the history
  ind <- which(hdata$year==thisYear)
  temp.thisyear <- (hdata$Tmax[ind] + hdata$Tmin[ind])/2
  hdata$Tavg <- (hdata$Tmax + hdata$Tmin)/2
  fName <- paste(save$dir$plots, options$airport.name, "_envelope.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  plot(1:366, day.max, type="n", ylim=c(min(day.min),max(day.max)),
       xlim=c(1,366), xlab="Day of year", ylab="Temperature",
       main=options$airport.name, cex.main=1)
  polygon(c(1:366,366:1), c(day.max,rev(day.min)), col="lavender",
          border="gray")
  lines(1:366, temp.day$mean, col="gray")
  lines(1:length(temp.thisyear), temp.thisyear, col="blue")
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot monthly temperature vs. year by month
  #-------------------------------------------------------
  plot.monthly.T <- function(hist.avg, m, options){
    # Where: - hist.avg is the monthly average by year (rows = years)
    #        - m is the month you are interested to plot
    main <- paste(options$city, ", ", month.abb[m], sep="")
    fileName <- paste(save$dir$plots, options$city, "-",
                      month.abb[m], ".pdf", sep="")
    pdf(fileName, width=7.0, height=5.0)
    q <- as.numeric(quantile(hist.avg[,m], probs=c(0.1,0.5,0.9), na.rm=TRUE))
    # NOTE(review): rownames(hist.avg) is character here — confirm plot()
    # handles it on the target R version, or wrap in as.numeric().
    plot(rownames(hist.avg), hist.avg[,m], xlab="year", col="blue",
         ylab="Average monthly temperature", main=main, cex.main=1)
    points(thisYear, hist.avg[as.character(thisYear),m], pch=8, col="red")
    abline(h=q[c(1,3)], col="pink")
    abline(h=q[2], col="grey")
    mtext(c("10%", "50%", "90%"), side=4, at=q)
    dev.off()
    # was hist.avg["2004",1]: hard-coded year and column (leftover from when
    # the code was written); use the current year and the requested month.
    return(rbind(t(t(q)), hist.avg[as.character(thisYear),m]))
  }
  # Build the year x month matrix of average temperatures
  hist.avg <- NULL
  uYears <- unique(hdata$year)
  hist.avg <- matrix(NA, ncol=12, nrow=length(uYears))
  for (yr in 1:length(uYears)){
    ind <- which(hdata$year==uYears[yr])
    aux <- tapply(hdata[ind,"Tavg"], hdata[ind,"month"], mean)
    hist.avg[yr, as.numeric(names(aux))] <- as.matrix(aux)
  }
  rownames(hist.avg) <- uYears
  colnames(hist.avg) <- month.abb
  hist.avg <- signif(hist.avg, digits=3)
  options$city <- options$airport.name
  for (m in 1:12){
    aux <- plot.monthly.T(hist.avg, m, options)
  }
  #-------------------------------------------------------
  # Trellis Plot temperature by month -- big file
  #-------------------------------------------------------
  monthF <- ordered(month.abb[hdata$month], levels=month.abb)
  fName <- paste(save$dir$plots, options$airport.name, "_trellis.pdf", sep="")
  if (save$plots){pdf(fName, width=5.0, height=5.0)}
  aux <- trellis.par.get()
  bkg.col <- trellis.par.get("background")
  bkg.col$col <- "white"
  trellis.par.set("background", bkg.col)
  print(xyplot(hdata$Tavg ~ hdata$day | monthF,
               panel=function(x,y){
                 hF <- factor(x)
                 my <- tapply(y,hF,median)
                 panel.xyplot(x,y, col="gray", pch=".")
                 panel.xyplot(levels(hF),my, type="l", col="blue", lwd=2)
               },
               col="grey", pch=".", bg="white",
               xlab="Day of month", ylab="Historical temperature",
               main=options$main))
  if (save$plots){dev.off()}
  # (a commented-out duplicate "BW Plot of the last 10 years" section that
  # repeated the trellis plot above verbatim was removed)
  #----------------------------------------------------------
  # Quantile plot of daily changes by month -- big file
  #----------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name,
                 "_daily_Temp_changes_quantile.pdf", sep="")
  N <- dim(hdata)[1]
  dT <- diff(hdata$Tavg)  # day-over-day temperature changes
  if (save$plots){pdf(fName, width=8.75, height=6.0)}
  aux <- trellis.par.get()
  bkg.col <- trellis.par.get("background")
  bkg.col$col <- "white"
  trellis.par.set("background", bkg.col)
  # Normal QQ-plot of the daily changes, one panel per month, with fitted line
  print(xyplot( dT ~ dT | monthF[1:(N-1)],
                panel=function(x,y){
                  NN <- length(x)
                  probF <- seq(1/(2*NN), 1-1/(2*NN), length=NN)
                  reg <- lm(sort(x)~qnorm(probF))
                  panel.xyplot(qnorm(probF), sort(x), col="blue", pch=1)
                  panel.xyplot(qnorm(probF), predict(reg), col="grey", type="l",
                               lwd=2)},
                xlim=c(-4,4), xlab="Quantile standard normal",
                ylab="Quantile daily temperature changes",
                main=options$airport.name, cex.main=1))
  if (save$plots){dev.off()}
}
|
/R Extension/RMG/Models/Temperature/plot.hist.temp.R
|
no_license
|
uhasan1/QLExtension-backup
|
R
| false
| false
| 7,093
|
r
|
# Do different relevant plots for temperature.
#
#
# Written by Adrian Dragulescu on 4-Jun-2004
plot.hist.temp <- function(save, options){
  # Draw the full set of historical-temperature diagnostic plots for one
  # airport: monthly mean +/- sd, interpolated daily mean +/- sd, this year
  # inside the historical envelope, monthly averages by year (one pdf per
  # month), and trellis plots of daily temperatures and daily changes.
  #
  # Args:
  #   save    - list; $plots flag and directories $dir$calib, $dir$plots
  #   options - list; $airport.name used in titles/filenames ($main for the
  #             trellis plot)
  #
  # Side effects: changes the working directory, writes several pdf files,
  # and relies on objects (temp.month, temp.day, temp.hist.scenarios, hdata)
  # loaded from the calibration file parms.<airport>.Rdata.
  #
  # Fixes vs. original: every pdf() call used the partially-matched argument
  # name `heigh=`; spelled out as `height=` throughout.
  save$plots <- 1
  setwd(save$dir$calib)
  options$filename <- paste("parms.", options$airport.name, ".Rdata", sep="")
  load(options$filename)
  #------------------------------------------------------
  # Plot the monthly temperature
  #------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name, "_hT-month.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  xLim <- c(1,12);
  yLim <- c(min(temp.month$mean-temp.month$sd),
            max(temp.month$mean+temp.month$sd))
  plot(xLim, yLim, type="n", main=options$airport.name,
       xlab="Month", ylab="Monthly temperature",
       xlim=xLim, ylim=yLim, xaxt="n", cex.main=1)
  lines(temp.month$mean, type="b", col="blue")
  lines(temp.month$mean-temp.month$sd, type="b", lty=2, col="red")
  lines(temp.month$mean+temp.month$sd, type="b", lty=2, col="red")
  axis(side=1, at=1:12, labels=month.abb)
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot the spline interpolated daily temperature
  #-------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name, "_hT-day.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  yLim <- c(min(temp.day$mean-temp.day$sd),
            max(temp.day$mean+temp.day$sd))
  plot(temp.day$mean, type="n", ylim=yLim,
       xlab="Day of the year", ylab="Daily temperature",
       main=options$airport.name, cex.main=1, xaxs="i",
       xlim=c(0,367))
  lines(temp.day$mean, col="blue")
  lines(temp.day$mean+temp.day$sd, lty=2, col="red")
  lines(temp.day$mean-temp.day$sd, lty=2, col="red")
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot this year in the bands
  #-------------------------------------------------------
  # Envelope = per-day min/max over all historical scenarios
  aux <- as.numeric(temp.hist.scenarios)
  ind <- rep(1:366, each=dim(temp.hist.scenarios)[1])
  day.max <- tapply(aux, ind, max)
  day.min <- tapply(aux, ind, min)
  thisYear <- hdata$year[nrow(hdata)]  # most recent year in the history
  ind <- which(hdata$year==thisYear)
  temp.thisyear <- (hdata$Tmax[ind] + hdata$Tmin[ind])/2
  hdata$Tavg <- (hdata$Tmax + hdata$Tmin)/2
  fName <- paste(save$dir$plots, options$airport.name, "_envelope.pdf", sep="")
  if (save$plots){pdf(fName, width=8.0, height=5.0)}
  plot(1:366, day.max, type="n", ylim=c(min(day.min),max(day.max)),
       xlim=c(1,366), xlab="Day of year", ylab="Temperature",
       main=options$airport.name, cex.main=1)
  polygon(c(1:366,366:1), c(day.max,rev(day.min)), col="lavender",
          border="gray")
  lines(1:366, temp.day$mean, col="gray")
  lines(1:length(temp.thisyear), temp.thisyear, col="blue")
  if (save$plots){dev.off()}
  #-------------------------------------------------------
  # Plot monthly temperature vs. year by month
  #-------------------------------------------------------
  plot.monthly.T <- function(hist.avg, m, options){
    # Where: - hist.avg is the monthly average by year (rows = years)
    #        - m is the month you are interested to plot
    main <- paste(options$city, ", ", month.abb[m], sep="")
    fileName <- paste(save$dir$plots, options$city, "-",
                      month.abb[m], ".pdf", sep="")
    pdf(fileName, width=7.0, height=5.0)
    q <- as.numeric(quantile(hist.avg[,m], probs=c(0.1,0.5,0.9), na.rm=TRUE))
    # NOTE(review): rownames(hist.avg) is character here — confirm plot()
    # handles it on the target R version, or wrap in as.numeric().
    plot(rownames(hist.avg), hist.avg[,m], xlab="year", col="blue",
         ylab="Average monthly temperature", main=main, cex.main=1)
    points(thisYear, hist.avg[as.character(thisYear),m], pch=8, col="red")
    abline(h=q[c(1,3)], col="pink")
    abline(h=q[2], col="grey")
    mtext(c("10%", "50%", "90%"), side=4, at=q)
    dev.off()
    # was hist.avg["2004",1]: hard-coded year and column (leftover from when
    # the code was written); use the current year and the requested month.
    return(rbind(t(t(q)), hist.avg[as.character(thisYear),m]))
  }
  # Build the year x month matrix of average temperatures
  hist.avg <- NULL
  uYears <- unique(hdata$year)
  hist.avg <- matrix(NA, ncol=12, nrow=length(uYears))
  for (yr in 1:length(uYears)){
    ind <- which(hdata$year==uYears[yr])
    aux <- tapply(hdata[ind,"Tavg"], hdata[ind,"month"], mean)
    hist.avg[yr, as.numeric(names(aux))] <- as.matrix(aux)
  }
  rownames(hist.avg) <- uYears
  colnames(hist.avg) <- month.abb
  hist.avg <- signif(hist.avg, digits=3)
  options$city <- options$airport.name
  for (m in 1:12){
    aux <- plot.monthly.T(hist.avg, m, options)
  }
  #-------------------------------------------------------
  # Trellis Plot temperature by month -- big file
  #-------------------------------------------------------
  monthF <- ordered(month.abb[hdata$month], levels=month.abb)
  fName <- paste(save$dir$plots, options$airport.name, "_trellis.pdf", sep="")
  if (save$plots){pdf(fName, width=5.0, height=5.0)}
  aux <- trellis.par.get()
  bkg.col <- trellis.par.get("background")
  bkg.col$col <- "white"
  trellis.par.set("background", bkg.col)
  print(xyplot(hdata$Tavg ~ hdata$day | monthF,
               panel=function(x,y){
                 hF <- factor(x)
                 my <- tapply(y,hF,median)
                 panel.xyplot(x,y, col="gray", pch=".")
                 panel.xyplot(levels(hF),my, type="l", col="blue", lwd=2)
               },
               col="grey", pch=".", bg="white",
               xlab="Day of month", ylab="Historical temperature",
               main=options$main))
  if (save$plots){dev.off()}
  # (a commented-out duplicate "BW Plot of the last 10 years" section that
  # repeated the trellis plot above verbatim was removed)
  #----------------------------------------------------------
  # Quantile plot of daily changes by month -- big file
  #----------------------------------------------------------
  fName <- paste(save$dir$plots, options$airport.name,
                 "_daily_Temp_changes_quantile.pdf", sep="")
  N <- dim(hdata)[1]
  dT <- diff(hdata$Tavg)  # day-over-day temperature changes
  if (save$plots){pdf(fName, width=8.75, height=6.0)}
  aux <- trellis.par.get()
  bkg.col <- trellis.par.get("background")
  bkg.col$col <- "white"
  trellis.par.set("background", bkg.col)
  # Normal QQ-plot of the daily changes, one panel per month, with fitted line
  print(xyplot( dT ~ dT | monthF[1:(N-1)],
                panel=function(x,y){
                  NN <- length(x)
                  probF <- seq(1/(2*NN), 1-1/(2*NN), length=NN)
                  reg <- lm(sort(x)~qnorm(probF))
                  panel.xyplot(qnorm(probF), sort(x), col="blue", pch=1)
                  panel.xyplot(qnorm(probF), predict(reg), col="grey", type="l",
                               lwd=2)},
                xlim=c(-4,4), xlab="Quantile standard normal",
                ylab="Quantile daily temperature changes",
                main=options$airport.name, cex.main=1))
  if (save$plots){dev.off()}
}
|
/R/splash.point.R
|
no_license
|
lhmet-forks/rsplash
|
R
| false
| false
| 25,650
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_paths.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{edit_r_environ}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{usethis}{\code{\link[usethis]{edit_r_environ}}}
}}
|
/fuzzedpackages/m2r/man/reexports.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 414
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_paths.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{edit_r_environ}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{usethis}{\code{\link[usethis]{edit_r_environ}}}
}}
|
#' Efficient parallel lapply using a SLURM cluster
#'
#' An easy-to-use form of lapply that emulates parallelization using a SLURM cluster.
#'
#' Mimics the functionality of lapply but implemented
#' in a way that iterations can be submitted as one or more individual
#' jobs to a SLURM cluster.
#' Each job's batch, err, out, and script files are stored in a temporary folder. Once
#' all jobs have been submitted, the function waits for them to finish. When they
#' are done executing, all results from individual jobs will be compiled into a single list.
#'
#' @param x vector/list - FUN will be applied to the elements of this
#' @param FUN function - function to be applied to each element of x
#' @param ... further arguments of FUN
#' @param tasks integer - number of individual parallel jobs to execute
#' @param workingDir string - path to folder that will contain all the temporary files needed for submission, execution, and compilation of individual jobs
#' @param packages character vector - package names to be loaded in individual tasks
#' @param sources character vector - paths to R code to be loaded in individual tasks
#' @param extraBashLines character vector - each element will be added as a line to the individual task execution bash script before R gets executed. For instance, here you may want to load R if it is not in your system by default
#' @param extraScriptLines character vector - each element will be added as a line to the individual task execution R script before starting lapply
#' @param clean logical - if TRUE all files created in workingDir will be deleted
#' @param partition character - Partition to use. Equivalent to \code{--partition} of SLURM sbatch
#' @param time character - Time requested for job execution, one accepted format is "HH:MM:SS". Equivalent to \code{--time} of SLURM sbatch
#' @param mem character - Memory requested for job execution, one accepted format is "xG" or "xMB". Equivalent to \code{--mem} of SLURM sbatch
#' @param proc integer - Number of processors requested per task. Equivalent to \code{--cpus-per-task} of SLURM sbatch
#' @param totalProc integer - Number of tasks requested for job. Equivalent to \code{--ntasks} of SLURM sbatch
#' @param nodes integer - Number of nodes requested for job. Equivalent to \code{--nodes} of SLURM sbatch
#' @param email character - email address to send info when job is done. Equivalent to \code{--mail-user=} of SLURM sbatch
#'
#' @return list - results of FUN applied to each element in x
#' @examples
#' \dontrun{
#' #------------------------
#' # Parallel execution of 100 function calls using 4 parellel tasks
#' myFun <- function(x) {
#' #Sys.sleep(10)
#' return(rep(x, 3))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution of 100 function calls using 100 parellel tasks
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 100, workingDir = "~/testSap", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution where a package is required in function calls
#' myFun <- function(x) {
#' return(ggplot(data.frame(x = 1:100, y = (1:100)*x), aes(x = x, y = y )) + geom_point() + ylim(0, 1e4))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", packages = "ggplot2", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution where R has to be loaded in the system (e.g. in bash `module load R`)
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", time = "60", mem = "1G", extraBashLines = "module load R")
#'
#'
#' #------------------------
#' # Parellel execution where a source is required in funciton calls
#' # Content of ./customRep.R
#' customRep <- function(x) {
#' return(paste("customFunction", rep(x, 3)))
#' }
#' # Super appply execution
#' myFun <- function(x) {
#' return(customRep(x))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, sources = "./customRep.R", workingDir = "~/testSap", time = "60", mem = "1G")
#'
#' }
#' @export
superApply <- function(x, FUN, ..., tasks = 1, workingDir = getwd(), packages = NULL, sources = NULL, extraBashLines = NULL, extraScriptLines = "", clean = TRUE, partition = NULL, time = NULL, mem = NULL, proc = NULL, totalProc = NULL, nodes = NULL, email = NULL){
  # lapply-style parallel map over a SLURM cluster: partitions x into `tasks`
  # jobs, submits them, waits, merges the per-job results into one list, and
  # optionally cleans up all temporary files.  See the roxygen block above for
  # the full parameter documentation.
  #
  # Fixes vs. original: TRUE instead of the reassignable T, && for scalar
  # validations, typo fixes in user-facing messages, dead commented-out
  # validation stubs removed, `supperApplyResults` local renamed.

  # --- Input validation (scalar conditions, so use short-circuiting &&) ---
  if(!is.list(x) && !is.vector(x))
    stop("x has to be a list or a vector")
  if(!is.numeric(tasks))
    stop("tasks has to be numerical")
  if(length(tasks) > 1)
    stop("tasks has to be of length 1")
  if(!is.null(extraBashLines) && !is.character(extraBashLines))
    stop("extraBashLines has to be character or NULL")

  # Unique prefix so every temp file of this run can be identified and cleaned
  SAP_PREFIX <- "sAp_"
  idPrefix <- paste0(c(SAP_PREFIX, sample(letters, size = 3), sample(0:9, size = 1)), collapse = "")
  workingDir <- path.expand(workingDir)
  FUN <- match.fun(FUN)

  # Organizing JobArray parameters
  JobArrayPars <- list(outDir = workingDir, partition = partition, time = time, mem = mem, proc = proc, totalProc = totalProc, nodes = nodes, email = email)

  # Indices used to partition x into the individual jobs
  partitionIndeces <- getPartitionIndeces(x, tasks = tasks)

  # Constructing parallel jobs
  printTime("Partitioning function calls\n")
  jobArray <- getJobArray(x, FUN, ..., partitionIndeces = partitionIndeces, idPrefix = idPrefix, workingDir = workingDir, extraScriptLines = extraScriptLines, extraBashLines = extraBashLines, JobArrayPars = JobArrayPars, packages = packages, sources = sources)

  # Submit and wait; on user interrupt, cancel the jobs and clean up
  printTime("Submitting parallel Jobs\n")
  jobArray$submit()
  submission <- tryCatch({
    jobArray$wait(stopIfFailed = TRUE)
  }, interrupt = function(i) {
    clean_interruption(jobArray, workingDir, idPrefix)
    return(NULL)
  })
  if(is.null(submission))
    return(invisible(NULL))

  # Merge the per-job .outRData files into a single result list
  printTime("Merging parallel results\n")
  jobNames <- jobArray$getJobNames()
  expectedOutFiles <- paste0(jobNames, ".outRData")
  expectedOutVariables <- paste0("output_", jobNames)
  superApplyResults <- mergeListDir(expectedOutFiles, expectedOutVariables, workingDir)
  printTime("Merge done\n")

  # Remove all temporary files created by this run if desired
  if(clean) {
    printTime("Cleaning partitioned data\n")
    # NOTE(review): `pattern` is a regex; the trailing "*" is glob-style here
    # (it still matches everything starting with idPrefix) — consider glob2rx()
    file.remove(list.files(workingDir, full.names = TRUE, pattern = paste0(idPrefix, "*")))
    printTime("Cleaning done\n")
  }
  return(superApplyResults)
}
#' Helper of superApply
#'
#' Computes the start/end indices that split `x` into at most `tasks`
#' contiguous chunks (one chunk per parallel job).
#'
#' If `x` is a single number, that number is taken as the count of times FUN
#' will be executed; in that mode the caller's FUN is expected to ignore its
#' first argument.
#'
#' @param x vector/list - data to be partitioned, or a single numeric count
#' @param tasks integer - desired number of parallel chunks
#' @return list with numeric vectors `iStart` and `iEnd` (same length,
#'   element i delimiting chunk i inclusively)
getPartitionIndeces <- function(x, tasks = tasks) {
  if (!is.vector(x)) {
    # Coerce non-vector objects so length() reflects the number of elements
    x <- as.list(x)
    times <- length(x)
  } else if (length(x) == 1 && is.numeric(x)) {
    # A single number means "execute FUN x times"
    times <- x
  } else {
    times <- length(x)
  }
  # Chunk size: ceiling so `times` elements fit in at most `tasks` chunks
  jobsPerTask <- ceiling(times / tasks)
  iStart <- seq(1, times, jobsPerTask)
  iEnd <- seq(jobsPerTask, times, jobsPerTask)
  # seq() falls short of `times` when it is not a multiple of the chunk
  # size; extend with a final (shorter) chunk covering the remainder
  if (iEnd[length(iEnd)] < times)
    iEnd <- c(iEnd, times)
  list(iStart = iStart, iEnd = iEnd)
}
#' Helper of superApply
#'
#' Builds a JobArray from the partitions of x created by getPartitionIndeces().
#' The array is returned unsubmitted; the caller submits and waits on it.
#'
#' @param x list/vector - data to be partitioned
#' @param FUN function - function to be applied to each element of x
#' @param ... further arguments of FUN
#' @param idPrefix character - prefix for job names
#' @param partitionIndeces list - output of getPartitionIndeces()
#' @param workingDir character - folder for temporary job files
#' @param extraScriptLines character - lines prepended to each generated R script
#' @param extraBashLines character - lines prepended to each bash command
#' @param JobArrayPars list - sbatch options forwarded to JobArray$new
#' @param packages character - packages to load in each task
#' @param sources character - files to source in each task
#' @return a JobArray object (not yet submitted)
getJobArray <- function(x, FUN, ..., idPrefix, partitionIndeces, workingDir,
                        extraScriptLines, extraBashLines, JobArrayPars,
                        packages, sources) {
  # Make sure the working directory exists before writing anything into it
  dir.create(workingDir, showWarnings = FALSE, recursive = TRUE)
  # Extend the prefix with extra random characters so repeated calls with the
  # same prefix do not collide
  idPrefix <- paste0(c(idPrefix, sample(letters, size = 3), sample(0:9, size = 1)), collapse = "")
  # Remove leftovers from a previous run with this id. unlink() expands the
  # "*" wildcard itself, is portable, and never errors on missing files --
  # a safer replacement for shelling out to `rm`
  unlink(file.path(workingDir, paste0(idPrefix, "*")))
  iStart <- partitionIndeces$iStart
  iEnd <- partitionIndeces$iEnd
  # One R script (plus bash wrapper command) per partition
  jobScripts <- createJobScriptsData(x, FUN = FUN, ..., idPrefix = idPrefix,
                                     iStart = iStart, iEnd = iEnd,
                                     workingDir = workingDir,
                                     extraScriptLines = extraScriptLines,
                                     extraBashLines = extraBashLines,
                                     packages = packages, sources = sources)
  JobArrayPars <- c(list(commandList = jobScripts, jobName = idPrefix), JobArrayPars)
  do.call(JobArray$new, JobArrayPars)
}
#' Helper of superApply
#'
#' Takes a vector/list x, a function FUN and extra parameters (...) and creates
#' one R script per partition that executes lapply on that slice of x using FUN.
#' Scripts and their input .applyRData files are saved in workingDir; each
#' generated script saves its result into an .outRData file that
#' mergeListDir() later collects.
#'
#' @param x - vector/list - data to which lapply will be executed
#' @param FUN - function - function to be applied to x
#' @param ... - extra parameters passed to FUN
#' @param idPrefix character - prefix for job names
#' @param iStart numeric vector - start indeces where partitions were done on x
#' @param iEnd numeric vector - end indeces where partitions were done on x
#' @param workingDir character - folder where scripts and data files are written
#' @param extraScriptLines character - lines prepended to each generated R script
#' @param extraBashLines character - lines prepended to each bash command
#' @param packages character - packages to load in each task; NULL means
#'   "replicate the packages currently attached in this session"; "" means none
#' @param sources character - paths to source in each task
#' @return list of character vectors - one bash command (optionally preceded by
#'   extraBashLines) per partition, ready to hand to the JobArray
createJobScriptsData <- function(x, FUN, ..., idPrefix, iStart, iEnd, workingDir, extraScriptLines = "", extraBashLines = "", packages = NULL, sources = NULL) {
cmds <- list()
FUN <- match.fun(FUN)
# Build the library() lines for the generated scripts. NULL means mirror the
# session's attached packages; explicit user packages are also loaded here in
# the current session so a bad package name fails early, not on the cluster.
if(is.null(packages)) {
packages <- createStringFunction ("library", getUserPackages())
} else if (packages == "") {
packages <- packages
} else {
packages <- createStringFunction ("library", packages)
eval(parse(text = paste(packages, collapse = ";")))
}
# Build the source() lines, and validate them by sourcing into a throwaway
# environment so the current session's globals are not polluted.
if(!is.null(sources)) {
tempEnv <- new.env()
sources <- paste0('"', sources, '"')
sourcesLocal <- createStringFunction ("source", paste(sources, ", chdir = T, local = tempEnv"))
sources <- createStringFunction ("source", paste(sources, ", chdir = T"))
eval(parse(text = paste(sourcesLocal, collapse = ";")))
rm(tempEnv)
}
# One script per partition: save the slice + FUN + extra args to disk, then
# write an R script that loads them, runs lapply, and saves the named output.
for(i in 1:length(iStart)) {
id <- paste0(idPrefix, "_", i)
xCurrent <- x[iStart[i]:iEnd[i]]
flush.console()
# File for the job's result, and file for the job's input data
outDataFile <- file.path(workingDir, paste0(id, ".outRData"))
dataFile <- file.path(workingDir, paste0(id, ".applyRData"))
# Saving RData files used in script; global-environment functions are
# included so FUN's helpers exist in the worker process
pars <- list(...)
save(xCurrent, FUN, pars, list = getUserFunctions(), file = dataFile)
rm(xCurrent)
gc()
# The generated R script: user preamble, library()/source() lines, then
# load -> lapply -> save of the uniquely-named output variable
tempScript <- c(
extraScriptLines,
packages,
sources,
paste0("load('", dataFile, "')"),
paste0("output_", id, " <- do.call( lapply, c(list(X = xCurrent, FUN = FUN), pars))" ),
paste0("save(output_", id, ", file='", outDataFile, "')")
)
RscriptFile <- file.path(workingDir, paste0(id, ".Rscript"))
writeLines (tempScript, RscriptFile)
# Bash command that runs the script, preceded by extraBashLines when given
# (e.g. "module load R" on systems where R is not on PATH by default)
if(!is.null(extraBashLines)) {
cmds <- c(cmds, list(c(extraBashLines, paste0("Rscript --vanilla ", RscriptFile))))
} else {
cmds <- c(cmds, list(c(paste0("Rscript --vanilla ", RscriptFile))))
}
}
return(cmds)
}
#' Helper of superApply
#'
#' Merges the results of the independent jobs after the parallel lapply
#' executions have completed.
#'
#' @param files character vector - .outRData file names to be merged
#' @param varNames character vector - name of the variable stored in each file
#' @param workingDir character - directory containing the files
#' @return list - concatenation of all per-job results, in file order
mergeListDir <- function(files, varNames, workingDir) {
  finishedFiles <- files %in% list.files(workingDir)
  if (!all(finishedFiles))
    stop("Not all of the individual task's outputs were found. Unknown error")
  files <- files[finishedFiles]
  varNames <- varNames[finishedFiles]
  finalF <- list()
  for (i in seq_along(files)) {
    # Load each file into a private environment and fetch the stored variable
    # by name; avoids eval(parse()) and cannot clobber local variables
    jobEnv <- new.env(parent = emptyenv())
    load(file.path(workingDir, files[i]), envir = jobEnv)
    finalF <- c(finalF, jobEnv[[varNames[i]]])
  }
  finalF
}
#' Helper of superApply
#'
#' Lists the functions currently defined in the user's global environment, so
#' they can be saved alongside FUN for the worker processes.
#'
#' @return a character vector with the names of the functions in the global
#'   environment
getUserFunctions <- function() {
  globalFuns <- lsf.str(globalenv())
  c(globalFuns)
}
#' Helper of superApply
#'
#' Lists the non-base packages currently attached in this session, so the
#' generated worker scripts can load the same packages.
#'
#' @return a character vector with the names of the attached packages, or
#'   NULL when none are attached
getUserPackages <- function() {
  attached <- sessionInfo()$otherPkgs
  names(attached)
}
#' Helper of superApply
#'
#' Wraps each element of a character vector in an independent call of the
#' given function name, producing code strings for the generated scripts.
#'
#' @param fun character - the function name as a string
#' @param inside character vector - the items to put inside the function call
#'
#' @return character vector of the form "fun(inside[1])" ... "fun(inside[n])",
#'   where n is the length of inside. If inside is NULL an empty string is
#'   returned.
#'
#' @examples
#' createStringFunction("library", c("ggplot2", "dyplr"))
#' #[1] "library(ggplot2)" "library(dyplr)"
#'
#' createStringFunction("library")
#' #[1] ""
createStringFunction <- function(fun, inside = NULL) {
  if (is.null(inside)) {
    return("")
  }
  paste0(fun, "(", inside, ")")
}
#' Helper of superApply
#'
#' Executes cleanup after the user sends an interrupt signal (ctrl+c, ESC):
#' cancels all jobs in the array, waits for the cancellation to take effect,
#' and removes every temporary file created by this superApply run.
#'
#' @param jobArray JobArray - the submitted job array to cancel and clean
#' @param workingDir character - folder containing this run's temporary files
#' @param idPrefix character - file-name prefix identifying this run's files
#' @return invisible NULL; called for its side effects
clean_interruption <- function(jobArray, workingDir, idPrefix) {
cat("\n")
printTime("Sent kill signal, preparing to clean up\n")
printTime("Cancelling jobs\n")
jobArray$cancel()
# Wait (without raising on failure) so files are not removed while jobs may
# still be writing to them
printTime("Waiting for jobs to be cancelled to proceed with file removal\n")
jobArray$wait(stopIfFailed = F, verbose = F)
printTime("Cleaning job Array files\n")
jobArray$clean()
printTime("Cleaning partitioned data\n")
file.remove(list.files(workingDir, full.names = T, pattern = paste0(idPrefix, "*")))
printTime("Cleaning done\n")
return(invisible(NULL))
}
|
/R/superApply.R
|
no_license
|
nemobis/rSubmitter
|
R
| false
| false
| 15,471
|
r
|
#' Efficient parallel lapply using a SLURM cluster
#'
#' An easy-to-use form of lapply that emulates parallelization using a SLURM cluster.
#'
#' Mimics the functionality of lapply but implemented
#' in a way that iterations can be submitted as one or more individual
#' jobs to a SLURM cluster.
#' Each job batch, err, out, and script files are stored in a temporary folder. Once
#' all jobs have been submmitted, the function waits for them to finish. When they
#' are done executing, all results from individual jobs will be compiled into a single list.
#'
#' @param x vector/list - FUN will be applied to the elements of this
#' @param FUN function - function to be applied to each element of x
#' @param ... further arguments of FUN
#' @param tasks integer - number of individual parallel jobs to execute
#' @param workingDir string - path to folder that will contain all the temporary files needed for submission, execution, and compilation of inidivudal jobs
#' @param packages character vector - package names to be loaded in individual tasks
#' @param sources character vector - paths to R code to be loaded in individual tasks
#' @param extraBashLines character vector - each element will be added as a line to the inidividual task execution bash script before R gets executed. For instance, here you may want to load R if it is not in your system by default
#' @param extraScriptLines character vector - each element will be added as a line to the individual task execution R script before starting lapply
#' @param clean logical - if TRUE all files created in workingDir will be deleted
#' @param partition character - Partition to use. Equivalent to \code{--partition} of SLURM sbatch
#' @param time character - Time requested for job execution, one accepted format is "HH:MM:SS". Equivalent to \code{--time} of SLURM sbatch
#' @param mem character - Memory requested for job execution, one accepted format is "xG" or "xMB". Equivalent to \code{--mem} of SLURM sbatch
#' @param proc integer - Number of processors requested per task. Equivalent to \code{--cpus-per-task} of SLURM sbatch
#' @param totalProc integer - Number of tasks requested for job. Equivalent to \code{--ntasks} of SLURM sbatch
#' @param nodes integer - Number of nodes requested for job. Equivalent to \code{--nodes} of SLURM sbatch
#' @param email character - email address to send info when job is done. Equivalent to \code{--mail-user=} of SLURM sbatch
#'
#' @return list - results of FUN applied to each element in x
#' @examples
#' \dontrun{
#' #------------------------
#' # Parallel execution of 100 function calls using 4 parellel tasks
#' myFun <- function(x) {
#' #Sys.sleep(10)
#' return(rep(x, 3))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution of 100 function calls using 100 parellel tasks
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 100, workingDir = "~/testSap", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution where a package is required in function calls
#' myFun <- function(x) {
#' return(ggplot(data.frame(x = 1:100, y = (1:100)*x), aes(x = x, y = y )) + geom_point() + ylim(0, 1e4))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", packages = "ggplot2", time = "60", mem = "1G")
#'
#'
#' #------------------------
#' # Parallel execution where R has to be loaded in the system (e.g. in bash `module load R`)
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, workingDir = "~/testSap", time = "60", mem = "1G", extraBashLines = "module load R")
#'
#'
#' #------------------------
#' # Parallel execution where a source is required in function calls
#' # Content of ./customRep.R
#' customRep <- function(x) {
#' return(paste("customFunction", rep(x, 3)))
#' }
#' # Super appply execution
#' myFun <- function(x) {
#' return(customRep(x))
#' }
#'
#' dir.create("~/testSap")
#' sapOut <- superApply(1:100, FUN = myFun, tasks = 4, sources = "./customRep.R", workingDir = "~/testSap", time = "60", mem = "1G")
#'
#' }
#' @export
superApply <- function(x, FUN, ..., tasks = 1, workingDir = getwd(), packages = NULL,
                       sources = NULL, extraBashLines = NULL, extraScriptLines = "",
                       clean = TRUE, partition = NULL, time = NULL, mem = NULL,
                       proc = NULL, totalProc = NULL, nodes = NULL, email = NULL) {
  # --- Input validation ---------------------------------------------------
  if (!is.list(x) && !is.vector(x))
    stop("x has to be a list or a vector")
  if (!is.numeric(tasks))
    stop("tasks has to be numerical")
  if (length(tasks) != 1)
    stop("tasks has to be of length 1")
  if (!is.null(extraBashLines) && !is.character(extraBashLines))
    stop("extraBashLines has to be character or NULL")
  # Random id prefix so temporary files of this run do not clash with other runs
  SAP_PREFIX <- "sAp_"
  idPrefix <- paste0(c(SAP_PREFIX, sample(letters, size = 3), sample(0:9, size = 1)), collapse = "")
  workingDir <- path.expand(workingDir)
  FUN <- match.fun(FUN)
  # sbatch options forwarded to the JobArray constructor
  JobArrayPars <- list(outDir = workingDir, partition = partition, time = time,
                       mem = mem, proc = proc, totalProc = totalProc,
                       nodes = nodes, email = email)
  # Start/end indices that split x into at most `tasks` chunks (one job each)
  partitionIndeces <- getPartitionIndeces(x, tasks = tasks)
  printTime("Partitioning function calls\n")
  jobArray <- getJobArray(x, FUN, ..., partitionIndeces = partitionIndeces,
                          idPrefix = idPrefix, workingDir = workingDir,
                          extraScriptLines = extraScriptLines,
                          extraBashLines = extraBashLines,
                          JobArrayPars = JobArrayPars,
                          packages = packages, sources = sources)
  # Submit and block until done; a user interrupt (ctrl+c / ESC) cancels the
  # jobs and removes the temporary files before returning NULL invisibly
  printTime("Submitting parallel Jobs\n")
  jobArray$submit()
  submission <- tryCatch({
    jobArray$wait(stopIfFailed = TRUE)
  }, interrupt = function(i) {
    clean_interruption(jobArray, workingDir, idPrefix)
    NULL
  })
  if (is.null(submission))
    return(invisible(NULL))
  # Each job saved its result as output_<jobName> in <jobName>.outRData;
  # concatenate them all into one list, in job order
  printTime("Merging parallel results\n")
  jobNames <- jobArray$getJobNames()
  expectedOutFiles <- paste0(jobNames, ".outRData")
  expectedOutVariables <- paste0("output_", jobNames)
  superApplyResults <- mergeListDir(expectedOutFiles, expectedOutVariables, workingDir)
  printTime("Merge done\n")
  # Optionally delete every temporary file created by this run (anchored
  # regex: only files whose name starts with this run's prefix)
  if (clean) {
    printTime("Cleaning partitioned data\n")
    file.remove(list.files(workingDir, full.names = TRUE, pattern = paste0("^", idPrefix)))
    printTime("Cleaning done\n")
  }
  superApplyResults
}
#' Helper of superApply
#'
#' Computes the start/end indices that split `x` into at most `tasks`
#' contiguous chunks (one chunk per parallel job).
#'
#' If `x` is a single number, that number is taken as the count of times FUN
#' will be executed; in that mode the caller's FUN is expected to ignore its
#' first argument.
#'
#' @param x vector/list - data to be partitioned, or a single numeric count
#' @param tasks integer - desired number of parallel chunks
#' @return list with numeric vectors `iStart` and `iEnd` (same length,
#'   element i delimiting chunk i inclusively)
getPartitionIndeces <- function(x, tasks = tasks) {
  if (!is.vector(x)) {
    # Coerce non-vector objects so length() reflects the number of elements
    x <- as.list(x)
    times <- length(x)
  } else if (length(x) == 1 && is.numeric(x)) {
    # A single number means "execute FUN x times"
    times <- x
  } else {
    times <- length(x)
  }
  # Chunk size: ceiling so `times` elements fit in at most `tasks` chunks
  jobsPerTask <- ceiling(times / tasks)
  iStart <- seq(1, times, jobsPerTask)
  iEnd <- seq(jobsPerTask, times, jobsPerTask)
  # seq() falls short of `times` when it is not a multiple of the chunk
  # size; extend with a final (shorter) chunk covering the remainder
  if (iEnd[length(iEnd)] < times)
    iEnd <- c(iEnd, times)
  list(iStart = iStart, iEnd = iEnd)
}
#' Helper of superApply
#'
#' Builds a JobArray from the partitions of x created by getPartitionIndeces().
#' The array is returned unsubmitted; the caller submits and waits on it.
#'
#' @param x list/vector - data to be partitioned
#' @param FUN function - function to be applied to each element of x
#' @param ... further arguments of FUN
#' @param idPrefix character - prefix for job names
#' @param partitionIndeces list - output of getPartitionIndeces()
#' @param workingDir character - folder for temporary job files
#' @param extraScriptLines character - lines prepended to each generated R script
#' @param extraBashLines character - lines prepended to each bash command
#' @param JobArrayPars list - sbatch options forwarded to JobArray$new
#' @param packages character - packages to load in each task
#' @param sources character - files to source in each task
#' @return a JobArray object (not yet submitted)
getJobArray <- function(x, FUN, ..., idPrefix, partitionIndeces, workingDir,
                        extraScriptLines, extraBashLines, JobArrayPars,
                        packages, sources) {
  # Make sure the working directory exists before writing anything into it
  dir.create(workingDir, showWarnings = FALSE, recursive = TRUE)
  # Extend the prefix with extra random characters so repeated calls with the
  # same prefix do not collide
  idPrefix <- paste0(c(idPrefix, sample(letters, size = 3), sample(0:9, size = 1)), collapse = "")
  # Remove leftovers from a previous run with this id. unlink() expands the
  # "*" wildcard itself, is portable, and never errors on missing files --
  # a safer replacement for shelling out to `rm`
  unlink(file.path(workingDir, paste0(idPrefix, "*")))
  iStart <- partitionIndeces$iStart
  iEnd <- partitionIndeces$iEnd
  # One R script (plus bash wrapper command) per partition
  jobScripts <- createJobScriptsData(x, FUN = FUN, ..., idPrefix = idPrefix,
                                     iStart = iStart, iEnd = iEnd,
                                     workingDir = workingDir,
                                     extraScriptLines = extraScriptLines,
                                     extraBashLines = extraBashLines,
                                     packages = packages, sources = sources)
  JobArrayPars <- c(list(commandList = jobScripts, jobName = idPrefix), JobArrayPars)
  do.call(JobArray$new, JobArrayPars)
}
#' Helper of superApply
#'
#' Takes a vector/list x, a function FUN and extra parameters (...) and creates
#' one R script per partition that executes lapply on that slice of x using FUN.
#' Scripts and their input .applyRData files are saved in workingDir; each
#' generated script saves its result into an .outRData file that
#' mergeListDir() later collects.
#'
#' @param x - vector/list - data to which lapply will be executed
#' @param FUN - function - function to be applied to x
#' @param ... - extra parameters passed to FUN
#' @param idPrefix character - prefix for job names
#' @param iStart numeric vector - start indeces where partitions were done on x
#' @param iEnd numeric vector - end indeces where partitions were done on x
#' @param workingDir character - folder where scripts and data files are written
#' @param extraScriptLines character - lines prepended to each generated R script
#' @param extraBashLines character - lines prepended to each bash command
#' @param packages character - packages to load in each task; NULL means
#'   "replicate the packages currently attached in this session"; "" means none
#' @param sources character - paths to source in each task
#' @return list of character vectors - one bash command (optionally preceded by
#'   extraBashLines) per partition, ready to hand to the JobArray
createJobScriptsData <- function(x, FUN, ..., idPrefix, iStart, iEnd, workingDir, extraScriptLines = "", extraBashLines = "", packages = NULL, sources = NULL) {
cmds <- list()
FUN <- match.fun(FUN)
# Build the library() lines for the generated scripts. NULL means mirror the
# session's attached packages; explicit user packages are also loaded here in
# the current session so a bad package name fails early, not on the cluster.
if(is.null(packages)) {
packages <- createStringFunction ("library", getUserPackages())
} else if (packages == "") {
packages <- packages
} else {
packages <- createStringFunction ("library", packages)
eval(parse(text = paste(packages, collapse = ";")))
}
# Build the source() lines, and validate them by sourcing into a throwaway
# environment so the current session's globals are not polluted.
if(!is.null(sources)) {
tempEnv <- new.env()
sources <- paste0('"', sources, '"')
sourcesLocal <- createStringFunction ("source", paste(sources, ", chdir = T, local = tempEnv"))
sources <- createStringFunction ("source", paste(sources, ", chdir = T"))
eval(parse(text = paste(sourcesLocal, collapse = ";")))
rm(tempEnv)
}
# One script per partition: save the slice + FUN + extra args to disk, then
# write an R script that loads them, runs lapply, and saves the named output.
for(i in 1:length(iStart)) {
id <- paste0(idPrefix, "_", i)
xCurrent <- x[iStart[i]:iEnd[i]]
flush.console()
# File for the job's result, and file for the job's input data
outDataFile <- file.path(workingDir, paste0(id, ".outRData"))
dataFile <- file.path(workingDir, paste0(id, ".applyRData"))
# Saving RData files used in script; global-environment functions are
# included so FUN's helpers exist in the worker process
pars <- list(...)
save(xCurrent, FUN, pars, list = getUserFunctions(), file = dataFile)
rm(xCurrent)
gc()
# The generated R script: user preamble, library()/source() lines, then
# load -> lapply -> save of the uniquely-named output variable
tempScript <- c(
extraScriptLines,
packages,
sources,
paste0("load('", dataFile, "')"),
paste0("output_", id, " <- do.call( lapply, c(list(X = xCurrent, FUN = FUN), pars))" ),
paste0("save(output_", id, ", file='", outDataFile, "')")
)
RscriptFile <- file.path(workingDir, paste0(id, ".Rscript"))
writeLines (tempScript, RscriptFile)
# Bash command that runs the script, preceded by extraBashLines when given
# (e.g. "module load R" on systems where R is not on PATH by default)
if(!is.null(extraBashLines)) {
cmds <- c(cmds, list(c(extraBashLines, paste0("Rscript --vanilla ", RscriptFile))))
} else {
cmds <- c(cmds, list(c(paste0("Rscript --vanilla ", RscriptFile))))
}
}
return(cmds)
}
#' Helper of superApply
#'
#' Merges the results of the independent jobs after the parallel lapply
#' executions have completed.
#'
#' @param files character vector - .outRData file names to be merged
#' @param varNames character vector - name of the variable stored in each file
#' @param workingDir character - directory containing the files
#' @return list - concatenation of all per-job results, in file order
mergeListDir <- function(files, varNames, workingDir) {
  finishedFiles <- files %in% list.files(workingDir)
  if (!all(finishedFiles))
    stop("Not all of the individual task's outputs were found. Unknown error")
  files <- files[finishedFiles]
  varNames <- varNames[finishedFiles]
  finalF <- list()
  for (i in seq_along(files)) {
    # Load each file into a private environment and fetch the stored variable
    # by name; avoids eval(parse()) and cannot clobber local variables
    jobEnv <- new.env(parent = emptyenv())
    load(file.path(workingDir, files[i]), envir = jobEnv)
    finalF <- c(finalF, jobEnv[[varNames[i]]])
  }
  finalF
}
#' Helper of superApply
#'
#' Lists the functions currently defined in the user's global environment, so
#' they can be saved alongside FUN for the worker processes.
#'
#' @return a character vector with the names of the functions in the global
#'   environment
getUserFunctions <- function() {
  globalFuns <- lsf.str(globalenv())
  c(globalFuns)
}
#' Helper of superApply
#'
#' Lists the non-base packages currently attached in this session, so the
#' generated worker scripts can load the same packages.
#'
#' @return a character vector with the names of the attached packages, or
#'   NULL when none are attached
getUserPackages <- function() {
  attached <- sessionInfo()$otherPkgs
  names(attached)
}
#' Helper of superApply
#'
#' Wraps each element of a character vector in an independent call of the
#' given function name, producing code strings for the generated scripts.
#'
#' @param fun character - the function name as a string
#' @param inside character vector - the items to put inside the function call
#'
#' @return character vector of the form "fun(inside[1])" ... "fun(inside[n])",
#'   where n is the length of inside. If inside is NULL an empty string is
#'   returned.
#'
#' @examples
#' createStringFunction("library", c("ggplot2", "dyplr"))
#' #[1] "library(ggplot2)" "library(dyplr)"
#'
#' createStringFunction("library")
#' #[1] ""
createStringFunction <- function(fun, inside = NULL) {
  if (is.null(inside)) {
    return("")
  }
  paste0(fun, "(", inside, ")")
}
#' Helper of superApply
#'
#' Executes cleanup after the user sends an interrupt signal (ctrl+c, ESC):
#' cancels all jobs in the array, waits for the cancellation to take effect,
#' and removes every temporary file created by this superApply run.
#'
#' @param jobArray JobArray - the submitted job array to cancel and clean
#' @param workingDir character - folder containing this run's temporary files
#' @param idPrefix character - file-name prefix identifying this run's files
#' @return invisible NULL; called for its side effects
clean_interruption <- function(jobArray, workingDir, idPrefix) {
cat("\n")
printTime("Sent kill signal, preparing to clean up\n")
printTime("Cancelling jobs\n")
jobArray$cancel()
# Wait (without raising on failure) so files are not removed while jobs may
# still be writing to them
printTime("Waiting for jobs to be cancelled to proceed with file removal\n")
jobArray$wait(stopIfFailed = F, verbose = F)
printTime("Cleaning job Array files\n")
jobArray$clean()
printTime("Cleaning partitioned data\n")
file.remove(list.files(workingDir, full.names = T, pattern = paste0(idPrefix, "*")))
printTime("Cleaning done\n")
return(invisible(NULL))
}
|
# Interaction testing: each model below adds one two-way interaction to the
# adjusted main-effects model and is compared against it with a Chi-square
# likelihood-ratio test (anova/lrtest). Requires dta_label and the adjusted
# models (lm.before.adj, ...) from earlier in the analysis.
library(lmtest)
# NOTE: a stray bare symbol `testing` was removed here; it raised
# "object 'testing' not found" when the script was sourced.

# Before lockdown ----------------------------------------------------------
# sex:age
before.sexage.int <- glm(LGO_before ~ socialgrade + ethnicity
                         + dogs + sex * age,
                         family = binomial, weights = weight, data = dta_label)
summary(before.sexage.int)
anova(lm.before.adj, before.sexage.int, test = "Chi")
lrtest(lm.before.adj, before.sexage.int)
allEffects(before.sexage.int)
# Before lockdown (continued): each model adds a single two-way interaction
# to the adjusted main-effects model; anova(..., test = "Chi") compares it
# against lm.before.adj. allEffects() (effects package) is inspected where
# the interaction looked meaningful.
#sex:ethnicity
before.sexeth.int <- glm(LGO_before ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.sexeth.int)
anova(lm.before.adj, before.sexeth.int, test="Chi")
allEffects(before.sexeth.int)
#sex:sg
before.sgsex.int <- glm(LGO_before ~ age + ethnicity
+ dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(before.sgsex.int)
anova(lm.before.adj, before.sgsex.int, test="Chi")
#sg:ethnicity
before.sgeth.int <- glm(LGO_before ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.sgeth.int)
anova(lm.before.adj, before.sgeth.int, test="Chi")
#sg:age
before.sgage.int <- glm(LGO_before ~ sex + ethnicity
+ dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(before.sgage.int)
anova(lm.before.adj, before.sgage.int, test="Chi")
#age:eth
before.ageeth.int <- glm(LGO_before ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.ageeth.int)
anova(lm.before.adj, before.ageeth.int, test="Chi")
allEffects(before.ageeth.int)
#sex:dog owner
before.sexdog.int <- glm(LGO_before ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.sexdog.int)
anova(lm.before.adj, before.sexdog.int, test="Chi")
#sg : dogs
before.sgdog.int <- glm(LGO_before ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.sgdog.int)
anova(lm.before.adj, before.sgdog.int, test="Chi")
#age: dogs
before.agedog.int <- glm(LGO_before ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.agedog.int)
anova(lm.before.adj, before.agedog.int, test="Chi")
#eth: dogs
before.ethdog.int <- glm(LGO_before ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.ethdog.int)
anova(lm.before.adj, before.ethdog.int, test="Chi")
# After lockdown: same interaction screen against the adjusted after-lockdown
# model lm.after.adj, with outcome LGO_after.
# After lockdown ***********************************************************
dta_label$LGO_after <- as.factor(dta_label$LGO_after)
class(dta_label$LGO_after)
# sex:sg
after.sexsg.int <- glm(LGO_after ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(after.sexsg.int)
anova(lm.after.adj, after.sexsg.int, test="Chi")
#lrtest(lm.after.adj, after.sexsg.int)
# sex:age
after.sexage.int <- glm(LGO_after ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(after.sexage.int)
anova(lm.after.adj, after.sexage.int , test="Chi")
#sex:ethnicity
after.sexeth.int <- glm(LGO_after ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.sexeth.int)
anova(lm.after.adj, after.sexeth.int, test="Chi")
#sg:ethnicity
after.sgeth.int <- glm(LGO_after ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.sgeth.int)
anova(lm.after.adj, after.sgeth.int, test="Chi")
allEffects(after.sgeth.int)
#sg:age
after.sgage.int <- glm(LGO_after ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(after.sgage.int)
anova(lm.after.adj, after.sgage.int, test="Chi")
#lrtest(lm.after.adj, after.sgage.int)
#age:eth
after.ageeth.int <- glm(LGO_after ~ sex + socialgrade + dogs
+ age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.ageeth.int)
anova(lm.after.adj, after.ageeth.int, test="Chi")
#sex:dog owner
after.sexdog.int <- glm(LGO_after ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.sexdog.int)
anova(lm.after.adj, after.sexdog.int, test="Chi")
#sg : dogs
after.sgdog.int <- glm(LGO_after ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.sgdog.int)
anova(lm.after.adj, after.sgdog.int, test="Chi")
#age: dogs
after.agedog.int <- glm(LGO_after ~ sex + ethnicity
+ socialgrade + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.agedog.int)
anova(lm.after.adj, after.agedog.int, test="Chi")
#eth: dogs
after.ethdog.int <- glm(LGO_after ~ age + sex
+ socialgrade + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.ethdog.int)
anova(lm.after.adj, after.ethdog.int, test="Chi")
# Visits increased after lockdown: interaction screen against the adjusted
# model lm.increase.adj, outcome visit_increase.
#******************************************
#Increased after lockdown?
dta_label$visit_increase <- as.factor(dta_label$visit_increase)
class(dta_label$visit_increase)
# sex:sg
increase.sexsg.int <- glm(visit_increase ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexsg.int)
anova(lm.increase.adj, increase.sexsg.int, test="Chi")
# sex:age
increase.sexage.int <- glm(visit_increase ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexage.int)
anova(lm.increase.adj, increase.sexage.int, test="Chi")
#sex:ethnicity
increase.sexeth.int <- glm(visit_increase ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexeth.int)
anova(lm.increase.adj, increase.sexeth.int, test="Chi")
#sg:ethnicity
increase.sgeth.int <- glm(visit_increase ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgeth.int)
anova(lm.increase.adj, increase.sgeth.int, test="Chi")
# Confidence intervals inspected for this (apparently notable) interaction
confint(increase.sgeth.int, level=.95)
#sg:age
increase.sgage.int <- glm(visit_increase ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgage.int)
anova(lm.increase.adj, increase.sgage.int, test="Chi")
#age:eth
increase.ageeth.int <- glm(visit_increase ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight,data=dta_label)
summary(increase.ageeth.int)
anova(lm.increase.adj, increase.ageeth.int, test="Chi")
table(dta_label$age)
#sex:dog owner
increase.sexdog.int <- glm(visit_increase ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexdog.int)
anova(lm.increase.adj, increase.sexdog.int, test="Chi")
# sg:dog
increase.sgdog.int <- glm(visit_increase ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgdog.int)
anova(lm.increase.adj, increase.sgdog.int, test="Chi")
# age:dog
increase.agedog.int <- glm(visit_increase ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.agedog.int)
anova(lm.increase.adj, increase.agedog.int, test="Chi")
#eth:dog
increase.ethdog.int <- glm(visit_increase ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.ethdog.int)
anova(lm.increase.adj, increase.ethdog.int, test="Chi")
# Visits decreased after lockdown: interaction screen against the adjusted
# model lm.decrease.adj, outcome visit_decr.
# Decreased after lockdown ***********************************************************
# sex:sg
decr.sexsg.int <- glm(visit_decr ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexsg.int)
anova(lm.decrease.adj, decr.sexsg.int, test="Chi")
# sex:age
decr.sexage.int <- glm(visit_decr ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexage.int)
anova(lm.decrease.adj, decr.sexage.int, test="Chi")
#sex:ethnicity
# NOTE(review): the leading "+" right after "~" is a harmless unary plus
decr.sexeth.int <- glm(visit_decr ~ + socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexeth.int)
anova(lm.decrease.adj, decr.sexeth.int, test="Chi")
#sg:ethnicity
decr.sgeth.int <- glm(visit_decr ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgeth.int)
anova(lm.decrease.adj, decr.sgeth.int, test="Chi")
#sg:age
decr.sgage.int <- glm(visit_decr ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgage.int)
anova(lm.decrease.adj, decr.sgage.int, test="Chi")
#age:eth
decr.ageeth.int <- glm(visit_decr ~ sex + socialgrade +
dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.ageeth.int)
anova(lm.decrease.adj, decr.ageeth.int, test="Chi")
allEffects(decr.ageeth.int)
table(dta_label$age)
#sex:dog owner
decr.sexdog.int <- glm(visit_decr ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexdog.int)
anova(lm.decrease.adj, decr.sexdog.int, test="Chi")
# sg:dog
decr.sgdog.int <- glm(visit_decr ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgdog.int)
anova(lm.decrease.adj, decr.sgdog.int, test="Chi")
# age:dog
decr.agedog.int <- glm(visit_decr ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.agedog.int)
anova(lm.decrease.adj, decr.agedog.int, test="Chi")
#eth:dog
decr.ethdog.int <- glm(visit_decr ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.ethdog.int)
anova(lm.decrease.adj, decr.ethdog.int, test="Chi")
# MH benefit***********************************************************
# Interaction screens for the binary outcome `mentalhealth_agree`; each
# model adds one two-way interaction and is compared against the adjusted
# main-effects model `lm.MHbenefit.adj` with a chi-square deviance test.
# NOTE(review): outcome presumably codes agreement that greenspace benefits
# mental health — confirm against the survey codebook. Assumes `dta_label`,
# `weight`, and `lm.MHbenefit.adj` exist in the workspace.
# sex:sg
MH.sexsg.int <- glm(mentalhealth_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexsg.int)
anova(lm.MHbenefit.adj, MH.sexsg.int, test="Chi")
# sex:age
MH.sexage.int <- glm(mentalhealth_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexage.int)
anova(lm.MHbenefit.adj, MH.sexage.int, test="Chi")
#sex:ethnicity
MH.sexeth.int <- glm(mentalhealth_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexeth.int)
anova(lm.MHbenefit.adj, MH.sexeth.int, test="Chi")
#sg:ethnicity
MH.sgeth.int <- glm(mentalhealth_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgeth.int)
anova(lm.MHbenefit.adj, MH.sgeth.int, test="Chi")
#sg:age
MH.sgage.int <- glm(mentalhealth_agree ~ sex +
ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgage.int)
anova(lm.MHbenefit.adj, MH.sgage.int, test="Chi")
#age:eth
MH.ageeth.int <- glm(mentalhealth_agree ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.ageeth.int)
anova(lm.MHbenefit.adj, MH.ageeth.int, test="Chi")
# Sanity-check age cell sizes before interpreting the age interaction.
table(dta_label$age)
#sex:dog owner
MH.sexdog.int <- glm(mentalhealth_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexdog.int)
anova(lm.MHbenefit.adj, MH.sexdog.int, test="Chi")
# sg:dog
MH.sgdog.int <- glm(mentalhealth_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgdog.int)
anova(lm.MHbenefit.adj, MH.sgdog.int, test="Chi")
# age:dog
MH.agedog.int <- glm(mentalhealth_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.agedog.int)
anova(lm.MHbenefit.adj, MH.agedog.int, test="Chi")
#eth:dog
MH.ethdog.int <- glm(mentalhealth_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.ethdog.int)
anova(lm.MHbenefit.adj, MH.ethdog.int, test="Chi")
# Social int***********************************************************
# Interaction screens for the binary outcome `miss_social_agree`; each
# model adds one two-way interaction and is compared against the adjusted
# main-effects model `lm.socialint.adj` with a chi-square deviance test.
# NOTE(review): outcome presumably codes agreement about missing social
# interaction — confirm against the survey codebook. Assumes `dta_label`,
# `weight`, and `lm.socialint.adj` exist in the workspace.
# sex:sg
SI.sexsg.int <- glm(miss_social_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexsg.int)
anova(lm.socialint.adj, SI.sexsg.int, test="Chi")
# sex:age
SI.sexage.int <- glm(miss_social_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexage.int)
anova(lm.socialint.adj, SI.sexage.int, test="Chi")
#sex:ethnicity
SI.sexeth.int <- glm(miss_social_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexeth.int)
anova(lm.socialint.adj, SI.sexeth.int, test="Chi")
#sg:ethnicity
SI.sgeth.int <- glm(miss_social_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgeth.int)
anova(lm.socialint.adj, SI.sgeth.int, test="Chi")
#sg:age
SI.sgage.int <- glm(miss_social_agree ~ sex +
ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgage.int)
anova(lm.socialint.adj, SI.sgage.int, test="Chi")
#age:eth
SI.ageeth.int <- glm(miss_social_agree ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.ageeth.int)
anova(lm.socialint.adj, SI.ageeth.int, test="Chi")
# Sanity-check age cell sizes before interpreting the age interaction.
table(dta_label$age)
#sex:dog owner
SI.sexdog.int <- glm(miss_social_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexdog.int)
anova(lm.socialint.adj, SI.sexdog.int, test="Chi")
# sg:dog
SI.sgdog.int <- glm(miss_social_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgdog.int)
anova(lm.socialint.adj, SI.sgdog.int, test="Chi")
# age:dog
SI.agedog.int <- glm(miss_social_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.agedog.int)
anova(lm.socialint.adj, SI.agedog.int, test="Chi")
#eth:dog
SI.ethdog.int <- glm(miss_social_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.ethdog.int)
anova(lm.socialint.adj, SI.ethdog.int, test="Chi")
# Increase PA ***********************************************************
# Interaction screens for the binary outcome `increase_PA_agree`
# (increased physical activity); each model adds one two-way interaction
# and is compared against the adjusted main-effects model
# `lm.increasePA.adj` with a chi-square deviance test.
# NOTE(review): assumes `dta_label`, `weight`, and `lm.increasePA.adj`
# exist in the workspace.
# sex*sg
PA.sexsg.int <- glm(increase_PA_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexsg.int)
anova(lm.increasePA.adj, PA.sexsg.int, test="Chi")
# sex*age
PA.sexage.int <- glm(increase_PA_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexage.int)
anova(lm.increasePA.adj, PA.sexage.int, test="Chi")
#sex*ethnicity
PA.sexeth.int <- glm(increase_PA_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexeth.int)
anova(lm.increasePA.adj, PA.sexeth.int, test="Chi")
#sg*ethnicity
PA.sgeth.int <- glm(increase_PA_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgeth.int)
anova(lm.increasePA.adj, PA.sgeth.int, test="Chi")
#sg*age
PA.sgage.int <- glm(increase_PA_agree ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgage.int)
anova(lm.increasePA.adj, PA.sgage.int, test="Chi")
#age*eth
PA.ageeth.int <- glm(increase_PA_agree ~ sex + socialgrade +
dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.ageeth.int)
anova(lm.increasePA.adj, PA.ageeth.int, test="Chi")
# Fixed: was `table(age)` — `age` is a column of dta_label, not a
# free-standing object; now consistent with every other section.
table(dta_label$age)
#sex*dog owner
PA.sexdog.int <- glm(increase_PA_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexdog.int)
anova(lm.increasePA.adj, PA.sexdog.int, test="Chi")
# sg*dog
PA.sgdog.int <- glm(increase_PA_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgdog.int)
anova(lm.increasePA.adj, PA.sgdog.int, test="Chi")
# age*dog
PA.agedog.int <- glm(increase_PA_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.agedog.int)
anova(lm.increasePA.adj, PA.agedog.int, test="Chi")
#eth*dog
PA.ethdog.int <- glm(increase_PA_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.ethdog.int)
anova(lm.increasePA.adj, PA.ethdog.int, test="Chi")
|
/Interactions (log reg models).R
|
no_license
|
hannahburnett/greenspace-covid_code
|
R
| false
| false
| 19,971
|
r
|
#Interactions
# Interaction screening script: for each binary outcome, add one two-way
# interaction at a time to the adjusted main-effects model and test it with
# a chi-square model comparison.
# NOTE(review): assumes `dta_label` (survey data with a `weight` column) and
# the adjusted models (`lm.before.adj`, ...) are created by an earlier
# script; `allEffects()` requires the `effects` package to be attached.
library(lmtest)  # lrtest() for likelihood-ratio model comparisons
# Fixed: removed stray bare symbol `testing`, which would raise
# "object 'testing' not found" and abort a sourced run.
#Before lockdown
#sex:age
before.sexage.int <- glm(LGO_before ~ socialgrade + ethnicity
+ dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(before.sexage.int)
anova(lm.before.adj, before.sexage.int, test="Chi")
lrtest(lm.before.adj, before.sexage.int)
allEffects(before.sexage.int)
# Remaining interaction screens for the `LGO_before` (local greenspace
# visits before lockdown) model, each compared against `lm.before.adj`
# with a chi-square deviance test.
# NOTE(review): `dta_label`, `weight`, and `lm.before.adj` are assumed to
# exist in the workspace.
#sex:ethnicity
before.sexeth.int <- glm(LGO_before ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.sexeth.int)
anova(lm.before.adj, before.sexeth.int, test="Chi")
allEffects(before.sexeth.int)
#sex:sg
before.sgsex.int <- glm(LGO_before ~ age + ethnicity
+ dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(before.sgsex.int)
anova(lm.before.adj, before.sgsex.int, test="Chi")
#sg:ethnicity
before.sgeth.int <- glm(LGO_before ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.sgeth.int)
anova(lm.before.adj, before.sgeth.int, test="Chi")
#sg:age
before.sgage.int <- glm(LGO_before ~ sex + ethnicity
+ dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(before.sgage.int)
anova(lm.before.adj, before.sgage.int, test="Chi")
#age:eth
before.ageeth.int <- glm(LGO_before ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(before.ageeth.int)
anova(lm.before.adj, before.ageeth.int, test="Chi")
allEffects(before.ageeth.int)
#sex:dog owner
before.sexdog.int <- glm(LGO_before ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.sexdog.int)
anova(lm.before.adj, before.sexdog.int, test="Chi")
#sg : dogs
before.sgdog.int <- glm(LGO_before ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.sgdog.int)
anova(lm.before.adj, before.sgdog.int, test="Chi")
#age: dogs
before.agedog.int <- glm(LGO_before ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.agedog.int)
anova(lm.before.adj, before.agedog.int, test="Chi")
#eth: dogs
before.ethdog.int <- glm(LGO_before ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(before.ethdog.int)
anova(lm.before.adj, before.ethdog.int, test="Chi")
# After lockdown ***********************************************************
# Interaction screens for the `LGO_after` outcome, each compared against
# `lm.after.adj` with a chi-square deviance test.
# NOTE(review): `dta_label`, `weight`, and `lm.after.adj` are assumed to
# exist in the workspace.
# Ensure the outcome is a factor so glm(binomial) treats it as binary.
dta_label$LGO_after <- as.factor(dta_label$LGO_after)
class(dta_label$LGO_after)
# sex:sg
after.sexsg.int <- glm(LGO_after ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(after.sexsg.int)
anova(lm.after.adj, after.sexsg.int, test="Chi")
#lrtest(lm.after.adj, after.sexsg.int)
# sex:age
after.sexage.int <- glm(LGO_after ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(after.sexage.int)
anova(lm.after.adj, after.sexage.int , test="Chi")
#sex:ethnicity
after.sexeth.int <- glm(LGO_after ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.sexeth.int)
anova(lm.after.adj, after.sexeth.int, test="Chi")
#sg:ethnicity
after.sgeth.int <- glm(LGO_after ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.sgeth.int)
anova(lm.after.adj, after.sgeth.int, test="Chi")
allEffects(after.sgeth.int)
#sg:age
after.sgage.int <- glm(LGO_after ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(after.sgage.int)
anova(lm.after.adj, after.sgage.int, test="Chi")
#lrtest(lm.after.adj, after.sgage.int)
#age:eth
after.ageeth.int <- glm(LGO_after ~ sex + socialgrade + dogs
+ age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(after.ageeth.int)
anova(lm.after.adj, after.ageeth.int, test="Chi")
#sex:dog owner
after.sexdog.int <- glm(LGO_after ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.sexdog.int)
anova(lm.after.adj, after.sexdog.int, test="Chi")
#sg : dogs
after.sgdog.int <- glm(LGO_after ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.sgdog.int)
anova(lm.after.adj, after.sgdog.int, test="Chi")
#age: dogs
after.agedog.int <- glm(LGO_after ~ sex + ethnicity
+ socialgrade + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.agedog.int)
anova(lm.after.adj, after.agedog.int, test="Chi")
#eth: dogs
after.ethdog.int <- glm(LGO_after ~ age + sex
+ socialgrade + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(after.ethdog.int)
anova(lm.after.adj, after.ethdog.int, test="Chi")
#******************************************
#Increased after lockdown?
# Interaction screens for the `visit_increase` outcome, each compared
# against `lm.increase.adj` with a chi-square deviance test.
# NOTE(review): `dta_label`, `weight`, and `lm.increase.adj` are assumed
# to exist in the workspace.
# Ensure the outcome is a factor so glm(binomial) treats it as binary.
dta_label$visit_increase <- as.factor(dta_label$visit_increase)
class(dta_label$visit_increase)
# sex:sg
increase.sexsg.int <- glm(visit_increase ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexsg.int)
anova(lm.increase.adj, increase.sexsg.int, test="Chi")
# sex:age
increase.sexage.int <- glm(visit_increase ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexage.int)
anova(lm.increase.adj, increase.sexage.int, test="Chi")
#sex:ethnicity
increase.sexeth.int <- glm(visit_increase ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexeth.int)
anova(lm.increase.adj, increase.sexeth.int, test="Chi")
#sg:ethnicity
increase.sgeth.int <- glm(visit_increase ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgeth.int)
anova(lm.increase.adj, increase.sgeth.int, test="Chi")
# Profile-likelihood 95% CIs for the interaction model's coefficients.
confint(increase.sgeth.int, level=.95)
#sg:age
increase.sgage.int <- glm(visit_increase ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgage.int)
anova(lm.increase.adj, increase.sgage.int, test="Chi")
#age:eth
increase.ageeth.int <- glm(visit_increase ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight,data=dta_label)
summary(increase.ageeth.int)
anova(lm.increase.adj, increase.ageeth.int, test="Chi")
# Sanity-check age cell sizes before interpreting the age interaction.
table(dta_label$age)
#sex:dog owner
increase.sexdog.int <- glm(visit_increase ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.sexdog.int)
anova(lm.increase.adj, increase.sexdog.int, test="Chi")
# sg:dog
increase.sgdog.int <- glm(visit_increase ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.sgdog.int)
anova(lm.increase.adj, increase.sgdog.int, test="Chi")
# age:dog
increase.agedog.int <- glm(visit_increase ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.agedog.int)
anova(lm.increase.adj, increase.agedog.int, test="Chi")
#eth:dog
increase.ethdog.int <- glm(visit_increase ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(increase.ethdog.int)
anova(lm.increase.adj, increase.ethdog.int, test="Chi")
# Decreased after lockdown ***********************************************************
# Interaction screens for the binary outcome `visit_decr`: each model adds a
# single two-way interaction to the adjusted main-effects model and is
# compared against `lm.decrease.adj` with a chi-square deviance test.
# NOTE(review): assumes `dta_label`, its `weight` column, and
# `lm.decrease.adj` exist in the workspace; `allEffects()` additionally
# requires the `effects` package to be attached.
# sex:sg
decr.sexsg.int <- glm(visit_decr ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexsg.int)
anova(lm.decrease.adj, decr.sexsg.int, test="Chi")
# sex:age
decr.sexage.int <- glm(visit_decr ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexage.int)
anova(lm.decrease.adj, decr.sexage.int, test="Chi")
#sex:ethnicity
# Fixed: removed stray leading "+" after "~" (harmless to the fit, but
# inconsistent with every other formula in this script).
decr.sexeth.int <- glm(visit_decr ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexeth.int)
anova(lm.decrease.adj, decr.sexeth.int, test="Chi")
#sg:ethnicity
decr.sgeth.int <- glm(visit_decr ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgeth.int)
anova(lm.decrease.adj, decr.sgeth.int, test="Chi")
#sg:age
decr.sgage.int <- glm(visit_decr ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgage.int)
anova(lm.decrease.adj, decr.sgage.int, test="Chi")
#age:eth
decr.ageeth.int <- glm(visit_decr ~ sex + socialgrade +
dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(decr.ageeth.int)
anova(lm.decrease.adj, decr.ageeth.int, test="Chi")
allEffects(decr.ageeth.int)  # effects package: inspect the fitted interaction
table(dta_label$age)         # sanity-check age cell sizes
#sex:dog owner
decr.sexdog.int <- glm(visit_decr ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.sexdog.int)
anova(lm.decrease.adj, decr.sexdog.int, test="Chi")
# sg:dog
decr.sgdog.int <- glm(visit_decr ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.sgdog.int)
anova(lm.decrease.adj, decr.sgdog.int, test="Chi")
# age:dog
decr.agedog.int <- glm(visit_decr ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.agedog.int)
anova(lm.decrease.adj, decr.agedog.int, test="Chi")
#eth:dog
decr.ethdog.int <- glm(visit_decr ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(decr.ethdog.int)
anova(lm.decrease.adj, decr.ethdog.int, test="Chi")
# MH benefit***********************************************************
# Interaction screens for the binary outcome `mentalhealth_agree`; each
# model adds one two-way interaction and is compared against the adjusted
# main-effects model `lm.MHbenefit.adj` with a chi-square deviance test.
# NOTE(review): outcome presumably codes agreement that greenspace benefits
# mental health — confirm against the survey codebook. Assumes `dta_label`,
# `weight`, and `lm.MHbenefit.adj` exist in the workspace.
# sex:sg
MH.sexsg.int <- glm(mentalhealth_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexsg.int)
anova(lm.MHbenefit.adj, MH.sexsg.int, test="Chi")
# sex:age
MH.sexage.int <- glm(mentalhealth_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexage.int)
anova(lm.MHbenefit.adj, MH.sexage.int, test="Chi")
#sex:ethnicity
MH.sexeth.int <- glm(mentalhealth_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexeth.int)
anova(lm.MHbenefit.adj, MH.sexeth.int, test="Chi")
#sg:ethnicity
MH.sgeth.int <- glm(mentalhealth_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgeth.int)
anova(lm.MHbenefit.adj, MH.sgeth.int, test="Chi")
#sg:age
MH.sgage.int <- glm(mentalhealth_agree ~ sex +
ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgage.int)
anova(lm.MHbenefit.adj, MH.sgage.int, test="Chi")
#age:eth
MH.ageeth.int <- glm(mentalhealth_agree ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(MH.ageeth.int)
anova(lm.MHbenefit.adj, MH.ageeth.int, test="Chi")
# Sanity-check age cell sizes before interpreting the age interaction.
table(dta_label$age)
#sex:dog owner
MH.sexdog.int <- glm(mentalhealth_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.sexdog.int)
anova(lm.MHbenefit.adj, MH.sexdog.int, test="Chi")
# sg:dog
MH.sgdog.int <- glm(mentalhealth_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.sgdog.int)
anova(lm.MHbenefit.adj, MH.sgdog.int, test="Chi")
# age:dog
MH.agedog.int <- glm(mentalhealth_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.agedog.int)
anova(lm.MHbenefit.adj, MH.agedog.int, test="Chi")
#eth:dog
MH.ethdog.int <- glm(mentalhealth_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(MH.ethdog.int)
anova(lm.MHbenefit.adj, MH.ethdog.int, test="Chi")
# Social int***********************************************************
# Interaction screens for the binary outcome `miss_social_agree`; each
# model adds one two-way interaction and is compared against the adjusted
# main-effects model `lm.socialint.adj` with a chi-square deviance test.
# NOTE(review): outcome presumably codes agreement about missing social
# interaction — confirm against the survey codebook. Assumes `dta_label`,
# `weight`, and `lm.socialint.adj` exist in the workspace.
# sex:sg
SI.sexsg.int <- glm(miss_social_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexsg.int)
anova(lm.socialint.adj, SI.sexsg.int, test="Chi")
# sex:age
SI.sexage.int <- glm(miss_social_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexage.int)
anova(lm.socialint.adj, SI.sexage.int, test="Chi")
#sex:ethnicity
SI.sexeth.int <- glm(miss_social_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexeth.int)
anova(lm.socialint.adj, SI.sexeth.int, test="Chi")
#sg:ethnicity
SI.sgeth.int <- glm(miss_social_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgeth.int)
anova(lm.socialint.adj, SI.sgeth.int, test="Chi")
#sg:age
SI.sgage.int <- glm(miss_social_agree ~ sex +
ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgage.int)
anova(lm.socialint.adj, SI.sgage.int, test="Chi")
#age:eth
SI.ageeth.int <- glm(miss_social_agree ~ sex + socialgrade
+ dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(SI.ageeth.int)
anova(lm.socialint.adj, SI.ageeth.int, test="Chi")
# Sanity-check age cell sizes before interpreting the age interaction.
table(dta_label$age)
#sex:dog owner
SI.sexdog.int <- glm(miss_social_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.sexdog.int)
anova(lm.socialint.adj, SI.sexdog.int, test="Chi")
# sg:dog
SI.sgdog.int <- glm(miss_social_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.sgdog.int)
anova(lm.socialint.adj, SI.sgdog.int, test="Chi")
# age:dog
SI.agedog.int <- glm(miss_social_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.agedog.int)
anova(lm.socialint.adj, SI.agedog.int, test="Chi")
#eth:dog
SI.ethdog.int <- glm(miss_social_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(SI.ethdog.int)
anova(lm.socialint.adj, SI.ethdog.int, test="Chi")
# Increase PA ***********************************************************
# Interaction screens for the binary outcome `increase_PA_agree`
# (increased physical activity); each model adds one two-way interaction
# and is compared against the adjusted main-effects model
# `lm.increasePA.adj` with a chi-square deviance test.
# NOTE(review): assumes `dta_label`, `weight`, and `lm.increasePA.adj`
# exist in the workspace.
# sex*sg
PA.sexsg.int <- glm(increase_PA_agree ~ age
+ ethnicity + dogs + sex*socialgrade,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexsg.int)
anova(lm.increasePA.adj, PA.sexsg.int, test="Chi")
# sex*age
PA.sexage.int <- glm(increase_PA_agree ~ socialgrade
+ ethnicity + dogs + sex*age,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexage.int)
anova(lm.increasePA.adj, PA.sexage.int, test="Chi")
#sex*ethnicity
PA.sexeth.int <- glm(increase_PA_agree ~ socialgrade + age
+ dogs + sex*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexeth.int)
anova(lm.increasePA.adj, PA.sexeth.int, test="Chi")
#sg*ethnicity
PA.sgeth.int <- glm(increase_PA_agree ~ sex + age
+ dogs + socialgrade*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgeth.int)
anova(lm.increasePA.adj, PA.sgeth.int, test="Chi")
#sg*age
PA.sgage.int <- glm(increase_PA_agree ~ sex
+ ethnicity + dogs + socialgrade*age,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgage.int)
anova(lm.increasePA.adj, PA.sgage.int, test="Chi")
#age*eth
PA.ageeth.int <- glm(increase_PA_agree ~ sex + socialgrade +
dogs + age*ethnicity,
family = binomial, weights = weight, data=dta_label)
summary(PA.ageeth.int)
anova(lm.increasePA.adj, PA.ageeth.int, test="Chi")
# Fixed: was `table(age)` — `age` is a column of dta_label, not a
# free-standing object; now consistent with every other section.
table(dta_label$age)
#sex*dog owner
PA.sexdog.int <- glm(increase_PA_agree ~ age + ethnicity
+ socialgrade + sex*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.sexdog.int)
anova(lm.increasePA.adj, PA.sexdog.int, test="Chi")
# sg*dog
PA.sgdog.int <- glm(increase_PA_agree ~ age + ethnicity
+ sex + socialgrade*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.sgdog.int)
anova(lm.increasePA.adj, PA.sgdog.int, test="Chi")
# age*dog
PA.agedog.int <- glm(increase_PA_agree ~ socialgrade + ethnicity
+ sex + age*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.agedog.int)
anova(lm.increasePA.adj, PA.agedog.int, test="Chi")
#eth*dog
PA.ethdog.int <- glm(increase_PA_agree ~ socialgrade + age
+ sex + ethnicity*dogs,
family = binomial, weights = weight, data=dta_label)
summary(PA.ethdog.int)
anova(lm.increasePA.adj, PA.ethdog.int, test="Chi")
|
#' Poisson regression via stan
#'
#' [rstanarm::stan_glm()] uses Bayesian estimation to fit a model for
#' count data.
#'
#' @includeRmd man/rmd/poisson_reg_stan.md details
#'
#' @name details_poisson_reg_stan
#' @keywords internal
NULL
# See inst/README-DOCS.md for a description of how these files are processed
|
/R/poisson_reg_stan.R
|
permissive
|
tidymodels/parsnip
|
R
| false
| false
| 315
|
r
|
#' Poisson regression via stan
#'
#' [rstanarm::stan_glm()] uses Bayesian estimation to fit a model for
#' count data.
#'
#' @includeRmd man/rmd/poisson_reg_stan.md details
#'
#' @name details_poisson_reg_stan
#' @keywords internal
NULL
# See inst/README-DOCS.md for a description of how these files are processed
|
\name{schools}
\alias{schools}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Schools data set (NELS-88)
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
Data set used by Kreft and De Leeuw in their book \emph{Introducing Multilevel Modeling, Sage (1988)} to analyse the relationship between math score and time spent by students to do math homework.
The data set is a subsample of NELS-88 data consisting of 10 handpicked schools from the 1003 schools in the full data set. Students are nested within schools and information is available both at the school and student level.
}
\usage{data("schools")}
\format{
A data frame with 260 observations on the following 19 variables.
\describe{
\item{\code{schid}}{School ID: a numeric vector identifying each school.}
\item{\code{stuid}}{The student ID.}
\item{\code{ses}}{Socioeconomic status.}
\item{\code{meanses}}{Mean ses for the school.}
\item{\code{homework}}{The number of hours spent weekly doing homework.}
\item{\code{white}}{A dummy for white race (=1) versus non-white (=0).}
\item{\code{parented}}{Parents' highest education level.}
\item{\code{public}}{Public school: 1=public, 0=non public.}
\item{\code{ratio}}{Student-teacher ratio.}
\item{\code{percmin}}{Percent minority in school.}
\item{\code{math}}{Math score}
\item{\code{sex}}{Sex: 1=male, 2=female.}
\item{\code{race}}{Race of student, 1=asian, 2=Hispanic, 3=Black, 4=White, 5=Native American.}
\item{\code{sctype}}{Type of school: 1=public, 2=catholic, 3= Private other religion, 4=Private non-r.}
\item{\code{cstr}}{Classroom environment structure: ordinal from 1=not accurate to 5=very much accurate.}
\item{\code{scsize}}{School size: ordinal from 1=[1,199) to 7=[1200+).}
\item{\code{urban}}{Urbanicity: 1=Urban, 2=Suburban, 3=Rural.}
\item{\code{region}}{Geographic region of the school: NE=1,NC=2,South=3,West=4.}
\item{\code{schnum}}{Standardized school ID.}
}
}
% \details{
% %% ~~ If necessary, more details than the __description__ above ~~
% }
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
Ita G G Kreft, Jan De Leeuw 1988. Introducing Multilevel Modeling, Sage
%http://gifi.stat.ucla.edu/janspubs/1998/books/kreft_deleeuw_B_98.pdf
National Education Longitudinal Study of 1988 (NELS:88): https://nces.ed.gov/surveys/nels88/
}
% \references{
% %% ~~ possibly secondary sources and usages ~~
% }
\examples{
data(schools)
# Kreft and De Leeuw, Introducing Multilevel Modeling, Sage (1988).
# The data set is the subsample of NELS-88 data consisting of 10 handpicked schools
# from the 1003 schools in the full data set.
# Suppose that the effect of homeworks on math score is unconfounded conditional on X and
# unobserved school features (we assume this only for illustrative purposes)
# Let us consider the following variables:
X<-schools$ses #X<-as.matrix(schools[,c("ses","white","public")])
Y<-schools$math
Tr<-ifelse(schools$homework>1,1,0)
Group<-schools$schid
# Note that when Group is missing, NULL or there is only one Group the function
# returns the output of the Match function with a warning.
# Let us assume that the effect of homeworks (Tr) on math score (Y)
# is unconfounded conditional on X and other unobserved schools features.
# Several strategies to handle unobserved group characteristics
# are described in Arpino & Cannas, 2016 (see References).
# Multivariate Matching on covariates in X
#(default parameters: one-to-one matching on X with replacement with a caliper of 0.25).
### Matching within schools
mw<-MatchW(Y=Y, Tr=Tr, X=X, Group=Group, caliper=0.1)
# compare balance before and after matching
bmw <- MatchBalance(Tr~X,data=schools,match.out=mw)
# calculate proportion of matched observations
(mw$orig.treated.nobs-mw$ndrops)/mw$orig.treated.nobs
# check number of drops by school
mw$orig.ndrops.by.group
# examine output
mw # complete list of results
summary(mw) # basic statistics
#### Propensity score matching
# estimate the propensity score (ps) model
mod <- glm(Tr~ses+parented+public+sex+race+urban,
family=binomial(link="logit"),data=schools)
eps <- fitted(mod)
# eg 1: within-school propensity score matching
psmw <- MatchW(Y=schools$math, Tr=Tr, X=eps, Group=schools$schid, caliper=0.1)
# We can use other strategies for controlling unobserved cluster covariates
# by using different specifications of ps (see Arpino and Mealli for details):
# eg 2: standard propensity score matching using ps estimated
# from a logit model with dummies for schools
mod <- glm(Tr ~ ses + parented + public + sex + race + urban
+schid - 1,family=binomial(link="logit"),data=schools)
eps <- fitted(mod)
dpsm <- MatchW(Y=schools$math, Tr=Tr, X=eps, caliper=0.1)
# this is equivalent to run Match with X=eps
# eg3: standard propensity score matching using ps estimated from
# multilevel logit model (random intercept at the school level)
require(lme4)
mod<-glmer(Tr ~ ses + parented + public + sex + race + urban + (1|schid),
family=binomial(link="logit"), data=schools)
eps <- fitted(mod)
mpsm<-MatchW(Y=schools$math, Tr=Tr, X=eps, Group=NULL, caliper=0.1)
# this is equivalent to run Match with X=eps
}
\keyword{school dataset (NELS-88)}
|
/man/schools.Rd
|
no_license
|
gintian/CMatching
|
R
| false
| false
| 5,350
|
rd
|
\name{schools}
\alias{schools}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Schools data set (NELS-88)
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
Data set used by Kreft and De Leeuw in their book \emph{Introducing Multilevel Modeling, Sage (1988)} to analyse the relationship between math score and time spent by students to do math homework.
The data set is a subsample of NELS-88 data consisting of 10 handpicked schools from the 1003 schools in the full data set. Students are nested within schools and information is available both at the school and student level.
}
\usage{data("schools")}
\format{
A data frame with 260 observations on the following 19 variables.
\describe{
\item{\code{schid}}{School ID: a numeric vector identifying each school.}
\item{\code{stuid}}{The student ID.}
\item{\code{ses}}{Socioeconomic status.}
\item{\code{meanses}}{Mean ses for the school.}
\item{\code{homework}}{The number of hours spent weekly doing homework.}
\item{\code{white}}{A dummy for white race (=1) versus non-white (=0).}
\item{\code{parented}}{Parents' highest education level.}
\item{\code{public}}{Public school: 1=public, 0=non public.}
\item{\code{ratio}}{Student-teacher ratio.}
\item{\code{percmin}}{Percent minority in school.}
\item{\code{math}}{Math score}
\item{\code{sex}}{Sex: 1=male, 2=female.}
\item{\code{race}}{Race of student, 1=asian, 2=Hispanic, 3=Black, 4=White, 5=Native American.}
\item{\code{sctype}}{Type of school: 1=public, 2=catholic, 3= Private other religion, 4=Private non-r.}
\item{\code{cstr}}{Classroom environment structure: ordinal from 1=not accurate to 5=very much accurate.}
\item{\code{scsize}}{School size: ordinal from 1=[1,199) to 7=[1200+).}
\item{\code{urban}}{Urbanicity: 1=Urban, 2=Suburban, 3=Rural.}
\item{\code{region}}{Geographic region of the school: NE=1,NC=2,South=3,West=4.}
\item{\code{schnum}}{Standardized school ID.}
}
}
% \details{
% %% ~~ If necessary, more details than the __description__ above ~~
% }
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
Ita G G Kreft, Jan De Leeuw 1998. Introducing Multilevel Modeling, Sage
%http://gifi.stat.ucla.edu/janspubs/1998/books/kreft_deleeuw_B_98.pdf
National Education Longitudinal Study of 1988 (NELS:88): https://nces.ed.gov/surveys/nels88/
}
% \references{
% %% ~~ possibly secondary sources and usages ~~
% }
\examples{
data(schools)
# Kreft and De Leeuw, Introducing Multilevel Modeling, Sage (1998).
# The data set is the subsample of NELS-88 data consisting of 10 handpicked schools
# from the 1003 schools in the full data set.
# Suppose that the effect of homeworks on math score is unconfounded conditional on X and
# unobserved school features (we assume this only for illustrative purposes)
# Let us consider the following variables:
X<-schools$ses #X<-as.matrix(schools[,c("ses","white","public")])
Y<-schools$math
Tr<-ifelse(schools$homework>1,1,0)
Group<-schools$schid
# Note that when Group is missing, NULL or there is only one Group the function
# returns the output of the Match function with a warning.
# Let us assume that the effect of homeworks (Tr) on math score (Y)
# is unconfounded conditional on X and other unobserved schools features.
# Several strategies to handle unobserved group characteristics
# are described in Arpino & Cannas, 2016 (see References).
# Multivariate Matching on covariates in X
#(default parameters: one-to-one matching on X with replacement with a caliper of 0.25).
### Matching within schools
mw<-MatchW(Y=Y, Tr=Tr, X=X, Group=Group, caliper=0.1)
# compare balance before and after matching
bmw <- MatchBalance(Tr~X,data=schools,match.out=mw)
# calculate proportion of matched observations
(mw$orig.treated.nobs-mw$ndrops)/mw$orig.treated.nobs
# check number of drops by school
mw$orig.ndrops.by.group
# examine output
mw # complete list of results
summary(mw) # basic statistics
#### Propensity score matching
# estimate the propensity score (ps) model
mod <- glm(Tr~ses+parented+public+sex+race+urban,
family=binomial(link="logit"),data=schools)
eps <- fitted(mod)
# eg 1: within-school propensity score matching
psmw <- MatchW(Y=schools$math, Tr=Tr, X=eps, Group=schools$schid, caliper=0.1)
# We can use other strategies for controlling unobserved cluster covariates
# by using different specifications of ps (see Arpino and Mealli for details):
# eg 2: standard propensity score matching using ps estimated
# from a logit model with dummies for schools
mod <- glm(Tr ~ ses + parented + public + sex + race + urban
+schid - 1,family=binomial(link="logit"),data=schools)
eps <- fitted(mod)
dpsm <- MatchW(Y=schools$math, Tr=Tr, X=eps, caliper=0.1)
# this is equivalent to run Match with X=eps
# eg3: standard propensity score matching using ps estimated from
# multilevel logit model (random intercept at the school level)
require(lme4)
mod<-glmer(Tr ~ ses + parented + public + sex + race + urban + (1|schid),
family=binomial(link="logit"), data=schools)
eps <- fitted(mod)
mpsm<-MatchW(Y=schools$math, Tr=Tr, X=eps, Group=NULL, caliper=0.1)
# this is equivalent to run Match with X=eps
}
\keyword{school dataset (NELS-88)}
|
###########################################################
## Additional utilities for SYSargs and SYSargs2 objects ##
###########################################################
######################################################
## Convenience write function for targetsout(args) ##
######################################################
## Write the output targets of a 'SYSargs' or 'SYSargs2' object to a
## tab-delimited targets file that downstream workflow steps can read.
##
## Arguments:
##   x: object of class 'SYSargs' or 'SYSargs2'.
##   file: output path; "default" derives a name from the software/step name.
##   silent: if TRUE, suppress the confirmation message.
##   overwrite: if FALSE (default), refuse to clobber an existing file.
##   step: (SYSargs2 only) name or position of the workflow step whose
##         outputs should be written; required for SYSargs2 input.
##   new_col: (SYSargs2 only) names of new output columns to add.
##   new_col_output_index: (SYSargs2 only) index of the output file to place
##         in each new column; must have the same length as 'new_col'.
##   ...: passed on to writeLines().
writeTargetsout <- function(x, file = "default", silent = FALSE, overwrite = FALSE, step = NULL, new_col = NULL, new_col_output_index = NULL, ...) {
    ## inherits() is robust when class() returns a vector; the original error
    ## message was also missing its closing quote.
    if (!inherits(x, "SYSargs") && !inherits(x, "SYSargs2"))
        stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2'")
    ## SYSargs class
    if (inherits(x, "SYSargs")) {
        targets <- targetsout(x)
        software <- software(x)
        if (file == "default") {
            file <- paste("targets_", software, ".txt", sep = "")
            file <- gsub(" {1,}", "_", file)  # no spaces in file names
        }
        headerlines <- targetsheader(x)
    ## SYSargs2 class
    } else {
        ## 'step' is mandatory and may be either a step name or a position.
        if (is.null(step))
            stop(paste("Argument 'step' needs to be assigned one of the following values:",
                       paste(names(x$clt), collapse = ", "), "OR the corresponding position"))
        if (is.character(step) && !any(names(x$clt) %in% step))
            stop(paste("Argument 'step' can only be assigned one of the following values:",
                       paste(names(x$clt), collapse = ", "), "OR the corresponding position"))
        if (is.numeric(step) && !any(seq_along(names(x$clt)) %in% step))
            stop(paste("Argument 'step' can only be assigned one of the following position:",
                       paste(seq_along(names(x$clt)), collapse = ", "), "OR the corresponding names"))
        targets <- targets.as.df(targets(x))
        ## Adding the columns: either defaults (one column per expected output
        ## file) or user-defined names/indices.
        if (is.null(new_col) || is.null(new_col_output_index)) {
            cat("One of 'new_col' and 'new_col_output_index' is null. It is using default column naming and adding all the output files expected, and each one will be written in a different column. \n")
            for (i in seq_len(length(output(x)[[1]][[step]]))) {
                pout <- sapply(names(output(x)), function(y) normalizePath(output(x)[[y]][[step]][[i]]), simplify = FALSE)
                targets[[paste0(cwlfiles(x)$steps, "_", i)]] <- as.character(pout)
            }
        } else {
            if (any(length(output(x)[[1]][[step]]) < new_col_output_index) || any(new_col_output_index < 1)) {
                stop(paste0("'new_col_output_index' argument needs to be equal or bigger than 1 and smaller than ", length(output(x)[[1]][[1]]), ", the maximum number of outputs files."))
            }
            if (length(new_col) != length(new_col_output_index)) {
                stop("'new_col' should have the same length as 'new_col_output_index'")
            }
            for (i in seq_along(new_col)) {
                pout <- sapply(names(output(x)), function(y) normalizePath(output(x)[[y]][[step]][[new_col_output_index[i]]]), simplify = FALSE)
                targets[[as.character(new_col[i])]] <- as.character(pout)
            }
        }
        ## Workflow and step name: strip the file extension from the cwl name.
        software <- strsplit(basename(cwlfiles(x)$cwl), split = "\\.")[[1]][1]
        if (is.character(step)) {
            step <- strsplit(step, split = "\\.")[[1]][1]
        } else {
            step <- strsplit(names(x$clt)[step], split = "\\.")[[1]][1]
        }
        if (file == "default") {
            file <- paste("targets_", step, ".txt", sep = "")
            file <- gsub(" {1,}", "_", file)
        }
        headerlines <- targetsheader(x)[[1]]
    }
    if (file.exists(file) && overwrite == FALSE)
        stop(paste("I am not allowed to overwrite files; please delete existing file:", file, "or set 'overwrite=TRUE'"))
    ## Serialize header + column names + one tab-joined line per sample.
    targetslines <- c(paste(colnames(targets), collapse = "\t"), apply(targets, 1, paste, collapse = "\t"))
    writeLines(c(headerlines, targetslines), file, ...)
    if (silent != TRUE) cat("\t", "Written content of 'targetsout(x)' to file:", file, "\n")
}
## Usage:
# writeTargetsout(x=args, file="default") ## SYSargs class
# writeTargetsout(x=WF, file="default", step=1, new_col = "FileName1", new_col_output_index = 1) ## SYSargs2 class
##############################################################################
## Function to run NGS aligners including sorting and indexing of BAM files ##
##############################################################################
## Execute the command-line calls stored in a SYSargs (legacy) or SYSargs2
## object, writing per-run command and stdout log files to the results
## directory. For alignment workflows, SAM outputs can be converted to
## sorted/indexed BAM files via Rsamtools. Returns a named logical-string
## vector of existing outputs (SYSargs input) or an updated SYSargs2 object
## (SYSargs2 input).
##
## Arguments:
##   args: SYSargs or SYSargs2 object holding the commands to execute.
##   runid: identifier appended to the log file names.
##   make_bam: convert *.sam outputs to sorted, indexed *.bam files.
##   del_sam: delete the *.sam file after successful BAM conversion.
##   dir: (SYSargs2 only) move outputs into per-sample subdirectories.
##   dir.name: (SYSargs2 only) output directory name; required when the yml
##             template provides no results_path.
##   force: (SYSargs2 only) rerun commands even when outputs already exist.
##   ...: currently unused by the visible code paths.
runCommandline <- function(args, runid="01", make_bam=TRUE, del_sam=TRUE, dir=FALSE, dir.name=NULL, force=FALSE, ...) {
## Load required software via the Environment Modules system when the object
## declares module names.
if(any(nchar(gsub(" {1,}", "", modules(args))) > 0)) {
## Check if "Environment Modules" is installed in the system
## "Environment Modules" is not available
if(suppressWarnings(system("modulecmd bash -V", ignore.stderr = TRUE, ignore.stdout = TRUE))!=1) {
message("Message: 'Environment Modules is not available. Please make sure to configure your PATH environment variable according to the software in use.'", "\n")
} else {
## "Environment Modules" is available and proceed the module load
if(suppressWarnings(system("modulecmd bash -V", ignore.stderr = TRUE, ignore.stdout = TRUE))==1) { # Returns TRUE if module system is present.
for(j in modules(args)) moduleload(j) # loads specified software from module system
}
}
}
## SYSargs class
if(class(args)=="SYSargs") {
commands <- sysargs(args)
## Logical vector: which expected output files already exist.
completed <- file.exists(outpaths(args))
names(completed) <- outpaths(args)
logdir <- results(args)
for(i in seq(along=commands)) {
## Run alignmets only for samples for which no BAM file is available.
if(as.logical(completed)[i]) {
next()
} else {
## Create soubmitargsID_command file
cat(commands[i], file=paste(logdir, "submitargs", runid, sep=""), sep = "\n", append=TRUE)
## Run executable
## Split the command line into the executable name and its arguments.
command <- gsub(" .*", "", as.character(commands[i]))
commandargs <- gsub("^.*? ", "",as.character(commands[i]))
## Execute system command; note: BWA needs special treatment in stderr handling since it writes
## some stderr messages to sam file if used with system2()
if(software(args) %in% c("bwa aln", "bwa mem")) {
stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=FALSE)
} else if(software(args) %in% c("bash_commands")) {
stdout <- system(paste(command, commandargs))
} else {
stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=TRUE)
}
## Create submitargsID_stdout file
cat(commands[i], file=paste(logdir, "submitargs", runid, "_log", sep=""), sep = "\n", append=TRUE)
cat(unlist(stdout), file=paste(logdir, "submitargs", runid, "_log", sep=""), sep = "\n", append=TRUE)
## Conditional postprocessing of results
if(make_bam==TRUE) {
if(grepl(".sam$", outfile1(args)[i])) { # If output is *.sam file (e.g. Bowtie2)
asBam(file=outfile1(args)[i], destination=gsub("\\.sam$", "", outfile1(args)[i]), overwrite=TRUE, indexDestination=TRUE)
if(del_sam==TRUE){
unlink(outfile1(args)[i])
} else if(del_sam==FALSE){
dump <- "do nothing"
}
} else if(grepl("vcf$|bcf$|xls$|bed$", outpaths(args)[i])) {
dump <- "do nothing"
} else { # If output is unindexed *.bam file (e.g. Tophat2)
sortBam(file=names(completed[i]), destination=gsub("\\.bam$", "", names(completed[i])))
indexBam(names(completed[i]))
}
}
}
}
## NOTE(review): file.exists() returns a logical vector, so this gsub() over
## "TRUE"/"FALSE" strings is a no-op; the "sam$"->"bam$" substitution looks
## unintended — confirm intent before changing.
bamcompleted <- gsub("sam$", "bam$", file.exists(outpaths(args)))
names(bamcompleted) <- SampleName(args)
cat("Missing alignment results (bam files):", sum(!as.logical(bamcompleted)), "\n"); cat("Existing alignment results (bam files):", sum(as.logical(bamcompleted)), "\n")
return(bamcompleted)
## SYSargs2 class ##
} else if(class(args)=="SYSargs2") {
## Workflow Name
cwl.wf <- strsplit(basename(cwlfiles(args)$cwl), split="\\.")[[1]][1]
## Folder name provide in the yml file or in the dir.name
if(is.null(args$yamlinput$results_path$path)) {
if(is.null(dir.name)) {
stop("argument 'dir.name' missing. The argument can only be assigned 'NULL' when directory name is provided in the yml template. The argument should be assigned as a character vector of length 1")
}
}
## NOTE(review): the branches below look inverted — when dir.name is NULL the
## yml path is used, otherwise ./results/ is used regardless of dir.name;
## confirm against the package documentation before changing.
if(is.null(dir.name)) {
logdir <- normalizePath(args$yamlinput$results_path$path)
} else {
logdir <- paste(getwd(), "/results/", sep="")
}
args.return <- args
## Check what expected outputs have been generated
if(make_bam==FALSE){
## 'completed' mirrors output(args) with logicals; 'outputList' collects a
## flat vector of every expected output path.
completed <- output(args)
outputList <- as.character()
for(i in seq_along(output(args))){
for(j in seq_along(output(args)[[i]])){
completed[[i]][[j]] <- file.exists(output(args)[[i]][[j]])
names(completed[[i]][[j]]) <- output(args)[[i]][[j]]
outputList <- c(outputList, output(args)[[i]][[j]])
}
}
if(length(output(args)[[1]][[1]])==1){
names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]]))
} else if(length(output(args)[[1]][[1]])>1){
names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]][[1]]))
}
} else if(make_bam==TRUE) {
if(any(grepl("samtools", names(clt(args))))){ stop("argument 'make_bam' should be 'FALSE' when using the workflow with 'SAMtools'")}
## Rewrite expected .sam outputs to .bam before checking existence; also
## anticipate the .bam.bai index files created below.
args1 <- output_update(args, dir=FALSE, replace=TRUE, extension=c(".sam", ".bam"))
completed <- output(args1)
outputList <- as.character()
for(i in seq_along(output(args1))){
for(j in seq_along(output(args1)[[i]])){
completed[[i]][[j]] <- file.exists(output(args1)[[i]][[j]])
names(completed[[i]][[j]]) <- output(args1)[[i]][[j]]
outputList <- c(outputList, output(args1)[[i]][[j]])
if(any(grepl(".bam", output(args1)[[i]][[j]]))){
for(k in which(grepl(".bam", output(args1)[[i]][[j]]))){
outputList <- c(outputList, paste0(gsub("\\.bam$", "", output(args1)[[i]][[j]][k]), ".bam.bai"))
}
}
}
}
if(length(output(args)[[1]][[1]])==1){
names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]])+1)
} else if(length(output(args)[[1]][[1]])>1){
names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]][[1]])+1)
}
# names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]])+1)
args.return <- output_update(args.return, dir=FALSE, replace=TRUE, extension=c(".sam", ".bam"))
}
## Main execution loop: one iteration per sample (i) and per command (j).
for(i in seq_along(cmdlist(args))){
for(j in seq_along(cmdlist(args)[[i]])){
## Run the commandline only for samples for which no output file is available.
if(all(force==FALSE & all(as.logical(completed[[i]][[j]])))) {
next()
} else {
# Create soubmitargsID_command file
cat(cmdlist(args)[[i]][[j]], file=paste(logdir, "/submitargs", runid, "_", cwl.wf, sep=""), fill=TRUE, labels=paste0(names(cmdlist(args))[[i]], ":"), append=TRUE)
## Create an object for executable
command <- gsub(" .*", "", as.character(cmdlist(args)[[i]][[j]]))
commandargs <- gsub("^.*? ", "",as.character(cmdlist(args)[[i]][[j]]))
## Check if the command is in the PATH
if(!command == c("bash")){
tryCatch(system(command, ignore.stdout = TRUE, ignore.stderr = TRUE), warning=function(w) cat(paste0("ERROR: ", "\n", command, ": command not found. ", '\n', "Please make sure to configure your PATH environment variable according to the software in use."), "\n"))
}
## Run executable; bwa writes stderr into the sam file under system2(),
## and commands containing shell variables ('$') need a real shell.
if(command %in% "bwa") {
stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=FALSE)
} else if(command %in% c("bash")) {
stdout <- system(paste(command, commandargs))
} else if(isTRUE(grep('\\$', command)==1)) {
stdout <- system(paste(command, commandargs))
} else {
stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=TRUE)
}
## Create submitargsID_stdout file
cat(cmdlist(args)[[i]][[j]], file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), fill=TRUE, labels=paste0(names(cmdlist(args))[[i]], ":"), sep = "\n", append=TRUE)
cat(unlist(stdout), file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), sep = "\n", append=TRUE)
}
cat("################", file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), sep = "\n", append=TRUE)
## Post-process this command's outputs: sam -> bam conversion, bam sorting
## and indexing; vcf/bcf/xls/bed outputs are left untouched.
if(make_bam==TRUE) {
sam_files <- grepl(".sam$", output(args)[[i]][[j]])
others_files <- grepl("vcf$|bcf$|xls$|bed$", output(args)[[i]][[j]])
completed.bam <- grepl(".bam$", output(args)[[i]][[j]])
if(any(sam_files)){
for(k in which(sam_files)){
Rsamtools::asBam(file=output(args)[[i]][[j]][k], destination=gsub("\\.sam$", "", output(args)[[i]][[j]][k]), overwrite=TRUE, indexDestination=TRUE)
if(del_sam==TRUE){
unlink(output(args)[[i]][[j]][k])
} else if(del_sam==FALSE){
dump <- "do nothing"
}
} } else if(any(others_files)){
dump <- "do nothing"
}
if(any(completed.bam)){ # If output is unindexed *.bam file (e.g. Tophat2)
for(k in which(completed.bam)){
Rsamtools::sortBam(file=output(args)[[i]][[j]][k], destination=gsub("\\.bam$", "", output(args)[[i]][[j]][k]))
Rsamtools::indexBam(output(args)[[i]][[j]][k])
}
}
}
}
}
## Create recursive the subfolders: move logs and outputs into
## <results>/<workflow>/<sample>/ and update the returned object accordingly.
if(dir==TRUE){
if(!is.null(dir.name)){
cwl.wf <- dir.name
}
for(i in seq_along(names(cmdlist(args)))){
full_path <- paste0(logdir, "/", cwl.wf, "/", names(cmdlist(args)[i]))
if(dir.exists(full_path)==FALSE){
dir.create(full_path, recursive = TRUE) }
}
if(dir.exists(paste0(logdir, "/", cwl.wf, "/_logs/"))==FALSE){
dir.create(paste0(logdir, "/", cwl.wf, "/_logs/"), recursive = TRUE) }
#
files_log <- list.files(path=logdir, pattern = "submitargs")
for(i in seq_along(files_log)){
file.rename(from=paste0(logdir, "/", files_log[i]), to=paste0(logdir, "/", cwl.wf, "/_logs/", files_log[i]))
}
outputList_new <- as.character()
for(i in seq_along(outputList)){
if(file.exists(outputList[i])){
name <- strsplit(outputList[i], split="\\/")[[1]]
name <- name[length(name)]
file.rename(from=outputList[i], to=paste0(logdir, "/", cwl.wf, "/", names(outputList[i]), "/", name))
outputList_new <- c(outputList_new, paste0(logdir, "/", cwl.wf, "/", names(outputList[i]), "/", name))
} else if(file.exists(outputList[i])==FALSE){
dump <- "No such file or directory"
}
}
outputList <- outputList_new
args.return <- output_update(args.return, dir=TRUE, replace=FALSE)
}
## Final existence report over all expected outputs.
output_completed <- as.character()
for(i in seq_along(outputList)){
output_completed[i] <- file.exists(outputList[i])
}
names(output_completed) <- outputList
cat("Missing expected outputs files:", sum(!as.logical(output_completed)), "\n"); cat("Existing expected outputs files:", sum(as.logical(output_completed)), "\n")
print(output_completed)
return(args.return)
#return(output_completed)
}
}
## Usage:
# WF <- runCommandline(WF) # creates the files in the ./results folder
# WF <- runCommandline(WF, dir=TRUE) # creates the files in the ./results/workflowName/Samplename folder
# WF <- runCommandline(WF, make_bam = FALSE, dir=TRUE) ## For hisat2-mapping.cwl template
############################################################################################
## batchtools-based function to submit runCommandline jobs to queuing system of a cluster ##
############################################################################################
## The advantage of this function is that it should work with most queuing/scheduling systems such as SLURM, Troque, SGE, ...
## Submit FUN (default: runCommandline) jobs, one per sample in 'args', to an
## HPC scheduler via the batchtools package.
##
## Arguments:
##   args: 'SYSargs' or 'SYSargs2' object with the commands to run.
##   FUN: function applied on the cluster to each single-sample subset args[i].
##   more.args: named list of additional arguments forwarded to FUN; names
##              must match formal arguments of FUN.
##   conffile: batchtools configuration file (must exist).
##   template: scheduler template file, e.g. for SLURM (must exist).
##   Njobs: number of chunks (cluster jobs) the samples are split into.
##   runid: identifier used in the registry directory name.
##   resourceList: resources (walltime, memory, ...) passed to submitJobs().
## Returns the batchtools registry; query it with getStatus()/waitForJobs().
clusterRun <- function(args, FUN = runCommandline, more.args = list(args = args, make_bam = TRUE), conffile = ".batchtools.conf.R", template = "batchtools.slurm.tmpl", Njobs, runid = "01", resourceList) {
    ## Validity checks of inputs; inherits()/is.function()/is.list() are the
    ## robust forms of the original class() string comparisons.
    if (!inherits(args, "SYSargs") && !inherits(args, "SYSargs2")) stop("Argument 'args' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2'")
    if (!is.function(FUN)) stop("Value assigned to 'FUN' argument is not an object of class function.")
    if (!file.exists(conffile)) stop("Need to point under 'conffile' argument to proper config file. See more information here: https://mllg.github.io/batchtools/reference/makeRegistry.html. Note: in this file *.tmpl needs to point to a valid template file.")
    if (!file.exists(template)) stop("Need to point under 'template' argument to proper template file. Sample template files for different schedulers are available here: https://github.com/mllg/batchtools/blob/master/inst/templates/")
    if (!is.list(more.args)) stop("'more.args' needs to be object of class 'list'.")
    if (any(!names(more.args) %in% names(as.list(formals(FUN))))) stop(paste("The list of arguments assigned to 'more.args' can only be the following arguments defined in the function 'FUN':", paste(names(as.list(formals(FUN))), collapse = ", ")))
    ## Results directory and one job index per sample.
    if (inherits(args, "SYSargs")) {
        path <- normalizePath(results(args))
        args.f <- seq(along = args)
    } else {
        path <- normalizePath(args$yamlinput$results_path$path)
        args.f <- seq(along = cmdlist(args))
    }
    ## batchtools routines: map f over the sample indices, chunk into Njobs
    ## scheduler jobs, then submit. The registry database lives in a uniquely
    ## suffixed subdirectory of the results path.
    f <- function(i, args, ...) FUN(args = args[i], ...)
    logdir1 <- paste0(path, "/submitargs", runid, "_btdb_", paste(sample(0:9, 4), collapse = ""))
    reg <- makeRegistry(file.dir = logdir1, conf.file = conffile, packages = "systemPipeR")
    ids <- batchMap(fun = f, args.f, more.args = more.args, reg = reg)
    chunk <- chunk(ids$job.id, n.chunks = Njobs, shuffle = FALSE)
    ids$chunk <- chunk
    done <- submitJobs(ids = ids, reg = reg, resources = resourceList)
    return(reg)
}
## Usage:
# resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
# reg <- clusterRun(args, conffile = ".batchtools.conf.R", template = "batchtools.slurm.tmpl", Njobs=18, runid="01", resourceList=resources)
# getStatus(reg=reg)
# waitForJobs(reg=reg)
########################
## Read preprocessing ##
########################
## Stream FASTQ files of a SYSargs/SYSargs2 object in batches and apply a
## user-supplied preprocessing expression to each batch, appending the result
## to the corresponding output FASTQ file. Handles single-end and paired-end
## layouts; for paired-end data, only reads surviving in BOTH mates are kept.
##
## Arguments:
##   args: SYSargs or SYSargs2 object defining input/output FASTQ paths.
##   Fct: character string with an R expression operating on a ShortRead
##        object named 'fq', e.g. "trimLRPatterns(Rpattern=..., subject=fq)".
##   batchsize: number of reads streamed per batch (memory/speed trade-off).
##   overwrite: delete pre-existing output files first (writeFastq appends).
##   ...: passed on to writeFastq(), e.g. compress=TRUE.
preprocessReads <- function(args, Fct, batchsize=100000, overwrite=TRUE, ...) {
if(all(class(args)!="SYSargs" & class(args)!="SYSargs2")) stop("Argument 'args' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2")
if(class(Fct)!="character") stop("Argument 'Fct' needs to be of class character")
## Extract column names, output paths and input targets for either class.
if(class(args)=="SYSargs"){
colnames_args <- colnames(targetsout(args)) #SYSargs
outpaths <- outpaths(args) #SYSargs
targets_in <- targetsin(args)
} else if (class(args)=="SYSargs2") {
colnames_args <- colnames(targets.as.df(args$targets)) #SYSargs2
outpaths <- subsetWF(args = args, slot = "output", subset = 1, index=1)
targets_in <- targets.as.df(args$targets)
}
## Run function in loop over all fastq files
## Single end fastq files (detected by absence of FileName1/FileName2 pair)
if(!all(c("FileName1", "FileName2") %in% colnames_args)) {
for(i in seq(along=args)) {
outfile <- outpaths[i]
## Delete existing fastq files with same names, since writeFastq will append to them
if(overwrite==TRUE) {
if(any(file.exists(outfile))) unlink(outfile)
} else {
if(any(file.exists(outfile))) stop(paste("File", outfile , "exists. Please delete file first or set overwrite=TRUE."))
}
## Run preprocessor function with FastqStreamer; Fct is evaluated with the
## current batch bound to 'fq'.
counter <- 0
f <- FastqStreamer(infile1(args)[i], batchsize)
while(length(fq <- yield(f))) {
fqtrim <- eval(parse(text=Fct))
writeFastq(fqtrim, outfile, mode="a", ...)
counter <- counter + length(fqtrim)
cat(counter, "processed reads written to file:", outfile, "\n")
}
close(f)
}
}
## Paired end fastq files: both mates are streamed in lock-step.
if(all(c("FileName1", "FileName2") %in% colnames_args)) {
for(i in seq(along=args)) {
p1 <- as.character(targets_in$FileName1[i])
p2 <- as.character(targets_in$FileName2[i])
if(class(args)=="SYSargs"){
p1out <- as.character(targetsout(args)$FileName1[i])
p2out <- as.character(targetsout(args)$FileName2[i])
} else if (class(args)=="SYSargs2") {
p1out <- args$output[[i]][[1]][[1]]
p2out <- args$output[[i]][[1]][[2]]
}
## Delete existing fastq files with same names, since writeFastq will append to them
if(overwrite==TRUE) {
if(any(file.exists(p1out))) unlink(p1out)
if(any(file.exists(p2out))) unlink(p2out)
} else {
if(any(file.exists(p1out))) stop(paste("File", p1out , "exists. Please delete file first or set overwrite=TRUE."))
if(any(file.exists(p2out))) stop(paste("File", p2out , "exists. Please delete file first or set overwrite=TRUE."))
}
## Run preprocessor function with FastqStreamer
counter1 <- 0
counter2 <- 0
f1 <- FastqStreamer(p1, batchsize)
f2 <- FastqStreamer(p2, batchsize)
while(length(fq1 <- yield(f1))) {
fq2 <- yield(f2)
if(length(fq1)!=length(fq2)) stop("Paired end files cannot have different read numbers.")
## Process p1
fq <- fq1 # for simplicity in eval
fq1trim <- eval(parse(text=Fct))
## Index for p1: positions (as names) of reads surviving the filter
index1 <- as.character(id(fq1)) %in% as.character(id(fq1trim))
names(index1) <- seq(along=index1)
index1 <- names(index1[index1])
## Process p2
fq <- fq2 # for simplicity in eval
fq2trim <- eval(parse(text=Fct))
## Index for p2 (same scheme as for p1)
index2 <- as.character(id(fq2)) %in% as.character(id(fq2trim))
names(index2) <- seq(along=index2)
index2 <- names(index2[index2])
## Export to processed paired files: keep only reads whose mate also
## survived, so the two output files stay in sync.
indexpair1 <- index1 %in% index2
writeFastq(fq1trim[indexpair1], p1out, mode="a", ...)
indexpair2 <- index2 %in% index1
writeFastq(fq2trim[indexpair2], p2out, mode="a", ...)
counter1 <- counter1 + sum(indexpair1)
cat(counter1, "processed reads written to file:", p1out, "\n")
counter2 <- counter2 + sum(indexpair2)
cat(counter2, "processed reads written to file:", p2out, "\n")
}
close(f1)
close(f2)
}
}
}
## Usage:
# preprocessReads(args=args, Fct="trimLRPatterns(Rpattern="GCCCGGGTAA", subject=fq)", batchsize=100000, overwrite=TRUE, compress=TRUE)
##################################################################
## Function to create sym links to bam files for viewing in IGV ##
##################################################################
## Create symbolic links to the BAM/BAI files of a SYSargs/SYSargs2 object in
## a web-accessible directory, and write a tab-separated URL file (e.g. for
## loading the tracks in IGV).
##
## Arguments:
##   sysargs: SYSargs or SYSargs2 object providing BAM paths and sample names.
##   command: shell command used to create the links (default "ln -s").
##   htmldir: length-2 character vector; the two parts are concatenated to the
##            link target directory (e.g. c("~/.html/", "somedir/")).
##   ext: length-2 vector with the BAM and index extensions.
##   urlbase: base URL prefixed to each link in the URL file.
##   urlfile: path of the URL file to write.
symLink2bam <- function(sysargs, command="ln -s", htmldir, ext=c(".bam", ".bai"), urlbase, urlfile) {
## Create URL file
if(all(class(sysargs) != "SYSargs" & class(sysargs) != "SYSargs2")) stop("Argument 'sysargs' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2")
## SYSargs class
if((class(sysargs)) == "SYSargs") {
bampaths <- outpaths(sysargs)
symname <- SampleName(sysargs)
## SYSargs2 class ##
} else if (class(sysargs)=="SYSargs2") {
bampaths <- subsetWF(args = sysargs, slot = "output", subset = 1, index=1)
## Collect the per-sample names from the targets slot (element 2 of each
## target record appears to hold the sample name -- TODO confirm).
symname <- sysargs$targets[[1]][[2]]
for(i in seq(along=sysargs)) {
symname[i] <- sysargs$targets[[i]][[2]]
}
}
## One URL line per sample: <urlbase><htmldir[2]><sample><ext[1]>\t<sample>
urls <- paste(urlbase, htmldir[2], symname, ext[1], "\t", symname, sep="")
writeLines(urls, urlfile)
## Creat correspoding sym links: one for the BAM, one for the BAM index
dir.create(paste(htmldir, collapse=""))
symname <- rep(symname, each=2)
symname <- paste(symname, c(ext[1], paste(ext, collapse="")), sep="")
bampaths2 <- as.vector(t(cbind(bampaths, paste(bampaths, ext[2], sep=""))))
symcommands <- paste(command, " ", bampaths2, " ", paste(htmldir, collapse=""), symname, sep="")
for(i in symcommands) system(i)
}
## Usage:
# symLink2bam(sysargs=args, command="ln -s", htmldir=c("~/.html/", "somedir/"), ext=c(".bam", ".bai"), urlbase="http://cluster.hpcc.ucr.edu/~tgirke/", urlfile="IGVurl.txt")
#####################
## Alignment Stats ##
#####################
## Compute alignment statistics (total reads, total alignments, primary
## alignments and their percentages) for the BAM outputs of a SYSargs or
## SYSargs2 object. Returns a data.frame with one row per sample for which a
## BAM file exists.
##
## Arguments:
##   args: SYSargs or SYSargs2 object.
##   output_index: (SYSargs2 only) which output slot index holds the BAM paths.
alignStats <- function(args, output_index = 1) {
fqpaths <- infile1(args)
## SYSargs class
if(class(args)=="SYSargs") {
bampaths <- outpaths(args)
# SYSargs2 class: collect .bam outputs, rejecting .sam files and skipping
# "sorted.bam" duplicates of the primary BAM.
} else if (class(args)=="SYSargs2") {
output.all <- subsetWF(args, slot = "output", subset = 1, index = output_index)
bampaths <- as.character()
for(i in seq_along(output.all)){
for(j in seq_along(output.all[[i]])){
if(grepl(".sam$", output.all[[i]][[j]])==TRUE & grepl(".bam$", output.all[[i]][[j]])==FALSE){
stop("Please provide files in BAM format. Also, check 'output_update' function, if the BAM files were previously generated.") }
else if(grepl(".bam$", output.all[[i]][[j]])==TRUE & grepl("sorted.bam$", output.all[[i]][[j]])==FALSE){
bampaths <- c(bampaths, output.all[[i]][[j]]) }
}
}
names(bampaths) <- names(output.all)
}
## Restrict to samples whose BAM file is actually present.
bamexists <- file.exists(bampaths)
fqpaths <- fqpaths[bamexists]
bampaths <- bampaths[bamexists]
## Obtain total read number from FASTQ files (4 lines per read)
Nreads <- countLines(fqpaths)/4
names(Nreads) <- names(fqpaths)
## If reads are PE multiply by 2 as a rough approximation
if(nchar(infile2(args))[1] > 0) Nreads <- Nreads * 2
## Obtain total number of alignments from BAM files
bfl <- BamFileList(bampaths, yieldSize=50000, index=character())
param <- ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE))
Nalign <- countBam(bfl, param=param)
## Obtain number of primary alignments from BAM files
param <- ScanBamParam(flag=scanBamFlag(isSecondaryAlignment=FALSE, isUnmappedQuery=FALSE))
Nalignprim <- countBam(bfl, param=param)
statsDF <- data.frame(FileName=names(Nreads),
Nreads=Nreads,
Nalign=Nalign$records,
Perc_Aligned=Nalign$records/Nreads*100,
Nalign_Primary=Nalignprim$records,
Perc_Aligned_Primary=Nalignprim$records/Nreads*100
)
## Rename read-count column for paired-end data (counts are 2x approximated).
if(nchar(infile2(args))[1] > 0) colnames(statsDF)[which(colnames(statsDF)=="Nreads")] <- "Nreads2x"
return(statsDF)
}
## Usage:
# read_statsDF <- alignStats(args=args)
########################
## RPKM Normalization ##
########################
## Convert raw read counts to RPKM values (reads per kilobase of exon model
## per million mapped reads).
##
## Arguments:
##   counts: numeric vector of raw read counts, one entry per gene.
##   ranges: per-gene exon ranges (e.g. a GRangesList); overlapping exons are
##           merged via reduce() before measuring gene length.
## Returns a numeric vector of RPKM values, same length as 'counts'.
returnRPKM <- function(counts, ranges) {
    exon_union_kb <- sum(width(reduce(ranges)))/1000 # exon-union length per gene in kbp
    mapped_millions <- sum(counts)/1e+06             # scaling factor: million mapped reads
    ## reads per million, then normalized by gene length in kbp
    (counts/mapped_millions)/exon_union_kb
}
## Usage:
# countDFrpkm <- apply(countDF, 2, function(x) returnRPKM(counts=x, ranges=eByg))
###############################################
## Read Sample Comparisons from Targets File ##
###############################################
## Parses sample comparisons from <CMP> line(s) in targets.txt file or SYSars object.
## All possible comparisons can be specified with 'CMPset: ALL'.
## Parse sample comparisons from the <CMP> header line(s) of a targets file
## or of a SYSargs/SYSargs2 object. A set defined as 'CMPset: ALL' expands to
## all pairwise comparisons of the Factor column levels.
##
## Arguments:
##   file: path to a targets file, or a 'SYSargs'/'SYSargs2' object whose
##         targets header contains the <CMP> line(s).
##   format: "vector" returns per-set character vectors like "A-B";
##           "matrix" returns per-set two-column matrices of sample pairs.
##   delim: separator placed between the two sample names (vector format).
readComp <- function(file, format="vector", delim="-") {
    if (!format %in% c("vector", "matrix")) stop("Argument format can only be assigned: vector or matrix!")
    ## Obtain the header lines containing the <CMP> definitions.
    if (inherits(file, "SYSargs")) {
        if (length(targetsheader(file)) == 0) stop("Input has no targets header lines.")
        comp <- targetsheader(file)
    } else if (inherits(file, "SYSargs2")) {
        if (length(targetsheader(file)[[1]]) == 0) stop("Input has no targets header lines.")
        comp <- targetsheader(file)[[1]]
    } else {
        comp <- readLines(file)
    }
    ## Keep only <CMP> lines; strip the tag, spaces, tabs and surrounding
    ## quotes (the latter are often introduced when Excel edits the file).
    comp <- comp[grepl("<CMP>", comp)]
    comp <- gsub("#.*<CMP>| {1,}", "", comp)
    comp <- gsub("\t", "", comp)
    comp <- gsub("^\"|\"$", "", comp)
    ## Split each "Name: c1, c2, ..." definition into its set name (first
    ## element) and its comparison entries (rest).
    comp <- strsplit(comp, ":|,")
    names(comp) <- lapply(seq(along=comp), function(x) comp[[x]][1])
    comp <- sapply(names(comp), function(x) comp[[x]][-1], simplify=FALSE)
    ## Check whether all samples are present in Factor column of targets file
    checkvalues <- unique(unlist(strsplit(unlist(comp), "-")))
    checkvalues <- checkvalues[checkvalues != "ALL"]
    if (inherits(file, "SYSargs")) {
        all <- unique(as.character(targetsin(file)$Factor))
    } else if (inherits(file, "SYSargs2")) {
        ## Bug fix: the original referenced the undefined global 'args_bam'
        ## here instead of the 'file' argument.
        all <- unique(as.character(targets.as.df(targets(file))$Factor))
    } else {
        all <- unique(as.character(read.delim(file, comment.char = "#")$Factor))
    }
    if (any(!checkvalues %in% all)) stop(paste("The following samples are not present in Factor column of targets file:", paste(checkvalues[!checkvalues %in% all], collapse=", ")))
    ## Generate outputs; 'ALL' expands to every pairwise Factor combination.
    allindex <- sapply(names(comp), function(x) any(grepl("ALL", comp[[x]])))
    if (any(allindex)) for (i in which(allindex)) comp[[i]] <- combn(all, m=2, FUN=paste, collapse=delim)
    if (format == "vector" & delim != "-") comp <- sapply(names(comp), function(x) gsub("-", delim, comp[[x]]), simplify=FALSE)
    if (format == "vector") return(comp)
    if (format == "matrix") return(sapply(names(comp), function(x) do.call("rbind", strsplit(comp[[x]], "-")), simplify=FALSE))
}
## Usage:
# cmp <- readComp("targets.txt", format="vector", delim="-")
# cmp <- readComp(args, format="vector", delim="-")
#################################
## Access module system from R ##
#################################
# S3 Class for handling function calls to the Environment Modules system;
# methods are stored as list elements of 'myEnvModules'.
myEnvModules <- structure(list(), class="EnvModules")
## Initialize the module system: import MODULE* variables from a login shell
## into this R session, then load the default modules found in LOADEDMODULES.
myEnvModules$init <- function(){
# Module function assumes MODULEPATH and MODULEDIR are set in login profile
# Get base environment from login profile
base_env <- strsplit(system('bash -l -c "env"',intern = TRUE),'\n')
base_env <- strsplit(as.character(base_env),'=')
# Iterate through base environment
for (x in seq(1,length(base_env))) {
# Set environment based on login profile; only module-related variables are
# copied into the R session.
if (base_env[[x]][1]=="LOADEDMODULES" || base_env[[x]][1]=="MODULESHOME" || base_env[[x]][1]=="MODULEPATH" || base_env[[x]][1]=="MODULES_DIR" || base_env[[x]][1]=="IIGB_MODULES"){
if (base_env[[x]][1]=="LOADEDMODULES"){
# Remember the colon-separated list of default modules for the second loop.
default_modules <- strsplit(base_env[[x]][2],":")
}
else{
l <- list(base_env[[x]][2])
names(l) <- base_env[[x]][1]
do.call(Sys.setenv, l)
}
}
}
# Make sure to process default modules after the environment is set with the above loop
# NOTE(review): if LOADEDMODULES is absent from the login environment,
# 'default_modules' is undefined here and this loop errors — confirm intent.
for (x in seq(1,length(default_modules[[1]]))){
module_name <- default_modules[[1]][x]
print(paste("Loading module",module_name))
try(myEnvModules$load_unload("load",module_name))
}
}
# Print available modules ("avail") or currently loaded modules ("list") on
# stderr by delegating to the modulecmd binary; the captured value is unused.
myEnvModules$avail_list <- function(action_type){
try(module_vars <- system(paste('modulecmd bash',action_type),intern = TRUE))
}
## Unload every currently loaded module. The set of loaded modules is read
## from the colon-separated LOADEDMODULES environment variable maintained by
## the modulecmd tool; each entry is unloaded via load_unload("unload", ...).
myEnvModules$clear <- function(action_type){
  loaded <- strsplit(Sys.getenv("LOADEDMODULES"), ":")[[1]]
  for (module_name in loaded) {
    print(paste("Unloading module", module_name))
    ## try() so one failing unload does not abort clearing the rest.
    try(myEnvModules$load_unload("unload", module_name))
  }
}
# Load and unload actions are basically the same, set environment variables given by modulecmd.
# modulecmd prints shell commands; this function parses them and applies the
# equivalent changes (Sys.setenv/Sys.unsetenv) to the current R session.
myEnvModules$load_unload <- function(action_type, module_name=""){
module_name <- paste(module_name, collapse=' ')
# Use the low level C binary for generating module environment variables
try(module_vars <- system(paste('modulecmd bash',action_type, module_name),intern = TRUE))
if (length(module_vars) > 0){
for (y in seq(1,length(module_vars))) {
# Separate environment variables (one shell statement per ';')
module_var <- strsplit(module_vars,";")
# Iterate through all environment variables
for (x in seq(1,length(module_var[[y]]))) {
# Isolate key, value pair
evar <- module_var[[y]][x]
# Filter export commands (they duplicate the plain assignments)
if (length(grep('^ *export',evar)) == 0 && length(evar) > 0) {
# Separate key and value
evar <- strsplit(as.character(evar),'=')
# Strip spaces at the end of the value
evar_val <- gsub('[[:space:]]','',evar[[1]][2])
# Remove extra backslashes
l <- list(gsub('\\$','',evar_val))
# Load dependent modules (currently disabled -- recursive call commented out)
if (length(grep('^ *module',evar[[1]][1])) > 0){
inner_module <- strsplit(evar[[1]][1]," ")
#myEnvModules$load_unload(inner_module[1][[1]][2],inner_module[1][[1]][3])
}
# Source environment: cannot be replayed in R, so only warn
else if (length(grep('^ *source',evar[[1]][1])) > 0){
warning(paste0("Module uses a bash script to initialize, some software may not function as expected:\n\t",evar[[1]][1]))
}
# Unset variables that need to be unset
else if(length(grep("^ *unset ",evar[[1]][1])) > 0){
evar <- gsub("^unset (.*)$","\\1",evar[[1]][1])
Sys.unsetenv(evar)
} else {
# Assign names to each value in list
names(l) <- evar[[1]][1]
# Set environment variable in current environment
do.call(Sys.setenv, l)
}
}
}
}
}
}
#Define what happens bases on action
## Dispatch a module system action: "load", "unload", "list", "avail",
## "clear" or "init". Initializes the module system on first use (when
## MODULEPATH is empty) and errors if 'modulecmd' cannot be found.
## Arguments:
##   action_type: one of the actions listed above
##   module_name: module to load/unload, e.g. "tophat/2.1.1"
module <- function(action_type, module_name="") {
  ## Pre-assign an empty result so the length() checks below are valid even
  ## when the try() call itself fails (previously 'modulecmd_path' could be
  ## left undefined, turning a missing binary into an "object not found" error).
  modulecmd_path <- character(0)
  try(suppressWarnings(modulecmd_path <- system("which modulecmd", intern=TRUE, ignore.stderr=TRUE)),
      silent=TRUE
  )
  ## Only initialize the module system if it has not yet been initialized
  ## and the modulecmd binary exists on the PATH
  if (Sys.getenv('MODULEPATH') == "" && length(modulecmd_path) > 0) {
    myEnvModules$init()
  } else if (Sys.getenv('MODULEPATH') == "" && length(modulecmd_path) == 0) {
    stop("Could not find the installation of Environment Modules: \"modulecmd\". Please make sure to configure your PATH environment variable according to the software in use.")
  }
  switch(action_type,
         "load"   = myEnvModules$load_unload(action_type, module_name),
         "unload" = myEnvModules$load_unload(action_type, module_name),
         "list"   = myEnvModules$avail_list(action_type),
         "avail"  = myEnvModules$avail_list(action_type),
         "clear"  = myEnvModules$clear(action_type),
         "init"   = myEnvModules$init(),
         stop("That action is not supported.")
  )
}
## Usage:
# module("load","tophat")
# module("load","tophat/2.1.1")
# module("list")
# module("avail")
# module("init")
# module("unload", "tophat")
# module("unload", "tophat/2.1.1")
#####################
## Legacy Wrappers ##
#####################
## List software available in module system
## Legacy wrapper: list software available in the module system.
## Equivalent to module("avail"); kept for backward compatibility.
modulelist <- function() {
  module("avail")
}
## Load software from module system
## Legacy wrapper: load software from the module system.
## The 'envir' argument is retained for backward compatibility with old
## callers; it is not used by the current implementation.
moduleload <- function(module, envir="PATH") {
  module("load", module)
}
#######################################################################
## Run edgeR GLM with entire count matrix or subsetted by comparison ##
#######################################################################
## If independent=TRUE then countDF will be subsetted for each comparison
## Run edgeR GLM analysis with the entire count matrix or subsetted by comparison.
## Arguments:
##   countDF: count matrix/data.frame (genes in rows, samples in columns)
##   targets: targets data.frame providing 'Factor' and 'SampleName' columns
##   cmp: two-column matrix of sample comparisons (a length-2 vector is accepted)
##   independent: if TRUE, countDF is subsetted to each comparison's samples
##   paired: optional pairing vector for paired designs (requires independent=TRUE)
##   mdsplot: if non-empty, an MDS plot per comparison is written to ./results/
## Returns: data.frame of edgeR topTags results, one column set per comparison.
run_edgeR <- function(countDF, targets, cmp, independent=TRUE, paired=NULL, mdsplot="") {
  ## If cmp is a vector of length 2, convert it to a matrix. is.matrix() is
  ## used instead of class(cmp) != "matrix": since R 4.0 matrices carry class
  ## c("matrix", "array"), so the old test produced a length-2 condition that
  ## errors in if() under R >= 4.2.
  if(!is.matrix(cmp) && length(cmp)==2) cmp <- t(as.matrix(cmp))
  samples <- as.character(targets$Factor); names(samples) <- paste(as.character(targets$SampleName), "", sep="")
  countDF <- countDF[, names(samples)]
  countDF[is.na(countDF)] <- 0
  edgeDF <- data.frame(row.names=rownames(countDF))
  group <- as.character(samples)
  if(independent==TRUE) {
    loopv <- seq(along=cmp[,1])
  } else {
    loopv <- 1
  }
  for(j in loopv) {
    ## Filtering and normalization
    y <- DGEList(counts=countDF, group=group) # Constructs DGEList object
    if(independent == TRUE) {
      subset <- samples[samples %in% cmp[j,]]
      y <- y[, names(subset)]
      y$samples$group <- factor(as.character(y$samples$group))
    }
    ## Keep genes with >1 CPM in at least 2 samples
    keep <- rowSums(cpm(y)>1) >= 2; y <- y[keep, ]
    y <- calcNormFactors(y)
    ## Design matrix
    if(length(paired)==0) {
      design <- model.matrix(~0+y$samples$group, data=y$samples)
      colnames(design) <- levels(y$samples$group)
    } else {
      if(length(paired)>0 & independent==FALSE) stop("When providing values under 'paired' also set independent=TRUE")
      Subject <- factor(paired[samples %in% cmp[j,]]) # corrected Jun 2014 (won't change results)
      Treat <- y$samples$group
      design <- model.matrix(~Subject+Treat)
      ## NOTE(review): levels()<- on a design matrix looks suspicious but is
      ## retained unchanged to preserve historical behavior.
      levels(design) <- levels(y$samples$group)
    }
    ## Estimate dispersion
    y <- estimateGLMCommonDisp(y, design, verbose=TRUE) # Estimates common dispersions
    y <- estimateGLMTrendedDisp(y, design) # Estimates trended dispersions
    y <- estimateGLMTagwiseDisp(y, design) # Estimates tagwise dispersions
    fit <- glmFit(y, design) # Fits the negative binomial GLM for each tag (DGEGLM object)
    ## Contrast matrix is optional but makes analysis more transparent
    if(independent == TRUE) {
      mycomp <- paste(cmp[j,1], cmp[j,2], sep="-")
    } else {
      mycomp <- paste(cmp[,1], cmp[,2], sep="-")
    }
    if(length(paired)==0) contrasts <- makeContrasts(contrasts=mycomp, levels=design)
    for(i in seq(along=mycomp)) {
      if(length(paired)==0) {
        lrt <- glmLRT(fit, contrast=contrasts[,i]) # Likelihood ratio test for this contrast
      } else {
        lrt <- glmLRT(fit) # No contrast matrix with paired design
      }
      deg <- as.data.frame(topTags(lrt, n=length(rownames(y))))
      colnames(deg) <- paste(paste(mycomp[i], collapse="_"), colnames(deg), sep="_")
      edgeDF <- cbind(edgeDF, deg[rownames(edgeDF),])
    }
    if(nchar(mdsplot)>0) {
      pdf(paste("./results/sample_MDS_", paste(unique(subset), collapse="-"), ".pdf", sep=""))
      plotMDS(y)
      dev.off()
    }
  }
  return(edgeDF)
}
## Usage:
# cmp <- readComp(file=targetspath, format="matrix", delim="-")
# edgeDF <- run_edgeR(countDF=countDF, targets=targets, cmp=cmp[[1]], independent=TRUE, mdsplot="")
####################################################################
## Run DESeq2 with entire count matrix or subsetted by comparison ##
####################################################################
## If independent=TRUE then countDF will be subsetted for each comparison
## Run DESeq2 with the entire count matrix or subsetted by comparison.
## Arguments mirror run_edgeR: countDF (counts), targets (with 'Factor' and
## 'SampleName'), cmp (two-column comparison matrix or length-2 vector) and
## independent (subset countDF per comparison when TRUE).
## Returns: data.frame with DESeq2 results, one column set per comparison;
## log2FoldChange/padj are renamed to logFC/FDR for consistency with edgeR.
run_DESeq2 <- function(countDF, targets, cmp, independent=FALSE) {
  ## If cmp is a vector of length 2, convert it to a matrix. is.matrix() is
  ## used because class(cmp) returns c("matrix", "array") since R 4.0, which
  ## made the old class(cmp) != "matrix" test error in if() under R >= 4.2.
  if(!is.matrix(cmp) && length(cmp)==2) cmp <- t(as.matrix(cmp))
  samples <- as.character(targets$Factor); names(samples) <- paste(as.character(targets$SampleName), "", sep="")
  countDF <- countDF[, names(samples)]
  countDF[is.na(countDF)] <- 0
  deseqDF <- data.frame(row.names=rownames(countDF))
  if(independent==TRUE) {
    loopv <- seq(along=cmp[,1])
  } else {
    loopv <- 1
  }
  for(j in loopv) {
    if(independent==TRUE) {
      ## Create subsetted DESeqDataSet object
      subset <- samples[samples %in% cmp[j,]]
      countDFsub <- countDF[, names(subset)]
      dds <- DESeq2::DESeqDataSetFromMatrix(countData=as.matrix(countDFsub), colData=data.frame(condition=subset), design = ~ condition)
      mycmp <- cmp[j, , drop=FALSE]
    } else {
      ## Create full DESeqDataSet object
      dds <- DESeq2::DESeqDataSetFromMatrix(countData=as.matrix(countDF), colData=data.frame(condition=samples), design = ~ condition)
      mycmp <- cmp
    }
    ## Estimates (i) size factors, (ii) dispersion, (iii) negative binomial GLM fit and (iv) Wald statistics
    dds <- DESeq2::DESeq(dds, quiet=TRUE)
    for(i in seq(along=mycmp[,1])) {
      ## Extract DEG results for a specific contrast from the DESeqDataSet object
      res <- DESeq2::results(dds, contrast=c("condition", mycmp[i,]))
      ## Set NAs to reasonable values to avoid errors in downstream filtering steps
      res[is.na(res[,"padj"]), "padj"] <- 1
      res[is.na(res[,"log2FoldChange"]), "log2FoldChange"] <- 0
      deg <- as.data.frame(res)
      colnames(deg)[colnames(deg) %in% c("log2FoldChange", "padj")] <- c("logFC", "FDR")
      colnames(deg) <- paste(paste(mycmp[i,], collapse="-"), colnames(deg), sep="_")
      deseqDF <- cbind(deseqDF, deg[rownames(deseqDF),])
    }
  }
  return(deseqDF)
}
## Usage:
# cmp <- readComp(file=targetspath, format="matrix", delim="-")
# degseqDF <- run_DESeq2(countDF=countDF, targets=targets, cmp=cmp[[1]], independent=TRUE)
############################################
## Filter DEGs by p-value and fold change ##
############################################
## Filter DEGs by FDR and fold change cutoffs.
## Arguments:
##   degDF: data.frame from run_edgeR/run_DESeq2 with per-comparison columns
##          ending in "_FDR" and "_logFC"
##   filter: named vector, e.g. c(Fold=2, FDR=1). 'FDR' is given in PERCENT
##           (FDR=1 keeps adjusted p-values <= 0.01); 'Fold' is on the linear
##           scale (log2 is applied internally)
##   plot: if TRUE, print a stacked bar plot of up/down DEG counts (ggplot2)
## Returns: list with components 'UporDown', 'Up', 'Down' (per-comparison
## gene ID lists) and 'Summary' (count table).
filterDEGs <- function(degDF, filter, plot=TRUE) {
  ## Extract FDR and log2 fold change columns by their suffixes
  pval <- degDF[, grep("_FDR$", colnames(degDF)), drop=FALSE]
  log2FC <- degDF[, grep("_logFC$", colnames(degDF)), drop=FALSE]
  ## DEGs that are up or down regulated
  pf <- pval <= filter["FDR"]/100 & (log2FC >= log2(filter["Fold"]) | log2FC <= -log2(filter["Fold"]))
  colnames(pf) <- gsub("_FDR", "", colnames(pf))
  pf[is.na(pf)] <- FALSE # treat missing values as not significant
  DEGlistUPorDOWN <- sapply(colnames(pf), function(x) rownames(pf[pf[,x,drop=FALSE],,drop=FALSE]), simplify=FALSE)
  ## DEGs that are up regulated
  pf <- pval <= filter["FDR"]/100 & log2FC >= log2(filter["Fold"])
  colnames(pf) <- gsub("_FDR", "", colnames(pf))
  pf[is.na(pf)] <- FALSE
  DEGlistUP <- sapply(colnames(pf), function(x) rownames(pf[pf[,x,drop=FALSE],,drop=FALSE]), simplify=FALSE)
  ## DEGs that are down regulated
  pf <- pval <= filter["FDR"]/100 & log2FC <= -log2(filter["Fold"])
  colnames(pf) <- gsub("_FDR", "", colnames(pf))
  pf[is.na(pf)] <- FALSE
  DEGlistDOWN <- sapply(colnames(pf), function(x) rownames(pf[pf[,x,drop=FALSE],,drop=FALSE]), simplify=FALSE)
  ## Summary table of DEG counts per comparison
  df <- data.frame(Comparisons=names(DEGlistUPorDOWN), Counts_Up_or_Down=sapply(DEGlistUPorDOWN, length), Counts_Up=sapply(DEGlistUP, length), Counts_Down=sapply(DEGlistDOWN, length))
  resultlist <- list(UporDown=DEGlistUPorDOWN, Up=DEGlistUP, Down=DEGlistDOWN, Summary=df)
  if(plot==TRUE) {
    mytitle <- paste("DEG Counts (", names(filter)[1], ": ", filter[1], " & " , names(filter)[2], ": ", filter[2], "%)", sep="")
    df_plot <- data.frame(Comparisons=rep(as.character(df$Comparisons), 2), Counts=c(df$Counts_Up, df$Counts_Down), Type=rep(c("Up", "Down"), each=length(df[,1])))
    p <- ggplot(df_plot, aes(Comparisons, Counts, fill = Type)) + geom_bar(position="stack", stat="identity") + coord_flip() + theme(axis.text.y=element_text(angle=0, hjust=1)) + ggtitle(mytitle)
    print(p)
  }
  return(resultlist)
}
## Usage:
# DEG_list <- filterDEGs(degDF=edgeDF, filter=c(Fold=2, FDR=1))
|
/R/utilities.R
|
no_license
|
heyiamstella/systemPipeR
|
R
| false
| false
| 43,788
|
r
|
###########################################################
## Additional utilities for SYSargs and SYSargs2 objects ##
###########################################################
######################################################
## Convenience write function for targetsout(args) ##
######################################################
## Convenience write function for targetsout(args): writes the output targets
## of a SYSargs or SYSargs2 object to a tab-delimited targets file.
## Arguments:
##   x: SYSargs or SYSargs2 object
##   file: output path; "default" derives the name from the software/step
##   silent: suppress the confirmation message
##   overwrite: allow overwriting an existing file
##   step: (SYSargs2 only) name or position of the workflow step
##   new_col, new_col_output_index: (SYSargs2 only) names and output indices
##     of additional columns holding the expected output files
##   ...: passed on to writeLines()
writeTargetsout <- function (x, file = "default", silent = FALSE, overwrite = FALSE, step = NULL, new_col=NULL, new_col_output_index=NULL, ...) {
  ## inherits() replaces fragile class() string comparisons; also fixes the
  ## previously unbalanced quote in the error message.
  if(!inherits(x, "SYSargs") && !inherits(x, "SYSargs2")) stop("Argument 'x' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2'")
  ## SYSargs class
  if(inherits(x, "SYSargs")) {
    targets <- targetsout(x)
    software <- software(x)
    if(file == "default") {
      file <- paste("targets_", software, ".txt", sep = "")
      file <- gsub(" {1,}", "_", file)
    }
    headerlines <- targetsheader(x)
    ## SYSargs2 class
  } else if(inherits(x, "SYSargs2")) {
    if(is.null(step))
      stop(paste("Argument 'step' needs to be assigned one of the following values:",
                 paste(names(x$clt), collapse = ", "), "OR the corresponding position"))
    if(all(!is.null(step) & is.character(step) & !any(names(x$clt) %in% step)))
      stop(paste("Argument 'step' can only be assigned one of the following values:",
                 paste(names(x$clt), collapse = ", "), "OR the corresponding position"))
    if(all(!is.null(step) & is.numeric(step) & !any(seq_along(names(x$clt)) %in% step)))
      stop(paste("Argument 'step' can only be assigned one of the following position:",
                 paste(seq_along(names(x$clt)), collapse = ", "), "OR the corresponding names"))
    targets <- targets.as.df(targets(x))
    ## Add output file columns to the targets table
    if ((!is.null(new_col) & is.null(new_col_output_index)) |
        (is.null(new_col) & !is.null(new_col_output_index)) |
        (is.null(new_col) & is.null(new_col_output_index))){
      cat("One of 'new_col' and 'new_col_output_index' is null. It is using default column naming and adding all the output files expected, and each one will be written in a different column. \n")
      for (i in seq_len(length(output(x)[[1]][[step]]))){
        pout <- sapply(names(output(x)), function(y) normalizePath(output(x)[[y]][[step]][[i]]), simplify = FALSE)
        targets[[paste0(cwlfiles(x)$steps, "_", i)]] <- as.character(pout)
      }
    } else if(!is.null(new_col) & !is.null(new_col_output_index)){
      if(any(length(output(x)[[1]][[step]]) < new_col_output_index) | any(new_col_output_index < 1)) {
        stop(paste0("'new_col_output_index' argument needs to be equal or bigger than 1 and smaller than ", length(output(x)[[1]][[1]]), ", the maximum number of outputs files." ))
      }
      if(length(new_col) != length(new_col_output_index)){
        stop("'new_col' should have the same length as 'new_col_output_index'")
      }
      for(i in seq_along(new_col)){
        pout <- sapply(names(output(x)), function(y) normalizePath(output(x)[[y]][[step]][[new_col_output_index[i]]]), simplify = FALSE)
        targets[[as.character(new_col[i])]] <- as.character(pout)
      }
    }
    ## Workflow and step name (strip file extensions)
    software <- strsplit(basename(cwlfiles(x)$cwl), split = "\\.")[[1]][1]
    if(is.character(step)) {
      step <- strsplit(step, split = "\\.")[[1]][1]
    } else {
      step <- strsplit(names(x$clt)[step], split = "\\.")[[1]][1]
    }
    if(file == "default") {
      file <- paste("targets_", step, ".txt", sep = "")
      file <- gsub(" {1,}", "_", file)
    }
    headerlines <- targetsheader(x)[[1]]
  }
  if(file.exists(file) & overwrite == FALSE) stop(paste("I am not allowed to overwrite files; please delete existing file:", file, "or set 'overwrite=TRUE'"))
  ## Write header lines followed by tab-delimited targets rows
  targetslines <- c(paste(colnames(targets), collapse = "\t"), apply(targets, 1, paste, collapse = "\t"))
  writeLines(c(headerlines, targetslines), file, ...)
  if(silent != TRUE) cat("\t", "Written content of 'targetsout(x)' to file:", file, "\n")
}
## Usage:
# writeTargetsout(x=args, file="default") ## SYSargs class
# writeTargetsout(x=WF, file="default", step=1, new_col = "FileName1", new_col_output_index = 1) ## SYSargs2 class
##############################################################################
## Function to run NGS aligners including sorting and indexing of BAM files ##
##############################################################################
## Run command-line tools stored in a SYSargs or SYSargs2 object, including
## optional conversion of SAM outputs to sorted/indexed BAM files. Commands
## are skipped for samples whose expected output files already exist (unless
## force=TRUE, SYSargs2 only). Command lines and tool stdout are appended to
## 'submitargs<runid>*' log files under the results directory.
## Arguments:
##   args: SYSargs or SYSargs2 object
##   runid: suffix used in the log file names
##   make_bam: post-process alignment outputs to sorted/indexed BAM
##   del_sam: delete SAM files after successful BAM conversion
##   dir, dir.name: (SYSargs2) move outputs into per-sample subdirectories
##   force: (SYSargs2) rerun commands even if outputs exist
## Returns: named logical vector (SYSargs) or updated SYSargs2 object.
runCommandline <- function(args, runid="01", make_bam=TRUE, del_sam=TRUE, dir=FALSE, dir.name=NULL, force=FALSE, ...) {
  if(any(nchar(gsub(" {1,}", "", modules(args))) > 0)) {
    ## Check if "Environment Modules" is installed in the system
    ## "Environment Modules" is not available
    if(suppressWarnings(system("modulecmd bash -V", ignore.stderr = TRUE, ignore.stdout = TRUE))!=1) {
      message("Message: 'Environment Modules is not available. Please make sure to configure your PATH environment variable according to the software in use.'", "\n")
    } else {
      ## "Environment Modules" is available; proceed with the module load
      if(suppressWarnings(system("modulecmd bash -V", ignore.stderr = TRUE, ignore.stdout = TRUE))==1) { # Returns TRUE if module system is present.
        for(j in modules(args)) moduleload(j) # loads specified software from module system
      }
    }
  }
  ## SYSargs class
  if(class(args)=="SYSargs") {
    commands <- sysargs(args)
    completed <- file.exists(outpaths(args))
    names(completed) <- outpaths(args)
    logdir <- results(args)
    for(i in seq(along=commands)) {
      ## Run alignments only for samples for which no BAM file is available.
      if(as.logical(completed)[i]) {
        next()
      } else {
        ## Append command line to submitargs<runid> log file
        cat(commands[i], file=paste(logdir, "submitargs", runid, sep=""), sep = "\n", append=TRUE)
        ## Split command line into executable and its arguments
        command <- gsub(" .*", "", as.character(commands[i]))
        commandargs <- gsub("^.*? ", "",as.character(commands[i]))
        ## Execute system command; note: BWA needs special treatment in stderr handling since it writes
        ## some stderr messages to sam file if used with system2()
        if(software(args) %in% c("bwa aln", "bwa mem")) {
          stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=FALSE)
        } else if(software(args) %in% c("bash_commands")) {
          stdout <- system(paste(command, commandargs))
        } else {
          stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=TRUE)
        }
        ## Append command line and captured stdout to submitargs<runid>_log
        cat(commands[i], file=paste(logdir, "submitargs", runid, "_log", sep=""), sep = "\n", append=TRUE)
        cat(unlist(stdout), file=paste(logdir, "submitargs", runid, "_log", sep=""), sep = "\n", append=TRUE)
        ## Conditional postprocessing of results
        if(make_bam==TRUE) {
          if(grepl(".sam$", outfile1(args)[i])) { # If output is *.sam file (e.g. Bowtie2)
            asBam(file=outfile1(args)[i], destination=gsub("\\.sam$", "", outfile1(args)[i]), overwrite=TRUE, indexDestination=TRUE)
            if(del_sam==TRUE){
              unlink(outfile1(args)[i])
            } else if(del_sam==FALSE){
              dump <- "do nothing"
            }
          } else if(grepl("vcf$|bcf$|xls$|bed$", outpaths(args)[i])) {
            dump <- "do nothing"
          } else { # If output is unindexed *.bam file (e.g. Tophat2)
            sortBam(file=names(completed[i]), destination=gsub("\\.bam$", "", names(completed[i])))
            indexBam(names(completed[i]))
          }
        }
      }
    }
    ## NOTE(review): gsub() here operates on the logical vector returned by
    ## file.exists(), not on file names; retained unchanged, but the pattern
    ## arguments appear to have no effect on TRUE/FALSE values — confirm intent.
    bamcompleted <- gsub("sam$", "bam$", file.exists(outpaths(args)))
    names(bamcompleted) <- SampleName(args)
    cat("Missing alignment results (bam files):", sum(!as.logical(bamcompleted)), "\n"); cat("Existing alignment results (bam files):", sum(as.logical(bamcompleted)), "\n")
    return(bamcompleted)
    ## SYSargs2 class ##
  } else if(class(args)=="SYSargs2") {
    ## Workflow name, derived from the CWL file name
    cwl.wf <- strsplit(basename(cwlfiles(args)$cwl), split="\\.")[[1]][1]
    ## Results folder must come from the yml template or from dir.name
    if(is.null(args$yamlinput$results_path$path)) {
      if(is.null(dir.name)) {
        stop("argument 'dir.name' missing. The argument can only be assigned 'NULL' when directory name is provided in the yml template. The argument should be assigned as a character vector of length 1")
      }
    }
    if(is.null(dir.name)) {
      logdir <- normalizePath(args$yamlinput$results_path$path)
    } else {
      logdir <- paste(getwd(), "/results/", sep="")
    }
    args.return <- args
    ## Check what expected outputs have been generated
    if(make_bam==FALSE){
      completed <- output(args)
      outputList <- as.character()
      for(i in seq_along(output(args))){
        for(j in seq_along(output(args)[[i]])){
          completed[[i]][[j]] <- file.exists(output(args)[[i]][[j]])
          names(completed[[i]][[j]]) <- output(args)[[i]][[j]]
          outputList <- c(outputList, output(args)[[i]][[j]])
        }
      }
      if(length(output(args)[[1]][[1]])==1){
        names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]]))
      } else if(length(output(args)[[1]][[1]])>1){
        names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]][[1]]))
      }
    } else if(make_bam==TRUE) {
      if(any(grepl("samtools", names(clt(args))))){ stop("argument 'make_bam' should be 'FALSE' when using the workflow with 'SAMtools'")}
      ## Expected outputs with .sam extensions replaced by .bam
      args1 <- output_update(args, dir=FALSE, replace=TRUE, extension=c(".sam", ".bam"))
      completed <- output(args1)
      outputList <- as.character()
      for(i in seq_along(output(args1))){
        for(j in seq_along(output(args1)[[i]])){
          completed[[i]][[j]] <- file.exists(output(args1)[[i]][[j]])
          names(completed[[i]][[j]]) <- output(args1)[[i]][[j]]
          outputList <- c(outputList, output(args1)[[i]][[j]])
          ## Also expect a .bam.bai index file for every BAM output
          if(any(grepl(".bam", output(args1)[[i]][[j]]))){
            for(k in which(grepl(".bam", output(args1)[[i]][[j]]))){
              outputList <- c(outputList, paste0(gsub("\\.bam$", "", output(args1)[[i]][[j]][k]), ".bam.bai"))
            }
          }
        }
      }
      if(length(output(args)[[1]][[1]])==1){
        names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]])+1)
      } else if(length(output(args)[[1]][[1]])>1){
        names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]][[1]])+1)
      }
      # names(outputList) <- rep(names(output(args)), each=length(output(args)[[1]])+1)
      args.return <- output_update(args.return, dir=FALSE, replace=TRUE, extension=c(".sam", ".bam"))
    }
    for(i in seq_along(cmdlist(args))){
      for(j in seq_along(cmdlist(args)[[i]])){
        ## Run the commandline only for samples for which no output file is available.
        if(all(force==FALSE & all(as.logical(completed[[i]][[j]])))) {
          next()
        } else {
          ## Append command line to submitargs<runid> log file
          cat(cmdlist(args)[[i]][[j]], file=paste(logdir, "/submitargs", runid, "_", cwl.wf, sep=""), fill=TRUE, labels=paste0(names(cmdlist(args))[[i]], ":"), append=TRUE)
          ## Split command line into executable and its arguments
          command <- gsub(" .*", "", as.character(cmdlist(args)[[i]][[j]]))
          commandargs <- gsub("^.*? ", "",as.character(cmdlist(args)[[i]][[j]]))
          ## Check if the command is in the PATH
          if(!command == c("bash")){
            tryCatch(system(command, ignore.stdout = TRUE, ignore.stderr = TRUE), warning=function(w) cat(paste0("ERROR: ", "\n", command, ": command not found. ", '\n', "Please make sure to configure your PATH environment variable according to the software in use."), "\n"))
          }
          ## Run executable; bwa/bash and shell-variable commands need system()
          if(command %in% "bwa") {
            stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=FALSE)
          } else if(command %in% c("bash")) {
            stdout <- system(paste(command, commandargs))
          } else if(isTRUE(grep('\\$', command)==1)) {
            stdout <- system(paste(command, commandargs))
          } else {
            stdout <- system2(command, args=commandargs, stdout=TRUE, stderr=TRUE)
          }
          ## Append command line and captured stdout to submitargs<runid>_log
          cat(cmdlist(args)[[i]][[j]], file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), fill=TRUE, labels=paste0(names(cmdlist(args))[[i]], ":"), sep = "\n", append=TRUE)
          cat(unlist(stdout), file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), sep = "\n", append=TRUE)
        }
        cat("################", file=paste(logdir, "/submitargs", runid, "_", cwl.wf, "_log", sep=""), sep = "\n", append=TRUE)
        if(make_bam==TRUE) {
          sam_files <- grepl(".sam$", output(args)[[i]][[j]])
          others_files <- grepl("vcf$|bcf$|xls$|bed$", output(args)[[i]][[j]])
          completed.bam <- grepl(".bam$", output(args)[[i]][[j]])
          ## Convert SAM outputs to indexed BAM, optionally deleting the SAM
          if(any(sam_files)){
            for(k in which(sam_files)){
              Rsamtools::asBam(file=output(args)[[i]][[j]][k], destination=gsub("\\.sam$", "", output(args)[[i]][[j]][k]), overwrite=TRUE, indexDestination=TRUE)
              if(del_sam==TRUE){
                unlink(output(args)[[i]][[j]][k])
              } else if(del_sam==FALSE){
                dump <- "do nothing"
              }
            } } else if(any(others_files)){
            dump <- "do nothing"
          }
          if(any(completed.bam)){ # If output is unindexed *.bam file (e.g. Tophat2)
            for(k in which(completed.bam)){
              Rsamtools::sortBam(file=output(args)[[i]][[j]][k], destination=gsub("\\.bam$", "", output(args)[[i]][[j]][k]))
              Rsamtools::indexBam(output(args)[[i]][[j]][k])
            }
          }
        }
      }
    }
    ## Create per-sample subfolders recursively and move logs/outputs there
    if(dir==TRUE){
      if(!is.null(dir.name)){
        cwl.wf <- dir.name
      }
      for(i in seq_along(names(cmdlist(args)))){
        full_path <- paste0(logdir, "/", cwl.wf, "/", names(cmdlist(args)[i]))
        if(dir.exists(full_path)==FALSE){
          dir.create(full_path, recursive = TRUE) }
      }
      if(dir.exists(paste0(logdir, "/", cwl.wf, "/_logs/"))==FALSE){
        dir.create(paste0(logdir, "/", cwl.wf, "/_logs/"), recursive = TRUE) }
      ## Move log files into the _logs subfolder
      files_log <- list.files(path=logdir, pattern = "submitargs")
      for(i in seq_along(files_log)){
        file.rename(from=paste0(logdir, "/", files_log[i]), to=paste0(logdir, "/", cwl.wf, "/_logs/", files_log[i]))
      }
      ## Move output files into their per-sample subfolders
      outputList_new <- as.character()
      for(i in seq_along(outputList)){
        if(file.exists(outputList[i])){
          name <- strsplit(outputList[i], split="\\/")[[1]]
          name <- name[length(name)]
          file.rename(from=outputList[i], to=paste0(logdir, "/", cwl.wf, "/", names(outputList[i]), "/", name))
          outputList_new <- c(outputList_new, paste0(logdir, "/", cwl.wf, "/", names(outputList[i]), "/", name))
        } else if(file.exists(outputList[i])==FALSE){
          dump <- "No such file or directory"
        }
      }
      outputList <- outputList_new
      args.return <- output_update(args.return, dir=TRUE, replace=FALSE)
    }
    ## Report which expected output files exist
    output_completed <- as.character()
    for(i in seq_along(outputList)){
      output_completed[i] <- file.exists(outputList[i])
    }
    names(output_completed) <- outputList
    cat("Missing expected outputs files:", sum(!as.logical(output_completed)), "\n"); cat("Existing expected outputs files:", sum(as.logical(output_completed)), "\n")
    print(output_completed)
    return(args.return)
    #return(output_completed)
  }
}
## Usage:
# WF <- runCommandline(WF) # creates the files in the ./results folder
# WF <- runCommandline(WF, dir=TRUE) # creates the files in the ./results/workflowName/Samplename folder
# WF <- runCommandline(WF, make_bam = FALSE, dir=TRUE) ## For hisat2-mapping.cwl template
############################################################################################
## batchtools-based function to submit runCommandline jobs to queuing system of a cluster ##
############################################################################################
## The advantage of this function is that it should work with most queuing/scheduling systems such as SLURM, Troque, SGE, ...
## Submit FUN (default: runCommandline) jobs, one per sample in 'args', to a
## cluster scheduler via batchtools. Should work with most queuing systems
## (SLURM, Torque, SGE, ...).
## Arguments:
##   args: SYSargs or SYSargs2 object
##   FUN: function applied to each subsetted args object
##   more.args: list of extra arguments forwarded to FUN
##   conffile, template: batchtools configuration and scheduler template files
##   Njobs: number of job chunks to submit
##   runid: suffix used for the registry directory name
##   resourceList: scheduler resources, e.g. list(walltime=120, ncpus=4)
## Returns: the batchtools registry (use getStatus(reg)/waitForJobs(reg)).
clusterRun <- function(args, FUN = runCommandline, more.args=list(args=args, make_bam=TRUE), conffile = ".batchtools.conf.R", template = "batchtools.slurm.tmpl", Njobs, runid = "01", resourceList) {
  ## Validity checks of inputs. inherits()/is.function()/is.list() replace
  ## the previous class() string comparisons, which are fragile (and the
  ## '!class(x)=="list"' form relied on non-obvious operator precedence).
  if(!inherits(args, "SYSargs") && !inherits(args, "SYSargs2")) stop("Argument 'args' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2'")
  if(!is.function(FUN)) stop("Value assigned to 'FUN' argument is not an object of class function.")
  if(!file.exists(conffile)) stop("Need to point under 'conffile' argument to proper config file. See more information here: https://mllg.github.io/batchtools/reference/makeRegistry.html. Note: in this file *.tmpl needs to point to a valid template file.")
  if(!file.exists(template)) stop("Need to point under 'template' argument to proper template file. Sample template files for different schedulers are available here: https://github.com/mllg/batchtools/blob/master/inst/templates/")
  if(!is.list(more.args)) stop("'more.args' needs to be object of class 'list'.")
  if(any(!names(more.args) %in% names(as.list(formals(FUN))))) stop(paste("The list of arguments assigned to 'more.args' can only be the following arguments defined in the function 'FUN':", paste(names(as.list(formals(FUN))), collapse=", ")))
  ## SYSargs class
  if(inherits(args, "SYSargs")) {
    path <- normalizePath(results(args))
    args.f <- seq(along = args)
    ## SYSargs2 class
  } else if (inherits(args, "SYSargs2")) {
    path <- normalizePath(args$yamlinput$results_path$path)
    args.f <- seq(along=cmdlist(args))
  }
  ## batchtools routines: map one job per sample index, chunk into Njobs
  ## submissions, and submit with the requested resources.
  f <- function(i, args, ...) FUN(args=args[i], ...)
  logdir1 <- paste0(path, "/submitargs", runid, "_btdb_", paste(sample(0:9, 4), collapse = ""))
  reg <- makeRegistry(file.dir = logdir1, conf.file = conffile, packages = "systemPipeR")
  ids <- batchMap(fun = f, args.f, more.args = more.args, reg=reg)
  chunk <- chunk(ids$job.id, n.chunks = Njobs, shuffle = FALSE)
  ids$chunk <- chunk
  done <- submitJobs(ids=ids, reg=reg, resources = resourceList)
  return(reg)
}
## Usage:
# resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
# reg <- clusterRun(args, conffile = ".batchtools.conf.R", template = "batchtools.slurm.tmpl", Njobs=18, runid="01", resourceList=resources)
# getStatus(reg=reg)
# waitForJobs(reg=reg)
########################
## Read preprocessing ##
########################
## Stream-based read preprocessing. The character string 'Fct' is evaluated
## on successive chunks of 'batchsize' reads streamed from the input FASTQ
## files; inside the expression the current chunk (a ShortRead object) is
## available as 'fq'. Processed reads are appended to the output files of
## 'args'. For paired-end data, only read pairs retained in BOTH processed
## files are written out.
## NOTE(review): 'Fct' is executed via eval(parse(text=...)); only pass
## trusted expressions.
preprocessReads <- function(args, Fct, batchsize=100000, overwrite=TRUE, ...) {
  if(all(class(args)!="SYSargs" & class(args)!="SYSargs2")) stop("Argument 'args' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2")
  if(class(Fct)!="character") stop("Argument 'Fct' needs to be of class character")
  if(class(args)=="SYSargs"){
    colnames_args <- colnames(targetsout(args)) # SYSargs
    outpaths <- outpaths(args) # SYSargs
    targets_in <- targetsin(args)
  } else if (class(args)=="SYSargs2") {
    colnames_args <- colnames(targets.as.df(args$targets)) # SYSargs2
    outpaths <- subsetWF(args = args, slot = "output", subset = 1, index=1)
    targets_in <- targets.as.df(args$targets)
  }
  ## Run function in loop over all fastq files
  ## Single end fastq files
  if(!all(c("FileName1", "FileName2") %in% colnames_args)) {
    for(i in seq(along=args)) {
      outfile <- outpaths[i]
      ## Delete existing fastq files with same names, since writeFastq will append to them
      if(overwrite==TRUE) {
        if(any(file.exists(outfile))) unlink(outfile)
      } else {
        if(any(file.exists(outfile))) stop(paste("File", outfile , "exists. Please delete file first or set overwrite=TRUE."))
      }
      ## Run preprocessor function with FastqStreamer
      counter <- 0
      f <- FastqStreamer(infile1(args)[i], batchsize)
      while(length(fq <- yield(f))) {
        fqtrim <- eval(parse(text=Fct))
        writeFastq(fqtrim, outfile, mode="a", ...)
        counter <- counter + length(fqtrim)
        cat(counter, "processed reads written to file:", outfile, "\n")
      }
      close(f)
    }
  }
  ## Paired end fastq files
  if(all(c("FileName1", "FileName2") %in% colnames_args)) {
    for(i in seq(along=args)) {
      p1 <- as.character(targets_in$FileName1[i])
      p2 <- as.character(targets_in$FileName2[i])
      if(class(args)=="SYSargs"){
        p1out <- as.character(targetsout(args)$FileName1[i])
        p2out <- as.character(targetsout(args)$FileName2[i])
      } else if (class(args)=="SYSargs2") {
        p1out <- args$output[[i]][[1]][[1]]
        p2out <- args$output[[i]][[1]][[2]]
      }
      ## Delete existing fastq files with same names, since writeFastq will append to them
      if(overwrite==TRUE) {
        if(any(file.exists(p1out))) unlink(p1out)
        if(any(file.exists(p2out))) unlink(p2out)
      } else {
        if(any(file.exists(p1out))) stop(paste("File", p1out , "exists. Please delete file first or set overwrite=TRUE."))
        if(any(file.exists(p2out))) stop(paste("File", p2out , "exists. Please delete file first or set overwrite=TRUE."))
      }
      ## Run preprocessor function with FastqStreamer on both files in parallel chunks
      counter1 <- 0
      counter2 <- 0
      f1 <- FastqStreamer(p1, batchsize)
      f2 <- FastqStreamer(p2, batchsize)
      while(length(fq1 <- yield(f1))) {
        fq2 <- yield(f2)
        if(length(fq1)!=length(fq2)) stop("Paired end files cannot have different read numbers.")
        ## Process p1
        fq <- fq1 # for simplicity in eval
        fq1trim <- eval(parse(text=Fct))
        ## Positions of p1 reads surviving the preprocessor
        index1 <- as.character(id(fq1)) %in% as.character(id(fq1trim))
        names(index1) <- seq(along=index1)
        index1 <- names(index1[index1])
        ## Process p2
        fq <- fq2 # for simplicity in eval
        fq2trim <- eval(parse(text=Fct))
        ## Positions of p2 reads surviving the preprocessor
        index2 <- as.character(id(fq2)) %in% as.character(id(fq2trim))
        names(index2) <- seq(along=index2)
        index2 <- names(index2[index2])
        ## Export only read pairs retained in both processed files
        indexpair1 <- index1 %in% index2
        writeFastq(fq1trim[indexpair1], p1out, mode="a", ...)
        indexpair2 <- index2 %in% index1
        writeFastq(fq2trim[indexpair2], p2out, mode="a", ...)
        counter1 <- counter1 + sum(indexpair1)
        cat(counter1, "processed reads written to file:", p1out, "\n")
        counter2 <- counter2 + sum(indexpair2)
        cat(counter2, "processed reads written to file:", p2out, "\n")
      }
      close(f1)
      close(f2)
    }
  }
}
## Usage:
# preprocessReads(args=args, Fct="trimLRPatterns(Rpattern="GCCCGGGTAA", subject=fq)", batchsize=100000, overwrite=TRUE, compress=TRUE)
##################################################################
## Function to create sym links to bam files for viewing in IGV ##
##################################################################
## Create symbolic links to BAM files (and their .bai indices) for viewing in
## IGV, and write the corresponding URLs to 'urlfile'.
## Arguments:
##   sysargs: SYSargs or SYSargs2 object providing BAM paths and sample names
##   command: shell command used to create the links (default "ln -s")
##   htmldir: character vector of length 2, e.g. c("~/.html/", "somedir/")
##   ext: extensions of the alignment file and its index
##   urlbase: base URL prepended to the links written to 'urlfile'
##   urlfile: output file receiving the IGV URLs
symLink2bam <- function(sysargs, command="ln -s", htmldir, ext=c(".bam", ".bai"), urlbase, urlfile) {
  ## Validity check; inherits() replaces class() comparisons and the
  ## previously unbalanced quote in the error message is fixed.
  if(!inherits(sysargs, "SYSargs") && !inherits(sysargs, "SYSargs2")) stop("Argument 'sysargs' needs to be assigned an object of class 'SYSargs' OR 'SYSargs2'")
  ## SYSargs class
  if(inherits(sysargs, "SYSargs")) {
    bampaths <- outpaths(sysargs)
    symname <- SampleName(sysargs)
    ## SYSargs2 class
  } else if (inherits(sysargs, "SYSargs2")) {
    bampaths <- subsetWF(args = sysargs, slot = "output", subset = 1, index=1)
    symname <- sysargs$targets[[1]][[2]]
    for(i in seq(along=sysargs)) {
      symname[i] <- sysargs$targets[[i]][[2]]
    }
  }
  ## Create URL file
  urls <- paste(urlbase, htmldir[2], symname, ext[1], "\t", symname, sep="")
  writeLines(urls, urlfile)
  ## Create corresponding sym links (one for the BAM, one for its index)
  dir.create(paste(htmldir, collapse=""))
  symname <- rep(symname, each=2)
  symname <- paste(symname, c(ext[1], paste(ext, collapse="")), sep="")
  bampaths2 <- as.vector(t(cbind(bampaths, paste(bampaths, ext[2], sep=""))))
  symcommands <- paste(command, " ", bampaths2, " ", paste(htmldir, collapse=""), symname, sep="")
  for(i in symcommands) system(i)
}
## Usage:
# symLink2bam(sysargs=args, command="ln -s", htmldir=c("~/.html/", "somedir/"), ext=c(".bam", ".bai"), urlbase="http://cluster.hpcc.ucr.edu/~tgirke/", urlfile="IGVurl.txt")
#####################
## Alignment Stats ##
#####################
## Alignment statistics: total reads (from the FASTQ inputs), total and
## primary alignments (from the BAM outputs) and the corresponding
## percentages per sample. Only samples with an existing BAM file are
## reported.
## Arguments:
##   args: SYSargs or SYSargs2 object
##   output_index: (SYSargs2 only) which output slot holds the BAM files
## Returns: data.frame with one row per sample.
alignStats <- function(args, output_index = 1) {
  fqpaths <- infile1(args)
  ## SYSargs class
  if(class(args)=="SYSargs") {
    bampaths <- outpaths(args)
    ## SYSargs2 class: collect BAM paths, rejecting SAM and skipping sorted.bam
  } else if (class(args)=="SYSargs2") {
    output.all <- subsetWF(args, slot = "output", subset = 1, index = output_index)
    bampaths <- as.character()
    for(i in seq_along(output.all)){
      for(j in seq_along(output.all[[i]])){
        if(grepl(".sam$", output.all[[i]][[j]])==TRUE & grepl(".bam$", output.all[[i]][[j]])==FALSE){
          stop("Please provide files in BAM format. Also, check 'output_update' function, if the BAM files were previously generated.") }
        else if(grepl(".bam$", output.all[[i]][[j]])==TRUE & grepl("sorted.bam$", output.all[[i]][[j]])==FALSE){
          bampaths <- c(bampaths, output.all[[i]][[j]]) }
      }
    }
    names(bampaths) <- names(output.all)
  }
  ## Restrict to samples whose BAM file exists
  bamexists <- file.exists(bampaths)
  fqpaths <- fqpaths[bamexists]
  bampaths <- bampaths[bamexists]
  ## Obtain total read number from FASTQ files (4 lines per read)
  Nreads <- countLines(fqpaths)/4
  names(Nreads) <- names(fqpaths)
  ## If reads are PE multiply by 2 as a rough approximation
  ## (the second FASTQ file is not counted separately)
  if(nchar(infile2(args))[1] > 0) Nreads <- Nreads * 2
  ## Obtain total number of alignments from BAM files
  bfl <- BamFileList(bampaths, yieldSize=50000, index=character())
  param <- ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE))
  Nalign <- countBam(bfl, param=param)
  ## Obtain number of primary alignments from BAM files
  param <- ScanBamParam(flag=scanBamFlag(isSecondaryAlignment=FALSE, isUnmappedQuery=FALSE))
  Nalignprim <- countBam(bfl, param=param)
  statsDF <- data.frame(FileName=names(Nreads),
                        Nreads=Nreads,
                        Nalign=Nalign$records,
                        Perc_Aligned=Nalign$records/Nreads*100,
                        Nalign_Primary=Nalignprim$records,
                        Perc_Aligned_Primary=Nalignprim$records/Nreads*100
  )
  ## Rename the read-count column for PE data to flag the 2x approximation
  if(nchar(infile2(args))[1] > 0) colnames(statsDF)[which(colnames(statsDF)=="Nreads")] <- "Nreads2x"
  return(statsDF)
}
## Usage:
# read_statsDF <- alignStats(args=args)
########################
## RPKM Normalization ##
########################
## Convert raw read counts for one sample to RPKM (reads per kilobase of exon
## model per million mapped reads).
##   counts: vector of raw counts per gene
##   ranges: per-gene exon ranges (presumably a GRangesList — confirm with
##           caller), same order as 'counts'
returnRPKM <- function(counts, ranges) {
    geneLengthsInKB <- sum(width(reduce(ranges)))/1000 # Length of exon union per gene in kbp
    millionsMapped <- sum(counts)/1e+06 # Factor for converting to millions of mapped reads
    rpm <- counts/millionsMapped # RPM: reads per million mapped reads (comment fixed; previously mislabeled as RPK)
    rpkm <- rpm/geneLengthsInKB # RPKM: reads per kilobase of exon model per million mapped reads
    return(rpkm)
}
## Usage:
# countDFrpkm <- apply(countDF, 2, function(x) returnRPKM(counts=x, ranges=eByg))
###############################################
## Read Sample Comparisons from Targets File ##
###############################################
## Parses sample comparisons from <CMP> line(s) in targets.txt file or SYSars object.
## All possible comparisons can be specified with 'CMPset: ALL'.
## Parse sample comparisons from <CMP> line(s) in a targets file, SYSargs or
## SYSargs2 object. All possible comparisons can be specified with 'CMPset: ALL'.
##   file:   path to a targets file, or a SYSargs/SYSargs2 object
##   format: "vector" returns per-set character vectors "A-B";
##           "matrix" returns per-set two-column character matrices
##   delim:  delimiter between the two sample names ("vector" format only)
readComp <- function(file, format="vector", delim="-") {
    if(!format %in% c("vector", "matrix")) stop("Argument format can only be assigned: vector or matrix!")
    ## Obtain the header lines containing the <CMP> definitions
    if(inherits(file, "SYSargs")) {
        if(length(targetsheader(file))==0) stop("Input has no targets header lines.")
        comp <- targetsheader(file)
    } else if(inherits(file, "SYSargs2")) {
        if(length(targetsheader(file)[[1]])==0) stop("Input has no targets header lines.")
        comp <- targetsheader(file)[[1]]
    } else {
        comp <- readLines(file)
    }
    comp <- comp[grepl("<CMP>", comp)]
    comp <- gsub("#.*<CMP>| {1,}", "", comp)
    comp <- gsub("\t", "", comp); comp <- gsub("^\"|\"$", "", comp) # Often required if Excel is used for editing targets file
    comp <- strsplit(comp, ":|,")
    names(comp) <- lapply(seq_along(comp), function(x) comp[[x]][1])
    comp <- sapply(names(comp), function(x) comp[[x]][-1], simplify=FALSE)
    ## Check whether all samples are present in Factor column of targets file
    checkvalues <- unique(unlist(strsplit(unlist(comp), "-")))
    checkvalues <- checkvalues[checkvalues!="ALL"]
    if(inherits(file, "SYSargs")) {
        all <- unique(as.character(targetsin(file)$Factor))
    } else if(inherits(file, "SYSargs2")) {
        ## Bug fix: previously referenced the global object 'args_bam' here
        ## instead of the 'file' argument.
        all <- unique(as.character(targets.as.df(targets(file))$Factor))
    } else {
        all <- unique(as.character(read.delim(file, comment.char = "#")$Factor))
    }
    if(any(!checkvalues %in% all)) stop(paste("The following samples are not present in Factor column of targets file:", paste(checkvalues[!checkvalues %in% all], collapse=", ")))
    ## Generate outputs; 'ALL' expands to every pairwise comparison
    allindex <- sapply(names(comp), function(x) any(grepl("ALL", comp[[x]])))
    if(any(allindex)) for(i in which(allindex)) comp[[i]] <- combn(all, m=2, FUN=paste, collapse=delim)
    if(format == "vector" & delim != "-") comp <- sapply(names(comp), function(x) gsub("-", delim, comp[[x]]), simplify=FALSE)
    if(format == "vector") return(comp)
    if(format == "matrix") return(sapply(names(comp), function(x) do.call("rbind", strsplit(comp[[x]], "-")), simplify=FALSE))
}
## Usage:
# cmp <- readComp("targets.txt", format="vector", delim="-")
# cmp <- readComp(args, format="vector", delim="-")
#################################
## Access module system from R ##
#################################
# S3 object serving as a namespace for the environment-module helper functions
myEnvModules <- structure(list(), class="EnvModules")
## Initialize the module system from the user's login profile: import the
## MODULE* environment variables from a login shell into this R session, then
## re-load the modules listed in LOADEDMODULES.
myEnvModules$init <- function(){
    # Module function assumes MODULEPATH and MODULEDIR are set in login profile
    # Get base environment from a login shell
    base_env <- strsplit(system('bash -l -c "env"', intern = TRUE), '\n')
    base_env <- strsplit(as.character(base_env), '=')
    ## Fix: pre-initialize so the loading loop below does not fail with an
    ## "object not found" error when LOADEDMODULES is absent from the profile.
    default_modules <- list(character(0))
    # Iterate through the base environment
    for (x in seq_along(base_env)) {
        key <- base_env[[x]][1]
        # Import only the module-related variables
        if (key %in% c("LOADEDMODULES", "MODULESHOME", "MODULEPATH", "MODULES_DIR", "IIGB_MODULES")) {
            if (key == "LOADEDMODULES") {
                default_modules <- strsplit(base_env[[x]][2], ":")
            } else {
                l <- list(base_env[[x]][2])
                names(l) <- key
                do.call(Sys.setenv, l)
            }
        }
    }
    # Process default modules only after the environment is set by the loop above
    for (module_name in default_modules[[1]]) {
        print(paste("Loading module", module_name))
        try(myEnvModules$load_unload("load", module_name))
    }
}
## Run `modulecmd bash avail` or `modulecmd bash list`; modulecmd itself
## prints the listing on stderr, so capturing stdout is a side effect only.
myEnvModules$avail_list <- function(action_type){
    cmd <- paste('modulecmd bash', action_type)
    try(listing <- system(cmd, intern = TRUE))
}
## Unload every module currently listed in the LOADEDMODULES environment
## variable (colon-separated list).
myEnvModules$clear <- function(action_type){
    current <- strsplit(Sys.getenv("LOADEDMODULES"), ":")[[1]]
    for (mod in current) {
        print(paste("Unloading module", mod))
        try(myEnvModules$load_unload("unload", mod))
    }
}
## Apply a module "load" or "unload" action to the current R session.
## Runs the low-level 'modulecmd bash' binary and replays the environment
## changes it emits (variable assignments and unset statements) through
## Sys.setenv()/Sys.unsetenv(). Load and unload are handled identically
## because modulecmd generates the appropriate statements for either action.
myEnvModules$load_unload <- function(action_type, module_name=""){
    module_name <- paste(module_name, collapse=' ')
    # Use the low level C binary for generating module environment variables
    try(module_vars <- system(paste('modulecmd bash',action_type, module_name),intern = TRUE))
    if (length(module_vars) > 0){
        for (y in seq(1,length(module_vars))) {
            # Split each output line into individual shell statements
            module_var <- strsplit(module_vars,";")
            # Iterate through all environment variables
            for (x in seq(1,length(module_var[[y]]))) {
                # Isolate one key=value pair
                evar <- module_var[[y]][x]
                # Skip bare "export VAR" statements; only assignments are processed
                if (length(grep('^ *export',evar)) == 0 && length(evar) > 0) {
                    # Separate key and value
                    evar <- strsplit(as.character(evar),'=')
                    # Strip all whitespace from the value
                    evar_val <- gsub('[[:space:]]','',evar[[1]][2])
                    # Remove dollar signs from the value
                    l <- list(gsub('\\$','',evar_val))
                    # Load dependent modules
                    # NOTE(review): recursive loading of dependent modules is
                    # intentionally disabled (call commented out below) — confirm.
                    if (length(grep('^ *module',evar[[1]][1])) > 0){
                        inner_module <- strsplit(evar[[1]][1]," ")
                        #myEnvModules$load_unload(inner_module[1][[1]][2],inner_module[1][[1]][3])
                    }
                    # "source" statements cannot be replayed in R; warn instead
                    else if (length(grep('^ *source',evar[[1]][1])) > 0){
                        warning(paste0("Module uses a bash script to initialize, some software may not function as expected:\n\t",evar[[1]][1]))
                    }
                    # Unset variables that need to be unset
                    else if(length(grep("^ *unset ",evar[[1]][1])) > 0){
                        evar <- gsub("^unset (.*)$","\\1",evar[[1]][1])
                        Sys.unsetenv(evar)
                    } else {
                        # Assign names to each value in list
                        names(l) <- evar[[1]][1]
                        # Set environment variable in current environment
                        do.call(Sys.setenv, l)
                    }
                }
            }
        }
    }
}
## Dispatch a module-system action: "load", "unload", "list", "avail",
## "clear" or "init". Initializes the module environment on first use.
module <- function(action_type, module_name=""){
    ## Fix: pre-initialize so a failing try() below cannot leave the name
    ## undefined (length(modulecmd_path) would then error).
    modulecmd_path <- character(0)
    # Check to see if modulecmd is in the current PATH
    try(suppressWarnings(modulecmd_path <- system("which modulecmd", intern=TRUE, ignore.stderr=TRUE)),
        silent=TRUE
    )
    # Only initialize the module system if it has not yet been initialized and modulecmd exists
    if (Sys.getenv('MODULEPATH') == "" && length(modulecmd_path) > 0) {
        myEnvModules$init()
    } else if (Sys.getenv('MODULEPATH') == "" && length(modulecmd_path) == 0) {
        stop("Could not find the installation of Environment Modules: \"modulecmd\". Please make sure to configure your PATH environment variable according to the software in use.")
    }
    switch(action_type,
        "load" = myEnvModules$load_unload(action_type, module_name),
        "unload" = myEnvModules$load_unload(action_type, module_name),
        "list" = myEnvModules$avail_list(action_type),
        "avail" = myEnvModules$avail_list(action_type),
        "clear" = myEnvModules$clear(action_type),
        "init" = myEnvModules$init(),
        stop("That action is not supported.")
    )
}
## Usage:
# module("load","tophat")
# module("load","tophat/2.1.1")
# module("list")
# module("avail")
# module("init")
# module("unload", "tophat")
# module("unload", "tophat/2.1.1")
#####################
## Legacy Wrappers ##
#####################
## Legacy wrapper: list the software available in the module system.
modulelist <- function() {
    module("avail")
}
## Legacy wrapper: load software from the module system.
## 'envir' is retained for backward compatibility and is not used.
moduleload <- function(module, envir="PATH") {
    module("load", module)
}
#######################################################################
## Run edgeR GLM with entire count matrix or subsetted by comparison ##
#######################################################################
## Run edgeR GLM analyses for the comparisons in 'cmp'.
## If independent=TRUE the count matrix is subsetted, filtered and normalized
## separately for each comparison; otherwise the full matrix is fitted once.
##   countDF: raw count matrix (genes x samples)
##   targets: targets data.frame with SampleName and Factor columns
##   cmp:     two-column comparison matrix (a length-2 vector is accepted)
##   paired:  optional blocking vector for paired designs (needs independent=TRUE)
##   mdsplot: non-empty string writes an MDS plot per comparison to ./results/
## Returns a data.frame cbinding the topTags() tables of all comparisons.
run_edgeR <- function(countDF, targets, cmp, independent=TRUE, paired=NULL, mdsplot="") {
    ## Fix: class(cmp) != "matrix" is a length-2 condition since R 4.0
    ## (matrix objects have class c("matrix", "array")); use is.matrix().
    if(!is.matrix(cmp) && length(cmp)==2) cmp <- t(as.matrix(cmp)) # If cmp is vector of length 2, convert it to matrix.
    samples <- as.character(targets$Factor); names(samples) <- paste(as.character(targets$SampleName), "", sep="")
    countDF <- countDF[, names(samples)]
    countDF[is.na(countDF)] <- 0
    edgeDF <- data.frame(row.names=rownames(countDF))
    group <- as.character(samples)
    if(independent==TRUE) {
        loopv <- seq_len(nrow(cmp))
    } else {
        loopv <- 1
    }
    for(j in loopv) {
        ## Filtering and normalization
        y <- DGEList(counts=countDF, group=group) # Constructs DGEList object
        if(independent == TRUE) {
            subset <- samples[samples %in% cmp[j,]]
            y <- y[, names(subset)]
            y$samples$group <- factor(as.character(y$samples$group))
        }
        keep <- rowSums(cpm(y)>1) >= 2; y <- y[keep, ]
        y <- calcNormFactors(y)
        ## Design matrix
        if(length(paired)==0) {
            design <- model.matrix(~0+y$samples$group, data=y$samples)
            colnames(design) <- levels(y$samples$group)
        } else {
            if(length(paired)>0 & independent==FALSE) stop("When providing values under 'paired' also set independent=TRUE")
            Subject <- factor(paired[samples %in% cmp[j,]]) # corrected Jun 2014 (won't change results)
            Treat <- y$samples$group
            design <- model.matrix(~Subject+Treat)
            levels(design) <- levels(y$samples$group)
        }
        ## Estimate dispersion
        y <- estimateGLMCommonDisp(y, design, verbose=TRUE) # Estimates common dispersions
        y <- estimateGLMTrendedDisp(y, design) # Estimates trended dispersions
        y <- estimateGLMTagwiseDisp(y, design) # Estimates tagwise dispersions
        fit <- glmFit(y, design) # Fits the negative binomial GLM for each tag (DGEGLM object).
        ## Contrast matrix is optional but makes analysis more transparent
        if(independent == TRUE) {
            mycomp <- paste(cmp[j,1], cmp[j,2], sep="-")
        } else {
            mycomp <- paste(cmp[,1], cmp[,2], sep="-")
        }
        if(length(paired)==0) contrasts <- makeContrasts(contrasts=mycomp, levels=design)
        for(i in seq_along(mycomp)) {
            if(length(paired)==0) {
                lrt <- glmLRT(fit, contrast=contrasts[,i]) # Likelihood ratio test for one contrast
            } else {
                lrt <- glmLRT(fit) # No contrast matrix with paired design
            }
            deg <- as.data.frame(topTags(lrt, n=length(rownames(y))))
            colnames(deg) <- paste(paste(mycomp[i], collapse="_"), colnames(deg), sep="_")
            edgeDF <- cbind(edgeDF, deg[rownames(edgeDF),])
        }
        if(nchar(mdsplot)>0) {
            pdf(paste("./results/sample_MDS_", paste(unique(subset), collapse="-"), ".pdf", sep=""))
            plotMDS(y)
            dev.off()
        }
    }
    return(edgeDF)
}
## Usage:
# cmp <- readComp(file=targetspath, format="matrix", delim="-")
# edgeDF <- run_edgeR(countDF=countDF, targets=targets, cmp=cmp[[1]], independent=TRUE, mdsplot="")
####################################################################
## Run DESeq2 with entire count matrix or subsetted by comparison ##
####################################################################
## Run DESeq2 for the comparisons in 'cmp'.
## If independent=TRUE a DESeqDataSet is built and fitted separately for each
## comparison; otherwise the full count matrix is fitted once.
##   countDF: raw count matrix (genes x samples)
##   targets: targets data.frame with SampleName and Factor columns
##   cmp:     two-column comparison matrix (a length-2 vector is accepted)
## Returns a data.frame cbinding the results() tables of all comparisons;
## log2FoldChange/padj are renamed to logFC/FDR for consistency with run_edgeR().
run_DESeq2 <- function(countDF, targets, cmp, independent=FALSE) {
    ## Fix: class(cmp) != "matrix" is a length-2 condition since R 4.0
    ## (matrix objects have class c("matrix", "array")); use is.matrix().
    if(!is.matrix(cmp) && length(cmp)==2) cmp <- t(as.matrix(cmp)) # If cmp is vector of length 2, convert it to matrix.
    samples <- as.character(targets$Factor); names(samples) <- paste(as.character(targets$SampleName), "", sep="")
    countDF <- countDF[, names(samples)]
    countDF[is.na(countDF)] <- 0
    deseqDF <- data.frame(row.names=rownames(countDF))
    if(independent==TRUE) {
        loopv <- seq_len(nrow(cmp))
    } else {
        loopv <- 1
    }
    for(j in loopv) {
        if(independent==TRUE) {
            ## Create subsetted DESeqDataSet object
            subset <- samples[samples %in% cmp[j,]]
            countDFsub <- countDF[, names(subset)]
            dds <- DESeq2::DESeqDataSetFromMatrix(countData=as.matrix(countDFsub), colData=data.frame(condition=subset), design = ~ condition)
            mycmp <- cmp[j, , drop=FALSE]
        } else {
            ## Create full DESeqDataSet object
            dds <- DESeq2::DESeqDataSetFromMatrix(countData=as.matrix(countDF), colData=data.frame(condition=samples), design = ~ condition)
            mycmp <- cmp
        }
        ## Estimate of (i) size factors, (ii) dispersion, (iii) negative binomial GLM fitting and (iv) Wald statistics
        dds <- DESeq2::DESeq(dds, quiet=TRUE)
        for(i in seq_len(nrow(mycmp))) {
            ## Extract DEG results for one specific contrast from the DESeqDataSet
            res <- DESeq2::results(dds, contrast=c("condition", mycmp[i,]))
            ## Set NAs to reasonable values to avoid errors in downstream filtering steps
            res[is.na(res[,"padj"]), "padj"] <- 1
            res[is.na(res[,"log2FoldChange"]), "log2FoldChange"] <- 0
            deg <- as.data.frame(res)
            colnames(deg)[colnames(deg) %in% c("log2FoldChange", "padj")] <- c("logFC", "FDR")
            colnames(deg) <- paste(paste(mycmp[i,], collapse="-"), colnames(deg), sep="_")
            deseqDF <- cbind(deseqDF, deg[rownames(deseqDF),])
        }
    }
    return(deseqDF)
}
## Usage:
# cmp <- readComp(file=targetspath, format="matrix", delim="-")
# degseqDF <- run_DESeq2(countDF=countDF, targets=targets, cmp=cmp[[1]], independent=TRUE)
############################################
## Filter DEGs by p-value and fold change ##
############################################
## Filter DEGs by FDR and fold-change cutoffs.
##   degDF:  data.frame with per-comparison columns '<cmp>_logFC' and
##           '<cmp>_FDR' (as produced by run_edgeR/run_DESeq2)
##   filter: named vector c(Fold=<fold change cutoff>, FDR=<FDR cutoff in percent>)
##   plot:   if TRUE, print a stacked bar plot of up/down DEG counts (ggplot2)
## Returns a list with per-comparison gene IDs (UporDown, Up, Down) and a
## summary data.frame of the counts.
filterDEGs <- function(degDF, filter, plot=TRUE) {
    pval <- degDF[, grep("_FDR$", colnames(degDF)), drop=FALSE]
    log2FC <- degDF[, grep("_logFC$", colnames(degDF)), drop=FALSE]
    fdr_cutoff <- filter["FDR"]/100
    fc_cutoff <- log2(filter["Fold"])
    ## Helper: turn a logical selection matrix (genes x comparisons) into a
    ## per-comparison list of gene IDs. NAs count as "not selected".
    ## (Replaces three copy-pasted stanzas in the original.)
    deg_ids <- function(pf) {
        colnames(pf) <- gsub("_FDR", "", colnames(pf))
        pf[is.na(pf)] <- FALSE
        sapply(colnames(pf), function(x) rownames(pf[pf[,x,drop=FALSE],,drop=FALSE]), simplify=FALSE)
    }
    ## DEGs that are up or down regulated
    DEGlistUPorDOWN <- deg_ids(pval <= fdr_cutoff & (log2FC >= fc_cutoff | log2FC <= -fc_cutoff))
    ## DEGs that are up regulated
    DEGlistUP <- deg_ids(pval <= fdr_cutoff & log2FC >= fc_cutoff)
    ## DEGs that are down regulated
    DEGlistDOWN <- deg_ids(pval <= fdr_cutoff & log2FC <= -fc_cutoff)
    df <- data.frame(Comparisons=names(DEGlistUPorDOWN), Counts_Up_or_Down=sapply(DEGlistUPorDOWN, length), Counts_Up=sapply(DEGlistUP, length), Counts_Down=sapply(DEGlistDOWN, length))
    resultlist <- list(UporDown=DEGlistUPorDOWN, Up=DEGlistUP, Down=DEGlistDOWN, Summary=df)
    if(plot==TRUE) {
        mytitle <- paste("DEG Counts (", names(filter)[1], ": ", filter[1], " & " , names(filter)[2], ": ", filter[2], "%)", sep="")
        df_plot <- data.frame(Comparisons=rep(as.character(df$Comparisons), 2), Counts=c(df$Counts_Up, df$Counts_Down), Type=rep(c("Up", "Down"), each=length(df[,1])))
        p <- ggplot(df_plot, aes(Comparisons, Counts, fill = Type)) + geom_bar(position="stack", stat="identity") + coord_flip() + theme(axis.text.y=element_text(angle=0, hjust=1)) + ggtitle(mytitle)
        print(p)
    }
    return(resultlist)
}
## Usage:
# DEG_list <- filterDEGs(degDF=edgeDF, filter=c(Fold=2, FDR=1))
|
# Example 2.2A, Chapter 2.2, page 50
# Pivots and multipliers in reducing a matrix to an upper triangular system
# (forward elimination without row exchanges).
# Fixes: right-assignment '->' replaced by '<-'; corrected the misspelling
# "traingular" in the printed messages.
A <- matrix(c(1, -1, 0, -1, 2, -1, 0, -1, 2), ncol = 3)
A
print(paste("First pivot is", A[1, 1]))
l21 <- A[2, 1] / A[1, 1]
print(paste("Multiplier L21 to convert the second row first element to 0 is", l21))
A[2, ] <- A[2, ] - l21 * A[1, ]  # eliminate below the first pivot
A
print(paste("The second pivot is ", A[2, 2]))
l32 <- A[3, 2] / A[2, 2]
A[3, ] <- A[3, ] - l32 * A[2, ]  # eliminate below the second pivot
print("The equivalent upper triangular system for the matrix A is ")
A
|
/Introduction_To_Linear_Algebra_by_Gilbert_Strang/CH2/EX2.2.a/Ex2_2.2a.r
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 505
|
r
|
# Example 2.2A, Chapter 2.2, page 50
# Forward elimination: compute the pivots and multipliers that reduce A to
# upper triangular form.
A <- matrix(c(1, -1, 0, -1, 2, -1, 0, -1, 2), ncol = 3)
A
print(paste("First pivot is", A[1, 1]))
mult21 <- A[2, 1] / A[1, 1]
print(paste("Multiplier L21 to convert the second row first element to 0 is", mult21))
A[2, ] <- A[2, ] - mult21 * A[1, ]
A
print(paste("The second pivot is ", A[2, 2]))
mult32 <- A[3, 2] / A[2, 2]
A[3, ] <- A[3, ] - mult32 * A[2, ]
print("The equivalent Upper traingular system for the matrix A is ")
A
|
#======================================
# Extract related keywords with word2vec
#======================================
rm(list=ls())
#
# install.packages("devtools") # On Windows, Rtools must be installed separately.
# install.packages("wordVectors")
# install.packages("tsne") # needed to draw multidimensional scaling (MDS) maps
library(devtools)
install_github("bmschmidt/wordVectors")
library(wordVectors)
library(tsne)
# Korean font handling on macOS
par(family = "AppleGothic")
# install the readr package
install.packages("readr")
library(readr)
# run the morphological analyzer
system("tctStart")
# load the analysis results
parsedData = read.csv("kei_content_data.csv")
View(parsedData)
# Write the TXT file used to train word2vec.
# Bug fix: the file name now matches the one passed to train_word2vec() below
# (it was "kei_w2.txt", which left the training input file missing).
write.table(parsedData$parsedContent, file = "kei_w2v.txt", row.names = FALSE, col.names = FALSE)
# train the model
model = train_word2vec("kei_w2v.txt", output_file = "kei_w2v_2.bin",
                       threads=3, vectors=100, window=12)
# threads: number of CPU cores; more cores train faster
# vectors: number of vector dimensions; higher captures more detail
#          (~100-500 recommended for corpora above ~100k documents)
# word2vec accuracy improves with the amount of training data
# inspect the trained word vectors
read.vectors("kei_w2v_2.bin")
# extract related keywords
nearest_to(model, model[["환경"]], 10) # 10 words closest to '환경' (cosine distance)
# related keywords for two or more query terms
some = nearest_to(model, model[[c("기후","정책")]], 20)
some
# vector arithmetic between words
subVec = model[rownames(model)=="환경",] - model[rownames(model) == "제도",] + model[rownames(model) == "기후",]
subNear = nearest_to(model, subVec, 20)
subNear
# visualize word relations
plot(filter_to_rownames(model, names(some)))
plot(model)
# Euclidean distance
dist(model[(row.names(model)=="환경" | row.names(model)=="개선"),])
# cosine similarity
cosineSimilarity(model[["기후","환경"]], model[["제도"]])
cosineDist(model[["기후","환경"]], model[["제"]])
|
/180607_텍스트마이닝/word2vec.R
|
no_license
|
chankoo/BOAZ-Sessions
|
R
| false
| false
| 2,122
|
r
|
#======================================
# Extract related keywords with word2vec
#======================================
rm(list=ls())
#
# install.packages("devtools") # On Windows, Rtools must be installed separately.
# install.packages("wordVectors")
# install.packages("tsne") # needed to draw multidimensional scaling (MDS) maps
library(devtools)
install_github("bmschmidt/wordVectors")
library(wordVectors)
library(tsne)
# Korean font handling on macOS
par(family = "AppleGothic")
# install the readr package
install.packages("readr")
library(readr)
# run the morphological analyzer
system("tctStart")
# load the analysis results
parsedData =read.csv("kei_content_data.csv")
View(parsedData)
# Write the TXT file used to train word2vec.
# NOTE(review): this writes "kei_w2.txt" but train_word2vec() below reads
# "kei_w2v.txt" — file name mismatch; confirm which name is intended.
write.table(parsedData$parsedContent,file = "kei_w2.txt", row.names = FALSE, col.names = FALSE)
# train the model
model = train_word2vec("kei_w2v.txt", output_file = "kei_w2v_2.bin",
                       threads=3, vectors=100, window=12)
# threads: number of CPU cores; more cores train faster
# vectors: number of vector dimensions; higher captures more detail
#          (~100-500 recommended for corpora above ~100k documents)
# word2vec accuracy improves with the amount of training data
# inspect the trained word vectors
read.vectors("kei_w2v_2.bin")
# extract related keywords
nearest_to(model,model[["환경"]], 10) # 10 words closest to '환경' (cosine distance)
# related keywords for two or more query terms
some = nearest_to(model,model[[c("기후","정책")]], 20)
some
# vector arithmetic between words
subVec = model[rownames(model)=="환경",] - model[rownames(model) == "제도",] + model[rownames(model) == "기후",]
subNear = nearest_to(model, subVec, 20)
subNear
# visualize word relations
plot(filter_to_rownames(model,names(some)))
plot(model)
# Euclidean distance
dist(model[(row.names(model)=="환경" | row.names(model)=="개선"),])
# cosine similarity
cosineSimilarity(model[["기후","환경"]], model[["제도"]])
cosineDist(model[["기후","환경"]], model[["제"]])
|
## Integration workflow for five single-nucleus datasets (p10, p21, 5 months,
## 24 months, 30 months). The count matrices (p21_counts, p10_counts,
## fivemonthcounts, twentyfourcounts, thirtycounts) are assumed to be loaded
## in the workspace — TODO confirm where they come from.
set.seed(42)
p21 <- CreateSeuratObject(counts = p21_counts, project = "p21", min.cells = 3, min.features = 200)
p10 <- CreateSeuratObject(counts = p10_counts, project = "p10", min.cells = 3, min.features = 200)
fivemonth <- CreateSeuratObject(counts = fivemonthcounts, project = "fivemonth", min.cells = 3, min.features = 200)
twentyfourmonth <- CreateSeuratObject(counts = twentyfourcounts, project = "twentyfour", min.cells = 3, min.features = 200)
thirtymonth <- CreateSeuratObject(counts = thirtycounts, project = "thirty", min.cells = 3, min.features = 200)
# Integrate All Datasets
dataset.list <- c(p10, p21, fivemonth, twentyfourmonth, thirtymonth)
# Normalize each dataset and select its 2000 most variable features (vst)
for (i in seq_along(dataset.list)) {
  dataset.list[[i]] <- NormalizeData(dataset.list[[i]], verbose = FALSE)
  dataset.list[[i]] <- FindVariableFeatures(dataset.list[[i]], selection.method = "vst",
                                            nfeatures = 2000, verbose = FALSE)
}
# Tag each object with its sample identity
# (replaces five copy-pasted assignments; order matches dataset.list above)
sample_ids <- c("p10", "p21", "fivemonth", "twentyfour", "thirty")
for (i in seq_along(dataset.list)) {
  dataset.list[[i]]@meta.data[["orig.ident"]] <- sample_ids[i]
}
dataset.anchors <- FindIntegrationAnchors(object.list = dataset.list, dims = 1:30)
dataset.integrated <- IntegrateData(anchorset = dataset.anchors, dims = 1:30)
DefaultAssay(dataset.integrated) <- "integrated"
# Run the standard workflow for visualization and clustering
dataset.integrated <- ScaleData(dataset.integrated, verbose = FALSE)
dataset.integrated <- RunPCA(dataset.integrated, npcs = 30, verbose = FALSE)
dataset.integrated <- FindNeighbors(dataset.integrated, dims = 1:30)
dataset.integrated <- FindClusters(dataset.integrated, resolution = 0.5)
dataset.integrated <- RunUMAP(dataset.integrated, reduction = "pca", dims = 1:30)
DimPlot(dataset.integrated, reduction = "umap", label = TRUE,
        repel = TRUE) + NoLegend()
DimPlot(dataset.integrated, reduction = "umap", label = TRUE, pt.size = 2.5) + NoAxes() + NoLegend()
|
/scripts/integrations/ta integration.R
|
no_license
|
MillayLab/single-myonucleus
|
R
| false
| false
| 2,100
|
r
|
## Integration workflow for five single-nucleus datasets (p10, p21, 5 months,
## 24 months, 30 months). The count matrices (p21_counts, p10_counts,
## fivemonthcounts, twentyfourcounts, thirtycounts) are assumed to be loaded
## in the workspace — TODO confirm where they come from.
set.seed(42)
p21 <- CreateSeuratObject(counts = p21_counts, project = "p21", min.cells = 3, min.features = 200)
p10 <- CreateSeuratObject(counts = p10_counts, project = "p10", min.cells = 3, min.features = 200)
fivemonth <- CreateSeuratObject(counts = fivemonthcounts, project = "fivemonth", min.cells = 3, min.features = 200)
twentyfourmonth <- CreateSeuratObject(counts = twentyfourcounts, project = "twentyfour", min.cells = 3, min.features = 200)
thirtymonth <- CreateSeuratObject(counts = thirtycounts, project = "thirty", min.cells = 3, min.features = 200)
#Integrated All Datasets
dataset.list <- c(p10, p21, fivemonth, twentyfourmonth, thirtymonth)
# Normalize each dataset and select its 2000 most variable features (vst)
for (i in 1:length(dataset.list)) {
  dataset.list[[i]] <- NormalizeData(dataset.list[[i]], verbose = FALSE)
  dataset.list[[i]] <- FindVariableFeatures(dataset.list[[i]], selection.method = "vst",
                                            nfeatures = 2000, verbose = FALSE)
}
# Tag each object with its sample identity (order matches dataset.list above)
dataset.list[[1]]@meta.data[["orig.ident"]] <- "p10"
dataset.list[[2]]@meta.data[["orig.ident"]] <- "p21"
dataset.list[[3]]@meta.data[["orig.ident"]] <- "fivemonth"
dataset.list[[4]]@meta.data[["orig.ident"]] <- "twentyfour"
dataset.list[[5]]@meta.data[["orig.ident"]] <- "thirty"
# Find anchors across datasets and build the integrated assay
dataset.anchors <- FindIntegrationAnchors(object.list = dataset.list, dims = 1:30)
dataset.integrated <- IntegrateData(anchorset = dataset.anchors, dims = 1:30)
DefaultAssay(dataset.integrated) <- "integrated"
# Run the standard workflow for visualization and clustering
dataset.integrated <- ScaleData(dataset.integrated, verbose = FALSE)
dataset.integrated <- RunPCA(dataset.integrated, npcs = 30, verbose = FALSE)
dataset.integrated <- FindNeighbors(dataset.integrated, dims = 1:30)
dataset.integrated <- FindClusters(dataset.integrated, resolution = 0.5)
dataset.integrated <- RunUMAP(dataset.integrated, reduction = "pca", dims = 1:30)
DimPlot(dataset.integrated, reduction = "umap", label = TRUE,
        repel = TRUE) + NoLegend()
DimPlot(dataset.integrated, reduction = "umap", label = TRUE, pt.size = 2.5) + NoAxes() + NoLegend()
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
Sys.setlocale("LC_TIME", "en_US.UTF-8")
# Semicolon-separated file, hence read.csv2.
# NOTE(review): missing values in this dataset appear to be coded as "?" and
# are not declared via na.strings here, so as.numeric() below coerces them to
# NA with a warning — confirm this is intended.
data <- read.csv2("/Users/tiagoassano/Documents/household_power_consumption.txt", stringsAsFactors = FALSE)
data$Time <- strptime(data$Time, format = "%T")  # time of day
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data$Global_active_power <- as.numeric(data$Global_active_power)
# Keep only the two target days
data1<- subset(data,Date=="2007-02-01"| Date=="2007-02-02")
png(filename="plot1.png")
hist(data1$Global_active_power, xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency", main = "Global Active Power", col = "red")
dev.off()
|
/plot1.R
|
no_license
|
Tassano/ExData_Plotting1
|
R
| false
| false
| 553
|
r
|
# Histogram of Global Active Power restricted to 2007-02-01 and 2007-02-02,
# written to plot1.png.
Sys.setlocale("LC_TIME", "en_US.UTF-8")
power <- read.csv2("/Users/tiagoassano/Documents/household_power_consumption.txt", stringsAsFactors = FALSE)
power$Time <- strptime(power$Time, format = "%T")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Global_active_power <- as.numeric(power$Global_active_power)
feb_days <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
png(filename="plot1.png")
hist(feb_days$Global_active_power, xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency", main = "Global Active Power", col = "red")
dev.off()
|
# Code to add to the book
|
/Todo.R
|
no_license
|
fowiny/dataExploration_R_training
|
R
| false
| false
| 27
|
r
|
# Code to add to the book
|
library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
theme_set(theme_bw())
## Bug fix: the argument was misspelled 'stringAsFactor', which read.csv does
## not accept; the correct name is 'stringsAsFactors'. Also use library()
## instead of require() so a missing package fails loudly.
data <- read.csv("2018.csv", stringsAsFactors = FALSE, header = TRUE)
year <- (2018)
#define ui
# NOTE(review): the parenthesis nesting below looks broken and should be
# confirmed against shinydashboard's expected layout:
#  - dashboardPage() is closed immediately after dashboardSidebar(), so
#    dashboardBody() is passed to shinyUI() as a *second* argument instead of
#    being the third argument of dashboardPage(header, sidebar, body);
#  - the "boxplot" and "compare" tabItems are nested inside the "dataset"
#    tabItem's fluidPage() rather than being siblings inside tabItems().
shinyUI(
  dashboardPage(
    skin = "blue",
    dashboardHeader(title = "Who Is Happy??"),
    dashboardSidebar(
      sidebarMenu(
        menuItem("Happiness rate", tabName = "dashboard", icon = icon("calculator")),
        menuItem("Dataset", tabName ="dataset",icon=icon("book")),
        menuItem("Boxplot", tabName = "boxplot", icon = icon("bar-chart-o")),
        menuItem("Comparison of Region", tabName = "compare", icon = icon("line-chart"))
      )
    )
  ),
  # NOTE(review): from here on we are outside dashboardPage() — see note above.
  dashboardBody(
    tabItems(
      tabItem(tabName = "dashboard",
              h1("Who Is Happy??"),
              fluidRow(
                img(src='https://lh3.googleusercontent.com/ApW2Um8gCc3O-isltwjcdALAp-Y5GXXmHVCALmap-Okh4zxWY3nKl1WXJuQ1R1OWL6fK=s170', height = "20%", width = "100%", align = "center"),
                h2("Basic concepts of Happiness"),
                p("Three Surprising Facts About Happiness"),
                p("- Happiness is Contagious. Like a cold, happiness can be caught from the people around you."),
                p("- Smiling actually does make you feel happy. Nothing is more annoying than the stranger that tells you to 'smile'"),
                p("- Emotions last only a few seconds. If you've ever been stuck in a bad mood for days on end, this might sound unlikely."),
                box(
                  title = strong("Input Dashborad"), solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Choose your filter to visualise the data")),
                  htmlOutput("region_selector")
                ),
                box(
                  title ="Frequency Graph", solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Year versus Happiness Rate")),
                  plotOutput("fgraph")
                ),
                box(
                  title = "Boxplot", solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Analysis for the chosen region: ")),
                  textOutput("t1"),
                  textOutput("t2"),
                  textOutput("t3"),
                  textOutput("t4"),
                  textOutput("t5")
                )
              )
      ),
      tabItem(tabName = "dataset",
              fluidPage(
                h2 ("Dataset used"),
                p("The dataset that we used is sorced from kaggle.com. Table below represents an interactive visualisation of dataset used"),
                br(),
                sidebarLayout(
                  sidebarPanel(
                    conditionalPanel(
                      'input.dataset==="data"',
                      helpText("Choose the variable(s) to show"),
                      checkboxGroupInput("show_vars", "Columns in datsets:",
                                         names(data), selected =)
                    )
                  ),
                  mainPanel (
                    tabsetPanel(
                      id = 'dataset',
                      tabPanel("data", DT::dataTableOutput("mytable1"))
                    )
                  )
                ),
                # NOTE(review): the following tabItems are nested inside this
                # tabItem's fluidPage — likely unintended; see note at the top.
                tabItem( tabName = "boxplot",
                         fluidPage(
                           h2("Boxplot of the whole dataset "),
                           br(),
                           plotOutput("myplot1")
                         )
                ),
                tabItem(tabName = "compare",
                        fluidPage(
                          h2("Comparison of Happiness Rate between region"),
                          br(),
                          sidebarLayout(
                            sidebarPanel (
                              selectInput("coun", "Select a region: ",data[,1])
                            ),
                            mainPanel(
                              plotOutput("comp")
                            )
                          )
                        ))
              )
      ))
  )
)
#define server logic
|
/ui.R
|
no_license
|
Afryna/lala
|
R
| false
| false
| 4,075
|
r
|
library (shiny)
require (shinydashboard)
library(dplyr)
library(ggplot2)
theme_set(theme_bw())
# NOTE(review): 'stringAsFactor' is a misspelling of 'stringsAsFactors';
# read.csv will not recognize it — confirm and fix.
data<-read.csv("2018.csv", stringAsFactor = FALSE, header = TRUE)
year <- (2018)
#define ui
# NOTE(review): dashboardPage() is closed right after dashboardSidebar(), so
# dashboardBody() below is passed to shinyUI() as a second argument instead of
# being dashboardPage()'s third argument (header, sidebar, body); the
# "boxplot"/"compare" tabItems are also nested inside the "dataset" tabItem.
# Confirm the intended layout and rebalance the parentheses.
shinyUI(
  dashboardPage(
    skin = "blue",
    dashboardHeader(title = "Who Is Happy??"),
    dashboardSidebar(
      sidebarMenu(
        menuItem("Happiness rate", tabName = "dashboard", icon = icon("calculator")),
        menuItem("Dataset", tabName ="dataset",icon=icon("book")),
        menuItem("Boxplot", tabName = "boxplot", icon = icon("bar-chart-o")),
        menuItem("Comparison of Region", tabName = "compare", icon = icon("line-chart"))
      )
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(tabName = "dashboard",
              h1("Who Is Happy??"),
              fluidRow(
                img(src='https://lh3.googleusercontent.com/ApW2Um8gCc3O-isltwjcdALAp-Y5GXXmHVCALmap-Okh4zxWY3nKl1WXJuQ1R1OWL6fK=s170', height = "20%", width = "100%", align = "center"),
                h2("Basic concepts of Happiness"),
                p("Three Surprising Facts About Happiness"),
                p("- Happiness is Contagious. Like a cold, happiness can be caught from the people around you."),
                p("- Smiling actually does make you feel happy. Nothing is more annoying than the stranger that tells you to 'smile'"),
                p("- Emotions last only a few seconds. If you've ever been stuck in a bad mood for days on end, this might sound unlikely."),
                box(
                  title = strong("Input Dashborad"), solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Choose your filter to visualise the data")),
                  htmlOutput("region_selector")
                ),
                box(
                  title ="Frequency Graph", solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Year versus Happiness Rate")),
                  plotOutput("fgraph")
                ),
                box(
                  title = "Boxplot", solidHeader = TRUE, collapsible = TRUE, status = "danger",
                  h4(strong("Analysis for the chosen region: ")),
                  textOutput("t1"),
                  textOutput("t2"),
                  textOutput("t3"),
                  textOutput("t4"),
                  textOutput("t5")
                )
              )
      ),
      tabItem(tabName = "dataset",
              fluidPage(
                h2 ("Dataset used"),
                p("The dataset that we used is sorced from kaggle.com. Table below represents an interactive visualisation of dataset used"),
                br(),
                sidebarLayout(
                  sidebarPanel(
                    conditionalPanel(
                      'input.dataset==="data"',
                      helpText("Choose the variable(s) to show"),
                      checkboxGroupInput("show_vars", "Columns in datsets:",
                                         names(data), selected =)
                    )
                  ),
                  mainPanel (
                    tabsetPanel(
                      id = 'dataset',
                      tabPanel("data", DT::dataTableOutput("mytable1"))
                    )
                  )
                ),
                tabItem( tabName = "boxplot",
                         fluidPage(
                           h2("Boxplot of the whole dataset "),
                           br(),
                           plotOutput("myplot1")
                         )
                ),
                tabItem(tabName = "compare",
                        fluidPage(
                          h2("Comparison of Happiness Rate between region"),
                          br(),
                          sidebarLayout(
                            sidebarPanel (
                              selectInput("coun", "Select a region: ",data[,1])
                            ),
                            mainPanel(
                              plotOutput("comp")
                            )
                          )
                        ))
              )
      ))
  )
)
#define server logic
|
# Tests for the cranlike metadata cache. Throughout this file, `pri` and `rep`
# are throwaway primary/replica cache directories, and private methods of the
# R6 class are reached via the `get_private()` / `set_private()` test helpers.
context("metadata cache")
# get_cache_files() must describe a consistent file layout, all rooted under
# the requested cache directory, for both the "primary" and "replica" caches.
test_that("get_cache_files", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  pri_files <- get_private(cmc)$get_cache_files("primary")
  rep_files <- get_private(cmc)$get_cache_files("replica")
  # Shared checks for one cache layout: every path must live under `root`.
  check <- function(files, root) {
    expect_equal(files$root, root)
    expect_true(all(c("meta", "lock", "rds") %in% names(files)))
    expect_equal(
      # NOTE(review): `files$rds` appears twice here; the second occurrence
      # was possibly meant to be `files$meta` — confirm against the
      # implementation of get_cache_files().
      fs::path_common(c(files$rds, files$lock, files$rds, root)),
      root)
    expect_true(tibble::is_tibble(files$pkgs))
    expect_equal(
      sort(names(files$pkgs)),
      sort(c("path", "etag", "basedir", "base", "mirror", "url", "fallback_url",
             "platform", "type", "bioc_version", "meta_path", "meta_etag",
             "meta_url")))
    expect_equal(
      fs::path_common(c(files$pkgs$path, files$pkgs$etag, root)),
      root)
  }
  check(pri_files, pri)
  check(rep_files, rep)
})
# get_current_data() returns the in-memory `data` only while it is younger
# than the supplied max age; otherwise it errors, and it errors differently
# when no data was ever loaded.
test_that("get_current_data", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  # Inject fake data + timestamp directly into the private environment.
  set_private(cmc, "data") <- "DATA"
  set_private(cmc, "data_time") <- Sys.time()
  expect_equal(get_private(cmc)$get_current_data(oneday()), "DATA")
  # Too old -> rejected.
  set_private(cmc, "data_time") <- Sys.time() - 2 * oneday()
  expect_error(
    get_private(cmc)$get_current_data(oneday()),
    "Loaded data outdated")
  # Missing timestamp counts as outdated as well.
  set_private(cmc, "data_time") <- NULL
  expect_error(
    get_private(cmc)$get_current_data(oneday()),
    "Loaded data outdated")
  set_private(cmc, "data") <- NULL
  expect_error(get_private(cmc)$get_current_data(oneday()), "No data loaded")
})
# load_replica_rds() loads the replica RDS file into memory, but only when
# the file exists and its mtime is within the allowed age. File age is
# manipulated with file_set_time() to hit each branch.
test_that("load_replica_rds", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  # No file yet.
  expect_error(
    get_private(cmc)$load_replica_rds(oneday()),
    "No replica RDS file in cache"
  )
  rep_files <- get_private(cmc)$get_cache_files("replica")
  mkdirp(dirname(rep_files$rds))
  saveRDS("This is it", rep_files$rds)
  # File exists but is two days old -> outdated.
  file_set_time(rep_files$rds, Sys.time() - 2 * oneday())
  expect_error(
    get_private(cmc)$load_replica_rds(oneday()),
    "Replica RDS cache file outdated"
  )
  # Fresh enough -> loaded, and also cached in memory with a timestamp.
  file_set_time(rep_files$rds, Sys.time() - 1/2 * oneday())
  expect_equal(
    get_private(cmc)$load_replica_rds(oneday()),
    "This is it")
  expect_equal(get_private(cmc)$data, "This is it")
  expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
})
# load_primary_rds() loads the primary RDS (when present, fresh, and the
# PACKAGES files exist), stores it in memory, and copies it to the replica.
test_that("load_primary_rds", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  expect_error(
    get_private(cmc)$load_primary_rds(oneday()),
    "No primary RDS file in cache"
  )
  pri_files <- get_private(cmc)$get_cache_files("primary")
  mkdirp(dirname(pri_files$rds))
  saveRDS("This is it", pri_files$rds)
  file_set_time(pri_files$rds, Sys.time() - 2 * oneday())
  expect_error(
    get_private(cmc)$load_primary_rds(oneday()),
    "Primary RDS cache file outdated"
  )
  file_set_time(pri_files$rds, Sys.time() - 1/2 * oneday())
  # Dummy PACKAGES files must exist; their age does not matter here, only
  # the RDS file's age is checked.
  for (f in pri_files$pkgs$path) { mkdirp(dirname(f)); cat("x", file = f) }
  file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
  expect_equal(
    get_private(cmc)$load_primary_rds(oneday()),
    "This is it")
  expect_equal(get_private(cmc)$data, "This is it")
  expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
  ## Replica was also updated
  expect_equal(
    get_private(cmc)$load_replica_rds(oneday()),
    "This is it")
})
# When the cache lock cannot be acquired (the internal `lock` helper is
# stubbed to return NULL), the load functions must fail with clear errors
# rather than touching the cache.
test_that("locking failures", {
  pri <- test_temp_dir()
  rep <- test_temp_dir()
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  mockery::stub(cmc__load_primary_rds, "lock", function(...) NULL)
  expect_error(
    cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
    "Cannot acquire lock to copy RDS")
  mockery::stub(cmc__load_primary_pkgs, "lock", function(...) NULL)
  expect_error(
    cmc__load_primary_pkgs(cmc, get_private(cmc), oneday()),
    "Cannot acquire lock to copy PACKAGES")
})
# An RDS file without its accompanying PACKAGES files is an inconsistent
# primary cache: load_primary_rds() must refuse to use it.
test_that("load_primary_rds 3", {
  pri <- test_temp_dir()
  rep <- test_temp_dir()
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  pri_files <- get_private(cmc)$get_cache_files("primary")
  # Create an (empty) RDS file only, no PACKAGES files.
  touch(pri_files$rds)
  expect_error(
    cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
    "Primary PACKAGES missing")
})
# load_primary_pkgs() parses the primary PACKAGES files. All per-platform
# files must exist and be fresh; on success both the replica and primary RDS
# caches are (re)written.
test_that("load_primary_pkgs", {
  withr::local_options(list(repos = NULL))
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  # Two platforms, so there are at least two PACKAGES files to satisfy.
  cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
                                     bioc = FALSE)
  expect_error(
    get_private(cmc)$load_primary_pkgs(oneday()),
    "Some primary PACKAGES files don't exist")
  pri_files <- get_private(cmc)$get_cache_files("primary")
  mkdirp(dirname(pri_files$pkgs$path))
  # Only the first file present -> still an error.
  fs::file_copy(get_fixture("PACKAGES-mac.gz"), pri_files$pkgs$path[1])
  expect_error(
    synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
    "Some primary PACKAGES files don't exist")
  # Fill in the rest from the source fixture.
  for (i in utils::tail(seq_len(nrow(pri_files$pkgs)), -1)) {
    fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path[i])
  }
  file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
  expect_error(
    synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
    "Some primary PACKAGES files are outdated")
  file_set_time(pri_files$pkgs$path, Sys.time() - 1/2 * oneday())
  res <- synchronise(get_private(cmc)$load_primary_pkgs(oneday()))
  check_packages_data(res)
  ## RDS was updated as well
  rep_files <- get_private(cmc)$get_cache_files("replica")
  expect_true(file.exists(rep_files$rds))
  expect_true(Sys.time() - file_get_time(rep_files$rds) < oneday())
  ## Primary RDS was updated as well
  expect_true(file.exists(pri_files$rds))
  expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
})
# update_replica_pkgs() downloads PACKAGES (+ Etag) files into the replica
# cache; update_replica_rds() then builds the in-memory data from them.
# Needs network access.
test_that("update_replica_pkgs", {
  skip_if_offline()
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  synchronise(get_private(cmc)$update_replica_pkgs())
  rep_files <- get_private(cmc)$get_cache_files("replica")
  expect_true(all(file.exists(rep_files$pkgs$path)))
  expect_true(all(file.exists(rep_files$pkgs$etag)))
  data <- get_private(cmc)$update_replica_rds()
  expect_identical(data, get_private(cmc)$data)
  check_packages_data(data)
})
# update_replica_rds() builds the RDS/data from pre-existing replica PACKAGES
# files (fixtures here), without any downloads.
test_that("update_replica_rds", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
                                     bioc = FALSE)
  rep_files <- get_private(cmc)$get_cache_files("replica")
  mkdirp(dirname(rep_files$pkgs$path))
  # First file is the macOS fixture, the rest are source fixtures.
  fs::file_copy(get_fixture("PACKAGES-mac.gz"), rep_files$pkgs$path[1])
  for (i in utils::tail(seq_len(nrow(rep_files$pkgs)), -1)) {
    fs::file_copy(get_fixture("PACKAGES-src.gz"), rep_files$pkgs$path[i])
  }
  data <- get_private(cmc)$update_replica_rds()
  expect_identical(get_private(cmc)$data, data)
  expect_true(get_private(cmc)$data_time > Sys.time() - oneminute())
  check_packages_data(data)
})
# update_primary() copies the replica cache back to the primary cache.
# The `rds` and `packages` flags select which parts are copied.
test_that("update_primary", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
                                     bioc = FALSE)
  pri_files <- get_private(cmc)$get_cache_files("primary")
  rep_files <- get_private(cmc)$get_cache_files("replica")
  mkdirp(dirname(rep_files$rds))
  saveRDS("RDS", rep_files$rds)
  # RDS only.
  get_private(cmc)$update_primary(rds = TRUE, packages = FALSE)
  expect_true(file.exists(pri_files$rds))
  expect_equal(readRDS(pri_files$rds), "RDS")
  # Create recognizable replica PACKAGES/etag contents, one row at a time.
  lapply_rows(rep_files$pkgs, function(pkg) {
    mkdirp(dirname(pkg$path))
    cat(basename(pkg$path), "\n", sep = "", file = pkg$path)
    mkdirp(dirname(pkg$etag))
    cat(pkg$url, "\n", sep = "", file = pkg$etag)
  })
  # PACKAGES only; contents must arrive unchanged in the primary cache.
  get_private(cmc)$update_primary(rds = FALSE, packages = TRUE)
  expect_true(all(file.exists(pri_files$pkgs$path)))
  expect_true(all(file.exists(pri_files$pkgs$etag)))
  lapply_rows(pri_files$pkgs, function(pkg) {
    expect_equal(readLines(pkg$path), basename(pkg$path))
    expect_equal(readLines(pkg$etag), pkg$url)
  })
})
# Edge cases of cmc__update_primary(): a no-op call returns NULL, and a
# failed lock acquisition (stubbed `lock`) raises a descriptive error.
test_that("update_primary 2", {
  expect_null(cmc__update_primary(NULL, NULL, FALSE, FALSE, FALSE))
  pri <- test_temp_dir()
  rep <- test_temp_dir()
  cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
                                     bioc = FALSE)
  mockery::stub(cmc__update_primary, "lock", function(...) NULL)
  expect_error(
    cmc__update_primary(cmc, get_private(cmc), TRUE, TRUE, TRUE),
    "Cannot acquire lock to update primary cache")
})
# End-to-end test of the public update() method: it downloads metadata,
# populates both caches, and makes list()/revdeps() work. Needs network.
test_that("update", {
  skip_if_offline()
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  data <- cmc$update()
  check_packages_data(data)
  ## Data is loaded
  expect_identical(get_private(cmc)$data, data)
  expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
  ## There is a replica RDS
  rep_files <- get_private(cmc)$get_cache_files("replica")
  expect_true(file.exists(rep_files$rds))
  expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
  ## There is a primary RDS
  pri_files <- get_private(cmc)$get_cache_files("primary")
  expect_true(file.exists(pri_files$rds))
  expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
  ## There are replicate PACKAGES, with Etag files
  expect_true(all(file.exists(rep_files$pkgs$path)))
  expect_true(all(file.exists(rep_files$pkgs$etag)))
  ## There are primary PACKAGES, with Etag files
  expect_true(all(file.exists(pri_files$pkgs$path)))
  expect_true(all(file.exists(pri_files$pkgs$etag)))
  ## List
  expect_equal(as.list(data$pkgs), as.list(cmc$list()))
  lst <- cmc$list(c("igraph", "MASS"))
  expect_equal(sort(c("igraph", "MASS")), sort(unique(lst$package)))
  ## Revdeps
  # Recursive reverse dependencies include indirect ones ("abd"), the
  # non-recursive query must not.
  rdeps <- cmc$revdeps("MASS")
  expect_true("abc" %in% rdeps$package)
  expect_true("abd" %in% rdeps$package)
  rdeps <- cmc$revdeps("MASS", recursive = FALSE)
  expect_true("abc" %in% rdeps$package)
  expect_false("abd" %in% rdeps$package)
})
# check_update() behaves like update() on a cold cache, but must be a no-op
# (no re-download) when the Etag files are current. cleanup() then removes
# every cache file. Needs network.
test_that("check_update", {
  skip_if_offline()
  withr::local_options(
    list(repos = c(CRAN = "https://cloud.r-project.org"))
  )
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  data <- cmc$check_update()
  check_packages_data(data)
  ## Data is loaded
  expect_identical(get_private(cmc)$data, data)
  expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
  ## There is a replica RDS
  rep_files <- get_private(cmc)$get_cache_files("replica")
  expect_true(file.exists(rep_files$rds))
  expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
  ## There is a primary RDS
  pri_files <- get_private(cmc)$get_cache_files("primary")
  expect_true(file.exists(pri_files$rds))
  expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
  ## There are replicate PACKAGES, with Etag files
  expect_true(all(file.exists(rep_files$pkgs$path)))
  expect_true(all(file.exists(rep_files$pkgs$etag)))
  ## There are primary PACKAGES, with Etag files
  expect_true(all(file.exists(pri_files$pkgs$path)))
  expect_true(all(file.exists(pri_files$pkgs$etag)))
  ## We don't download it again, if the Etag files are current
  # Scribble over the cache files: if check_update() wrongly re-downloaded,
  # the "foobar" marker below would be overwritten.
  cat("foobar\n", file = rep_files$pkgs$path[1])
  cat("foobar2\n", file = rep_files$rds)
  cat("foobar\n", file = pri_files$pkgs$path[1])
  cat("foobar2\n", file = pri_files$rds)
  data2 <- cmc$check_update()
  expect_identical(data, data2)
  expect_equal(read_lines(rep_files$pkgs$path[1]), "foobar")
  ## Cleanup
  cmc$cleanup(force = TRUE)
  expect_false(file.exists(pri_files$rds))
  expect_false(any(file.exists(pri_files$pkgs$path)))
  expect_false(file.exists(rep_files$rds))
  expect_false(any(file.exists(rep_files$pkgs$path)))
})
# Calling deps() on a cache that only has primary PACKAGES fixtures must
# transparently rebuild the RDS files and load the data — no explicit
# update() call needed, and no downloads (so no Etag files).
test_that("deps will auto-update as needed", {
  skip_if_offline()
  withr::local_options(list(repos = NULL))
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  pri_files <- get_private(cmc)$get_cache_files("primary")
  mkdirp(dirname(pri_files$pkgs$path))
  fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path)
  ## This will update the RDS files, and also load the data
  cmc$deps("A3", recursive = FALSE)
  ## Data is loaded
  expect_false(is.null(get_private(cmc)$data))
  expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
  ## There is a replica RDS
  rep_files <- get_private(cmc)$get_cache_files("replica")
  expect_true(file.exists(rep_files$rds))
  expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
  ## There is a primary RDS
  pri_files <- get_private(cmc)$get_cache_files("primary")
  expect_true(file.exists(pri_files$rds))
  expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
  ## There are replicate PACKAGES, no Etag files, since no downloads...
  expect_true(all(file.exists(rep_files$pkgs$path)))
  ## There are primary PACKAGES, no Etag files, since no downloads...
  expect_true(all(file.exists(pri_files$pkgs$path)))
})
# deps() must agree with the standalone extract_deps() helper on the same
# parsed PACKAGES data, across the recursive/base-package option matrix.
# Results carry "base" and "unknown" attributes for base-R packages and
# packages missing from the metadata, respectively.
test_that("deps, extract_deps", {
  skip_if_offline()
  withr::local_options(list(repos = NULL))
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE,
                                     cran_mirror = "mirror")
  pri_files <- get_private(cmc)$get_cache_files("primary")
  mkdirp(dirname(pri_files$pkgs$path))
  fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path)
  file_set_time(pri_files$pkgs$path, Sys.time() - 1/2 * oneday())
  # Parse the same fixture independently, to feed extract_deps() directly.
  pkgs <- read_packages_file(
    get_fixture("PACKAGES-src.gz"),
    mirror = "mirror", repodir = "src/contrib", platform = "source",
    rversion = get_minor_r_version(current_r_version()), type = "cran")
  # Non-recursive, no base packages.
  deps <- cmc$deps("abc", FALSE, FALSE)
  expect_identical(deps$package, "abc")
  expect_identical(attr(deps, "base"), character())
  expect_identical(attr(deps, "unknown"), character())
  deps2 <- extract_deps(pkgs, "abc", FALSE, FALSE)
  expect_identical(deps, deps2)
  # Recursive, still no base packages.
  deps <- extract_deps(pkgs, "abc", TRUE, FALSE)
  expect_identical(deps$package, c("abc", "abc.data", "MASS", "nnet"))
  expect_identical(attr(deps, "base"), character())
  expect_identical(attr(deps, "unknown"), c("quantreg", "locfit"))
  deps2 <- extract_deps(pkgs, "abc", TRUE, FALSE)
  expect_identical(deps, deps2)
  # Recursive, including base packages.
  deps <- extract_deps(pkgs, "abc", TRUE, TRUE)
  expect_identical(deps$package, c("abc", "abc.data", "MASS", "nnet"))
  expect_identical(
    sort(attr(deps, "base")),
    sort(c("grDevices", "graphics", "stats", "utils", "methods")))
  expect_identical(attr(deps, "unknown"), c("quantreg", "locfit"))
  deps2 <- extract_deps(pkgs, "abc", TRUE, TRUE)
  expect_identical(deps, deps2)
  # Explicit dependency-type selection.
  deps <- extract_deps(pkgs, "nnet", c("Depends", "Suggests"), FALSE)
  expect_identical(deps$package, c("MASS", "nnet"))
  expect_identical(attr(deps, "base"), c("stats", "utils"))
  expect_identical(attr(deps, "unknown"), character())
  deps2 <- extract_deps(pkgs, "nnet", c("Depends", "Suggests"), FALSE)
  expect_identical(deps, deps2)
})
# Several concurrent async_update() calls must all resolve to valid data,
# and the shared deferred must be cleared afterwards. Needs network.
test_that("concurrency in update", {
  skip_if_offline()
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  ## TODO: somehow check that there are no parallel downloads
  do <- function() {
    dx1 <- cmc$async_update()
    dx2 <- cmc$async_update()
    dx3 <- cmc$async_update()
    when_all(dx1, dx2, dx3)
  }
  res <- synchronise(do())
  check_packages_data(res[[1]])
  check_packages_data(res[[2]])
  check_packages_data(res[[3]])
  # All updates finished, so no pending deferred may remain.
  expect_null(get_private(cmc)$update_deferred)
})
# cmc__get_repos() normalizes the repos option: the CRAN entry is replaced by
# the configured mirror, and with bioc = TRUE the Bioconductor repositories
# (versioned from the R version) are appended, honoring user overrides.
test_that("cmc__get_repos", {
  repos <- c(CRAN = "bad")
  ## No bioc, CRAN is replaced
  expect_equal(
    cmc__get_repos(repos, FALSE, cran_mirror = "good", r_version = "3.5"),
    tibble(name = "CRAN", url = "good", type = "cran",
           bioc_version = NA_character_)
  )
  ## BioC, all new
  res <- cmc__get_repos(repos, TRUE, "good", r_version = "3.5")
  expect_equal(
    res$name,
    c("CRAN", "BioCsoft", "BioCann", "BioCexp", "BioCworkflows"))
  expect_equal(res$url[1], "good")
  expect_equal(res$type, c("cran", "bioc", "bioc", "bioc", "bioc"))
  # R 3.5 maps to Bioconductor 3.8.
  expect_equal(
    res$bioc_version,
    c(NA_character_, "3.8", "3.8", "3.8", "3.8"))
  ## BioC, some are custom
  repos <- c(CRAN = "bad", BioCsoft = "ok")
  res <- cmc__get_repos(repos, TRUE, "good", r_version = "3.5")
  expect_equal(
    res$name,
    c("CRAN", "BioCsoft", "BioCann", "BioCexp", "BioCworkflows"))
  expect_equal(res$url[1], "good")
  # The user-supplied BioCsoft URL must win over the default.
  expect_equal(res$url[2], "ok")
  expect_equal(res$type, c("cran", "bioc", "bioc", "bioc", "bioc"))
  expect_equal(
    res$bioc_version,
    c(NA_character_, "3.8", "3.8", "3.8", "3.8"))
})
# With an unreachable mirror (a closed local port), update() must emit a
# download-failure message and error, and subsequent queries must error too.
test_that("download failures", {
  skip_if_offline()
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(
    pri, rep, "source", bioc = FALSE,
    cran_mirror = "http://127.0.0.1:23424/")
  expect_error(
    expect_message(cmc$update(), "Metadata download failed"))
  expect_error(cmc$get_update())
  expect_error(cmc$list())
})
# cmc_cleanup() must refuse to delete the cache without `force` when the
# session is not interactive (stubbed here), since it cannot ask the user.
test_that("cleanup", {
  mockery::stub(cmc_cleanup, "interactive", FALSE)
  expect_error(cmc_cleanup(NULL, NULL, FALSE), "Not cleaning up cache")
})
# In an interactive session (stubbed), an empty answer at the confirmation
# prompt (stubbed readline) must abort the cleanup with an error.
#
# Fix: this test_that() reused the description "cleanup" already taken by the
# previous test, which makes testthat failure reports ambiguous. Renamed to
# "cleanup 2", following this file's convention ("update_primary 2",
# "load_primary_rds 3"). No other behavior changed.
test_that("cleanup 2", {
  dir.create(pri <- fs::path_norm(tempfile()))
  on.exit(unlink(pri, recursive = TRUE), add = TRUE)
  dir.create(rep <- fs::path_norm(tempfile()))
  on.exit(unlink(rep, recursive = TRUE), add = TRUE)
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  mockery::stub(cmc_cleanup, "interactive", TRUE)
  mockery::stub(cmc_cleanup, "readline", "")
  expect_error(cmc_cleanup(cmc, get_private(cmc), FALSE), "Aborted")
})
# Caches sharing the same primary directory share an in-memory (session)
# cache: a second instance can read it via get_memory_cache(), but only
# within the given max age — an (effectively zero) age must error.
test_that("memory cache", {
  skip_if_offline()
  pri <- test_temp_dir()
  rep <- test_temp_dir()
  cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
  data <- cmc$list()
  rep2 <- test_temp_dir()
  cmc2 <- cranlike_metadata_cache$new(pri, rep2, "source", bioc = FALSE)
  week <- as.difftime(7, units = "days")
  data2 <- get_private(cmc2)$get_memory_cache(week)
  expect_identical(data, data2$pkgs)
  rep3 <- test_temp_dir()
  cmc3 <- cranlike_metadata_cache$new(pri, rep3, "source", bioc = FALSE)
  # ~10 microseconds: practically everything is older than this.
  instance <- as.difftime(1/100000, units = "secs")
  expect_error(data3 <- get_private(cmc3)$get_memory_cache(instance),
               "Memory cache outdated")
})
# NOTE(review): despite its description, this test exercises the locking
# failure path of cmc__copy_to_replica(), not update_memory_cache() —
# consider renaming the description to match.
test_that("update_memory_cache", {
  pri <- test_temp_dir()
  rep <- test_temp_dir()
  cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
                                     bioc = FALSE)
  # A NULL lock means the primary cache lock could not be acquired.
  mockery::stub(cmc__copy_to_replica, "lock", function(...) NULL)
  expect_error(
    cmc__copy_to_replica(cmc, get_private(cmc), TRUE, TRUE, TRUE),
    "Cannot acquire lock to copy primary cache")
})
|
/tests/testthat/test-metadata-cache.R
|
permissive
|
hongooi73/pkgcache
|
R
| false
| false
| 21,262
|
r
|
context("metadata cache")
test_that("get_cache_files", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
rep_files <- get_private(cmc)$get_cache_files("replica")
check <- function(files, root) {
expect_equal(files$root, root)
expect_true(all(c("meta", "lock", "rds") %in% names(files)))
expect_equal(
fs::path_common(c(files$rds, files$lock, files$rds, root)),
root)
expect_true(tibble::is_tibble(files$pkgs))
expect_equal(
sort(names(files$pkgs)),
sort(c("path", "etag", "basedir", "base", "mirror", "url", "fallback_url",
"platform", "type", "bioc_version", "meta_path", "meta_etag",
"meta_url")))
expect_equal(
fs::path_common(c(files$pkgs$path, files$pkgs$etag, root)),
root)
}
check(pri_files, pri)
check(rep_files, rep)
})
test_that("get_current_data", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
set_private(cmc, "data") <- "DATA"
set_private(cmc, "data_time") <- Sys.time()
expect_equal(get_private(cmc)$get_current_data(oneday()), "DATA")
set_private(cmc, "data_time") <- Sys.time() - 2 * oneday()
expect_error(
get_private(cmc)$get_current_data(oneday()),
"Loaded data outdated")
set_private(cmc, "data_time") <- NULL
expect_error(
get_private(cmc)$get_current_data(oneday()),
"Loaded data outdated")
set_private(cmc, "data") <- NULL
expect_error(get_private(cmc)$get_current_data(oneday()), "No data loaded")
})
test_that("load_replica_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
expect_error(
get_private(cmc)$load_replica_rds(oneday()),
"No replica RDS file in cache"
)
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$rds))
saveRDS("This is it", rep_files$rds)
file_set_time(rep_files$rds, Sys.time() - 2 * oneday())
expect_error(
get_private(cmc)$load_replica_rds(oneday()),
"Replica RDS cache file outdated"
)
file_set_time(rep_files$rds, Sys.time() - 1/2 * oneday())
expect_equal(
get_private(cmc)$load_replica_rds(oneday()),
"This is it")
expect_equal(get_private(cmc)$data, "This is it")
expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
})
test_that("load_primary_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
expect_error(
get_private(cmc)$load_primary_rds(oneday()),
"No primary RDS file in cache"
)
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$rds))
saveRDS("This is it", pri_files$rds)
file_set_time(pri_files$rds, Sys.time() - 2 * oneday())
expect_error(
get_private(cmc)$load_primary_rds(oneday()),
"Primary RDS cache file outdated"
)
file_set_time(pri_files$rds, Sys.time() - 1/2 * oneday())
for (f in pri_files$pkgs$path) { mkdirp(dirname(f)); cat("x", file = f) }
file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
expect_equal(
get_private(cmc)$load_primary_rds(oneday()),
"This is it")
expect_equal(get_private(cmc)$data, "This is it")
expect_true(Sys.time() - get_private(cmc)$data_time < oneday())
## Replica was also updated
expect_equal(
get_private(cmc)$load_replica_rds(oneday()),
"This is it")
})
test_that("locking failures", {
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
mockery::stub(cmc__load_primary_rds, "lock", function(...) NULL)
expect_error(
cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
"Cannot acquire lock to copy RDS")
mockery::stub(cmc__load_primary_pkgs, "lock", function(...) NULL)
expect_error(
cmc__load_primary_pkgs(cmc, get_private(cmc), oneday()),
"Cannot acquire lock to copy PACKAGES")
})
test_that("load_primary_rds 3", {
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
touch(pri_files$rds)
expect_error(
cmc__load_primary_rds(cmc, get_private(cmc), oneday()),
"Primary PACKAGES missing")
})
test_that("load_primary_pkgs", {
withr::local_options(list(repos = NULL))
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
expect_error(
get_private(cmc)$load_primary_pkgs(oneday()),
"Some primary PACKAGES files don't exist")
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-mac.gz"), pri_files$pkgs$path[1])
expect_error(
synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
"Some primary PACKAGES files don't exist")
for (i in utils::tail(seq_len(nrow(pri_files$pkgs)), -1)) {
fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path[i])
}
file_set_time(pri_files$pkgs$path, Sys.time() - 2 * oneday())
expect_error(
synchronise(get_private(cmc)$load_primary_pkgs(oneday())),
"Some primary PACKAGES files are outdated")
file_set_time(pri_files$pkgs$path, Sys.time() - 1/2 * oneday())
res <- synchronise(get_private(cmc)$load_primary_pkgs(oneday()))
check_packages_data(res)
## RDS was updated as well
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneday())
## Primary RDS was updated as well
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
})
test_that("update_replica_pkgs", {
skip_if_offline()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
synchronise(get_private(cmc)$update_replica_pkgs())
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(all(file.exists(rep_files$pkgs$path)))
expect_true(all(file.exists(rep_files$pkgs$etag)))
data <- get_private(cmc)$update_replica_rds()
expect_identical(data, get_private(cmc)$data)
check_packages_data(data)
})
test_that("update_replica_rds", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-mac.gz"), rep_files$pkgs$path[1])
for (i in utils::tail(seq_len(nrow(rep_files$pkgs)), -1)) {
fs::file_copy(get_fixture("PACKAGES-src.gz"), rep_files$pkgs$path[i])
}
data <- get_private(cmc)$update_replica_rds()
expect_identical(get_private(cmc)$data, data)
expect_true(get_private(cmc)$data_time > Sys.time() - oneminute())
check_packages_data(data)
})
test_that("update_primary", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
pri_files <- get_private(cmc)$get_cache_files("primary")
rep_files <- get_private(cmc)$get_cache_files("replica")
mkdirp(dirname(rep_files$rds))
saveRDS("RDS", rep_files$rds)
get_private(cmc)$update_primary(rds = TRUE, packages = FALSE)
expect_true(file.exists(pri_files$rds))
expect_equal(readRDS(pri_files$rds), "RDS")
lapply_rows(rep_files$pkgs, function(pkg) {
mkdirp(dirname(pkg$path))
cat(basename(pkg$path), "\n", sep = "", file = pkg$path)
mkdirp(dirname(pkg$etag))
cat(pkg$url, "\n", sep = "", file = pkg$etag)
})
get_private(cmc)$update_primary(rds = FALSE, packages = TRUE)
expect_true(all(file.exists(pri_files$pkgs$path)))
expect_true(all(file.exists(pri_files$pkgs$etag)))
lapply_rows(pri_files$pkgs, function(pkg) {
expect_equal(readLines(pkg$path), basename(pkg$path))
expect_equal(readLines(pkg$etag), pkg$url)
})
})
test_that("update_primary 2", {
expect_null(cmc__update_primary(NULL, NULL, FALSE, FALSE, FALSE))
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
mockery::stub(cmc__update_primary, "lock", function(...) NULL)
expect_error(
cmc__update_primary(cmc, get_private(cmc), TRUE, TRUE, TRUE),
"Cannot acquire lock to update primary cache")
})
test_that("update", {
skip_if_offline()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
data <- cmc$update()
check_packages_data(data)
## Data is loaded
expect_identical(get_private(cmc)$data, data)
expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
## There is a replica RDS
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
## There is a primary RDS
pri_files <- get_private(cmc)$get_cache_files("primary")
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
## There are replicate PACKAGES, with Etag files
expect_true(all(file.exists(rep_files$pkgs$path)))
expect_true(all(file.exists(rep_files$pkgs$etag)))
## There are primary PACKAGES, with Etag files
expect_true(all(file.exists(pri_files$pkgs$path)))
expect_true(all(file.exists(pri_files$pkgs$etag)))
## List
expect_equal(as.list(data$pkgs), as.list(cmc$list()))
lst <- cmc$list(c("igraph", "MASS"))
expect_equal(sort(c("igraph", "MASS")), sort(unique(lst$package)))
## Revdeps
rdeps <- cmc$revdeps("MASS")
expect_true("abc" %in% rdeps$package)
expect_true("abd" %in% rdeps$package)
rdeps <- cmc$revdeps("MASS", recursive = FALSE)
expect_true("abc" %in% rdeps$package)
expect_false("abd" %in% rdeps$package)
})
# End-to-end test of check_update(): it should download the metadata, load
# it into the in-memory cache, and materialize the RDS / PACKAGES / Etag
# files in both the replica and the primary cache directories. It then
# verifies that a second call is served from cache (Etags current, no
# re-download) and that cleanup(force = TRUE) removes all cached files.
test_that("check_update", {
skip_if_offline()
withr::local_options(
list(repos = c(CRAN = "https://cloud.r-project.org"))
)
# Fresh primary and replica cache directories, removed when the test ends.
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
data <- cmc$check_update()
check_packages_data(data)
## Data is loaded
expect_identical(get_private(cmc)$data, data)
expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
## There is a replica RDS
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
## There is a primary RDS
pri_files <- get_private(cmc)$get_cache_files("primary")
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
## There are replicate PACKAGES, with Etag files
expect_true(all(file.exists(rep_files$pkgs$path)))
expect_true(all(file.exists(rep_files$pkgs$etag)))
## There are primary PACKAGES, with Etag files
expect_true(all(file.exists(pri_files$pkgs$path)))
expect_true(all(file.exists(pri_files$pkgs$etag)))
## We don't download it again, if the Etag files are current
# Overwrite the cached files with sentinel content; if the next
# check_update() re-downloaded anything, the sentinels would be replaced.
cat("foobar\n", file = rep_files$pkgs$path[1])
cat("foobar2\n", file = rep_files$rds)
cat("foobar\n", file = pri_files$pkgs$path[1])
cat("foobar2\n", file = pri_files$rds)
data2 <- cmc$check_update()
expect_identical(data, data2)
# Sentinel survived, so the PACKAGES file was not downloaded again.
expect_equal(read_lines(rep_files$pkgs$path[1]), "foobar")
## Cleanup
cmc$cleanup(force = TRUE)
expect_false(file.exists(pri_files$rds))
expect_false(any(file.exists(pri_files$pkgs$path)))
expect_false(file.exists(rep_files$rds))
expect_false(any(file.exists(rep_files$pkgs$path)))
})
# deps() on a cache that only has a (fresh) primary PACKAGES file should
# transparently rebuild the RDS files and load the data, without any
# network downloads (hence no Etag files are expected).
test_that("deps will auto-update as needed", {
skip_if_offline()
withr::local_options(list(repos = NULL))
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
# Seed the primary cache with a fixture PACKAGES file instead of
# downloading one.
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path)
## This will update the RDS files, and also load the data
cmc$deps("A3", recursive = FALSE)
## Data is loaded
expect_false(is.null(get_private(cmc)$data))
expect_true(Sys.time() - get_private(cmc)$data_time < oneminute())
## There is a replica RDS
rep_files <- get_private(cmc)$get_cache_files("replica")
expect_true(file.exists(rep_files$rds))
expect_true(Sys.time() - file_get_time(rep_files$rds) < oneminute())
## There is a primary RDS
pri_files <- get_private(cmc)$get_cache_files("primary")
expect_true(file.exists(pri_files$rds))
expect_true(Sys.time() - file_get_time(pri_files$rds) < oneminute())
## There are replicate PACKAGES, no Etag files, since no downloads...
expect_true(all(file.exists(rep_files$pkgs$path)))
## There are primary PACKAGES, no Etag files, since no downloads...
expect_true(all(file.exists(pri_files$pkgs$path)))
})
# Check that cmc$deps() agrees with the lower-level extract_deps(), and
# that extract_deps() handles the recursive / include-base-packages flags
# and custom dependency types correctly, using the PACKAGES-src fixture.
test_that("deps, extract_deps", {
skip_if_offline()
withr::local_options(list(repos = NULL))
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE,
cran_mirror = "mirror")
# Seed the primary cache with the fixture and back-date it half a day so
# it still counts as valid (no re-download).
pri_files <- get_private(cmc)$get_cache_files("primary")
mkdirp(dirname(pri_files$pkgs$path))
fs::file_copy(get_fixture("PACKAGES-src.gz"), pri_files$pkgs$path)
file_set_time(pri_files$pkgs$path, Sys.time() - 1/2 * oneday())
# Parse the same fixture directly, to compare against cmc$deps().
pkgs <- read_packages_file(
get_fixture("PACKAGES-src.gz"),
mirror = "mirror", repodir = "src/contrib", platform = "source",
rversion = get_minor_r_version(current_r_version()), type = "cran")
# Non-recursive, no base packages: only the package itself.
deps <- cmc$deps("abc", FALSE, FALSE)
expect_identical(deps$package, "abc")
expect_identical(attr(deps, "base"), character())
expect_identical(attr(deps, "unknown"), character())
deps2 <- extract_deps(pkgs, "abc", FALSE, FALSE)
expect_identical(deps, deps2)
# Recursive, no base packages.
deps <- extract_deps(pkgs, "abc", TRUE, FALSE)
expect_identical(deps$package, c("abc", "abc.data", "MASS", "nnet"))
expect_identical(attr(deps, "base"), character())
expect_identical(attr(deps, "unknown"), c("quantreg", "locfit"))
deps2 <- extract_deps(pkgs, "abc", TRUE, FALSE)
expect_identical(deps, deps2)
# Recursive, including base packages (reported via the "base" attribute).
deps <- extract_deps(pkgs, "abc", TRUE, TRUE)
expect_identical(deps$package, c("abc", "abc.data", "MASS", "nnet"))
expect_identical(
sort(attr(deps, "base")),
sort(c("grDevices", "graphics", "stats", "utils", "methods")))
expect_identical(attr(deps, "unknown"), c("quantreg", "locfit"))
deps2 <- extract_deps(pkgs, "abc", TRUE, TRUE)
expect_identical(deps, deps2)
# Custom dependency types instead of a logical recursive flag.
deps <- extract_deps(pkgs, "nnet", c("Depends", "Suggests"), FALSE)
expect_identical(deps$package, c("MASS", "nnet"))
expect_identical(attr(deps, "base"), c("stats", "utils"))
expect_identical(attr(deps, "unknown"), character())
deps2 <- extract_deps(pkgs, "nnet", c("Depends", "Suggests"), FALSE)
expect_identical(deps, deps2)
})
# Three concurrent async_update() calls must all resolve with valid data,
# and the shared deferred must be cleared afterwards. (Presumably the
# three calls share one underlying download -- see TODO below.)
test_that("concurrency in update", {
skip_if_offline()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
## TODO: somehow check that there are no parallel downloads
do <- function() {
dx1 <- cmc$async_update()
dx2 <- cmc$async_update()
dx3 <- cmc$async_update()
when_all(dx1, dx2, dx3)
}
res <- synchronise(do())
# All three deferred values resolved to valid packages data.
check_packages_data(res[[1]])
check_packages_data(res[[2]])
check_packages_data(res[[3]])
# The shared in-flight deferred is cleared once the update finishes.
expect_null(get_private(cmc)$update_deferred)
})
# cmc__get_repos() should rewrite the CRAN URL to the configured mirror
# and, when bioc = TRUE, append the standard Bioconductor repositories,
# honoring any Bioconductor URLs the user already configured.
test_that("cmc__get_repos", {
user_repos <- c(CRAN = "bad")
## Without Bioconductor: a single CRAN row, "bad" replaced by the mirror.
expect_equal(
cmc__get_repos(user_repos, FALSE, cran_mirror = "good", r_version = "3.5"),
tibble(name = "CRAN", url = "good", type = "cran",
bioc_version = NA_character_)
)
## With Bioconductor: the four BioC repos are appended after CRAN.
got <- cmc__get_repos(user_repos, TRUE, "good", r_version = "3.5")
expect_equal(
got$name,
c("CRAN", "BioCsoft", "BioCann", "BioCexp", "BioCworkflows"))
expect_equal(got$url[1], "good")
expect_equal(got$type, c("cran", "bioc", "bioc", "bioc", "bioc"))
expect_equal(
got$bioc_version,
c(NA_character_, "3.8", "3.8", "3.8", "3.8"))
## A user-supplied BioC repo URL is preserved as-is.
user_repos <- c(CRAN = "bad", BioCsoft = "ok")
got <- cmc__get_repos(user_repos, TRUE, "good", r_version = "3.5")
expect_equal(
got$name,
c("CRAN", "BioCsoft", "BioCann", "BioCexp", "BioCworkflows"))
expect_equal(got$url[1], "good")
expect_equal(got$url[2], "ok")
expect_equal(got$type, c("cran", "bioc", "bioc", "bioc", "bioc"))
expect_equal(
got$bioc_version,
c(NA_character_, "3.8", "3.8", "3.8", "3.8"))
})
# With an unreachable CRAN mirror (a port on localhost nothing listens
# on), update() must emit a "Metadata download failed" message and error,
# and subsequent accessors must error as well since no data was cached.
test_that("download failures", {
skip_if_offline()
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(
pri, rep, "source", bioc = FALSE,
cran_mirror = "http://127.0.0.1:23424/")
expect_error(
expect_message(cmc$update(), "Metadata download failed"))
expect_error(cmc$get_update())
expect_error(cmc$list())
})
# In a non-interactive session, cmc_cleanup() without force = TRUE must
# refuse to delete anything.
test_that("cleanup", {
mockery::stub(cmc_cleanup, "interactive", FALSE)
expect_error(cmc_cleanup(NULL, NULL, FALSE), "Not cleaning up cache")
})
# In an interactive session, an empty answer at the confirmation prompt
# (readline stubbed to "") must abort the cleanup.
test_that("cleanup", {
dir.create(pri <- fs::path_norm(tempfile()))
on.exit(unlink(pri, recursive = TRUE), add = TRUE)
dir.create(rep <- fs::path_norm(tempfile()))
on.exit(unlink(rep, recursive = TRUE), add = TRUE)
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
mockery::stub(cmc_cleanup, "interactive", TRUE)
mockery::stub(cmc_cleanup, "readline", "")
expect_error(cmc_cleanup(cmc, get_private(cmc), FALSE), "Aborted")
})
# A second cache instance sharing the same primary directory can serve
# data from the in-memory cache if it is younger than the max-age passed
# to get_memory_cache(); with a tiny max-age it must error as outdated.
test_that("memory cache", {
skip_if_offline()
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, "source", bioc = FALSE)
data <- cmc$list()
# New instance, same primary cache: a one-week max-age accepts the entry.
rep2 <- test_temp_dir()
cmc2 <- cranlike_metadata_cache$new(pri, rep2, "source", bioc = FALSE)
week <- as.difftime(7, units = "days")
data2 <- get_private(cmc2)$get_memory_cache(week)
expect_identical(data, data2$pkgs)
# A (practically zero) max-age makes the same entry count as outdated.
rep3 <- test_temp_dir()
cmc3 <- cranlike_metadata_cache$new(pri, rep3, "source", bioc = FALSE)
instance <- as.difftime(1/100000, units = "secs")
expect_error(data3 <- get_private(cmc3)$get_memory_cache(instance),
"Memory cache outdated")
})
# cmc__copy_to_replica() must fail with a clear error when the file lock
# on the primary cache cannot be acquired (lock() stubbed to NULL).
# NOTE(review): the test name says "update_memory_cache" but the body
# exercises cmc__copy_to_replica -- possibly a stale name; confirm.
test_that("update_memory_cache", {
pri <- test_temp_dir()
rep <- test_temp_dir()
cmc <- cranlike_metadata_cache$new(pri, rep, c("macos", "source"),
bioc = FALSE)
mockery::stub(cmc__copy_to_replica, "lock", function(...) NULL)
expect_error(
cmc__copy_to_replica(cmc, get_private(cmc), TRUE, TRUE, TRUE),
"Cannot acquire lock to copy primary cache")
})
|
## Get bad probesets for Section 6 trimming, plus plots
##
## For every split in `sepA` (samples flagged 1 or 3 form the "build" set),
## fit a per-probeset linear model  expression ~ batch + group  on the
## combined A/B data, once for RMA- and once for fRMA-normalized values.
## Probesets whose batch-term p-value falls below the 10%..60% quantile
## thresholds are flagged as "bad" (indicator 1). The indicator vectors and
## the batch / outcome t-statistics are saved as RData files in `direct`.
##
## NOTE(review): `sepA`, `sepB` and `grp` are read from the calling
## environment rather than passed as arguments -- they must be defined
## before this function is called; confirm against the calling script.
##
## Args:
##   celA, celB   AffyBatch objects for batches A and B
##   frmaA, frmaB fRMA-normalized expression matrices for batches A and B
##   direct       output directory for the saved RData files
##   filename     prefix for the output file names
bad_ps_fun <- function(celA, celB,
                       frmaA, frmaB,
                       direct,
                       filename) {
  # Save outputs into `direct`, but restore the caller's working directory
  # on exit (the previous version left it changed).
  old_wd <- setwd(direct)
  on.exit(setwd(old_wd), add = TRUE)
  library(affy)

  # Return a 0/1 vector flagging entries of `pvals` strictly below `thresh`.
  # which() is used so NA p-values are treated as "not flagged".
  flag_below <- function(pvals, thresh) {
    ind <- rep(0, length(pvals))
    ind[which(pvals < thresh)] <- 1
    ind
  }

  ### REMOVE BATCHY PROBES ###
  # One indicator vector per split, per quantile threshold (1..6).
  rma.bad.ps.1 <- list()
  rma.bad.ps.2 <- list()
  rma.bad.ps.3 <- list()
  rma.bad.ps.4 <- list()
  rma.bad.ps.5 <- list()
  rma.bad.ps.6 <- list()
  frma.bad.ps.1 <- list()
  frma.bad.ps.2 <- list()
  frma.bad.ps.3 <- list()
  frma.bad.ps.4 <- list()
  frma.bad.ps.5 <- list()
  frma.bad.ps.6 <- list()
  # Coefficient stores, pre-sized to 100 splits exactly as before
  # (elements beyond length(sepA) remain 0 in the saved objects).
  rma_batch_coefs <- rep(list(0), 100)
  rma_out_coefs <- rep(list(0), 100)
  frma_batch_coefs <- rep(list(0), 100)
  frma_out_coefs <- rep(list(0), 100)

  quants <- seq(.1, .6, by = .1)  # quantile levels; invariant across splits

  for (i in seq_along(sepA)) {
    ## 1 and 3 are the build sets ##
    celAb <- celA[, sepA[[i]] == 1 | sepA[[i]] == 3]
    celBb <- celB[, sepB[[i]] == 1 | sepB[[i]] == 3]
    lenA <- length(celAb)
    lenB <- length(celBb)
    batch <- c(rep("A", lenA), rep("B", lenB))
    rma_datAB <- merge.AffyBatch(celAb, celBb)
    rma_datAB <- exprs(rma(rma_datAB))
    frma_datAB <- cbind(frmaA[, sepA[[i]] == 1 | sepA[[i]] == 3],
                        frmaB[, sepB[[i]] == 1 | sepB[[i]] == 3])
    modgrp <- c(grp[[1]][sepA[[i]] == 1 | sepA[[i]] == 3],
                grp[[2]][sepB[[i]] == 1 | sepB[[i]] == 3])
    len <- nrow(frma_datAB)

    # RMA first. In the coefficient table, row 2 is the batch term and
    # row 3 the group term; column 3 = t value, column 4 = p value.
    rma_pvals <- numeric(len)
    rma_batch_coefs[[i]] <- numeric(len)
    rma_out_coefs[[i]] <- numeric(len)
    for (s in seq_len(len)) {
      out <- as.vector(rma_datAB[s, ])
      fit <- lm(out ~ as.factor(batch) + as.factor(modgrp))
      ct <- coef(summary(fit))  # computed once instead of three times
      rma_pvals[s] <- ct[2, 4]
      rma_batch_coefs[[i]][s] <- ct[2, 3]
      rma_out_coefs[[i]][s] <- ct[3, 3]
    }
    rma_threshs <- quantile(rma_pvals, quants)

    # fRMA next: same model on the fRMA expression matrix.
    frma_pvals <- numeric(len)
    frma_batch_coefs[[i]] <- numeric(len)
    frma_out_coefs[[i]] <- numeric(len)
    for (s in seq_len(len)) {
      out <- as.vector(frma_datAB[s, ])
      fit <- lm(out ~ as.factor(batch) + as.factor(modgrp))
      ct <- coef(summary(fit))
      frma_pvals[s] <- ct[2, 4]
      frma_batch_coefs[[i]][s] <- ct[2, 3]
      frma_out_coefs[[i]][s] <- ct[3, 3]
    }
    frma_threshs <- quantile(frma_pvals, quants)

    ## RMA bad probesets ##
    rma.bad.ps.1[[i]] <- flag_below(rma_pvals, rma_threshs[1])
    rma.bad.ps.2[[i]] <- flag_below(rma_pvals, rma_threshs[2])
    rma.bad.ps.3[[i]] <- flag_below(rma_pvals, rma_threshs[3])
    rma.bad.ps.4[[i]] <- flag_below(rma_pvals, rma_threshs[4])
    rma.bad.ps.5[[i]] <- flag_below(rma_pvals, rma_threshs[5])
    rma.bad.ps.6[[i]] <- flag_below(rma_pvals, rma_threshs[6])
    ## fRMA bad probesets ##
    frma.bad.ps.1[[i]] <- flag_below(frma_pvals, frma_threshs[1])
    frma.bad.ps.2[[i]] <- flag_below(frma_pvals, frma_threshs[2])
    frma.bad.ps.3[[i]] <- flag_below(frma_pvals, frma_threshs[3])
    frma.bad.ps.4[[i]] <- flag_below(frma_pvals, frma_threshs[4])
    frma.bad.ps.5[[i]] <- flag_below(frma_pvals, frma_threshs[5])
    frma.bad.ps.6[[i]] <- flag_below(frma_pvals, frma_threshs[6])
  }

  # Persist results; file names keep the original <filename>_* pattern.
  save(list = c('rma.bad.ps.1',
                'rma.bad.ps.2',
                'rma.bad.ps.3',
                'rma.bad.ps.4',
                'rma.bad.ps.5',
                'rma.bad.ps.6'),
       file = paste(filename, "_RMA_badps.RData", sep = ""))
  save(list = c('frma.bad.ps.1',
                'frma.bad.ps.2',
                'frma.bad.ps.3',
                'frma.bad.ps.4',
                'frma.bad.ps.5',
                'frma.bad.ps.6'),
       file = paste(filename, "_fRMA_badps.RData", sep = ""))
  save(list = c("rma_batch_coefs", "frma_batch_coefs",
                "rma_out_coefs", "frma_out_coefs"),
       file = paste(filename, "_model_coefs.RData", sep = ""))
}
|
/B_analysts_sources_github/hilaryparker/PracticalBatch/Section6_bad_ps.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false
| false
| 4,864
|
r
|
## Get bad probesets for Section 6 trimming, plus plots
##
## For every split in `sepA` (samples flagged 1 or 3 form the "build" set),
## fit a per-probeset linear model  expression ~ batch + group  on the
## combined A/B data, once for RMA- and once for fRMA-normalized values.
## Probesets whose batch-term p-value falls below the 10%..60% quantile
## thresholds are flagged as "bad" (indicator 1). The indicator vectors and
## the batch / outcome t-statistics are saved as RData files in `direct`.
##
## NOTE(review): `sepA`, `sepB` and `grp` are read from the calling
## environment rather than passed as arguments -- they must be defined
## before this function is called; confirm against the calling script.
##
## Args:
##   celA, celB   AffyBatch objects for batches A and B
##   frmaA, frmaB fRMA-normalized expression matrices for batches A and B
##   direct       output directory for the saved RData files
##   filename     prefix for the output file names
bad_ps_fun <- function(celA, celB,
                       frmaA, frmaB,
                       direct,
                       filename) {
  # Save outputs into `direct`, but restore the caller's working directory
  # on exit (the previous version left it changed).
  old_wd <- setwd(direct)
  on.exit(setwd(old_wd), add = TRUE)
  library(affy)

  # Return a 0/1 vector flagging entries of `pvals` strictly below `thresh`.
  # which() is used so NA p-values are treated as "not flagged".
  flag_below <- function(pvals, thresh) {
    ind <- rep(0, length(pvals))
    ind[which(pvals < thresh)] <- 1
    ind
  }

  ### REMOVE BATCHY PROBES ###
  # One indicator vector per split, per quantile threshold (1..6).
  rma.bad.ps.1 <- list()
  rma.bad.ps.2 <- list()
  rma.bad.ps.3 <- list()
  rma.bad.ps.4 <- list()
  rma.bad.ps.5 <- list()
  rma.bad.ps.6 <- list()
  frma.bad.ps.1 <- list()
  frma.bad.ps.2 <- list()
  frma.bad.ps.3 <- list()
  frma.bad.ps.4 <- list()
  frma.bad.ps.5 <- list()
  frma.bad.ps.6 <- list()
  # Coefficient stores, pre-sized to 100 splits exactly as before
  # (elements beyond length(sepA) remain 0 in the saved objects).
  rma_batch_coefs <- rep(list(0), 100)
  rma_out_coefs <- rep(list(0), 100)
  frma_batch_coefs <- rep(list(0), 100)
  frma_out_coefs <- rep(list(0), 100)

  quants <- seq(.1, .6, by = .1)  # quantile levels; invariant across splits

  for (i in seq_along(sepA)) {
    ## 1 and 3 are the build sets ##
    celAb <- celA[, sepA[[i]] == 1 | sepA[[i]] == 3]
    celBb <- celB[, sepB[[i]] == 1 | sepB[[i]] == 3]
    lenA <- length(celAb)
    lenB <- length(celBb)
    batch <- c(rep("A", lenA), rep("B", lenB))
    rma_datAB <- merge.AffyBatch(celAb, celBb)
    rma_datAB <- exprs(rma(rma_datAB))
    frma_datAB <- cbind(frmaA[, sepA[[i]] == 1 | sepA[[i]] == 3],
                        frmaB[, sepB[[i]] == 1 | sepB[[i]] == 3])
    modgrp <- c(grp[[1]][sepA[[i]] == 1 | sepA[[i]] == 3],
                grp[[2]][sepB[[i]] == 1 | sepB[[i]] == 3])
    len <- nrow(frma_datAB)

    # RMA first. In the coefficient table, row 2 is the batch term and
    # row 3 the group term; column 3 = t value, column 4 = p value.
    rma_pvals <- numeric(len)
    rma_batch_coefs[[i]] <- numeric(len)
    rma_out_coefs[[i]] <- numeric(len)
    for (s in seq_len(len)) {
      out <- as.vector(rma_datAB[s, ])
      fit <- lm(out ~ as.factor(batch) + as.factor(modgrp))
      ct <- coef(summary(fit))  # computed once instead of three times
      rma_pvals[s] <- ct[2, 4]
      rma_batch_coefs[[i]][s] <- ct[2, 3]
      rma_out_coefs[[i]][s] <- ct[3, 3]
    }
    rma_threshs <- quantile(rma_pvals, quants)

    # fRMA next: same model on the fRMA expression matrix.
    frma_pvals <- numeric(len)
    frma_batch_coefs[[i]] <- numeric(len)
    frma_out_coefs[[i]] <- numeric(len)
    for (s in seq_len(len)) {
      out <- as.vector(frma_datAB[s, ])
      fit <- lm(out ~ as.factor(batch) + as.factor(modgrp))
      ct <- coef(summary(fit))
      frma_pvals[s] <- ct[2, 4]
      frma_batch_coefs[[i]][s] <- ct[2, 3]
      frma_out_coefs[[i]][s] <- ct[3, 3]
    }
    frma_threshs <- quantile(frma_pvals, quants)

    ## RMA bad probesets ##
    rma.bad.ps.1[[i]] <- flag_below(rma_pvals, rma_threshs[1])
    rma.bad.ps.2[[i]] <- flag_below(rma_pvals, rma_threshs[2])
    rma.bad.ps.3[[i]] <- flag_below(rma_pvals, rma_threshs[3])
    rma.bad.ps.4[[i]] <- flag_below(rma_pvals, rma_threshs[4])
    rma.bad.ps.5[[i]] <- flag_below(rma_pvals, rma_threshs[5])
    rma.bad.ps.6[[i]] <- flag_below(rma_pvals, rma_threshs[6])
    ## fRMA bad probesets ##
    frma.bad.ps.1[[i]] <- flag_below(frma_pvals, frma_threshs[1])
    frma.bad.ps.2[[i]] <- flag_below(frma_pvals, frma_threshs[2])
    frma.bad.ps.3[[i]] <- flag_below(frma_pvals, frma_threshs[3])
    frma.bad.ps.4[[i]] <- flag_below(frma_pvals, frma_threshs[4])
    frma.bad.ps.5[[i]] <- flag_below(frma_pvals, frma_threshs[5])
    frma.bad.ps.6[[i]] <- flag_below(frma_pvals, frma_threshs[6])
  }

  # Persist results; file names keep the original <filename>_* pattern.
  save(list = c('rma.bad.ps.1',
                'rma.bad.ps.2',
                'rma.bad.ps.3',
                'rma.bad.ps.4',
                'rma.bad.ps.5',
                'rma.bad.ps.6'),
       file = paste(filename, "_RMA_badps.RData", sep = ""))
  save(list = c('frma.bad.ps.1',
                'frma.bad.ps.2',
                'frma.bad.ps.3',
                'frma.bad.ps.4',
                'frma.bad.ps.5',
                'frma.bad.ps.6'),
       file = paste(filename, "_fRMA_badps.RData", sep = ""))
  save(list = c("rma_batch_coefs", "frma_batch_coefs",
                "rma_out_coefs", "frma_out_coefs"),
       file = paste(filename, "_model_coefs.RData", sep = ""))
}
|
#!/usr/bin/env Rscript
#
# PSM2SAM: convert peptide-spectrum matches (PSMs) from an IDPicker idpDB,
# a pepXmlTab file, or a PeptideShaker PSM report into a proBAM-style SAM
# file ("output.sam"), using exon / protein / coding-sequence annotation
# and the header lines of an existing BAM/SAM file.
## begin warning handler
withCallingHandlers({
  library(methods) # Because Rscript does not always do this
  options('useFancyQuotes' = FALSE)
  suppressPackageStartupMessages(library("optparse"))
  suppressPackageStartupMessages(library("RGalaxy"))

  # One command-line option per psm2sam() parameter.
  option_list <- list()
  option_list$exon_anno <- make_option('--exon_anno', type='character')
  option_list$proteinseq <- make_option('--proteinseq', type='character')
  option_list$procodingseq <- make_option('--procodingseq', type='character')
  option_list$bam_file <- make_option('--bam', type='character')
  option_list$idpDB_file <- make_option('--idpDB', type='character')
  option_list$pepXmlTab_file <- make_option('--pepXmlTab', type='character')
  option_list$peptideShakerPsmReport_file <- make_option('--peptideShakerPsmReport', type='character')
  option_list$variantAnnotation_file <- make_option('--variantAnnotation', type='character')
  option_list$searchEngineScore <- make_option('--searchEngineScore', type='character')
  opt <- parse_args(OptionParser(option_list=option_list))

  # Convert PSMs to a SAM file.
  #
  # Exactly one of idpDB_file / pepXmlTab_file / peptideShakerPsmReport_file
  # must be supplied. searchEngineScore is required for idpDB and pepXmlTab
  # input, and ignored (with a warning) for PeptideShaker reports. Writes
  # "output.sam" in the working directory; returns NULL invisibly.
  psm2sam <- function(
    exon_anno_file = GalaxyInputFile(required=TRUE),
    proteinseq_file = GalaxyInputFile(required=TRUE),
    procodingseq_file = GalaxyInputFile(required=TRUE),
    bam_file = GalaxyInputFile(required=TRUE),
    idpDB_file = GalaxyInputFile(required=FALSE),
    pepXmlTab_file = GalaxyInputFile(required=FALSE),
    peptideShakerPsmReport_file = GalaxyInputFile(required=FALSE),
    variantAnnotation_file = GalaxyInputFile(required=FALSE),
    searchEngineScore = GalaxyCharacterParam(required=FALSE)
  )
  {
    options(stringsAsFactors = FALSE)
    if (length(bam_file) == 0)
    {
      stop("BAM file must be specified to provide sequence headers")
    }
    # Take every header line except @PG from the input file
    # (the negative lookahead requires perl = TRUE).
    outputHeader = grep("^@(?!PG)", readLines(bam_file, n=500, warn=FALSE), value=TRUE, perl=TRUE)
    if (length(outputHeader) == 0)
    {
      stop("failed to read header lines from bam_file")
    }
    # load customProDB from GitHub (NOTE: downloading the zip is faster than cloning the repo with git2r or devtools::install_github)
    download.file("https://github.com/chambm/customProDB/archive/9db2223ef9932e50124b92d1bc49206af1f40fb3.zip", "customProDB.zip", quiet=TRUE)
    unzip("customProDB.zip")
    devtools::load_all("customProDB-9db2223ef9932e50124b92d1bc49206af1f40fb3")
    # load proBAMr from GitHub
    download.file("https://github.com/chambm/proBAMr/archive/a03edf68f51215be40717c5374f39ce67bd2e68b.zip", "proBAMr.zip", quiet=TRUE)
    unzip("proBAMr.zip")
    devtools::load_all("proBAMr-a03edf68f51215be40717c5374f39ce67bd2e68b")
    # Exactly one PSM input format may be used at a time.
    psmInputLength = length(idpDB_file)+length(pepXmlTab_file)+length(peptideShakerPsmReport_file)
    if (psmInputLength == 0)
    {
      stop("one of the input PSM file parameters must be specified")
    }
    else if (psmInputLength > 1)
    {
      stop("only one of the input PSM file parameters can be specified")
    }
    if (length(idpDB_file) > 0)
    {
      if (length(searchEngineScore) == 0)
        stop("searchEngineScore parameter must be specified when reading IDPicker PSMs, e.g. 'MyriMatch:MVH'")
      passedPSM = readIdpDB(idpDB_file, searchEngineScore)
    }
    else if (length(pepXmlTab_file) > 0)
    {
      if (length(searchEngineScore) == 0)
        stop("searchEngineScore parameter must be specified when reading pepXmlTab PSMs, e.g. 'mvh'")
      passedPSM = readPepXmlTab(pepXmlTab_file, searchEngineScore)
    }
    else if (length(peptideShakerPsmReport_file) > 0)
    {
      if (length(searchEngineScore) > 0)
        warning("searchEngineScore parameter is ignored when reading PeptideShaker PSM report")
      passedPSM = readPeptideShakerPsmReport(peptideShakerPsmReport_file)
    }
    # Each load() is expected to define the objects used below
    # (exon, proteinseq, procodingseq, variantAnnotation) -- TODO confirm
    # against the files produced by customProDB.
    load(exon_anno_file)
    load(proteinseq_file)
    load(procodingseq_file)
    if (length(variantAnnotation_file) > 0)
    {
      load(variantAnnotation_file) # variantAnnotation list, with members snvprocoding/snvproseq and indelprocoding/indelproseq
      varprocoding = unique(rbind(variantAnnotation$snvprocoding, variantAnnotation$indelprocoding))
      varproseq = unique(rbind(variantAnnotation$snvproseq, variantAnnotation$indelproseq))
    }
    else
    {
      varprocoding = NULL
      varproseq = NULL
    }
    # add proBAMr program key
    outputHeader = c(outputHeader, paste0("@PG\tID:proBAMr\tVN:", packageVersion("proBAMr")))
    # first write header lines to the output SAM
    writeLines(outputHeader, "output.sam")
    # then write the PSM "reads"
    PSMtab2SAM(passedPSM, exon,
               proteinseq, procodingseq,
               varproseq, varprocoding,
               outfile = "output.sam",
               show_progress = FALSE)
    invisible(NULL)
  }

  # Build the argument list for psm2sam() from the parsed options,
  # dropping optparse's auto-added help flag.
  params <- list()
  for (param in names(opt))
  {
    if (param != "help")
      params[[param]] <- opt[[param]]
  }

  setClass("GalaxyRemoteError", contains="character")
  # NOTE(review): wrappedFunction is defined but never called below, so
  # errors from psm2sam() propagate instead of being wrapped.
  wrappedFunction <- function(f)
  {
    tryCatch(do.call(f, params),
             error=function(e) new("GalaxyRemoteError", conditionMessage(e)))
  }

  # (A duplicate suppressPackageStartupMessages(library(RGalaxy)) call was
  # removed here; the package is attached at the top of the script.)
  do.call(psm2sam, params)
## end warning handler
}, warning = function(w) {
  cat(paste("Warning:", conditionMessage(w), "\n"))
  invokeRestart("muffleWarning")
})
|
/tools/probam_suite/psm2sam/PSM2SAM.R
|
permissive
|
bernt-matthias/tools-galaxyp
|
R
| false
| false
| 5,289
|
r
|
#!/usr/bin/env Rscript
#
# PSM2SAM: convert peptide-spectrum matches (PSMs) from an IDPicker idpDB,
# a pepXmlTab file, or a PeptideShaker PSM report into a proBAM-style SAM
# file ("output.sam"), using exon / protein / coding-sequence annotation
# and the header lines of an existing BAM/SAM file.
## begin warning handler
withCallingHandlers({
  library(methods) # Because Rscript does not always do this
  options('useFancyQuotes' = FALSE)
  suppressPackageStartupMessages(library("optparse"))
  suppressPackageStartupMessages(library("RGalaxy"))

  # One command-line option per psm2sam() parameter.
  option_list <- list()
  option_list$exon_anno <- make_option('--exon_anno', type='character')
  option_list$proteinseq <- make_option('--proteinseq', type='character')
  option_list$procodingseq <- make_option('--procodingseq', type='character')
  option_list$bam_file <- make_option('--bam', type='character')
  option_list$idpDB_file <- make_option('--idpDB', type='character')
  option_list$pepXmlTab_file <- make_option('--pepXmlTab', type='character')
  option_list$peptideShakerPsmReport_file <- make_option('--peptideShakerPsmReport', type='character')
  option_list$variantAnnotation_file <- make_option('--variantAnnotation', type='character')
  option_list$searchEngineScore <- make_option('--searchEngineScore', type='character')
  opt <- parse_args(OptionParser(option_list=option_list))

  # Convert PSMs to a SAM file.
  #
  # Exactly one of idpDB_file / pepXmlTab_file / peptideShakerPsmReport_file
  # must be supplied. searchEngineScore is required for idpDB and pepXmlTab
  # input, and ignored (with a warning) for PeptideShaker reports. Writes
  # "output.sam" in the working directory; returns NULL invisibly.
  psm2sam <- function(
    exon_anno_file = GalaxyInputFile(required=TRUE),
    proteinseq_file = GalaxyInputFile(required=TRUE),
    procodingseq_file = GalaxyInputFile(required=TRUE),
    bam_file = GalaxyInputFile(required=TRUE),
    idpDB_file = GalaxyInputFile(required=FALSE),
    pepXmlTab_file = GalaxyInputFile(required=FALSE),
    peptideShakerPsmReport_file = GalaxyInputFile(required=FALSE),
    variantAnnotation_file = GalaxyInputFile(required=FALSE),
    searchEngineScore = GalaxyCharacterParam(required=FALSE)
  )
  {
    options(stringsAsFactors = FALSE)
    if (length(bam_file) == 0)
    {
      stop("BAM file must be specified to provide sequence headers")
    }
    # Take every header line except @PG from the input file
    # (the negative lookahead requires perl = TRUE).
    outputHeader = grep("^@(?!PG)", readLines(bam_file, n=500, warn=FALSE), value=TRUE, perl=TRUE)
    if (length(outputHeader) == 0)
    {
      stop("failed to read header lines from bam_file")
    }
    # load customProDB from GitHub (NOTE: downloading the zip is faster than cloning the repo with git2r or devtools::install_github)
    download.file("https://github.com/chambm/customProDB/archive/9db2223ef9932e50124b92d1bc49206af1f40fb3.zip", "customProDB.zip", quiet=TRUE)
    unzip("customProDB.zip")
    devtools::load_all("customProDB-9db2223ef9932e50124b92d1bc49206af1f40fb3")
    # load proBAMr from GitHub
    download.file("https://github.com/chambm/proBAMr/archive/a03edf68f51215be40717c5374f39ce67bd2e68b.zip", "proBAMr.zip", quiet=TRUE)
    unzip("proBAMr.zip")
    devtools::load_all("proBAMr-a03edf68f51215be40717c5374f39ce67bd2e68b")
    # Exactly one PSM input format may be used at a time.
    psmInputLength = length(idpDB_file)+length(pepXmlTab_file)+length(peptideShakerPsmReport_file)
    if (psmInputLength == 0)
    {
      stop("one of the input PSM file parameters must be specified")
    }
    else if (psmInputLength > 1)
    {
      stop("only one of the input PSM file parameters can be specified")
    }
    if (length(idpDB_file) > 0)
    {
      if (length(searchEngineScore) == 0)
        stop("searchEngineScore parameter must be specified when reading IDPicker PSMs, e.g. 'MyriMatch:MVH'")
      passedPSM = readIdpDB(idpDB_file, searchEngineScore)
    }
    else if (length(pepXmlTab_file) > 0)
    {
      if (length(searchEngineScore) == 0)
        stop("searchEngineScore parameter must be specified when reading pepXmlTab PSMs, e.g. 'mvh'")
      passedPSM = readPepXmlTab(pepXmlTab_file, searchEngineScore)
    }
    else if (length(peptideShakerPsmReport_file) > 0)
    {
      if (length(searchEngineScore) > 0)
        warning("searchEngineScore parameter is ignored when reading PeptideShaker PSM report")
      passedPSM = readPeptideShakerPsmReport(peptideShakerPsmReport_file)
    }
    # Each load() is expected to define the objects used below
    # (exon, proteinseq, procodingseq, variantAnnotation) -- TODO confirm
    # against the files produced by customProDB.
    load(exon_anno_file)
    load(proteinseq_file)
    load(procodingseq_file)
    if (length(variantAnnotation_file) > 0)
    {
      load(variantAnnotation_file) # variantAnnotation list, with members snvprocoding/snvproseq and indelprocoding/indelproseq
      varprocoding = unique(rbind(variantAnnotation$snvprocoding, variantAnnotation$indelprocoding))
      varproseq = unique(rbind(variantAnnotation$snvproseq, variantAnnotation$indelproseq))
    }
    else
    {
      varprocoding = NULL
      varproseq = NULL
    }
    # add proBAMr program key
    outputHeader = c(outputHeader, paste0("@PG\tID:proBAMr\tVN:", packageVersion("proBAMr")))
    # first write header lines to the output SAM
    writeLines(outputHeader, "output.sam")
    # then write the PSM "reads"
    PSMtab2SAM(passedPSM, exon,
               proteinseq, procodingseq,
               varproseq, varprocoding,
               outfile = "output.sam",
               show_progress = FALSE)
    invisible(NULL)
  }

  # Build the argument list for psm2sam() from the parsed options,
  # dropping optparse's auto-added help flag.
  params <- list()
  for (param in names(opt))
  {
    if (param != "help")
      params[[param]] <- opt[[param]]
  }

  setClass("GalaxyRemoteError", contains="character")
  # NOTE(review): wrappedFunction is defined but never called below, so
  # errors from psm2sam() propagate instead of being wrapped.
  wrappedFunction <- function(f)
  {
    tryCatch(do.call(f, params),
             error=function(e) new("GalaxyRemoteError", conditionMessage(e)))
  }

  # (A duplicate suppressPackageStartupMessages(library(RGalaxy)) call was
  # removed here; the package is attached at the top of the script.)
  do.call(psm2sam, params)
## end warning handler
}, warning = function(w) {
  cat(paste("Warning:", conditionMessage(w), "\n"))
  invokeRestart("muffleWarning")
})
|
# Test that rcbsubset() achieves exact fine balance on the requested
# nested factors ('ct', then 'ct' x 'bw') for the nuclearplants data.
# Requires optmatch; the whole test is skipped if it is not installed.
library(rcbalance)
library(rcbsubset)
if (requireNamespace("optmatch", quietly = TRUE)){
library(optmatch)
context('Precision for distances and penalties')
#skip_on_cran()
data(nuclearplants)
#reorder nuclearplants dataframe so treated units come first
nuclearplants <- nuclearplants[order(nuclearplants$pr, decreasing = TRUE),]
# Mahalanobis-style distance structure, exact-matching on 'ne',
# no caliper.
my.dist.struct <- build.dist.struct(z = nuclearplants$pr,
X = subset(nuclearplants[c('date', 't1', 't2', 'cap','cum.n')]),
exact = nuclearplants$ne, calip.option = 'none')
match.out <- rcbsubset(my.dist.struct, treated.info = nuclearplants[1:10,], control.info = nuclearplants[11:32,], fb.list = list('ct',c('ct','bw')))
# Fine balance holds when treated and control marginal counts agree at
# every level of each fine-balance table.
test_that('Fine balance is achieved', {
expect_equal(match.out$fb.tables[[1]][,1], match.out$fb.tables[[1]][,2])
expect_equal(match.out$fb.tables[[2]][,1], match.out$fb.tables[[2]][,2])
})
#THIS CHECK NOW DEPRECATED, NOT TOO IMPORTANT AND HARD TO MAKE CONSISTENT
# BOTH LOCALLY AND ON CRAN.
# test_that('Overly large distances are recognized and caught', {
# new.dist <- matrix(c(1:4)*1e-5, nrow=2, ncol = 2) + .Machine$integer.max - 2
# expect_warning(expect_error(rcbalance(new.dist), 'Integer overflow in penalties! Run with a higher tolerance, a lower penalty value, or fewer levels of fine balance.'), 'NAs introduced by coercion to integer range')
# })
}
|
/tests/testthat/test-precision.R
|
no_license
|
cran/rcbsubset
|
R
| false
| false
| 1,336
|
r
|
# Test that rcbsubset() achieves exact fine balance on the requested
# nested factors ('ct', then 'ct' x 'bw') for the nuclearplants data.
# Requires optmatch; the whole test is skipped if it is not installed.
library(rcbalance)
library(rcbsubset)
if (requireNamespace("optmatch", quietly = TRUE)){
library(optmatch)
context('Precision for distances and penalties')
#skip_on_cran()
data(nuclearplants)
#reorder nuclearplants dataframe so treated units come first
nuclearplants <- nuclearplants[order(nuclearplants$pr, decreasing = TRUE),]
# Mahalanobis-style distance structure, exact-matching on 'ne',
# no caliper.
my.dist.struct <- build.dist.struct(z = nuclearplants$pr,
X = subset(nuclearplants[c('date', 't1', 't2', 'cap','cum.n')]),
exact = nuclearplants$ne, calip.option = 'none')
match.out <- rcbsubset(my.dist.struct, treated.info = nuclearplants[1:10,], control.info = nuclearplants[11:32,], fb.list = list('ct',c('ct','bw')))
# Fine balance holds when treated and control marginal counts agree at
# every level of each fine-balance table.
test_that('Fine balance is achieved', {
expect_equal(match.out$fb.tables[[1]][,1], match.out$fb.tables[[1]][,2])
expect_equal(match.out$fb.tables[[2]][,1], match.out$fb.tables[[2]][,2])
})
#THIS CHECK NOW DEPRECATED, NOT TOO IMPORTANT AND HARD TO MAKE CONSISTENT
# BOTH LOCALLY AND ON CRAN.
# test_that('Overly large distances are recognized and caught', {
# new.dist <- matrix(c(1:4)*1e-5, nrow=2, ncol = 2) + .Machine$integer.max - 2
# expect_warning(expect_error(rcbalance(new.dist), 'Integer overflow in penalties! Run with a higher tolerance, a lower penalty value, or fewer levels of fine balance.'), 'NAs introduced by coercion to integer range')
# })
}
# Compare per-gene mutation rates between two maftools MAF cohorts
# ("MUT" vs "WILD"), export the comparison table, and draw forest /
# oncoplot / co-oncoplot / co-barplot figures via export (graph2ppt/pdf).
# (Original comments were in Chinese; translated to English below.)
compareMAF_AB <- mafCompare(m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn,
m1Name = 'MUT', m2Name = 'WILD', minMut = 10, useCNV = T) # keep genes with >= minMut mutations (maftools default minimum is 5)
forestPlot(mafCompareRes = compareMAF_AB, color = c('royalblue', 'maroon'), fdr = 0.3, geneFontSize = 0.8)
## once fdr is set, the p-value cutoff has no effect
graph2ppt(file = "output/plots/野生vs突变8基因森林图.pptx") # remember to drop KEAP1 and NFE2L2
print(compareMAF_AB)
summary_compareMAF<-as.data.frame(compareMAF_AB$results)
write.csv(summary_compareMAF,"outdata/summary_compareMAF.csv")
# deSNP <- compareMAF_AB$results %>%
# dplyr::arrange(., order(or,decreasing = T)) %>%
# dplyr::filter(is.finite(or)) %>%
# dplyr::select(Hugo_Symbol) %>% head( , n=25) %>% as.data.frame() %>% .[,1]
# deSNP
## the 8 most clearly differential genes -- but these mutate at relatively low frequency
deSNP <- c("STK11","EGFR","GRIN2B","SPEF2", "SNTG2","BRWD3","OR6N1","ADGRB1")
### final choice: 10 genes from the top-10 mutated list that appear to differ between groups
genes <- c("TTN","RYR2","CSMD3","USH2A","SPTA1","ZFHX4","NAV3","EGFR","SPEF2","SNTG2")
# NOTE(review): this overwrites the 8-gene list above, which is therefore unused.
deSNP <- genes
## draw the oncoplots (waterfall plots)
oncoplot(maf = TCGAdataMAFA.plus.cn,
keepGeneOrder = T,
genes = deSNP)
graph2ppt(file = "output/plots/突变组8低频基因的oncoplot.pptx")
graph2pdf(file = "output/plots/突变组8低频基因的oncoplot")
oncoplot(maf = TCGAdataMAFB.plus.cn,
keepGeneOrder = T,
genes = deSNP) # show only the specified genes
graph2ppt(file = "output/plots/野生组8低频基因的oncoplot")
graph2pdf(file = "output/plots/野生组8低频基因的oncoplot.pdf")
coOncoplot(genes = deSNP, m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn, m1Name = 'MUT', m2Name = 'WILD', removeNonMutated = TRUE)
graph2pdf(file = "output/plots/野生vs突变8低频基因的cooncoplot.pdf")
coBarplot(genes = deSNP, m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn,
m1Name = 'MUT', m2Name = 'WILD', yLims = c(30,30) )
graph2pdf(file = "output/plots/野生vs突变8低频基因的cobarplot.pdf")
# Visualize the comparison as a forest plot. KEAP1 is necessarily
# significant here because it defines my grouping condition.
forestPlot(mafCompareRes = compareMAF_AB, pVal = 0.1,
color = c('royalblue', 'maroon'),
fdr = 0.1,
geneFontSize = 0.8)
|
/step08--比较野生和突变之间前几位突变——————.R
|
no_license
|
xjin15/KEAP1-NFE2L2-CUL3-analysis-code
|
R
| false
| false
| 2,406
|
r
|
# Compare per-gene mutation rates between two maftools MAF cohorts
# ("MUT" vs "WILD"), export the comparison table, and draw forest /
# oncoplot / co-oncoplot / co-barplot figures via export (graph2ppt/pdf).
# (Original comments were in Chinese; translated to English below.)
compareMAF_AB <- mafCompare(m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn,
m1Name = 'MUT', m2Name = 'WILD', minMut = 10, useCNV = T) # keep genes with >= minMut mutations (maftools default minimum is 5)
forestPlot(mafCompareRes = compareMAF_AB, color = c('royalblue', 'maroon'), fdr = 0.3, geneFontSize = 0.8)
## once fdr is set, the p-value cutoff has no effect
graph2ppt(file = "output/plots/野生vs突变8基因森林图.pptx") # remember to drop KEAP1 and NFE2L2
print(compareMAF_AB)
summary_compareMAF<-as.data.frame(compareMAF_AB$results)
write.csv(summary_compareMAF,"outdata/summary_compareMAF.csv")
# deSNP <- compareMAF_AB$results %>%
# dplyr::arrange(., order(or,decreasing = T)) %>%
# dplyr::filter(is.finite(or)) %>%
# dplyr::select(Hugo_Symbol) %>% head( , n=25) %>% as.data.frame() %>% .[,1]
# deSNP
## the 8 most clearly differential genes -- but these mutate at relatively low frequency
deSNP <- c("STK11","EGFR","GRIN2B","SPEF2", "SNTG2","BRWD3","OR6N1","ADGRB1")
### final choice: 10 genes from the top-10 mutated list that appear to differ between groups
genes <- c("TTN","RYR2","CSMD3","USH2A","SPTA1","ZFHX4","NAV3","EGFR","SPEF2","SNTG2")
# NOTE(review): this overwrites the 8-gene list above, which is therefore unused.
deSNP <- genes
## draw the oncoplots (waterfall plots)
oncoplot(maf = TCGAdataMAFA.plus.cn,
keepGeneOrder = T,
genes = deSNP)
graph2ppt(file = "output/plots/突变组8低频基因的oncoplot.pptx")
graph2pdf(file = "output/plots/突变组8低频基因的oncoplot")
oncoplot(maf = TCGAdataMAFB.plus.cn,
keepGeneOrder = T,
genes = deSNP) # show only the specified genes
graph2ppt(file = "output/plots/野生组8低频基因的oncoplot")
graph2pdf(file = "output/plots/野生组8低频基因的oncoplot.pdf")
coOncoplot(genes = deSNP, m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn, m1Name = 'MUT', m2Name = 'WILD', removeNonMutated = TRUE)
graph2pdf(file = "output/plots/野生vs突变8低频基因的cooncoplot.pdf")
coBarplot(genes = deSNP, m1 = TCGAdataMAFA.plus.cn, m2 = TCGAdataMAFB.plus.cn,
m1Name = 'MUT', m2Name = 'WILD', yLims = c(30,30) )
graph2pdf(file = "output/plots/野生vs突变8低频基因的cobarplot.pdf")
# Visualize the comparison as a forest plot. KEAP1 is necessarily
# significant here because it defines my grouping condition.
forestPlot(mafCompareRes = compareMAF_AB, pVal = 0.1,
color = c('royalblue', 'maroon'),
fdr = 0.1,
geneFontSize = 0.8)
|
# Notes for 03-quality-control.R
# --------------------------------------
## Copy code from https://github.com/lcolladotor/osca_LIIGH_UNAM_2020/blob/master/03-quality-control.R
## ----all_code, cache=TRUE--------------------------------------------------------------------------------------------
## Data
library('scRNAseq')
sce.416b <- LunSpikeInData(which = "416b")
sce.416b$block <- factor(sce.416b$block)
# Download the relevant Ensembl annotation database
# using AnnotationHub resources
library('AnnotationHub')
ah <- AnnotationHub()
query(ah, c("Mus musculus", "Ensembl", "v97"))
# Annotate each gene with its chromosome location
ens.mm.v97 <- ah[["AH73905"]]
location <- mapIds(
    ens.mm.v97,
    keys = rownames(sce.416b),
    keytype = "GENEID",
    column = "SEQNAME"
)
# Identify the mitochondrial genes
is.mito <- which(location == "MT")
library('scater')
## sums of altExp are calculated
x <- colSums(counts(altExp(sce.416b, 'ERCC')))
head(x)
x_genes <- colSums(counts(sce.416b))
head(x_genes)
sce.416b <- addPerCellQC(sce.416b,
    subsets = list(Mito = is.mito))
## ----qc_metrics, cache=TRUE, dependson='all_code'--------------------------------------------------------------------
plotColData(sce.416b, x = "block", y = "detected")
plotColData(sce.416b, x = "block", y = "detected") +
    scale_y_log10()
plotColData(sce.416b,
    x = "block",
    y = "detected",
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
## ----all_code_part2, cache = TRUE, dependson='all_code'--------------------------------------------------------------
# Example thresholds (fixed cutoffs, chosen by eye)
qc.lib <- sce.416b$sum < 100000
qc.nexprs <- sce.416b$detected < 5000
qc.spike <- sce.416b$altexps_ERCC_percent > 10
qc.mito <- sce.416b$subsets_Mito_percent > 10
discard <- qc.lib | qc.nexprs | qc.spike | qc.mito
class(qc.lib)
addmargins(table('lib' = qc.lib, 'nexprs' = qc.nexprs, 'spike' = qc.spike, 'mito' = qc.mito))
addmargins(table('lib' = qc.lib, 'spike' = qc.spike, 'mito' = qc.mito))
addmargins(table('lib' = qc.lib, 'other filters' = qc.nexprs | qc.spike | qc.mito))
which(qc.lib & (qc.nexprs | qc.spike | qc.mito))
which(qc.lib & !(qc.nexprs | qc.spike | qc.mito))
intersect(which(qc.lib), which(qc.nexprs | qc.spike | qc.mito))
# Summarize the number of cells removed for each reason
DataFrame(
    LibSize = sum(qc.lib),
    NExprs = sum(qc.nexprs),
    SpikeProp = sum(qc.spike),
    MitoProp = sum(qc.mito),
    Total = sum(discard)
)
plotColData(sce.416b, x = "block", y = "sum")
plotColData(sce.416b, x = "block", y = "sum") + scale_y_log10()
# Adaptive (outlier-based) thresholds instead of fixed cutoffs
qc.lib2 <- isOutlier(sce.416b$sum, log = TRUE, type = "lower")
qc.nexprs2 <- isOutlier(sce.416b$detected, log = TRUE,
    type = "lower")
qc.spike2 <- isOutlier(sce.416b$altexps_ERCC_percent,
    type = "higher")
qc.mito2 <- isOutlier(sce.416b$subsets_Mito_percent,
    type = "higher")
discard2 <- qc.lib2 | qc.nexprs2 | qc.spike2 | qc.mito2
# Extract the thresholds
attr(qc.lib2, "thresholds")
attr(qc.nexprs2, "thresholds")
# Summarize the number of cells removed for each reason.
DataFrame(
    LibSize = sum(qc.lib2),
    NExprs = sum(qc.nexprs2),
    SpikeProp = sum(qc.spike2),
    MitoProp = sum(qc.mito2),
    Total = sum(discard2)
)
## More checks
plotColData(sce.416b,
    x = "block",
    y = "detected",
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
# Batch-aware outlier detection: thresholds computed per phenotype-block combo
batch <- paste0(sce.416b$phenotype, "-", sce.416b$block)
qc.lib3 <- isOutlier(sce.416b$sum,
    log = TRUE,
    type = "lower",
    batch = batch)
qc.nexprs3 <- isOutlier(sce.416b$detected,
    log = TRUE,
    type = "lower",
    batch = batch)
qc.spike3 <- isOutlier(sce.416b$altexps_ERCC_percent,
    type = "higher",
    batch = batch)
qc.mito3 <- isOutlier(sce.416b$subsets_Mito_percent,
    type = "higher",
    batch = batch)
discard3 <- qc.lib3 | qc.nexprs3 | qc.spike3 | qc.mito3
sce.416b$discard3 <- discard3
plotColData(sce.416b,
    x = "block",
    y = "detected",
    colour_by = 'discard3',
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
# Extract the thresholds
attr(qc.lib3, "thresholds")
attr(qc.nexprs3, "thresholds")
# Summarize the number of cells removed for each reason
DataFrame(
    LibSize = sum(qc.lib3),
    NExprs = sum(qc.nexprs3),
    SpikeProp = sum(qc.spike3),
    MitoProp = sum(qc.mito3),
    Total = sum(discard3)
)
## ----use_case, cache=TRUE, dependson= c('all_code', 'all_code_part2')------------------------------------------------
sce.grun <- GrunPancreasData()
sce.grun <- addPerCellQC(sce.grun)
plotColData(sce.grun, x = "donor", y = "altexps_ERCC_percent")
hist(sce.grun$altexps_ERCC_percent[sce.grun$donor == 'D10'],
    breaks = 100,
    col = 'light blue')
hist(sce.grun$altexps_ERCC_percent[sce.grun$donor == 'D2'],
    breaks = 100,
    col = 'light blue')
discard.ercc <- isOutlier(sce.grun$altexps_ERCC_percent,
    type = "higher",
    batch = sce.grun$donor)
# Restrict threshold estimation to donors believed to be high quality
discard.ercc2 <- isOutlier(
    sce.grun$altexps_ERCC_percent,
    type = "higher",
    batch = sce.grun$donor,
    subset = sce.grun$donor %in% c("D17", "D2", "D7")
)
## Understanding %in%
class(sce.grun$donor)
length(sce.grun$donor)
dim(sce.grun)
table(sce.grun$donor)
manual_subset <- sce.grun$donor == 'D17' | sce.grun$donor == 'D2' | sce.grun$donor == 'D7'
class(manual_subset)
length(manual_subset)
sum(manual_subset)
480 + 96 + 384
## quicker
# x %in% y
x <- c('a', 'b', 'c', 'ch')
y <- letters
x %in% y
x[x %in% y]
auto_subset <- sce.grun$donor %in% c('D17', 'D2', 'D7')
identical(manual_subset, auto_subset)
plotColData(
    sce.grun,
    x = "donor",
    y = "altexps_ERCC_percent",
    colour_by = data.frame(discard = discard.ercc)
)
plotColData(
    sce.grun,
    x = "donor",
    y = "altexps_ERCC_percent",
    colour_by = data.frame(discard = discard.ercc2)
)
# Add info about which cells are outliers
sce.416b$discard <- discard2
# Look at this plot for each QC metric
plotColData(
    sce.416b,
    x = "block",
    y = "sum",
    colour_by = "discard",
    other_fields = "phenotype"
) +
    facet_wrap( ~ phenotype) +
    scale_y_log10()
# Another useful diagnostic plot
plotColData(
    sce.416b,
    x = "sum",
    y = "subsets_Mito_percent",
    colour_by = "discard",
    other_fields = c("block", "phenotype")
) +
    facet_grid(block ~ phenotype)
## ----use_case_pbmc, cache=TRUE, dependson='all_code'-----------------------------------------------------------------
library('BiocFileCache')
bfc <- BiocFileCache()
raw.path <-
    bfcrpath(
        bfc,
        file.path(
            "http://cf.10xgenomics.com/samples",
            "cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz"
        )
    )
untar(raw.path, exdir = file.path(tempdir(), "pbmc4k"))
library('DropletUtils')
library('Matrix')
fname <- file.path(tempdir(), "pbmc4k/raw_gene_bc_matrices/GRCh38")
sce.pbmc <- read10xCounts(fname, col.names = TRUE)
bcrank <- barcodeRanks(counts(sce.pbmc))
# Only showing unique points for plotting speed.
uniq <- !duplicated(bcrank$rank)
plot(
    bcrank$rank[uniq],
    bcrank$total[uniq],
    log = "xy",
    xlab = "Rank",
    ylab = "Total UMI count",
    cex.lab = 1.2
)
abline(h = metadata(bcrank)$inflection,
    col = "darkgreen",
    lty = 2)
abline(h = metadata(bcrank)$knee,
    col = "dodgerblue",
    lty = 2)
legend(
    "bottomleft",
    legend = c("Inflection", "Knee"),
    col = c("darkgreen", "dodgerblue"),
    lty = 2,
    cex = 1.2
)
set.seed(100)
e.out <- emptyDrops(counts(sce.pbmc))
# See ?emptyDrops for an explanation of why there are NA # values.
summary(e.out$FDR <= 0.001)
set.seed(100)
limit <- 100
all.out <-
    emptyDrops(counts(sce.pbmc), lower = limit, test.ambient = TRUE)
# Ideally, this histogram should look close to uniform.
# Large peaks near zero indicate that barcodes with total
# counts below 'lower' are not ambient in origin.
hist(all.out$PValue[all.out$Total <= limit &
        all.out$Total > 0],
    xlab = "P-value",
    main = "",
    col = "grey80")
sce.pbmc <- sce.pbmc[, which(e.out$FDR <= 0.001)]
is.mito <- grep("^MT-", rowData(sce.pbmc)$Symbol)
# Fixed typo: original assigned to 'sce.pmbc', so the QC columns were
# added to a misspelled copy rather than the real 'sce.pbmc' object.
sce.pbmc <- addPerCellQC(sce.pbmc, subsets = list(MT = is.mito))
discard.mito <-
    isOutlier(sce.pbmc$subsets_MT_percent, type = "higher")
plot(
    sce.pbmc$sum,
    sce.pbmc$subsets_MT_percent,
    log = "x",
    xlab = "Total count",
    ylab = "Mitochondrial %"
)
abline(h = attr(discard.mito, "thresholds")["higher"], col = "red")
## Exercise answers
### Why does emptyDrops() return NA values?
## Below lower & test.ambient = FALSE
## 0 "total" (even with test.ambient = TRUE)
with(all.out, table(
    'NA pvalue' = is.na(PValue),
    'Total is 0?' = Total == 0
))
### Are the p-values the same for e.out and all.out?
## Answers from the group: Yes: 4, No: 6
identical(e.out$PValue, all.out$PValue)
## What if you subset to the non-NA entries?
identical(
    e.out$PValue[!is.na(all.out$FDR)],
    all.out$PValue[!is.na(all.out$FDR)]
)
## false
identical(
    e.out$PValue[!is.na(e.out$FDR)],
    all.out$PValue[!is.na(e.out$FDR)]
)
## true
with(e.out, table(
    'NA pvalue' = is.na(PValue),
    'Total is <= 100' = Total <= 100
))
## We talked about the importance of setting the random seed
## using set.seed() before running any function that has
## a random component. Otherwise your results will not be
## reproducible and you'll never know if they didn't
## reproduce because of the random seed or because some
## other code changed in the function you are running.
## ----marking, cache=TRUE, dependson='use_case'-----------------------------------------------------------------------
# Removing low-quality cells
# Keeping the columns we DON'T want to discard
filtered <- sce.416b[,!discard2]
# Marking low-quality cells
marked <- sce.416b
marked$discard <- discard2
## ----'reproducibility', cache = TRUE, dependson=knitr::all_labels()--------------------------------------------------
options(width = 120)
sessioninfo::session_info()
## Notes
|
/R/03-quality-control.R
|
no_license
|
lcolladotor/osca_playground_leo
|
R
| false
| false
| 9,987
|
r
|
# Notes for 03-quality-control.R
# --------------------------------------
## Copy code from https://github.com/lcolladotor/osca_LIIGH_UNAM_2020/blob/master/03-quality-control.R
## ----all_code, cache=TRUE--------------------------------------------------------------------------------------------
## Data
library('scRNAseq')
sce.416b <- LunSpikeInData(which = "416b")
sce.416b$block <- factor(sce.416b$block)
# Download the relevant Ensembl annotation database
# using AnnotationHub resources
library('AnnotationHub')
ah <- AnnotationHub()
query(ah, c("Mus musculus", "Ensembl", "v97"))
# Annotate each gene with its chromosome location
ens.mm.v97 <- ah[["AH73905"]]
location <- mapIds(
    ens.mm.v97,
    keys = rownames(sce.416b),
    keytype = "GENEID",
    column = "SEQNAME"
)
# Identify the mitochondrial genes
is.mito <- which(location == "MT")
library('scater')
## sums of altExp are calculated
x <- colSums(counts(altExp(sce.416b, 'ERCC')))
head(x)
x_genes <- colSums(counts(sce.416b))
head(x_genes)
sce.416b <- addPerCellQC(sce.416b,
    subsets = list(Mito = is.mito))
## ----qc_metrics, cache=TRUE, dependson='all_code'--------------------------------------------------------------------
plotColData(sce.416b, x = "block", y = "detected")
plotColData(sce.416b, x = "block", y = "detected") +
    scale_y_log10()
plotColData(sce.416b,
    x = "block",
    y = "detected",
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
## ----all_code_part2, cache = TRUE, dependson='all_code'--------------------------------------------------------------
# Example thresholds (fixed cutoffs, chosen by eye)
qc.lib <- sce.416b$sum < 100000
qc.nexprs <- sce.416b$detected < 5000
qc.spike <- sce.416b$altexps_ERCC_percent > 10
qc.mito <- sce.416b$subsets_Mito_percent > 10
discard <- qc.lib | qc.nexprs | qc.spike | qc.mito
class(qc.lib)
addmargins(table('lib' = qc.lib, 'nexprs' = qc.nexprs, 'spike' = qc.spike, 'mito' = qc.mito))
addmargins(table('lib' = qc.lib, 'spike' = qc.spike, 'mito' = qc.mito))
addmargins(table('lib' = qc.lib, 'other filters' = qc.nexprs | qc.spike | qc.mito))
which(qc.lib & (qc.nexprs | qc.spike | qc.mito))
which(qc.lib & !(qc.nexprs | qc.spike | qc.mito))
intersect(which(qc.lib), which(qc.nexprs | qc.spike | qc.mito))
# Summarize the number of cells removed for each reason
DataFrame(
    LibSize = sum(qc.lib),
    NExprs = sum(qc.nexprs),
    SpikeProp = sum(qc.spike),
    MitoProp = sum(qc.mito),
    Total = sum(discard)
)
plotColData(sce.416b, x = "block", y = "sum")
plotColData(sce.416b, x = "block", y = "sum") + scale_y_log10()
# Adaptive (outlier-based) thresholds instead of fixed cutoffs
qc.lib2 <- isOutlier(sce.416b$sum, log = TRUE, type = "lower")
qc.nexprs2 <- isOutlier(sce.416b$detected, log = TRUE,
    type = "lower")
qc.spike2 <- isOutlier(sce.416b$altexps_ERCC_percent,
    type = "higher")
qc.mito2 <- isOutlier(sce.416b$subsets_Mito_percent,
    type = "higher")
discard2 <- qc.lib2 | qc.nexprs2 | qc.spike2 | qc.mito2
# Extract the thresholds
attr(qc.lib2, "thresholds")
attr(qc.nexprs2, "thresholds")
# Summarize the number of cells removed for each reason.
DataFrame(
    LibSize = sum(qc.lib2),
    NExprs = sum(qc.nexprs2),
    SpikeProp = sum(qc.spike2),
    MitoProp = sum(qc.mito2),
    Total = sum(discard2)
)
## More checks
plotColData(sce.416b,
    x = "block",
    y = "detected",
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
# Batch-aware outlier detection: thresholds computed per phenotype-block combo
batch <- paste0(sce.416b$phenotype, "-", sce.416b$block)
qc.lib3 <- isOutlier(sce.416b$sum,
    log = TRUE,
    type = "lower",
    batch = batch)
qc.nexprs3 <- isOutlier(sce.416b$detected,
    log = TRUE,
    type = "lower",
    batch = batch)
qc.spike3 <- isOutlier(sce.416b$altexps_ERCC_percent,
    type = "higher",
    batch = batch)
qc.mito3 <- isOutlier(sce.416b$subsets_Mito_percent,
    type = "higher",
    batch = batch)
discard3 <- qc.lib3 | qc.nexprs3 | qc.spike3 | qc.mito3
sce.416b$discard3 <- discard3
plotColData(sce.416b,
    x = "block",
    y = "detected",
    colour_by = 'discard3',
    other_fields = "phenotype") +
    scale_y_log10() +
    facet_wrap( ~ phenotype)
# Extract the thresholds
attr(qc.lib3, "thresholds")
attr(qc.nexprs3, "thresholds")
# Summarize the number of cells removed for each reason
DataFrame(
    LibSize = sum(qc.lib3),
    NExprs = sum(qc.nexprs3),
    SpikeProp = sum(qc.spike3),
    MitoProp = sum(qc.mito3),
    Total = sum(discard3)
)
## ----use_case, cache=TRUE, dependson= c('all_code', 'all_code_part2')------------------------------------------------
sce.grun <- GrunPancreasData()
sce.grun <- addPerCellQC(sce.grun)
plotColData(sce.grun, x = "donor", y = "altexps_ERCC_percent")
hist(sce.grun$altexps_ERCC_percent[sce.grun$donor == 'D10'],
    breaks = 100,
    col = 'light blue')
hist(sce.grun$altexps_ERCC_percent[sce.grun$donor == 'D2'],
    breaks = 100,
    col = 'light blue')
discard.ercc <- isOutlier(sce.grun$altexps_ERCC_percent,
    type = "higher",
    batch = sce.grun$donor)
# Restrict threshold estimation to donors believed to be high quality
discard.ercc2 <- isOutlier(
    sce.grun$altexps_ERCC_percent,
    type = "higher",
    batch = sce.grun$donor,
    subset = sce.grun$donor %in% c("D17", "D2", "D7")
)
## Understanding %in%
class(sce.grun$donor)
length(sce.grun$donor)
dim(sce.grun)
table(sce.grun$donor)
manual_subset <- sce.grun$donor == 'D17' | sce.grun$donor == 'D2' | sce.grun$donor == 'D7'
class(manual_subset)
length(manual_subset)
sum(manual_subset)
480 + 96 + 384
## quicker
# x %in% y
x <- c('a', 'b', 'c', 'ch')
y <- letters
x %in% y
x[x %in% y]
auto_subset <- sce.grun$donor %in% c('D17', 'D2', 'D7')
identical(manual_subset, auto_subset)
plotColData(
    sce.grun,
    x = "donor",
    y = "altexps_ERCC_percent",
    colour_by = data.frame(discard = discard.ercc)
)
plotColData(
    sce.grun,
    x = "donor",
    y = "altexps_ERCC_percent",
    colour_by = data.frame(discard = discard.ercc2)
)
# Add info about which cells are outliers
sce.416b$discard <- discard2
# Look at this plot for each QC metric
plotColData(
    sce.416b,
    x = "block",
    y = "sum",
    colour_by = "discard",
    other_fields = "phenotype"
) +
    facet_wrap( ~ phenotype) +
    scale_y_log10()
# Another useful diagnostic plot
plotColData(
    sce.416b,
    x = "sum",
    y = "subsets_Mito_percent",
    colour_by = "discard",
    other_fields = c("block", "phenotype")
) +
    facet_grid(block ~ phenotype)
## ----use_case_pbmc, cache=TRUE, dependson='all_code'-----------------------------------------------------------------
library('BiocFileCache')
bfc <- BiocFileCache()
raw.path <-
    bfcrpath(
        bfc,
        file.path(
            "http://cf.10xgenomics.com/samples",
            "cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz"
        )
    )
untar(raw.path, exdir = file.path(tempdir(), "pbmc4k"))
library('DropletUtils')
library('Matrix')
fname <- file.path(tempdir(), "pbmc4k/raw_gene_bc_matrices/GRCh38")
sce.pbmc <- read10xCounts(fname, col.names = TRUE)
bcrank <- barcodeRanks(counts(sce.pbmc))
# Only showing unique points for plotting speed.
uniq <- !duplicated(bcrank$rank)
plot(
    bcrank$rank[uniq],
    bcrank$total[uniq],
    log = "xy",
    xlab = "Rank",
    ylab = "Total UMI count",
    cex.lab = 1.2
)
abline(h = metadata(bcrank)$inflection,
    col = "darkgreen",
    lty = 2)
abline(h = metadata(bcrank)$knee,
    col = "dodgerblue",
    lty = 2)
legend(
    "bottomleft",
    legend = c("Inflection", "Knee"),
    col = c("darkgreen", "dodgerblue"),
    lty = 2,
    cex = 1.2
)
set.seed(100)
e.out <- emptyDrops(counts(sce.pbmc))
# See ?emptyDrops for an explanation of why there are NA # values.
summary(e.out$FDR <= 0.001)
set.seed(100)
limit <- 100
all.out <-
    emptyDrops(counts(sce.pbmc), lower = limit, test.ambient = TRUE)
# Ideally, this histogram should look close to uniform.
# Large peaks near zero indicate that barcodes with total
# counts below 'lower' are not ambient in origin.
hist(all.out$PValue[all.out$Total <= limit &
        all.out$Total > 0],
    xlab = "P-value",
    main = "",
    col = "grey80")
sce.pbmc <- sce.pbmc[, which(e.out$FDR <= 0.001)]
is.mito <- grep("^MT-", rowData(sce.pbmc)$Symbol)
# Fixed typo: original assigned to 'sce.pmbc', so the QC columns were
# added to a misspelled copy rather than the real 'sce.pbmc' object.
sce.pbmc <- addPerCellQC(sce.pbmc, subsets = list(MT = is.mito))
discard.mito <-
    isOutlier(sce.pbmc$subsets_MT_percent, type = "higher")
plot(
    sce.pbmc$sum,
    sce.pbmc$subsets_MT_percent,
    log = "x",
    xlab = "Total count",
    ylab = "Mitochondrial %"
)
abline(h = attr(discard.mito, "thresholds")["higher"], col = "red")
## Exercise answers
### Why does emptyDrops() return NA values?
## Below lower & test.ambient = FALSE
## 0 "total" (even with test.ambient = TRUE)
with(all.out, table(
    'NA pvalue' = is.na(PValue),
    'Total is 0?' = Total == 0
))
### Are the p-values the same for e.out and all.out?
## Answers from the group: Yes: 4, No: 6
identical(e.out$PValue, all.out$PValue)
## What if you subset to the non-NA entries?
identical(
    e.out$PValue[!is.na(all.out$FDR)],
    all.out$PValue[!is.na(all.out$FDR)]
)
## false
identical(
    e.out$PValue[!is.na(e.out$FDR)],
    all.out$PValue[!is.na(e.out$FDR)]
)
## true
with(e.out, table(
    'NA pvalue' = is.na(PValue),
    'Total is <= 100' = Total <= 100
))
## We talked about the importance of setting the random seed
## using set.seed() before running any function that has
## a random component. Otherwise your results will not be
## reproducible and you'll never know if they didn't
## reproduce because of the random seed or because some
## other code changed in the function you are running.
## ----marking, cache=TRUE, dependson='use_case'-----------------------------------------------------------------------
# Removing low-quality cells
# Keeping the columns we DON'T want to discard
filtered <- sce.416b[,!discard2]
# Marking low-quality cells
marked <- sce.416b
marked$discard <- discard2
## ----'reproducibility', cache = TRUE, dependson=knitr::all_labels()--------------------------------------------------
options(width = 120)
sessioninfo::session_info()
## Notes
|
#CBN Method-Urban.R
#
# Copyright © 2018:Arin Shahbazian
# Licence: GPL-3
#
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ Prepare Data =====================================\n")
library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(stringr)
library(data.table)
library(sm)
library(ggplot2)
#for(year in (Settings$startyear:Settings$endyear)){
# cat(paste0("\n------------------------------\nYear:",year,"\n"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHBase.rda"))
HHBase[,IndivNo:=NULL]
HHBase[,Relationship:=NULL]
HHBase[,Sex:=NULL]
HHBase[,Age:=NULL]
HHBase[,Literate:=NULL]
HHBase[,Student:=NULL]
HHBase[,EduCode:=NULL]
HHBase[,EduYears:=NULL]
HHBase[,EduLevel:=NULL]
HHBase[,EduLevel0:=NULL]
HHBase[,ActivityState:=NULL]
HHBase[,MarritalState:=NULL]
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Ghand_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Hoboobat_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Roghan_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Berenj_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Nan_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Goosht_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Morgh_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mahi_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Shir_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mast_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Panir_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Tokhmemorgh_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mive_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Sabzi_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Makarooni_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Sibzamini_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Weights.rda"))
Ghand_Data<-Ghand_Data[,.(HHID,Ghandgram,GhandPrice)]
Hoboobat_Data<-Hoboobat_Data[,.(HHID,Hoboobatgram,HoboobatPrice)]
Roghan_Data<-Roghan_Data[,.(HHID,Roghangram,RoghanPrice)]
Berenj_Data<-Berenj_Data[,.(HHID,Berenjgram,BerenjPrice)]
Nan_Data<-Nan_Data[,.(HHID,Nangram,NanPrice)]
Goosht_Data<-Goosht_Data[,.(HHID,Gooshtgram,GooshtPrice)]
Morgh_Data<-Morgh_Data[,.(HHID,Morghgram,MorghPrice)]
Mahi_Data<-Mahi_Data[,.(HHID,Mahigram,MahiPrice)]
Shir_Data<-Shir_Data[,.(HHID,Shirgram,ShirPrice)]
Mast_Data<-Mast_Data[,.(HHID,Mastgram,MastPrice)]
Panir_Data<-Panir_Data[,.(HHID,Panirgram,PanirPrice)]
Tokhmemorgh_Data<-Tokhmemorgh_Data[,.(HHID,Tokhmemorghgram,TokhmemorghPrice)]
Mive_Data<-Mive_Data[,.(HHID,Mivegram,MivePrice)]
Sabzi_Data<-Sabzi_Data[,.(HHID,Sabzigram,SabziPrice)]
Makarooni_Data<-Makarooni_Data[,.(HHID,Makaroonigram,MakarooniPrice)]
Sibzamini_Data<-Sibzamini_Data[,.(HHID,Sibzaminigram,SibzaminiPrice)]
Food<-merge(HHBase,Ghand_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Ghandgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Hoboobat_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Hoboobatgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Roghan_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Roghangram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Berenj_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Berenjgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Nan_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Nangram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Goosht_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Gooshtgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Morgh_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Morghgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mahi_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mahigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Shir_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Shirgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mast_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mastgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Panir_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Panirgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Tokhmemorgh_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Tokhmemorghgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mive_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mivegram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Sabzi_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Sabzigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Makarooni_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Makaroonigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Sibzamini_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Sibzaminigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Weights,by =c("HHID"),all.x=TRUE)
#load Expenditure groups
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHBase.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHI.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Foods.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Cigars.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Cloths.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Amusements.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Communications.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Durables.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Education.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Energy.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Furnitures.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Hotels.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","House.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Medicals.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Behdashts.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Transportations.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Others.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Investments.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Resturants.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Weights.rda"))
#merge Expenditure groups
CBN<-merge(Food,HHI ,by =c("HHID"),all=TRUE)
CBN<-merge(CBN,FoodData,by =c("HHID"),all=TRUE)
for (col in c("FoodExpenditure")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,CigarData,by =c("HHID"),all=TRUE)
for (col in c("Cigar_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,ClothData,by =c("HHID"),all=TRUE)
for (col in c("Cloth_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,AmusementData,by =c("HHID"),all=TRUE)
for (col in c("Amusement_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,CommunicationData,by =c("HHID"),all=TRUE)
for (col in c("Communication_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,EducData,by =c("HHID"),all=TRUE)
for (col in c("EducExpenditure")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,EnergyData,by =c("HHID"),all=TRUE)
for (col in c("Energy_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,FurnitureData,by =c("HHID"),all=TRUE)
for (col in c("Furniture_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,HotelData,by =c("HHID"),all=TRUE)
for (col in c("Hotel_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,BehdashtData,by =c("HHID"),all=TRUE)
for (col in c("Behdasht_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,TransportationData,by =c("HHID"),all=TRUE)
for (col in c("Transportation_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,OtherData,by =c("HHID"),all=TRUE)
for (col in c("Other_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,HouseData,by =c("HHID"),all=TRUE)
for (col in c("ServiceExp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,InvestmentData,by =c("HHID"),all=TRUE)
for (col in c("Investment_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,MedicalData,by =c("HHID"),all=TRUE)
for (col in c("Medical_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,DurableData,by =c("HHID"),all=TRUE)
for (col in c("Durable_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,ResturantData,by =c("HHID"),all=TRUE)
for (col in c("Resturant_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-CBN[Size!=0]
CBN<-CBN[Region=="Urban"]
CBN<-CBN[FoodExpenditure!=0]
#Calculate Per_Total Expenditures Monthly
CBN[, Total_Exp_Month := Reduce(`+`, .SD), .SDcols=c(66:78,82:84)][]
CBN[, Total_Exp_Month_nondurable := Reduce(`+`, .SD), .SDcols=66:78][]
CBN$Total_Exp_Month_Per<-CBN$Total_Exp_Month/CBN$EqSizeOECD
CBN$Total_Exp_Month_Per_nondurable<-CBN$Total_Exp_Month_nondurable/CBN$EqSizeOECD
#Calculate Per_Food Expenditures Monthly
CBN[,EqSizeCalory :=(Size-NKids) + NKids*(1800/2100)]
CBN$FoodExpenditure_Per<-CBN$FoodExpenditure/CBN$EqSizeCalory
#Calculate Per_Food Expenditures Daily
CBN$FoodExpenditure_Per_day<-CBN$FoodExpenditure_Per/30
CBN$Ghandgram_Per_day<-CBN$Ghandgram/(30*CBN$EqSizeOECD)
CBN$Hoboobatgram_Per_day<-CBN$Hoboobatgram/(30*CBN$EqSizeOECD)
CBN$Berenjgram_Per_day<-CBN$Berenjgram/(30*CBN$EqSizeOECD)
CBN$Nangram_Per_day<-CBN$Nangram/(30*CBN$EqSizeOECD)
CBN$Roghangram_Per_day<-CBN$Roghangram/(30*CBN$EqSizeOECD)
CBN$Gooshtgram_Per_day<-CBN$Gooshtgram/(30*CBN$EqSizeOECD)
CBN$Morghgram_Per_day<-CBN$Morghgram/(30*CBN$EqSizeOECD)
CBN$Mahigram_Per_day<-CBN$Mahigram/(30*CBN$EqSizeOECD)
CBN$Shirgram_Per_day<-CBN$Shirgram/(30*CBN$EqSizeOECD)
CBN$Mastgram_Per_day<-CBN$Mastgram/(30*CBN$EqSizeOECD)
CBN$Panirgram_Per_day<-CBN$Panirgram/(30*CBN$EqSizeOECD)
CBN$Tokhmemorghgram_Per_day<-CBN$Tokhmemorghgram/(30*CBN$EqSizeOECD)
CBN$Mivegram_Per_day<-CBN$Mivegram/(30*CBN$EqSizeOECD)
CBN$Sabzigram_Per_day<-CBN$Sabzigram/(30*CBN$EqSizeOECD)
CBN$Makaroonigram_Per_day<-CBN$Makaroonigram/(30*CBN$EqSizeOECD)
CBN$Sibzaminigram_Per_day<-CBN$Sibzaminigram/(30*CBN$EqSizeOECD)
CBN[,EqSizeCalory:=NULL]
load(file="PriceIndex95.rda")
CBN<-merge(CBN,PriceIndex95,by=c("ProvinceCode"),all.x = TRUE)
CBN[,ostan:=NULL]
load(file="PriceIndex.rda")
CBN<-merge(CBN,PriceIndex,by=c("ProvinceCode"),all.x = TRUE)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per_nondurable)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
CBN$Ghand_W<-CBN$Ghandgram_Per_day*CBN$GhandPrice*0.001*30
CBN$Hoboobat_W<-CBN$Hoboobatgram_Per_day*CBN$HoboobatPrice*0.001*30
CBN$Roghan_W<-CBN$Roghangram_Per_day*CBN$RoghanPrice*0.001*30
CBN$Berenj_W<-CBN$Berenjgram_Per_day*CBN$BerenjPrice*0.001*30
CBN$Nan_W<-CBN$Nangram_Per_day*CBN$NanPrice*0.001*30
CBN$Goosht_W<-CBN$Gooshtgram_Per_day*CBN$GooshtPrice*0.001*30
CBN$Morgh_W<-CBN$Morghgram_Per_day*CBN$MorghPrice*0.001*30
CBN$Mahi_W<-CBN$Mahigram_Per_day*CBN$MahiPrice*0.001*30
CBN$Shir_W<-CBN$Shirgram_Per_day*CBN$ShirPrice*0.001*30
CBN$Mast_W<-CBN$Mastgram_Per_day*CBN$MastPrice*0.001*30
CBN$Panir_W<-CBN$Panirgram_Per_day*CBN$PanirPrice*0.001*30
CBN$Tokhmemorgh_W<-CBN$Tokhmemorghgram_Per_day*CBN$TokhmemorghPrice*0.001*30
CBN$Mive_W<-CBN$Mivegram_Per_day*CBN$MivePrice*0.001*30
CBN$Sabzi_W<-CBN$Sabzigram_Per_day*CBN$SabziPrice*0.001*30
CBN$Makarooni_W<-CBN$Makaroonigram_Per_day*CBN$MakarooniPrice*0.001*30
CBN$Sibzamini_W<-CBN$Sibzaminigram_Per_day*CBN$SibzaminiPrice*0.001*30
CBN$Home_W<-CBN$ServiceExp/CBN$EqSizeOECD
CBN$Home_Per_Metr<-CBN$MetrPrice/CBN$EqSizeOECD
#Seperate big cities
CBN[,sum(Weight*Size),by=ProvinceCode][order(V1)]
CBN[,HHIDs:=as.character(HHID)]
CBN[,ShahrestanCode:=as.integer(str_sub(HHIDs,2,5))]
CBN[,sum(Weight*Size),by=ShahrestanCode][order(V1)][330:387]
CBNTehran<-CBN[ProvinceCode==23]
CBNTehran[,sum(Weight*Size),by=ShahrestanCode]
CBNTabriz<-CBN[ProvinceCode==3]
CBNTabriz[,sum(Weight*Size),by=ShahrestanCode]
CBNAhvaz<-CBN[ProvinceCode==6]
CBNAhvaz[,sum(Weight*Size),by=ShahrestanCode]
CBNShiraz<-CBN[ProvinceCode==7]
CBNShiraz[,sum(Weight*Size),by=ShahrestanCode]
CBNMashhad<-CBN[ProvinceCode==9]
CBNMashhad[,sum(Weight*Size),by=ShahrestanCode]
CBNEsfahan<-CBN[ProvinceCode==10]
CBNEsfahan[,sum(Weight*Size),by=ShahrestanCode]
CBNKaraj<-CBN[ProvinceCode==30]
CBNKaraj[,sum(Weight*Size),by=ShahrestanCode]
CBNKermanshah<-CBN[ProvinceCode==5]
CBNKermanshah[,sum(Weight*Size),by=ShahrestanCode]
CBN<-CBN[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==303,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==603,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==707,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==916,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==1002,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==3001,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==502,ProvinceCode:=as.numeric(ShahrestanCode)]
# Food Calories
CBN$Ghand_Calory<- CBN$Ghandgram *4
CBN$Hoboobat_Calory<- CBN$Hoboobatgram *3
CBN$Nan_Calory<- CBN$Nangram *2.5
CBN$Berenj_Calory<- CBN$Berenjgram *1.2
CBN$Roghan_Calory<- CBN$Roghangram *8
CBN$Goosht_Calory<- CBN$Gooshtgram *2.5
CBN$Morgh_Calory<- CBN$Morghgram *2
CBN$Mahi_Calory<- CBN$Mahigram *1
CBN$Shir_Calory<- CBN$Shirgram *2.5
CBN$Mast_Calory<- CBN$Mastgram *1.5
CBN$Panir_Calory<- CBN$Mastgram *2.5
CBN$Tokhmemorgh_Calory<- CBN$Tokhmemorghgram *1.4
CBN$Mive_Calory<- CBN$Mivegram *0.5
CBN$Sabzi_Calory<- CBN$Sabzigram *0.5
CBN$Makarooni_Calory<- CBN$Makaroonigram *3.6
CBN$Sibzamini_Calory<- CBN$Sibzaminigram *0.9
#utils::View(CBN)
#CalculatePer_calories
# Total per-household daily calories: sum of the 16 *_Calory columns.
# Name-based .SDcols replaces positional 149:164, which silently breaks
# whenever a column is added or removed upstream.
CBN[, Daily_Exp_Calories := Reduce(`+`, .SD),
    .SDcols = paste0(c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan",
                       "Goosht", "Morgh", "Mahi", "Shir", "Mast", "Panir",
                       "Tokhmemorgh", "Mive", "Sabzi", "Makarooni",
                       "Sibzamini"), "_Calory")]
# Calorie-equivalent household size: a child counts as 1800/2100 of an
# adult's caloric need.
CBN[, EqSizeCalory := (Size - NKids) + NKids * (1800 / 2100)]
CBN[, Per_Daily_Exp_Calories := Daily_Exp_Calories / EqSizeCalory]
CBN <- CBN[Per_Daily_Exp_Calories < 100000] # arbitrary removal of outliers
#CBN[,Daily_Calories_cluster:=weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(Daily_Calories_cluster,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(Size,Weight,na.rm = TRUE),by=cluster]
#CBN[,sum(Weight*Size),by=cluster]
#CBN[,sum(Weight),by=cluster]
#CBN[,sum(Poor),by=cluster]
#Calculate_Per_calories
# Per-capita (calorie-equivalent) daily calories for each basket item.
calory_per_gram <- c(
  Ghand = 4, Hoboobat = 3, Nan = 2.5, Berenj = 1.2, Roghan = 8,
  Goosht = 2.5, Morgh = 2, Mahi = 1, Shir = 2.5, Mast = 1.5,
  Panir = 2.5, Tokhmemorgh = 1.4, Mive = 0.5, Sabzi = 0.5,
  Makarooni = 3.6, Sibzamini = 0.9
)
# BUG FIX: Panir_per_Calory was previously computed from Mastgram
# (copy-paste error); it now uses Panirgram. Creation order preserved.
for (f in names(calory_per_gram)) {
  CBN[, (paste0(f, "_per_Calory")) :=
        get(paste0(f, "gram")) * calory_per_gram[[f]] / EqSizeCalory]
}
#Assume that deciles 1 and 2 are poor
CBN[,Poor:=ifelse(Decile %in% 1:2,1,0)]
CBNPoor<-CBN[Poor==1]
# Snapshot of the initial (pre-iteration) poverty flags, written to disk
# for later comparison against the iterated results.
OldfirstUrban<-CBN[,.(HHID,Percentile,Poor)]
save(OldfirstUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFoodUrban.rda"))
#K-means weights
# Household budget-share (_W) columns, averaged to province level with
# survey weights. HHID is meaninglessly averaged here too, but it is
# dropped in the final select below.
PriceWeights<-CBN[,.(HHID,Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W,ProvinceCode,Weight)]
dt3 <- PriceWeights[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
dt3<- dt3[order(ProvinceCode)]
dt3 <- dt3[,.(Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W)]
#K-means algorithm for clustering by prices
# Unit prices per item paid by poor households, averaged to province level.
test<-CBNPoor[,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice,ProvinceCode,Weight)]
#test<-CBNPoor[,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice,Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W,Region,ProvinceCode,Weight)]
dt2 <- test[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
dt2<- dt2[order(ProvinceCode)]
# Provinces with no recorded fish purchases produce NaN means; impute a
# fixed price. NOTE(review): 200000 is a magic constant — confirm its
# units/level against the survey's price scale.
for (col in c("MahiPrice")) dt2[is.nan(get(col)), (col) := 200000]
dt <- dt2 [,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice)]
# Principal components of the province-level price matrix, computed on
# the correlation scale so items with very different price levels are
# comparable.
pca <- princomp(dt, cor = TRUE)
PRICE <- pca$scores
# Expose sign-flipped score vectors as PRICE1 .. PRICE17 (referenced by
# the commented-out cluster plot further below).
for (comp in seq_len(ncol(PRICE))) {
  assign(paste0("PRICE", comp), -PRICE[, comp])
}
# Deciding how many clusters
# Elbow plot: total within-cluster sum of squares for k = 1..30.
# NOTE(review): kmeans is RNG-dependent and no seed is set, so this
# curve (and the clustering below) is not reproducible across runs —
# consider set.seed() upstream.
wss <- (nrow(dt)-1)*sum(apply(dt,2,var))
for (i in 2:30) wss[i] <- sum(kmeans(dt, centers=i)$withinss)
plot(1:30, wss, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares")
#Weighted clustering
dt3.m <- dt3[, lapply(.SD, mean)]            # one weight per price column
dtW <- dt * sqrt(dt3.m[rep(1, nrow(dt))])    # budget-share-weighted observations
# BUG FIX: kmeans was previously called twice and the first result
# discarded — wasted work, and with no seed the two runs could differ.
# A single call keeps the kept result; for reproducibility consider
# set.seed() before this line.
cl <- kmeans(dtW, 4)
cl$cluster
# Attach the cluster label to each province row.
dt2[, cluster := cl$cluster]
dt2 <- dt2[, .(ProvinceCode, cluster)]
# NOTE(review): the loaded objects from dt4Urban.rda are not obviously
# used in this section — confirm what downstream code expects from it.
load(file = "dt4Urban.rda")
#plot(PRICE1, PRICE2,col=cl$cluster)
#points(cl$centers, pch=20)
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(Weight * Size), by = cluster]
CBNPoor[, sum(Weight), by = cluster]
CBNPoor[, sum(Poor), by = cluster]
C2 <- CBNPoor[, .(HHID, ProvinceCode, Region, Decile, Poor, cluster)]
######################################################################
#### Iteration 1 ####
### Iteration 1-1
# The 16 food items of the basic basket. Creation order of derived
# columns follows this vector, matching the original script.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Cluster-level weighted means of per-capita daily calories per item.
for (f in food_items) {
  CBNPoor[, (paste0("Daily_", f, "_cluster")) :=
            weighted.mean(get(paste0(f, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Total cluster calories. Name-based .SDcols replaces the fragile
# positional 186:201, which breaks whenever the column layout changes.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")]
# Rescale each item so the cluster bundle totals 2100 kcal; NAs -> 0.
for (f in food_items) {
  col <- paste0("Daily2_", f)
  CBNPoor[, (col) := get(paste0("Daily_", f, "_cluster")) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(col)), (col) := 0]
}
# Positional 203:218 replaced by the Daily2_* names.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)]
# Price of one calorie in each cluster.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Calories bought in restaurants: 70% of restaurant spending is treated
# as food, valued at the cluster calorie price.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Total per-capita food expenditure (market + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Cost of a 2100-kcal bundle per person.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE)]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE)]
# Real price indexes relative to Tehran (recoded ProvinceCode 2301):
# index 1 from the food bundle, index 2 from housing price per metre.
T_Bundle_Value <- CBNPoor[ProvinceCode == 2301, .(Bundle_Value, Home_Per_Metr, Weight)]
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes31 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Deflate expenditures by the province price index and recompute
# weighted deciles/percentiles on the deflated totals.
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
CBN <- CBN[order(Total_Exp_Month_Per2)]
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the poor set (deciles 1-2) and re-attach the cluster labels.
CBN[, Poor := ifelse(Decile %in% 1:2, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(Size * Weight), by = .(cluster)][order(cluster)]
### Iteration 1-2 (same pipeline as 1-1, rerun on the updated poor set)
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Cluster-level weighted means of per-capita daily calories per item.
for (f in food_items) {
  CBNPoor[, (paste0("Daily_", f, "_cluster")) :=
            weighted.mean(get(paste0(f, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Total cluster calories; name-based .SDcols replaces positional 186:201,
# which is unreliable here because CBN gained columns during iteration 1-1.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")]
# Rescale each item so the cluster bundle totals 2100 kcal; NAs -> 0.
for (f in food_items) {
  col <- paste0("Daily2_", f)
  CBNPoor[, (col) := get(paste0("Daily_", f, "_cluster")) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(col)), (col) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)]
# Price of one calorie in each cluster.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Restaurant calories (70% of spending treated as food).
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Total per-capita food expenditure (market + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Cost of a 2100-kcal bundle per person.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
# Real price indexes relative to Tehran (recoded ProvinceCode 2301).
T_Bundle_Value <- CBNPoor[ProvinceCode == 2301, .(Bundle_Value, Home_Per_Metr, Weight)]
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes32 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Drop the previous iteration's index before merging the new one.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
# Re-sort on deflated expenditures and recompute weighted deciles.
CBN <- CBN[order(Total_Exp_Month_Per2)]
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the poor set — this iteration widens the band to deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
### Iteration 1-3 (same pipeline, rerun on the updated poor set)
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Cluster-level weighted means of per-capita daily calories per item.
for (f in food_items) {
  CBNPoor[, (paste0("Daily_", f, "_cluster")) :=
            weighted.mean(get(paste0(f, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Total cluster calories; name-based .SDcols replaces positional 186:201.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")]
# Rescale each item so the cluster bundle totals 2100 kcal; NAs -> 0.
for (f in food_items) {
  col <- paste0("Daily2_", f)
  CBNPoor[, (col) := get(paste0("Daily_", f, "_cluster")) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(col)), (col) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)]
# Price of one calorie in each cluster.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Restaurant calories (70% of spending treated as food).
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Total per-capita food expenditure (market + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Cost of a 2100-kcal bundle per person.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
# Real price indexes relative to Tehran (recoded ProvinceCode 2301).
T_Bundle_Value <- CBNPoor[ProvinceCode == 2301, .(Bundle_Value, Home_Per_Metr, Weight)]
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes33 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Drop the previous iteration's index before merging the new one.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
# Re-sort on deflated expenditures and recompute weighted deciles.
CBN <- CBN[order(Total_Exp_Month_Per2)]
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the poor set (deciles 2-5, as in iteration 1-2).
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
### Iteration 1-4 (same pipeline; continues past this section of the file)
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Cluster-level weighted means of per-capita daily calories per item.
for (f in food_items) {
  CBNPoor[, (paste0("Daily_", f, "_cluster")) :=
            weighted.mean(get(paste0(f, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Total cluster calories; name-based .SDcols replaces positional 186:201.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")]
# Rescale each item so the cluster bundle totals 2100 kcal; NAs -> 0.
for (f in food_items) {
  col <- paste0("Daily2_", f)
  CBNPoor[, (col) := get(paste0("Daily_", f, "_cluster")) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(col)), (col) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)]
# Price of one calorie in each cluster.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Restaurant calories (70% of spending treated as food).
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Total per-capita food expenditure (market + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Cost of a 2100-kcal bundle per person.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
# Real price indexes relative to Tehran (recoded ProvinceCode 2301).
T_Bundle_Value <- CBNPoor[ProvinceCode == 2301, .(Bundle_Value, Home_Per_Metr, Weight)]
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes34<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-5
# One pass of the iterative Cost-of-Basic-Needs (CBN) procedure:
#   1. average each food group's per-capita daily calories within clusters,
#   2. rescale the cluster food bundle to a 2100 kcal/day reference,
#   3. price the bundle and build province-level real price indexes
#      (Tehran, ProvinceCode 2301, is the base),
#   4. deflate expenditures, recompute weighted deciles and re-flag the
#      reference poor (deciles 2-5) for the next pass.
# NOTE(review): all steps mutate CBNPoor/CBN in place via data.table `:=`.
# The 16 food groups tracked throughout this script (Persian food-group names).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)
# Calculate per-capita calories per food group within each cluster.
for (it in food_items) {
  CBNPoor[, (paste0("Daily_", it, "_cluster")) :=
            weighted.mean(get(paste0(it, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Cluster-level total daily calories; named .SDcols replaces the fragile
# positional selection c(186:201) — assumes those positions were exactly the
# 16 Daily_*_cluster columns created above (TODO confirm).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols][]
#utils::View(CBNPoor)
# Rescale each group's calories so the cluster bundle sums to 2100 kcal/day;
# divisions yielding NA (zero/NA cluster total) are set to 0.
for (it in food_items) {
  col2 <- paste0("Daily2_", it)
  CBNPoor[, (col2) := (get(paste0("Daily_", it, "_cluster")) * 2100) /
            Daily_Calories_cluster2]
  CBNPoor[is.na(get(col2)), (col2) := 0]
}
# Total of the rescaled bundle; named .SDcols replaces positional c(203:218).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols][]
# Cluster-level food expenditure and the implied price of one calorie.
CBNPoor[, FoodExpenditure_Per_cluster :=
          weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Calories bought in restaurants: 70% of restaurant spending is treated as
# food, converted to calories at the cluster calorie price; NA -> 0.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
# Drop households with no recorded at-home calorie expenditure.
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Sum of total food expenditures (at home + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Per-person value of a 2100 kcal/day food bundle.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
# Real price indexes relative to Tehran (ProvinceCode 2301):
# index 1 from the food bundle, index 2 from housing price per square metre.
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301,
                         select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2 <- Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
# Snapshot of this iteration's province-level indexes (35 = iteration 1-5).
Indexes35 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index on CBN and deflate expenditures.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
# Sort households by deflated per-capita expenditure.
CBN <- CBN[order(Total_Exp_Month_Per2)]
# Cumulative survey weights (printed total is a sanity check).
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
# Weighted deciles / percentiles of deflated expenditure.
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the reference poor group: deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
# Population check per province.
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-6
# One pass of the iterative Cost-of-Basic-Needs (CBN) procedure:
#   1. average each food group's per-capita daily calories within clusters,
#   2. rescale the cluster food bundle to a 2100 kcal/day reference,
#   3. price the bundle and build province-level real price indexes
#      (Tehran, ProvinceCode 2301, is the base),
#   4. deflate expenditures, recompute weighted deciles and re-flag the
#      reference poor (deciles 2-5) for the next pass.
# NOTE(review): all steps mutate CBNPoor/CBN in place via data.table `:=`.
# The 16 food groups tracked throughout this script (Persian food-group names).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)
# Calculate per-capita calories per food group within each cluster.
for (it in food_items) {
  CBNPoor[, (paste0("Daily_", it, "_cluster")) :=
            weighted.mean(get(paste0(it, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Cluster-level total daily calories; named .SDcols replaces the fragile
# positional selection c(186:201) — assumes those positions were exactly the
# 16 Daily_*_cluster columns created above (TODO confirm).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols][]
#utils::View(CBNPoor)
# Rescale each group's calories so the cluster bundle sums to 2100 kcal/day;
# divisions yielding NA (zero/NA cluster total) are set to 0.
for (it in food_items) {
  col2 <- paste0("Daily2_", it)
  CBNPoor[, (col2) := (get(paste0("Daily_", it, "_cluster")) * 2100) /
            Daily_Calories_cluster2]
  CBNPoor[is.na(get(col2)), (col2) := 0]
}
# Total of the rescaled bundle; named .SDcols replaces positional c(203:218).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols][]
# Cluster-level food expenditure and the implied price of one calorie.
CBNPoor[, FoodExpenditure_Per_cluster :=
          weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Calories bought in restaurants: 70% of restaurant spending is treated as
# food, converted to calories at the cluster calorie price; NA -> 0.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
# Drop households with no recorded at-home calorie expenditure.
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Sum of total food expenditures (at home + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Per-person value of a 2100 kcal/day food bundle.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
# Real price indexes relative to Tehran (ProvinceCode 2301):
# index 1 from the food bundle, index 2 from housing price per square metre.
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301,
                         select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2 <- Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
# Snapshot of this iteration's province-level indexes (36 = iteration 1-6).
Indexes36 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index on CBN and deflate expenditures.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
# Sort households by deflated per-capita expenditure.
CBN <- CBN[order(Total_Exp_Month_Per2)]
# Cumulative survey weights (printed total is a sanity check).
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
# Weighted deciles / percentiles of deflated expenditure.
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the reference poor group: deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
# Population check per province.
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-7
# One pass of the iterative Cost-of-Basic-Needs (CBN) procedure:
#   1. average each food group's per-capita daily calories within clusters,
#   2. rescale the cluster food bundle to a 2100 kcal/day reference,
#   3. price the bundle and build province-level real price indexes
#      (Tehran, ProvinceCode 2301, is the base),
#   4. deflate expenditures, recompute weighted deciles and re-flag the
#      reference poor (deciles 2-5) for the next pass.
# NOTE(review): all steps mutate CBNPoor/CBN in place via data.table `:=`.
# The 16 food groups tracked throughout this script (Persian food-group names).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)
# Calculate per-capita calories per food group within each cluster.
for (it in food_items) {
  CBNPoor[, (paste0("Daily_", it, "_cluster")) :=
            weighted.mean(get(paste0(it, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Cluster-level total daily calories; named .SDcols replaces the fragile
# positional selection c(186:201) — assumes those positions were exactly the
# 16 Daily_*_cluster columns created above (TODO confirm).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols][]
#utils::View(CBNPoor)
# Rescale each group's calories so the cluster bundle sums to 2100 kcal/day;
# divisions yielding NA (zero/NA cluster total) are set to 0.
for (it in food_items) {
  col2 <- paste0("Daily2_", it)
  CBNPoor[, (col2) := (get(paste0("Daily_", it, "_cluster")) * 2100) /
            Daily_Calories_cluster2]
  CBNPoor[is.na(get(col2)), (col2) := 0]
}
# Total of the rescaled bundle; named .SDcols replaces positional c(203:218).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols][]
# Cluster-level food expenditure and the implied price of one calorie.
CBNPoor[, FoodExpenditure_Per_cluster :=
          weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Calories bought in restaurants: 70% of restaurant spending is treated as
# food, converted to calories at the cluster calorie price; NA -> 0.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
# Drop households with no recorded at-home calorie expenditure.
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Sum of total food expenditures (at home + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Per-person value of a 2100 kcal/day food bundle.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
# Real price indexes relative to Tehran (ProvinceCode 2301):
# index 1 from the food bundle, index 2 from housing price per square metre.
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301,
                         select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2 <- Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
# Snapshot of this iteration's province-level indexes (37 = iteration 1-7).
Indexes37 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index on CBN and deflate expenditures.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
# Sort households by deflated per-capita expenditure.
CBN <- CBN[order(Total_Exp_Month_Per2)]
# Cumulative survey weights (printed total is a sanity check).
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
# Weighted deciles / percentiles of deflated expenditure.
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the reference poor group: deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
# Population check per province.
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-8
# One pass of the iterative Cost-of-Basic-Needs (CBN) procedure:
#   1. average each food group's per-capita daily calories within clusters,
#   2. rescale the cluster food bundle to a 2100 kcal/day reference,
#   3. price the bundle and build province-level real price indexes
#      (Tehran, ProvinceCode 2301, is the base),
#   4. deflate expenditures, recompute weighted deciles and re-flag the
#      reference poor (deciles 2-5) for the next pass.
# NOTE(review): all steps mutate CBNPoor/CBN in place via data.table `:=`.
# The 16 food groups tracked throughout this script (Persian food-group names).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)
# Calculate per-capita calories per food group within each cluster.
for (it in food_items) {
  CBNPoor[, (paste0("Daily_", it, "_cluster")) :=
            weighted.mean(get(paste0(it, "_per_Calory")), Weight, na.rm = TRUE),
          by = cluster]
}
# Cluster-level total daily calories; named .SDcols replaces the fragile
# positional selection c(186:201) — assumes those positions were exactly the
# 16 Daily_*_cluster columns created above (TODO confirm).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols][]
#utils::View(CBNPoor)
# Rescale each group's calories so the cluster bundle sums to 2100 kcal/day;
# divisions yielding NA (zero/NA cluster total) are set to 0.
for (it in food_items) {
  col2 <- paste0("Daily2_", it)
  CBNPoor[, (col2) := (get(paste0("Daily_", it, "_cluster")) * 2100) /
            Daily_Calories_cluster2]
  CBNPoor[is.na(get(col2)), (col2) := 0]
}
# Total of the rescaled bundle; named .SDcols replaces positional c(203:218).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols][]
# Cluster-level food expenditure and the implied price of one calorie.
CBNPoor[, FoodExpenditure_Per_cluster :=
          weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := FoodExpenditure_Per_cluster / Daily_Calories_cluster2]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
# Calories bought in restaurants: 70% of restaurant spending is treated as
# food, converted to calories at the cluster calorie price; NA -> 0.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
# Drop households with no recorded at-home calorie expenditure.
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
# Sum of total food expenditures (at home + restaurant share).
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
# Per-person value of a 2100 kcal/day food bundle.
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
# Real price indexes relative to Tehran (ProvinceCode 2301):
# index 1 from the food bundle, index 2 from housing price per square metre.
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301,
                         select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2 <- Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
# Snapshot of this iteration's province-level indexes (38 = iteration 1-8).
Indexes38 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index on CBN and deflate expenditures.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
# Sort households by deflated per-capita expenditure.
CBN <- CBN[order(Total_Exp_Month_Per2)]
# Cumulative survey weights (printed total is a sanity check).
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
# Weighted deciles / percentiles of deflated expenditure.
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
# Update the reference poor group: deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
# Population check per province.
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-9
# Full iteration body: recompute cluster-level calorie shares, the calorie
# price, the 2100-kcal bundle value, and the Tehran-based real price index on
# the CURRENT reference-poor set (CBNPoor), then re-rank CBN and redraw the
# poor set.  The body is a verbatim repeat of the previous iteration; only the
# snapshot name (Indexes39) differs.
#Calculate Per_calories in clusters
# Cluster-level weighted mean of each food group's per-person daily calories.
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols must cover exactly the 16 Daily_*_cluster
# columns just created -- fragile if the column layout of CBNPoor changes.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale every food group so the cluster bundle totals 2100 kcal/day; each
# rescaled column gets an NA -> 0 fill via the one-element for-loop idiom.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# Sanity total of the 16 rescaled Daily2_* columns (positions 203:218).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Tehran (ProvinceCode 2301) is the base region for the spatial price indices.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
# Indexes39: snapshot of this iteration's indices (name suffix = iteration id).
Indexes39<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-10
# Final numbered fixed-point iteration: identical body to the previous one
# (snapshot name Indexes310; last console check groups by cluster instead of
# ProvinceCode).
#Calculate Per_calories in clusters
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): hard-coded positional .SDcols -- must match the 16
# Daily_*_cluster columns above; verify if CBNPoor's layout changes.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
# Indexes310: snapshot of this iteration's indices (name suffix = iteration id).
Indexes310<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
# Population check is grouped by cluster here (previous iterations grouped by
# ProvinceCode).
CBNPoor[,sum(HIndivNo),by=.(cluster)][order(cluster)]
#Calculate Per_calories in clusters
# Final pass over the converged reference-poor set: recompute the cluster
# bundles one more time, then derive the per-cluster FOOD poverty lines
# (Food_Povertyline{1..4}_1 = weighted mean Bundle_Value within the cluster).
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positions shifted to 189:204 here (earlier iterations used
# 186:201) -- CBNPoor gained columns from the dt2 merge.  Fragile; verify.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(189:204)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(206:221)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
# Diagnostic snapshot of the calorie components.
CBNCalory<-CBNPoor[,.(Per_Daily_Calories,Per_Daily_Exp_Calories,Per_Calory_Resturant,Resturant_Exp,cluster,ProvinceCode)]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,sum(Size*Weight),by=cluster][order(cluster)]
#Food Poverty Line for each cluster
# Suffix _1 marks the first round of absolute food poverty lines.
#cluster 1
CBNPoorCluster<-CBNPoor[cluster==1]
Food_Povertyline1_1<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor[cluster==2]
Food_Povertyline2_1<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor[cluster==3]
Food_Povertyline3_1<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor[cluster==4]
Food_Povertyline4_1<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
CBN<-merge(CBN,dt2,by=c("ProvinceCode"),all.x = TRUE)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
# NOTE(review): `c` shadows base::c as a data object.  Calls like c(...) still
# resolve to the function (R looks up function bindings separately), but this
# naming is error-prone and worth renaming eventually.
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 2###############
# Second-stage iteration: instead of deciles, the poor flag (Poor2) is now
# defined by comparing total food expenditure against the cluster's absolute
# food poverty line from the previous stage (Food_Povertyline*_1).  The bundle
# computation is then repeated on the new poor set CBNPoor2 to obtain updated
# lines Food_Povertyline*_2.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
# One ifelse per cluster; later lines keep the earlier result via else=Poor2.
CBN[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_1 & cluster==1,1,0)]
c[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_1 & cluster==1 ,1,0)]
CBN[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_1 & cluster==2,1,Poor2)]
c[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_1 & cluster==2 ,1,Poor2)]
CBN[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_1 & cluster==3,1,Poor2)]
c[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_1 & cluster==3 ,1,Poor2)]
CBN[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_1 & cluster==4,1,Poor2)]
c[,Poor2:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_1 & cluster==4 ,1,Poor2)]
CBN[,weighted.mean(Poor2,Weight),by=cluster][order(cluster)]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2<-CBN[Poor2==1]
#CalculatePer_calories in clusters
CBNPoor2[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols again (190:205 here) -- verify against the
# actual column layout of CBNPoor2 if anything upstream changes.
CBNPoor2[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(190:205)][]
#utils::View(CBNPoor2)
#Calculate Per_calories in clusters(=2100)
CBNPoor2[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(207:222)][]
CBNPoor2[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor2[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor2[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor2[is.na(get(col)), (col) := 0]
CBNPoor2<-CBNPoor2[Per_Daily_Exp_Calories!=0]
CBNPoor2[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor2[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor2[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor2[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor2[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor2[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Suffix _2 marks the second round of absolute food poverty lines.
#cluster 1
CBNPoorCluster<-CBNPoor2[cluster==1]
Food_Povertyline1_2<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor2[cluster==2]
Food_Povertyline2_2<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor2[cluster==3]
Food_Povertyline3_2<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor2[cluster==4]
Food_Povertyline4_2<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
# Refresh the diagnostic extract (variable `c` shadows base::c; see earlier).
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
######### Iteration 3 ###############
# One pass of the iterative cost-of-basic-needs (CBN) procedure:
#   1. re-flag poor households using the iteration-2 cluster poverty lines;
#   2. rebuild the 2100 kcal/day reference food bundle from the new poor;
#   3. derive the iteration-3 food poverty line of each cluster.
# Depends on globals produced by iteration 2 (CBN, c, CBNPoor2,
# Food_Povertyline1_2 .. Food_Povertyline4_2).

# Sort expenditure data (kept from the original flow; the aggregates below
# do not depend on row order).
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Indicate new poor: Poor3 = 1 when per-capita total food expenditure is
# below the household's own cluster poverty line from the previous pass.
fpl_prev <- c(Food_Povertyline1_2, Food_Povertyline2_2,
              Food_Povertyline3_2, Food_Povertyline4_2)
CBN[, Poor3 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
c[, Poor3 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
for (cl in 2:4) {
  CBN[, Poor3 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor3)]
  c[, Poor3 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor3)]
}
# Weighted headcount per cluster; previous-pass calorie check (diagnostics).
CBN[, weighted.mean(Poor3, Weight), by = cluster][order(cluster)]
CBNPoor2[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Subset to the newly flagged poor for this pass.
CBNPoor3 <- CBN[Poor3 == 1]

# Per-item daily calories in clusters. The item list drives every per-item
# column below; the explicit name vectors replace the original fragile
# positional selections .SDcols = c(191:206) / c(208:223), which silently
# break if any column is added or removed upstream.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)

# Weighted mean daily calories per item within each cluster of the poor.
for (i in seq_along(food_items)) {
  CBNPoor3[, (cluster_cols[i]) := weighted.mean(get(paste0(food_items[i], "_per_Calory")),
                                                Weight, na.rm = TRUE), by = cluster]
}
CBNPoor3[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols]

# Rescale each item so the cluster bundle totals 2100 kcal/day; 0/0 items
# (absent from a cluster) produce NA and are zeroed, as in the original.
for (i in seq_along(food_items)) {
  CBNPoor3[, (daily2_cols[i]) := get(cluster_cols[i]) * 2100 / Daily_Calories_cluster2]
  CBNPoor3[is.na(get(daily2_cols[i])), (daily2_cols[i]) := 0]
}
CBNPoor3[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols]

# Implicit price of one calorie in each cluster.
CBNPoor3[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor3[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories bought away from home: 70% of restaurant spending is treated as
# food and converted to calories at the cluster calorie price.
CBNPoor3[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor3[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]

# Drop zero at-home-calorie households, then total daily calories.
CBNPoor3 <- CBNPoor3[Per_Daily_Exp_Calories != 0]
CBNPoor3[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor3[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Sum of total food expenditures (at home + restaurant share).
CBNPoor3[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor3[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Food expenditures normalized to a 2100 kcal/day bundle.
CBNPoor3[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor3[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Diagnostic calculations (weighted cluster means).
CBNPoor3[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor3[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Food poverty line for each cluster: weighted mean bundle value among that
# cluster's poor. CBNPoorCluster stays assigned (last = cluster 4), matching
# the original in case later code reads it.
CBNPoorCluster <- CBNPoor3[cluster == 1]
Food_Povertyline1_3 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor3[cluster == 2]
Food_Povertyline2_3 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor3[cluster == 3]
Food_Povertyline3_3 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor3[cluster == 4]
Food_Povertyline4_3 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)

# Refresh the diagnostic subset `c` for the next iteration (name shadows
# base::c; kept because every following iteration reuses it).
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
######### Iteration 4 ###############
# One pass of the iterative cost-of-basic-needs (CBN) procedure:
#   1. re-flag poor households using the iteration-3 cluster poverty lines;
#   2. rebuild the 2100 kcal/day reference food bundle from the new poor;
#   3. derive the iteration-4 food poverty line of each cluster.
# Depends on globals produced by iteration 3 (CBN, c, CBNPoor3,
# Food_Povertyline1_3 .. Food_Povertyline4_3).

# Sort expenditure data (kept from the original flow; the aggregates below
# do not depend on row order).
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Indicate new poor: Poor4 = 1 when per-capita total food expenditure is
# below the household's own cluster poverty line from the previous pass.
fpl_prev <- c(Food_Povertyline1_3, Food_Povertyline2_3,
              Food_Povertyline3_3, Food_Povertyline4_3)
CBN[, Poor4 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
c[, Poor4 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
for (cl in 2:4) {
  CBN[, Poor4 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor4)]
  c[, Poor4 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor4)]
}
# Weighted headcount per cluster; previous-pass calorie check (diagnostics).
CBN[, weighted.mean(Poor4, Weight), by = cluster][order(cluster)]
CBNPoor3[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Subset to the newly flagged poor for this pass.
CBNPoor4 <- CBN[Poor4 == 1]

# Per-item daily calories in clusters. The item list drives every per-item
# column below; the explicit name vectors replace the original fragile
# positional selections .SDcols = c(191:206) / c(208:223), which silently
# break if any column is added or removed upstream.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)

# Weighted mean daily calories per item within each cluster of the poor.
for (i in seq_along(food_items)) {
  CBNPoor4[, (cluster_cols[i]) := weighted.mean(get(paste0(food_items[i], "_per_Calory")),
                                                Weight, na.rm = TRUE), by = cluster]
}
CBNPoor4[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols]

# Rescale each item so the cluster bundle totals 2100 kcal/day; 0/0 items
# (absent from a cluster) produce NA and are zeroed, as in the original.
for (i in seq_along(food_items)) {
  CBNPoor4[, (daily2_cols[i]) := get(cluster_cols[i]) * 2100 / Daily_Calories_cluster2]
  CBNPoor4[is.na(get(daily2_cols[i])), (daily2_cols[i]) := 0]
}
CBNPoor4[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols]

# Implicit price of one calorie in each cluster.
CBNPoor4[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor4[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories bought away from home: 70% of restaurant spending is treated as
# food and converted to calories at the cluster calorie price.
CBNPoor4[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor4[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]

# Drop zero at-home-calorie households, then total daily calories.
CBNPoor4 <- CBNPoor4[Per_Daily_Exp_Calories != 0]
CBNPoor4[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor4[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Sum of total food expenditures (at home + restaurant share).
CBNPoor4[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor4[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Food expenditures normalized to a 2100 kcal/day bundle.
CBNPoor4[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor4[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Diagnostic calculations (weighted cluster means).
CBNPoor4[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor4[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Food poverty line for each cluster: weighted mean bundle value among that
# cluster's poor. CBNPoorCluster stays assigned (last = cluster 4), matching
# the original in case later code reads it.
CBNPoorCluster <- CBNPoor4[cluster == 1]
Food_Povertyline1_4 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor4[cluster == 2]
Food_Povertyline2_4 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor4[cluster == 3]
Food_Povertyline3_4 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor4[cluster == 4]
Food_Povertyline4_4 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)

# Refresh the diagnostic subset `c` for the next iteration (name shadows
# base::c; kept because every following iteration reuses it).
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
######### Iteration 5 ###############
# One pass of the iterative cost-of-basic-needs (CBN) procedure:
#   1. re-flag poor households using the iteration-4 cluster poverty lines;
#   2. rebuild the 2100 kcal/day reference food bundle from the new poor;
#   3. derive the iteration-5 food poverty line of each cluster.
# Depends on globals produced by iteration 4 (CBN, c, CBNPoor4,
# Food_Povertyline1_4 .. Food_Povertyline4_4).

# Sort expenditure data (kept from the original flow; the aggregates below
# do not depend on row order).
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Indicate new poor: Poor5 = 1 when per-capita total food expenditure is
# below the household's own cluster poverty line from the previous pass.
fpl_prev <- c(Food_Povertyline1_4, Food_Povertyline2_4,
              Food_Povertyline3_4, Food_Povertyline4_4)
CBN[, Poor5 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
c[, Poor5 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
for (cl in 2:4) {
  CBN[, Poor5 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor5)]
  c[, Poor5 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor5)]
}
# Weighted headcount per cluster; previous-pass calorie check (diagnostics).
CBN[, weighted.mean(Poor5, Weight), by = cluster][order(cluster)]
CBNPoor4[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Subset to the newly flagged poor for this pass.
CBNPoor5 <- CBN[Poor5 == 1]

# Per-item daily calories in clusters. The item list drives every per-item
# column below; the explicit name vectors replace the original fragile
# positional selections .SDcols = c(191:206) / c(208:223), which silently
# break if any column is added or removed upstream.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)

# Weighted mean daily calories per item within each cluster of the poor.
for (i in seq_along(food_items)) {
  CBNPoor5[, (cluster_cols[i]) := weighted.mean(get(paste0(food_items[i], "_per_Calory")),
                                                Weight, na.rm = TRUE), by = cluster]
}
CBNPoor5[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols]

# Rescale each item so the cluster bundle totals 2100 kcal/day; 0/0 items
# (absent from a cluster) produce NA and are zeroed, as in the original.
for (i in seq_along(food_items)) {
  CBNPoor5[, (daily2_cols[i]) := get(cluster_cols[i]) * 2100 / Daily_Calories_cluster2]
  CBNPoor5[is.na(get(daily2_cols[i])), (daily2_cols[i]) := 0]
}
CBNPoor5[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols]

# Implicit price of one calorie in each cluster.
CBNPoor5[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor5[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories bought away from home: 70% of restaurant spending is treated as
# food and converted to calories at the cluster calorie price.
CBNPoor5[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor5[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]

# Drop zero at-home-calorie households, then total daily calories.
CBNPoor5 <- CBNPoor5[Per_Daily_Exp_Calories != 0]
CBNPoor5[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Sum of total food expenditures (at home + restaurant share).
CBNPoor5[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Food expenditures normalized to a 2100 kcal/day bundle.
CBNPoor5[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor5[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Diagnostic calculations (weighted cluster means).
CBNPoor5[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Food poverty line for each cluster: weighted mean bundle value among that
# cluster's poor. CBNPoorCluster stays assigned (last = cluster 4), matching
# the original in case later code reads it.
CBNPoorCluster <- CBNPoor5[cluster == 1]
Food_Povertyline1_5 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor5[cluster == 2]
Food_Povertyline2_5 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor5[cluster == 3]
Food_Povertyline3_5 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor5[cluster == 4]
Food_Povertyline4_5 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)

# Refresh the diagnostic subset `c` for the next iteration (name shadows
# base::c; kept because every following iteration reuses it).
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
######### Iteration 6 ###############
# One pass of the iterative cost-of-basic-needs (CBN) procedure:
#   1. re-flag poor households using the iteration-5 cluster poverty lines;
#   2. rebuild the 2100 kcal/day reference food bundle from the new poor;
#   3. derive the iteration-6 food poverty line of each cluster.
# Depends on globals produced by iteration 5 (CBN, c, CBNPoor5,
# Food_Povertyline1_5 .. Food_Povertyline4_5).

# Sort expenditure data (kept from the original flow; the aggregates below
# do not depend on row order).
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Indicate new poor: Poor6 = 1 when per-capita total food expenditure is
# below the household's own cluster poverty line from the previous pass.
fpl_prev <- c(Food_Povertyline1_5, Food_Povertyline2_5,
              Food_Povertyline3_5, Food_Povertyline4_5)
CBN[, Poor6 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
c[, Poor6 := ifelse(FoodExpenditure_Per_total < fpl_prev[1] & cluster == 1, 1, 0)]
for (cl in 2:4) {
  CBN[, Poor6 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor6)]
  c[, Poor6 := ifelse(FoodExpenditure_Per_total < fpl_prev[cl] & cluster == cl, 1, Poor6)]
}
# Weighted headcount per cluster; previous-pass calorie check (diagnostics).
CBN[, weighted.mean(Poor6, Weight), by = cluster][order(cluster)]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Subset to the newly flagged poor for this pass.
CBNPoor6 <- CBN[Poor6 == 1]

# Per-item daily calories in clusters. The item list drives every per-item
# column below; the explicit name vectors replace the original fragile
# positional selections .SDcols = c(191:206) / c(208:223), which silently
# break if any column is added or removed upstream.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
cluster_cols <- paste0("Daily_", food_items, "_cluster")
daily2_cols  <- paste0("Daily2_", food_items)

# Weighted mean daily calories per item within each cluster of the poor.
for (i in seq_along(food_items)) {
  CBNPoor6[, (cluster_cols[i]) := weighted.mean(get(paste0(food_items[i], "_per_Calory")),
                                                Weight, na.rm = TRUE), by = cluster]
}
CBNPoor6[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = cluster_cols]

# Rescale each item so the cluster bundle totals 2100 kcal/day; 0/0 items
# (absent from a cluster) produce NA and are zeroed, as in the original.
for (i in seq_along(food_items)) {
  CBNPoor6[, (daily2_cols[i]) := get(cluster_cols[i]) * 2100 / Daily_Calories_cluster2]
  CBNPoor6[is.na(get(daily2_cols[i])), (daily2_cols[i]) := 0]
}
CBNPoor6[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = daily2_cols]

# Implicit price of one calorie in each cluster.
CBNPoor6[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor6[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories bought away from home: 70% of restaurant spending is treated as
# food and converted to calories at the cluster calorie price.
CBNPoor6[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor6[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]

# Drop zero at-home-calorie households, then total daily calories.
CBNPoor6 <- CBNPoor6[Per_Daily_Exp_Calories != 0]
CBNPoor6[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor6[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Sum of total food expenditures (at home + restaurant share).
CBNPoor6[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Food expenditures normalized to a 2100 kcal/day bundle.
CBNPoor6[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor6[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Diagnostic calculations (weighted cluster means).
CBNPoor6[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Food poverty line for each cluster: weighted mean bundle value among that
# cluster's poor. CBNPoorCluster stays assigned (last = cluster 4), matching
# the original in case later code reads it.
CBNPoorCluster <- CBNPoor6[cluster == 1]
Food_Povertyline1_6 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor6[cluster == 2]
Food_Povertyline2_6 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor6[cluster == 3]
Food_Povertyline3_6 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)
CBNPoorCluster <- CBNPoor6[cluster == 4]
Food_Povertyline4_6 <- weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE)

# Refresh the diagnostic subset `c` for the next iteration (name shadows
# base::c; kept because the following iteration reuses it).
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
#########Iteration 7###############
# One step of the iterative CBN procedure: re-flag poor households against the
# iteration-6 food poverty lines, then recompute cluster diet composition,
# calorie price, 2100-kcal bundle value and new food poverty lines (*_7).
# Food item names are Persian transliterations — presumably Ghand=sugar,
# Hoboobat=legumes, Nan=bread, Berenj=rice, Roghan=oil, Goosht=red meat,
# Morgh=chicken, Mahi=fish, Shir=milk, Mast=yogurt, Panir=cheese,
# Tokhmemorgh=eggs, Mive=fruit, Sabzi=vegetables, Makarooni=pasta,
# Sibzamini=potato — TODO confirm against the data dictionary.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
# Poor7 = 1 when total food spending is below the cluster's iteration-6 line.
CBN[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_6 & cluster==1,1,0)]
c[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_6 & cluster==1 ,1,0)]
CBN[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_6 & cluster==2,1,Poor7)]
c[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_6 & cluster==2 ,1,Poor7)]
CBN[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_6 & cluster==3,1,Poor7)]
c[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_6 & cluster==3 ,1,Poor7)]
CBN[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_6 & cluster==4,1,Poor7)]
c[,Poor7:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_6 & cluster==4 ,1,Poor7)]
CBN[,weighted.mean(Poor7,Weight),by=cluster][order(cluster)]
CBNPoor6[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor7<-CBN[Poor7==1]
#CalculatePer_calories in clusters
# Cluster-level mean daily calories per food item among iteration-7 poor.
CBNPoor7[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# WARNING: positional .SDcols — relies on the 16 Daily_*_cluster columns just
# created landing at positions 191:206; any upstream schema change breaks this.
CBNPoor7[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor7)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's calories so the cluster total equals 2100 kcal/day;
# each `for` immediately replaces NA (division by zero/missing) with 0.
CBNPoor7[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor7[is.na(get(col)), (col) := 0]
# WARNING: positional .SDcols again — Daily2_* columns assumed at 208:223.
CBNPoor7[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
CBNPoor7[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
# Implied price per calorie for the cluster.
CBNPoor7[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor7[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# Impute calories from 70% of restaurant spending via the calorie price.
CBNPoor7[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor7[is.na(get(col)), (col) := 0]
CBNPoor7<-CBNPoor7[Per_Daily_Exp_Calories!=0]
CBNPoor7[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor7[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor7[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor7[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor7[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor7[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor7[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor7[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Iteration-7 food poverty lines per cluster (weighted mean bundle cost).
#cluster 1
CBNPoorCluster<-CBNPoor7[cluster==1]
Food_Povertyline1_7<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor7[cluster==2]
Food_Povertyline2_7<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor7[cluster==3]
Food_Povertyline3_7<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor7[cluster==4]
Food_Povertyline4_7<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 8###############
# Identical to iteration 7 but driven by the *_7 food poverty lines;
# produces CBNPoor8 and the *_8 lines. See iteration 7 for step-by-step notes.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_7 & cluster==1,1,0)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_7 & cluster==1 ,1,0)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_7 & cluster==2,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_7 & cluster==2 ,1,Poor8)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_7 & cluster==3,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_7 & cluster==3 ,1,Poor8)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_7 & cluster==4,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_7 & cluster==4 ,1,Poor8)]
CBN[,weighted.mean(Poor8,Weight),by=cluster][order(cluster)]
CBNPoor7[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8<-CBN[Poor8==1]
#CalculatePer_calories in clusters
CBNPoor8[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# WARNING: positional .SDcols — assumes Daily_*_cluster columns at 191:206.
CBNPoor8[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor8)
#Calculate Per_calories in clusters(=2100)
CBNPoor8[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor8[is.na(get(col)), (col) := 0]
# WARNING: positional .SDcols — assumes Daily2_* columns at 208:223.
CBNPoor8[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
CBNPoor8[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor8[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor8[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8<-CBNPoor8[Per_Daily_Exp_Calories!=0]
CBNPoor8[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor8[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor8[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor8[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor8[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
#cluster 1
CBNPoorCluster<-CBNPoor8[cluster==1]
Food_Povertyline1_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor8[cluster==2]
Food_Povertyline2_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor8[cluster==3]
Food_Povertyline3_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor8[cluster==4]
Food_Povertyline4_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 9###############
# Identical to iterations 7/8 but driven by the *_8 food poverty lines;
# produces CBNPoor9 and the *_9 lines used by the final Engel step below.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_8 & cluster==1,1,0)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_8 & cluster==1 ,1,0)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_8 & cluster==2,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_8 & cluster==2 ,1,Poor9)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_8 & cluster==3,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_8 & cluster==3 ,1,Poor9)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_8 & cluster==4,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_8 & cluster==4 ,1,Poor9)]
CBN[,weighted.mean(Poor9,Weight),by=cluster][order(cluster)]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9<-CBN[Poor9==1]
#CalculatePer_calories in clusters
CBNPoor9[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# WARNING: positional .SDcols — assumes Daily_*_cluster columns at 191:206.
CBNPoor9[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor9)
#Calculate Per_calories in clusters(=2100)
CBNPoor9[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor9[is.na(get(col)), (col) := 0]
# WARNING: positional .SDcols — assumes Daily2_* columns at 208:223.
CBNPoor9[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
CBNPoor9[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor9[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor9[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9<-CBNPoor9[Per_Daily_Exp_Calories!=0]
CBNPoor9[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor9[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor9[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor9[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
#cluster 1
CBNPoorCluster<-CBNPoor9[cluster==1]
Food_Povertyline1_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor9[cluster==2]
Food_Povertyline2_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor9[cluster==3]
Food_Povertyline3_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor9[cluster==4]
Food_Povertyline4_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 10###############
# Final re-flagging against the converged *_9 food poverty lines, plus the
# food and service expenditure shares used in the Engel step below.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_9 & cluster==1,1,0)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_9 & cluster==1 ,1,0)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_9 & cluster==2,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_9 & cluster==2 ,1,Poor10)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_9 & cluster==3,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_9 & cluster==3 ,1,Poor10)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_9 & cluster==4,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_9 & cluster==4 ,1,Poor10)]
CBN[,weighted.mean(Poor10,Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor10<-CBN[Poor10==1]
#Engel
#CBNPoor9<-CBNPoor9[,ratio1:=FoodExpenditure/Total_Exp_Month]
#CBNPoor9[,weighted.mean(ratio1,Weight),by=cluster]
#summary(CBNPoor9$ratio1)
# ratio1 = food share of total monthly expenditure (Engel ratio);
# ratio2 = service share. Computed on the full CBN, not just the poor.
CBN<-CBN[,ratio1:=FoodExpenditure/Total_Exp_Month]
CBN[,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN<-CBN[,ratio2:=ServiceExp/Total_Exp_Month]
CBN[,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
#Engel-home ratio calculations
# Diagnostics on the Engel band: households whose total food expenditure lies
# within +/-10% of their OWN cluster's food poverty line. For each cluster we
# print, by province: the individual count, the weighted mean food share
# (ratio1) and the weighted mean service share (ratio2).
# FIX: the lower bound previously used Food_Povertyline1_9 for clusters 2-4
# (copy-paste error); it now uses each cluster's own line, matching the band
# definitions used for UrbanEngel2-4 further below.
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
# Poverty Line for each cluster
# For each cluster: extract the Engel band (+/-10% around the cluster's food
# poverty line), the below-band and above-band subsets, compute the Engel
# coefficient, and scale the food poverty line by its inverse to get the
# total (food + non-food) poverty line.
# NOTE(review): Engel1..Engel4 are computed from the LAST CBNPoorCluster
# assignment, i.e. the ABOVE-band subset (> 1.1x line), not the +/-10% band
# saved into UrbanEngel1..4 — confirm this is intended and not a sequencing bug.
#cluster 1
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9]
UrbanEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total<0.9*Food_Povertyline1_9]
UrbanunderEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total>1.1*Food_Povertyline1_9]
UrbanaboveEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel1<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse1<-1/Engel1
Povertyline1_9<-Engel_Reverse1*Food_Povertyline1_9
#cluster 2
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9]
UrbanEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total<0.9*Food_Povertyline2_9]
UrbanunderEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total>1.1*Food_Povertyline2_9]
UrbanaboveEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel2<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse2<-1/Engel2
Povertyline2_9<-Engel_Reverse2*Food_Povertyline2_9
#cluster 3
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9]
UrbanEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total<0.9*Food_Povertyline3_9]
UrbanunderEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total>1.1*Food_Povertyline3_9]
UrbanaboveEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel3<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse3<-1/Engel3
Povertyline3_9<-Engel_Reverse3*Food_Povertyline3_9
#cluster 4
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9]
UrbanEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total<0.9*Food_Povertyline4_9]
UrbanunderEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total>1.1*Food_Povertyline4_9]
UrbanaboveEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel4<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse4<-1/Engel4
Povertyline4_9<-Engel_Reverse4*Food_Povertyline4_9
# Persist the band/below/above household lists for the report.
UrbanEngel<-rbind(UrbanEngel1,UrbanEngel2,UrbanEngel3,UrbanEngel4)
save(UrbanEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanEngel.rda"))
UrbanunderEngel<-rbind(UrbanunderEngel1,UrbanunderEngel2,UrbanunderEngel3,UrbanunderEngel4)
save(UrbanunderEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanunderEngel.rda"))
UrbanaboveEngel<-rbind(UrbanaboveEngel1,UrbanaboveEngel2,UrbanaboveEngel3,UrbanaboveEngel4)
save(UrbanaboveEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanaboveEngel.rda"))
Povertyline1_9<-4107570
Povertyline2_9<-3642510
Povertyline3_9<-5055730
Povertyline4_9<-7689100
#Indicate final poors
# Deflate per-capita non-durable expenditure by the real price index.
# NOTE(review): Total_Exp_Month_Per2 is created here but the Poor11 rules
# below compare the UNADJUSTED Total_Exp_Month_Per_nondurable against the
# poverty lines — confirm whether the deflated series was meant to be used.
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
# Poor11 = 1 when per-capita non-durable spending is below the household's
# cluster-specific poverty line. Each statement handles one cluster, carrying
# previous results through the else branch.
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline1_9 & cluster==1,1,0)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline2_9 & cluster==2,1,Poor11)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline3_9 & cluster==3,1,Poor11)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline4_9 & cluster==4,1,Poor11)]
# Diagnostic console output: head-count ratios and population counts by
# cluster/decile/province. CBNPoor9 and CBNPoor are built earlier in the file.
CBN[,weighted.mean(Poor11,Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor11<-CBN[Poor11==1]
CBN[,sum(Size*Weight),by=cluster][order(cluster)]
CBN[,sum(Size*Weight),by=.(cluster,Decile)][order(cluster,Decile)]
CBNPoor[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor11[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor9[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBN[,weighted.mean(Poor11,Weight)]
CBN[,weighted.mean(Poor11,Weight),by=cluster][order(cluster)]
CBN[,weighted.mean(Poor11,Weight),by=ProvinceCode][order(ProvinceCode)]
CBNPoor11[,sum(Size*Weight),by=ProvinceCode][order(ProvinceCode)]
CBNPoor11[,sum(Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[,sum(Size*Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[,sum(Size*Weight)]
CBNPoor11[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBNPoor11[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster][order(cluster)]
##############################
###Real Prices for report###
##############################
#sum of total food expenditures
# NOTE(review): this OVERWRITES FoodExpenditure_Per_total (which earlier in the
# file included a restaurant component) with the plain per-capita food
# expenditure — confirm the restaurant share is meant to be dropped here.
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per]
#Food expenditures (equal 2100 CCAL)
# Drop zero-calorie households to avoid division by zero below.
CBN<-CBN[Per_Daily_Exp_Calories>0]
# Cost of a 2100-kcal/day food bundle per household.
CBN[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Exp_Calories]
CBN[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
# Tehran city (ShahrestanCode 2301, promoted to ProvinceCode earlier) is the
# base region for both price indices.
T_Bundle_Value<-subset(CBN, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Province-level food (index 1) and housing (index 2) price indices, Tehran=1.
CBN[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBN[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBN[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBN[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2_1<-CBN[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
# Composite index = simple average of the food and housing indices.
Indexes2_1<-Indexes2_1[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes_total<-Indexes2_1[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes3_1<-Indexes_total[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
# NOTE(review): `Indexes` is not defined anywhere in this visible section —
# presumably it comes from earlier in this (very long) script; if not, this
# line errors. It may have been intended as Indexes3_1. Verify.
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Same index construction restricted to the final poor (Poor11==1).
CBN_Poor<-CBN[Poor11==1]
T_Bundle_Value<-subset(CBN_Poor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
CBN_Poor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBN_Poor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBN_Poor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBN_Poor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2_2<-CBN_Poor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight,Poor11)]
Indexes2_2<-Indexes2_2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes_finalpoor<-Indexes2_2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes3_2<-Indexes_finalpoor[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
# NOTE(review): same concern as above — possibly intended as Indexes3_2.
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
###Save Tables
# NOTE(review): filenames hard-code "95" while other saves use the `year`
# variable — confirm this is intentional.
CBN_Urban<-CBN
save(CBN_Urban, file = paste0(Settings$HEISProcessedPath,"CBN_Urban","95.rda"))
CBNPoor_Urban<-CBNPoor11
save(CBNPoor_Urban, file = paste0(Settings$HEISProcessedPath,"CBNPoor_Urban","95.rda"))
#utils::View(CBN)
# Replace NA unit prices with 0 (by reference) for every bundle item so the
# weighted bundle-value computation below never propagates NA.
# Consolidated from sixteen single-column loops into one loop over the full
# price-column vector — identical semantics, one pass per column as before.
for (col in c("GhandPrice","HoboobatPrice","RoghanPrice","BerenjPrice",
              "NanPrice","GooshtPrice","MorghPrice","MahiPrice",
              "ShirPrice","MastPrice","PanirPrice","TokhmemorghPrice",
              "MivePrice","SabziPrice","MakarooniPrice","SibzaminiPrice"))
  CBNPoor11[is.na(get(col)), (col) := 0]
# x = total value of the consumed food bundle (sum over item price * grams).
CBNPoor11<-CBNPoor11[,x:=GhandPrice*Ghandgram+HoboobatPrice*Hoboobatgram+RoghanPrice*Roghangram+BerenjPrice*Berenjgram+NanPrice*Nangram+GooshtPrice*Gooshtgram+MorghPrice*Morghgram+MahiPrice*Mahigram+ShirPrice*Shirgram+MastPrice*Mastgram+PanirPrice*Panirgram+TokhmemorghPrice*Tokhmemorghgram+MivePrice*Mivegram+SabziPrice*Sabzigram+MakarooniPrice*Makaroonigram+SibzaminiPrice*Sibzaminigram]
# Weighted budget share of each item within the bundle.
# NOTE(review): `cluster==4` sits in the data.table `by` position, so each
# call GROUPS results into cluster==4 TRUE/FALSE rather than filtering to
# cluster 4 — if only cluster 4 was wanted, the condition belongs in `i`
# (CBNPoor11[cluster==4, ...]). Confirm intent.
CBNPoor11[,weighted.mean((GhandPrice*Ghandgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((HoboobatPrice*Hoboobatgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((RoghanPrice*Roghangram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((BerenjPrice*Berenjgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((NanPrice*Nangram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((GooshtPrice*Gooshtgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((MorghPrice*Morghgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((MahiPrice*Mahigram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((ShirPrice*Shirgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((MastPrice*Mastgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((PanirPrice*Panirgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((TokhmemorghPrice*Tokhmemorghgram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((MivePrice*Mivegram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((SabziPrice*Sabzigram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((MakarooniPrice*Makaroonigram)/x,Weight),cluster==4]
CBNPoor11[,weighted.mean((SibzaminiPrice*Sibzaminigram)/x,Weight),cluster==4]
# Replace NA unit prices with 0 (by reference) in the full CBN table, mirroring
# the treatment applied to CBNPoor11 above.
# Consolidated from sixteen single-column loops into one loop over the full
# price-column vector — identical semantics.
for (col in c("GhandPrice","HoboobatPrice","RoghanPrice","BerenjPrice",
              "NanPrice","GooshtPrice","MorghPrice","MahiPrice",
              "ShirPrice","MastPrice","PanirPrice","TokhmemorghPrice",
              "MivePrice","SabziPrice","MakarooniPrice","SibzaminiPrice"))
  CBN[is.na(get(col)), (col) := 0]
# Bundle value for the full sample, same construction as for CBNPoor11.
CBN<-CBN[,x:=GhandPrice*Ghandgram+HoboobatPrice*Hoboobatgram+RoghanPrice*Roghangram+BerenjPrice*Berenjgram+NanPrice*Nangram+GooshtPrice*Gooshtgram+MorghPrice*Morghgram+MahiPrice*Mahigram+ShirPrice*Shirgram+MastPrice*Mastgram+PanirPrice*Panirgram+TokhmemorghPrice*Tokhmemorghgram+MivePrice*Mivegram+SabziPrice*Sabzigram+MakarooniPrice*Makaroonigram+SibzaminiPrice*Sibzaminigram]
# Weighted item budget shares scaled by per-item multipliers.
# NOTE(review): the multipliers (103.5, 117, ...) are undocumented magic
# numbers — presumably item-specific price-growth indices for the target year.
# Confirm their source and vintage.
CBN[,weighted.mean((GhandPrice*Ghandgram)/x,Weight)*103.5]
CBN[,weighted.mean((HoboobatPrice*Hoboobatgram)/x,Weight)*117]
CBN[,weighted.mean((RoghanPrice*Roghangram)/x,Weight)*111.5]
CBN[,weighted.mean((BerenjPrice*Berenjgram)/x,Weight)*114.3]
CBN[,weighted.mean((NanPrice*Nangram)/x,Weight)*110.8]
CBN[,weighted.mean((GooshtPrice*Gooshtgram)/x,Weight)*123.5]
CBN[,weighted.mean((MorghPrice*Morghgram)/x,Weight)*114.1]
CBN[,weighted.mean((MahiPrice*Mahigram)/x,Weight)*107.6]
CBN[,weighted.mean((ShirPrice*Shirgram)/x,Weight)*103.4]
CBN[,weighted.mean((MastPrice*Mastgram)/x,Weight)*106.1]
CBN[,weighted.mean((PanirPrice*Panirgram)/x,Weight)*106.8]
CBN[,weighted.mean((TokhmemorghPrice*Tokhmemorghgram)/x,Weight)*125.9]
CBN[,weighted.mean((MivePrice*Mivegram)/x,Weight)*97.1]
CBN[,weighted.mean((SabziPrice*Sabzigram)/x,Weight)*109.9]
CBN[,weighted.mean((MakarooniPrice*Makaroonigram)/x,Weight)*101.9]
CBN[,weighted.mean((SibzaminiPrice*Sibzaminigram)/x,Weight)*119.7]
# Persist the final urban outputs. These rely on `year` being defined in the
# surrounding script scope.
CBNUrban<-CBN[,.(HHID,Percentile,Poor2,Weight,ProvinceCode)]
save(CBNUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"CBNUrban.rda"))
OldFoodUrban<-CBN[,.(HHID,Percentile,Poor9)]
save(OldFoodUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFoodUrban.rda"))
OldFinalPoorUrban<-CBN[,.(HHID,Percentile,Poor11)]
save(OldFinalPoorUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFinalPoorUrban.rda"))
# Report elapsed processing time (prints the components of the proc.time diff).
endtime <- proc.time()
cat("\n\n============================\nIt took ")
cat(endtime-starttime)
|
/R/Archive/Other Codes/CBN Urban-Sort only first stage.R
|
no_license
|
IPRCIRI/IRHEIS
|
R
| false
| false
| 184,171
|
r
|
#CBN Method-Urban.R
#
# Copyright © 2018:Arin Shahbazian
# Licence: GPL-3
#
# NOTE(review): rm(list=ls()) wipes the caller's workspace — acceptable only
# if this script is always run standalone; avoid if it is ever source()'d.
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ Prepare Data =====================================\n")
# Settings.yaml supplies data paths (HEISProcessedPath) and year range.
library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(stringr)
library(data.table)
library(sm)
library(ggplot2)
# NOTE(review): the per-year loop is commented out, yet later code references
# `year` — confirm `year` is defined elsewhere or uncomment the loop.
#for(year in (Settings$startyear:Settings$endyear)){
# cat(paste0("\n------------------------------\nYear:",year,"\n"))
# Load the year-95 household base file and drop the individual-level columns,
# keeping only household-level identifiers/attributes.
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHBase.rda"))
# Single by-reference deletion of all twelve columns — same effect as the
# original twelve separate [,col:=NULL] calls. ("MarritalState" is the actual
# column name in the data, typo and all.)
HHBase[, c("IndivNo","Relationship","Sex","Age","Literate","Student",
           "EduCode","EduYears","EduLevel","EduLevel0",
           "ActivityState","MarritalState") := NULL]
# Load per-item food tables (grams purchased and unit price per household)
# plus sampling weights, all for survey year 95.
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Ghand_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Hoboobat_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Roghan_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Berenj_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Nan_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Goosht_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Morgh_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mahi_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Shir_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mast_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Panir_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Tokhmemorgh_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Mive_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Sabzi_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Makarooni_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Sibzamini_Data.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Weights.rda"))
# Keep only HHID + grams + price for each item before merging.
Ghand_Data<-Ghand_Data[,.(HHID,Ghandgram,GhandPrice)]
Hoboobat_Data<-Hoboobat_Data[,.(HHID,Hoboobatgram,HoboobatPrice)]
Roghan_Data<-Roghan_Data[,.(HHID,Roghangram,RoghanPrice)]
Berenj_Data<-Berenj_Data[,.(HHID,Berenjgram,BerenjPrice)]
Nan_Data<-Nan_Data[,.(HHID,Nangram,NanPrice)]
Goosht_Data<-Goosht_Data[,.(HHID,Gooshtgram,GooshtPrice)]
Morgh_Data<-Morgh_Data[,.(HHID,Morghgram,MorghPrice)]
Mahi_Data<-Mahi_Data[,.(HHID,Mahigram,MahiPrice)]
Shir_Data<-Shir_Data[,.(HHID,Shirgram,ShirPrice)]
Mast_Data<-Mast_Data[,.(HHID,Mastgram,MastPrice)]
Panir_Data<-Panir_Data[,.(HHID,Panirgram,PanirPrice)]
Tokhmemorgh_Data<-Tokhmemorgh_Data[,.(HHID,Tokhmemorghgram,TokhmemorghPrice)]
Mive_Data<-Mive_Data[,.(HHID,Mivegram,MivePrice)]
Sabzi_Data<-Sabzi_Data[,.(HHID,Sabzigram,SabziPrice)]
Makarooni_Data<-Makarooni_Data[,.(HHID,Makaroonigram,MakarooniPrice)]
Sibzamini_Data<-Sibzamini_Data[,.(HHID,Sibzaminigram,SibzaminiPrice)]
# Left-join each item table onto the household base, then zero the gram
# columns for households that bought none of that item (prices stay NA here;
# they are zeroed later, just before the bundle-value computation).
Food<-merge(HHBase,Ghand_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Ghandgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Hoboobat_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Hoboobatgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Roghan_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Roghangram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Berenj_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Berenjgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Nan_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Nangram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Goosht_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Gooshtgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Morgh_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Morghgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mahi_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mahigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Shir_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Shirgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mast_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mastgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Panir_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Panirgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Tokhmemorgh_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Tokhmemorghgram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Mive_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Mivegram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Sabzi_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Sabzigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Makarooni_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Makaroonigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Sibzamini_Data,by =c("HHID"),all.x=TRUE)
for (col in c("Sibzaminigram")) Food[is.na(get(col)), (col) := 0]
Food<-merge(Food,Weights,by =c("HHID"),all.x=TRUE)
#load Expenditure groups
# Load all COICOP-style expenditure group tables for year 95.
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHBase.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","HHI.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Foods.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Cigars.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Cloths.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Amusements.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Communications.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Durables.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Education.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Energy.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Furnitures.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Hotels.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","House.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Medicals.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Behdashts.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Transportations.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Others.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Investments.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Resturants.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y","95","Weights.rda"))
#merge Expenditure groups
# Full outer joins onto Food (as CBN), zeroing each group's expenditure for
# households absent from that group's table.
# NOTE(review): the merge ORDER fixes the column positions that later
# positional .SDcols ranges (66:78, 82:84, 149:164, ...) depend on — do not
# reorder these merges without updating those ranges.
CBN<-merge(Food,HHI ,by =c("HHID"),all=TRUE)
CBN<-merge(CBN,FoodData,by =c("HHID"),all=TRUE)
for (col in c("FoodExpenditure")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,CigarData,by =c("HHID"),all=TRUE)
for (col in c("Cigar_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,ClothData,by =c("HHID"),all=TRUE)
for (col in c("Cloth_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,AmusementData,by =c("HHID"),all=TRUE)
for (col in c("Amusement_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,CommunicationData,by =c("HHID"),all=TRUE)
for (col in c("Communication_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,EducData,by =c("HHID"),all=TRUE)
for (col in c("EducExpenditure")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,EnergyData,by =c("HHID"),all=TRUE)
for (col in c("Energy_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,FurnitureData,by =c("HHID"),all=TRUE)
for (col in c("Furniture_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,HotelData,by =c("HHID"),all=TRUE)
for (col in c("Hotel_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,BehdashtData,by =c("HHID"),all=TRUE)
for (col in c("Behdasht_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,TransportationData,by =c("HHID"),all=TRUE)
for (col in c("Transportation_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,OtherData,by =c("HHID"),all=TRUE)
for (col in c("Other_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,HouseData,by =c("HHID"),all=TRUE)
for (col in c("ServiceExp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,InvestmentData,by =c("HHID"),all=TRUE)
for (col in c("Investment_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,MedicalData,by =c("HHID"),all=TRUE)
for (col in c("Medical_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,DurableData,by =c("HHID"),all=TRUE)
for (col in c("Durable_Exp")) CBN[is.na(get(col)), (col) := 0]
CBN<-merge(CBN,ResturantData,by =c("HHID"),all=TRUE)
for (col in c("Resturant_Exp")) CBN[is.na(get(col)), (col) := 0]
# Keep non-empty urban households with positive food expenditure.
CBN<-CBN[Size!=0]
CBN<-CBN[Region=="Urban"]
CBN<-CBN[FoodExpenditure!=0]
#Calculate Per_Total Expenditures Monthly
# Total monthly expenditure = row-sum over POSITIONAL column ranges.
# NOTE(review): .SDcols=66:78 / 82:84 are fragile positional references tied
# to the merge order above — verify they still pick the expenditure-group
# columns after any upstream change.
CBN[, Total_Exp_Month := Reduce(`+`, .SD), .SDcols=c(66:78,82:84)][]
CBN[, Total_Exp_Month_nondurable := Reduce(`+`, .SD), .SDcols=66:78][]
# Per-adult-equivalent (OECD scale) expenditures.
CBN$Total_Exp_Month_Per<-CBN$Total_Exp_Month/CBN$EqSizeOECD
CBN$Total_Exp_Month_Per_nondurable<-CBN$Total_Exp_Month_nondurable/CBN$EqSizeOECD
#Calculate Per_Food Expenditures Monthly
# Calorie-based equivalence scale: children count as 1800/2100 of an adult.
CBN[,EqSizeCalory :=(Size-NKids) + NKids*(1800/2100)]
CBN$FoodExpenditure_Per<-CBN$FoodExpenditure/CBN$EqSizeCalory
#Calculate Per_Food Expenditures Daily
CBN$FoodExpenditure_Per_day<-CBN$FoodExpenditure_Per/30
# Per-equivalent-adult daily grams for each food item (monthly grams / 30 days).
CBN$Ghandgram_Per_day<-CBN$Ghandgram/(30*CBN$EqSizeOECD)
CBN$Hoboobatgram_Per_day<-CBN$Hoboobatgram/(30*CBN$EqSizeOECD)
CBN$Berenjgram_Per_day<-CBN$Berenjgram/(30*CBN$EqSizeOECD)
CBN$Nangram_Per_day<-CBN$Nangram/(30*CBN$EqSizeOECD)
CBN$Roghangram_Per_day<-CBN$Roghangram/(30*CBN$EqSizeOECD)
CBN$Gooshtgram_Per_day<-CBN$Gooshtgram/(30*CBN$EqSizeOECD)
CBN$Morghgram_Per_day<-CBN$Morghgram/(30*CBN$EqSizeOECD)
CBN$Mahigram_Per_day<-CBN$Mahigram/(30*CBN$EqSizeOECD)
CBN$Shirgram_Per_day<-CBN$Shirgram/(30*CBN$EqSizeOECD)
CBN$Mastgram_Per_day<-CBN$Mastgram/(30*CBN$EqSizeOECD)
CBN$Panirgram_Per_day<-CBN$Panirgram/(30*CBN$EqSizeOECD)
CBN$Tokhmemorghgram_Per_day<-CBN$Tokhmemorghgram/(30*CBN$EqSizeOECD)
CBN$Mivegram_Per_day<-CBN$Mivegram/(30*CBN$EqSizeOECD)
CBN$Sabzigram_Per_day<-CBN$Sabzigram/(30*CBN$EqSizeOECD)
CBN$Makaroonigram_Per_day<-CBN$Makaroonigram/(30*CBN$EqSizeOECD)
CBN$Sibzaminigram_Per_day<-CBN$Sibzaminigram/(30*CBN$EqSizeOECD)
# Dropped here; recreated later (L14979 area) before calorie computations.
CBN[,EqSizeCalory:=NULL]
# Merge province-level price indices (files expected in the working directory).
load(file="PriceIndex95.rda")
CBN<-merge(CBN,PriceIndex95,by=c("ProvinceCode"),all.x = TRUE)
CBN[,ostan:=NULL]
load(file="PriceIndex.rda")
CBN<-merge(CBN,PriceIndex,by=c("ProvinceCode"),all.x = TRUE)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per_nondurable)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
# Weighted deciles/percentiles from the cumulative-weight distribution.
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
# Monthly monetary value of each item's daily per-equivalent consumption:
# grams/day * price * 0.001 (price appears to be per kg — TODO confirm) * 30.
CBN$Ghand_W<-CBN$Ghandgram_Per_day*CBN$GhandPrice*0.001*30
CBN$Hoboobat_W<-CBN$Hoboobatgram_Per_day*CBN$HoboobatPrice*0.001*30
CBN$Roghan_W<-CBN$Roghangram_Per_day*CBN$RoghanPrice*0.001*30
CBN$Berenj_W<-CBN$Berenjgram_Per_day*CBN$BerenjPrice*0.001*30
CBN$Nan_W<-CBN$Nangram_Per_day*CBN$NanPrice*0.001*30
CBN$Goosht_W<-CBN$Gooshtgram_Per_day*CBN$GooshtPrice*0.001*30
CBN$Morgh_W<-CBN$Morghgram_Per_day*CBN$MorghPrice*0.001*30
CBN$Mahi_W<-CBN$Mahigram_Per_day*CBN$MahiPrice*0.001*30
CBN$Shir_W<-CBN$Shirgram_Per_day*CBN$ShirPrice*0.001*30
CBN$Mast_W<-CBN$Mastgram_Per_day*CBN$MastPrice*0.001*30
CBN$Panir_W<-CBN$Panirgram_Per_day*CBN$PanirPrice*0.001*30
CBN$Tokhmemorgh_W<-CBN$Tokhmemorghgram_Per_day*CBN$TokhmemorghPrice*0.001*30
CBN$Mive_W<-CBN$Mivegram_Per_day*CBN$MivePrice*0.001*30
CBN$Sabzi_W<-CBN$Sabzigram_Per_day*CBN$SabziPrice*0.001*30
CBN$Makarooni_W<-CBN$Makaroonigram_Per_day*CBN$MakarooniPrice*0.001*30
CBN$Sibzamini_W<-CBN$Sibzaminigram_Per_day*CBN$SibzaminiPrice*0.001*30
# Housing: per-equivalent service expenditure and per-equivalent metre price.
CBN$Home_W<-CBN$ServiceExp/CBN$EqSizeOECD
CBN$Home_Per_Metr<-CBN$MetrPrice/CBN$EqSizeOECD
#Seperate big cities
# Population counts (diagnostic) then promote the eight largest cities'
# ShahrestanCode to stand in as their own ProvinceCode, so they get their own
# price indices downstream.
CBN[,sum(Weight*Size),by=ProvinceCode][order(V1)]
# ShahrestanCode = digits 2-5 of HHID.
CBN[,HHIDs:=as.character(HHID)]
CBN[,ShahrestanCode:=as.integer(str_sub(HHIDs,2,5))]
CBN[,sum(Weight*Size),by=ShahrestanCode][order(V1)][330:387]
CBNTehran<-CBN[ProvinceCode==23]
CBNTehran[,sum(Weight*Size),by=ShahrestanCode]
CBNTabriz<-CBN[ProvinceCode==3]
CBNTabriz[,sum(Weight*Size),by=ShahrestanCode]
CBNAhvaz<-CBN[ProvinceCode==6]
CBNAhvaz[,sum(Weight*Size),by=ShahrestanCode]
CBNShiraz<-CBN[ProvinceCode==7]
CBNShiraz[,sum(Weight*Size),by=ShahrestanCode]
CBNMashhad<-CBN[ProvinceCode==9]
CBNMashhad[,sum(Weight*Size),by=ShahrestanCode]
CBNEsfahan<-CBN[ProvinceCode==10]
CBNEsfahan[,sum(Weight*Size),by=ShahrestanCode]
CBNKaraj<-CBN[ProvinceCode==30]
CBNKaraj[,sum(Weight*Size),by=ShahrestanCode]
CBNKermanshah<-CBN[ProvinceCode==5]
CBNKermanshah[,sum(Weight*Size),by=ShahrestanCode]
# Recode: big-city households get ShahrestanCode as ProvinceCode
# (2301=Tehran, 303=Tabriz, 603=Ahvaz, 707=Shiraz, 916=Mashhad,
# 1002=Esfahan, 3001=Karaj, 502=Kermanshah — TODO confirm mapping).
CBN<-CBN[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==303,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==603,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==707,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==916,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==1002,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==3001,ProvinceCode:=as.numeric(ShahrestanCode)]
# NOTE(review): 2301 is recoded twice (also first line of this group) —
# harmless duplicate, but one of the two lines is redundant.
CBN<-CBN[ShahrestanCode==2301,ProvinceCode:=as.numeric(ShahrestanCode)]
CBN<-CBN[ShahrestanCode==502,ProvinceCode:=as.numeric(ShahrestanCode)]
# Food Calories
# Convert monthly purchased grams to calories using kcal-per-gram factors
# (approximate energy densities — TODO confirm against the nutrition table
# the project uses).
CBN$Ghand_Calory<- CBN$Ghandgram *4
CBN$Hoboobat_Calory<- CBN$Hoboobatgram *3
CBN$Nan_Calory<- CBN$Nangram *2.5
CBN$Berenj_Calory<- CBN$Berenjgram *1.2
CBN$Roghan_Calory<- CBN$Roghangram *8
CBN$Goosht_Calory<- CBN$Gooshtgram *2.5
CBN$Morgh_Calory<- CBN$Morghgram *2
CBN$Mahi_Calory<- CBN$Mahigram *1
CBN$Shir_Calory<- CBN$Shirgram *2.5
CBN$Mast_Calory<- CBN$Mastgram *1.5
# BUG FIX: previously computed from Mastgram (copy-paste from the Mast line);
# cheese (Panir) calories must come from Panirgram. Note this changes
# downstream calorie totals relative to earlier runs.
CBN$Panir_Calory<- CBN$Panirgram *2.5
CBN$Tokhmemorgh_Calory<- CBN$Tokhmemorghgram *1.4
CBN$Mive_Calory<- CBN$Mivegram *0.5
CBN$Sabzi_Calory<- CBN$Sabzigram *0.5
CBN$Makarooni_Calory<- CBN$Makaroonigram *3.6
CBN$Sibzamini_Calory<- CBN$Sibzaminigram *0.9
#utils::View(CBN)
#CalculatePer_calories
# Daily household calories = row-sum of the 16 *_Calory columns.
# NOTE(review): .SDcols=149:164 is positional — it must line up with the
# *_Calory columns created just above; verify after any column change.
CBN[, Daily_Exp_Calories := Reduce(`+`, .SD), .SDcols=149:164][]
# Recreate the calorie equivalence scale (it was deleted earlier).
CBN[,EqSizeCalory :=(Size-NKids) + NKids*(1800/2100)]
CBN[,Per_Daily_Exp_Calories:=Daily_Exp_Calories/EqSizeCalory]
CBN <- CBN[Per_Daily_Exp_Calories<100000] # arbitrary removal of outliers
#CBN[,Daily_Calories_cluster:=weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(Daily_Calories_cluster,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
#CBN[,weighted.mean(Size,Weight,na.rm = TRUE),by=cluster]
#CBN[,sum(Weight*Size),by=cluster]
#CBN[,sum(Weight),by=cluster]
#CBN[,sum(Poor),by=cluster]
#Calculate_Per_calories
# Per-equivalent-adult calories from each item (same kcal/g factors as the
# *_Calory block above, divided by the calorie equivalence scale).
CBN$Ghand_per_Calory<- CBN$Ghandgram *4/CBN$EqSizeCalory
CBN$Hoboobat_per_Calory<- CBN$Hoboobatgram *3/CBN$EqSizeCalory
CBN$Nan_per_Calory<- CBN$Nangram *2.5/CBN$EqSizeCalory
CBN$Berenj_per_Calory<- CBN$Berenjgram *1.2/CBN$EqSizeCalory
CBN$Roghan_per_Calory<- CBN$Roghangram *8/CBN$EqSizeCalory
CBN$Goosht_per_Calory<- CBN$Gooshtgram *2.5/CBN$EqSizeCalory
CBN$Morgh_per_Calory<- CBN$Morghgram *2/CBN$EqSizeCalory
CBN$Mahi_per_Calory<- CBN$Mahigram *1/CBN$EqSizeCalory
CBN$Shir_per_Calory<- CBN$Shirgram *2.5/CBN$EqSizeCalory
CBN$Mast_per_Calory<- CBN$Mastgram *1.5/CBN$EqSizeCalory
# BUG FIX: previously computed from Mastgram (copy-paste from the Mast line);
# cheese (Panir) calories must come from Panirgram.
CBN$Panir_per_Calory<- CBN$Panirgram *2.5/CBN$EqSizeCalory
CBN$Tokhmemorgh_per_Calory<- CBN$Tokhmemorghgram *1.4/CBN$EqSizeCalory
CBN$Mive_per_Calory<- CBN$Mivegram *0.5/CBN$EqSizeCalory
CBN$Sabzi_per_Calory<- CBN$Sabzigram *0.5/CBN$EqSizeCalory
CBN$Makarooni_per_Calory<- CBN$Makaroonigram *3.6/CBN$EqSizeCalory
CBN$Sibzamini_per_Calory<- CBN$Sibzaminigram *0.9/CBN$EqSizeCalory
#Assume that deciles 1 and 2 are poor
# First-stage poverty flag: bottom two weighted deciles.
CBN[,Poor:=ifelse(Decile %in% 1:2,1,0)]
CBNPoor<-CBN[Poor==1]
OldfirstUrban<-CBN[,.(HHID,Percentile,Poor)]
# NOTE(review): saved under the filename "OldFoodUrban.rda", which this script
# also uses later for a DIFFERENT table (OldFoodUrban) — the later save will
# overwrite this one. Confirm the filename; also relies on `year` being
# defined (the year loop above is commented out).
save(OldfirstUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFoodUrban.rda"))
#K-means weights
# Province-level weighted means of item budget weights (used to weight the
# clustering features).
PriceWeights<-CBN[,.(HHID,Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W,ProvinceCode,Weight)]
dt3 <- PriceWeights[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
dt3<- dt3[order(ProvinceCode)]
dt3 <- dt3[,.(Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W)]
#K-means algorithm for clustering by prices
# Province-level weighted mean prices among the first-stage poor.
test<-CBNPoor[,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice,ProvinceCode,Weight)]
#test<-CBNPoor[,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice,Ghand_W,Hoboobat_W,Roghan_W,Berenj_W,Nan_W,Goosht_W,Morgh_W,Mahi_W,Shir_W,Mast_W,Panir_W,Tokhmemorgh_W,Mive_W,Sabzi_W,Makarooni_W,Sibzamini_W,Home_W,Region,ProvinceCode,Weight)]
dt2 <- test[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
dt2<- dt2[order(ProvinceCode)]
# Impute a fixed fish price where a province has no observations (NaN mean).
for (col in c("MahiPrice")) dt2[is.nan(get(col)), (col) := 200000]
dt <- dt2 [,.(GhandPrice,HoboobatPrice,RoghanPrice,BerenjPrice,NanPrice,GooshtPrice,MorghPrice,MahiPrice,ShirPrice,MastPrice,PanirPrice,TokhmemorghPrice,MivePrice,SabziPrice,MakarooniPrice,SibzaminiPrice,MetrPrice)]
# Principal components of the province-level price matrix (correlation matrix,
# i.e. standardized variables). cor=TRUE spelled out instead of T.
pca <- princomp(dt, cor=TRUE)
PRICE <- pca$scores
# Sign-flipped score vectors PRICE1..PRICE17, one per component — identical
# to the original seventeen `PRICEi <- -1*PRICE[,i]` assignments, written as
# a loop over however many components princomp returns.
# (Only PRICE1/PRICE2 are referenced later, in a commented-out plot.)
for (i in seq_len(ncol(PRICE))) assign(paste0("PRICE", i), -1 * PRICE[, i])
# Deciding how many clusters
# Elbow plot: within-group sum of squares for k = 1..30.
wss <- (nrow(dt)-1)*sum(apply(dt,2,var))
for (i in 2:30) wss[i] <- sum(kmeans(dt, centers=i)$withinss)
plot(1:30, wss, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares")
#Weighted clustering
# Scale each price feature by sqrt of its mean budget weight, then k-means
# with k=4.
dt3.m <- dt3[,lapply(.SD, mean)] # Weights for each vector
dtW <- dt * sqrt(dt3.m[rep(1,nrow(dt))]) # Weighted observations
# NOTE(review): kmeans is run twice with random initialisation and no
# set.seed — the first call's result is discarded, and the assignment below
# may differ from it (and between runs). Set a seed for reproducibility.
kmeans(dtW,4) # Simple K-means
cl <- kmeans(dtW,4)
cl$cluster
# Attach the cluster labels to the province table and merge onto the poor.
dt2 <- dt2[,cluster:=data.table(cl$cluster)]
dt2<-dt2[,.(ProvinceCode,cluster)]
# NOTE(review): dt4Urban.rda is loaded but nothing from it is referenced in
# this visible section — possibly a saved cluster assignment; verify.
load(file="dt4Urban.rda")
#plot(PRICE1, PRICE2,col=cl$cluster)
#points(cl$centers, pch=20)
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(Weight*Size),by=cluster]
CBNPoor[,sum(Weight),by=cluster]
CBNPoor[,sum(Poor),by=cluster]
C2<-CBNPoor[,.(HHID,ProvinceCode,Region,Decile,Poor,cluster)]
######################################################################
####Iteration 1#####
###Iteration1-1
#Calculate Per_calories in clusters
# Cluster-level weighted mean of per-equivalent calories for each food item.
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# Total cluster calories = row-sum over the 16 columns just created.
# NOTE(review): positional .SDcols=186:201 — fragile; keep aligned with the
# sixteen Daily_*_cluster columns above.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#Calculate Per_calories in clusters(=2100)
# Rescale each item so the cluster bundle sums to exactly 2100 kcal/day;
# NA (e.g. zero-total clusters) becomes 0.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# Sanity total of the rescaled bundle (positional .SDcols=203:218 — fragile).
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE)]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE)]
#Real Prices
# Tehran (ProvinceCode 2301) is the base region: province-level price indexes
# are expressed relative to Tehran's food-bundle value and housing price.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Food-bundle and housing price indexes by province (Tehran = 1).
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
# Combined index = simple average of the food and housing indexes,
# aggregated to one row per province.
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes31<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Drop any pre-existing RealPriceIndex before merging; otherwise merge() would
# produce RealPriceIndex.x/.y columns and the deflation lines below would fail.
# The later iterations already do this (CBN[,RealPriceIndex:=NULL]); the
# conditional makes it a no-op when the column does not exist yet.
if ("RealPriceIndex" %in% names(CBN)) CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
# Deflate nominal food and non-durable expenditures to Tehran prices.
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
# Re-rank households by real (deflated) per-capita non-durable expenditure.
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
# Printed as a quick sanity check of the total survey weight.
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
# Weighted deciles/percentiles: cut the cumulative weight into equal shares.
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# NOTE(review): this iteration flags deciles 1-2 as the reference poor group,
# while every later iteration in this file uses deciles 2-5 — confirm the
# difference is intentional.
CBN[,Poor:=ifelse(Decile %in% 1:2,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
# Diagnostic print: weighted population of the reference group by cluster.
CBNPoor[,sum(Size*Weight),by=.(cluster)][order(cluster)]
###Iteration1-2
#Calculate Per_calories in clusters
# Refinement pass: recompute cluster-level calorie composition, calorie
# prices, the 2100-kcal bundle value, provincial price indexes and the
# weighted expenditure deciles on the current reference (poor) households.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Weighted mean daily calories per food group within each cluster.
for (item in food_items) {
  dst <- paste0("Daily_", item, "_cluster")
  src <- paste0(item, "_per_Calory")
  CBNPoor[, (dst) := weighted.mean(get(src), Weight, na.rm = TRUE), by = cluster]
}
# Total cluster calories; .SDcols is positional (columns 186:201 must be the
# 16 Daily_*_cluster columns created just above, in order).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each food group to a 2100 kcal/day reference diet; NA results
# (empty clusters) become 0. Same column order as the unrolled original.
for (item in food_items) {
  src <- paste0("Daily_", item, "_cluster")
  dst <- paste0("Daily2_", item)
  CBNPoor[, (dst) := get(src) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(dst)), (dst) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = c(203:218)][]
# Cluster-level food expenditure and implied calorie price.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
#Calculate per_Calory from resturants
# 70% of restaurant spending is treated as food, converted to calories.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
#sum of total food expenditures
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Province price indexes relative to Tehran (ProvinceCode 2301).
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301, select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes32 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index and deflate expenditures to Tehran prices.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN <- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
#Update Poors
# Reference group for the next pass: weighted deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-3
#Calculate Per_calories in clusters
# Refinement pass: recompute cluster-level calorie composition, calorie
# prices, the 2100-kcal bundle value, provincial price indexes and the
# weighted expenditure deciles on the current reference (poor) households.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Weighted mean daily calories per food group within each cluster.
for (item in food_items) {
  dst <- paste0("Daily_", item, "_cluster")
  src <- paste0(item, "_per_Calory")
  CBNPoor[, (dst) := weighted.mean(get(src), Weight, na.rm = TRUE), by = cluster]
}
# Total cluster calories; .SDcols is positional (columns 186:201 must be the
# 16 Daily_*_cluster columns created just above, in order).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each food group to a 2100 kcal/day reference diet; NA results
# (empty clusters) become 0. Same column order as the unrolled original.
for (item in food_items) {
  src <- paste0("Daily_", item, "_cluster")
  dst <- paste0("Daily2_", item)
  CBNPoor[, (dst) := get(src) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(dst)), (dst) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = c(203:218)][]
# Cluster-level food expenditure and implied calorie price.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
#Calculate per_Calory from resturants
# 70% of restaurant spending is treated as food, converted to calories.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
#sum of total food expenditures
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Province price indexes relative to Tehran (ProvinceCode 2301).
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301, select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes33 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index and deflate expenditures to Tehran prices.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN <- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
#Update Poors
# Reference group for the next pass: weighted deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-4
#Calculate Per_calories in clusters
# Refinement pass: recompute cluster-level calorie composition, calorie
# prices, the 2100-kcal bundle value, provincial price indexes and the
# weighted expenditure deciles on the current reference (poor) households.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Weighted mean daily calories per food group within each cluster.
for (item in food_items) {
  dst <- paste0("Daily_", item, "_cluster")
  src <- paste0(item, "_per_Calory")
  CBNPoor[, (dst) := weighted.mean(get(src), Weight, na.rm = TRUE), by = cluster]
}
# Total cluster calories; .SDcols is positional (columns 186:201 must be the
# 16 Daily_*_cluster columns created just above, in order).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each food group to a 2100 kcal/day reference diet; NA results
# (empty clusters) become 0. Same column order as the unrolled original.
for (item in food_items) {
  src <- paste0("Daily_", item, "_cluster")
  dst <- paste0("Daily2_", item)
  CBNPoor[, (dst) := get(src) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(dst)), (dst) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = c(203:218)][]
# Cluster-level food expenditure and implied calorie price.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
#Calculate per_Calory from resturants
# 70% of restaurant spending is treated as food, converted to calories.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
#sum of total food expenditures
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Province price indexes relative to Tehran (ProvinceCode 2301).
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301, select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes34 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index and deflate expenditures to Tehran prices.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN <- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
#Update Poors
# Reference group for the next pass: weighted deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-5
#Calculate Per_calories in clusters
# Refinement pass: recompute cluster-level calorie composition, calorie
# prices, the 2100-kcal bundle value, provincial price indexes and the
# weighted expenditure deciles on the current reference (poor) households.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Weighted mean daily calories per food group within each cluster.
for (item in food_items) {
  dst <- paste0("Daily_", item, "_cluster")
  src <- paste0(item, "_per_Calory")
  CBNPoor[, (dst) := weighted.mean(get(src), Weight, na.rm = TRUE), by = cluster]
}
# Total cluster calories; .SDcols is positional (columns 186:201 must be the
# 16 Daily_*_cluster columns created just above, in order).
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols = c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each food group to a 2100 kcal/day reference diet; NA results
# (empty clusters) become 0. Same column order as the unrolled original.
for (item in food_items) {
  src <- paste0("Daily_", item, "_cluster")
  dst <- paste0("Daily2_", item)
  CBNPoor[, (dst) := get(src) * 2100 / Daily_Calories_cluster2]
  CBNPoor[is.na(get(dst)), (dst) := 0]
}
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols = c(203:218)][]
# Cluster-level food expenditure and implied calorie price.
CBNPoor[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]
#Calculate per_Calory from resturants
# 70% of restaurant spending is treated as food, converted to calories.
CBNPoor[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor <- CBNPoor[Per_Daily_Exp_Calories != 0]
CBNPoor[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]
#sum of total food expenditures
CBNPoor[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]
CBN[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
CBNPoor[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Province price indexes relative to Tehran (ProvinceCode 2301).
T_Bundle_Value <- subset(CBNPoor, ProvinceCode == 2301, select = c(Bundle_Value, Home_Per_Metr, Weight))
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value, T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr, T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1, by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2, by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE), by = .(ProvinceCode)]
Indexes35 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace last iteration's index and deflate expenditures to Tehran prices.
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN <- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN[, cumweight := cumsum(Weight)]
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[, Decile := cut(cumweight, breaks = seq(0, tx, tx / 10), labels = 1:10)]
CBN[, Percentile := cut(cumweight, breaks = seq(0, tx, tx / 100), labels = 1:100)]
#Update Poors
# Reference group for the next pass: weighted deciles 2-5.
CBN[, Poor := ifelse(Decile %in% 2:5, 1, 0)]
CBNPoor <- CBN[Poor == 1]
CBNPoor <- merge(CBNPoor, dt2, by = c("ProvinceCode"), all.x = TRUE)
CBNPoor[, sum(HIndivNo), by = .(ProvinceCode)][order(ProvinceCode)]
###Iteration1-6
### One full pass of the iterative CBN (Cost of Basic Needs) calibration:
### (1) weighted mean per-capita daily calories by food group within clusters,
### (2) rescale to a 2100 kcal/day bundle, (3) price the bundle, (4) province
### price indexes normalized to province 2301, (5) re-deflate expenditures,
### (6) weighted re-ranking into deciles, (7) refresh the reference-poor subset.
#Calculate Per_calories in clusters
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols (186:201) — presumably the 16 Daily_*_cluster
# columns above; fragile against any upstream column change. TODO confirm.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each food group's cluster calories to a 2100 kcal/day bundle; the
# one-element for-loops reset NaN shares (zero-calorie clusters) to 0.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols (203:218) — presumably the 16 Daily2_* columns.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
# Implicit price of one calorie in each cluster.
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# 0.7 presumably the food share of restaurant bills — TODO confirm.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of a 2100 kcal/day food bundle at the household's observed prices.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Reference province: code 2301 (presumably Tehran) — indexes normalize to it.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Province price indexes: food-bundle based (1) and housing based (2).
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
# Combined index = simple average; Indexes36 snapshots this iteration's table.
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes36<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Replace the previous iteration's index on the full sample, then deflate.
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# Reference poor = deciles 2-5; rebuild CBNPoor for the next pass.
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-7
### One full pass of the iterative CBN calibration (identical structure to the
### other Iteration1-N passes; only the Indexes37 snapshot name differs):
### cluster calorie means -> 2100 kcal bundle -> bundle price -> province
### price indexes (base = province 2301) -> deflation -> weighted deciles ->
### refreshed reference-poor subset.
#Calculate Per_calories in clusters
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols (186:201) — presumably the 16 Daily_*_cluster
# columns above; fragile against any upstream column change. TODO confirm.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale to a 2100 kcal/day bundle; one-element for-loops reset NaN shares to 0.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols (203:218) — presumably the 16 Daily2_* columns.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
# Implicit price of one calorie in each cluster.
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# 0.7 presumably the food share of restaurant bills — TODO confirm.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of a 2100 kcal/day food bundle at the household's observed prices.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Reference province: code 2301 (presumably Tehran) — indexes normalize to it.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Province price indexes: food-bundle based (1) and housing based (2).
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
# Combined index = simple average; Indexes37 snapshots this iteration's table.
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes37<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Replace the previous iteration's index on the full sample, then deflate.
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# Reference poor = deciles 2-5; rebuild CBNPoor for the next pass.
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-8
### One full pass of the iterative CBN calibration (identical structure to the
### other Iteration1-N passes; only the Indexes38 snapshot name differs):
### cluster calorie means -> 2100 kcal bundle -> bundle price -> province
### price indexes (base = province 2301) -> deflation -> weighted deciles ->
### refreshed reference-poor subset.
#Calculate Per_calories in clusters
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols (186:201) — presumably the 16 Daily_*_cluster
# columns above; fragile against any upstream column change. TODO confirm.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale to a 2100 kcal/day bundle; one-element for-loops reset NaN shares to 0.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols (203:218) — presumably the 16 Daily2_* columns.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
# Implicit price of one calorie in each cluster.
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# 0.7 presumably the food share of restaurant bills — TODO confirm.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of a 2100 kcal/day food bundle at the household's observed prices.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Reference province: code 2301 (presumably Tehran) — indexes normalize to it.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Province price indexes: food-bundle based (1) and housing based (2).
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
# Combined index = simple average; Indexes38 snapshots this iteration's table.
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes38<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Replace the previous iteration's index on the full sample, then deflate.
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# Reference poor = deciles 2-5; rebuild CBNPoor for the next pass.
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-9
### One full pass of the iterative CBN calibration (identical structure to the
### other Iteration1-N passes; only the Indexes39 snapshot name differs):
### cluster calorie means -> 2100 kcal bundle -> bundle price -> province
### price indexes (base = province 2301) -> deflation -> weighted deciles ->
### refreshed reference-poor subset.
#Calculate Per_calories in clusters
CBNPoor[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols (186:201) — presumably the 16 Daily_*_cluster
# columns above; fragile against any upstream column change. TODO confirm.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(186:201)][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale to a 2100 kcal/day bundle; one-element for-loops reset NaN shares to 0.
CBNPoor[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols (203:218) — presumably the 16 Daily2_* columns.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(203:218)][]
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
# Implicit price of one calorie in each cluster.
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# 0.7 presumably the food share of restaurant bills — TODO confirm.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of a 2100 kcal/day food bundle at the household's observed prices.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
#Real Prices
# Reference province: code 2301 (presumably Tehran) — indexes normalize to it.
T_Bundle_Value<-subset(CBNPoor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Province price indexes: food-bundle based (1) and housing based (2).
CBNPoor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBNPoor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBNPoor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
# Combined index = simple average; Indexes39 snapshots this iteration's table.
Indexes2<-CBNPoor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
Indexes2<-Indexes2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes<-Indexes2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes39<-Indexes[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Replace the previous iteration's index on the full sample, then deflate.
CBN[,RealPriceIndex:=NULL]
CBN<-merge(CBN,Indexes,by=c("ProvinceCode"),all.x = TRUE)
CBN<-CBN[,Total_Food_Month_Per2:=FoodExpenditure_Per*RealPriceIndex]
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# Reference poor = deciles 2-5; rebuild CBNPoor for the next pass.
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
CBNPoor[,sum(HIndivNo),by=.(ProvinceCode)][order(ProvinceCode)]
###Iteration1-10
#Calculate Per_calories in clusters
# Weighted mean daily per-capita calories for each of the 16 bundle food
# items, averaged within each cluster over the current reference population.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Columns are created in the same order as the original copy-pasted
# statements so any later position-based column references stay valid.
for (item in food_items) {
  src_col <- paste0(item, "_per_Calory")
  dst_col <- paste0("Daily_", item, "_cluster")
  CBNPoor[, (dst_col) := weighted.mean(get(src_col), Weight, na.rm = TRUE), by = cluster]
}
# FIX: sum the 16 item columns by NAME. The original used positional
# .SDcols = c(186:201), which silently selects the wrong columns whenever
# the column layout shifts between iterations.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal/day;
# NA results (e.g. division by a zero/NA cluster total) are set to 0,
# exactly as the original per-column fix-up loops did.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
for (item in food_items) {
  cl_col  <- paste0("Daily_", item, "_cluster")
  out_col <- paste0("Daily2_", item)
  CBNPoor[, (out_col) := (get(cl_col) * 2100) / (Daily_Calories_cluster2)]
  CBNPoor[is.na(get(out_col)), (out_col) := 0]
}
# FIX: sum the scaled columns by NAME instead of the original positional
# .SDcols = c(203:218), which breaks if the column layout shifts.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)][]
# Cluster-level food spending and the implied price of one daily calorie.
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# Convert restaurant spending into implied calories: 70% of the bill is
# treated as food, divided per calorie-equivalent household member, then
# priced at the cluster calorie price.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
# Drop households with zero recorded daily calories from purchases.
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
# Diagnostics (printed): calorie components by cluster.
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
# Same total is also needed on the full sample for the poverty-flag step.
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of the household's food basket rescaled to 2100 kcal/day.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#CBN[,weighted.mean(Poor,Weight),by=cluster][order(cluster)]
# Real (spatial) price indexes, normalised to Tehran (ProvinceCode 2301):
# index 1 from the food bundle value, index 2 from housing price per metre.
T_Bundle_Value <- CBNPoor[ProvinceCode == 2301,
                          .(Bundle_Value, Home_Per_Metr, Weight)]
Tehran_Bundle_Value1 <- weighted.mean(T_Bundle_Value$Bundle_Value,
                                      T_Bundle_Value$Weight, na.rm = TRUE)
Tehran_Bundle_Value2 <- weighted.mean(T_Bundle_Value$Home_Per_Metr,
                                      T_Bundle_Value$Weight, na.rm = TRUE)
CBNPoor[, RealPriceIndex1 := weighted.mean(Bundle_Value, Weight, na.rm = TRUE) / Tehran_Bundle_Value1,
        by = ProvinceCode]
CBNPoor[, RealPriceIndex2 := weighted.mean(Home_Per_Metr, Weight, na.rm = TRUE) / Tehran_Bundle_Value2,
        by = ProvinceCode]
# Diagnostics: per-province values of the two indexes
CBNPoor[, weighted.mean(RealPriceIndex1, Weight), by = ProvinceCode]
CBNPoor[, weighted.mean(RealPriceIndex2, Weight), by = ProvinceCode]
# Combined index = simple average of the two, collapsed to one row per province
Indexes2 <- CBNPoor[, .(RealPriceIndex1, RealPriceIndex2, ProvinceCode, Weight)]
Indexes2[, RealPriceIndex := (RealPriceIndex1 + RealPriceIndex2) / 2]
Indexes <- Indexes2[, lapply(.SD, weighted.mean, w = Weight, na.rm = TRUE),
                    by = .(ProvinceCode)]
Indexes310 <- Indexes[, .(ProvinceCode, RealPriceIndex1, RealPriceIndex2, RealPriceIndex)]
Indexes <- Indexes[, .(ProvinceCode, RealPriceIndex)]
# Replace the index on the full sample and deflate nominal expenditures
CBN[, RealPriceIndex := NULL]
CBN <- merge(CBN, Indexes, by = c("ProvinceCode"), all.x = TRUE)
CBN[, Total_Food_Month_Per2 := FoodExpenditure_Per * RealPriceIndex]
CBN[, Total_Exp_Month_Per2 := Total_Exp_Month_Per_nondurable * RealPriceIndex]
#utils::View(CBN)
#Sort Expenditure data
# Re-rank households by the freshly deflated per-capita expenditure.
CBN<- CBN[order(Total_Exp_Month_Per2)]
#Calculate cumulative weights
# Printed at top level for inspection: total survey weight.
sum(CBN$Weight)
CBN$cumweight <- cumsum(CBN$Weight)
tx <- max(CBN$cumweight)
#Calculate deciles by weights
# Weighted deciles/percentiles: cut the cumulative weight into 10/100 equal slices.
CBN[,Decile:=cut(cumweight,breaks = seq(0,tx,tx/10),labels = 1:10)]
CBN[,Percentile:=cut(cumweight,breaks=seq(0,tx,tx/100),labels=1:100)]
#Update Poors
# Reference population for the food bundle: weighted deciles 2-5.
CBN[,Poor:=ifelse(Decile %in% 2:5,1,0)]
CBNPoor<-CBN[Poor==1]
# Re-attach cluster info (dt2) to the new reference set.
CBNPoor<-merge(CBNPoor,dt2,by=c("ProvinceCode"),all.x = TRUE)
# Printed for inspection: head counts by cluster.
CBNPoor[,sum(HIndivNo),by=.(cluster)][order(cluster)]
#Calculate Per_calories in clusters
# Weighted mean daily per-capita calories for each of the 16 bundle food
# items, averaged within each cluster over the updated reference population.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Columns are created in the same order as the original copy-pasted
# statements so any later position-based column references stay valid.
for (item in food_items) {
  src_col <- paste0(item, "_per_Calory")
  dst_col <- paste0("Daily_", item, "_cluster")
  CBNPoor[, (dst_col) := weighted.mean(get(src_col), Weight, na.rm = TRUE), by = cluster]
}
# FIX: sum the 16 item columns by NAME. The original used positional
# .SDcols = c(189:204), which silently selects the wrong columns whenever
# the column layout shifts between iterations.
CBNPoor[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily_", food_items, "_cluster")][]
#utils::View(CBNPoor)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal/day;
# NA results (e.g. division by a zero/NA cluster total) are set to 0,
# exactly as the original per-column fix-up loops did.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
for (item in food_items) {
  cl_col  <- paste0("Daily_", item, "_cluster")
  out_col <- paste0("Daily2_", item)
  CBNPoor[, (out_col) := (get(cl_col) * 2100) / (Daily_Calories_cluster2)]
  CBNPoor[is.na(get(out_col)), (out_col) := 0]
}
# FIX: sum the scaled columns by NAME instead of the original positional
# .SDcols = c(206:221), which breaks if the column layout shifts.
CBNPoor[, Daily_Calories3 := Reduce(`+`, .SD),
        .SDcols = paste0("Daily2_", food_items)][]
# Cluster-level food spending and the implied price of one daily calorie.
CBNPoor[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# Restaurant bill -> implied calories: 70% of the bill counted as food, per
# calorie-equivalent member, valued at the cluster calorie price.
CBNPoor[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor[is.na(get(col)), (col) := 0]
# Drop households with zero recorded daily calories from purchases.
CBNPoor<-CBNPoor[Per_Daily_Exp_Calories!=0]
CBNPoor[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
# Side table kept for later inspection of the calorie components.
CBNCalory<-CBNPoor[,.(Per_Daily_Calories,Per_Daily_Exp_Calories,Per_Calory_Resturant,Resturant_Exp,cluster,ProvinceCode)]
# Diagnostics (printed): calorie components by cluster.
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
# Same total is also needed on the full sample for the poverty-flag step.
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
#Food expenditures (equal 2100 CCAL)
# Cost of the household's food basket rescaled to 2100 kcal/day.
CBNPoor[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
# Diagnostics (printed): calorie and expenditure summaries plus weighted
# population size per cluster.
CBNPoor[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor[,sum(Size*Weight),by=cluster][order(cluster)]
#Food Poverty Line for each cluster
# Iteration-1 food poverty line per cluster: weighted mean bundle value over
# the cluster's reference households. (Loop replaces four copy-pasted
# subset/mean pairs; CBNPoorCluster is left holding cluster 4, as before.)
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor[cluster == k]
  assign(paste0("Food_Povertyline", k, "_1"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}
# Attach the cluster assignment (dt2) to the full sample, then keep a compact
# working copy of the key expenditure columns.
CBN <- merge(CBN, dt2, by = c("ProvinceCode"), all.x = TRUE)
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
#########Iteration 2###############
# Re-sort both tables by deflated food expenditure.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]
# Flag households whose total food spending falls below their cluster's
# iteration-1 food poverty line. Cluster 1 initialises Poor2 to 0/1; clusters
# 2-4 only overwrite their own rows (ifelse semantics identical to the
# original four copy-pasted statements).
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_1"))
  if (k == 1) {
    CBN[, Poor2 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor2 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor2 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor2)]
    c[, Poor2 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor2)]
  }
}
# Diagnostics: new head-count ratio by cluster; calories of the previous set.
CBN[, weighted.mean(Poor2, Weight), by = cluster][order(cluster)]
CBNPoor[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
# New reference population for iteration 2.
CBNPoor2 <- CBN[Poor2 == 1]
#CalculatePer_calories in clusters
# Weighted mean daily per-capita calories for each of the 16 bundle food
# items, averaged within each cluster over the iteration-2 reference set.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Columns are created in the same order as the original copy-pasted
# statements so any later position-based column references stay valid.
for (item in food_items) {
  src_col <- paste0(item, "_per_Calory")
  dst_col <- paste0("Daily_", item, "_cluster")
  CBNPoor2[, (dst_col) := weighted.mean(get(src_col), Weight, na.rm = TRUE), by = cluster]
}
# FIX: sum the 16 item columns by NAME. The original used positional
# .SDcols = c(190:205), which silently selects the wrong columns whenever
# the column layout shifts between iterations.
CBNPoor2[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily_", food_items, "_cluster")][]
#utils::View(CBNPoor2)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal/day;
# NA results (e.g. division by a zero/NA cluster total) are set to 0,
# exactly as the original per-column fix-up loops did.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
for (item in food_items) {
  cl_col  <- paste0("Daily_", item, "_cluster")
  out_col <- paste0("Daily2_", item)
  CBNPoor2[, (out_col) := (get(cl_col) * 2100) / (Daily_Calories_cluster2)]
  CBNPoor2[is.na(get(out_col)), (out_col) := 0]
}
# FIX: sum the scaled columns by NAME instead of the original positional
# .SDcols = c(207:222), which breaks if the column layout shifts.
CBNPoor2[, Daily_Calories3 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily2_", food_items)][]
# Cluster-level food spending and the implied price of one daily calorie.
CBNPoor2[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor2[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# Restaurant bill -> implied calories: 70% of the bill counted as food, per
# calorie-equivalent member, valued at the cluster calorie price.
CBNPoor2[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor2[is.na(get(col)), (col) := 0]
# Drop households with zero recorded daily calories from purchases.
CBNPoor2<-CBNPoor2[Per_Daily_Exp_Calories!=0]
CBNPoor2[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
# Diagnostics (printed): calorie components by cluster.
CBNPoor2[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor2[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
# Cost of the household's food basket rescaled to 2100 kcal/day.
CBNPoor2[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor2[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
# Diagnostics (printed): calorie and expenditure summaries by cluster.
CBNPoor2[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor2[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Iteration-2 food poverty line per cluster: weighted mean bundle value over
# the cluster's reference households. (Loop replaces four copy-pasted
# subset/mean pairs; CBNPoorCluster is left holding cluster 4, as before.)
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor2[cluster == k]
  assign(paste0("Food_Povertyline", k, "_2"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}
# Refresh the compact working copy of the key expenditure columns.
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
#########Iteration 3###############
# Re-sort both tables by deflated food expenditure.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]
# Flag households below their cluster's iteration-2 food poverty line.
# Cluster 1 initialises Poor3 to 0/1; clusters 2-4 only overwrite their own
# rows (ifelse semantics identical to the original statements).
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_2"))
  if (k == 1) {
    CBN[, Poor3 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor3 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor3 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor3)]
    c[, Poor3 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor3)]
  }
}
# Diagnostics: new head-count ratio by cluster; calories of the previous set.
CBN[, weighted.mean(Poor3, Weight), by = cluster][order(cluster)]
CBNPoor2[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
# New reference population for iteration 3.
CBNPoor3 <- CBN[Poor3 == 1]
#CalculatePer_calories in clusters
# Weighted mean daily per-capita calories for each of the 16 bundle food
# items, averaged within each cluster over the iteration-3 reference set.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
# Columns are created in the same order as the original copy-pasted
# statements so any later position-based column references stay valid.
for (item in food_items) {
  src_col <- paste0(item, "_per_Calory")
  dst_col <- paste0("Daily_", item, "_cluster")
  CBNPoor3[, (dst_col) := weighted.mean(get(src_col), Weight, na.rm = TRUE), by = cluster]
}
# FIX: sum the 16 item columns by NAME. The original used positional
# .SDcols = c(191:206), which silently selects the wrong columns whenever
# the column layout shifts between iterations.
CBNPoor3[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily_", food_items, "_cluster")][]
#utils::View(CBNPoor3)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal/day;
# NA results (e.g. division by a zero/NA cluster total) are set to 0,
# exactly as the original per-column fix-up loops did.
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")
for (item in food_items) {
  cl_col  <- paste0("Daily_", item, "_cluster")
  out_col <- paste0("Daily2_", item)
  CBNPoor3[, (out_col) := (get(cl_col) * 2100) / (Daily_Calories_cluster2)]
  CBNPoor3[is.na(get(out_col)), (out_col) := 0]
}
# FIX: sum the scaled columns by NAME instead of the original positional
# .SDcols = c(208:223), which breaks if the column layout shifts.
CBNPoor3[, Daily_Calories3 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily2_", food_items)][]
# Cluster-level food spending and the implied price of one daily calorie.
CBNPoor3[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor3[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# Restaurant bill -> implied calories: 70% of the bill counted as food, per
# calorie-equivalent member, valued at the cluster calorie price.
CBNPoor3[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor3[is.na(get(col)), (col) := 0]
# Drop households with zero recorded daily calories from purchases.
CBNPoor3<-CBNPoor3[Per_Daily_Exp_Calories!=0]
CBNPoor3[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
# Diagnostics (printed): calorie components by cluster.
CBNPoor3[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor3[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor3[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
# Cost of the household's food basket rescaled to 2100 kcal/day.
CBNPoor3[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor3[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
# Diagnostics (printed): calorie and expenditure summaries by cluster.
CBNPoor3[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor3[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Iteration-3 food poverty line per cluster: weighted mean bundle value over
# the cluster's reference households. (Loop replaces four copy-pasted
# subset/mean pairs; CBNPoorCluster is left holding cluster 4, as before.)
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor3[cluster == k]
  assign(paste0("Food_Povertyline", k, "_3"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}
# Refresh the compact working copy of the key expenditure columns.
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
#########Iteration 4###############
# Re-sort both tables by deflated food expenditure.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]
# Flag households below their cluster's iteration-3 food poverty line.
# Cluster 1 initialises Poor4 to 0/1; clusters 2-4 only overwrite their own
# rows (ifelse semantics identical to the original statements).
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_3"))
  if (k == 1) {
    CBN[, Poor4 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor4 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor4 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor4)]
    c[, Poor4 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor4)]
  }
}
# Diagnostics: new head-count ratio by cluster; calories of the previous set.
CBN[, weighted.mean(Poor4, Weight), by = cluster][order(cluster)]
CBNPoor3[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
# New reference population for iteration 4.
CBNPoor4 <- CBN[Poor4 == 1]
#CalculatePer_calories in clusters
CBNPoor4[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor4)
#Calculate Per_calories in clusters(=2100)
CBNPoor4[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
CBNPoor4[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor4[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor4[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor4[is.na(get(col)), (col) := 0]
CBNPoor4<-CBNPoor4[Per_Daily_Exp_Calories!=0]
CBNPoor4[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor4[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor4[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor4[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor4[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor4[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor4[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor4[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
#cluster 1
CBNPoorCluster<-CBNPoor4[cluster==1]
Food_Povertyline1_4<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor4[cluster==2]
Food_Povertyline2_4<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor4[cluster==3]
Food_Povertyline3_4<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor4[cluster==4]
Food_Povertyline4_4<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
######### Iteration 5 ###############
# Refinement pass 5: households below the iteration-4 cluster lines form the
# new reference group (CBNPoor5); the cost of its 2100-kcal/day bundle gives
# Food_Povertyline{1..4}_5.
# NOTE(review): `c` is a data.table shadowing base::c().

# Food groups in column-creation order (fixes Daily_*_cluster / Daily2_*
# layout).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")

# Sort expenditure data.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Flag the new poor against the iteration-4 cluster-specific lines.
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_4"))
  if (k == 1) {
    CBN[, Poor5 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor5 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor5 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor5)]
    c[, Poor5 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor5)]
  }
}
# Diagnostics: weighted head-count per cluster; calories of the previous group.
CBN[, weighted.mean(Poor5, Weight), by = cluster][order(cluster)]
CBNPoor4[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# New reference group.
CBNPoor5 <- CBN[Poor5 == 1]

# Cluster-level weighted mean daily calories per food group.
for (item in food_items) {
  CBNPoor5[, (paste0("Daily_", item, "_cluster")) :=
             weighted.mean(get(paste0(item, "_per_Calory")), Weight, na.rm = TRUE),
           by = cluster]
}
# Total cluster calories. Original used fragile positional .SDcols = c(191:206);
# use the names of the 16 columns just created.
CBNPoor5[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily_", food_items, "_cluster")][]

# Scale each food group to a 2100-kcal/day bundle; NA -> 0 as in the original.
for (item in food_items) {
  d2 <- paste0("Daily2_", item)
  CBNPoor5[, (d2) := (get(paste0("Daily_", item, "_cluster")) * 2100) / Daily_Calories_cluster2]
  CBNPoor5[is.na(get(d2)), (d2) := 0]
}
# Original used positional .SDcols = c(208:223) (the Daily2_* columns).
CBNPoor5[, Daily_Calories3 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily2_", food_items)][]

# Price of one calorie within each cluster.
CBNPoor5[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor5[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories implicit in restaurant spending (70% treated as food).
CBNPoor5[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor5[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor5 <- CBNPoor5[Per_Daily_Exp_Calories != 0]
CBNPoor5[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Total food expenditure including the restaurant share.
CBNPoor5[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Cost of the 2100-kcal/day bundle.
CBNPoor5[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor5[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Further interactive diagnostics.
CBNPoor5[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Iteration-5 food poverty line per cluster (weighted mean bundle cost).
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor5[cluster == k]
  assign(paste0("Food_Povertyline", k, "_5"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}

# Refresh the working copy of key expenditure columns for the next iteration.
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
######### Iteration 6 ###############
# Refinement pass 6: households below the iteration-5 cluster lines form the
# new reference group (CBNPoor6); the cost of its 2100-kcal/day bundle gives
# Food_Povertyline{1..4}_6.
# NOTE(review): `c` is a data.table shadowing base::c().

# Food groups in column-creation order (fixes Daily_*_cluster / Daily2_*
# layout).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")

# Sort expenditure data.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Flag the new poor against the iteration-5 cluster-specific lines.
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_5"))
  if (k == 1) {
    CBN[, Poor6 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor6 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor6 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor6)]
    c[, Poor6 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor6)]
  }
}
# Diagnostics: weighted head-count per cluster; calories of the previous group.
CBN[, weighted.mean(Poor6, Weight), by = cluster][order(cluster)]
CBNPoor5[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# New reference group.
CBNPoor6 <- CBN[Poor6 == 1]

# Cluster-level weighted mean daily calories per food group.
for (item in food_items) {
  CBNPoor6[, (paste0("Daily_", item, "_cluster")) :=
             weighted.mean(get(paste0(item, "_per_Calory")), Weight, na.rm = TRUE),
           by = cluster]
}
# Total cluster calories. Original used fragile positional .SDcols = c(191:206);
# use the names of the 16 columns just created.
CBNPoor6[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily_", food_items, "_cluster")][]

# Scale each food group to a 2100-kcal/day bundle; NA -> 0 as in the original.
for (item in food_items) {
  d2 <- paste0("Daily2_", item)
  CBNPoor6[, (d2) := (get(paste0("Daily_", item, "_cluster")) * 2100) / Daily_Calories_cluster2]
  CBNPoor6[is.na(get(d2)), (d2) := 0]
}
# Original used positional .SDcols = c(208:223) (the Daily2_* columns).
CBNPoor6[, Daily_Calories3 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily2_", food_items)][]

# Price of one calorie within each cluster.
CBNPoor6[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor6[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories implicit in restaurant spending (70% treated as food).
CBNPoor6[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor6[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor6 <- CBNPoor6[Per_Daily_Exp_Calories != 0]
CBNPoor6[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor6[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Total food expenditure including the restaurant share.
CBNPoor6[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Cost of the 2100-kcal/day bundle.
CBNPoor6[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor6[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Further interactive diagnostics.
CBNPoor6[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor6[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Iteration-6 food poverty line per cluster (weighted mean bundle cost).
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor6[cluster == k]
  assign(paste0("Food_Povertyline", k, "_6"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}

# Refresh the working copy of key expenditure columns for the next iteration.
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
######### Iteration 7 ###############
# Refinement pass 7: households below the iteration-6 cluster lines form the
# new reference group (CBNPoor7); the cost of its 2100-kcal/day bundle gives
# Food_Povertyline{1..4}_7.
# NOTE(review): `c` is a data.table shadowing base::c().

# Food groups in column-creation order (fixes Daily_*_cluster / Daily2_*
# layout).
food_items <- c("Ghand", "Hoboobat", "Nan", "Berenj", "Roghan", "Goosht",
                "Morgh", "Mahi", "Shir", "Mast", "Panir", "Tokhmemorgh",
                "Mive", "Sabzi", "Makarooni", "Sibzamini")

# Sort expenditure data.
CBN <- CBN[order(Total_Food_Month_Per2)]
c <- c[order(Total_Food_Month_Per2)]

# Flag the new poor against the iteration-6 cluster-specific lines.
for (k in 1:4) {
  line_k <- get(paste0("Food_Povertyline", k, "_6"))
  if (k == 1) {
    CBN[, Poor7 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
    c[, Poor7 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, 0)]
  } else {
    CBN[, Poor7 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor7)]
    c[, Poor7 := ifelse(FoodExpenditure_Per_total < line_k & cluster == k, 1, Poor7)]
  }
}
# Diagnostics: weighted head-count per cluster; calories of the previous group.
CBN[, weighted.mean(Poor7, Weight), by = cluster][order(cluster)]
CBNPoor6[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# New reference group.
CBNPoor7 <- CBN[Poor7 == 1]

# Cluster-level weighted mean daily calories per food group.
for (item in food_items) {
  CBNPoor7[, (paste0("Daily_", item, "_cluster")) :=
             weighted.mean(get(paste0(item, "_per_Calory")), Weight, na.rm = TRUE),
           by = cluster]
}
# Total cluster calories. Original used fragile positional .SDcols = c(191:206);
# use the names of the 16 columns just created.
CBNPoor7[, Daily_Calories_cluster2 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily_", food_items, "_cluster")][]

# Scale each food group to a 2100-kcal/day bundle; NA -> 0 as in the original.
for (item in food_items) {
  d2 <- paste0("Daily2_", item)
  CBNPoor7[, (d2) := (get(paste0("Daily_", item, "_cluster")) * 2100) / Daily_Calories_cluster2]
  CBNPoor7[is.na(get(d2)), (d2) := 0]
}
# Original used positional .SDcols = c(208:223) (the Daily2_* columns).
CBNPoor7[, Daily_Calories3 := Reduce(`+`, .SD),
         .SDcols = paste0("Daily2_", food_items)][]

# Price of one calorie within each cluster.
CBNPoor7[, FoodExpenditure_Per_cluster := weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(FoodExpenditure_Per_cluster, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, Calory_Price := (FoodExpenditure_Per_cluster / (Daily_Calories_cluster2))]
CBNPoor7[, weighted.mean(Calory_Price, Weight, na.rm = TRUE), by = cluster]

# Calories implicit in restaurant spending (70% treated as food).
CBNPoor7[, Per_Calory_Resturant := (0.7 * Resturant_Exp / EqSizeCalory) / Calory_Price]
CBNPoor7[is.na(Per_Calory_Resturant), Per_Calory_Resturant := 0]
CBNPoor7 <- CBNPoor7[Per_Daily_Exp_Calories != 0]
CBNPoor7[, Per_Daily_Calories := Per_Daily_Exp_Calories + Per_Calory_Resturant]
CBNPoor7[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(Per_Calory_Resturant, Weight, na.rm = TRUE), by = cluster]

# Total food expenditure including the restaurant share.
CBNPoor7[, FoodExpenditure_Per_total := FoodExpenditure_Per + (0.7 * Resturant_Exp / EqSizeCalory)]
CBNPoor7[, weighted.mean(FoodExpenditure_Per_total, Weight, na.rm = TRUE), by = cluster]

# Cost of the 2100-kcal/day bundle.
CBNPoor7[, Bundle_Value := FoodExpenditure_Per_total * 2100 / Per_Daily_Calories]
CBNPoor7[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(Bundle_Value, Weight, na.rm = TRUE), by = ProvinceCode]

# Further interactive diagnostics.
CBNPoor7[, weighted.mean(Per_Daily_Exp_Calories, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(FoodExpenditure_Per_day, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(FoodExpenditure_Per, Weight, na.rm = TRUE), by = cluster]
CBNPoor7[, weighted.mean(Per_Daily_Calories, Weight, na.rm = TRUE), by = cluster]

# Iteration-7 food poverty line per cluster (weighted mean bundle cost).
for (k in 1:4) {
  CBNPoorCluster <- CBNPoor7[cluster == k]
  assign(paste0("Food_Povertyline", k, "_7"),
         weighted.mean(CBNPoorCluster$Bundle_Value, CBNPoorCluster$Weight, na.rm = TRUE))
}

# Refresh the working copy of key expenditure columns for the next iteration.
c <- CBN[, .(FoodExpenditure_Per, FoodExpenditure_Per_total, Total_Exp_Month_Per_nondurable,
             Total_Exp_Month, Total_Food_Month_Per2, Poor, Decile, Weight, cluster)]
#########Iteration 8###############
# One pass of the iterative CBN procedure: re-flag the poor using iteration-7
# food poverty lines, rebuild the calorie bundle from the new poor population,
# and derive iteration-8 food poverty lines.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
# Poor8 = 1 when total per-capita food expenditure falls below the household's
# cluster-specific food poverty line from iteration 7.
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_7 & cluster==1,1,0)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_7 & cluster==1 ,1,0)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_7 & cluster==2,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_7 & cluster==2 ,1,Poor8)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_7 & cluster==3,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_7 & cluster==3 ,1,Poor8)]
CBN[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_7 & cluster==4,1,Poor8)]
c[,Poor8:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_7 & cluster==4 ,1,Poor8)]
CBN[,weighted.mean(Poor8,Weight),by=cluster][order(cluster)]
CBNPoor7[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8<-CBN[Poor8==1]
#CalculatePer_calories in clusters
# Cluster-level weighted means of each food item's per-calory variable
# (presumably daily per-capita calories per item — TODO confirm upstream definition).
CBNPoor8[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols is fragile — this assumes columns 191:206 are
# exactly the 16 Daily_*_cluster columns created above; name-based selection
# (patterns("^Daily_.*_cluster$")) would be safer. Kept byte-identical here.
CBNPoor8[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor8)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal; NA -> 0.
CBNPoor8[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor8[is.na(get(col)), (col) := 0]
CBNPoor8[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor8[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols again — assumes 208:223 are the Daily2_* columns.
CBNPoor8[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
# Implied price of one calory = cluster mean food spending / cluster calories.
CBNPoor8[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor8[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
# 70% of restaurant spending is treated as food, converted to calories via Calory_Price.
CBNPoor8[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor8[is.na(get(col)), (col) := 0]
# Drop households with zero recorded food calories (would break the 2100-kcal scaling).
CBNPoor8<-CBNPoor8[Per_Daily_Exp_Calories!=0]
CBNPoor8[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor8[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor8[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor8[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor8[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Iteration-8 food poverty line per cluster = weighted mean bundle value of its poor.
#cluster 1
CBNPoorCluster<-CBNPoor8[cluster==1]
Food_Povertyline1_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor8[cluster==2]
Food_Povertyline2_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor8[cluster==3]
Food_Povertyline3_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor8[cluster==4]
Food_Povertyline4_8<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
# Historical debugging code kept commented out by the original author.
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 9###############
# Same pass as iteration 8, seeded with the iteration-8 food poverty lines.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
# Poor9 = 1 when food expenditure is below the cluster's iteration-8 line.
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_8 & cluster==1,1,0)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_8 & cluster==1 ,1,0)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_8 & cluster==2,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_8 & cluster==2 ,1,Poor9)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_8 & cluster==3,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_8 & cluster==3 ,1,Poor9)]
CBN[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_8 & cluster==4,1,Poor9)]
c[,Poor9:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_8 & cluster==4 ,1,Poor9)]
CBN[,weighted.mean(Poor9,Weight),by=cluster][order(cluster)]
CBNPoor8[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9<-CBN[Poor9==1]
#CalculatePer_calories in clusters
# Cluster-level weighted means of each food item's per-calory variable.
CBNPoor9[,Daily_Ghand_cluster:=weighted.mean(Ghand_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Hoboobat_cluster:=weighted.mean(Hoboobat_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Nan_cluster:=weighted.mean(Nan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Berenj_cluster:=weighted.mean(Berenj_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Roghan_cluster:=weighted.mean(Roghan_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Goosht_cluster:=weighted.mean(Goosht_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Morgh_cluster:=weighted.mean(Morgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mahi_cluster:=weighted.mean(Mahi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Shir_cluster:=weighted.mean(Shir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mast_cluster:=weighted.mean(Mast_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Panir_cluster:=weighted.mean(Panir_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Tokhmemorgh_cluster:=weighted.mean(Tokhmemorgh_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Mive_cluster:=weighted.mean(Mive_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Sabzi_cluster:=weighted.mean(Sabzi_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Makarooni_cluster:=weighted.mean(Makarooni_per_Calory,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Daily_Sibzamini_cluster:=weighted.mean(Sibzamini_per_Calory,Weight,na.rm = TRUE),by=cluster]
# NOTE(review): positional .SDcols — assumes columns 191:206 are the 16
# Daily_*_cluster columns just created; name-based selection would be safer.
CBNPoor9[, Daily_Calories_cluster2 := Reduce(`+`, .SD), .SDcols=c(191:206)][]
#utils::View(CBNPoor9)
#Calculate Per_calories in clusters(=2100)
# Rescale each item's cluster calories so the bundle totals 2100 kcal; NA -> 0.
CBNPoor9[,Daily2_Ghand:=(Daily_Ghand_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Ghand")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Hoboobat:=(Daily_Hoboobat_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Hoboobat")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Nan:=(Daily_Nan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Nan")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Berenj:=(Daily_Berenj_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Berenj")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Roghan:=(Daily_Roghan_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Roghan")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Goosht:=(Daily_Goosht_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Goosht")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Morgh:=(Daily_Morgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Morgh")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mahi:=(Daily_Mahi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mahi")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Shir:=(Daily_Shir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Shir")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mast:=(Daily_Mast_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mast")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Panir:=(Daily_Panir_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Panir")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Tokhmemorgh:=(Daily_Tokhmemorgh_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Tokhmemorgh")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Mive:=(Daily_Mive_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Mive")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Sabzi:=(Daily_Sabzi_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sabzi")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Makarooni:=(Daily_Makarooni_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Makarooni")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9[,Daily2_Sibzamini:=(Daily_Sibzamini_cluster*2100)/(Daily_Calories_cluster2)]
for (col in c("Daily2_Sibzamini")) CBNPoor9[is.na(get(col)), (col) := 0]
# NOTE(review): positional .SDcols again — assumes 208:223 are the Daily2_* columns.
CBNPoor9[, Daily_Calories3 := Reduce(`+`, .SD), .SDcols=c(208:223)][]
# Implied calory price and restaurant-calorie imputation, as in iteration 8.
CBNPoor9[,FoodExpenditure_Per_cluster:=weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_cluster,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,Calory_Price:=(FoodExpenditure_Per_cluster/(Daily_Calories_cluster2))]
CBNPoor9[,weighted.mean(Calory_Price,Weight,na.rm = TRUE),by=cluster]
#Calculate per_Calory from resturants
CBNPoor9[,Per_Calory_Resturant:=(0.7*Resturant_Exp/EqSizeCalory)/Calory_Price]
for (col in c("Per_Calory_Resturant")) CBNPoor9[is.na(get(col)), (col) := 0]
CBNPoor9<-CBNPoor9[Per_Daily_Exp_Calories!=0]
CBNPoor9[,Per_Daily_Calories:=Per_Daily_Exp_Calories+Per_Calory_Resturant]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Calory_Resturant,Weight,na.rm = TRUE),by=cluster]
#sum of total food expenditures
CBNPoor9[,FoodExpenditure_Per_total:=FoodExpenditure_Per+(0.7*Resturant_Exp/EqSizeCalory)]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster]
#Food expenditures (equal 2100 CCAL)
CBNPoor9[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Calories]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=ProvinceCode]
#Calculations
CBNPoor9[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per_day,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(FoodExpenditure_Per,Weight,na.rm = TRUE),by=cluster]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
#Food Poverty Line for each cluster
# Iteration-9 food poverty line per cluster (used by iteration 10 and the Engel step).
#cluster 1
CBNPoorCluster<-CBNPoor9[cluster==1]
Food_Povertyline1_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 2
CBNPoorCluster<-CBNPoor9[cluster==2]
Food_Povertyline2_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 3
CBNPoorCluster<-CBNPoor9[cluster==3]
Food_Povertyline3_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
#cluster 4
CBNPoorCluster<-CBNPoor9[cluster==4]
Food_Povertyline4_9<-weighted.mean(CBNPoorCluster$Bundle_Value,CBNPoorCluster$Weight,na.rm = TRUE)
# Historical debugging code kept commented out by the original author.
#ee<-CBNPoor[,.(Total_Exp_Month_Real,Total_Exp_Month,Total_Food_Month_Per2,Total_Exp_Month_Per_nondurable,Total_Exp_Month_nondurable_Real_Per,FoodExpenditure_Per,cluster)]
#mean(ee[,Total_Food_Month_Per2==Total_Exp_Month_nondurable_Real])
#mean(ee[,Total_Food_Month_Per2<3500000])
#ee<- ee[order(Total_Food_Month_Per2)]
#utils::View(CBN)
#for (col in c("Total_Food_Month_Per2")) CBN[is.na(get(col)), (col) := Total_Exp_Month_Per_nondurable]
c<-CBN[,.(FoodExpenditure_Per,FoodExpenditure_Per_total,Total_Exp_Month_Per_nondurable,Total_Exp_Month,Total_Food_Month_Per2,Poor,Decile,Weight,cluster)]
#########Iteration 10###############
# Final re-flagging pass (Poor10) using iteration-9 food poverty lines, followed
# by food/service budget-ratio variables used in the Engel step below.
#Sort Expenditure data
CBN<- CBN[order(Total_Food_Month_Per2)]
c<- c[order(Total_Food_Month_Per2)]
#Indicate new poors
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_9 & cluster==1,1,0)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline1_9 & cluster==1 ,1,0)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_9 & cluster==2,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline2_9 & cluster==2 ,1,Poor10)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_9 & cluster==3,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline3_9 & cluster==3 ,1,Poor10)]
CBN[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_9 & cluster==4,1,Poor10)]
c[,Poor10:=ifelse(FoodExpenditure_Per_total < Food_Povertyline4_9 & cluster==4 ,1,Poor10)]
CBN[,weighted.mean(Poor10,Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor10<-CBN[Poor10==1]
#Engel
# Earlier Engel experiments kept commented out by the original author.
#CBNPoor9<-CBNPoor9[,ratio1:=FoodExpenditure/Total_Exp_Month]
#CBNPoor9[,weighted.mean(ratio1,Weight),by=cluster]
#summary(CBNPoor9$ratio1)
# ratio1 = food share of total monthly expenditure; ratio2 = services share.
CBN<-CBN[,ratio1:=FoodExpenditure/Total_Exp_Month]
CBN[,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN<-CBN[,ratio2:=ServiceExp/Total_Exp_Month]
CBN[,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
#Engel-home ratio calculations
# Diagnostics for households inside a +/-10% band around each cluster's
# iteration-9 food poverty line: headcounts, food share (ratio1) and service
# share (ratio2) by province.
# FIX: the original used Food_Povertyline1_9 as the LOWER bound for clusters
# 2-4 (copy-paste error); the per-cluster selections in the subsequent
# "Poverty Line for each cluster" step bound each cluster with its OWN line,
# so the matching line is used for both bounds here as well.
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,sum(HIndivNo),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,weighted.mean(ratio1,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9,weighted.mean(ratio2,Weight),by=ProvinceCode][order(ProvinceCode)]
# Poverty Line for each cluster
# For each cluster: households within +/-10% of the food poverty line, below it,
# and above it are extracted; the Engel coefficient (food share) scales the food
# line up to a total poverty line via its reciprocal.
# NOTE(review): Engel<k> is computed from CBNPoorCluster as last assigned, i.e.
# the ABOVE-band subset (> 1.1x line), not the within-band subset — confirm this
# is intentional before reusing.
#cluster 1
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total<1.1*Food_Povertyline1_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline1_9]
UrbanEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total<0.9*Food_Povertyline1_9]
UrbanunderEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==1 & FoodExpenditure_Per_total>1.1*Food_Povertyline1_9]
UrbanaboveEngel1<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel1<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse1<-1/Engel1
Povertyline1_9<-Engel_Reverse1*Food_Povertyline1_9
#cluster 2
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total<1.1*Food_Povertyline2_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline2_9]
UrbanEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total<0.9*Food_Povertyline2_9]
UrbanunderEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==2 & FoodExpenditure_Per_total>1.1*Food_Povertyline2_9]
UrbanaboveEngel2<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel2<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse2<-1/Engel2
Povertyline2_9<-Engel_Reverse2*Food_Povertyline2_9
#cluster 3
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total<1.1*Food_Povertyline3_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline3_9]
UrbanEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total<0.9*Food_Povertyline3_9]
UrbanunderEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==3 & FoodExpenditure_Per_total>1.1*Food_Povertyline3_9]
UrbanaboveEngel3<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel3<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse3<-1/Engel3
Povertyline3_9<-Engel_Reverse3*Food_Povertyline3_9
#cluster 4
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total<1.1*Food_Povertyline4_9 & FoodExpenditure_Per_total>0.90*Food_Povertyline4_9]
UrbanEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total<0.9*Food_Povertyline4_9]
UrbanunderEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
CBNPoorCluster<-CBN[cluster==4 & FoodExpenditure_Per_total>1.1*Food_Povertyline4_9]
UrbanaboveEngel4<-CBNPoorCluster[,.(HHID,Region,ProvinceCode,cluster,ratio1,FoodExpenditure_Per_total)]
Engel4<-weighted.mean(CBNPoorCluster$ratio1,CBNPoorCluster$Weight)
Engel_Reverse4<-1/Engel4
Povertyline4_9<-Engel_Reverse4*Food_Povertyline4_9
# Persist the band/below/above household extracts for reporting.
UrbanEngel<-rbind(UrbanEngel1,UrbanEngel2,UrbanEngel3,UrbanEngel4)
save(UrbanEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanEngel.rda"))
UrbanunderEngel<-rbind(UrbanunderEngel1,UrbanunderEngel2,UrbanunderEngel3,UrbanunderEngel4)
save(UrbanunderEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanunderEngel.rda"))
UrbanaboveEngel<-rbind(UrbanaboveEngel1,UrbanaboveEngel2,UrbanaboveEngel3,UrbanaboveEngel4)
save(UrbanaboveEngel, file=paste0(Settings$HEISProcessedPath,"Y",year,"UrbanaboveEngel.rda"))
# NOTE(review): the computed total poverty lines are immediately overwritten by
# hard-coded values here — the Engel-derived Povertyline*_9 above are discarded.
# Presumably these are frozen figures from a reference run; verify before reuse.
Povertyline1_9<-4107570
Povertyline2_9<-3642510
Povertyline3_9<-5055730
Povertyline4_9<-7689100
#Indicate final poors
# Poor11 = 1 when nominal per-capita non-durable expenditure is below the
# cluster's (hard-coded) total poverty line. Total_Exp_Month_Per2 is the
# real-price-adjusted version, computed but not used in the Poor11 comparison.
CBN<-CBN[,Total_Exp_Month_Per2:=Total_Exp_Month_Per_nondurable*RealPriceIndex]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline1_9 & cluster==1,1,0)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline2_9 & cluster==2,1,Poor11)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline3_9 & cluster==3,1,Poor11)]
CBN[,Poor11:=ifelse(Total_Exp_Month_Per_nondurable < Povertyline4_9 & cluster==4,1,Poor11)]
CBN[,weighted.mean(Poor11,Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster]
CBNPoor11<-CBN[Poor11==1]
# Population counts (Size x sampling Weight) and headcount-rate diagnostics.
CBN[,sum(Size*Weight),by=cluster][order(cluster)]
CBN[,sum(Size*Weight),by=.(cluster,Decile)][order(cluster,Decile)]
CBNPoor[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor11[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor9[,sum(Size*Weight),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBNPoor9[,weighted.mean(Per_Daily_Calories,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBN[,weighted.mean(Poor11,Weight)]
CBN[,weighted.mean(Poor11,Weight),by=cluster][order(cluster)]
CBN[,weighted.mean(Poor11,Weight),by=ProvinceCode][order(ProvinceCode)]
CBNPoor11[,sum(Size*Weight),by=ProvinceCode][order(ProvinceCode)]
CBNPoor11[,sum(Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[,sum(Size*Weight),by=ProvinceCode][order(ProvinceCode)]
CBN[,sum(Size*Weight)]
CBNPoor11[,weighted.mean(FoodExpenditure_Per_total,Weight,na.rm = TRUE),by=cluster][order(cluster)]
CBNPoor11[,weighted.mean(Per_Daily_Exp_Calories,Weight,na.rm = TRUE),by=cluster][order(cluster)]
##############################
###Real Prices for report###
##############################
# Province-level real price indexes relative to Tehran (ProvinceCode 2301),
# combining a food index (Bundle_Value) and a housing index (Home_Per_Metr),
# computed once over all of CBN and once over the final poor only.
#sum of total food expenditures
CBN[,FoodExpenditure_Per_total:=FoodExpenditure_Per]
#Food expenditures (equal 2100 CCAL)
CBN<-CBN[Per_Daily_Exp_Calories>0]
CBN[,Bundle_Value:=FoodExpenditure_Per_total*2100/Per_Daily_Exp_Calories]
CBN[,weighted.mean(Bundle_Value,Weight,na.rm = TRUE),by=cluster]
T_Bundle_Value<-subset(CBN, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
# Index = province weighted mean / Tehran weighted mean (Tehran = 1 by construction).
CBN[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBN[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBN[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBN[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2_1<-CBN[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight)]
# Combined index = simple average of the food and housing indexes.
Indexes2_1<-Indexes2_1[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes_total<-Indexes2_1[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes3_1<-Indexes_total[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
# NOTE(review): `Indexes` is defined earlier in the file (outside this section);
# this re-selects its two key columns — confirm it holds the intended index table.
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
# Same index construction restricted to the final poor (Poor11 == 1).
CBN_Poor<-CBN[Poor11==1]
T_Bundle_Value<-subset(CBN_Poor, ProvinceCode==2301, select=c(Bundle_Value,Home_Per_Metr,Weight))
Tehran_Bundle_Value1<-weighted.mean(T_Bundle_Value$Bundle_Value,T_Bundle_Value$Weight,na.rm = TRUE)
Tehran_Bundle_Value2<-weighted.mean(T_Bundle_Value$Home_Per_Metr,T_Bundle_Value$Weight,na.rm = TRUE)
CBN_Poor[,RealPriceIndex1:=weighted.mean(Bundle_Value,Weight,na.rm = TRUE)/Tehran_Bundle_Value1,by=ProvinceCode]
CBN_Poor[,RealPriceIndex2:=weighted.mean(Home_Per_Metr,Weight,na.rm = TRUE)/Tehran_Bundle_Value2,by=ProvinceCode]
CBN_Poor[,weighted.mean(RealPriceIndex1,Weight),by=ProvinceCode]
CBN_Poor[,weighted.mean(RealPriceIndex2,Weight),by=ProvinceCode]
Indexes2_2<-CBN_Poor[,.(RealPriceIndex1,RealPriceIndex2,ProvinceCode,Weight,Poor11)]
Indexes2_2<-Indexes2_2[,RealPriceIndex:=(RealPriceIndex1+RealPriceIndex2)/2]
Indexes_finalpoor<-Indexes2_2[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(ProvinceCode)]
Indexes3_2<-Indexes_finalpoor[,.(ProvinceCode,RealPriceIndex1,RealPriceIndex2,RealPriceIndex)]
Indexes<-Indexes[,.(ProvinceCode,RealPriceIndex)]
###Save Tables
CBN_Urban<-CBN
save(CBN_Urban, file = paste0(Settings$HEISProcessedPath,"CBN_Urban","95.rda"))
CBNPoor_Urban<-CBNPoor11
save(CBNPoor_Urban, file = paste0(Settings$HEISProcessedPath,"CBNPoor_Urban","95.rda"))
#utils::View(CBN)
# Budget shares of the 16 food-bundle items among the final poor (CBNPoor11).
# Each item has a <item>Price and <item>gram column; the 16 copy-pasted
# NA-fill loops and the 16 share printouts of the original are consolidated
# into loops over this item vector (same order, same output).
bundle_items <- c("Ghand","Hoboobat","Roghan","Berenj","Nan","Goosht","Morgh",
                  "Mahi","Shir","Mast","Panir","Tokhmemorgh","Mive","Sabzi",
                  "Makarooni","Sibzamini")
# Replace missing unit prices with 0 so the bundle cost below is well defined.
for (col in paste0(bundle_items, "Price")) CBNPoor11[is.na(get(col)), (col) := 0]
# Total bundle cost per household (term order preserved from the original sum).
CBNPoor11<-CBNPoor11[,x:=GhandPrice*Ghandgram+HoboobatPrice*Hoboobatgram+RoghanPrice*Roghangram+BerenjPrice*Berenjgram+NanPrice*Nangram+GooshtPrice*Gooshtgram+MorghPrice*Morghgram+MahiPrice*Mahigram+ShirPrice*Shirgram+MastPrice*Mastgram+PanirPrice*Panirgram+TokhmemorghPrice*Tokhmemorghgram+MivePrice*Mivegram+SabziPrice*Sabzigram+MakarooniPrice*Makaroonigram+SibzaminiPrice*Sibzaminigram]
# Weighted budget share of each item; the third data.table argument is `by`,
# so `cluster==4` splits the output into TRUE/FALSE groups (as in the original).
# print() is required because data.table autoprint is suppressed inside loops.
for (item in bundle_items) {
  print(CBNPoor11[,weighted.mean((get(paste0(item,"Price"))*get(paste0(item,"gram")))/x,Weight),cluster==4])
}
# Budget shares of the 16 food-bundle items over the whole urban sample (CBN),
# each multiplied by its item-specific price-index factor for the report.
# The original's 16 NA-fill loops and 16 share lines are consolidated; the
# named vector keeps each multiplier next to its item and preserves the
# original output order.
bundle_items <- c("Ghand","Hoboobat","Roghan","Berenj","Nan","Goosht","Morgh",
                  "Mahi","Shir","Mast","Panir","Tokhmemorgh","Mive","Sabzi",
                  "Makarooni","Sibzamini")
# Replace missing unit prices with 0 so the bundle cost below is well defined.
for (col in paste0(bundle_items, "Price")) CBN[is.na(get(col)), (col) := 0]
# Total bundle cost per household (term order preserved from the original sum).
CBN<-CBN[,x:=GhandPrice*Ghandgram+HoboobatPrice*Hoboobatgram+RoghanPrice*Roghangram+BerenjPrice*Berenjgram+NanPrice*Nangram+GooshtPrice*Gooshtgram+MorghPrice*Morghgram+MahiPrice*Mahigram+ShirPrice*Shirgram+MastPrice*Mastgram+PanirPrice*Panirgram+TokhmemorghPrice*Tokhmemorghgram+MivePrice*Mivegram+SabziPrice*Sabzigram+MakarooniPrice*Makaroonigram+SibzaminiPrice*Sibzaminigram]
# Per-item multipliers taken verbatim from the original lines (presumably
# item-level CPI figures — TODO confirm source).
cpi_factor <- c(Ghand=103.5, Hoboobat=117, Roghan=111.5, Berenj=114.3,
                Nan=110.8, Goosht=123.5, Morgh=114.1, Mahi=107.6,
                Shir=103.4, Mast=106.1, Panir=106.8, Tokhmemorgh=125.9,
                Mive=97.1, Sabzi=109.9, Makarooni=101.9, Sibzamini=119.7)
# print() is required because data.table autoprint is suppressed inside loops.
for (item in names(cpi_factor)) {
  print(CBN[,weighted.mean((get(paste0(item,"Price"))*get(paste0(item,"gram")))/x,Weight)*cpi_factor[[item]]])
}
# Persist the per-household poverty flags used by downstream reports:
# Poor2 (early-iteration flag), Poor9 (food-poor), Poor11 (final poor).
CBNUrban<-CBN[,.(HHID,Percentile,Poor2,Weight,ProvinceCode)]
save(CBNUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"CBNUrban.rda"))
OldFoodUrban<-CBN[,.(HHID,Percentile,Poor9)]
save(OldFoodUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFoodUrban.rda"))
OldFinalPoorUrban<-CBN[,.(HHID,Percentile,Poor11)]
save(OldFinalPoorUrban, file=paste0(Settings$HEISProcessedPath,"Y",year,"OldFinalPoorUrban.rda"))
# Report elapsed time (starttime is set earlier in the file, outside this view).
endtime <- proc.time()
cat("\n\n============================\nIt took ")
cat(endtime-starttime)
|
# Build the sub-network of a protein-interaction network (PIN) induced by a
# gene set, plot it, and extract/plot its largest clique.
# Fixes vs original: TRUE/FALSE instead of T/F; deprecated igraph names
# graph.data.frame/graph.full replaced by graph_from_data_frame/make_full_graph;
# the magrittr pipe replaced by a direct add_vertices() call (removes the
# dependency on a pipe operator being attached).
rm(list=ls())  # NOTE(review): workspace-clearing in scripts is fragile; kept for parity.
# PIN edge list: one row per interacting gene pair (columns geneA, geneB).
human.pin <- read.csv("../../human.pin.csv", header=TRUE, stringsAsFactors = FALSE)
geneA <- human.pin$geneA
geneB <- human.pin$geneB
#All genes appearing in the PIN
all.list <- unique(c(geneA, geneB))
#the gene set of interest
geneset <- read.csv("HALLMARK_XENOBIOTIC_METABOLISM.csv", header=TRUE, stringsAsFactors = FALSE)
gene.list <- geneset$gene
#number of genes in the set
length(gene.list)
#number of genes of the set appearing in the PIN
length(which(gene.list %in% all.list))
#Constructing the sub-network from the gene set: keep edges with BOTH ends in the set
in.set <- (geneA %in% gene.list) & (geneB %in% gene.list)
subA <- geneA[in.set]
subB <- geneB[in.set]
sub.web <- data.frame(subA, subB)
nrow(sub.web)  # number of edges in the sub-network
library(igraph)
library(gplots)  # NOTE(review): unused in this section; presumably needed later.
'%ni%' <- Negate('%in%')
# Set genes with at least one edge vs. genes isolated in the sub-network.
connected.set <- unique(c(subA, subB))
isolated <- gene.list[which(gene.list %ni% connected.set)]
# Undirected sub-graph, plus the isolated genes as degree-0 vertices.
sub.graph <- graph_from_data_frame(sub.web, directed = FALSE)
sub.graph.iso <- add_vertices(sub.graph, length(isolated))
pdf("HALLMARK_XENOBIOTIC_METABOLISM.full.pdf")
plot(sub.graph.iso, vertex.label=NA, vertex.size=6)
dev.off()
#the largest clique(s) of the sub-network
sub.largest.clique <- largest_cliques(sub.graph.iso)
#total number of largest cliques
length(sub.largest.clique)
#number of genes in the largest clique
sub.clique.1 <- sub.largest.clique[[1]]
degree <- length(sub.clique.1)  # NOTE(review): shadows igraph::degree(); kept for parity.
degree
#clique 1, drawn as a complete graph with the member genes' names
sub.c1.graph <- make_full_graph(degree)
# Assumes clique vertex ids all predate the appended isolated vertices — TODO confirm.
V(sub.c1.graph)$name <- V(sub.graph)$name[sub.clique.1]
pdf("HALLMARK_XENOBIOTIC_METABOLISM.clique1.pdf")
plot(sub.c1.graph)
dev.off()
|
/Data/MSigDB.go.pathway/HALLMARK_XENOBIOTIC_METABOLISM/sub.network.R
|
no_license
|
haoboguo/NetBAS
|
R
| false
| false
| 1,556
|
r
|
# Build the protein-interaction sub-network induced by a gene set
# (HALLMARK_XENOBIOTIC_METABOLISM), plot it, and extract its largest clique.
rm(list=ls())
# Protein-interaction network (PIN) as an undirected edge list (geneA, geneB).
human.pin <- read.csv("../../human.pin.csv", header=T, stringsAsFactors = F)
geneA <- human.pin$geneA
geneB <- human.pin$geneB
#All genes appear in PIN
all.list <- unique(c(geneA, geneB))
#the Gene list
geneset <- read.csv("HALLMARK_XENOBIOTIC_METABOLISM.csv", header=T, stringsAsFactors = F)
gene.list <- geneset$gene
#number of genes in the set
length(gene.list)
#number of genes of the set appear in the PIN
length(which(gene.list %in% all.list))
#Constructing the sub network from the gene set:
# keep only edges whose BOTH endpoints belong to the gene set.
subA <- geneA[which((geneA %in% gene.list) & (geneB %in% gene.list))]
subB <- geneB[which((geneA %in% gene.list) & (geneB %in% gene.list))]
sub.web <- data.frame(cbind(subA, subB))
length(sub.web[,1])
library(igraph)
library(gplots)
'%ni%' <- Negate('%in%')
# Genes of the set with no qualifying edge; appended as isolated vertices
# so the plot shows the full gene set.
connected.set <- unique(c(subA, subB))
isolated <- gene.list[which(gene.list %ni% connected.set)]
sub.graph <- graph.data.frame(sub.web, directed = F)
# NOTE(review): %>% is assumed to be the pipe exported by igraph (no
# magrittr/dplyr is loaded here) -- confirm with the igraph version in use.
sub.graph.iso <- sub.graph %>% add_vertices(length(isolated))
pdf("HALLMARK_XENOBIOTIC_METABOLISM.full.pdf")
plot(sub.graph.iso, vertex.label=NA, vertex.size=6)
dev.off()
#the largest clique of the sub network
sub.largest.clique <- largest_cliques(sub.graph.iso)
#total number of largest cliques
length(sub.largest.clique)
#number of genes in the largest cliques
sub.clique.1 <- sub.largest.clique[[1]]
degree <- length(sub.clique.1)
degree
#clique 1: rebuild the clique as a complete graph and carry over gene names.
# (graph.full is the pre-igraph-1.x name of make_full_graph.)
sub.c1.graph <- graph.full(degree)
V(sub.c1.graph)$name <- V(sub.graph)$name[sub.clique.1]
pdf("HALLMARK_XENOBIOTIC_METABOLISM.clique1.pdf")
plot(sub.c1.graph)
dev.off()
|
## week12
## use screeplot to visualize the relative importance of principal components
# NOTE(review): these are course notes; `data`, `res.pca` and `mtcars.pca`
# are assumed to be created elsewhere (they are not defined in this file).
pca <- princomp(data)
screeplot(pca)
## extract the result from a PCA
# get_pca_var() comes from the factoextra package (not loaded here -- confirm).
var <- get_pca_var(res.pca)
## variables for PCA var
var$coord ## coordinates of variables to create a scatter plot
var$cos2 ## represents the quality of representation for variables on the factor map. It’s calculated as the squared coordinates: var.cos2 = var.coord * var.coord.
var$contrib ## contains the contributions (in percentage) of the variables to the principal components. The contribution of a variable (var) to a given principal component is (in percentage) : (var.cos2 * 100) / (total cos2 of the component)
## plotting PCA using ggbiplot
library(devtools)
install_github("vqv/ggbiplot")
library(ggbiplot)
ggbiplot(mtcars.pca)
## Interpreting the results
# Country of origin for the 32 rows of mtcars, in row order, used as groups.
mtcars.country <- c(rep("Japan", 3), rep("US",4), rep("Europe", 7),rep("US",3), "Europe", rep("Japan", 3), rep("US",4), rep("Europe", 3), "US", rep("Europe", 3))
ggbiplot(mtcars.pca,ellipse=TRUE, labels=rownames(mtcars), groups=mtcars.country)
## Graphical parameters with ggbiplot
ggbiplot(mtcars.pca,ellipse=TRUE,circle=TRUE, labels=rownames(mtcars), groups=mtcars.country)
## remove the arrows
ggbiplot(mtcars.pca,ellipse=TRUE,obs.scale = 1, var.scale = 1,var.axes=FALSE, labels=rownames(mtcars), groups=mtcars.country)
## Customize ggbiplot
ggbiplot(mtcars.pca,ellipse=TRUE,obs.scale = 1, var.scale = 1, labels=rownames(mtcars), groups=mtcars.country) +
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "dark blue"))+
  ggtitle("PCA of mtcars dataset")+
  theme_minimal()+
  theme(legend.position = "bottom")
## Adding a new sample
# A hypothetical 11-variable observation appended as an extra mtcars row.
spacecar <- c(1000,60,50,500,0,0.5,2.5,0,1,0,0)
mtcarsplus <- rbind(mtcars, spacecar)
mtcars.countryplus <- c(mtcars.country, "Jupiter")
# Re-run PCA on the augmented data (columns 8 and 9, vs/am, excluded).
mtcarsplus.pca <- prcomp(mtcarsplus[,c(1:7,10,11)], center = TRUE,scale. = TRUE)
ggbiplot(mtcarsplus.pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = FALSE, var.axes=TRUE, labels=c(rownames(mtcars), "spacecar"), groups=mtcars.countryplus)+
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "violet", "dark blue"))+
  ggtitle("PCA of mtcars dataset, with extra sample added")+
  theme_minimal()+
  theme(legend.position = "bottom")
## Project a new sample onto the original PCA
# Center the new sample with the original PCA's centering vector, then
# rotate it into the original component space and append to the scores.
# NOTE(review): if mtcars.pca was fit with scale. = TRUE, dividing by
# mtcars.pca$scale is also needed here -- confirm.
s.sc <- scale(t(spacecar[c(1:7,10,11)]), center= mtcars.pca$center)
s.pred <- s.sc %*% mtcars.pca$rotation
mtcars.plusproj.pca <- mtcars.pca
mtcars.plusproj.pca$x <- rbind(mtcars.plusproj.pca$x, s.pred)
ggbiplot(mtcars.plusproj.pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = FALSE, var.axes=TRUE, labels=c(rownames(mtcars), "spacecar"), groups=mtcars.countryplus)+
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "violet", "dark blue"))+
  ggtitle("PCA of mtcars dataset, with extra sample projected")+
  theme_minimal()+
  theme(legend.position = "bottom")
|
/Code_Portfolio_Shipeng/week12/week12.r
|
no_license
|
1210545510/ANLY506_Shipeng-Sun
|
R
| false
| false
| 2,950
|
r
|
## week12
## use screeplot to visualize the relative importance of principal components
# NOTE(review): these are course notes; `data`, `res.pca` and `mtcars.pca`
# are assumed to be created elsewhere (they are not defined in this file).
pca <- princomp(data)
screeplot(pca)
## extract the result from a PCA
# get_pca_var() comes from the factoextra package (not loaded here -- confirm).
var <- get_pca_var(res.pca)
## variables for PCA var
var$coord ## coordinates of variables to create a scatter plot
var$cos2 ## represents the quality of representation for variables on the factor map. It’s calculated as the squared coordinates: var.cos2 = var.coord * var.coord.
var$contrib ## contains the contributions (in percentage) of the variables to the principal components. The contribution of a variable (var) to a given principal component is (in percentage) : (var.cos2 * 100) / (total cos2 of the component)
## plotting PCA using ggbiplot
library(devtools)
install_github("vqv/ggbiplot")
library(ggbiplot)
ggbiplot(mtcars.pca)
## Interpreting the results
# Country of origin for the 32 rows of mtcars, in row order, used as groups.
mtcars.country <- c(rep("Japan", 3), rep("US",4), rep("Europe", 7),rep("US",3), "Europe", rep("Japan", 3), rep("US",4), rep("Europe", 3), "US", rep("Europe", 3))
ggbiplot(mtcars.pca,ellipse=TRUE, labels=rownames(mtcars), groups=mtcars.country)
## Graphical parameters with ggbiplot
ggbiplot(mtcars.pca,ellipse=TRUE,circle=TRUE, labels=rownames(mtcars), groups=mtcars.country)
## remove the arrows
ggbiplot(mtcars.pca,ellipse=TRUE,obs.scale = 1, var.scale = 1,var.axes=FALSE, labels=rownames(mtcars), groups=mtcars.country)
## Customize ggbiplot
ggbiplot(mtcars.pca,ellipse=TRUE,obs.scale = 1, var.scale = 1, labels=rownames(mtcars), groups=mtcars.country) +
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "dark blue"))+
  ggtitle("PCA of mtcars dataset")+
  theme_minimal()+
  theme(legend.position = "bottom")
## Adding a new sample
# A hypothetical 11-variable observation appended as an extra mtcars row.
spacecar <- c(1000,60,50,500,0,0.5,2.5,0,1,0,0)
mtcarsplus <- rbind(mtcars, spacecar)
mtcars.countryplus <- c(mtcars.country, "Jupiter")
# Re-run PCA on the augmented data (columns 8 and 9, vs/am, excluded).
mtcarsplus.pca <- prcomp(mtcarsplus[,c(1:7,10,11)], center = TRUE,scale. = TRUE)
ggbiplot(mtcarsplus.pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = FALSE, var.axes=TRUE, labels=c(rownames(mtcars), "spacecar"), groups=mtcars.countryplus)+
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "violet", "dark blue"))+
  ggtitle("PCA of mtcars dataset, with extra sample added")+
  theme_minimal()+
  theme(legend.position = "bottom")
## Project a new sample onto the original PCA
# Center the new sample with the original PCA's centering vector, then
# rotate it into the original component space and append to the scores.
# NOTE(review): if mtcars.pca was fit with scale. = TRUE, dividing by
# mtcars.pca$scale is also needed here -- confirm.
s.sc <- scale(t(spacecar[c(1:7,10,11)]), center= mtcars.pca$center)
s.pred <- s.sc %*% mtcars.pca$rotation
mtcars.plusproj.pca <- mtcars.pca
mtcars.plusproj.pca$x <- rbind(mtcars.plusproj.pca$x, s.pred)
ggbiplot(mtcars.plusproj.pca, obs.scale = 1, var.scale = 1, ellipse = TRUE, circle = FALSE, var.axes=TRUE, labels=c(rownames(mtcars), "spacecar"), groups=mtcars.countryplus)+
  scale_colour_manual(name="Origin", values= c("forest green", "red3", "violet", "dark blue"))+
  ggtitle("PCA of mtcars dataset, with extra sample projected")+
  theme_minimal()+
  theme(legend.position = "bottom")
|
##--------------------------------
# COVID Geographic Union
##--------------------------------
# Allocates Alberta COVID active-case counts from Local Health Regions to
# Census Divisions using precomputed spatial-overlap percentages, and
# derives an urban/rural population split per division and report date.
## Load Packages ----------------------------------------------------------------------------------
library(tidyverse)
library(lubridate)
## Load Data --------------------------------------------------------------------------------------
# https://www.alberta.ca/data/stats/covid-19-alberta-statistics-data.csv
COVID_Cases_by_Local_Area <- read_csv("~\\COVID\\covid-19-alberta-statistics-map-data.csv")
# Spatial Union between Alberta Local Health Regions: https://www.alberta.ca/data/stats/covid-19-alberta-statistics-map-data.csv
# and the Census Division digital boundary file: https://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/bound-limit-2016-eng.cfm
Geographic_Union <- read_csv("~\\COVID\\Geographic Joining.csv")
## Check the differences between location names
COVID_Locations <- sort(unique(COVID_Cases_by_Local_Area$`Region name`))
COVID_Geo_Union <- sort(unique(Geographic_Union$LOCAL_NAME))
Diff <- setdiff(COVID_Locations,COVID_Geo_Union) # Geographic union includes county aggregation, we don't need these
# NOTE(review): Diff is a character vector, so this names only its first
# element; presumably intended as a label -- confirm.
names(Diff) <- "Counties"
## Create a mapping dataframe between Census Division and Local Health Region
`%notin%` <- Negate(`%in%`)
COVID_by_Census_Division <- Geographic_Union %>%
  # pct_Division: share of the health region allocated to the census
  # division (presumably population/area based -- confirm in the union file).
  select(CDUID, CDNAME, LOCAL_NAME, pct_Health, pct_Division, URBAN, Div_Area, Area_H) %>%
  filter(LOCAL_NAME %notin% Diff) %>%
  right_join(., COVID_Cases_by_Local_Area, by = c("LOCAL_NAME" = "Region name")) %>%
  # Allocate cases and population proportionally, then classify the
  # allocated population as urban or rural from the URBAN category.
  mutate(Cases_Allocated = `Active cases`*pct_Division,
         Population_Allocated = Population*pct_Division,
         Urban = case_when(URBAN %in% c("URBAN", "MODERATE URBAN INFLUENCE", "MODERATE METRO INFLUENCE", "METRO") ~ Population_Allocated,
                           TRUE ~ 0),
         Rural = case_when(URBAN %in% c("RURAL", "RURAL CENTRE AREA", "RURAL REMOTE") ~ Population_Allocated,
                           TRUE ~ 0)) %>%
  # Collapse the allocated pieces to one row per division per date.
  group_by(CDUID, CDNAME, Date) %>%
  summarize(Active_Cases = sum(Cases_Allocated),
            Population = sum(Population_Allocated),
            Urban_Population = sum(Urban),
            Rural_Population = sum(Rural)) %>%
  mutate(Rural_Percentage = Rural_Population/(Urban_Population + Rural_Population),
         Report_Date = mdy(Date)) %>%
  select(-Date)
## Write Out Covid statistics by Census Division
write_csv(COVID_by_Census_Division, "~\\COVID\\COVID_by_Census_Division.csv")
|
/COVID Geographic Joining.R
|
permissive
|
keenanviney/Rural_Internet_Hackathon
|
R
| false
| false
| 2,541
|
r
|
##--------------------------------
# COVID Geographic Union
##--------------------------------
# Allocates Alberta COVID active-case counts from Local Health Regions to
# Census Divisions using precomputed spatial-overlap percentages, and
# derives an urban/rural population split per division and report date.
## Load Packages ----------------------------------------------------------------------------------
library(tidyverse)
library(lubridate)
## Load Data --------------------------------------------------------------------------------------
# https://www.alberta.ca/data/stats/covid-19-alberta-statistics-data.csv
COVID_Cases_by_Local_Area <- read_csv("~\\COVID\\covid-19-alberta-statistics-map-data.csv")
# Spatial Union between Alberta Local Health Regions: https://www.alberta.ca/data/stats/covid-19-alberta-statistics-map-data.csv
# and the Census Division digital boundary file: https://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/bound-limit-2016-eng.cfm
Geographic_Union <- read_csv("~\\COVID\\Geographic Joining.csv")
## Check the differences between location names
COVID_Locations <- sort(unique(COVID_Cases_by_Local_Area$`Region name`))
COVID_Geo_Union <- sort(unique(Geographic_Union$LOCAL_NAME))
Diff <- setdiff(COVID_Locations,COVID_Geo_Union) # Geographic union includes county aggregation, we don't need these
# NOTE(review): Diff is a character vector, so this names only its first
# element; presumably intended as a label -- confirm.
names(Diff) <- "Counties"
## Create a mapping dataframe between Census Division and Local Health Region
`%notin%` <- Negate(`%in%`)
COVID_by_Census_Division <- Geographic_Union %>%
  # pct_Division: share of the health region allocated to the census
  # division (presumably population/area based -- confirm in the union file).
  select(CDUID, CDNAME, LOCAL_NAME, pct_Health, pct_Division, URBAN, Div_Area, Area_H) %>%
  filter(LOCAL_NAME %notin% Diff) %>%
  right_join(., COVID_Cases_by_Local_Area, by = c("LOCAL_NAME" = "Region name")) %>%
  # Allocate cases and population proportionally, then classify the
  # allocated population as urban or rural from the URBAN category.
  mutate(Cases_Allocated = `Active cases`*pct_Division,
         Population_Allocated = Population*pct_Division,
         Urban = case_when(URBAN %in% c("URBAN", "MODERATE URBAN INFLUENCE", "MODERATE METRO INFLUENCE", "METRO") ~ Population_Allocated,
                           TRUE ~ 0),
         Rural = case_when(URBAN %in% c("RURAL", "RURAL CENTRE AREA", "RURAL REMOTE") ~ Population_Allocated,
                           TRUE ~ 0)) %>%
  # Collapse the allocated pieces to one row per division per date.
  group_by(CDUID, CDNAME, Date) %>%
  summarize(Active_Cases = sum(Cases_Allocated),
            Population = sum(Population_Allocated),
            Urban_Population = sum(Urban),
            Rural_Population = sum(Rural)) %>%
  mutate(Rural_Percentage = Rural_Population/(Urban_Population + Rural_Population),
         Report_Date = mdy(Date)) %>%
  select(-Date)
## Write Out Covid statistics by Census Division
write_csv(COVID_by_Census_Division, "~\\COVID\\COVID_by_Census_Division.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_plot.R
\name{month_plot}
\alias{month_plot}
\title{month_plot}
\usage{
month_plot(data, meter_name)
}
\arguments{
\item{data}{A dataframe which has peak power values of a virtual meter and the total 4 meters.}
\item{meter_name}{The name of a meter}
}
\description{
Plots the data by month for the given meter, using grouping, colour, and facet aesthetics.
}
\examples{
month_plot(data, "PQ")
}
\keyword{monthly}
\keyword{plot}
|
/demand/man/month_plot.Rd
|
permissive
|
aidowu/demand_acep
|
R
| false
| true
| 502
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_plot.R
\name{month_plot}
\alias{month_plot}
\title{month_plot}
\usage{
month_plot(data, meter_name)
}
\arguments{
\item{data}{A dataframe which has peak power values of a virtual meter and the total 4 meters.}
\item{meter_name}{The name of a meter}
}
\description{
Plots the data by month for the given meter, using grouping, colour, and facet aesthetics.
}
\examples{
month_plot(data, "PQ")
}
\keyword{monthly}
\keyword{plot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseIdentifier.R
\name{parseIdentifier}
\alias{parseIdentifier}
\title{Parse Datetime from a Local Identifier}
\usage{
parseIdentifier(identifier)
}
\description{
Parse Datetime from a Local Identifier
}
|
/man/parseIdentifier.Rd
|
no_license
|
meerapatelmd/HemOncExt
|
R
| false
| true
| 283
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseIdentifier.R
\name{parseIdentifier}
\alias{parseIdentifier}
\title{Parse Datetime from a Local Identifier}
\usage{
parseIdentifier(identifier)
}
\description{
Parse Datetime from a Local Identifier
}
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of PneumoniaRiskOfPPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the PneumoniaRiskOfPPI study.
#'
#' The \code{createCohorts}, \code{synthesizePositiveControls}, \code{runAnalyses}, and \code{runDiagnostics} arguments
#' are intended to be used to run parts of the full study at a time, but none of the parts are considered to be optional.
#'
#' @param connectionDetails    An object of type \code{connectionDetails} as created using the
#'                             \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#'                             DatabaseConnector package.
#' @param cdmDatabaseSchema    Schema name where your patient-level data in OMOP CDM format resides.
#'                             Note that for SQL Server, this should include both the database and
#'                             schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                             write priviliges in this schema. Note that for SQL Server, this should
#'                             include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable          The name of the table that will be created in the work database schema.
#'                             This table will hold the exposure and outcome cohorts used in this
#'                             study. Defaults to "cohort".
#' @param oracleTempSchema     Should be used in Oracle to specify a schema where the user has write
#'                             priviliges for storing temporary tables.
#' @param outputFolder         Name of local folder to place results; make sure to use forward slashes
#'                             (/). Do not use a folder on a network drive since this greatly impacts
#'                             performance.
#' @param createCohorts        Create the cohortTable table with the exposure and outcome cohorts?
#' @param synthesizePositiveControls Should positive controls be synthesized?
#' @param runAnalyses          Perform the cohort method analyses?
#' @param runDiagnostics       Compute study diagnostics?
#' @param packageResults       Should results be packaged for later sharing?
#' @param maxCores             How many parallel cores should be used? If more cores are made available
#'                             this can speed up the analyses.
#' @param minCellCount         The minimum number of subjects contributing to a count before it can be included
#'                             in packaged results.
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#'                                              user = "joe",
#'                                              password = "secret",
#'                                              server = "myserver")
#'
#' execute(connectionDetails,
#'         cdmDatabaseSchema = "cdm_data",
#'         cohortDatabaseSchema = "study_results",
#'         cohortTable = "cohort",
#'         oracleTempSchema = NULL,
#'         outputFolder = "c:/temp/study_results",
#'         maxCores = 4)
#' }
#'
#' @export
# BUG FIX: the original defaults `cohortTable = cohortTable` and
# `outputFolder = outputFolder` were self-referential promises; any call
# relying on them failed with "promise already under evaluation".
# `cohortTable` now defaults to "cohort" (as in the @examples) and
# `outputFolder` is required. Callers that passed both explicitly (the only
# calls that ever worked) are unaffected.
execute <- function(connectionDetails,
                    cdmDatabaseSchema,
                    cohortDatabaseSchema = cdmDatabaseSchema,
                    cohortTable = "cohort",
                    oracleTempSchema = cohortDatabaseSchema,
                    outputFolder,
                    createCohorts = TRUE,
                    synthesizePositiveControls = TRUE,
                    runAnalyses = TRUE,
                    runDiagnostics = TRUE,
                    packageResults = TRUE,
                    maxCores = 4,
                    minCellCount = 5) {
  # Ensure the results folder exists and route log output into it.
  if (!file.exists(outputFolder))
    dir.create(outputFolder, recursive = TRUE)
  OhdsiRTools::addDefaultFileLogger(file.path(outputFolder, "log.txt"))
  if (createCohorts) {
    OhdsiRTools::logInfo("Creating exposure and outcome cohorts")
    createCohorts(connectionDetails = connectionDetails,
                  cdmDatabaseSchema = cdmDatabaseSchema,
                  cohortDatabaseSchema = cohortDatabaseSchema,
                  cohortTable = cohortTable,
                  oracleTempSchema = oracleTempSchema,
                  outputFolder = outputFolder)
  }
  if (synthesizePositiveControls) {
    OhdsiRTools::logInfo("Synthesizing positive controls")
    synthesizePositiveControls(connectionDetails = connectionDetails,
                               cdmDatabaseSchema = cdmDatabaseSchema,
                               cohortDatabaseSchema = cohortDatabaseSchema,
                               cohortTable = cohortTable,
                               oracleTempSchema = oracleTempSchema,
                               outputFolder = outputFolder,
                               maxCores = maxCores)
  }
  if (runAnalyses) {
    OhdsiRTools::logInfo("Running analyses")
    cmOutputFolder <- file.path(outputFolder, "cmOutput")
    if (!file.exists(cmOutputFolder))
      dir.create(cmOutputFolder)
    # Analysis settings ship inside the study package.
    cmAnalysisListFile <- system.file("settings",
                                      "cmAnalysisList.json",
                                      package = "PneumoniaRiskOfPPI")
    cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
    dcosList <- createTcos(outputFolder = outputFolder)
    # Thread counts are scaled from maxCores per task type.
    results <- CohortMethod::runCmAnalyses(connectionDetails = connectionDetails,
                                           cdmDatabaseSchema = cdmDatabaseSchema,
                                           exposureDatabaseSchema = cohortDatabaseSchema,
                                           exposureTable = cohortTable,
                                           outcomeDatabaseSchema = cohortDatabaseSchema,
                                           outcomeTable = cohortTable,
                                           outputFolder = cmOutputFolder,
                                           oracleTempSchema = oracleTempSchema,
                                           cmAnalysisList = cmAnalysisList,
                                           drugComparatorOutcomesList = dcosList,
                                           getDbCohortMethodDataThreads = min(3, maxCores),
                                           createStudyPopThreads = min(3, maxCores),
                                           createPsThreads = max(1, round(maxCores/10)),
                                           psCvThreads = min(10, maxCores),
                                           computeCovarBalThreads = min(3, maxCores),
                                           trimMatchStratifyThreads = min(10, maxCores),
                                           fitOutcomeModelThreads = max(1, round(maxCores/4)),
                                           outcomeCvThreads = min(4, maxCores),
                                           refitPsForEveryOutcome = FALSE)
  }
  if (runDiagnostics) {
    OhdsiRTools::logInfo("Running diagnostics")
    generateDiagnostics(outputFolder = outputFolder)
  }
  if (packageResults) {
    OhdsiRTools::logInfo("Packaging results")
    packageResults(connectionDetails = connectionDetails,
                   cdmDatabaseSchema = cdmDatabaseSchema,
                   outputFolder = outputFolder,
                   minCellCount = minCellCount)
  }
  invisible(NULL)
}
|
/PneumoniaRiskOfPPI/R/Main.R
|
permissive
|
NEONKID/StudyProtocolSandbox
|
R
| false
| false
| 8,204
|
r
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of PneumoniaRiskOfPPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the PneumoniaRiskOfPPI study.
#'
#' The \code{createCohorts}, \code{synthesizePositiveControls}, \code{runAnalyses}, and \code{runDiagnostics} arguments
#' are intended to be used to run parts of the full study at a time, but none of the parts are considered to be optional.
#'
#' @param connectionDetails    An object of type \code{connectionDetails} as created using the
#'                             \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#'                             DatabaseConnector package.
#' @param cdmDatabaseSchema    Schema name where your patient-level data in OMOP CDM format resides.
#'                             Note that for SQL Server, this should include both the database and
#'                             schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                             write priviliges in this schema. Note that for SQL Server, this should
#'                             include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable          The name of the table that will be created in the work database schema.
#'                             This table will hold the exposure and outcome cohorts used in this
#'                             study. Defaults to "cohort".
#' @param oracleTempSchema     Should be used in Oracle to specify a schema where the user has write
#'                             priviliges for storing temporary tables.
#' @param outputFolder         Name of local folder to place results; make sure to use forward slashes
#'                             (/). Do not use a folder on a network drive since this greatly impacts
#'                             performance.
#' @param createCohorts        Create the cohortTable table with the exposure and outcome cohorts?
#' @param synthesizePositiveControls Should positive controls be synthesized?
#' @param runAnalyses          Perform the cohort method analyses?
#' @param runDiagnostics       Compute study diagnostics?
#' @param packageResults       Should results be packaged for later sharing?
#' @param maxCores             How many parallel cores should be used? If more cores are made available
#'                             this can speed up the analyses.
#' @param minCellCount         The minimum number of subjects contributing to a count before it can be included
#'                             in packaged results.
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#'                                              user = "joe",
#'                                              password = "secret",
#'                                              server = "myserver")
#'
#' execute(connectionDetails,
#'         cdmDatabaseSchema = "cdm_data",
#'         cohortDatabaseSchema = "study_results",
#'         cohortTable = "cohort",
#'         oracleTempSchema = NULL,
#'         outputFolder = "c:/temp/study_results",
#'         maxCores = 4)
#' }
#'
#' @export
# BUG FIX: the original defaults `cohortTable = cohortTable` and
# `outputFolder = outputFolder` were self-referential promises; any call
# relying on them failed with "promise already under evaluation".
# `cohortTable` now defaults to "cohort" (as in the @examples) and
# `outputFolder` is required. Callers that passed both explicitly (the only
# calls that ever worked) are unaffected.
execute <- function(connectionDetails,
                    cdmDatabaseSchema,
                    cohortDatabaseSchema = cdmDatabaseSchema,
                    cohortTable = "cohort",
                    oracleTempSchema = cohortDatabaseSchema,
                    outputFolder,
                    createCohorts = TRUE,
                    synthesizePositiveControls = TRUE,
                    runAnalyses = TRUE,
                    runDiagnostics = TRUE,
                    packageResults = TRUE,
                    maxCores = 4,
                    minCellCount = 5) {
  # Ensure the results folder exists and route log output into it.
  if (!file.exists(outputFolder))
    dir.create(outputFolder, recursive = TRUE)
  OhdsiRTools::addDefaultFileLogger(file.path(outputFolder, "log.txt"))
  if (createCohorts) {
    OhdsiRTools::logInfo("Creating exposure and outcome cohorts")
    createCohorts(connectionDetails = connectionDetails,
                  cdmDatabaseSchema = cdmDatabaseSchema,
                  cohortDatabaseSchema = cohortDatabaseSchema,
                  cohortTable = cohortTable,
                  oracleTempSchema = oracleTempSchema,
                  outputFolder = outputFolder)
  }
  if (synthesizePositiveControls) {
    OhdsiRTools::logInfo("Synthesizing positive controls")
    synthesizePositiveControls(connectionDetails = connectionDetails,
                               cdmDatabaseSchema = cdmDatabaseSchema,
                               cohortDatabaseSchema = cohortDatabaseSchema,
                               cohortTable = cohortTable,
                               oracleTempSchema = oracleTempSchema,
                               outputFolder = outputFolder,
                               maxCores = maxCores)
  }
  if (runAnalyses) {
    OhdsiRTools::logInfo("Running analyses")
    cmOutputFolder <- file.path(outputFolder, "cmOutput")
    if (!file.exists(cmOutputFolder))
      dir.create(cmOutputFolder)
    # Analysis settings ship inside the study package.
    cmAnalysisListFile <- system.file("settings",
                                      "cmAnalysisList.json",
                                      package = "PneumoniaRiskOfPPI")
    cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
    dcosList <- createTcos(outputFolder = outputFolder)
    # Thread counts are scaled from maxCores per task type.
    results <- CohortMethod::runCmAnalyses(connectionDetails = connectionDetails,
                                           cdmDatabaseSchema = cdmDatabaseSchema,
                                           exposureDatabaseSchema = cohortDatabaseSchema,
                                           exposureTable = cohortTable,
                                           outcomeDatabaseSchema = cohortDatabaseSchema,
                                           outcomeTable = cohortTable,
                                           outputFolder = cmOutputFolder,
                                           oracleTempSchema = oracleTempSchema,
                                           cmAnalysisList = cmAnalysisList,
                                           drugComparatorOutcomesList = dcosList,
                                           getDbCohortMethodDataThreads = min(3, maxCores),
                                           createStudyPopThreads = min(3, maxCores),
                                           createPsThreads = max(1, round(maxCores/10)),
                                           psCvThreads = min(10, maxCores),
                                           computeCovarBalThreads = min(3, maxCores),
                                           trimMatchStratifyThreads = min(10, maxCores),
                                           fitOutcomeModelThreads = max(1, round(maxCores/4)),
                                           outcomeCvThreads = min(4, maxCores),
                                           refitPsForEveryOutcome = FALSE)
  }
  if (runDiagnostics) {
    OhdsiRTools::logInfo("Running diagnostics")
    generateDiagnostics(outputFolder = outputFolder)
  }
  if (packageResults) {
    OhdsiRTools::logInfo("Packaging results")
    packageResults(connectionDetails = connectionDetails,
                   cdmDatabaseSchema = cdmDatabaseSchema,
                   outputFolder = outputFolder,
                   minCellCount = minCellCount)
  }
  invisible(NULL)
}
|
# Demo: effect of an outlier on variance, then principal-components
# analysis with the psych package.
x <- c(1,2,3,4,5)
summary(x)
x
var(x)
# Same values except 30 replaces 5 -- the outlier inflates the variance.
y <- c(1,2,3,4,30)
summary(y)
var(y)
library(psych)
# NOTE(review): re-installing psych on every run (twice, once from an
# archived tarball) is presumably a workaround for a broken install.
install.packages("https://cran.r-project.org/src/contrib/Archive/psych/psych_1.8.10.tar.gz")
install.packages("psych", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
library(psych)
names(USJudgeRatings)
dim(USJudgeRatings)
head(USJudgeRatings)
# Parallel-analysis scree plot to choose the number of components
# (column 1, the lawyer-contact count, is excluded).
fa.parallel(USJudgeRatings[,-1],fa="pc",n.iter = 100,show.legend = FALSE)
pc <- principal(USJudgeRatings[,-1],nfactors = 1)
pc
# PCA directly on a covariance/correlation matrix: n.obs must be supplied.
fa.parallel(Harman23.cor$cov,n.obs = 302,fa="pc",n.iter = 100,show.legend = FALSE,main ="kk")
kk <- principal(Harman23.cor$cov,nfactors = 2,rotate = "none")
kk
# Varimax-rotated two-component solution; EPVAL holds eigenvalue p-values.
pc2<-principal(Harman23.cor$cov,nfactors=2,rotate = "varimax",scores=T)
unclass(pc2$EPVAL)
|
/20190626-1.R
|
no_license
|
davidzhang725/R-Learning
|
R
| false
| false
| 729
|
r
|
# Demo: effect of an outlier on variance, then principal-components
# analysis with the psych package.
x <- c(1,2,3,4,5)
summary(x)
x
var(x)
# Same values except 30 replaces 5 -- the outlier inflates the variance.
y <- c(1,2,3,4,30)
summary(y)
var(y)
library(psych)
# NOTE(review): re-installing psych on every run (twice, once from an
# archived tarball) is presumably a workaround for a broken install.
install.packages("https://cran.r-project.org/src/contrib/Archive/psych/psych_1.8.10.tar.gz")
install.packages("psych", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
library(psych)
names(USJudgeRatings)
dim(USJudgeRatings)
head(USJudgeRatings)
# Parallel-analysis scree plot to choose the number of components
# (column 1, the lawyer-contact count, is excluded).
fa.parallel(USJudgeRatings[,-1],fa="pc",n.iter = 100,show.legend = FALSE)
pc <- principal(USJudgeRatings[,-1],nfactors = 1)
pc
# PCA directly on a covariance/correlation matrix: n.obs must be supplied.
fa.parallel(Harman23.cor$cov,n.obs = 302,fa="pc",n.iter = 100,show.legend = FALSE,main ="kk")
kk <- principal(Harman23.cor$cov,nfactors = 2,rotate = "none")
kk
# Varimax-rotated two-component solution; EPVAL holds eigenvalue p-values.
pc2<-principal(Harman23.cor$cov,nfactors=2,rotate = "varimax",scores=T)
unclass(pc2$EPVAL)
|
# Integrate sgRNA sequences for a list of target genes across four CRISPR
# libraries (Kinome, TKOv3, hGECKOv2, VBC top-6). For every unique guide
# sequence of each gene, record how often it occurs in each library, the
# number of libraries sharing it (`intersect`), and its VBC score.
library(readxl)
library(dplyr)
library(tidyr)
library(stringr)
## Input
target<-c(...) ## TODO: input the list of gene target (character vector of gene symbols)
vbc<-read_excel(".../VBC_hg38_top6_sgRNAs.xlsx",sheet = "Sheet1")
tkov3<-read_excel(".../TKOV3_guide_sequence.xlsx",sheet = "TKOv3-Human-Library")
geckov2a<-read.table(".../hGECKOV2_library_A_09mar2015.csv",header = T,sep = ',')
geckov2b<-read.table(".../hGECKOV2_library_B_09mar2015.csv",header = T,sep = ',')
# Combine the two GeCKOv2 half-libraries (dropping column 4) and sort by gene.
geckov2<-data.frame(rbind(geckov2a[,-4],geckov2b[,-4]))
geckov2<-geckov2[order(geckov2$gene_id),]
kinomea<-read_excel(".../KinomeKO.xlsx",sheet = "A")
kinomeb<-read_excel(".../KinomeKO.xlsx",sheet = "B")
kinome<-data.frame(rbind(kinomea,kinomeb))
kinome<-kinome[order(kinome$Target.Gene.ID),]
## Integration
# Seed data frame; the all-NA first row is dropped after the loop.
gRNA<-data.frame(matrix(ncol = 8))
colnames(gRNA)<-c("Gene","gRNA_seq","Kinome","TKOv3","hGECKOv2","vbc_top6","intersect","vbc_scores")
for (i in seq_along(target)) {
  # Per-library subsets for the current gene.
  geckov2_1<-data.frame(geckov2[geckov2$gene_id==as.character(target[i]),])
  tkov3_1<-data.frame(tkov3[tkov3$GENE==as.character(target[i]),])
  kinome_1<-data.frame(kinome[kinome$Target.Gene.Symbol==as.character(target[i]),])
  vbc_1<-data.frame(vbc[vbc$gene==as.character(target[i]),])
  vbc_1$sequence<-str_sub(vbc_1$sgRNA, end=-4) ##remove PAM from vbc$sgRNA
  # All distinct guide sequences for this gene across the four libraries.
  temp<-Reduce(union, list(kinome_1$sgRNA.Target.Sequence,tkov3_1$SEQUENCE,geckov2_1$seq,vbc_1$sequence))
  gRNA_temp<-data.frame(matrix(ncol = 8,nrow = length(temp))) ##gRNA info for this gene
  colnames(gRNA_temp)<-c("Gene","gRNA_seq","Kinome","TKOv3","hGECKOv2","vbc_top6","intersect","vbc_scores")
  gRNA_temp[,3:7]<-0
  # BUG FIX: `target` is an atomic vector, so the original `target$Target[i]`
  # would fail ("$ operator is invalid for atomic vectors"); index directly,
  # as everywhere else in this script.
  gRNA_temp$Gene<-rep(as.character(target[i]),length(temp))
  gRNA_temp$gRNA_seq<-temp
  for (j in seq_along(temp)) { ##j for each specific seq
    # Occurrence count of this exact sequence in each library.
    gRNA_temp$Kinome[j]<-sum(as.character(kinome_1$sgRNA.Target.Sequence)==temp[j])
    gRNA_temp$TKOv3[j]<-sum(as.character(tkov3_1$SEQUENCE)==temp[j])
    gRNA_temp$hGECKOv2[j]<-sum(as.character(geckov2_1$seq)==temp[j])
    gRNA_temp$vbc_top6[j]<-sum(as.character(vbc_1$sequence)==temp[j])
    if (sum(vbc_1$sequence==temp[j])){ ##seq is in the vbc
      vbc_temp<-vbc_1[vbc_1$sequence==temp[j],]
      # Take the first match so a sequence duplicated within VBC cannot
      # trigger a length > 1 assignment into a single cell.
      gRNA_temp$vbc_scores[j]=vbc_temp$VBC.score[1]
    }
  }
  # `intersect` = number of libraries containing the guide (each count is
  # 0/1 per library for unique guides).
  gRNA_temp$intersect=gRNA_temp$Kinome+gRNA_temp$TKOv3+gRNA_temp$hGECKOv2+gRNA_temp$vbc_top6
  gRNA<-rbind(gRNA,gRNA_temp)
}
gRNA<-gRNA[-1,]  # drop the all-NA seed row
gRNA$length<-nchar(gRNA$gRNA_seq)
#write.xlsx(gRNA, "C:/Users/tang53/Box Sync/1Lab/1LabProject/CDKO/prep/TargetSum.xlsx", sheetName="gRNA_seq_V2", append=TRUE)
|
/sgRNA_info/sgRNA_integration.R
|
no_license
|
Shantang3/Combinatorial-CRISPR-Screen-Anaylysis-Pipeline
|
R
| false
| false
| 2,714
|
r
|
## Integrate sgRNA annotations for a set of target genes across four guide
## libraries (Kinome, TKOv3, hGECKOv2, VBC top-6).  For every unique guide
## sequence observed for a gene in any library, count how many times it
## occurs in each library and record the VBC score when available.
library(readxl)
library(dplyr)
library(tidyr)
library(stringr)

## Input ----------------------------------------------------------------------
target <- c(...) ## TODO: input the character vector of gene targets
vbc <- read_excel(".../VBC_hg38_top6_sgRNAs.xlsx", sheet = "Sheet1")
tkov3 <- read_excel(".../TKOV3_guide_sequence.xlsx", sheet = "TKOv3-Human-Library")
geckov2a <- read.table(".../hGECKOV2_library_A_09mar2015.csv", header = TRUE, sep = ',')
geckov2b <- read.table(".../hGECKOV2_library_B_09mar2015.csv", header = TRUE, sep = ',')
## column 4 differs between the two half-libraries, so drop it before stacking
geckov2 <- data.frame(rbind(geckov2a[, -4], geckov2b[, -4]))
geckov2 <- geckov2[order(geckov2$gene_id), ]
kinomea <- read_excel(".../KinomeKO.xlsx", sheet = "A")
kinomeb <- read_excel(".../KinomeKO.xlsx", sheet = "B")
kinome <- data.frame(rbind(kinomea, kinomeb))
kinome <- kinome[order(kinome$Target.Gene.ID), ]

## Integration ----------------------------------------------------------------
gRNA_cols <- c("Gene", "gRNA_seq", "Kinome", "TKOv3", "hGECKOv2", "vbc_top6",
               "intersect", "vbc_scores")
## Collect one data frame per gene and bind once at the end instead of
## growing 'gRNA' with rbind() inside the loop (avoids O(n^2) copying and
## the all-NA seed row that previously had to be stripped with gRNA[-1, ]).
per_gene <- vector("list", length(target))
for (i in seq_along(target)) {
  gene <- as.character(target[i])
  ## per-library subsets for this gene
  geckov2_1 <- data.frame(geckov2[geckov2$gene_id == gene, ])
  tkov3_1 <- data.frame(tkov3[tkov3$GENE == gene, ])
  kinome_1 <- data.frame(kinome[kinome$Target.Gene.Symbol == gene, ])
  vbc_1 <- data.frame(vbc[vbc$gene == gene, ])
  vbc_1$sequence <- str_sub(vbc_1$sgRNA, end = -4) ## remove PAM (last 3 nt) from vbc$sgRNA
  ## every distinct guide sequence observed for this gene in any library
  temp <- Reduce(union, list(kinome_1$sgRNA.Target.Sequence, tkov3_1$SEQUENCE,
                             geckov2_1$seq, vbc_1$sequence))
  gRNA_temp <- data.frame(matrix(ncol = 8, nrow = length(temp))) ## gRNA info for this gene
  colnames(gRNA_temp) <- gRNA_cols
  gRNA_temp[, 3:7] <- 0           # count columns start at zero; vbc_scores stays NA
  ## BUG FIX: 'target' is an atomic vector (see its definition above), so the
  ## original target$Target[i] would error; index the vector directly.
  gRNA_temp$Gene <- rep(gene, length(temp))
  gRNA_temp$gRNA_seq <- temp
  for (j in seq_along(temp)) { ## j indexes each specific guide sequence
    ## occurrence counts of this sequence in each library
    gRNA_temp$Kinome[j] <- sum(as.character(kinome_1$sgRNA.Target.Sequence) == temp[j])
    gRNA_temp$TKOv3[j] <- sum(as.character(tkov3_1$SEQUENCE) == temp[j])
    gRNA_temp$hGECKOv2[j] <- sum(as.character(geckov2_1$seq) == temp[j])
    gRNA_temp$vbc_top6[j] <- sum(as.character(vbc_1$sequence) == temp[j])
    if (any(vbc_1$sequence == temp[j], na.rm = TRUE)) { ## sequence is in the vbc library
      vbc_temp <- vbc_1[vbc_1$sequence == temp[j], ]
      ## take the first match so the scalar cell assignment is always length 1
      gRNA_temp$vbc_scores[j] <- vbc_temp$VBC.score[1]
    }
  }
  ## total occurrences across all four libraries (counts multiplicity)
  gRNA_temp$intersect <- gRNA_temp$Kinome + gRNA_temp$TKOv3 +
    gRNA_temp$hGECKOv2 + gRNA_temp$vbc_top6
  per_gene[[i]] <- gRNA_temp
}
gRNA <- do.call(rbind, per_gene)
gRNA$length <- nchar(gRNA$gRNA_seq)
#write.xlsx(gRNA, "C:/Users/tang53/Box Sync/1Lab/1LabProject/CDKO/prep/TargetSum.xlsx", sheetName="gRNA_seq_V2", append=TRUE)
|
### R code from vignette source 'modbin-foodstamp.Rnw'
###################################################
### code chunk number 1: modbin-foodstamp.Rnw:12-15 (eval = FALSE)
###################################################
## library(catdata)
## data(foodstamp)
## attach(foodstamp)
###################################################
### code chunk number 2: modbin-foodstamp.Rnw:18-20 (eval = FALSE)
###################################################
## food1 <- glm(y ~ TEN + SUP + INC, family=binomial, data=foodstamp)
## summary(food1)
###################################################
### code chunk number 3: modbin-foodstamp.Rnw:23-24 (eval = FALSE)
###################################################
## plot(food1,2)
|
/data/genthat_extracted_code/catdata/vignettes/modbin-foodstamp.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 763
|
r
|
### R code from vignette source 'modbin-foodstamp.Rnw'
###################################################
### code chunk number 1: modbin-foodstamp.Rnw:12-15 (eval = FALSE)
###################################################
## library(catdata)
## data(foodstamp)
## attach(foodstamp)
###################################################
### code chunk number 2: modbin-foodstamp.Rnw:18-20 (eval = FALSE)
###################################################
## food1 <- glm(y ~ TEN + SUP + INC, family=binomial, data=foodstamp)
## summary(food1)
###################################################
### code chunk number 3: modbin-foodstamp.Rnw:23-24 (eval = FALSE)
###################################################
## plot(food1,2)
|
# Page Structure Module --------------------------------------------------------
# UI ---------------------------------------------------------------------------
#' pagestructureUI
#'
#' UI side of the page-structure module: a single namespaced placeholder
#' that the module server fills with the rendered sidebar.
#'
#' @rdname pagestructure
#'
#' @return List of tags
#'
#' @export
pagestructureUI <- function(id) {
  sidebar_output_id <- NS(id)("sidebar")
  tagList(uiOutput(sidebar_output_id))
}
# Server -----------------------------------------------------------------------
#' Page Structure
#'
#' Shiny module server driving the page sidebar: renders the sidebar UI,
#' tracks its collapsed/expanded state and translates the navigation-menu
#' button clicks into navigation-state updates.
#'
#' @rdname pagestructure
#' @template return-outputNavigation
#' @template params-module
#' @template params-active
#'
#' @return collapsed status of panel.
#'
#' @importFrom shinyWidgets toggleDropdownButton
#'
#' @export
pagestructure <- function(input, output, session,
                          active = reactive(TRUE)) {
  ## NOTE(review): 'active' is not read anywhere in this body -- confirm
  ## whether it is intentionally unused here.
  ns <- session$ns
  ## Shared navigation state; the observers below push page codes into it.
  navigation_state <- reactiveNavigation()
  ## Local reactive state: whether the sidebar is currently collapsed.
  state <- reactiveValues(
    collapsed = FALSE
  )
  ## Toggle collapsed/expanded on every press of the collapse button.
  observeEvent(input$abuttoncollapsesidebar, {
    state$collapsed <- !state$collapsed
  })
  ## Re-render the sidebar whenever the collapsed flag changes.
  observe({
    output$sidebar <-
      renderUI(pagestructureSidebar(ns, state$collapsed))
  })
  ### Navigation Menu ----------------------------------------------------------
  ## Each observer maps one menu button to a navigation code and, for the
  ## dropdown entries, closes the dropdown again.  (Meaning of the codes
  ## "SA"/"BA"/"SBR"/"BBR"/"CBR"/"LP" is defined by updateNavigation() --
  ## see its documentation.)
  observeEvent(input$abuttondefineanasingle, {
    updateNavigation(navigation_state, "SA")
    toggleDropdownButton(ns("abuttonanalysis"))
  })
  observeEvent(input$abuttondefineanabatch, {
    updateNavigation(navigation_state, "BA")
    toggleDropdownButton(ns("abuttonanalysis"))
  })
  observeEvent(input$abuttonbrowseSBR, {
    updateNavigation(navigation_state, "SBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonbrowseBBR, {
    updateNavigation(navigation_state, "BBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonbrowseCBR, {
    updateNavigation(navigation_state, "CBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonhome, {
    updateNavigation(navigation_state, "LP")
  })
  ### Module Output ------------------------------------------------------------
  ## Expose the navigation state plus the collapsed flag to the caller.
  moduleOutput <- c(
    outputNavigation(navigation_state),
    list(
      collapsed = reactive(state$collapsed)
    ) # placeholder
  )
  moduleOutput
}
|
/BFE_RShiny/oasisui/R/pagestructure_module.R
|
permissive
|
smacintyreR/OasisUI
|
R
| false
| false
| 2,203
|
r
|
# Page Structure Module --------------------------------------------------------
# UI ---------------------------------------------------------------------------
#' pagestructureUI
#'
#' UI side of the page-structure module: emits a namespaced placeholder
#' that the module server fills with the rendered sidebar.
#'
#' @rdname pagestructure
#'
#' @return List of tags
#'
#' @export
pagestructureUI <- function(id) {
  ns <- NS(id)
  tagList(
    uiOutput(ns("sidebar"))  # populated by renderUI() in the module server
  )
}
# Server -----------------------------------------------------------------------
#' Page Structure
#'
#' Shiny module server driving the page sidebar: renders the sidebar UI,
#' tracks its collapsed/expanded state and translates the navigation-menu
#' button clicks into navigation-state updates.
#'
#' @rdname pagestructure
#' @template return-outputNavigation
#' @template params-module
#' @template params-active
#'
#' @return collapsed status of panel.
#'
#' @importFrom shinyWidgets toggleDropdownButton
#'
#' @export
pagestructure <- function(input, output, session,
                          active = reactive(TRUE)) {
  ## NOTE(review): 'active' is not read anywhere in this body -- confirm
  ## whether it is intentionally unused here.
  ns <- session$ns
  ## Shared navigation state; the observers below push page codes into it.
  navigation_state <- reactiveNavigation()
  ## Local reactive state: whether the sidebar is currently collapsed.
  state <- reactiveValues(
    collapsed = FALSE
  )
  ## Toggle collapsed/expanded on every press of the collapse button.
  observeEvent(input$abuttoncollapsesidebar, {
    state$collapsed <- !state$collapsed
  })
  ## Re-render the sidebar whenever the collapsed flag changes.
  observe({
    output$sidebar <-
      renderUI(pagestructureSidebar(ns, state$collapsed))
  })
  ### Navigation Menu ----------------------------------------------------------
  ## Each observer maps one menu button to a navigation code and, for the
  ## dropdown entries, closes the dropdown again.  (Meaning of the codes
  ## "SA"/"BA"/"SBR"/"BBR"/"CBR"/"LP" is defined by updateNavigation() --
  ## see its documentation.)
  observeEvent(input$abuttondefineanasingle, {
    updateNavigation(navigation_state, "SA")
    toggleDropdownButton(ns("abuttonanalysis"))
  })
  observeEvent(input$abuttondefineanabatch, {
    updateNavigation(navigation_state, "BA")
    toggleDropdownButton(ns("abuttonanalysis"))
  })
  observeEvent(input$abuttonbrowseSBR, {
    updateNavigation(navigation_state, "SBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonbrowseBBR, {
    updateNavigation(navigation_state, "BBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonbrowseCBR, {
    updateNavigation(navigation_state, "CBR")
    toggleDropdownButton(ns("abuttonbrowse"))
  })
  observeEvent(input$abuttonhome, {
    updateNavigation(navigation_state, "LP")
  })
  ### Module Output ------------------------------------------------------------
  ## Expose the navigation state plus the collapsed flag to the caller.
  moduleOutput <- c(
    outputNavigation(navigation_state),
    list(
      collapsed = reactive(state$collapsed)
    ) # placeholder
  )
  moduleOutput
}
|
## Construct a "cache matrix": a list of closures wrapping a matrix 'x'
## together with a cached value of its inverse.  The four closures share
## state through their enclosing environment; the cached inverse is reset
## to NULL whenever the matrix is replaced via set().
makeCacheMatrix <- function(x = matrix()) {
  ## assumes 'x' is a square invertible matrix; consumed by cacheSolve()
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    x <<- y
    cached_inverse <<- NULL   # stale cache must be dropped
  }
  fetch_matrix <- function() x
  store_inverse <- function(inverse) cached_inverse <<- inverse  # write cache
  fetch_inverse <- function() cached_inverse                     # read cache
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setMyInv = store_inverse,
    getMyInv = fetch_inverse
  )
}
## Return the inverse of the special "matrix" built by makeCacheMatrix().
## The inverse is computed at most once: a previously cached value is
## reused when available, otherwise solve() is called and the result is
## stored back into the cache.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getMyInv()
  if (!is.null(cached)) {
    ## cache hit: skip recomputation entirely
    message("getting cached data")
    return(cached)
  }
  ## cache miss: invert the stored square matrix and memoise the result
  fresh <- solve(x$get(), ...)
  x$setMyInv(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
rhavlir/ProgrammingAssignment2
|
R
| false
| false
| 1,438
|
r
|
## This function very closely models the example cache-mean-of-vector
## functions used in the assignment description.
## It creates a special "matrix" object that can cache its inverse: a list
## of four closures that share the matrix 'x' and the cached inverse
## 'myInv' through their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  ## function assumes that the matrix is a square invertible matrix;
  ## used to run the cacheSolve function
  myInv <- NULL
  set <- function(y) {
    x <<- y
    myInv <<- NULL  # replacing the matrix invalidates the cached inverse
  }
  get <- function() x
  ## store in cache
  setMyInv <- function(inverse) myInv <<- inverse
  ## get from cache (NULL when nothing has been cached yet)
  getMyInv <- function() myInv
  list(set = set, get = get,
       setMyInv = setMyInv,
       getMyInv = getMyInv)
}
## This function computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` retrieves the inverse from the cache instead of
## recomputing it.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  myInv <- x$getMyInv()
  ## check to see if the inverse has already been calculated
  ## and if so get it from the cache instead of recalculating
  if(!is.null(myInv)) {
    message("getting cached data")
    return(myInv)
  }
  ## if not already calculated, use solve() to invert the square matrix
  data <- x$get()
  myInv <- solve(data, ...)
  x$setMyInv(myInv)  # memoise for the next call
  myInv
}
|
## File Name: mlnormal_update_ml_derivative_V.R
## File Version: 0.21
##############################################
## Derivatives of the covariance matrix V with respect to each variance
## parameter theta[pp], computed per group gg.
##
## V_gg is modelled as a sum of design matrices Z_list[[gg]][[ii]], each
## scaled by a product of powers of the parameters theta; the integer
## powers live in Z_index[gg, ii, pp].  For every (pp, gg) this returns
## the analytic derivative dV_gg/dtheta[pp] and, under REML, the sandwich
## product V^{-1} (dV/dtheta) V^{-1}.
## N, variance_shortcut and freq_id are not read in this body -- kept for
## the caller's interface (TODO confirm).
mlnormal_update_ml_derivative_V <- function( N, NT, G,
    Z_index, Z_list, theta, REML, V1_list,
    variance_shortcut, freq_id, do_compute ){
    D1_V_list <- as.list(1:NT)       # per parameter pp: list of per-group derivatives
    D1_V_pp_list <- as.list(1:G)     # scratch: per-group derivatives for current pp
    V1_D1V_V1_list <- D1_V_list      # REML sandwich products, same layout
    V1_D1V_V1_pp_list <- D1_V_pp_list
    do_computation <- TRUE           # NOTE(review): assigned but never read below
    for (pp in 1:NT){
        # pp <- 1 # parameter pp
        for (gg in 1:G){
            # gg <- 1
            if ( do_compute[gg] ){
                # powers of theta for this group: 1 x (terms) x NT slice
                Z_index_gg <- Z_index[ gg,,, drop=FALSE ]
                # only terms with a non-zero power of theta[pp] contribute
                index_pp <- which( Z_index_gg[,,pp] !=0 )
                # accumulator with the same shape as the design matrices
                H0 <- 0*Z_list[[gg]][[1]]
                #**** correct these lines!!!
                # derivatives are allowed for powers of parameters
                for (ii in index_pp){
                    # ii <- index_pp[1]
                    i1 <- Z_index_gg[1,ii,pp]
                    # computing derivatives
                    # if ( i1==1 ){ a1 <- 1 }
                    # if ( i1==2 ){ a1 <- 2*theta[pp] }
                    # a1 <- i1 * theta[pp]^(i1-1) # d/dx x^p=p x^(p-1)
                    a1 <- i1 * theta[pp]^( i1-1 )
                    #*******
                    # correction ARb 2016-07-15
                    # remaining parameters enter as a multiplicative constant
                    a2 <- prod( ( theta[-pp])^( Z_index_gg[1,ii, - pp ] ) )
                    a1 <- a1*a2
                    H0 <- H0 + a1 * Z_list[[gg]][[ii]]
                }
            }
            # NOTE(review): when do_compute[gg] is FALSE, H0 (and V2_gg below)
            # carry over from the previous iteration; on the very first skipped
            # group they would be undefined -- presumably the caller guarantees
            # this cannot happen.  TODO confirm.
            D1_V_pp_list[[gg]] <- H0
            if (REML){
                if ( do_compute[gg] ){
                    V1_gg <- V1_list[[gg]]
                    # sandwich product V^{-1} (dV/dtheta[pp]) V^{-1} used by REML
                    V2_gg <- V1_gg %*% H0 %*% V1_gg
                }
                V1_D1V_V1_pp_list[[gg]] <- V2_gg
            }
        } # end group gg
        D1_V_list[[pp]] <- D1_V_pp_list
        V1_D1V_V1_list[[pp]] <- V1_D1V_V1_pp_list
    } # end parameter pp
    #--- output
    res <- list( "D1_V_list"=D1_V_list, V1_D1V_V1_list=V1_D1V_V1_list )
    return(res)
}
|
/LAM/R/mlnormal_update_ml_derivative_V.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 2,108
|
r
|
## File Name: mlnormal_update_ml_derivative_V.R
## File Version: 0.21
##############################################
## Derivatives of the covariance matrix V with respect to each variance
## parameter theta[pp], computed per group gg.
##
## V_gg is modelled as a sum of design matrices Z_list[[gg]][[ii]], each
## scaled by a product of powers of the parameters theta; the integer
## powers live in Z_index[gg, ii, pp].  For every (pp, gg) this returns
## the analytic derivative dV_gg/dtheta[pp] and, under REML, the sandwich
## product V^{-1} (dV/dtheta) V^{-1}.
## N, variance_shortcut and freq_id are not read in this body -- kept for
## the caller's interface (TODO confirm).
mlnormal_update_ml_derivative_V <- function( N, NT, G,
    Z_index, Z_list, theta, REML, V1_list,
    variance_shortcut, freq_id, do_compute ){
    D1_V_list <- as.list(1:NT)       # per parameter pp: list of per-group derivatives
    D1_V_pp_list <- as.list(1:G)     # scratch: per-group derivatives for current pp
    V1_D1V_V1_list <- D1_V_list      # REML sandwich products, same layout
    V1_D1V_V1_pp_list <- D1_V_pp_list
    do_computation <- TRUE           # NOTE(review): assigned but never read below
    for (pp in 1:NT){
        # pp <- 1 # parameter pp
        for (gg in 1:G){
            # gg <- 1
            if ( do_compute[gg] ){
                # powers of theta for this group: 1 x (terms) x NT slice
                Z_index_gg <- Z_index[ gg,,, drop=FALSE ]
                # only terms with a non-zero power of theta[pp] contribute
                index_pp <- which( Z_index_gg[,,pp] !=0 )
                # accumulator with the same shape as the design matrices
                H0 <- 0*Z_list[[gg]][[1]]
                #**** correct these lines!!!
                # derivatives are allowed for powers of parameters
                for (ii in index_pp){
                    # ii <- index_pp[1]
                    i1 <- Z_index_gg[1,ii,pp]
                    # computing derivatives
                    # if ( i1==1 ){ a1 <- 1 }
                    # if ( i1==2 ){ a1 <- 2*theta[pp] }
                    # a1 <- i1 * theta[pp]^(i1-1) # d/dx x^p=p x^(p-1)
                    a1 <- i1 * theta[pp]^( i1-1 )
                    #*******
                    # correction ARb 2016-07-15
                    # remaining parameters enter as a multiplicative constant
                    a2 <- prod( ( theta[-pp])^( Z_index_gg[1,ii, - pp ] ) )
                    a1 <- a1*a2
                    H0 <- H0 + a1 * Z_list[[gg]][[ii]]
                }
            }
            # NOTE(review): when do_compute[gg] is FALSE, H0 (and V2_gg below)
            # carry over from the previous iteration; on the very first skipped
            # group they would be undefined -- presumably the caller guarantees
            # this cannot happen.  TODO confirm.
            D1_V_pp_list[[gg]] <- H0
            if (REML){
                if ( do_compute[gg] ){
                    V1_gg <- V1_list[[gg]]
                    # sandwich product V^{-1} (dV/dtheta[pp]) V^{-1} used by REML
                    V2_gg <- V1_gg %*% H0 %*% V1_gg
                }
                V1_D1V_V1_pp_list[[gg]] <- V2_gg
            }
        } # end group gg
        D1_V_list[[pp]] <- D1_V_pp_list
        V1_D1V_V1_list[[pp]] <- V1_D1V_V1_pp_list
    } # end parameter pp
    #--- output
    res <- list( "D1_V_list"=D1_V_list, V1_D1V_V1_list=V1_D1V_V1_list )
    return(res)
}
|
## Example from the 'queueing' package: compute L, the mean number of
## customers, for an M/M/1 queue with arrival rate lambda=1/4 and service
## rate mu=1/3.
library(queueing)
### Name: L
### Title: Returns the mean number of customers in a queueing model (or
###   network)
### Aliases: L
### Keywords: Queueing Models
### ** Examples
## create input parameters (see ?NewInput.MM1 for the meaning of n)
i_mm1 <- NewInput.MM1(lambda=1/4, mu=1/3, n=0)
## Build the model
o_mm1 <- QueueingModel(i_mm1)
## Returns the L
L(o_mm1)
|
/data/genthat_extracted_code/queueing/examples/L.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 336
|
r
|
## Example from the 'queueing' package: compute L, the mean number of
## customers, for an M/M/1 queue with arrival rate lambda=1/4 and service
## rate mu=1/3.
library(queueing)
### Name: L
### Title: Returns the mean number of customers in a queueing model (or
###   network)
### Aliases: L
### Keywords: Queueing Models
### ** Examples
## create input parameters (see ?NewInput.MM1 for the meaning of n)
i_mm1 <- NewInput.MM1(lambda=1/4, mu=1/3, n=0)
## Build the model
o_mm1 <- QueueingModel(i_mm1)
## Returns the L
L(o_mm1)
|
## Fuzzer-generated regression case: call the internal
## breakfast:::setBitNumber with a fixed integer argument and print the
## structure of whatever it returns.
testlist <- list(n = 180667588L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
/breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609962436-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 97
|
r
|
## Fuzzer-generated regression case: call the internal
## breakfast:::setBitNumber with a fixed integer argument and print the
## structure of whatever it returns.
testlist <- list(n = 180667588L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
## Exploratory Data Analysis "plot4": a 2x2 panel of household power
## consumption plots for 2007-02-01 and 2007-02-02, written to plot4.png.
#read data ('?' encodes missing values; Date/Time are character, rest numeric)
power <-read.table("household_power_consumption.txt",header=TRUE, sep=";",na.strings="?",colClasses=c(rep('character',2),rep('numeric',7)))
## keep only the two target days (dates are d/m/Y strings in the raw file)
power <- power[power$Date=='1/2/2007' | power$Date=='2/2/2007',]
#merge data: combine the Date and Time strings into one POSIXct column
dateTime <- as.POSIXct(paste(power$Date, power$Time, sep = ";"), format = "%d/%m/%Y;%H:%M:%S")
power$Date <- NULL
power$Time <- NULL
power <- cbind("DateTime" = dateTime, power)
remove(dateTime)
#plot graph: four line plots in a 2x2 grid, filled row-wise
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2,2))
plot(power$DateTime, power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(power$DateTime, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## third panel overlays the three sub-metering series in black/red/blue
plot(power$DateTime, power$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(power$DateTime, power$Sub_metering_2, type = "l", col = "red")
lines(power$DateTime, power$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), col = c("black", "red", "blue"))
plot(power$DateTime, power$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power", lwd = 0.5)
#turn off graphic device so the PNG file is flushed to disk
dev.off()
|
/plot4.R
|
no_license
|
onghf/Exploratory_Data_Analysis
|
R
| false
| false
| 1,245
|
r
|
## Exploratory Data Analysis "plot4": a 2x2 panel of household power
## consumption plots for 2007-02-01 and 2007-02-02, written to plot4.png.
#read data ('?' encodes missing values; Date/Time are character, rest numeric)
power <-read.table("household_power_consumption.txt",header=TRUE, sep=";",na.strings="?",colClasses=c(rep('character',2),rep('numeric',7)))
## keep only the two target days (dates are d/m/Y strings in the raw file)
power <- power[power$Date=='1/2/2007' | power$Date=='2/2/2007',]
#merge data: combine the Date and Time strings into one POSIXct column
dateTime <- as.POSIXct(paste(power$Date, power$Time, sep = ";"), format = "%d/%m/%Y;%H:%M:%S")
power$Date <- NULL
power$Time <- NULL
power <- cbind("DateTime" = dateTime, power)
remove(dateTime)
#plot graph: four line plots in a 2x2 grid, filled row-wise
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2,2))
plot(power$DateTime, power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(power$DateTime, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## third panel overlays the three sub-metering series in black/red/blue
plot(power$DateTime, power$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(power$DateTime, power$Sub_metering_2, type = "l", col = "red")
lines(power$DateTime, power$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), col = c("black", "red", "blue"))
plot(power$DateTime, power$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power", lwd = 0.5)
#turn off graphic device so the PNG file is flushed to disk
dev.off()
|
## Worked examples for the randomForest package: classification,
## unsupervised proximity, stratified sampling, regression, matrix inputs,
## formula tricks, large factors, node-size limits and regression proximity.
library(randomForest)
### Name: randomForest
### Title: Classification and Regression with Random Forest
### Aliases: randomForest randomForest.formula randomForest.default
###   print.randomForest
### Keywords: classif regression tree
### ** Examples
## Classification:
##data(iris)
set.seed(71)
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE)
print(iris.rf)
## Look at variable importance:
round(importance(iris.rf), 2)
## Do MDS on 1 - proximity:
iris.mds <- cmdscale(1 - iris.rf$proximity, eig=TRUE)
op <- par(pty="s")
pairs(cbind(iris[,1:4], iris.mds$points), cex=0.6, gap=0,
      col=c("red", "green", "blue")[as.numeric(iris$Species)],
      main="Iris Data: Predictors and MDS of Proximity Based on RandomForest")
par(op)
print(iris.mds$GOF)
## The `unsupervised' case:
set.seed(17)
iris.urf <- randomForest(iris[, -5])
MDSplot(iris.urf, iris$Species)
## stratified sampling: draw 20, 30, and 20 of the species to grow each tree.
(iris.rf2 <- randomForest(iris[1:4], iris$Species,
                          sampsize=c(20, 30, 20)))
## Regression:
## data(airquality)
set.seed(131)
ozone.rf <- randomForest(Ozone ~ ., data=airquality, mtry=3,
                         importance=TRUE, na.action=na.omit)
print(ozone.rf)
## Show "importance" of variables: higher value mean more important:
round(importance(ozone.rf), 2)
## "x" can be a matrix instead of a data frame:
set.seed(17)
x <- matrix(runif(5e2), 100)
y <- gl(2, 50)
(myrf <- randomForest(x, y))
(predict(myrf, x))
## "complicated" formula:
(swiss.rf <- randomForest(sqrt(Fertility) ~ . - Catholic + I(Catholic < 50),
                          data=swiss))
(predict(swiss.rf, swiss))
## Test use of 32-level factor as a predictor:
set.seed(1)
x <- data.frame(x1=gl(53, 10), x2=runif(530), y=rnorm(530))
(rf1 <- randomForest(x[-3], x[[3]], ntree=10))
## Grow no more than 4 nodes per tree:
(treesize(randomForest(Species ~ ., data=iris, maxnodes=4, ntree=30)))
## test proximity in regression
iris.rrf <- randomForest(iris[-1], iris[[1]], ntree=101, proximity=TRUE, oob.prox=FALSE)
str(iris.rrf$proximity)
|
/data/genthat_extracted_code/randomForest/examples/randomForest.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,143
|
r
|
## Worked examples for the randomForest package: classification,
## unsupervised proximity, stratified sampling, regression, matrix inputs,
## formula tricks, large factors, node-size limits and regression proximity.
library(randomForest)
### Name: randomForest
### Title: Classification and Regression with Random Forest
### Aliases: randomForest randomForest.formula randomForest.default
###   print.randomForest
### Keywords: classif regression tree
### ** Examples
## Classification:
##data(iris)
set.seed(71)
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE)
print(iris.rf)
## Look at variable importance:
round(importance(iris.rf), 2)
## Do MDS on 1 - proximity:
iris.mds <- cmdscale(1 - iris.rf$proximity, eig=TRUE)
op <- par(pty="s")
pairs(cbind(iris[,1:4], iris.mds$points), cex=0.6, gap=0,
      col=c("red", "green", "blue")[as.numeric(iris$Species)],
      main="Iris Data: Predictors and MDS of Proximity Based on RandomForest")
par(op)
print(iris.mds$GOF)
## The `unsupervised' case:
set.seed(17)
iris.urf <- randomForest(iris[, -5])
MDSplot(iris.urf, iris$Species)
## stratified sampling: draw 20, 30, and 20 of the species to grow each tree.
(iris.rf2 <- randomForest(iris[1:4], iris$Species,
                          sampsize=c(20, 30, 20)))
## Regression:
## data(airquality)
set.seed(131)
ozone.rf <- randomForest(Ozone ~ ., data=airquality, mtry=3,
                         importance=TRUE, na.action=na.omit)
print(ozone.rf)
## Show "importance" of variables: higher value mean more important:
round(importance(ozone.rf), 2)
## "x" can be a matrix instead of a data frame:
set.seed(17)
x <- matrix(runif(5e2), 100)
y <- gl(2, 50)
(myrf <- randomForest(x, y))
(predict(myrf, x))
## "complicated" formula:
(swiss.rf <- randomForest(sqrt(Fertility) ~ . - Catholic + I(Catholic < 50),
                          data=swiss))
(predict(swiss.rf, swiss))
## Test use of 32-level factor as a predictor:
set.seed(1)
x <- data.frame(x1=gl(53, 10), x2=runif(530), y=rnorm(530))
(rf1 <- randomForest(x[-3], x[[3]], ntree=10))
## Grow no more than 4 nodes per tree:
(treesize(randomForest(Species ~ ., data=iris, maxnodes=4, ntree=30)))
## test proximity in regression
iris.rrf <- randomForest(iris[-1], iris[[1]], ntree=101, proximity=TRUE, oob.prox=FALSE)
str(iris.rrf$proximity)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iot_operations.R
\name{start_thing_registration_task}
\alias{start_thing_registration_task}
\title{Creates a bulk thing provisioning task}
\usage{
start_thing_registration_task(templateBody, inputFileBucket, inputFileKey,
roleArn)
}
\arguments{
\item{templateBody}{[required] The provisioning template.}
\item{inputFileBucket}{[required] The S3 bucket that contains the input file.}
\item{inputFileKey}{[required] The name of the input file within the S3 bucket. The file is newline-delimited JSON; each line contains the parameter values to provision one device (thing).}
\item{roleArn}{[required] The IAM role ARN that grants permission to access the input file.}
}
\description{
Creates a bulk thing provisioning task.
}
\section{Accepted Parameters}{
\preformatted{start_thing_registration_task(
templateBody = "string",
inputFileBucket = "string",
inputFileKey = "string",
roleArn = "string"
)
}
}
|
/service/paws.iot/man/start_thing_registration_task.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,000
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iot_operations.R
\name{start_thing_registration_task}
\alias{start_thing_registration_task}
\title{Creates a bulk thing provisioning task}
\usage{
start_thing_registration_task(templateBody, inputFileBucket, inputFileKey,
roleArn)
}
\arguments{
\item{templateBody}{[required] The provisioning template.}
\item{inputFileBucket}{[required] The S3 bucket that contains the input file.}
\item{inputFileKey}{[required] The name of the input file within the S3 bucket. The file is newline-delimited JSON; each line contains the parameter values to provision one device (thing).}
\item{roleArn}{[required] The IAM role ARN that grants permission to access the input file.}
}
\description{
Creates a bulk thing provisioning task.
}
\section{Accepted Parameters}{
\preformatted{start_thing_registration_task(
templateBody = "string",
inputFileBucket = "string",
inputFileKey = "string",
roleArn = "string"
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nest_fit.R
\name{funest_fit}
\alias{funest_fit}
\title{Fitting functional ensemble survival tree model}
\usage{
funest_fit(
long_train,
surv_train,
noftree = 500,
nofcov = 2,
split_rule = "maxstat",
tv_names,
fv_names,
nofp = 3,
t_star,
t_pred,
...
)
}
\arguments{
\item{long_train}{long form of survival data from the training set}
\item{surv_train}{short form of survival data from the training set}
\item{noftree}{number of trees in the random survival forest}
\item{nofcov}{number of covariates selected in each survival tree}
\item{split_rule}{binary splitting rule for random survival forest, default is "maxstat"}
\item{tv_names}{a list of names of time-varying covariates}
\item{fv_names}{a list of names of fixed covariates}
\item{nofp}{number of multivariate principal components}
\item{t_star}{time for the last observed biomarker measurement}
\item{t_pred}{time at prediction}
\item{...}{extra arguments that can be passed to ranger()}
}
\value{
A list composed of two items. The first item is a list
of the information needed for prediction by the funest_pred()
function. The second item is the ranger object of the fitted
random survival forest.
\itemize{
\item misc - a list composed of 1) long_train: long form of survival data from the training set,
2) surv_train: short form of survival data from the training set,
3) fmla: covariates passed into the ensemble survival tree
4) score_names: intermediate names for the covariates
5) nofp: number of multivariate principal components
6) train_data.sub: data frame of all covariates after MFPCA been performed
\item rg - functional ensemble survival tree model
}
}
\description{
The function funest_fit takes a long and a short form of the survival data,
among other arguments for a random survival forest, and fits a functional ensemble survival tree
model for predicting survival probability.
}
\examples{
library(funest)
data("long_train")
data("surv_train")
w = funest_fit(long_train, surv_train, tv_names = list("Y1", "Y2", "Y3"), fv_names = list("W"),
noftree = 10, t_star = 5.5, t_pred = 11)
}
\references{
\insertRef{nestpaper}{funest}
\insertRef{ranger}{funest}
}
|
/man/funest_fit.Rd
|
no_license
|
cran/funest
|
R
| false
| true
| 2,244
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nest_fit.R
\name{funest_fit}
\alias{funest_fit}
\title{Fitting functional ensemble survival tree model}
\usage{
funest_fit(
long_train,
surv_train,
noftree = 500,
nofcov = 2,
split_rule = "maxstat",
tv_names,
fv_names,
nofp = 3,
t_star,
t_pred,
...
)
}
\arguments{
\item{long_train}{long form of survival data from the training set}
\item{surv_train}{short form of survival data from the training set}
\item{noftree}{number of trees in the random survival forest}
\item{nofcov}{number of covariates selected in each survival tree}
\item{split_rule}{binary splitting rule for random survival forest, default is "maxstat"}
\item{tv_names}{a list of names of time-varying covariates}
\item{fv_names}{a list of names of fixed covariates}
\item{nofp}{number of multivariate principal components}
\item{t_star}{time for the last observed biomarker measurement}
\item{t_pred}{time at prediction}
\item{...}{extra arguments that can be passed to ranger()}
}
\value{
A list composed of two items. The first item is a list
of the information needed for prediction by the funest_pred()
function. The second item is the ranger object of the fitted
random survival forest.
\itemize{
\item misc - a list composed of 1) long_train: long form of survival data from the training set,
2) surv_train: short form of survival data from the training set,
3) fmla: covariates passed into the ensemble survival tree
4) score_names: intermediate names for the covariates
5) nofp: number of multivariate principal components
6) train_data.sub: data frame of all covariates after MFPCA been performed
\item rg - functional ensemble survival tree model
}
}
\description{
The function funest_fit takes a long and a short form of the survival data,
among other arguments for a random survival forest, and fits a functional ensemble survival tree
model for predicting survival probability.
}
\examples{
library(funest)
data("long_train")
data("surv_train")
w = funest_fit(long_train, surv_train, tv_names = list("Y1", "Y2", "Y3"), fv_names = list("W"),
noftree = 10, t_star = 5.5, t_pred = 11)
}
\references{
\insertRef{nestpaper}{funest}
\insertRef{ranger}{funest}
}
|
##' @include gtree.R
NULL
##' Toolkit constructor
##'
##' Qt toolkit implementation of \code{gvarbrowser}: instantiates the
##' \code{GVarBrowser} reference class defined below, forwarding the
##' handler, action and container arguments.
##'
##' @inheritParams gWidgets2::gvarbrowser
##' @export
##' @rdname gWidgets2Qt-undocumented
##' @method .gvarbrowser guiWidgetsToolkitQt
##' @S3method .gvarbrowser guiWidgetsToolkitQt
.gvarbrowser.guiWidgetsToolkitQt <- function(toolkit,
                                             handler = NULL,action = "summary", container = NULL, ... ) {
  GVarBrowser$new(toolkit,
                  handler = handler,action = action, container = container, ...)
}
## TODO:
## =====
## * add in popup menu with common actions: rm, ...
## Subclass of QStandardItemModel that customises drag-and-drop: when a row
## is dragged, the variable path is resolved through the owning GVarBrowser
## (stored in the 'obj' property) and shipped as plain text instead of
## Qt's opaque internal format.
qsetClass("GQStandardItemModel", Qt$QStandardItemModel)
qsetProperty("obj", GQStandardItemModel)
qsetMethod("mimeData", GQStandardItemModel, function(lst) {
  ## NOTE(review): the super() results on the two guard paths are not
  ## returned explicitly -- presumably intended as early exits; confirm.
  if(length(lst) == 0)
    super("mimeData", lst)
  idx <- lst[[1]]
  ## path components below the category node (the category itself is dropped)
  path <- obj$path_from_index(idx)$path[-1]
  if(length(path) == 0)
    super("mimeData", lst)
  data <- Qt$QMimeData()
  ## let the browser's observers translate the path into drag text
  txt <- obj$notify_observers(signal="drag-event", drag_data=path)[[1]]
  data$setText(txt)
  data
})
## Class for variable browser.
## Presents the objects of the global workspace in a two-column tree
## (name, one-line summary).  Objects are grouped under coloured category
## rows derived from 'filter_classes'; anything unmatched lands under
## 'other_label'.  A polling timer asks the workspace-watcher model for
## changes, and only added/removed/changed objects are re-rendered.
GVarBrowser <- setRefClass("GVarBrowser",
                           contains="GTreeBase",
                           fields=list(
                             "ws_model"="ANY",          # gWidgets2 WSWatcherModel tracking the workspace
                             "filter_classes"="list",   # category label -> vector of classes shown under it
                             "filter_name"="character", # regexp name filter (not supported in Qt; see set_filter_name)
                             "other_label"="character", # label of the catch-all category
                             "timer"= "ANY",            # gtimer driving ws_model$update_state()
                             "use_timer"="logical",     # gate for start_timer()
                             "item_list"="list"         # cache: top-level object name -> its QStandardItem
                           ),
                           methods=list(
                             initialize=function(toolkit=NULL,
                                                 handler=NULL, action=NULL, container=NULL, ..., fill=NULL) {
                               ## watch the workspace; every change notification refreshes the view
                               ws_model <<- gWidgets2:::WSWatcherModel$new()
                               o = gWidgets2:::Observer$new(function(self) {self$update_view()}, obj=.self)
                               ws_model$add_observer(o)
                               widget <<- Qt$QTreeView()
                               model <- GQStandardItemModel(rows=0, columns=2) # name, summary
                               model$obj <- .self  # back-reference used by the model's drag handler
                               model$setHorizontalHeaderLabels(c(gettext("Variable"), gettext("Summary")))
                               widget$setAlternatingRowColors(TRUE)
                               widget$setIndentation(14) # tighten up
                               ## But how to recover the dragged object?
                               ## it is in raw format:
                               ## mime_data$data("application/x-qabstractitemmodeldatalist")
                               widget$setModel(model)
                               model$setParent(widget) # avoid early gc
                               widget$setEditTriggers(Qt$QAbstractItemView$NoEditTriggers)
                               widget$setSelectionBehavior(Qt$QAbstractItemView$SelectRows)
                               widget$setSelectionMode(Qt$QAbstractItemView$MultiSelection) # multiple selection
                               initFields(block=widget,
                                          change_signal="activated",
                                          item_list=list(),
                                          filter_classes=gWidgets2:::gvarbrowser_default_classes,
                                          filter_name="",
                                          other_label="Other",
                                          use_timer=TRUE
                                          )
                               ## set up drag source: dragged text is the selected variable name(s)
                               add_drop_source(function(h,...) {
                                 svalue(h$obj)
                               })
                               add_context_menu()
                               ## fill hack
                               if(is(container, "GBoxContainer") && (missing(fill) || is.null(fill)))
                                 fill <- "both"
                               add_to_parent(container, .self, ..., fill=fill)
                               handler_id <<- add_handler_changed(handler, action)
                               ## Try our own timer: poll the workspace once per second initially;
                               ## adjust_timer() slows this down as the workspace grows.
                               timer <<- gtimer(1000, function(...) .self$ws_model$update_state())
                               populate_view()
                               callSuper(toolkit)
                             },
                             ## Start workspace polling (no-op when use_timer is FALSE).
                             start_timer=function() {
                               if(use_timer)
                                 timer$start_timer()
                             },
                             stop_timer=function() timer$stop_timer(),
                             adjust_timer=function(ms) {
                               "Adjust interval to size of workspace"
                               ## larger workspaces are polled less often (logarithmic growth)
                               if(missing(ms)) {
                                 n <- length(ls(envir=.GlobalEnv))
                                 ms <- 1000 * floor(log(5 + n, 5))
                               }
                               timer$set_interval(ms)
                             },
                             set_filter_name=function(value) {
                               ## Not implemented for the Qt toolkit: message and bail out.
                               ## NOTE(review): the two statements after return() are
                               ## unreachable, kept from the generic implementation.
                               message("Setting a regular expression to filter the displayed objects is not supported")
                               return()
                               filter_name <<- value
                               populate_view()
                             },
                             set_filter_classes=function(value) {
                               filter_classes <<- value
                               populate_view()  # category layout changed: rebuild from scratch
                             },
                             ##
                             add_value=function(x, name, parent_item, item=NULL) {
                               "Add a row to the model"
                               ## Creates (or relabels) an item for object 'x', attaches a short
                               ## summary and a class-based icon, and recurses into named lists.
                               if(is.null(item))
                                 item <- Qt$QStandardItem(name)
                               else
                                 item$setData(name)
                               summary_item <- Qt$QStandardItem(gWidgets2:::short_summary(x))
                               icon <- getStockIconByName(stockIconFromObject(x))
                               if(!is.null(icon))
                                 item$setIcon(as_qicon(icon))
                               ## tooltip?
                               if(is.null(item$parent())) {
                                 parent_item$appendRow(list(item, summary_item))
                               }
                               ## store in lookup if appropriate (only items directly under a
                               ## top-level category row are cached by name)
                               if(is.null(parent_item$parent())) {
                                 item_list[[name]] <<- item
                               }
                               ## recurse if needed: named lists get one child row per component
                               if(is.list(x) && !is.null(attr(x, "names"))) {
                                 item$setRowCount(0L) # clear out if there
                                 nms <- names(x)
                                 sapply(seq_along(x), function(i) add_value(x[[i]], nms[i], item))
                               }
                               ## return item
                               invisible(item)
                             },
                             clear_items=function() {
                               "Clear old items"
                               ## remove all top-level rows, last to first so indices stay valid
                               model <- widget$model()
                               cnt <- model$rowCount()
                               sapply(rev(seq_len(cnt)) - 1, function(i) {
                                 root <- model$indexFromItem(model$invisibleRootItem())
                                 model$removeRow(i, root)
                               })
                             },
                             populate_view=function(...) {
                               "Initialize tree. Afterwards we only modify values"
                               ## we need to update top-level object
                               ## use filter_classes to break up object
                               clear_items()
                               root <- widget$model()$invisibleRootItem()
                               ## do categories
                               categories <- names(filter_classes) # also "Other"
                               category_color <- Qt$QBrush(qcolor(0, 0, 255))  # blue category labels
                               for(i in categories) {
                                 item <- Qt$QStandardItem(i)
                                 item$setForeground(category_color)
                                 root$appendRow(item)
                                 ## what to add: every workspace object inheriting one of this
                                 ## category's classes, sorted by name
                                 klasses <- filter_classes[[i]]
                                 out <- ws_model$get_by_function(function(y) length(Filter(function(x) is(y, x), klasses) > 0))
                                 out_names <- names(out)
                                 idx <- order(out_names)
                                 if(length(out))
                                   sapply(seq_along(out), function(i) add_value(out[idx][[i]], out_names[idx][i], item))
                               }
                               ## other: objects matching none of the configured classes
                               item <- Qt$QStandardItem(gettext(other_label))
                               item$setForeground(category_color)
                               root$appendRow(item)
                               klasses <- unlist(filter_classes)
                               out <- ws_model$get_by_function(function(y) !(length(Filter(function(x) is(y, x), klasses) > 0)))
                               out_names <- names(out)
                               idx <- order(out_names)
                               out <- out[idx]; out_names <- out_names[idx]
                               if(length(out))
                                 sapply(seq_along(out), function(i) add_value(out[[i]], out_names[i], item))
                               start_timer()
                             },
                             update_view=function(...) {
                               "Update view of objects"
                               ## incremental refresh driven by ws_model$changes; polling is
                               ## paused while mutating the tree and resumed on exit
                               stop_timer()
                               on.exit({adjust_timer(); start_timer()})
                               ## for items in the global workspace.
                               remove_item <- function(nm) {
                                 item <- item_list[[nm, exact=TRUE]]
                                 item_list[[nm]] <<- NULL
                                 item$parent()$removeRow(item$row())
                               }
                               add_item <- function(x, nm) {
                                 ## find the category whose class list matches x
                                 type <- Filter(function(i) any(sapply(i, "is", object=x)), filter_classes)
                                 if(length(type) == 0)
                                   type <- gettext("Other") # catch all
                                 else
                                   type <- names(type)
                                 ## add to type, then sort within ... too much sorting
                                 parent_item <- widget$model()$findItems(type)[[1]]
                                 item <- add_value(x, nm, parent_item)
                                 parent_item$sortChildren(0L)
                                 ## cache
                                 item_list[[nm]] <<- item
                               }
                               update_item <- function(x, nm) {
                                 remove_item(nm)
                                 add_item(x, nm)
                               }
                               ## We use item_list as a cache to do most of the work
                               if(nchar(filter_name)) {
                                 ## NOTE(review): 'objs' is computed but never used on this
                                 ## branch -- filtering by name appears unfinished; confirm.
                                 objs <- ws_model$filter_names(function(x) {
                                   force(filter_name)
                                   grepl(filter_name, x)
                                 })
                               } else {
                                 ## use changes
                                 changes <- ws_model$changes
                                 mapply(remove_item, changes$removed) # name only, object is gone
                                 mapply(add_item, mget(changes$added, .GlobalEnv), changes$added)
                                 mapply(update_item, mget(changes$changed, .GlobalEnv), changes$changed)
                               }
                             },
                             ##
                             get_value=function(drop=TRUE, ...) {
                               "Get selected values as names or objects if drop=FALSE"
                               ## Selected paths come back as lists of name components; the
                               ## category component is stripped and the rest joined with '$'.
                               out <- callSuper("get_value", drop=FALSE)
                               if(!is.list(out)) ## work with lists
                                 out <- list(out)
                               if(nchar(filter_name) == 0)
                                 out <- lapply(out, "[", -1)
                               if(length(out) == 0)
                                 return(character(0))
                               nms <- lapply(out, function(x) {
                                 sapply(x, function(i) ifelse(grepl("\\s", i),
                                                              sprintf("'%s'", i),  # quote names containing whitespace
                                                              i))
                               })
                               nms <- lapply(nms, paste, collapse="$")
                               if(is.null(drop) || drop) {
                                 ## return non "" names
                                 Filter(nchar, nms)
                               } else {
                                 ## return objects, not values
                                 out <- lapply(out, gWidgets2:::get_object_from_string)
                                 names(out) <- nms
                                 ind <- which(nms == "")
                                 out <- out[ind]
                                 if(length(out) == 1)
                                   out[[1]]
                                 else
                                   out
                               }
                             },
                             set_value=function(value, ...) {
                               "Select and open value given."
                               ## Not implemented for the Qt toolkit.
                             },
                             ## context menu popup
                             add_context_menu=function() {
                               ## Not implemented yet: exits immediately.
                               return()
                               ## XXX update
                               ## make context sensitive menu. Requires identifying value of selected
                             },
                             ## selection is changed
                             add_handler_selection_changed=function(handler, action=NULL, ...) {
                               add_handler("selectionChanged", handler, action, emitter=widget$selectionModel())
                             }
                           ))
|
/R/gvarbrowser.R
|
no_license
|
jverzani/gWidgets2Qt
|
R
| false
| false
| 16,280
|
r
|
##' @include gtree.R
NULL
##' Toolkit constructor
##'
##' @inheritParams gWidgets2::gvarbrowser
##' @export
##' @rdname gWidgets2Qt-undocumented
##' @method .gvarbrowser guiWidgetsToolkitQt
##' @S3method .gvarbrowser guiWidgetsToolkitQt
.gvarbrowser.guiWidgetsToolkitQt <- function(toolkit,
                                             handler = NULL, action = "summary",
                                             container = NULL, ...) {
  ## Toolkit dispatch target: construction is delegated wholesale to the
  ## GVarBrowser reference class; every argument passes straight through.
  browser_obj <- GVarBrowser$new(toolkit,
                                 handler = handler, action = action,
                                 container = container, ...)
  browser_obj
}
## TODO:
## =====
## * add in popup menu with common actions: rm, ...
qsetClass("GQStandardItemModel", Qt$QStandardItemModel)
qsetProperty("obj", GQStandardItemModel)
## mimeData override: when a row is dragged, replace Qt's opaque internal
## payload with text produced by the widget's "drag-event" observers.
qsetMethod("mimeData", GQStandardItemModel, function(lst) {
  ## Nothing selected: defer to the default implementation.
  ## BUGFIX: return() is required here -- without it the call to super()
  ## fell through to lst[[1]] below, which errors on an empty list.
  if(length(lst) == 0)
    return(super("mimeData", lst))
  idx <- lst[[1]]
  path <- obj$path_from_index(idx)$path[-1]
  ## A category header (empty path) has no draggable value either.
  if(length(path) == 0)
    return(super("mimeData", lst))
  data <- Qt$QMimeData()
  ## First observer result is the textual representation of the object path.
  txt <- obj$notify_observers(signal="drag-event", drag_data=path)[[1]]
  data$setText(txt)
  data
})
## Class for variable browser.
## Reference class backing gvarbrowser(): shows the objects of the global
## workspace in a QTreeView, grouped into class categories, and keeps the
## view current by polling the workspace with a timer.
GVarBrowser <- setRefClass("GVarBrowser",
                           contains="GTreeBase",
                           fields=list(
                             "ws_model"="ANY",          ## gWidgets2 WSWatcherModel watching .GlobalEnv
                             "filter_classes"="list",   ## named list: category label -> classes in it
                             "filter_name"="character", ## regexp on object names (feature disabled, see set_filter_name)
                             "other_label"="character", ## label of the catch-all category
                             "timer"= "ANY",            ## gtimer driving ws_model$update_state()
                             "use_timer"="logical",     ## whether start_timer() actually starts polling
                             "item_list"="list"         ## cache: object name -> its top-level QStandardItem
                           ),
                           methods=list(
                             initialize=function(toolkit=NULL,
                               handler=NULL, action=NULL, container=NULL, ..., fill=NULL) {
                               ws_model <<- gWidgets2:::WSWatcherModel$new()
                               ## any workspace change triggers update_view() on this instance
                               o = gWidgets2:::Observer$new(function(self) {self$update_view()}, obj=.self)
                               ws_model$add_observer(o)
                               widget <<- Qt$QTreeView()
                               model <- GQStandardItemModel(rows=0, columns=2) # name, summary
                               model$obj <- .self
                               model$setHorizontalHeaderLabels(c(gettext("Variable"), gettext("Summary")))
                               widget$setAlternatingRowColors(TRUE)
                               widget$setIndentation(14) # tighten up
                               ## But how to recover the dragged object?
                               ## it is in raw format:
                               ## mime_data$data("application/x-qabstractitemmodeldatalist")
                               widget$setModel(model)
                               model$setParent(widget) # avoid early gc
                               widget$setEditTriggers(Qt$QAbstractItemView$NoEditTriggers)
                               widget$setSelectionBehavior(Qt$QAbstractItemView$SelectRows)
                               widget$setSelectionMode(Qt$QAbstractItemView$MultiSelection) # multiple selection
                               initFields(block=widget,
                                          change_signal="activated",
                                          item_list=list(),
                                          filter_classes=gWidgets2:::gvarbrowser_default_classes,
                                          filter_name="",
                                          other_label="Other",
                                          use_timer=TRUE
                                          )
                               ## set up drag source: dragged text is the current selection value
                               add_drop_source(function(h,...) {
                                 svalue(h$obj)
                               })
                               add_context_menu()
                               ## fill hack: in a box container, expand in both directions by default
                               if(is(container, "GBoxContainer") && (missing(fill) || is.null(fill)))
                                 fill <- "both"
                               add_to_parent(container, .self, ..., fill=fill)
                               handler_id <<- add_handler_changed(handler, action)
                               ## Try our own timer: poll the workspace once a second initially;
                               ## adjust_timer() later scales the interval with workspace size.
                               timer <<- gtimer(1000, function(...) .self$ws_model$update_state())
                               populate_view()
                               callSuper(toolkit)
                             },
                             start_timer=function() {
                               ## Start polling only when enabled via use_timer.
                               if(use_timer)
                                 timer$start_timer()
                             },
                             stop_timer=function() timer$stop_timer(),
                             adjust_timer=function(ms) {
                               "Adjust interval to size of workspace"
                               ## Interval grows logarithmically with the number of global objects,
                               ## so large workspaces are polled less often.
                               if(missing(ms)) {
                                 n <- length(ls(envir=.GlobalEnv))
                                 ms <- 1000 * floor(log(5 + n, 5))
                               }
                               timer$set_interval(ms)
                             },
                             set_filter_name=function(value) {
                               message("Setting a regular expression to filter the displayed objects is not supported")
                               return()
                               ## unreachable: kept for when the filter feature is re-enabled
                               filter_name <<- value
                               populate_view()
                             },
                             set_filter_classes=function(value) {
                               filter_classes <<- value
                               populate_view()
                             },
                             ##
                             add_value=function(x, name, parent_item, item=NULL) {
                               "Add a row to the model"
                               ## Reuse an existing item (update in place) or create a fresh one.
                               if(is.null(item))
                                 item <- Qt$QStandardItem(name)
                               else
                                 item$setData(name)
                               summary_item <- Qt$QStandardItem(gWidgets2:::short_summary(x))
                               icon <- getStockIconByName(stockIconFromObject(x))
                               if(!is.null(icon))
                                 item$setIcon(as_qicon(icon))
                               ## tooltip?
                               ## Only append when the item is not already attached to a parent.
                               if(is.null(item$parent())) {
                                 parent_item$appendRow(list(item, summary_item))
                               }
                               ## store in lookup if appropriate (top-level items only: the
                               ## parent of a category header is NULL)
                               if(is.null(parent_item$parent())) {
                                 item_list[[name]] <<- item
                               }
                               ## recurse if needed: named lists get one child row per component
                               if(is.list(x) && !is.null(attr(x, "names"))) {
                                 item$setRowCount(0L) # clear out if there
                                 nms <- names(x)
                                 sapply(seq_along(x), function(i) add_value(x[[i]], nms[i], item))
                               }
                               ## return item
                               invisible(item)
                             },
                             clear_items=function() {
                               "Clear old items"
                               model <- widget$model()
                               cnt <- model$rowCount()
                               ## remove back-to-front so earlier removals don't shift later indices
                               sapply(rev(seq_len(cnt)) - 1, function(i) {
                                 root <- model$indexFromItem(model$invisibleRootItem())
                                 model$removeRow(i, root)
                               })
                             },
                             populate_view=function(...) {
                               "Initialize tree. Afterwards we only modify values"
                               ## we need to update top-level object
                               ## use filter_classes to break up object
                               clear_items()
                               root <- widget$model()$invisibleRootItem()
                               ## do categories
                               categories <- names(filter_classes) # also "Other"
                               category_color <- Qt$QBrush(qcolor(0, 0, 255))
                               for(i in categories) {
                                 item <- Qt$QStandardItem(i)
                                 item$setForeground(category_color)
                                 root$appendRow(item)
                                 ## what to add
                                 klasses <- filter_classes[[i]]
                                 ## NOTE(review): the '> 0' sits inside length(); likely intended
                                 ## length(Filter(...)) > 0. Harmless here since a nonzero length
                                 ## is truthy, but confirm before changing.
                                 out <- ws_model$get_by_function(function(y) length(Filter(function(x) is(y, x), klasses) > 0))
                                 out_names <- names(out)
                                 idx <- order(out_names)
                                 if(length(out))
                                   sapply(seq_along(out), function(i) add_value(out[idx][[i]], out_names[idx][i], item))
                               }
                               ## other: everything matching none of the configured classes
                               item <- Qt$QStandardItem(gettext(other_label))
                               item$setForeground(category_color)
                               root$appendRow(item)
                               klasses <- unlist(filter_classes)
                               out <- ws_model$get_by_function(function(y) !(length(Filter(function(x) is(y, x), klasses) > 0)))
                               out_names <- names(out)
                               idx <- order(out_names)
                               out <- out[idx]; out_names <- out_names[idx]
                               if(length(out))
                                 sapply(seq_along(out), function(i) add_value(out[[i]], out_names[i], item))
                               start_timer()
                             },
                             update_view=function(...) {
                               "Update view of objects"
                               ## pause polling while mutating; resume (with rescaled interval) on exit
                               stop_timer()
                               on.exit({adjust_timer(); start_timer()})
                               ## for items in the global workspace.
                               remove_item <- function(nm) {
                                 item <- item_list[[nm, exact=TRUE]]
                                 item_list[[nm]] <<- NULL
                                 item$parent()$removeRow(item$row())
                               }
                               add_item <- function(x, nm) {
                                 ## pick the first category whose class list matches x
                                 type <- Filter(function(i) any(sapply(i, "is", object=x)), filter_classes)
                                 if(length(type) == 0)
                                   type <- gettext("Other") # catch all
                                 else
                                   type <- names(type)
                                 ## add to type, then sort within ... too much sorting
                                 parent_item <- widget$model()$findItems(type)[[1]]
                                 item <- add_value(x, nm, parent_item)
                                 parent_item$sortChildren(0L)
                                 ## cache
                                 item_list[[nm]] <<- item
                               }
                               update_item <- function(x, nm) {
                                 remove_item(nm)
                                 add_item(x, nm)
                               }
                               ## We use item_list as a cache to do most of the work
                               if(nchar(filter_name)) {
                                 objs <- ws_model$filter_names(function(x) {
                                   force(filter_name)
                                   grepl(filter_name, x)
                                 })
                               } else {
                                 ## use changes reported by the workspace watcher (incremental update)
                                 changes <- ws_model$changes
                                 mapply(remove_item, changes$removed) # name only, object is gone
                                 mapply(add_item, mget(changes$added, .GlobalEnv), changes$added)
                                 mapply(update_item, mget(changes$changed, .GlobalEnv), changes$changed)
                               }
                             },
                             ##
                             get_value=function(drop=TRUE, ...) {
                               "Get selected values as names or objects if drop=FALSE"
                               out <- callSuper("get_value", drop=FALSE)
                               if(!is.list(out)) ## work with lists
                                 out <- list(out)
                               ## first path element is the category header; drop it when unfiltered
                               if(nchar(filter_name) == 0)
                                 out <- lapply(out, "[", -1)
                               if(length(out) == 0)
                                 return(character(0))
                               ## quote name components containing whitespace so the composed
                               ## "a$b" access string stays parseable
                               nms <- lapply(out, function(x) {
                                 sapply(x, function(i) ifelse(grepl("\\s", i),
                                                              sprintf("'%s'", i),
                                                              i))
                               })
                               nms <- lapply(nms, paste, collapse="$")
                               if(is.null(drop) || drop) {
                                 ## return non "" names
                                 Filter(nchar, nms)
                               } else {
                                 ## return objects, not values
                                 out <- lapply(out, gWidgets2:::get_object_from_string)
                                 names(out) <- nms
                                 ## NOTE(review): this keeps entries whose composed name is "" --
                                 ## the complement of the drop=TRUE branch; confirm intended.
                                 ind <- which(nms == "")
                                 out <- out[ind]
                                 if(length(out) == 1)
                                   out[[1]]
                                 else
                                   out
                               }
                             },
                             set_value=function(value, ...) {
                               "Select and open value given."
                               ## not implemented
                             },
                             ## context menu popup (currently disabled)
                             add_context_menu=function() {
                               return()
                               ## XXX update
                               ## make context sensitive menu. Requires identifying value of selected
                             },
                             ## selection is changed
                             add_handler_selection_changed=function(handler, action=NULL, ...) {
                               add_handler("selectionChanged", handler, action, emitter=widget$selectionModel())
                             }
                             ))
|
# Factors
# http://r4ds.had.co.nz/factors.html
# ----
library(tidyverse)
library(forcats)
# ----
# Creating Factors
# ----
x1 <- c("Dec", "Apr", "Jan", "Mar")
x2 <- c("Dec", "Apr", "Jam", "Mar")  # "Jam" is an intentional typo: not a valid month
#
month_levels <- c(
  "Jan", "Feb", "Mar", "Apr",
  "May", "Jun", "Jul", "Aug",
  "Sep", "Oct", "Nov", "Dec"
)
y1 <- factor(x1, levels = month_levels)
sort(y1)  # sorts in calendar order, not alphabetically
#
y2 <- factor(x2, levels = month_levels)
y2  # "Jam" matches no level, so it becomes NA
# make levels appear in order of the first appearance in the data
f1 <- factor(x1, levels = unique(x1))
f1
# or
f2 <- x1 %>% factor() %>% fct_inorder()
f2
# access levels
levels(f2)
# ----
# General social survey
# ----
gss <- forcats::gss_cat
#
gss %>%
  count(race)
#
ggplot(gss, aes(race)) +
  geom_bar()
# ----
# Exercises
# ----
# 1.
ggplot(gss, aes(rincome)) +
  geom_bar()
levels(gss$rincome)
# ----
# Modifying factor order
# ----
relig_summary <- gss %>%
  group_by(relig) %>%
  summarise(
    age = mean(age, na.rm = TRUE),
    tvhours = mean(tvhours, na.rm = TRUE),
    n = n()
  )
ggplot(relig_summary, aes(tvhours, relig)) +
  geom_point()
# reorder religions by mean TV hours -> more readable dot plot
ggplot(relig_summary, aes(tvhours, fct_reorder(relig, tvhours))) +
  geom_point()
#
relig_summary %>%
  mutate(relig = fct_reorder(relig, tvhours)) %>%
  ggplot(aes(tvhours, relig)) +
  geom_point()
#
rincome_summary <- gss %>%
  group_by(rincome) %>%
  summarise(
    age = mean(age, na.rm = TRUE),
    tvhours = mean(tvhours, na.rm = TRUE),
    n = n()
  )
ggplot(rincome_summary, aes(age, fct_reorder(rincome, age))) +
  geom_point()
# move "Not applicable" to the front of the level order
ggplot(rincome_summary, aes(age, fct_relevel(rincome, "Not applicable"))) +
  geom_point()
# fct_reorder2: order legend by the y values at the largest x
by_age <- gss %>%
  filter(!is.na(age)) %>%
  group_by(age, marital) %>%
  count() %>%
  mutate(prop = n / sum(n))
ggplot(by_age, aes(age, prop, colour = marital)) +
  geom_line(na.rm = TRUE)
# ----
# Modify factor levels
# ----
gss %>% count(partyid)
#
gss %>%
  mutate(partyid = fct_recode(partyid,
    "Republican, strong" = "Strong republican",
    "Republican, weak" = "Not str republican",
    "Independent, near rep" = "Ind,near rep",
    "Independent, near dem" = "Ind,near dem",
    "Democrat, weak" = "Not str democrat",
    "Democrat, strong" = "Strong democrat"
  )) %>%
  count(partyid)
# combine levels (several old levels mapped to "Other")
gss %>%
  mutate(partyid = fct_recode(partyid,
    "Republican, strong" = "Strong republican",
    "Republican, weak" = "Not str republican",
    "Independent, near rep" = "Ind,near rep",
    "Independent, near dem" = "Ind,near dem",
    "Democrat, weak" = "Not str democrat",
    "Democrat, strong" = "Strong democrat",
    "Other" = "No answer",
    "Other" = "Don't know",
    "Other" = "Other party"
  )) %>%
  count(partyid)
# collapsing many levels with fct_collapse
gss %>%
  mutate(partyid = fct_collapse(partyid,
    other = c("No answer", "Don't know", "Other party"),
    rep = c("Strong republican", "Not str republican"),
    ind = c("Ind,near rep", "Independent", "Ind,near dem"),
    dem = c("Not str democrat", "Strong democrat")
  )) %>%
  count(partyid)
# lump together small groups
gss %>%
  mutate(relig = fct_lump(relig)) %>%
  count(relig)
# this probably over-aggregates; use 'n=' instead
gss %>%
  mutate(relig = fct_lump(relig, n = 10)) %>%
  count(relig, sort = TRUE) %>%
  print(n = Inf)
# ----
# Exercises
# ----
# 1. proportion of each party id over time
gss_cat %>%
  mutate(
    partyid = fct_collapse(partyid,
      other = c("No answer", "Don't know", "Other party"),
      rep = c("Strong republican", "Not str republican"),
      ind = c("Ind,near rep", "Independent", "Ind,near dem"),
      dem = c("Not str democrat", "Strong democrat")
    )) %>%
  count(year, partyid) %>%
  group_by(year) %>%
  mutate(p = n / sum(n)) %>%
  ggplot(aes(x = year, y = p,
             color = fct_reorder2(partyid, year, p))) +
  geom_point() +
  geom_line() +
  labs(
    color = "Party ID",
    y = "Proportion",
    x = "Year"
  )
# ----
# 2. collapse rincome into a handful of coarse bands
levels(gss_cat$rincome)
gss_cat %>%
  mutate(
    rincome = fct_collapse(rincome,
      "N/A" = c("No answer","Don't know","Refused","Not applicable"),
      "$10000 - 24999" = c("$20000 - 24999","$15000 - 19999","$10000 - 14999"),
      "$4000 - 9999" = c("$8000 to 9999","$7000 to 7999","$6000 to 6999","$5000 to 5999","$4000 to 4999"),
      "Lt $4000" = c("$3000 to 3999","$1000 to 2999","Lt $1000")
    )) %>%
  count(rincome)
|
/Ch15_Factors.R
|
no_license
|
bjarnih81/R-for-data-science
|
R
| false
| false
| 5,311
|
r
|
# Factors
# http://r4ds.had.co.nz/factors.html
# ----
library(tidyverse)
library(forcats)
# ----
# Creating Factors
# ----
x1 <- c("Dec", "Apr", "Jan", "Mar")
x2 <- c("Dec", "Apr", "Jam", "Mar")  # "Jam" is an intentional typo: not a valid month
#
month_levels <- c(
  "Jan", "Feb", "Mar", "Apr",
  "May", "Jun", "Jul", "Aug",
  "Sep", "Oct", "Nov", "Dec"
)
y1 <- factor(x1, levels = month_levels)
sort(y1)  # sorts in calendar order, not alphabetically
#
y2 <- factor(x2, levels = month_levels)
y2  # "Jam" matches no level, so it becomes NA
# make levels appear in order of the first appearance in the data
f1 <- factor(x1, levels = unique(x1))
f1
# or
f2 <- x1 %>% factor() %>% fct_inorder()
f2
# access levels
levels(f2)
# ----
# General social survey
# ----
gss <- forcats::gss_cat
#
gss %>%
  count(race)
#
ggplot(gss, aes(race)) +
  geom_bar()
# ----
# Exercises
# ----
# 1.
ggplot(gss, aes(rincome)) +
  geom_bar()
levels(gss$rincome)
# ----
# Modifying factor order
# ----
relig_summary <- gss %>%
  group_by(relig) %>%
  summarise(
    age = mean(age, na.rm = TRUE),
    tvhours = mean(tvhours, na.rm = TRUE),
    n = n()
  )
ggplot(relig_summary, aes(tvhours, relig)) +
  geom_point()
# reorder religions by mean TV hours -> more readable dot plot
ggplot(relig_summary, aes(tvhours, fct_reorder(relig, tvhours))) +
  geom_point()
#
relig_summary %>%
  mutate(relig = fct_reorder(relig, tvhours)) %>%
  ggplot(aes(tvhours, relig)) +
  geom_point()
#
rincome_summary <- gss %>%
  group_by(rincome) %>%
  summarise(
    age = mean(age, na.rm = TRUE),
    tvhours = mean(tvhours, na.rm = TRUE),
    n = n()
  )
ggplot(rincome_summary, aes(age, fct_reorder(rincome, age))) +
  geom_point()
# move "Not applicable" to the front of the level order
ggplot(rincome_summary, aes(age, fct_relevel(rincome, "Not applicable"))) +
  geom_point()
# fct_reorder2: order legend by the y values at the largest x
by_age <- gss %>%
  filter(!is.na(age)) %>%
  group_by(age, marital) %>%
  count() %>%
  mutate(prop = n / sum(n))
ggplot(by_age, aes(age, prop, colour = marital)) +
  geom_line(na.rm = TRUE)
# ----
# Modify factor levels
# ----
gss %>% count(partyid)
#
gss %>%
  mutate(partyid = fct_recode(partyid,
    "Republican, strong" = "Strong republican",
    "Republican, weak" = "Not str republican",
    "Independent, near rep" = "Ind,near rep",
    "Independent, near dem" = "Ind,near dem",
    "Democrat, weak" = "Not str democrat",
    "Democrat, strong" = "Strong democrat"
  )) %>%
  count(partyid)
# combine levels (several old levels mapped to "Other")
gss %>%
  mutate(partyid = fct_recode(partyid,
    "Republican, strong" = "Strong republican",
    "Republican, weak" = "Not str republican",
    "Independent, near rep" = "Ind,near rep",
    "Independent, near dem" = "Ind,near dem",
    "Democrat, weak" = "Not str democrat",
    "Democrat, strong" = "Strong democrat",
    "Other" = "No answer",
    "Other" = "Don't know",
    "Other" = "Other party"
  )) %>%
  count(partyid)
# collapsing many levels with fct_collapse
gss %>%
  mutate(partyid = fct_collapse(partyid,
    other = c("No answer", "Don't know", "Other party"),
    rep = c("Strong republican", "Not str republican"),
    ind = c("Ind,near rep", "Independent", "Ind,near dem"),
    dem = c("Not str democrat", "Strong democrat")
  )) %>%
  count(partyid)
# lump together small groups
gss %>%
  mutate(relig = fct_lump(relig)) %>%
  count(relig)
# this probably over-aggregates; use 'n=' instead
gss %>%
  mutate(relig = fct_lump(relig, n = 10)) %>%
  count(relig, sort = TRUE) %>%
  print(n = Inf)
# ----
# Exercises
# ----
# 1. proportion of each party id over time
gss_cat %>%
  mutate(
    partyid = fct_collapse(partyid,
      other = c("No answer", "Don't know", "Other party"),
      rep = c("Strong republican", "Not str republican"),
      ind = c("Ind,near rep", "Independent", "Ind,near dem"),
      dem = c("Not str democrat", "Strong democrat")
    )) %>%
  count(year, partyid) %>%
  group_by(year) %>%
  mutate(p = n / sum(n)) %>%
  ggplot(aes(x = year, y = p,
             color = fct_reorder2(partyid, year, p))) +
  geom_point() +
  geom_line() +
  labs(
    color = "Party ID",
    y = "Proportion",
    x = "Year"
  )
# ----
# 2. collapse rincome into a handful of coarse bands
levels(gss_cat$rincome)
gss_cat %>%
  mutate(
    rincome = fct_collapse(rincome,
      "N/A" = c("No answer","Don't know","Refused","Not applicable"),
      "$10000 - 24999" = c("$20000 - 24999","$15000 - 19999","$10000 - 14999"),
      "$4000 - 9999" = c("$8000 to 9999","$7000 to 7999","$6000 to 6999","$5000 to 5999","$4000 to 4999"),
      "Lt $4000" = c("$3000 to 3999","$1000 to 2999","Lt $1000")
    )) %>%
  count(rincome)
|
## These functions are used together to cache the inverse of a
# given invertible matrix. makeCacheMatrix takes a given matrix, stores it and
# includes a getter/setter for its inverse if it has already been calculated.
# cacheSolve will take a makeCacheMatrix object, return already calculated inverse if
# one exists; otherwise calculates, stores, and returns the inverse.
##Sample Usage
# m <- matrix(1:4, 2,2) #Creates invertable matrix
# matrixToSolve <- makeCacheMatrix(m) #Initializes makeCacheMatrix object using m
# cacheSolve(matrixToSolve) #First call solves and stores the inverse of the matrix
# cacheSolve(matrixToSolve) #Second call retireves pre-caluculation, prints message and returns inverse
## makeCacheMatrix is a special matrix object that stores a matrix, and calculates, stores, and
# retrieves its inverse.
## makeCacheMatrix: build a cache-enabled wrapper around a matrix.
##
## Returns a list of four closures sharing state in this function's
## environment:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  ## FIX: parameter renamed from 'solve' to 'inverse' -- the old name
  ## shadowed base::solve() and misleadingly suggested a computation.
  ## cacheSolve() calls this positionally, so the rename is compatible.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The first call computes and caches the inverse via
## solve(); later calls announce a cache hit and return the stored value.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## cache hit: announce and short-circuit
    message("getting cached data")
    return(cached)
  }
  ## cache miss: compute, store for next time, and return
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
jlehn/ProgrammingAssignment2
|
R
| false
| false
| 1,921
|
r
|
## These functions are used together to cache the inverse of a
# given invertible matrix. makeCacheMatrix takes a given matrix, stores it and
# includes a getter/setter for its inverse if it has already been calculated.
# cacheSolve will take a makeCacheMatrix object, return already calculated inverse if
# one exists; otherwise calculates, stores, and returns the inverse.
##Sample Usage
# m <- matrix(1:4, 2,2) #Creates invertable matrix
# matrixToSolve <- makeCacheMatrix(m) #Initializes makeCacheMatrix object using m
# cacheSolve(matrixToSolve) #First call solves and stores the inverse of the matrix
# cacheSolve(matrixToSolve) #Second call retireves pre-caluculation, prints message and returns inverse
## makeCacheMatrix is a special matrix object that stores a matrix, and calculates, stores, and
# retrieves its inverse.
## makeCacheMatrix: build a cache-enabled wrapper around a matrix.
##
## Returns a list of four closures sharing state in this function's
## environment:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  ## FIX: parameter renamed from 'solve' to 'inverse' -- the old name
  ## shadowed base::solve() and misleadingly suggested a computation.
  ## cacheSolve() calls this positionally, so the rename is compatible.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The first call computes and caches the inverse via
## solve(); later calls announce a cache hit and return the stored value.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## cache hit: announce and short-circuit
    message("getting cached data")
    return(cached)
  }
  ## cache miss: compute, store for next time, and return
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
# load data; "?" marks missing values in the raw file
house_p_consumption <- read.table("10.30_household_power_consumption.txt",
  header = T,sep = ";",na.strings = "?", stringsAsFactors = F)
# clear date and time format: combine the Date and Time text columns
# into a single POSIXct column for plotting
library(lubridate)
house_p_consumption$date_time <- dmy_hms(paste(house_p_consumption$Date, house_p_consumption$Time, sep = " "))
# We will only be using data from the dates 2007-02-01 and 2007-02-02
house_p_consumption1 <- subset(house_p_consumption,
  Date == "1/2/2007" | Date == "2/2/2007")
# for plot 1: histogram of global active power
png("plot1.png") # the default is 480 * 480
hist(house_p_consumption1$Global_active_power, col = "red",
  main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
# for plot 2: global active power over time
# (type = "n" draws an empty frame; lines() then adds the series)
png("plot2.png") # the default is 480 * 480
with(house_p_consumption1, plot(date_time, Global_active_power, type = "n",
  xlab = "", ylab = "Global Active Power (kilowatts)"))
with(house_p_consumption1, lines(date_time, Global_active_power))
dev.off()
# for plot 3: all three sub-metering series on one frame
png("plot3.png") # the default is 480 * 480
with(house_p_consumption1, plot(date_time, Sub_metering_1, type = "n",
  xlab = "", ylab = "Energy sub metering"))
with(house_p_consumption1, lines(date_time, Sub_metering_1))
with(house_p_consumption1, lines(date_time, Sub_metering_2, col = "red"))
with(house_p_consumption1, lines(date_time, Sub_metering_3, col = "blue"))
legend("topright",
  c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  col = c("black", "red","blue"), lty = 1, cex = 0.7)
dev.off()
# for plot 4: 2x2 panel combining the above plus voltage and reactive power
png("plot4.png") # the default is 480 * 480
par(mfrow = c(2,2))
# top_left
with(house_p_consumption1, plot(date_time, Global_active_power, type = "n",
  xlab = "", ylab = "Global Active Power (kilowatts)"))
with(house_p_consumption1, lines(date_time, Global_active_power))
# top_right
with(house_p_consumption1, plot(date_time, Voltage, type = "n",
  xlab = "datetime", ylab = "Voltage"))
with(house_p_consumption1, lines(date_time, Voltage))
# bottom_left
with(house_p_consumption1, plot(date_time, Sub_metering_1, type = "n",
  xlab = "", ylab = "Energy sub metering"))
with(house_p_consumption1, lines(date_time, Sub_metering_1))
with(house_p_consumption1, lines(date_time, Sub_metering_2, col = "red"))
with(house_p_consumption1, lines(date_time, Sub_metering_3, col = "blue"))
legend("topright",
  c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  col = c("black", "red","blue"), lty = 1, cex = 0.5, box.col = "white")
# bottom_right
with(house_p_consumption1, plot(date_time, Global_reactive_power, type = "n",
  xlab = "datetime", ylab = "Global_reactive_power"))
with(house_p_consumption1, lines(date_time, Global_reactive_power))
# close device
dev.off()
|
/10.30_plot.R
|
no_license
|
wscrdzg/ExData_Plotting1
|
R
| false
| false
| 2,931
|
r
|
# load data; "?" marks missing values in the raw file
house_p_consumption <- read.table("10.30_household_power_consumption.txt",
  header = T,sep = ";",na.strings = "?", stringsAsFactors = F)
# clear date and time format: combine the Date and Time text columns
# into a single POSIXct column for plotting
library(lubridate)
house_p_consumption$date_time <- dmy_hms(paste(house_p_consumption$Date, house_p_consumption$Time, sep = " "))
# We will only be using data from the dates 2007-02-01 and 2007-02-02
house_p_consumption1 <- subset(house_p_consumption,
  Date == "1/2/2007" | Date == "2/2/2007")
# for plot 1: histogram of global active power
png("plot1.png") # the default is 480 * 480
hist(house_p_consumption1$Global_active_power, col = "red",
  main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
# for plot 2: global active power over time
# (type = "n" draws an empty frame; lines() then adds the series)
png("plot2.png") # the default is 480 * 480
with(house_p_consumption1, plot(date_time, Global_active_power, type = "n",
  xlab = "", ylab = "Global Active Power (kilowatts)"))
with(house_p_consumption1, lines(date_time, Global_active_power))
dev.off()
# for plot 3: all three sub-metering series on one frame
png("plot3.png") # the default is 480 * 480
with(house_p_consumption1, plot(date_time, Sub_metering_1, type = "n",
  xlab = "", ylab = "Energy sub metering"))
with(house_p_consumption1, lines(date_time, Sub_metering_1))
with(house_p_consumption1, lines(date_time, Sub_metering_2, col = "red"))
with(house_p_consumption1, lines(date_time, Sub_metering_3, col = "blue"))
legend("topright",
  c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  col = c("black", "red","blue"), lty = 1, cex = 0.7)
dev.off()
# for plot 4: 2x2 panel combining the above plus voltage and reactive power
png("plot4.png") # the default is 480 * 480
par(mfrow = c(2,2))
# top_left
with(house_p_consumption1, plot(date_time, Global_active_power, type = "n",
  xlab = "", ylab = "Global Active Power (kilowatts)"))
with(house_p_consumption1, lines(date_time, Global_active_power))
# top_right
with(house_p_consumption1, plot(date_time, Voltage, type = "n",
  xlab = "datetime", ylab = "Voltage"))
with(house_p_consumption1, lines(date_time, Voltage))
# bottom_left
with(house_p_consumption1, plot(date_time, Sub_metering_1, type = "n",
  xlab = "", ylab = "Energy sub metering"))
with(house_p_consumption1, lines(date_time, Sub_metering_1))
with(house_p_consumption1, lines(date_time, Sub_metering_2, col = "red"))
with(house_p_consumption1, lines(date_time, Sub_metering_3, col = "blue"))
legend("topright",
  c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
  col = c("black", "red","blue"), lty = 1, cex = 0.5, box.col = "white")
# bottom_right
with(house_p_consumption1, plot(date_time, Global_reactive_power, type = "n",
  xlab = "datetime", ylab = "Global_reactive_power"))
with(house_p_consumption1, lines(date_time, Global_reactive_power))
# close device
dev.off()
|
\name{from_WDI}
\alias{from_WDI}
\title{World Bank data}
\usage{
from_WDI(..., auxiliary.indicator = c(), country.name = NULL,
translation.table = NULL)
}
\arguments{
\item{...}{Arguments sent to \code{WDI} (primarily
of interest are \code{country}, defaulting to "all",
\code{start}, defaulting to 1961, \code{end}, defaulting
to 2013, and \code{indicator}, defaulting to
"NY.GDP.PCAP.PP.KD" (GDP per capita constant 2005 USD))}
\item{auxiliary.indicator}{additional indicators to
include as auxiliary values}
\item{country.name}{column name to use for country code
column (used as index), defaults to "iso2c" used by WDI}
\item{translation.table}{list mapping indicator code
names to column names to be used in final
\code{time.table}}
}
\description{
Construct a time.table from World Bank data.
}
|
/man/from_WDI.Rd
|
no_license
|
rossklin/timetablr
|
R
| false
| false
| 836
|
rd
|
\name{from_WDI}
\alias{from_WDI}
\title{World Bank data}
\usage{
from_WDI(..., auxiliary.indicator = c(), country.name = NULL,
translation.table = NULL)
}
\arguments{
\item{...}{Arguments sent to \code{WDI} (primarily
of interest are \code{country}, defaulting to "all",
\code{start}, defaulting to 1961, \code{end}, defaulting
to 2013, and \code{indicator}, defaulting to
"NY.GDP.PCAP.PP.KD" (GDP per capita constant 2005 USD))}
\item{auxiliary.indicator}{additional indicators to
include as auxiliary values}
\item{country.name}{column name to use for country code
column (used as index), defaults to "iso2c" used by WDI}
\item{translation.table}{list mapping indicator code
names to column names to be used in final
\code{time.table}}
}
\description{
Construct a time.table from World Bank data.
}
|
context("syllabification")

test_that("Syllabification returns data frames", {
  ## Every transcription should syllabify to a tibble; try() keeps a single
  ## bad entry from aborting the whole run.
  ## (The original also computed a 'class' column that was never asserted;
  ## that dead computation has been removed.)
  syls_df <- syllabify_test_dict %>%
    mutate(syl_df = map(trans, ~try(syllabify(.x))))
  for (i in seq_along(syls_df$syl_df)) {
    expect_is(syls_df$syl_df[[i]], "tbl_df")
  }
})
test_that("Syllabification is same length as transcription", {
  ## The syllabified output should have one row per phone in the input
  ## transcription.
  syls_df <- syllabify_test_dict %>%
    mutate(trans_vector = map(trans, pronunciation_check_cmu),
           trans_length = map(trans_vector, length) %>% simplify(),
           syl_df = map(trans, ~try(syllabify(.x))),
           syl_length = map(syl_df, nrow) %>% simplify()
           )
  ## BUGFIX: the original indexed [[1]] on both sides inside the loop,
  ## so only the first entry was ever compared.
  for (i in seq_along(syls_df$syl_df)) {
    expect_equal(syls_df$syl_length[[i]], syls_df$trans_length[[i]])
  }
})
test_that("Check Pronunciation", {
  ## "F UW1" splits into two CMU phones.
  ## (The original repeated this expectation twice verbatim; the duplicate
  ## added no coverage and has been dropped.)
  expect_equal(length(pronunciation_check_cmu("F UW1")), 2)
  expect_error(pronunciation_check_cmu("C"),
               "Not a licit CMU transcription label: C")
})
test_that("Data columns", {
  ## Flatten all syllabified results and check the column contract:
  ## types plus the closed vocabularies for stress and syllable part.
  syls_df <- syllabify_test_dict %>%
    mutate(syl_df = map(trans, ~try(syllabify(.x)))) %>%
    unnest(syl_df)
  expect_type(syls_df$syll, "double")
  expect_type(syls_df$part, "character")
  expect_type(syls_df$phone, "character")
  expect_type(syls_df$stress, "character")
  ## stress may be NA (e.g. for consonant parts)
  expect_true(all(syls_df$stress %in% c("0", "1", "2", NA)))
  expect_true(all(syls_df$part %in% c("onset", "nucleus", "coda")))
})
|
/tests/testthat/test-syllabification.R
|
permissive
|
JoFrhwld/syllabifyr
|
R
| false
| false
| 1,497
|
r
|
context("syllabification")

test_that("Syllabification returns data frames", {
  ## Every transcription should syllabify to a tibble; try() keeps a single
  ## bad entry from aborting the whole run.
  ## (The original also computed a 'class' column that was never asserted;
  ## that dead computation has been removed.)
  syls_df <- syllabify_test_dict %>%
    mutate(syl_df = map(trans, ~try(syllabify(.x))))
  for (i in seq_along(syls_df$syl_df)) {
    expect_is(syls_df$syl_df[[i]], "tbl_df")
  }
})
test_that("Syllabification is same length as transcription", {
  ## The syllabified output should have one row per phone in the input
  ## transcription.
  syls_df <- syllabify_test_dict %>%
    mutate(trans_vector = map(trans, pronunciation_check_cmu),
           trans_length = map(trans_vector, length) %>% simplify(),
           syl_df = map(trans, ~try(syllabify(.x))),
           syl_length = map(syl_df, nrow) %>% simplify()
           )
  ## BUGFIX: the original indexed [[1]] on both sides inside the loop,
  ## so only the first entry was ever compared.
  for (i in seq_along(syls_df$syl_df)) {
    expect_equal(syls_df$syl_length[[i]], syls_df$trans_length[[i]])
  }
})
test_that("Check Pronunciation", {
  ## "F UW1" splits into two CMU phones.
  ## (The original repeated this expectation twice verbatim; the duplicate
  ## added no coverage and has been dropped.)
  expect_equal(length(pronunciation_check_cmu("F UW1")), 2)
  expect_error(pronunciation_check_cmu("C"),
               "Not a licit CMU transcription label: C")
})
test_that("Data columns", {
  ## Flatten all syllabified results and check the column contract:
  ## types plus the closed vocabularies for stress and syllable part.
  syls_df <- syllabify_test_dict %>%
    mutate(syl_df = map(trans, ~try(syllabify(.x)))) %>%
    unnest(syl_df)
  expect_type(syls_df$syll, "double")
  expect_type(syls_df$part, "character")
  expect_type(syls_df$phone, "character")
  expect_type(syls_df$stress, "character")
  ## stress may be NA (e.g. for consonant parts)
  expect_true(all(syls_df$stress %in% c("0", "1", "2", NA)))
  expect_true(all(syls_df$part %in% c("onset", "nucleus", "coda")))
})
|
# inferCNV Code
# Author: Yue Pan
# Last edited: 04/18/19
########################################
# Notes:
# This is a direct application of inferCNV from the current package,
# run on the cells of a single tumor (id 78) from GSE72056.
########################################
library("devtools")
library(rjags)
library(infercnv)
library(data.table)
# install.packages("rjags")
# devtools::install_github("broadinstitute/infercnv", ref="RELEASE_0_99_4")
########################################
## Start real code
# prepare for raw count matrix
data <- fread('GSE72056_melanoma_single_cell_revised_v2.txt',
  sep = "\t",header = TRUE,stringsAsFactors=FALSE)
data <- as.data.frame(data)
# Row 2 holds a per-cell malignancy code; translate 1/2/0 into labels.
# (After a cell is relabelled, the remaining numeric comparisons can no
# longer match the string, so the sequential ifs act like an else chain.)
for (i in 2:ncol(data)){
  if (data[2,i] == 1) data[2,i] = 'non-malignant'
  if (data[2,i] == 2) data[2,i] = 'malignant'
  if (data[2,i] == 0) data[2,i] = 'unresolved'
}
# count <- data[-c(1,2,3),]
# Keep only cells of tumor 78 (row 1 is the tumor id) and drop the three
# metadata rows, leaving gene-by-cell expression values.
count <- data[-c(1,2,3),c(1,which(data[1,]==78))] # change here for subject
rownames <- count$Cell
count <- count[,-1]
rownames(count) <- make.names(rownames, unique=TRUE)  # de-duplicate gene names
cell_name <- colnames(count)
# cell_type <- as.factor(data[2,])[2:ncol(data)]
cell_type <- data[2,][which(data[1,]==78)] # change here for subject
# prepare for annotation file (cell name -> malignancy label)
sample_annotation <- matrix(c(cell_name,cell_type), ncol = 2)
# final raw counts matrix
count <- as.data.frame(lapply(count, as.numeric))
colnames(count) <- cell_name
rownames(count) <- make.names(rownames, unique=TRUE)
# back-transform: inverse of log2(x/10 + 1)
# (assumes the source values are log-scaled expression -- TODO confirm)
count <- (2^count - 1)*10
# inferCNV object: non-malignant cells serve as the reference group
infercnv_obj = CreateInfercnvObject(raw_counts_matrix=as.matrix(count),
  annotations_file="cellAnnotations_78.txt",
  delim="\t",
  gene_order_file="gencode_v19_gene_pos.txt",
  ref_group_names=c("non-malignant"),
  #ref_group_names = NULL,
  chr_exclude=c('chrY', 'chrM'))
# perform infercnv operations to reveal cnv signal
infercnv_obj = infercnv::run(infercnv_obj,
  cutoff=1, # use 1 for smart-seq, 0.1 for 10x-genomics
  # max_centered_threshold = NA,
  out_dir="output_dir_78_final", # dir is auto-created for storing outputs
  cluster_by_groups=T, # cluster
  denoise=T,
  HMM=T
  )
# final plot of the denoised CNV heatmap
plot_cnv(infercnv_obj,
  out_dir="output_dir_78plot_final",
  obs_title="malignant/unknown",
  ref_title="non-malignant",
  cluster_by_groups=TRUE,
  #k_obs_groups = 3,
  contig_cex=2.5,
  x.center=mean(infercnv_obj@expr.data),
  x.range="auto", #NA,
  hclust_method='ward.D',
  color_safe_pal=FALSE,
  output_filename="infercnv_plot",
  output_format="png", #pdf, png, NA
  png_res=300,
  dynamic_resize=0,
  ref_contig = NULL,
  write_expr_matrix=FALSE)
|
/inferCNV.R
|
no_license
|
Yue221/785Project
|
R
| false
| false
| 3,106
|
r
|
# inferCNV Code
# Author: Yue Pan
# Last edited: 04/18/19
########################################
# Notes:
# This is a direct application of inferCNV within the current package.
# Input: GSE72056 melanoma single-cell matrix with three header rows:
# row 1 = tumor id, row 2 = malignancy call (0/1/2), row 3 unused here.
########################################
library("devtools")
library(rjags)
library(infercnv)
library(data.table)
# install.packages("rjags")
# devtools::install_github("broadinstitute/infercnv", ref="RELEASE_0_99_4")
########################################
## Start real code
# prepare for raw count matrix
data <- fread('GSE72056_melanoma_single_cell_revised_v2.txt',
sep = "\t",header = TRUE,stringsAsFactors=FALSE)
data <- as.data.frame(data)
# Recode the malignancy flag (row 2) from numeric codes to labels.
# Once a cell is relabelled the later equality tests no longer match,
# so the three independent `if`s do not clash.
for (i in 2:ncol(data)){
if (data[2,i] == 1) data[2,i] = 'non-malignant'
if (data[2,i] == 2) data[2,i] = 'malignant'
if (data[2,i] == 0) data[2,i] = 'unresolved'
}
# count <- data[-c(1,2,3),]
# Drop the 3 header rows; keep the gene-name column plus tumor-78 cells.
count <- data[-c(1,2,3),c(1,which(data[1,]==78))] # change here for subject
rownames <- count$Cell
count <- count[,-1]
rownames(count) <- make.names(rownames, unique=TRUE)
cell_name <- colnames(count)
# cell_type <- as.factor(data[2,])[2:ncol(data)]
cell_type <- data[2,][which(data[1,]==78)] # change here for subject
# prepare for annotation file (cell name -> malignancy label)
sample_annotation <- matrix(c(cell_name,cell_type), ncol = 2)
# final raw counts matrix: coerce every column to numeric
count <- as.data.frame(lapply(count, as.numeric))
colnames(count) <- cell_name
rownames(count) <- make.names(rownames, unique=TRUE)
# NOTE(review): assumes the matrix stores log2(x/10 + 1) values; this line
# inverts that transform -- confirm against the GSE72056 description.
count <- (2^count - 1)*10
# inferCNV object; 'non-malignant' cells serve as the reference group
infercnv_obj = CreateInfercnvObject(raw_counts_matrix=as.matrix(count),
annotations_file="cellAnnotations_78.txt",
delim="\t",
gene_order_file="gencode_v19_gene_pos.txt",
ref_group_names=c("non-malignant"),
#ref_group_names = NULL,
chr_exclude=c('chrY', 'chrM'))
# perform infercnv operations to reveal cnv signal
infercnv_obj = infercnv::run(infercnv_obj,
cutoff=1, # use 1 for smart-seq, 0.1 for 10x-genomics
# max_centered_threshold = NA,
out_dir="output_dir_78_final", # dir is auto-created for storing outputs
cluster_by_groups=T, # cluster; NOTE(review): prefer TRUE over T (T is reassignable)
denoise=T,
HMM=T
)
# final plot
plot_cnv(infercnv_obj,
out_dir="output_dir_78plot_final",
obs_title="malignant/unknown",
ref_title="non-malignant",
cluster_by_groups=TRUE,
#k_obs_groups = 3,
contig_cex=2.5,
x.center=mean(infercnv_obj@expr.data),
x.range="auto", #NA,
hclust_method='ward.D',
color_safe_pal=FALSE,
output_filename="infercnv_plot",
output_format="png", #pdf, png, NA
png_res=300,
dynamic_resize=0,
ref_contig = NULL,
write_expr_matrix=FALSE)
|
# RDash rdash-angular2-alfa 47
## Responsive, bloat free, bootstrap powered admin style dashboard from Startangular.com.
rdash-angular-ts is an Angular 2.0 alfa-47 with typescript implementation of the RDash admin dashboard.
Innovating 1 year old project by Startangular.com! Add new module versions, typescript with typings, ng2-bootstrap ...
In later Angular versions there were so many changes. This repo can not be used as a reference! It is preserved in the memory of those days.
## Credits
* [Elliot Hesp](https://github.com/Ehesp)
* [Leonel Samayoa](https://github.com/lsamayoa)
* [Mathew Goldsborough](https://github.com/mgoldsborough)
* [Ricardo Pascua Jr](https://github.com/rdpascua)
|
/readme.rd
|
permissive
|
beliyu/Whi03
|
R
| false
| false
| 700
|
rd
|
# RDash rdash-angular2-alfa 47
## Responsive, bloat free, bootstrap powered admin style dashboard from Startangular.com.
rdash-angular-ts is an Angular 2.0 alfa-47 with typescript implementation of the RDash admin dashboard.
Innovating 1 year old project by Startangular.com! Add new module versions, typescript with typings, ng2-bootstrap ...
In later Angular versions there were so many changes. This repo can not be used as a reference! It is preserved in the memory of those days.
## Credits
* [Elliot Hesp](https://github.com/Ehesp)
* [Leonel Samayoa](https://github.com/lsamayoa)
* [Mathew Goldsborough](https://github.com/mgoldsborough)
* [Ricardo Pascua Jr](https://github.com/rdpascua)
|
library(readxl)
rent_data_1219 = read_xlsx("~/data/rent_data/new_rent_data.xlsx")
#modify the original data for predict(put rent data 3 month ahead), remove col not used for this model
# Build the month-level modelling data set.
#
# Shifts `finalprice` three months ahead (per mall) to create the prediction
# target `rent`, then drops columns not used by the model.
#
# rent_data_1219: data.frame of monthly mall records containing at least
#   `mall_name` and `finalprice` columns.
# Returns a data.frame without year/open_date/finalprice/key1, with the new
# `rent` target column (the last 3 months per mall are NA).
#
# NOTE(review): as a side effect this overwrites the global `rent_data_1219`
# with its data.table version; getyearModeData() below depends on that global,
# so the assign() is kept, but a pure return value would be preferable.
getModelData <- function(rent_data_1219) {
  rent_data_1219 <- data.table(rent_data_1219)
  # Target = finalprice 3 months later; pad the tail of each mall with NA.
  rent_data_1219[, rent := c(finalprice[-1:-3], rep(NA, 3)), by = "mall_name"]
  assign('rent_data_1219', rent_data_1219, envir = .GlobalEnv)
  colpicked <- !(colnames(rent_data_1219) %in% c("year", "open_date", "finalprice", "key1"))
  rent_data_1219 <- data.frame(rent_data_1219)
  rent_data_1219[, colpicked]
}
rentModelData = getModelData(rent_data_1219)
# Build a year-level modelling data set from the monthly data.
#
# NOTE(review): operates on the global `rent_data_1219` data.table created by
# getModelData()'s assign(); helpers getYearPara()/getYearReal() are defined
# elsewhere -- confirm they are sourced before calling this.
getyearModeData = function(){
# NOTE(review): the result of the next line is discarded (no assignment and
# no := update), so it has no effect -- confirm whether trimming the last 12
# months per mall was intended here.
rent_data_1219[,.SD[1:(.N-12),],by = "mall_name"]
rent_data_1219[,seq := 1:.N,by = "mall_name"] # within-mall month index; := modifies the global by reference
rent_data_1219[,getYearPara(date_id,min),by = "mall_name"]
# View(rent_data_1219[,.(startmon = getYearPara(date_id,min)),by = "mall_name"])
# Columns aggregated by yearly sum vs yearly mean.
sum_col = c("finalprice","rent_area_wa","customer_num","sale","finalprice_jiaju","area_jiaju",
"jiaju_num","finalprice_jiancai","area_jiancai","jiancai_num","finalprice_ruanzhuang",
"area_ruanzhuang","ruanzhuang_num","final_jinkou","area_jinkou","jinkou_num","finalprice_xinyetai",
"area_xinyetai","xinyetai_num","brand_num","gdp","population","region_area","density")
avg_col = c("avg_gdp","avg_salary","highway_distance","road_distance","location",
"subway_distance","shangquan_distance","shangquan_num")
# Per mall: yearly sums, yearly means, max date_id, and the real yearly price
# (predprice) -- all computed via the external getYearPara/getYearReal helpers.
rent_data_year = rent_data_1219[,c(lapply(.SD[,sum_col,with=FALSE],getYearPara,sum),lapply(.SD[,avg_col,with=FALSE],getYearPara,mean),lapply(.SD[,"date_id",with = FALSE],getYearPara,max),"predprice"=lapply(.SD[,"finalprice",with = FALSE],getYearReal,sum)),by = "mall_name"]
# setnames(rent_data_year,"predprice.finalprice","predprice")
# Shift the yearly price 12 rows ahead per mall to form the target.
rent_data_year = rent_data_year[,predprice:=c(predprice.finalprice[1:(.N-12)],rep(NA,12)),by = "mall_name"]
rent_data_year$predprice.finalprice = NULL
setnames(rent_data_year,"predprice","rent")
# base_rent = rent_data_year[,!(names(rent_data_year)%in%c("mall_name","date_id"))]
# dest = rows to forecast (latest date); test = last labelled row per mall;
# train = everything else with a known target.
dest_rent = rent_data_year[date_id == 201711,]
rest_rent = rent_data_year[!is.na(rent),]
test_rent = rest_rent[,.SD[.N,],by = "mall_name"]
train_rent = rest_rent[,.SD[1:(.N-1),],by = "mall_name"]
test_mall_names = test_rent$mall_name
dest_mall_names = dest_rent$mall_name
train_rent = train_rent[,!(names(train_rent)%in%c("mall_name","date_id")),with = FALSE]
test_rent = test_rent[,!(names(test_rent)%in%c("mall_name","date_id")),with = FALSE]
dest_rent = dest_rent[,!(names(dest_rent)%in%c("mall_name","date_id")),with = FALSE]
train_rent = data.frame(train_rent)
test_rent = data.frame(test_rent)
# NOTE(review): only the last expression, data.frame(dest_rent), is returned;
# train_rent/test_rent (and the mall-name vectors) are local and discarded --
# consider returning list(train_rent, test_rent, dest_rent, ...) instead.
dest_rent = data.frame(dest_rent)
}
# Separate the data into train, test and dest sets, along with the names of the test malls returned
# Split a modelling data set into train / test / dest (forecast-target)
# subsets by date, dropping identifier columns.
#
# big_general_info: data.frame with at least mall_name, date_id and rent
#   columns (mall_code optional).
# test_time_index / dest_time_index: date_id values (yyyymm) assigned to the
#   test and dest sets; every other date goes to train.
# Returns a list with train_rent, test_rent, dest_rent (id columns removed)
# and test_mall_names (mall names of the test rows after NA filtering).
getTrainTestData <- function(big_general_info,
                             test_time_index = 201706:201708,
                             dest_time_index = 201709:201711) {
  holdout_dates <- c(test_time_index, dest_time_index)
  train_df <- big_general_info[!(big_general_info$date_id %in% holdout_dates), ]
  dest_df <- big_general_info[big_general_info$date_id %in% dest_time_index, ]
  test_df <- big_general_info[big_general_info$date_id %in% test_time_index, ]
  # Drop incomplete rows. The dest rows may legitimately lack the target
  # `rent` (it is what we want to predict), so `rent` is excluded from that
  # completeness check.
  train_df <- train_df[complete.cases(train_df), ]
  dest_df <- dest_df[complete.cases(dest_df[, !(names(dest_df) %in% "rent")]), ]
  test_df <- test_df[complete.cases(test_df), ]
  test_mall_names <- test_df[, "mall_name"]
  # Identifier columns carry no predictive signal; remove them from all sets.
  # The mask is computed once from the input's column names (all three
  # subsets share the same columns).
  keep <- !(names(big_general_info) %in% c("mall_name", "mall_code", "date_id"))
  train_rent <- train_df[, keep]
  dest_rent <- dest_df[, keep]
  test_rent <- test_df[, keep]
  return(list("train_rent" = train_rent, "test_rent" = test_rent,
              "dest_rent" = dest_rent, "test_mall_names" = test_mall_names))
}
# Build train/test/dest splits for the month-level model and unpack them.
rentModelDataList = getTrainTestData(rentModelData)
train_rent_1219 = rentModelDataList$train_rent
test_rent_1219 = rentModelDataList$test_rent
dest_rent_1219 = rentModelDataList$dest_rent
test_mall_names_1219 = rentModelDataList$test_mall_names
# Correlation matrix of the train+test predictors (used to spot collinearity).
rentModelCor = cor(rbindlist(list(train_rent_1219,test_rent_1219)))
View(rentModelCor)
library(caret)
rentModelHighlyCorrelated <- findCorrelation(rentModelCor, cutoff=0.5,names = TRUE)
print(rentModelHighlyCorrelated)
control <- trainControl(method="repeatedcv", number=10, repeats=3)
# train the model
rentModel <- train(rent~., data = train_rent_1219, method="neuralnet", preProcess="scale", trControl=control,
importance = T)
# NOTE(review): the line below immediately overwrites the caret neuralnet fit
# above, so only the gbm model is used from here on -- confirm the neuralnet
# training is intentional. gbm() also requires library(gbm), which is not
# loaded in this file -- confirm it is attached elsewhere.
rentModel = gbm(rent ~ . ,data = train_rent_1219,distribution = "gaussian",n.trees = 100000,interaction.depth = 4)
# estimate variable importance (chained assignment keeps a copy in importancegbm)
importance = importancegbm <- varImp(rentModel, scale=FALSE)
# importancegbm <- varImp(rentModel, scale=FALSE,numTrees = 100000)
# summarize importance
print(importance)
# plot importance
plot(importance)
# Convenience aliases (shorter names) used by later interactive work.
train_rent = train_rent_1219
test_rent = test_rent_1219
dest_rent = dest_rent_1219
# Run RStudio's debugSource() on a sub-range of a file.
#
# file:  path to an R source file.
# start: first line (1-based, inclusive) to source.
# end:   last line (inclusive) to source.
# ...:   forwarded to debugSource().
#
# NOTE(review): debugSource() is provided by the RStudio IDE, so this helper
# only works inside an RStudio session -- confirm that is the intended use.
debugSource2 <- function(file, start, end, ...) {
  # sep = "\n" makes scan() return one string per line of the file;
  # skip/nlines restrict the read to the requested range.
  file.lines <- scan(file, what = character(), skip = start - 1,
                     nlines = end - start + 1, sep = "\n")
  file.lines.collapsed <- paste(file.lines, collapse = "\n")
  debugSource(textConnection(file.lines.collapsed), ...)
}
|
/rental_model/Rfile/data_process3.R
|
no_license
|
yuanqingye/R_Projects
|
R
| false
| false
| 5,299
|
r
|
library(readxl)
rent_data_1219 = read_xlsx("~/data/rent_data/new_rent_data.xlsx")
#modify the original data for predict(put rent data 3 month ahead), remove col not used for this model
# Month-level model data builder: shifts finalprice 3 months ahead (per mall)
# to create the `rent` target and drops columns not used by the model.
# NOTE(review): side effect -- overwrites the global rent_data_1219 with its
# data.table version; getyearModeData() relies on that global.
getModelData = function(rent_data_1219){
rent_data_1219 = data.table(rent_data_1219)
# Target = finalprice 3 months later; last 3 months per mall become NA.
rent_data_1219[,rent := c(finalprice[-1:-3],rep(NA,3)),by = "mall_name"]
assign('rent_data_1219',rent_data_1219,envir = .GlobalEnv)
colpicked = !(colnames(rent_data_1219)%in%c("year","open_date","finalprice","key1"))
rent_data_1219 = data.frame(rent_data_1219)
rent_data_1219 = rent_data_1219[,colpicked]
return(rent_data_1219)
}
rentModelData = getModelData(rent_data_1219)
getyearModeData = function(){
rent_data_1219[,.SD[1:(.N-12),],by = "mall_name"]
rent_data_1219[,seq := 1:.N,by = "mall_name"]
rent_data_1219[,getYearPara(date_id,min),by = "mall_name"]
# View(rent_data_1219[,.(startmon = getYearPara(date_id,min)),by = "mall_name"])
sum_col = c("finalprice","rent_area_wa","customer_num","sale","finalprice_jiaju","area_jiaju",
"jiaju_num","finalprice_jiancai","area_jiancai","jiancai_num","finalprice_ruanzhuang",
"area_ruanzhuang","ruanzhuang_num","final_jinkou","area_jinkou","jinkou_num","finalprice_xinyetai",
"area_xinyetai","xinyetai_num","brand_num","gdp","population","region_area","density")
avg_col = c("avg_gdp","avg_salary","highway_distance","road_distance","location",
"subway_distance","shangquan_distance","shangquan_num")
rent_data_year = rent_data_1219[,c(lapply(.SD[,sum_col,with=FALSE],getYearPara,sum),lapply(.SD[,avg_col,with=FALSE],getYearPara,mean),lapply(.SD[,"date_id",with = FALSE],getYearPara,max),"predprice"=lapply(.SD[,"finalprice",with = FALSE],getYearReal,sum)),by = "mall_name"]
# setnames(rent_data_year,"predprice.finalprice","predprice")
rent_data_year = rent_data_year[,predprice:=c(predprice.finalprice[1:(.N-12)],rep(NA,12)),by = "mall_name"]
rent_data_year$predprice.finalprice = NULL
setnames(rent_data_year,"predprice","rent")
# base_rent = rent_data_year[,!(names(rent_data_year)%in%c("mall_name","date_id"))]
dest_rent = rent_data_year[date_id == 201711,]
rest_rent = rent_data_year[!is.na(rent),]
test_rent = rest_rent[,.SD[.N,],by = "mall_name"]
train_rent = rest_rent[,.SD[1:(.N-1),],by = "mall_name"]
test_mall_names = test_rent$mall_name
dest_mall_names = dest_rent$mall_name
train_rent = train_rent[,!(names(train_rent)%in%c("mall_name","date_id")),with = FALSE]
test_rent = test_rent[,!(names(test_rent)%in%c("mall_name","date_id")),with = FALSE]
dest_rent = dest_rent[,!(names(dest_rent)%in%c("mall_name","date_id")),with = FALSE]
train_rent = data.frame(train_rent)
test_rent = data.frame(test_rent)
dest_rent = data.frame(dest_rent)
}
# Separate the data into train, test and dest sets, along with the names of the test malls returned
# Partition a modelling data set into train / test / dest subsets by date_id,
# drop the identifier columns, and also return the test-row mall names.
#
# big_general_info: data.frame containing mall_name, date_id and rent columns.
# test_time_index / dest_time_index: yyyymm dates routed to test and dest;
#   all remaining dates form the training set.
getTrainTestData <- function(big_general_info,
                             test_time_index = 201706:201708,
                             dest_time_index = 201709:201711) {
  in_test <- big_general_info$date_id %in% test_time_index
  in_dest <- big_general_info$date_id %in% dest_time_index
  train_df <- big_general_info[!(in_test | in_dest), ]
  test_df <- big_general_info[in_test, ]
  dest_df <- big_general_info[in_dest, ]
  # Keep only fully-observed rows; the dest set is allowed to miss the
  # `rent` target, which is what it exists to predict.
  train_df <- train_df[complete.cases(train_df), ]
  test_df <- test_df[complete.cases(test_df), ]
  non_target <- !(names(dest_df) %in% "rent")
  dest_df <- dest_df[complete.cases(dest_df[, non_target]), ]
  test_mall_names <- test_df[, "mall_name"]
  # Identifier columns are stripped from every subset.
  drop_ids <- names(big_general_info) %in% c("mall_name", "mall_code", "date_id")
  list(
    "train_rent" = train_df[, !drop_ids],
    "test_rent" = test_df[, !drop_ids],
    "dest_rent" = dest_df[, !drop_ids],
    "test_mall_names" = test_mall_names
  )
}
rentModelDataList = getTrainTestData(rentModelData)
train_rent_1219 = rentModelDataList$train_rent
test_rent_1219 = rentModelDataList$test_rent
dest_rent_1219 = rentModelDataList$dest_rent
test_mall_names_1219 = rentModelDataList$test_mall_names
rentModelCor = cor(rbindlist(list(train_rent_1219,test_rent_1219)))
View(rentModelCor)
library(caret)
rentModelHighlyCorrelated <- findCorrelation(rentModelCor, cutoff=0.5,names = TRUE)
print(rentModelHighlyCorrelated)
control <- trainControl(method="repeatedcv", number=10, repeats=3)
# train the model
rentModel <- train(rent~., data = train_rent_1219, method="neuralnet", preProcess="scale", trControl=control,
importance = T)
rentModel = gbm(rent ~ . ,data = train_rent_1219,distribution = "gaussian",n.trees = 100000,interaction.depth = 4)
# estimate variable importance
importance = importancegbm <- varImp(rentModel, scale=FALSE)
# importancegbm <- varImp(rentModel, scale=FALSE,numTrees = 100000)
# summarize importance
print(importance)
# plot importance
plot(importance)
train_rent = train_rent_1219
test_rent = test_rent_1219
dest_rent = dest_rent_1219
# Source only lines [start, end] of `file` through debugSource().
# NOTE(review): debugSource() is supplied by the RStudio IDE -- this helper
# only works inside an RStudio session.
debugSource2 <- function(file, start, end, ...) {
  n_lines <- end - start + 1
  # One string per line: sep = "\n" splits the scan on newlines only.
  selected <- scan(file, what = character(), skip = start - 1,
                   nlines = n_lines, sep = "\n")
  src_text <- paste(selected, collapse = "\n")
  debugSource(textConnection(src_text), ...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem.R
\name{play_n_games}
\alias{play_n_games}
\title{Play n Number of Game}
\usage{
play_n_games(n = 100)
}
\arguments{
\item{n}{The number of games to simulate; defaults to 100.}
}
\value{
Table of numeric values indicating the percentage of times a win occurred.
}
\description{
\code{play_n_games()} plays the Monty Hall game as many times as the
simulator wants to play.
}
\details{
The function allows the simulator to choose an n for the number of times
the simulator wishes to simulate the Monty Hall game. Choosing the n
allows the simulator to choose a number and calculate the ideal
winning strategy. Running the simulation a large number of times
will indicate whether the strategy of stay or switch is most
likely to result in a win.
}
\examples{
play_n_games()
}
|
/man/play_n_games.Rd
|
no_license
|
JasonSills/montyhall
|
R
| false
| true
| 849
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-problem.R
\name{play_n_games}
\alias{play_n_games}
\title{Play n Number of Game}
\usage{
play_n_games(n = 100)
}
\arguments{
\item{n}{The number of games to simulate; defaults to 100.}
}
\value{
Table of numeric values indicating the percentage of times a win occurred.
}
\description{
\code{play_n_games()} plays the Monty Hall game as many times as the
simulator wants to play.
}
\details{
The function allows the simulator to choose an n for the number of times
the simulator wishes to simulate the Monty Hall game. Choosing the n
allows the simulator to choose a number and calculate the ideal
winning strategy. Running the simulation a large number of times
will indicate whether the strategy of stay or switch is most
likely to result in a win.
}
\examples{
play_n_games()
}
|
# Load Data/Packages ------------------------------------------------------
library(tidyverse)
# You may need to change this to match your computer's directory structure
# NOTE(review): setwd() in a shared script is fragile; prefer project-relative
# paths so the script runs unmodified on other machines.
setwd("assets/Titanic/")
# Load training data (only the outcome and the single predictor are kept)
train <- readr::read_csv("train.csv") %>%
# Clean things up a little
dplyr::select(Survived, Sex)
# LOOCV -------------------------------------------------------------------
# Leave-one-out cross-validation: one fold per observation.
n <- nrow(train)
train <- train %>%
mutate(fold=1:n)
# For each observation, you'll store your score here:
scores <- rep(0, n)
for(i in 1:n){
# 1. Create disjoint pseudo-train and pseudo-test sets based on folding
# scheme. In this case pseudo_test is just 1 row:
pseudo_train <- train %>%
filter(fold != i)
pseudo_test <- train %>%
filter(fold == i)
# 2. Train the model using (i.e. fit the model to) the pseudo_train data.
#
# In this case: Nothing to do here! (the "females survive" rule has no
# parameters to fit)
# 3. Get fitted/predicted values y-hat for the pseudo_test data using the
# trained model
#
# In this case: Predicting here is trivial since the model is trivial. Make
# single prediction here
pseudo_test <- pseudo_test %>%
mutate(Predictions = ifelse(Sex == "female", 1, 0))
# 4. Compute your score on the pseudo_test data
#
# In this case: Since the outcome is binary and not continuous, "Did you guess
# correctly?" is a more appropriate scoring system than MSE. For the single
# observation, does your prediction match the truth?
pseudo_test <- pseudo_test %>%
mutate(score = Predictions == Survived)
# 5. Save your score for this fold (a logical; coerced to 0/1 on assignment)
scores[i] <- pseudo_test$score
}
# The average over the n-folds is computed below. Recall the score on Kaggle
# of 0.7655. A close approximation!
scores
mean(scores)
# k=5 Fold CV -------------------------------------------------------------
# Assign folds at random
# NOTE(review): no set.seed() here, so the fold assignment (and hence the
# scores below) change on every run -- add a seed for reproducibility.
n_folds <- 5
train <- train %>%
mutate(fold = sample(1:n_folds, replace=TRUE, size=nrow(train)))
# Store your scores here:
scores <- rep(0, n_folds)
for(i in 1:n_folds){
# 1. Create disjoint pseudo-train and pseudo-test sets based on folding
# scheme. Now the pseudo_test has more than one observation, it has 178 = ~1/5
# of data
pseudo_train <- train %>%
filter(fold != i)
pseudo_test <- train %>%
filter(fold == i)
# 2. Train the model using (i.e. fit the model to) the pseudo_train data.
#
# In this case: Nothing to do here!
# 3. Get fitted/predicted values y-hat for the pseudo_test data using the
# trained model
#
# In this case: Predicting here is trivial since the model is trivial. Make
# ~178 predictions here
pseudo_test <- pseudo_test %>%
mutate(Predictions = ifelse(Sex == "female", 1, 0))
# 4. Compute your score on the pseudo_test data
#
# In this case: Since the outcome is binary and not continuous, "Did you guess
# correctly?" is a more appropriate scoring system than MSE. Now instead of a
# single observation, we take the average over all ~178 predictions
pseudo_test_score <- pseudo_test %>%
summarise(score = mean(Predictions == Survived))
# 5. Save your score for this fold
scores[i] <- pseudo_test_score$score
}
# The average over the 5-folds is computed below. Recall the score on Kaggle
# of 0.7655. A close approximation!
scores
mean(scores)
# Using Leaderboard Information -------------------------------------------
# We saw that Kaggle takes the test data (418 rows), only reports your score on
# the leaderboard based on half of these data, and declares the winner based on
# the other half which is withheld until the very end of the competition. Not
# only that, Kaggle does not tell you how they split the 418 rows. Say Kaggle
# didn't do this and reported your score on the leaderboard based on the entire
# test data (418 rows). Write 2-3 sentences outlining a strategy of how you
# could exploit the information given by the leaderboard to get a perfect score
# of 100%.
# You could:
# 1. Submit something and get your score
# 2. Change only the first prediction and see the effect on your score:
# a) If your score goes up, then you were initially wrong
# b) If your score goes down, then you were initially right
# 3. Do this 418 times to get EVERYTHING correct
|
/assets/PS/PS02_solutions.R
|
no_license
|
biancaglez/MATH218
|
R
| false
| false
| 4,227
|
r
|
# Load Data/Packages ------------------------------------------------------
library(tidyverse)
# You may need to change this to match your computer's directory structure
setwd("assets/Titanic/")
# Load test data
train <- readr::read_csv("train.csv") %>%
# Clean things up a little
dplyr::select(Survived, Sex)
# LOOCV -------------------------------------------------------------------
n <- nrow(train)
train <- train %>%
mutate(fold=1:n)
# For each observation, you'll store your score here:
scores <- rep(0, n)
for(i in 1:n){
# 1. Create disjoint pseudo-train and pseudo-test sets based on folding
# scheme. In this case pseudo_test is just 1 row:
pseudo_train <- train %>%
filter(fold != i)
pseudo_test <- train %>%
filter(fold == i)
# 2. Train the model using (i.e. fit the model to) the pseudo_train data.
#
# In this case: Nothing to do here!
# 3. Get fitted/predicted values y-hat for the pseudo_test data using the
# trained model
#
# In this case: Predicting here is trivial since the model is trivial. Make
# single prediction here
pseudo_test <- pseudo_test %>%
mutate(Predictions = ifelse(Sex == "female", 1, 0))
# 4. Compute your score on the pseudo_test data
#
# In this case: Since the outcome is binary and not continuous, "Did you guess
# correct?" is a more appropriate scoring system than MSE. For the single
# observation, does your prediction match the truth?
pseudo_test <- pseudo_test %>%
mutate(score = Predictions == Survived)
# 5. Save your score for this fold
scores[i] <- pseudo_test$score
}
# The average over the n-folds is computed below. Recall the score on Kaggle
# of 0.7655. A close approximation!
scores
mean(scores)
# k=5 Fold CV -------------------------------------------------------------
# Assign folds at random
n_folds <- 5
train <- train %>%
mutate(fold = sample(1:n_folds, replace=TRUE, size=nrow(train)))
# Store your scores here:
scores <- rep(0, n_folds)
for(i in 1:n_folds){
# 1. Create disjoint pseudo-train and pseudo-test sets based on folding
# scheme. Now the pseudo_test has more than one observation, it has 178 = ~1/5
# of data
pseudo_train <- train %>%
filter(fold != i)
pseudo_test <- train %>%
filter(fold == i)
# 2. Train the model using (i.e. fit the model to) the pseudo_train data.
#
# In this case: Nothing to do here!
# 3. Get fitted/predicted values y-hat for the pseudo_test data using the
# trained model
#
# In this case: Predicting here is trivial since the model is trivial. Make
# ~178 predictions here
pseudo_test <- pseudo_test %>%
mutate(Predictions = ifelse(Sex == "female", 1, 0))
# 4. Compute your score on the pseudo_test data
#
# In this case: Since the outcome is binary and not continuous, "Did you guess
# correct?" is a more appropriate scoring system than MSE. Now instead of a single
# observation, we take the average over all ~178 predictions
pseudo_test_score <- pseudo_test %>%
summarise(score = mean(Predictions == Survived))
# 5. Save your score for this fold
scores[i] <- pseudo_test_score$score
}
# The average over the 5-folds is computed below. Recall the score on Kaggle
# of 0.7655. A close approximation!
scores
mean(scores)
# Using Leaderboard Information -------------------------------------------
# We saw that Kaggle takes the test data (418 rows), only reports your score on
# the leaderboard based on half of these data, and declares the winner based on
# the other half which is withheld until the very end of the competition. Not
# only that, Kaggle does not tell you how they split the 418 rows. Say Kaggle
# didn't do this and reported your score on the leaderboard based on the entire
# test data (418 rows). Write 2-3 sentences outlining a strategy of how you
# could exploit the information given by the leaderboard to get a perfect score
# of 100%.
# You could:
# 1. Submit something and get your score
# 2. Change only the first prediction and see the effect on your score:
# a) If your score goes up, then you were initially wrong
# b) If your score goes down, then you were initially right
# 3. Do this 418 times to get EVERYTHING correct
|
#################################################
# Survival Models, T. Kleinow ###############
# Script to test submissions for project 1 ##
#################################################
# DO NOT INCLUDE THIS CODE INTO YOUR R-FILE #
#################################################
# remove all R objects in the current workspace
# never include this line in the R file for your
# submission
rm(list = ls())
# close all graphical displays
graphics.off()
# Load R file
# The following line needs to be changed if your
# R file is not called Project1_sub.R
# NOTE(review): the comment above says Project1_sub.R but the file sourced
# below is Project3.r -- confirm which name is intended.
source("Project3.r")
# Check that all answers for this project are
# correctly defined; print() errors with "object not found"
# if the sourced file did not create any of these objects
print(c(AnswerQ1a, AnswerQ1b, AnswerQ3, AnswerQ4a, AnswerQ4b, AnswerQ5a, AnswerQ5b))
|
/p_01/TestSubmissionProj1.R
|
no_license
|
amitkparekh/HW-F79SU
|
R
| false
| false
| 753
|
r
|
#################################################
# Survival Models, T. Kleinow ###############
# Script to test submissions for project 1 ##
#################################################
# DO NOT INCLUDE THIS CODE INTO YOUR R-FILE #
#################################################
# remove all R objects in the current workspace
# never include this line in the R file for your
# submission
rm(list = ls())
# close all graphical displays
graphics.off()
# Load R file
# The following line needs to be changed if your
# R file is not called Project1_sub.R
source("Project3.r")
# Check that all answers for this project are
# correctly defined
print(c(AnswerQ1a, AnswerQ1b, AnswerQ3, AnswerQ4a, AnswerQ4b, AnswerQ5a, AnswerQ5b))
|
## Reading, Tidying & Transforming Raw Team Data
## Load Relevant Packages
library(tidyverse)
## Reading In Data
team_stats_1 <- read_csv("data/raw/2018-19_nba_team_statistics_1.csv")
team_stats_2 <- read_csv("data/raw/2018-19_nba_team_statistics_2.csv")
team_payroll <- read_csv("data/raw/2019-20_nba_team-payroll.csv")
## The above code reads in the relevant data sets and saves them to the objects preceding the <-.
## Tidying & Transforming Data - team_stats_1
team_tidy_1 <- team_stats_1 %>%
rename(x3PAr = '3PAr',
TSp = 'TS%',
eFGp = 'eFG%',
TOVp = 'TOV%',
ORBp = 'ORB%',
FTpFGA = 'FT/FGA',
DRBp = 'DRB%') %>%
arrange(Team) %>%
select(Team : W, Rk, ORtg : DRtg, TSp : eFGp)
## The above code does the following:
## 1. Creates new object, team_tidy_1, consisting of data from team_stats_1.
## 2. Renames variables with illegal characters in them.
## 3. Arranges the data so the teams are in alphabetical order.
## 4. Selects variables in the preferred order, leaving out ones deemed irrelevant for the analysis.
## Tidying & Transforming Data - team_stats_2
team_tidy_2 <-team_stats_2 %>%
rename(FGp = 'FG%',
x3P = '3P',
x3PA = '3PA',
x3Pp = '3P%',
x2P = '2P',
x2PA = '2PA',
x2Pp = '2P%',
FTp = 'FT%') %>%
arrange(Team) %>%
mutate(PPG = PTS / G,
APG = AST / G,
RPG = TRB / G,
OPG = ORB / G,
DPG = DRB / G,
SPG = STL / G,
BPG = BLK / G,
TPG = TOV / G,
FPG = PF / G,
PPM = PTS / MP,
APM = AST / MP,
RPM = TRB / MP) %>%
mutate_if(is.numeric, round, digits = 3) %>%
select(Team : G, PPG : RPM)
## The above code does the following:
## 1. Creates a new object, team_tidy_2, using data from team_stats_2.
## 2. Renames variables with illegal characters in them.
## 3. Arranges the teams so they are in alphabetical order.
## 4. Creates new variables based on existing variables, paying particular attention to per game
## and per minute metrics.
## 5. Rounds the variables categorised as numeric to 3 decimal places.
## 6. Selects variables in the preferred order, leaving out ones deemed unnecessary for the analysis.
## Tidying & Transforming Data - team_payroll
team_payroll$salary <- team_payroll$salary %>%
str_replace_all(pattern = "\\$", replacement = "") %>%
str_replace_all(pattern = "\\,", replacement = "") %>%
as.numeric()
## The above code does the following:
## 1. Saves changes made into salary variable of the team_payroll data set.
## 2. Removes the $ and , from the salary value to make them resemble numbers.
## 3. Changes the variable class to numeric.
team_payroll$team_id <- team_payroll$team_id %>%
as.factor()
## The above code changes the team_id variable in the team_payroll data from a character vector to a factor vector.
## This is important for the next step.
levels(team_payroll$team_id) <- c("Miami Heat", "Golden State Warriors", "Oklahoma City Thunder", "Toronto Raptors",
"Milwaukee Bucks", "Portland Trail Blazers", "Detroit Pistons", "Houston Rockets",
"Memphis Grizzlies", "Boston Celtics", "Washington Wizards", "New York Knicks",
"Cleveland Cavaliers", "Minnesota Timberwolves", "San Antonio Spurs",
"Charlotte Hornets", "Brooklyn Nets", "Denver Nuggets", "Los Angeles Clippers",
"New Orleans Pelicans", "Philadelphia 76ers", "Orlando Magic", "Utah Jazz",
"Chicago Bulls", "Indiana Pacers", "Phoenix Suns", "Los Angeles Lakers",
"Sacramento Kings", "Dallas Mavericks", "Atlanta Hawks")
team_payroll$team_id <- as.character(team_payroll$team_id)
## The above code renames all the team_id levels to their respective city and nickname. It is then converted
## to a character vector so it is easier to combine the data later on.
team_payroll_tidy <- team_payroll %>%
rename(Team = 'team_id',
Salary = 'salary') %>%
arrange(Team) %>%
select(Team, Salary)
## The above code does the following:
## 1. Creates a new object, team_payroll_tidy, using data from team_payroll.
## 2. Renames the team_id variable to "Team" to facilitate easier combining of objects later on.
## The salary variable is also capitalised.
## 3. Arranges the Team variable so the teams are in alphabetical order.
## 4. Selects the variables in the preferred order, leaving out ones deemed unnecessary for the analysis.
## Combining the Transformed Data
team_joined <- left_join(team_tidy_1, team_tidy_2,
"Team")
## The above code joins team_trans_1 to team_trans_2, matching by Team, and is then saved into a new
## object called team_joined.
team_stats_final <- team_joined %>%
left_join(x = team_joined, y = team_payroll_tidy,
by = "Team") %>%
select(Team, G, W, Rk, Salary, ORtg : eFGp, PPG : RPM)
## The above code does the following:
## 1. Creates a new object, team_stats_final, using data from team_joined.
## 2. team_payroll_tidy is joined to team_joined, matching by Team.
## 3. The variables are reordered in a more ideal way for analysis.
## Writing the Processed Data to a New File
write_csv(x = team_stats_final, path = "data/processed/team_stats_final.csv")
|
/cleaning.R/tidying_team-data.R
|
no_license
|
shaun-cameron/nba-data-analysis-project
|
R
| false
| false
| 5,468
|
r
|
## Reading, Tidying & Transforming Raw Team Data
## Load Relevant Packages
library(tidyverse)
## Reading In Data
team_stats_1 <- read_csv("data/raw/2018-19_nba_team_statistics_1.csv")
team_stats_2 <- read_csv("data/raw/2018-19_nba_team_statistics_2.csv")
team_payroll <- read_csv("data/raw/2019-20_nba_team-payroll.csv")
## The above code reads in the relevant data sets and saves them to the objects preceding the <-.
## Tidying & Transforming Data - team_stats_1
team_tidy_1 <- team_stats_1 %>%
rename(x3PAr = '3PAr',
TSp = 'TS%',
eFGp = 'eFG%',
TOVp = 'TOV%',
ORBp = 'ORB%',
FTpFGA = 'FT/FGA',
DRBp = 'DRB%') %>%
arrange(Team) %>%
select(Team : W, Rk, ORtg : DRtg, TSp : eFGp)
## The above code does the following:
## 1. Creates new object, team_tidy_1, consisting of data from team_stats_1.
## 2. Renames variables with illegal characters in them.
## 3. Arranges the data so the teams are in alphabetical order.
## 4. Selects variables in the preferred order, leaving out ones deemed irrelevant for the analysis.
## Tidying & Transforming Data - team_stats_2
team_tidy_2 <-team_stats_2 %>%
rename(FGp = 'FG%',
x3P = '3P',
x3PA = '3PA',
x3Pp = '3P%',
x2P = '2P',
x2PA = '2PA',
x2Pp = '2P%',
FTp = 'FT%') %>%
arrange(Team) %>%
mutate(PPG = PTS / G,
APG = AST / G,
RPG = TRB / G,
OPG = ORB / G,
DPG = DRB / G,
SPG = STL / G,
BPG = BLK / G,
TPG = TOV / G,
FPG = PF / G,
PPM = PTS / MP,
APM = AST / MP,
RPM = TRB / MP) %>%
mutate_if(is.numeric, round, digits = 3) %>%
select(Team : G, PPG : RPM)
## The above code does the following:
## 1. Creates a new object, team_tidy_2, using data from team_stats_2.
## 2. Renames variables with illegal characters in them.
## 3. Arranges the teams so they are in alphabetical order.
## 4. Creates new variables based on existing variables, paying particular attention to per game
## and per minute metrics.
## 5. Rounds the variables categorised as numeric to 3 decimal places.
## 6. Selects variables in the preferred order, leaving out ones deemed unnecessary for the analysis.
## Tidying & Transforming Data - team_payroll
team_payroll$salary <- team_payroll$salary %>%
str_replace_all(pattern = "\\$", replacement = "") %>%
str_replace_all(pattern = "\\,", replacement = "") %>%
as.numeric()
## The above code does the following:
## 1. Saves changes made into salary variable of the team_payroll data set.
## 2. Removes the $ and , from the salary value to make them resemble numbers.
## 3. Changes the variable class to numeric.
team_payroll$team_id <- team_payroll$team_id %>%
as.factor()
## The above code changes the team_id variable in the team_payroll data from a character vector to a factor vector.
## This is important for the next step.
levels(team_payroll$team_id) <- c("Miami Heat", "Golden State Warriors", "Oklahoma City Thunder", "Toronto Raptors",
"Milwaukee Bucks", "Portland Trail Blazers", "Detroit Pistons", "Houston Rockets",
"Memphis Grizzlies", "Boston Celtics", "Washington Wizards", "New York Knicks",
"Cleveland Cavaliers", "Minnesota Timberwolves", "San Antonio Spurs",
"Charlotte Hornets", "Brooklyn Nets", "Denver Nuggets", "Los Angeles Clippers",
"New Orleans Pelicans", "Philadelphia 76ers", "Orlando Magic", "Utah Jazz",
"Chicago Bulls", "Indiana Pacers", "Phoenix Suns", "Los Angeles Lakers",
"Sacramento Kings", "Dallas Mavericks", "Atlanta Hawks")
team_payroll$team_id <- as.character(team_payroll$team_id)
## The above code renames all the team_id levels to their respective city and nickname. It is then converted
## to a character vector so it is easier to combine the data later on.
team_payroll_tidy <- team_payroll %>%
rename(Team = 'team_id',
Salary = 'salary') %>%
arrange(Team) %>%
select(Team, Salary)
## The above code does the following:
## 1. Creates a new object, team_payroll_tidy, using data from team_payroll.
## 2. Renames the team_id variable to "Team" to facilitate easier combining of objects later on.
## The salary variable is also capitalised.
## 3. Arranges the Team variable so the teams are in alphabetical order.
## 4. Selects the variables in the preferred order, leaving out ones deemed unnecessary for the analysis.
## Combining the Transformed Data
team_joined <- left_join(team_tidy_1, team_tidy_2, by = "Team")
## The above joins team_tidy_1 to team_tidy_2, matching by Team, and
## saves the result into a new object called team_joined.

## BUG FIX: the original call piped team_joined into left_join() while
## also naming x = team_joined explicitly; under %>% the piped value
## then spilled into the next free formal (`copy`) of left_join().
## Piping the left table in as `x` is the intended call.
team_stats_final <- team_joined %>%
  left_join(team_payroll_tidy, by = "Team") %>%
  select(Team, G, W, Rk, Salary, ORtg : eFGp, PPG : RPM)
## team_payroll_tidy is joined to team_joined by Team, then the
## variables are reordered in a more ideal way for analysis.

## Writing the Processed Data to a New File
## (file given positionally so the call works across readr versions)
write_csv(team_stats_final, "data/processed/team_stats_final.csv")
|
\name{src_mysql}
\alias{src_mysql}
\alias{tbl.src_mysql}
\title{Connect to mysql/mariadb.}
\usage{
src_mysql(dbname, host = NULL, port = 0L, user = "root",
password = "", ...)
\method{tbl}{src_mysql} (src, from, ...)
}
\arguments{
\item{dbname}{Database name}
\item{host,port}{Host name and port number of database}
\item{user,password}{User name and password (if needed)}
\item{...}{for the src, other arguments passed on to the
underlying database connector, \code{dbConnect}. For the
tbl, included for compatibility with the generic, but
otherwise ignored.}
\item{src}{a mysql src created with \code{src_mysql}.}
\item{from}{Either a string giving the name of a table in
the database, or \code{\link{sql}} describing a derived table
or compound join.}
}
\description{
Use \code{src_mysql} to connect to an existing mysql or
mariadb database, and \code{tbl} to connect to tables
within that database. If you are running a local mysql
database, leave all parameters set as their defaults to
connect. If you're connecting to a remote database, ask
your database administrator for the values of these
variables.
}
\section{Debugging}{
To see exactly what SQL is being sent to the database,
you can set option \code{dplyr.show_sql} to true:
\code{options(dplyr.show_sql = TRUE)}. If you're
wondering why a particular query is slow, it can be
helpful to see the query plan. You can do this by setting
\code{options(dplyr.explain_sql = TRUE)}.
}
\section{Grouping}{
The typical way to create a grouped data table is to call
the \code{group_by} method on a mysql tbl: this will take
care of capturing the unevaluated expressions for you.
For best performance, the database should have an index
on the variables that you are grouping by. Use
\code{\link{explain_sql}} to check that mysql is using
the indexes that you expect.
}
\section{Output}{
All data manipulation on SQL tbls are lazy: they will not
actually run the query or retrieve the data unless you
ask for it: they all return a new \code{\link{tbl_sql}}
object. Use \code{\link{compute}} to run the query and
save the results in a temporary table in the database, or use
\code{\link{collect}} to retrieve the results to R.
Note that \code{do} is not lazy since it must pull the
data into R. It returns a \code{\link{tbl_df}} or
\code{\link{grouped_df}}, with one column for each
grouping variable, and one list column that contains the
results of the operation. \code{do} never simplifies its
output.
}
\section{Query principles}{
This section attempts to lay out the principles governing
the generation of SQL queries from the manipulation
verbs. The basic principle is that a sequence of
operations should return the same value (modulo class)
regardless of where the data is stored.
\itemize{ \item \code{arrange(arrange(df, x), y)} should
be equivalent to \code{arrange(df, y, x)}
\item \code{select(select(df, a:x), n:o)} should be
equivalent to \code{select(df, n:o)}
\item \code{mutate(mutate(df, x2 = x * 2), y2 = y * 2)}
should be equivalent to \code{mutate(df, x2 = x * 2, y2 =
y * 2)}
\item \code{filter(filter(df, x == 1), y == 2)} should be
equivalent to \code{filter(df, x == 1, y == 2)}
\item \code{summarise} should return the summarised
output with one level of grouping peeled off. }
}
\examples{
\dontrun{
# Connection basics ---------------------------------------------------------
# To connect to a database first create a src:
my_db <- src_mysql(host = "blah.com", user = "hadley",
password = "pass")
# Then reference a tbl within that src
my_tbl <- tbl(my_db, "my_table")
}
# Here we'll use the Lahman database: to create your own local copy,
# create a local database called "lahman", or tell lahman_mysql() how to
# connect to a database that you can write to
if (has_lahman("mysql")) {
# Methods -------------------------------------------------------------------
batting <- tbl(lahman_mysql(), "Batting")
dim(batting)
colnames(batting)
head(batting)
# Data manipulation verbs ---------------------------------------------------
filter(batting, yearID > 2005, G > 130)
select(batting, playerID:lgID)
arrange(batting, playerID, desc(yearID))
summarise(batting, G = mean(G), n = n())
mutate(batting, rbi2 = 1.0 * R / AB)
# note that all operations are lazy: they don't do anything until you
# request the data, either by `print()`ing it (which shows the first ten
# rows), by looking at the `head()`, or `collect()` the results locally.
system.time(recent <- filter(batting, yearID > 2010))
system.time(collect(recent))
# Group by operations -------------------------------------------------------
# To perform operations by group, create a grouped object with group_by
players <- group_by(batting, playerID)
group_size(players)
# MySQL doesn't support windowed functions, which means that only
# grouped summaries are really useful:
summarise(players, mean_g = mean(G), best_ab = max(AB))
# When you group by multiple level, each summarise peels off one level
per_year <- group_by(batting, playerID, yearID)
stints <- summarise(per_year, stints = max(stint))
filter(ungroup(stints), stints > 3)
summarise(stints, max(stints))
# Joins ---------------------------------------------------------------------
player_info <- select(tbl(lahman_mysql(), "Master"), playerID, hofID,
birthYear)
hof <- select(filter(tbl(lahman_mysql(), "HallOfFame"), inducted == "Y"),
hofID, votedBy, category)
# Match players and their hall of fame data
inner_join(player_info, hof)
# Keep all players, match hof data where available
left_join(player_info, hof)
# Find only players in hof
semi_join(player_info, hof)
# Find players not in hof
anti_join(player_info, hof)
# Arbitrary SQL -------------------------------------------------------------
# You can also provide sql as is, using the sql function:
# batting2008 <- tbl(lahman_mysql(),
# sql("SELECT * FROM Batting WHERE YearID = 2008"))
# batting2008
}
}
|
/man/src_mysql.Rd
|
no_license
|
Funreason/dplyr
|
R
| false
| false
| 6,010
|
rd
|
\name{src_mysql}
\alias{src_mysql}
\alias{tbl.src_mysql}
\title{Connect to mysql/mariadb.}
\usage{
src_mysql(dbname, host = NULL, port = 0L, user = "root",
password = "", ...)
\method{tbl}{src_mysql} (src, from, ...)
}
\arguments{
\item{dbname}{Database name}
\item{host,port}{Host name and port number of database}
\item{user,password}{User name and password (if needed)}
\item{...}{for the src, other arguments passed on to the
underlying database connector, \code{dbConnect}. For the
tbl, included for compatibility with the generic, but
otherwise ignored.}
\item{src}{a mysql src created with \code{src_mysql}.}
\item{from}{Either a string giving the name of a table in
the database, or \code{\link{sql}} describing a derived table
or compound join.}
}
\description{
Use \code{src_mysql} to connect to an existing mysql or
mariadb database, and \code{tbl} to connect to tables
within that database. If you are running a local mysql
database, leave all parameters set as their defaults to
connect. If you're connecting to a remote database, ask
your database administrator for the values of these
variables.
}
\section{Debugging}{
To see exactly what SQL is being sent to the database,
you can set option \code{dplyr.show_sql} to true:
\code{options(dplyr.show_sql = TRUE)}. If you're
wondering why a particular query is slow, it can be
helpful to see the query plan. You can do this by setting
\code{options(dplyr.explain_sql = TRUE)}.
}
\section{Grouping}{
The typical way to create a grouped data table is to call
the \code{group_by} method on a mysql tbl: this will take
care of capturing the unevaluated expressions for you.
For best performance, the database should have an index
on the variables that you are grouping by. Use
\code{\link{explain_sql}} to check that mysql is using
the indexes that you expect.
}
\section{Output}{
All data manipulation on SQL tbls are lazy: they will not
actually run the query or retrieve the data unless you
ask for it: they all return a new \code{\link{tbl_sql}}
object. Use \code{\link{compute}} to run the query and
save the results in a temporary table in the database, or use
\code{\link{collect}} to retrieve the results to R.
Note that \code{do} is not lazy since it must pull the
data into R. It returns a \code{\link{tbl_df}} or
\code{\link{grouped_df}}, with one column for each
grouping variable, and one list column that contains the
results of the operation. \code{do} never simplifies its
output.
}
\section{Query principles}{
This section attempts to lay out the principles governing
the generation of SQL queries from the manipulation
verbs. The basic principle is that a sequence of
operations should return the same value (modulo class)
regardless of where the data is stored.
\itemize{ \item \code{arrange(arrange(df, x), y)} should
be equivalent to \code{arrange(df, y, x)}
\item \code{select(select(df, a:x), n:o)} should be
equivalent to \code{select(df, n:o)}
\item \code{mutate(mutate(df, x2 = x * 2), y2 = y * 2)}
should be equivalent to \code{mutate(df, x2 = x * 2, y2 =
y * 2)}
\item \code{filter(filter(df, x == 1), y == 2)} should be
equivalent to \code{filter(df, x == 1, y == 2)}
\item \code{summarise} should return the summarised
output with one level of grouping peeled off. }
}
\examples{
\dontrun{
# Connection basics ---------------------------------------------------------
# To connect to a database first create a src:
my_db <- src_mysql(host = "blah.com", user = "hadley",
password = "pass")
# Then reference a tbl within that src
my_tbl <- tbl(my_db, "my_table")
}
# Here we'll use the Lahman database: to create your own local copy,
# create a local database called "lahman", or tell lahman_mysql() how to
# connect to a database that you can write to
if (has_lahman("mysql")) {
# Methods -------------------------------------------------------------------
batting <- tbl(lahman_mysql(), "Batting")
dim(batting)
colnames(batting)
head(batting)
# Data manipulation verbs ---------------------------------------------------
filter(batting, yearID > 2005, G > 130)
select(batting, playerID:lgID)
arrange(batting, playerID, desc(yearID))
summarise(batting, G = mean(G), n = n())
mutate(batting, rbi2 = 1.0 * R / AB)
# note that all operations are lazy: they don't do anything until you
# request the data, either by `print()`ing it (which shows the first ten
# rows), by looking at the `head()`, or `collect()` the results locally.
system.time(recent <- filter(batting, yearID > 2010))
system.time(collect(recent))
# Group by operations -------------------------------------------------------
# To perform operations by group, create a grouped object with group_by
players <- group_by(batting, playerID)
group_size(players)
# MySQL doesn't support windowed functions, which means that only
# grouped summaries are really useful:
summarise(players, mean_g = mean(G), best_ab = max(AB))
# When you group by multiple level, each summarise peels off one level
per_year <- group_by(batting, playerID, yearID)
stints <- summarise(per_year, stints = max(stint))
filter(ungroup(stints), stints > 3)
summarise(stints, max(stints))
# Joins ---------------------------------------------------------------------
player_info <- select(tbl(lahman_mysql(), "Master"), playerID, hofID,
birthYear)
hof <- select(filter(tbl(lahman_mysql(), "HallOfFame"), inducted == "Y"),
hofID, votedBy, category)
# Match players and their hall of fame data
inner_join(player_info, hof)
# Keep all players, match hof data where available
left_join(player_info, hof)
# Find only players in hof
semi_join(player_info, hof)
# Find players not in hof
anti_join(player_info, hof)
# Arbitrary SQL -------------------------------------------------------------
# You can also provide sql as is, using the sql function:
# batting2008 <- tbl(lahman_mysql(),
# sql("SELECT * FROM Batting WHERE YearID = 2008"))
# batting2008
}
}
|
## copied/edited from KMsurv
## Actuarial life-table estimator (adapted from KMsurv::lifetab).
##
## tis:    interval endpoints; one element longer than the other
##         arguments (k intervals need k+1 endpoints).
## ninit:  number of individuals entering the first interval.
## nlost:  number lost to follow-up (censored) in each interval.
## nevent: number of events in each interval.
##
## Returns a data.frame with one row per interval: interval bounds,
## counts, survival / density / hazard estimates and their SEs.
lifetab <-
    function (tis, ninit, nlost, nevent)
{
    ## Number entering each interval = initial cohort minus all
    ## earlier losses and events.
    Ypj <- c(ninit, ninit - cumsum(nlost + nevent)[-length(nevent)])
    ## Effective number at risk: the lost are counted as exposed for
    ## half the interval (actuarial adjustment).
    Yj <- Ypj - nlost/2
    Sj <- cumprod(1 - nevent/Yj)
    qj <- nevent/Yj    # conditional probability of an event in the interval
    pj <- 1 - qj       # conditional probability of surviving the interval
    n <- length(Yj)
    ## Shift so Sj is the survival at the *start* of each interval.
    Sj <- c(1, Sj[-n])
    ## Midpoint density and hazard estimates; both are left NA on the
    ## last (open-ended) interval.
    fmj <- c(diff(-1 * Sj), NA)/diff(tis)
    hmj <- nevent/diff(tis)/(Yj - nevent/2)
    hmj[n] <- NA
    ## Standard errors (Greenwood-type cumulative sum for the survival SE).
    Sj.se <- c(0, Sj[-1] * sqrt(cumsum(nevent/Yj/(Yj - nevent))[-length(Sj)]))
    fmj.se <- Sj * qj/diff(tis) * sqrt(c(0, cumsum(qj/Yj/pj)[-n]) +
                                       (pj/Yj/qj))
    fmj.se[n] <- NA
    hmj.se <- sqrt(1 - (hmj * diff(tis)/2)^2) * sqrt(hmj^2/Yj/qj)
    hmj.se[n] <- NA
    data.frame(tstart=tis[-n-1], # new
               tstop=tis[-1], # new
               nsubs = Ypj, nlost = nlost, nrisk = Yj, nevent = nevent,
               surv = Sj, pdf = fmj, hazard = hmj, se.surv = Sj.se,
               se.pdf = fmj.se, se.hazard = hmj.se,
               row.names = paste(tis[-n - 1], tis[-1], sep = "-"))
}
## Formula interface to lifetab(): builds a life table per stratum.
##
## formula: a Surv() response, optionally with strata terms on the RHS.
## data, subset: forwarded to stats::model.frame.
## breaks: interval endpoints; defaults to the sorted unique values of
##         the first Surv column, padded with 0 and Inf as needed.
##
## Returns a "lifetab2" object: a single life table, or (with strata)
## a list of per-stratum life tables.
lifetab2 <-
    function (formula, data, subset, breaks=NULL)
{
    ## Rebuild the matched call as a stats::model.frame() call so that
    ## formula/data/subset are evaluated in the caller's frame.
    Call <- match.call()
    Call[[1]] <- as.name("lifetab2")
    indx <- match(c("formula", "data", "subset"),
                  names(Call), nomatch = 0)
    if (indx[1] == 0)
        stop("a formula argument is required")
    temp <- Call[c(1, indx)]
    temp[[1L]] <- quote(stats::model.frame)
    m <- eval.parent(temp)
    Terms <- terms(formula, c("strata", "cluster"))
    ord <- attr(Terms, "order")
    ## NOTE(review): scalar context; `&&` would be the idiomatic operator.
    if (length(ord) & any(ord != 1))
        stop("Interaction terms are not valid for this function")
    n <- nrow(m)
    Y <- model.extract(m, "response")
    if (!is.Surv(Y))
        stop("Response must be a survival object")
    if (!is.null(attr(Terms, "offset")))
        warning("Offset term ignored")
    ## Grouping factor: combined strata from the RHS terms, or a single
    ## stratum when the RHS is empty.
    ll <- attr(Terms, "term.labels")
    if (length(ll) == 0)
        X <- factor(rep(1, n))
    else X <- strata(m[ll])
    ## NOTE(review): duplicate of the is.Surv() check above.
    if (!is.Surv(Y))
        stop("y must be a Surv object")
    ## newY <- aeqSurv(Y)
    ## Default breaks come from the first Surv column; force the grid to
    ## start at 0 and end at Inf.
    if (is.null(breaks))
        breaks <- c(sort(unique(Y[,1,drop=FALSE])), Inf)
    if (breaks[1] != 0) breaks <- c(0,breaks)
    if (breaks[length(breaks)] != Inf) breaks <- c(breaks,Inf)
    if (attr(Y, "type") == "right" || (attr(Y, "type") == "counting" && all(Y[1,]==0))) {
        NA2zero <- function(x) {if (any(is.na(x))) x[is.na(x)] <- 0; x}
        ## One life table per stratum: bin the stop times into the break
        ## intervals and count events / losses per bin.
        temp <- tapply(1:nrow(Y), X,
                       function(index) {
                           counting <- if(attr(Y, "type") == "counting") 1 else 0
                           time <- Y[index,1+counting,drop=FALSE]
                           event <- Y[index,2+counting,drop=FALSE]
                           cut_time <- cut(time,breaks,include.lowest=TRUE,right=FALSE)
                           nevent <- NA2zero(tapply(event,cut_time,sum))
                           nlost <- NA2zero(tapply(event,cut_time,length)) - nevent
                           ## NOTE(review): ninit is nrow(data) even inside a
                           ## stratum, so each stratum's table starts from the
                           ## full sample size -- confirm this is intended.
                           lifetab(tis = breaks, # should be one element longer for the intervals
                                   ninit = nrow(data), # number of individuals at the start
                                   nlost = nlost, # number lost for each interval
                                   nevent = nevent)
                       }, # number of events for each interval
                       simplify=FALSE)
    } else {
        stop("survival type not supported")
    }
    ## Unwrap the single-stratum case to a plain life table.
    if (length(temp)==1) {
        temp <- temp[[1]]
    }
    structure(temp, call=Call, class=c("lifetab2", "data.frame"))
}
## Plot method for lifetab2 objects: survival against interval start.
plot.lifetab2 <- function(x, y = NULL, ...) {
    plot(x[["tstart"]], x[["surv"]], ...)
}
## Lines method for lifetab2 objects: add survival vs interval start
## to an existing plot.
lines.lifetab2 <- function(x, y = NULL, ...) {
    lines(x[["tstart"]], x[["surv"]], ...)
}
## Rescale / truncate a Surv object and write the pieces back into
## `data` as (tstart, tstop, event, tvalid) columns.
##
## .surv:  expression evaluating (within `data`) to a Surv object of
##         type "right" or "counting".
## scale:  divisor applied to both time columns after shifting.
## origin: value subtracted from the times before truncation.
## enter, exit: optional late-entry / early-exit times (evaluated in
##         `data`); truncation at `exit` censors the observation.
## start, end, event, valid: names of the columns written into `data`.
## zero:   start time used when a 2-column (right-censored) Surv is
##         expanded to counting (start, stop, event) form.
##
## Returns `data` with the four columns added/overwritten.
.survset <- function(.surv, data, scale=1, origin=0, enter=NULL, exit=NULL, start="tstart", end="tstop", event="event", zero = 0, valid="tvalid") {
    ## Non-standard evaluation: these arguments are expressions
    ## evaluated with `data` as the environment.
    Y <- eval(substitute(.surv), data, parent.frame())
    enter <- eval(substitute(enter), data, parent.frame())
    exit <- eval(substitute(exit), data, parent.frame())
    origin <- eval(substitute(origin), data, parent.frame())
    stopifnot(attr(Y, "type") %in% c("right", "counting"))
    ## Expand right-censored data to (start, stop, event) form.
    if (ncol(Y) == 2)
        Y <- cbind(zero,Y)
    .tstart <- Y[,1] - origin
    .tstop <- Y[,2] - origin
    .event <- Y[,3]
    if (!is.null(enter))
        .tstart <- pmax(.tstart, enter)
    if (!is.null(exit)) {
        old.tstop <- .tstop
        .tstop <- pmin(.tstop, exit)
        ## An observation cut short at `exit` is censored, not an event.
        .event <- ifelse(.tstop == old.tstop, .event, 0)
    }
    ## TODO: check for invalid values?
    .valid <- .tstart < .tstop
    .tstart <- .tstart/scale
    .tstop <- .tstop/scale
    data[[start]] <- .tstart
    data[[end]] <- .tstop
    data[[event]] <- .event
    data[[valid]] <- .valid
    data
}
|
/R/lifetab2.R
|
no_license
|
mclements/biostat3
|
R
| false
| false
| 4,730
|
r
|
## copied/edited from KMsurv
## Actuarial life-table estimator.
##
## tis:    interval endpoints; one element longer than the counts
##         (k intervals need k+1 endpoints).
## ninit:  number of individuals entering the first interval.
## nlost:  number lost to follow-up (censored) per interval.
## nevent: number of events per interval.
##
## Returns a data.frame with one row per interval: bounds, counts,
## survival / density / hazard estimates and their standard errors.
lifetab <-
    function (tis, ninit, nlost, nevent)
{
    ## Number entering each interval: the initial cohort minus all
    ## earlier losses and events.
    n_enter <- c(ninit, ninit - cumsum(nlost + nevent)[-length(nevent)])
    ## Effective number at risk: the lost count as exposed for half
    ## the interval (actuarial adjustment).
    n_risk <- n_enter - nlost / 2
    q_hat <- nevent / n_risk    # conditional event probability
    p_hat <- 1 - q_hat          # conditional survival probability
    k <- length(n_risk)
    ## Survival at the *start* of each interval: shift the cumulative
    ## product right and anchor the first interval at 1.
    surv <- c(1, cumprod(p_hat)[-k])
    width <- diff(tis)
    ## Midpoint density and hazard; undefined on the open last interval.
    dens <- c(diff(-1 * surv), NA) / width
    haz <- nevent / width / (n_risk - nevent / 2)
    haz[k] <- NA
    ## Standard errors.
    surv_se <- c(0, surv[-1] * sqrt(cumsum(nevent / n_risk / (n_risk - nevent))[-length(surv)]))
    dens_se <- surv * q_hat / width *
        sqrt(c(0, cumsum(q_hat / n_risk / p_hat)[-k]) + (p_hat / n_risk / q_hat))
    dens_se[k] <- NA
    haz_se <- sqrt(1 - (haz * width / 2)^2) * sqrt(haz^2 / n_risk / q_hat)
    haz_se[k] <- NA
    data.frame(tstart = tis[-k - 1], # new
               tstop = tis[-1],      # new
               nsubs = n_enter, nlost = nlost, nrisk = n_risk, nevent = nevent,
               surv = surv, pdf = dens, hazard = haz, se.surv = surv_se,
               se.pdf = dens_se, se.hazard = haz_se,
               row.names = paste(tis[-k - 1], tis[-1], sep = "-"))
}
## Formula interface to lifetab(): builds a life table per stratum.
##
## formula: a Surv() response, optionally with strata terms on the RHS.
## data, subset: forwarded to stats::model.frame.
## breaks: interval endpoints; defaults to the sorted unique values of
##         the first Surv column, padded with 0 and Inf as needed.
##
## Returns a "lifetab2" object: a single life table, or (with strata)
## a list of per-stratum life tables.
lifetab2 <-
    function (formula, data, subset, breaks=NULL)
{
    ## Rebuild the matched call as a stats::model.frame() call so that
    ## formula/data/subset are evaluated in the caller's frame.
    Call <- match.call()
    Call[[1]] <- as.name("lifetab2")
    indx <- match(c("formula", "data", "subset"),
                  names(Call), nomatch = 0)
    if (indx[1] == 0)
        stop("a formula argument is required")
    temp <- Call[c(1, indx)]
    temp[[1L]] <- quote(stats::model.frame)
    m <- eval.parent(temp)
    Terms <- terms(formula, c("strata", "cluster"))
    ord <- attr(Terms, "order")
    ## NOTE(review): scalar context; `&&` would be the idiomatic operator.
    if (length(ord) & any(ord != 1))
        stop("Interaction terms are not valid for this function")
    n <- nrow(m)
    Y <- model.extract(m, "response")
    if (!is.Surv(Y))
        stop("Response must be a survival object")
    if (!is.null(attr(Terms, "offset")))
        warning("Offset term ignored")
    ## Grouping factor: combined strata from the RHS terms, or a single
    ## stratum when the RHS is empty.
    ll <- attr(Terms, "term.labels")
    if (length(ll) == 0)
        X <- factor(rep(1, n))
    else X <- strata(m[ll])
    ## NOTE(review): duplicate of the is.Surv() check above.
    if (!is.Surv(Y))
        stop("y must be a Surv object")
    ## newY <- aeqSurv(Y)
    ## Default breaks come from the first Surv column; force the grid to
    ## start at 0 and end at Inf.
    if (is.null(breaks))
        breaks <- c(sort(unique(Y[,1,drop=FALSE])), Inf)
    if (breaks[1] != 0) breaks <- c(0,breaks)
    if (breaks[length(breaks)] != Inf) breaks <- c(breaks,Inf)
    if (attr(Y, "type") == "right" || (attr(Y, "type") == "counting" && all(Y[1,]==0))) {
        NA2zero <- function(x) {if (any(is.na(x))) x[is.na(x)] <- 0; x}
        ## One life table per stratum: bin the stop times into the break
        ## intervals and count events / losses per bin.
        temp <- tapply(1:nrow(Y), X,
                       function(index) {
                           counting <- if(attr(Y, "type") == "counting") 1 else 0
                           time <- Y[index,1+counting,drop=FALSE]
                           event <- Y[index,2+counting,drop=FALSE]
                           cut_time <- cut(time,breaks,include.lowest=TRUE,right=FALSE)
                           nevent <- NA2zero(tapply(event,cut_time,sum))
                           nlost <- NA2zero(tapply(event,cut_time,length)) - nevent
                           ## NOTE(review): ninit is nrow(data) even inside a
                           ## stratum, so each stratum's table starts from the
                           ## full sample size -- confirm this is intended.
                           lifetab(tis = breaks, # should be one element longer for the intervals
                                   ninit = nrow(data), # number of individuals at the start
                                   nlost = nlost, # number lost for each interval
                                   nevent = nevent)
                       }, # number of events for each interval
                       simplify=FALSE)
    } else {
        stop("survival type not supported")
    }
    ## Unwrap the single-stratum case to a plain life table.
    if (length(temp)==1) {
        temp <- temp[[1]]
    }
    structure(temp, call=Call, class=c("lifetab2", "data.frame"))
}
## Plot method for lifetab2 objects: survival against interval start.
plot.lifetab2 <- function(x, y = NULL, ...) {
    plot(x[["tstart"]], x[["surv"]], ...)
}
## Lines method for lifetab2 objects: add survival vs interval start
## to an existing plot.
lines.lifetab2 <- function(x, y = NULL, ...) {
    lines(x[["tstart"]], x[["surv"]], ...)
}
## Rescale / truncate a Surv object and write the pieces back into
## `data` as (tstart, tstop, event, tvalid) columns.
##
## .surv:  expression evaluating (within `data`) to a Surv object of
##         type "right" or "counting".
## scale:  divisor applied to both time columns after shifting.
## origin: value subtracted from the times before truncation.
## enter, exit: optional late-entry / early-exit times (evaluated in
##         `data`); truncation at `exit` censors the observation.
## start, end, event, valid: names of the columns written into `data`.
## zero:   start time used when a 2-column (right-censored) Surv is
##         expanded to counting (start, stop, event) form.
##
## Returns `data` with the four columns added/overwritten.
.survset <- function(.surv, data, scale=1, origin=0, enter=NULL, exit=NULL, start="tstart", end="tstop", event="event", zero = 0, valid="tvalid") {
    ## Non-standard evaluation: these arguments are expressions
    ## evaluated with `data` as the environment.
    Y <- eval(substitute(.surv), data, parent.frame())
    enter <- eval(substitute(enter), data, parent.frame())
    exit <- eval(substitute(exit), data, parent.frame())
    origin <- eval(substitute(origin), data, parent.frame())
    stopifnot(attr(Y, "type") %in% c("right", "counting"))
    ## Expand right-censored data to (start, stop, event) form.
    if (ncol(Y) == 2)
        Y <- cbind(zero,Y)
    .tstart <- Y[,1] - origin
    .tstop <- Y[,2] - origin
    .event <- Y[,3]
    if (!is.null(enter))
        .tstart <- pmax(.tstart, enter)
    if (!is.null(exit)) {
        old.tstop <- .tstop
        .tstop <- pmin(.tstop, exit)
        ## An observation cut short at `exit` is censored, not an event.
        .event <- ifelse(.tstop == old.tstop, .event, 0)
    }
    ## TODO: check for invalid values?
    .valid <- .tstart < .tstop
    .tstart <- .tstart/scale
    .tstop <- .tstop/scale
    data[[start]] <- .tstart
    data[[end]] <- .tstop
    data[[event]] <- .event
    data[[valid]] <- .valid
    data
}
|
### Scraping: pull the case table from the Wikipedia pandemic article
linkPage= "https://en.wikipedia.org/wiki/2019%E2%80%9320_coronavirus_pandemic"
linkPath = '//*[@id="thetable"]'
# id="thetable" is the element id of the data table on the Wikipedia page
library(htmltab)
# Create a new object named "coronavirus" holding the scraped table
coronavirus = htmltab(doc = linkPage, which =linkPath, rm_nodata_cols = F)
### Cleaning
head(coronavirus)
# Keep only columns 2-5 (country, cases, deaths, recovered)
coronavirus = coronavirus[,c(2:5)]
### Rename the columns
names(coronavirus)
new_names = c("Paises", "Casos", "Muertes", "Recuperados")
names(coronavirus) = new_names
names (coronavirus)
### Drop the footer rows (rows 229-230 hold the table's footnotes)
coronavirus = coronavirus[-c(229:230),]
View(coronavirus)
### Some more cleaning for the missing-value placeholder
# NOTE(review): the code replaces the literal string 'NA', while the
# original (Spanish) comment referred to the en-dash placeholder "-";
# confirm which value htmltab actually produces for missing cells.
library(dplyr)
coronavirus = coronavirus %>%
  mutate(Recuperados = replace(Recuperados, Recuperados == 'NA', NA))
View(coronavirus)
### Analysis: inspect the resulting structure
str(coronavirus)
|
/Githubpc.R
|
no_license
|
gaboreparaguay/Proyecto2020
|
R
| false
| false
| 887
|
r
|
### Scraping: pull the case table from the Wikipedia pandemic article
linkPage= "https://en.wikipedia.org/wiki/2019%E2%80%9320_coronavirus_pandemic"
linkPath = '//*[@id="thetable"]'
# id="thetable" is the element id of the data table on the Wikipedia page
library(htmltab)
# Create a new object named "coronavirus" holding the scraped table
coronavirus = htmltab(doc = linkPage, which =linkPath, rm_nodata_cols = F)
### Cleaning
head(coronavirus)
# Keep only columns 2-5 (country, cases, deaths, recovered)
coronavirus = coronavirus[,c(2:5)]
### Rename the columns
names(coronavirus)
new_names = c("Paises", "Casos", "Muertes", "Recuperados")
names(coronavirus) = new_names
names (coronavirus)
### Drop the footer rows (rows 229-230 hold the table's footnotes)
coronavirus = coronavirus[-c(229:230),]
View(coronavirus)
### Some more cleaning for the missing-value placeholder
# NOTE(review): the code replaces the literal string 'NA', while the
# original (Spanish) comment referred to the en-dash placeholder "-";
# confirm which value htmltab actually produces for missing cells.
library(dplyr)
coronavirus = coronavirus %>%
  mutate(Recuperados = replace(Recuperados, Recuperados == 'NA', NA))
View(coronavirus)
### Analysis: inspect the resulting structure
str(coronavirus)
|
# Pittard, Steve August 2014 - wsp@emory.edu
# Code to demonstrate the Normal distribution
# The formula for the Normal Probability Density Function is:
normpdf <- expression(paste(frac(1, sigma*sqrt(2*pi)), plain(e)^{frac(-(x-mu)^2, 2*sigma^2)}, sep=""))
par(mfrow=c(1,1))
plot(1:3,1:3,type="n",axes=F,ylab="",xlab="")
text(1.5,2,normpdf,cex=1.8)
# If the mean is 0 and the standard deviation is 1 then the formula can be
# is called the Standard normal probability distribution function and can be
# plotted like:
snormpdf <- expression(paste(frac(1, sqrt(2*pi)), plain(e)^{frac(-x^2, 2)}, sep=""))
text(2.5,2,snormpdf,cex=1.8)
# Using the second form of the function let's generate a sequence of numbers between
# -4 and 4 in increments of .1
x <- seq(-4,4,.1)
y <- 1/sqrt(2*pi)*exp(-x^2/2)
# Plot the x,y pairs. Does the resulting curve look familiar ?
par(mfrow=c(1,2)) # Put the two plots side by side
plot(x,y,lwd=1,,type="p",col="blue",cex=0.6,pch=19,main=snormpdf)
xlen <- length(x)
legend("topright",paste("N = ",xlen),cex=0.8)
grid()
# Let's get more x values - increments of 0.01
x <- seq(-4,4,.01)
y <- 1/sqrt(2*pi)*exp(-x^2/2)
plot(x,y,type="p",col="blue",cex=0.6,pch=19,main=snormpdf)
xlen <- length(x)
legend("topright",paste("N = ",xlen),cex=0.8)
grid()
# Let's work with the second version - I'll draw this a few times so I'll put it
# into a function for convenience.
## Plot the standard normal density at the supplied x values.
## xs:    numeric vector of x positions
## style: plot type forwarded to plot() ("l" = lines, "p" = points)
## Uses the global `snormpdf` expression as the plot title.
drawNorm <- function(xs, style = "l") {
  par(mfrow = c(1, 1))
  density_vals <- 1 / sqrt(2 * pi) * exp(-xs^2 / 2)
  plot(xs, density_vals,
       lwd = 2, type = style, col = "blue", cex = 0.6, pch = 19,
       main = snormpdf, xaxt = "n")
  axis(1, -4:4)
  n_points <- length(xs)
  legend("topright", paste("N = ", n_points), cex = 0.8)
  legend("topleft", "Total area under curve is 1", cex = 0.8)
  abline(v = 0, lty = 3)
  grid()
}
drawNorm(x)
# Let's say we wanted to find the area underneath the curve
# in the shaded region.
xvals <- x[x > -4 & x < -1]
yvals <- 1/sqrt(2*pi)*exp(-xvals^2/2)
polygon(c(-4,xvals,-1),c(0,yvals,0),col="gray")
text(-1.5,0.05,"?",cex=1.5)
# Well we can integrate the function from -4 to -1 to give the answer
# Lucky for us R has an Integrate function.
## Standard normal density, used as the integrand for integrate().
func2integrate <- function(xvals) {
  exp(-xvals^2 / 2) / sqrt(2 * pi)
}
results <- integrate(func2integrate,-4,-1)
round(results$value,2) # Should be around 0.16
# What is the area of the curve between -1 and 4 ? Easy.
# 1 - 0.16 = 0.84
# So what if we wanted to determine the area for curves
# starting from -4 to -3, -4 to 3.0, -4 to -2, ... -4,4 ?
# We could do our integration in a loop.
xvals <- seq(-4,4,1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,integrate(func2integrate,-4,ii)$value)
}
areavec <- round(areavec,3)
names(areavec) <- paste(-4,-4:4,sep="_to_")
areavec
# Note we have symmetry
drawNorm(x)
abline(v=0)
# Area under curve from -4 to 0 is 0.5. And the area from 0 to 4 is also 0.5
areavec
# If this symmetry is true then the area, for example, from -4 to -2 should then
# be the same as the area from 2 to 4
# Let's put the polygon stuff into a function to make drawing the shaded
# regions more convenient
## Shade the area under the standard normal curve between lim1 and lim2
## on the current plot.
shady <- function(vec, lim1, lim2, color = "gray") {
  inside <- vec[vec > lim1 & vec < lim2]
  heights <- exp(-inside^2 / 2) / sqrt(2 * pi)
  polygon(c(lim1, inside, lim2), c(0, heights, 0), col = color)
}
shady(x,-4,-2,"green")
shady(x,2,4,"green")
# What is the area from -4 to -2 ? Approximately 0.023
areavec[3]
# To get the area from 2 to 4 we can subtract the area value
# corresponding to the integegration from -4 to 2 from 1
1 - areavec[7]
all.equal(as.numeric(areavec[3]),as.numeric(1-areavec[7]))
# It turns out that these areas represent probabilities. So we can
# start to ask questions like what is the probability associated with
# observing a value of 1.47 or less. we already know how to do this !
# do the integration.
drawNorm(x)
shady(x,-4,1.47,"green")
text(0,0.1,"?",cex=3)
probval <- integrate(func2integrate,-4,1.47)$value
text(0,0.2,round(probval,3))
# What then is the probability of observing a value of greater than 1.47 ?
# Super easy
1 - probval
1 - integrate(func2integrate,-4,1.47)$value
# Or this too - just change the limits on the integration
integrate(func2integrate,1.47,4)$value
# But we'll graph the problem anyway.
drawNorm(x)
shady(x,1.47,4)
text(2,0.02,"?")
text(2.5,0.1,round(integrate(func2integrate,1.47,4)$value,2))
# What is the probability of observing a value between -1 and 1 ?
# Graph it
drawNorm(x)
shady(x,-1,1,col="blue")
# This is all getting to be too easy isn't it ?
integrate(func2integrate,-1,1)$value
# Or
integrate(func2integrate,-4,1)$value - integrate(func2integrate,-4,-1)$value
# Okay but R has some functions that will do this for us. That is
# we don't need to do integration explicitly every time
# Check the pnorm function. It gives the CDF F(x) = P(X <= x)
pnorm(1) - pnorm(-1) # Probability of getting a number between -1 and 1
pnorm(1.47) # Probability of getting a number of 1.47 or less
1 - pnorm(1.47) # Probability of getting a number > 1.47
pnorm(1.47,lower.tail=FALSE)
all.equal(pnorm(1.47,lower.tail=FALSE), 1 - pnorm(1.47) )
# Note that unless you tell pnorm otherwise it will assume a standard
# normal distribution with mean 0 and sd of 1.
# What percentage of the data is contained within one standard deviation of
# the mean ? We did this already
integrate(func2integrate,-1,1)$value
pnorm(1) - pnorm(-1)
# What about two standard deviations ?
integrate(func2integrate,-2,2)$value
pnorm(2) - pnorm(-2)
# What about three ?
integrate(func2integrate,-3,3)$value
pnorm(3) - pnorm(-3)
# So use pnorm from now on. Also if we want to provide a bunch of x values
# as input to the standard normal function we can use the built-in dnorm
# function. So this:
somex <- seq(-4,4,.01)
normys <- 1/sqrt(2*pi)*exp(-somex^2/2)
# is more easily done as:
dnormys <- dnorm(seq(-4,4,.01))
all.equal(normys,dnormys)
# Say we have some Normally distributed data that isn't
# standardized - As long as we are confident that it's normal
# we can scale it / standardize it: (x - mean) / sd
somegrades <- dget("http://steviep42.bitbucket.org/YOUTUBE.DIR/grades")
summary(somegrades)
# Okay so what is the probability of someone getting a grade of 82
# or less ?
standard <- (82 - mean(somegrades))/sd(somegrades)
# Cool so now we can use the pnorm function to find the prob associated
# with 1.215573 (which is what standard winds up being)
pnorm(standard)
# But hey ! pnorm takes mean and sd arguments so we don't have to do the
# intermediate standardization step ourselves.
pnorm(82,mean(somegrades),sd(somegrades))
all.equal(pnorm(standard),pnorm(82,mean(somegrades),sd(somegrades)))
# DONT USE INTEGRATE USE PNORM HERE !!!!!!!
# Let's reconsider the section from above where we wanted to integrate
# regions from like -4 to -3, -4 to -2, -4 to -1,... -4 to 4 except here
# we will use a smaller increment so as to get more x values / granularity.
xvals <- seq(-4,4,0.1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,pnorm(ii))
}
areavec <- round(areavec,3)
# Let's plot this. Does it look familiar ?
plot(xvals,areavec,type="l",lwd=2,main="Cumulative Distribution",xaxt="n")
axis(1,-4:4)
grid()
# How is this useful ? Well it lets us find the value associated with a
# given probability value. So what value from the standard normal distribution
# corresponds to the 50th percentile ? Easy - its 0. We learned this earlier
# but we can easily read it off the graph. Or look into the areavec vector
# to see what element corresponds to 0.5
# (comparing to 0 is safe here only because areavec was rounded to 3 decimals,
# so pnorm(0)=0.5 is stored exactly)
obs <- which(areavec-.50 == 0)
xvals[obs]
segments(-4,0.5,0,.5,lty=2)
segments(0,0,0,.5,lty=2)
# (duplicate of the loop above - kept as in the original script)
xvals <- seq(-4,4,0.1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,pnorm(ii))
}
areavec <- round(areavec,3)
|
/YOUTUBE.DIR/Normal.R
|
no_license
|
steviep42/youtube
|
R
| false
| false
| 7,732
|
r
|
# Pittard, Steve August 2014 - wsp@emory.edu
# Code to demonstrate the Normal distribution
# The formula for the Normal Probability Density Function is:
normpdf <- expression(paste(frac(1, sigma*sqrt(2*pi)), plain(e)^{frac(-(x-mu)^2, 2*sigma^2)}, sep=""))
par(mfrow=c(1,1))
# axes=FALSE (spelled out; T/F are reassignable) - blank canvas for the formula
plot(1:3,1:3,type="n",axes=FALSE,ylab="",xlab="")
text(1.5,2,normpdf,cex=1.8)
# If the mean is 0 and the standard deviation is 1 then the formula
# is called the Standard Normal probability density function and can be
# plotted like:
snormpdf <- expression(paste(frac(1, sqrt(2*pi)), plain(e)^{frac(-x^2, 2)}, sep=""))
text(2.5,2,snormpdf,cex=1.8)
# Using the second form of the function let's generate a sequence of numbers
# between -4 and 4 in increments of .1
x <- seq(-4,4,.1)
y <- 1/sqrt(2*pi)*exp(-x^2/2)
# Plot the x,y pairs. Does the resulting curve look familiar ?
par(mfrow=c(1,2)) # Put the two plots side by side
# (a stray empty argument ",," in the original plot() call has been removed)
plot(x,y,lwd=1,type="p",col="blue",cex=0.6,pch=19,main=snormpdf)
xlen <- length(x)
legend("topright",paste("N = ",xlen),cex=0.8)
grid()
# Let's get more x values - increments of 0.01
x <- seq(-4,4,.01)
y <- 1/sqrt(2*pi)*exp(-x^2/2)
plot(x,y,type="p",col="blue",cex=0.6,pch=19,main=snormpdf)
xlen <- length(x)
legend("topright",paste("N = ",xlen),cex=0.8)
grid()
# Let's work with the second version - I'll draw this a few times so I'll put it
# into a function for convenience.
# Draw the standard normal density curve over the supplied x values.
#
# Args:
#   xs:    numeric vector of x positions at which to evaluate the density
#   style: plot type forwarded to plot() (default "l", a line)
#
# Side effects only: resets the layout to a single panel and draws on the
# active graphics device. Relies on `snormpdf` (defined earlier in this
# file) for the plot title.
drawNorm <- function(xs, style = "l") {
  par(mfrow = c(1, 1))
  ys <- 1/sqrt(2*pi) * exp(-xs^2/2)
  plot(xs, ys,
       lwd = 2, type = style, col = "blue", cex = 0.6, pch = 19,
       main = snormpdf, xaxt = "n")
  axis(1, -4:4)
  legend("topright", paste("N = ", length(xs)), cex = 0.8)
  legend("topleft", "Total area under curve is 1", cex = 0.8)
  abline(v = 0, lty = 3)
  grid()
}
drawNorm(x)
# Let's say we wanted to find the area underneath the curve
# in the shaded region (from -4 up to -1).
xvals <- x[x > -4 & x < -1]
yvals <- 1/sqrt(2*pi)*exp(-xvals^2/2)
# Close the region down to the x axis and fill it in gray
polygon(c(-4,xvals,-1),c(0,yvals,0),col="gray")
text(-1.5,0.05,"?",cex=1.5)
# Well we can integrate the function from -4 to -1 to give the answer
# Lucky for us R has an integrate() function.
# Standard normal probability density, used as the integrand for the
# integrate() calls throughout this script.
#
# Args:
#   xvals: numeric vector of x values
# Returns: the density 1/sqrt(2*pi) * exp(-x^2/2) evaluated elementwise.
func2integrate <- function(xvals) {
  1/sqrt(2*pi) * exp(-xvals^2/2)
}
results <- integrate(func2integrate,-4,-1)
round(results$value,2) # Should be around 0.16
# What is the area of the curve between -1 and 4 ? Easy.
# 1 - 0.16 = 0.84
# So what if we wanted to determine the area for curves
# starting from -4 to -3, -4 to -2, -4 to -1, ... -4 to 4 ?
# We could do our integration in a loop.
xvals <- seq(-4,4,1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,integrate(func2integrate,-4,ii)$value)
}
areavec <- round(areavec,3)
# Label each area with the interval it was integrated over, e.g. "-4_to_-3"
names(areavec) <- paste(-4,-4:4,sep="_to_")
areavec
# Note we have symmetry
drawNorm(x)
abline(v=0)
# Area under curve from -4 to 0 is 0.5. And the area from 0 to 4 is also 0.5
areavec
# If this symmetry is true then the area, for example, from -4 to -2 should then
# be the same as the area from 2 to 4
# Let's put the polygon stuff into a function to make drawing the shaded
# regions more convenient
# Shade the region under the standard normal curve between lim1 and lim2
# on the current plot.
#
# Args:
#   vec:   numeric vector of x values already on the plot
#   lim1:  lower x limit of the shaded region
#   lim2:  upper x limit of the shaded region
#   color: fill colour for the polygon (default "gray")
#
# Side effect only: draws a filled polygon on the active device; an open
# plot must already exist.
shady <- function(vec, lim1, lim2, color = "gray") {
  inside <- vec > lim1 & vec < lim2
  xs <- vec[inside]
  ys <- 1/sqrt(2*pi) * exp(-xs^2/2)
  polygon(x = c(lim1, xs, lim2), y = c(0, ys, 0), col = color)
}
shady(x,-4,-2,"green")
shady(x,2,4,"green")
# What is the area from -4 to -2 ? Approximately 0.023
areavec[3]
# To get the area from 2 to 4 we can subtract the area value
# corresponding to the integration from -4 to 2 from 1
1 - areavec[7]
all.equal(as.numeric(areavec[3]),as.numeric(1-areavec[7]))
# It turns out that these areas represent probabilities. So we can
# start to ask questions like what is the probability associated with
# observing a value of 1.47 or less. We already know how to do this !
# Do the integration.
drawNorm(x)
shady(x,-4,1.47,"green")
text(0,0.1,"?",cex=3)
probval <- integrate(func2integrate,-4,1.47)$value
text(0,0.2,round(probval,3))
# What then is the probability of observing a value of greater than 1.47 ?
# Super easy
1 - probval
1 - integrate(func2integrate,-4,1.47)$value
# Or this too - just change the limits on the integration
integrate(func2integrate,1.47,4)$value
# But we'll graph the problem anyway.
drawNorm(x)
shady(x,1.47,4)
text(2,0.02,"?")
text(2.5,0.1,round(integrate(func2integrate,1.47,4)$value,2))
# What is the probability of observing a value between -1 and 1 ?
# Graph it
drawNorm(x)
shady(x,-1,1,col="blue")
# This is all getting to be too easy isn't it ?
integrate(func2integrate,-1,1)$value
# Or
integrate(func2integrate,-4,1)$value - integrate(func2integrate,-4,-1)$value
# Okay but R has some functions that will do this for us. That is
# we don't need to do integration explicitly every time
# Check the pnorm function. It gives the CDF F(x) = P(X <= x)
pnorm(1) - pnorm(-1) # Probability of getting a number between -1 and 1
pnorm(1.47) # Probability of getting a number of 1.47 or less
1 - pnorm(1.47) # Probability of getting a number > 1.47
pnorm(1.47,lower.tail=FALSE)
all.equal(pnorm(1.47,lower.tail=FALSE), 1 - pnorm(1.47) )
# Note that unless you tell pnorm otherwise it will assume a standard
# normal distribution with mean 0 and sd of 1.
# What percentage of the data is contained within one standard deviation of
# the mean ? We did this already
integrate(func2integrate,-1,1)$value
pnorm(1) - pnorm(-1)
# What about two standard deviations ?
integrate(func2integrate,-2,2)$value
pnorm(2) - pnorm(-2)
# What about three ?
integrate(func2integrate,-3,3)$value
pnorm(3) - pnorm(-3)
# So use pnorm from now on. Also if we want to provide a bunch of x values
# as input to the standard normal function we can use the built-in dnorm
# function. So this:
somex <- seq(-4,4,.01)
normys <- 1/sqrt(2*pi)*exp(-somex^2/2)
# is more easily done as:
dnormys <- dnorm(seq(-4,4,.01))
all.equal(normys,dnormys)
# Say we have some Normally distributed data that isn't
# standardized - As long as we are confident that it's normal
# we can scale it / standardize it: (x - mean) / sd
somegrades <- dget("http://steviep42.bitbucket.org/YOUTUBE.DIR/grades")
summary(somegrades)
# Okay so what is the probability of someone getting a grade of 82
# or less ?
standard <- (82 - mean(somegrades))/sd(somegrades)
# Cool so now we can use the pnorm function to find the prob associated
# with 1.215573 (which is what standard winds up being)
pnorm(standard)
# But hey ! pnorm takes mean and sd arguments so we don't have to do the
# intermediate standardization step ourselves.
pnorm(82,mean(somegrades),sd(somegrades))
all.equal(pnorm(standard),pnorm(82,mean(somegrades),sd(somegrades)))
# DONT USE INTEGRATE USE PNORM HERE !!!!!!!
# Let's reconsider the section from above where we wanted to integrate
# regions from like -4 to -3, -4 to -2, -4 to -1,... -4 to 4 except here
# we will use a smaller increment so as to get more x values / granularity.
xvals <- seq(-4,4,0.1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,pnorm(ii))
}
areavec <- round(areavec,3)
# Let's plot this. Does it look familiar ?
plot(xvals,areavec,type="l",lwd=2,main="Cumulative Distribution",xaxt="n")
axis(1,-4:4)
grid()
# How is this useful ? Well it lets us find the value associated with a
# given probability value. So what value from the standard normal distribution
# corresponds to the 50th percentile ? Easy - its 0. We learned this earlier
# but we can easily read it off the graph. Or look into the areavec vector
# to see what element corresponds to 0.5
# (comparing to 0 is safe here only because areavec was rounded to 3 decimals,
# so pnorm(0)=0.5 is stored exactly)
obs <- which(areavec-.50 == 0)
xvals[obs]
segments(-4,0.5,0,.5,lty=2)
segments(0,0,0,.5,lty=2)
# (duplicate of the loop above - kept as in the original script)
xvals <- seq(-4,4,0.1)
areavec <- vector()
for (ii in xvals) {
areavec <- c(areavec,pnorm(ii))
}
areavec <- round(areavec,3)
|
# deck.gl grid-layer example: aggregate the bundled San Francisco
# bike-parking points into cells and extrude each cell by its point count.
data("sf_bike_parking")
# Layer properties forwarded to deck.gl; "@=[lng, lat]" is the deckgl
# JavaScript-accessor syntax for reading coordinates from each row.
properties <- list(
  filter = "spaces > 4",
  visible = TRUE,
  extruded = TRUE,
  cellSize = 200,
  elevationScale = 4,
  getPosition = "@=[lng, lat]", #~lng + lat,
  colorRange = RColorBrewer::brewer.pal(6, "YlOrRd"),
  tooltip = "{{position.0}}, {{position.1}}<br/>Count: {{count}}"
)
# Build the widget: register the data source, add the grid layer on top of
# a basemap, and attach an interactive JSON editor for the layer props.
deck <- deckgl(zoom = 11, pitch = 45, bearing = 35, element_id = "grid-layer") %>%
  add_source("sf-bike-parking", sf_bike_parking) %>%
  add_grid_layer(
    source = "sf-bike-parking",
    properties = properties
  ) %>%
  add_control("Grid Layer") %>%
  add_basemap() %>%
  add_json_editor(wrap = 50, maxLines = 23)
# Only render when running interactively (this file is a package example)
if (interactive()) deck
|
/inst/examples/deckgl-api-reference/grid-layer.R
|
permissive
|
crazycapivara/deckgl
|
R
| false
| false
| 658
|
r
|
# deck.gl grid-layer example (duplicate copy in this dump): aggregate the
# bundled San Francisco bike-parking points into cells, extruded by count.
data("sf_bike_parking")
# Layer properties forwarded to deck.gl; "@=[lng, lat]" is the deckgl
# JavaScript-accessor syntax for reading coordinates from each row.
properties <- list(
  filter = "spaces > 4",
  visible = TRUE,
  extruded = TRUE,
  cellSize = 200,
  elevationScale = 4,
  getPosition = "@=[lng, lat]", #~lng + lat,
  colorRange = RColorBrewer::brewer.pal(6, "YlOrRd"),
  tooltip = "{{position.0}}, {{position.1}}<br/>Count: {{count}}"
)
# Build the widget: register the data source, add the grid layer on top of
# a basemap, and attach an interactive JSON editor for the layer props.
deck <- deckgl(zoom = 11, pitch = 45, bearing = 35, element_id = "grid-layer") %>%
  add_source("sf-bike-parking", sf_bike_parking) %>%
  add_grid_layer(
    source = "sf-bike-parking",
    properties = properties
  ) %>%
  add_control("Grid Layer") %>%
  add_basemap() %>%
  add_json_editor(wrap = 50, maxLines = 23)
# Only render when running interactively (this file is a package example)
if (interactive()) deck
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plug.R
\name{plug}
\alias{plug}
\title{Switch indeterminates in a polynomial}
\usage{
plug(p, indeterminate, value)
}
\arguments{
\item{p}{a polynomial}
\item{indeterminate}{the indeterminate in the polynomial to
switch}
\item{value}{the value/indeterminate to substitute}
}
\value{
an mpoly object
}
\description{
Switch indeterminates in a polynomial
}
\examples{
# on an mpoly
(p <- mp("(x+y)^3"))
plug(p, "x", 5)
plug(p, "x", "t")
plug(p, "x", "y")
plug(p, "x", mp("2 y"))
plug(p, "x", mp("x + y"))
mp("((x+y)+y)^3")
# on an mpolyList
ps <- mp(c("x+y", "x+1"))
plug(ps, "x", 1)
}
|
/man/plug.Rd
|
no_license
|
GrantInnerst/mpoly
|
R
| false
| true
| 668
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plug.R
\name{plug}
\alias{plug}
\title{Switch indeterminates in a polynomial}
\usage{
plug(p, indeterminate, value)
}
\arguments{
\item{p}{a polynomial}
\item{indeterminate}{the indeterminate in the polynomial to
switch}
\item{value}{the value/indeterminate to substitute}
}
\value{
an mpoly object
}
\description{
Switch indeterminates in a polynomial
}
\examples{
# on an mpoly
(p <- mp("(x+y)^3"))
plug(p, "x", 5)
plug(p, "x", "t")
plug(p, "x", "y")
plug(p, "x", mp("2 y"))
plug(p, "x", mp("x + y"))
mp("((x+y)+y)^3")
# on an mpolyList
ps <- mp(c("x+y", "x+1"))
plug(ps, "x", 1)
}
|
# loading required library
library(shiny)
library(shinydashboard)
library(ade4)
library(adegraphics)
library(magrittr)
library(dplyr)
library(ggplot2)
library(data.table)
library(DT)
library(plotly)
# Resolve the input data for the Shiny app from vizection.* options:
# either two plain data frames ("genesAndLibs") or a SummarizedExperiment
# ("se") whose assay/colData are unpacked into the same genes/libs shape.
if (getOption("vizection.dataIs") == "genesAndLibs") {
genes <- get(getOption("vizection.genes"), .GlobalEnv)
libs <- get(getOption("vizection.libs"), .GlobalEnv)
} else if (getOption("vizection.dataIs") == "se") {
se <- get(getOption("vizection.se"), .GlobalEnv)
genes <- SummarizedExperiment::assay(se) %>% data.frame
libs <- SummarizedExperiment::colData(se) %>% data.frame(stringsAsFactors = FALSE)
libs$samplename <- rownames(libs)
# per-sample totals across all genes
libs$counts <- colSums(genes)
} else stop("Could not detect what data to load.")
# Guarantee a grouping column exists, then coerce it to a factor in place
if (is.null(libs$group))
libs$group <- "No groups"
libs$group %<>% factor
# Project-level sanity check on the loaded tables (defined in vizection pkg)
vizectionValidate(genes = genes, libs = libs)
# Collect, in leaf order, the first edgePar attribute (the edge colour)
# of every leaf in a dendrogram.
#
# Args:
#   dendro: a stats dendrogram object
# Returns: a vector of the leaves' first edgePar entries; non-leaf nodes
#   contribute NULL, which unlist() drops, so the result is NULL when no
#   leaf carries an edgePar attribute.
showDendrColors <- function(dendro) {
  leaf_attrs <- dendrapply(dendro, function(node) {
    if (is.leaf(node)) {
      attr(node, "edgePar")[1]
    }
  })
  unlist(leaf_attrs)
}
shinyServer(function(input, output, session) {
# FILTERS
# =======
filterSelectionBool <- reactive({
withProgress(message = 'Updating pre-filter', {
incProgress(1/2, detail = "updating")
vizection:::filterSelectionBool(libs, input)
})
})
filterSelectionBoolFinal <- reactive({
withProgress(message = 'Updating filter', {
incProgress(1/2, detail = "updating")
vizection:::filterSelectionBoolFinal(libs, input)
})
})
# SUBGENES SUBLIBS
# ================
subgenes <- reactive({
withProgress(message = 'Updating subgenes', {
incProgress(1/3, detail = "filtering")
pre_subgenes <- vizection:::subgenes_1(libs, input, genes)
incProgress(2/3, detail = "removing useless genes")
vizection:::subgenes_2(pre_subgenes) #removing useless genes
})
})
sublibs <- reactive({
withProgress(message = 'Updating sublibs', {
incProgress(1/2, detail = "filtering")
vizection:::sublibs(libs, input)
})
})
# -> libsGroup
contentlibsGroup <- reactive({
withProgress(message = 'updating groups', {
incProgress(1/3, detail = "extracting from filter")
filterExtractedBool <- vizection:::filterExtractedBool(libs, input)
incProgress(2/3, detail = "creating checkbox")
myGroups <- vizection:::addNumberOfSamples(libs, paste(unique(libs$group[filterExtractedBool])))
checkboxGroupInput(inputId = "groupsCheck", label = "",
choices = myGroups,
selected = myGroups
)
})
})
output$libsGroup <- renderUI({
contentlibsGroup()
})
observe({
filterExtractedBool <- vizection:::filterExtractedBool(libs, input)
myGroups <- vizection:::addNumberOfSamples(libs, paste(unique(libs$group[filterExtractedBool])))
updateCheckboxGroupInput(session,
"groupsCheck",
choices = myGroups,
selected = if(input$bar) myGroups
)
})
# -> libsSamplename
contentlibsSamplename <- eventReactive(input$updateSamples, {
withProgress(message = 'updating samples', {
incProgress(1/3, detail = "extracting selection")
filterSelectionNames <- rownames(libs)[filterSelectionBool()]
incProgress(2/3, detail = "creating checkbox")
mySamples <- vizection:::addGroupName(libs, paste(filterSelectionNames))
checkboxGroupInput(inputId = "samplesCheck", label = "",
choices = mySamples,
selected = mySamples
)
})
})
output$libsSamplename <- renderUI({
contentlibsSamplename()
})
# SHARED
# ======
corMat <- eventReactive(input$updateCorMat, {
withProgress(message = 'correlation matrix', {
incProgress(1/4, detail = "TPM")
a <- subgenes() %>% vizection:::corMat_1()
incProgress(2/4, detail = "log1p")
b <- a %>% vizection:::corMat_2()
incProgress(3/4, detail = "cor")
b %>% vizection:::corMat_3()
})
})
distCorMat <- reactive({
withProgress(message = 'distance matrix', value = 0, {
incProgress(1/3, detail = "as.dist")
a <- corMat() %>% vizection:::distCorMat_1()
incProgress(2/3, detail = "quasieuclid")
a %>% vizection:::distCorMat_2()
})
})
genesDend <- reactive({
withProgress(message = 'cluster', value = 0, {
incProgress(1/2, detail = "hclust")
distCorMat() %>% vizection:::genesDend()
})
})
genesDend2 <- reactive({
withProgress(message = 'dendrogram', {
incProgress(1/6, detail = "nbGroups")
nbGroups <- vizection:::genesDend2_1(input)
incProgress(2/6, detail = "colGroups")
colsGrps <- vizection:::genesDend2_2(nbGroups)
incProgress(3/6, detail = "colors")
cols <- vizection:::genesDend2_3(input)
incProgress(4/6, detail = "customization")
a <- genesDend() %>%
vizection:::genesDend2_4( input
, nbGroups = nbGroups
, colsGrps = colsGrps
, cols = cols)
incProgress(5/6, detail = "ladderize")
a %>% vizection:::genesDend2_5()
})
})
colorsPcaLi <- reactive({
withProgress(message = 'colors PCA', {
incProgress(1/3, detail = "collecting nb clusters")
ifelse(input$nbClusters!= 1, palette(rainbow_hcl(input$nbClusters, c=50, l=100)), palette(rainbow_hcl(2, c=50, l=100)))
incProgress(2/3, detail = "generating colors")
data.frame(colors = showDendrColors(genesDend2()), sampleIndex = order.dendrogram(genesDend2())) %>%
setorder("sampleIndex") %$%
return(colors)
})
})
# HEADER
# ======
contentgeneral <- eventReactive(input$updateSelection, {
withProgress(message = 'Updating selection information', {
incProgress(1/5, detail = "collecting sublibs")
sublibs <- sublibs()
incProgress(2/5, detail = "collecting subgenes")
subgenes <- subgenes()
incProgress(3/5, detail = "generating dataframe")
data <- data.frame(
group = c("Samples", "Groups", "Genes"),
value = c(sum(filterSelectionBoolFinal()) / nrow(libs) * 100,
length(unique(sublibs$group)) / length(unique(libs$group)) * 100,
nrow(subgenes[-1,]) / nrow(genes[-1,]) * 100),
total = c(nrow(libs), length(unique(libs$group)), nrow(genes[-1,])),
selection = c(sum(filterSelectionBoolFinal()), length(unique(sublibs$group)), nrow(subgenes[-1,]))
)
incProgress(4/5, detail = "final process")
list(
samples = data %>% filter(group == "Samples") %$% value %>% round(digits = 2),
groups = data %>% filter(group == "Groups") %$% value %>% round(digits = 2),
genes = data %>% filter(group == "Genes") %$% value %>% round(digits = 2),
totalSamples = data %>% filter(group == "Samples") %$% total %>% round(digits = 2),
totalGroups = data %>% filter(group == "Groups") %$% total %>% round(digits = 2),
totalGenes = data %>% filter(group == "Genes") %$% total %>% round(digits = 2),
selectionSamples = data %>% filter(group == "Samples") %$% selection %>% round(digits = 2),
selectionGroups = data %>% filter(group == "Groups") %$% selection %>% round(digits = 2),
selectionGenes = data %>% filter(group == "Genes") %$% selection %>% round(digits = 2)
)#list
})
})
contenttasksMenu <- reactive ({
general <- contentgeneral()
dropdownMenu(type = "tasks", badgeStatus = "success",
taskItem(value = general$samples, color = "blue",
paste("Samples: ", general$selectionSamples, "/", general$totalSamples)
),
taskItem(value = general$groups, color = "green",
paste("Groups: ", general$selectionGroups, "/", general$totalGroups)
),
taskItem(value = general$genes, color = "red",
paste("Genes: ", general$selectionGenes, "/", general$totalGenes)
)
)
})
output$tasksMenu <- renderMenu({
contenttasksMenu()
})
# HOME
# ====
output$UIboxplotGroupsSub <- renderUI({
withProgress(message = 'Updating boxplot list', {
incProgress(1/2, detail = "parsing sublibs")
selectInput("boxplotGroupsSub", "Groups (selection)", c("none", paste(unique(sublibs() %>% select(group) %>% extract(,1)))), selected = "none")
})
})
# -> boxplotTotal
output$boxplotTotal <- renderPlot({
if(input$boxplotGroupsTotal != "none"){
withProgress(message = 'Updating total boxplot', {
incProgress(1/3, detail = "collecting sublibs")
sublibs <- sublibs()
incProgress(2/3, detail = "generating")
ggplot(data = libs[libs$group == input$boxplotGroupsTotal, ], aes(input$boxplotGroupsTotal, counts)) +
geom_boxplot() +
xlab("") + ylab("") +
ylim(c(0, max(libs[libs$group == input$boxplotGroupsTotal, "counts"], sublibs[sublibs$group == input$boxplotGroupsSub, "counts"]))) +
theme_minimal()
})
}
})
output$boxplotSub <- renderPlot({
if(input$boxplotGroupsSub != "none"){
withProgress(message = 'Updating sub boxplot', {
incProgress(1/3, detail = "collecting sublibs")
sublibs <- sublibs()
incProgress(2/3, detail = "generating")
ggplot(data = sublibs[sublibs$group == input$boxplotGroupsSub, ], aes(input$boxplotGroupsSub, counts)) +
geom_boxplot() +
xlab("") + ylab("") +
ylim(c(0, max(libs[libs$group == input$boxplotGroupsTotal, "counts"], sublibs[sublibs$group == input$boxplotGroupsSub, "counts"]))) +
theme_minimal()
})
}
})
# DENDROGRAM
# ==========
contentdendrogram <- eventReactive(input$updateDendrogram, {
withProgress(message = 'dendrogram plot', value = 0, {
incProgress(1/3, detail = "modifying display parameters")
par(mar = c(6,2,2,6))
incProgress(2/3, detail = "generating plot")
genesDend2() %>%
dendextend::set("labels_cex", input$dendroSize) %>%
plot(horiz = input$dendroHoriz)
})
})
output$dendrogram <- renderPlot({
contentdendrogram()
})
output$dendrogramPlot <- renderUI({
plotOutput("dendrogram", height = paste0(input$heightDendro,"px"))
})
contentheight <- eventReactive(input$updateheight, {
withProgress(message = 'dendrogram height', value = 0, {
incProgress(1/3, detail = "collecting dendrogram")
genesDend <- genesDend()
genesDendRev <- rev(genesDend$height)
incProgress(2/3, detail = "drawing plot")
plot(genesDendRev[1:input$heightlength], pch = 20, ylab = "Clusters height")
abline(v = input$nbClusters + 0.5, col = "red", lty = 2)
for(i in genesDendRev){abline(h = i, lty= 2, col = "grey")}
})
})
output$height <- renderPlot({
contentheight()
})
# HEATMAP
# =======
contentheatmapGenes <- eventReactive(input$updateHeatmap, {
withProgress(message = 'heatmap', value = 0, {
incProgress(1/2, detail = "construction")
vizection:::contentheatmapGenes( cormat = corMat()
, dendr = genesDend2()
, sublibs = sublibs())
})
})
output$heatmapGenes <- renderPlot({
contentheatmapGenes()
})
# PCoA AND KMEANS
# ===============
contentgenesPCoA <- eventReactive(input$updatePCoA,{
withProgress(message = 'PCoA', {
incProgress(1/3, detail = "collecting data")
distCorMat <- distCorMat()
incProgress(2/3, detail = "calculating")
dudi.pco(distCorMat, scannf = F, nf = 2)
})
})
genesPCoA <- reactive({
contentgenesPCoA()
})
output$pcoasummary <- renderPrint({
summary(genesPCoA())
})
#
rangespcoa12 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$pcoa12dblclick, {
brush <- input$pcoa12brush
if(!is.null(brush)) {
rangespcoa12$x <- c(brush$xmin, brush$xmax)
rangespcoa12$y <- c(brush$ymin, brush$ymax)
} else {
rangespcoa12$x <- NULL
rangespcoa12$y <- NULL
}
})
pcoa12 <- eventReactive(input$updatePCoA, {
genesPCoAli <- genesPCoA()$li
ggplot(genesPCoAli, aes(x = A1, y = A2))
})
contentpcoagenes12 <- reactive({
withProgress(message = 'plot PCoA', {
incProgress(1/4, detail = "collecting PCoA")
pcoa12 <- pcoa12()
incProgress(2/4, detail = "collecting k-means colors")
kmeansColor <- kmeansColor()
incProgress(3/4, detail = "creating plot")
pcoa12 +
geom_point(color = kmeansColor) +
coord_cartesian(xlim = rangespcoa12$x, ylim = rangespcoa12$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
theme(legend.position = "none")
})
})
output$pcoagenes12 <- renderPlot({
contentpcoagenes12()
})
contentdataPCoA <- reactive({
withProgress(message = 'data PCoA', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPCoA()$li, input$pcoa12brush, xvar = "A1", yvar = "A2")
colour <- kmeansColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCoA <- renderDataTable({
contentdataPCoA()
})
#
contentSSE <- eventReactive(input$updateSSE, {
SSE <- function(mydata, title = ""){
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:input$SSElength) wss[i] <- sum(kmeans(mydata, centers=i)$withinss)
plot(1:input$SSElength, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares", main = title)
for(i in wss){abline(h = i, lty = 2, col = "grey")}
}
SSE(genesPCoA()$li[, c(1, 2)])
})
output$SSE <- renderPlot({
contentSSE()
})
#
contentpcoakmeans <- eventReactive(input$updatekmeans, {
withProgress(message = 'kmeans PCoA', {
incProgress(1/4, detail = "performing kmeans")
kmeanspco <- kmeans(genesPCoA()$li[, c(1, 2)], input$kmeansClusters)
incProgress(2/4, detail = "extracting clusters")
kmeanspcofitted <- fitted(kmeanspco)
incProgress(3/4, detail = "grouping information")
data.frame("sampleName" = rownames(genesPCoA()$li), "cluster" = kmeanspcofitted %>% rownames() %>% as.factor(), "centroidX" = kmeanspcofitted[, 1], "centroidY" = kmeanspcofitted[, 2])
})
})
pcoakmeans <- reactive({
contentpcoakmeans()
})
contentkmeansColor <- eventReactive(input$updatekmeans, {
withProgress(message = 'colors PCoA', {
incProgress(1/4, detail = "attribution")
pcoakmeans <- pcoakmeans()
colorvector <- rainbow(input$kmeansClusters) %>% substr(., 1, nchar(.)-2)
colorvector[pcoakmeans$cluster]
})
})
kmeansColor <- reactive({
contentkmeansColor()
})
# PCA
# ===
contentgenesPCA <- eventReactive(input$updatePCASummary, {
withProgress(message = 'PCA summary', {
incProgress(1/3, detail = "TPM")
genesTpm <- subgenes() %>% vizection:::contentgenesPCA_1()
incProgress(2/3, detail = "dudi.pca")
genesTpm %>% vizection:::contentgenesPCA_2()
})
})
genesPca <- reactive({
contentgenesPCA()
})
output$pcasummary <- renderPrint({
summary(genesPca())
})
output$eigenvalues <- renderPlot({
genesPca() %>% vizection:::plotEigenValues()
})
#
contentcomponents1 <- eventReactive(input$updatePCAComponents, {
withProgress(message = 'components1', {
incProgress(1/4, detail = "collecting PCA")
genesPca <- genesPca()
incProgress(2/4, detail = 'generating list')
genesCoComp1 <- vizection:::pcaCompGenesList(genesPca$co, 1)
incProgress(3/4, detail = 'generating plot')
vizection::plotHTB(genesCoComp1, 1, input$nbDispGenes)
})
})
output$components1 <- renderPlot({
contentcomponents1()
})
contentcomponents2 <- eventReactive(input$updatePCAComponents, {
withProgress(message = 'components2', {
incProgress(1/4, detail = "collecting PCA")
genesPca <- genesPca()
incProgress(2/4, detail = 'generating list')
genesCoComp2 <- vizection:::pcaCompGenesList(genesPca$co, 2)
incProgress(3/4, detail = 'generating plot')
vizection::plotHTB(genesCoComp2, 2, input$nbDispGenes)
})
})
output$components2 <- renderPlot({
contentcomponents2()
})
contentcomponents3 <- eventReactive(input$updatePCAComponents, {
withProgress(message = 'components3', {
incProgress(1/4, detail = "collecting PCA")
genesPca <- genesPca()
incProgress(2/4, detail = 'generating list')
genesCoComp3 <- vizection:::pcaCompGenesList(genesPca$co, 3)
incProgress(3/4, detail = 'generating plot')
vizection::plotHTB(genesCoComp3, 3, input$nbDispGenes)
})
})
output$components3 <- renderPlot({
contentcomponents3()
})
pcaColor <- reactive({
if(input$PCAcolor == 2){
paste(colorsPcaLi())
}
else if(input$PCAcolor == 3){
kmeansColor()
}
else{
myColors <- sublibs()$group %>% levels %>% length %>% rainbow() %>% substr(., 1, nchar(.)-2)
myColors[sublibs()$group]
}
})
pcaGroup <- reactive({
if(input$PCAcolor == 2){
as.factor(colorsPcaLi())
}
else if(input$PCAcolor == 3){
as.factor(kmeansColor())
}
else{
sublibs()$group
}
})
# ax 12
####
ranges12li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA12lidblclick, {
brush <- input$PCA12librush
if (!is.null(brush)) {
ranges12li$x <- c(brush$xmin, brush$xmax)
ranges12li$y <- c(brush$ymin, brush$ymax)
} else {
ranges12li$x <- NULL
ranges12li$y <- NULL
}
})
####
g12li <- eventReactive(input$updatePCAPlots, {
genesPcali <- genesPca()$li
if(input$showEllipse){
ggplot(genesPcali, aes(x = Axis1, y = Axis2, group = pcaGroup(), color = pcaColor(), fill = pcaColor())) + stat_ellipse(aes(color = pcaColor(), fill = pcaColor()))}
else {
ggplot(genesPcali, aes(x = Axis1, y = Axis2, group = pcaGroup(), color = pcaColor()))
}
})
contentinteractPCA12li <- reactive({
withProgress(message = 'li axes 1-2', {
incProgress(1/4, detail = "collecting PCA")
g12li <- g12li()
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
incProgress(3/4, detail = "creating plot")
g12li +
geom_point(color = pcaColor) +
coord_cartesian(xlim = ranges12li$x, ylim = ranges12li$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
theme(legend.position = "none")
})
})
output$interactPCA12li <- renderPlot({
contentinteractPCA12li()
})
####
contentdataPCA12li <- reactive({
withProgress(message = 'data 1-2', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPca()$li, input$PCA12librush, xvar = "Axis1", yvar = "Axis2")
colour <- pcaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCA12li <- renderDataTable({
contentdataPCA12li()
})
#####
ranges12co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA12codblclick, {
brush <- input$PCA12cobrush
if (!is.null(brush)) {
ranges12co$x <- c(brush$xmin, brush$xmax)
ranges12co$y <- c(brush$ymin, brush$ymax)
} else {
ranges12co$x <- NULL
ranges12co$y <- NULL
}
})
#####
g12co <- eventReactive(input$updatePCAPlots, {
genesPcaco <- genesPca()$co
ggplot(genesPcaco, aes(x = Comp1, y = Comp2))
})
contentinteractPCA12co <- reactive({
withProgress(message = 'co axes 1-2', {
incProgress(1/3, detail = "collecting PCA")
g12co <- g12co()
incProgress(2/3, detail = "creating plot")
g12co +
geom_segment(aes(x=0, y=0, xend=Comp1, yend=Comp2)) +
coord_cartesian(xlim = ranges12co$x, ylim = ranges12co$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA12co <- renderPlot({
contentinteractPCA12co()
})
#####
contentdataPCA12co <- reactive({
withProgress(message = 'data 1-2', {
incProgress(1/3, detail = "filtering")
res <- brushedPoints(genesPca()$co, input$PCA12cobrush, xvar = "Comp1", yvar = "Comp2")
incProgress(2/3, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE))
})
})
output$dataPCA12co <- renderDataTable({
contentdataPCA12co()
})
# ax 13
####
ranges13li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA13lidblclick, {
brush <- input$PCA13librush
if (!is.null(brush)) {
ranges13li$x <- c(brush$xmin, brush$xmax)
ranges13li$y <- c(brush$ymin, brush$ymax)
} else {
ranges13li$x <- NULL
ranges13li$y <- NULL
}
})
####
g13li <- eventReactive(input$updatePCAPlots, {
genesPcali <- genesPca()$li
ggplot(genesPcali, aes(x = Axis1, y = Axis3))
})
contentinteractPCA13li <- reactive({
withProgress(message = 'li axes 1-3', {
incProgress(1/4, detail = "collecting PCA")
g13li <- g13li()
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
incProgress(3/4, detail = "creating plot")
g13li +
geom_point(color = pcaColor) +
coord_cartesian(xlim = ranges13li$x, ylim = ranges13li$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA13li <- renderPlot({
contentinteractPCA13li()
})
####
contentdataPCA13li <- reactive({
withProgress(message = 'data 1-3', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPca()$li, input$PCA13librush, xvar = "Axis1", yvar = "Axis3")
colour <- pcaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCA13li <- renderDataTable({
contentdataPCA13li()
})
#####
# Zoom state for the PCA gene/column plot (co), axes 1-3.
ranges13co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA13codblclick, {
  brush <- input$PCA13cobrush
  if (!is.null(brush)) {
    ranges13co$x <- c(brush$xmin, brush$xmax)
    ranges13co$y <- c(brush$ymin, brush$ymax)
  } else {
    ranges13co$x <- NULL
    ranges13co$y <- NULL
  }
})
#####
# Base ggplot of the gene loadings on components 1 and 3.
g13co <- eventReactive(input$updatePCAPlots, {
  genesPcaco <- genesPca()$co
  ggplot(genesPcaco, aes(x = Comp1, y = Comp3))
})
# Interactive loading plot: genes drawn as arrows from the origin.
contentinteractPCA13co <- reactive({
  withProgress(message = 'co axes 1-3', {
    incProgress(1/3, detail = "collecting PCA")
    g13co <- g13co()
    incProgress(2/3, detail = "creating plot")
    g13co +
      geom_segment(aes(x=0, y=0, xend=Comp1, yend=Comp3)) +
      coord_cartesian(xlim = ranges13co$x, ylim = ranges13co$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light()
  })
})
output$interactPCA13co <- renderPlot({
  contentinteractPCA13co()
})
#####
# Plain table of the genes brushed on the 1-3 loading plot.
contentdataPCA13co <- reactive({
  withProgress(message = 'data 1-3', {
    incProgress(1/3, detail = "filtering")
    res <- brushedPoints(genesPca()$co, input$PCA13cobrush, xvar = "Comp1", yvar = "Comp3")
    incProgress(2/3, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE))
  })
})
output$dataPCA13co <- renderDataTable({
  contentdataPCA13co()
})
# ax 32
####
# Zoom state for the PCA sample plot (li), axes 3-2 (note the swapped order:
# axis 3 on x, axis 2 on y).
ranges32li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA32lidblclick, {
  brush <- input$PCA32librush
  if (!is.null(brush)) {
    ranges32li$x <- c(brush$xmin, brush$xmax)
    ranges32li$y <- c(brush$ymin, brush$ymax)
  } else {
    ranges32li$x <- NULL
    ranges32li$y <- NULL
  }
})
####
# Base ggplot of the samples on PCA axes 3 (x) and 2 (y).
g32li <- eventReactive(input$updatePCAPlots, {
  genesPcali <- genesPca()$li
  ggplot(genesPcali, aes(x = Axis3, y = Axis2))
})
# Interactive sample plot, axes 3-2.
contentinteractPCA32li <- reactive({
  withProgress(message = 'li axes 3-2', {
    incProgress(1/4, detail = "collecting PCA")
    g32li <- g32li()
    incProgress(2/4, detail = "collecting colors")
    pcaColor <- pcaColor()
    incProgress(3/4, detail = "creating plot")
    g32li +
      geom_point(color = pcaColor) +
      coord_cartesian(xlim = ranges32li$x, ylim = ranges32li$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light()
  })
})
output$interactPCA32li <- renderPlot({
  contentinteractPCA32li()
})
####
# Table of the samples brushed on the 3-2 sample plot, rows coloured to match.
contentdataPCA32li <- reactive({
  withProgress(message = 'data 3-2', {
    incProgress(1/4, detail = "collecting sulibs")
    sublibs <- sublibs()
    incProgress(2/4, detail = "filtering")
    res0 <- brushedPoints(genesPca()$li, input$PCA32librush, xvar = "Axis3", yvar = "Axis2")
    colour <- pcaColor()
    resCol <- cbind(colour, sublibs[, -1])
    res <- resCol[rownames(resCol) %in% rownames(res0), ]
    colour2 <- res$colour
    incProgress(3/4, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
      "colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
    )
  })
})
output$dataPCA32li <- renderDataTable({
  contentdataPCA32li()
})
#####
# Zoom state for the PCA gene/column plot (co), axes 3-2.
ranges32co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA32codblclick, {
  brush <- input$PCA32cobrush
  if (!is.null(brush)) {
    ranges32co$x <- c(brush$xmin, brush$xmax)
    ranges32co$y <- c(brush$ymin, brush$ymax)
  } else {
    ranges32co$x <- NULL
    ranges32co$y <- NULL
  }
})
#####
# Base ggplot of the gene loadings on components 3 (x) and 2 (y).
g32co <- eventReactive(input$updatePCAPlots, {
  genesPcaco <- genesPca()$co
  ggplot(genesPcaco, aes(x = Comp3, y = Comp2))
})
# Interactive loading plot for axes 3-2 (genes as arrows from the origin).
contentinteractPCA32co <- reactive({
  withProgress(message = 'co axes 3-2', {
    incProgress(1/3, detail = "collecting PCA")
    g32co <- g32co()
    incProgress(2/3, detail = "creating plot")
    g32co +
      geom_segment(aes(x=0, y=0, xend=Comp3, yend=Comp2)) +
      coord_cartesian(xlim = ranges32co$x, ylim = ranges32co$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light()
  })
})
output$interactPCA32co <- renderPlot({
  contentinteractPCA32co()
})
#####
# Plain table of the genes brushed on the 3-2 loading plot.
contentdataPCA32co <- reactive({
  withProgress(message = 'data 3-2', {
    incProgress(1/3, detail = "filtering")
    res <- brushedPoints(genesPca()$co, input$PCA32cobrush, xvar = "Comp3", yvar = "Comp2")
    incProgress(2/3, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE))
  })
})
output$dataPCA32co <- renderDataTable({
  contentdataPCA32co()
})
# 3D scatter of the samples on PCA axes 1-3 (plotly), generated on demand.
# Marker size, colours and group mapping follow the current PCA colour mode.
contentpca3D <- eventReactive(input$generatepca3d, {
  withProgress(message = 'pca 3D', {
    incProgress(1/4, detail = "collecting PCA")
    pcaGenesli <- genesPca()$li
    incProgress(2/4, detail = "collecting colors")
    pcaColor <- pcaColor()
    pcaGroup <- pcaGroup()
    incProgress(3/4, detail = "creating 3D plot")
    # Hover text shows the sample name from the current library selection.
    plotly::plot_ly(data = pcaGenesli, x = pcaGenesli$Axis1, y = pcaGenesli$Axis2, z = pcaGenesli$Axis3,
    type = "scatter3d", mode = "markers", marker = list(size = input$pca3ddotsize),
    color = pcaGroup, colors = pcaColor,
    text = sublibs()$samplename) %>%
      layout(scene = list(
        xaxis = list(title = "Axis1"),
        yaxis = list(title = "Axis2"),
        zaxis = list(title = "Axis3")))
  })
})
output$pca3D <- plotly::renderPlotly({
  contentpca3D()
})
#
# Histogram of expression values for one chosen gene, across either the full
# data set ("total") or the current selection, optionally faceted by group.
# A chosen sample can be highlighted with a dashed vertical line.
contentcheckplot <- eventReactive(input$updatecheckplot, {
  withProgress(message = 'checkplot', {
    incProgress(1/4, detail = "data collection")
    if(input$dataCheckplot == "total"){
      # One gene row, transposed to one value per sample; %T>% lets
      # setnames() rename the single column by reference while the
      # pipeline keeps the data frame itself.
      d <- genes[paste(input$geneNamescheckplot), ] %>% t %>% tbl_df() %T>% setnames("geneName")
      d$group <- as.factor(libs$group)
    }
    else {
      subgenes <- subgenes()
      sublibs <- sublibs()
      d <- subgenes[paste(input$geneNamescheckplot), ] %>% t %>% tbl_df() %T>% setnames("geneName")
      d$group <- as.factor(sublibs$group)
    }
    incProgress(2/4, detail = "generation")
    g <- d %>% ggplot(aes(geneName), group = group) +
      geom_histogram(binwidth = 1) +
      theme_light() +
      xlab(paste(input$geneNamescheckplot))
    incProgress(3/4, detail = "(faceting) and annotating")
    if(input$facetcheckplot){
      g <- g + facet_grid(group ~ .)
    }
    if(input$sampleNamescheckplot != "None"){
      if(input$dataCheckplot == "total"){
        # x = the selected sample's value; y = how many samples share that
        # value, so the label sits just above the histogram bar.
        xvalue <- genes[paste(input$geneNamescheckplot), paste(input$sampleNamescheckplot)]
        yvalue <- sum(genes[paste(input$geneNamescheckplot), ] == xvalue)
        g <- g + geom_vline(xintercept = xvalue, colour = "red", linetype = "dashed") +
          annotate("text", x = xvalue, y = yvalue + 1, label = paste(input$sampleNamescheckplot), colour = "red")
      } else {
        subgenes <- subgenes()
        xvalue <- subgenes[paste(input$geneNamescheckplot), paste(input$sampleNamescheckplot)]
        yvalue <- sum(subgenes[paste(input$geneNamescheckplot), ] == xvalue)
        g <- g + geom_vline(xintercept = xvalue, colour = "red", linetype = "dashed") +
          annotate("text", x = xvalue, y = yvalue + 1, label = paste(input$sampleNamescheckplot), colour = "red")
      }
    }
    g
  })
})
output$checkplot <- renderPlot({
  contentcheckplot()
})
# Gene selector for the checkplot: grep the typed pattern against the row
# names of the chosen data set and offer the matches in a select input.
contentgeneNamescheckplotUI <- eventReactive(input$updatelistcheckplot, {
  withProgress(message = 'checkplot genes', {
    incProgress(1/3, detail = "searching...")
    if(input$dataCheckplot == "total"){
      # grep() returns indices; the trailing step maps them back to names.
      checkplotGrep <- rownames(genes) %>% grep(input$geneNameCheckplot, .) %>% rownames(genes)[.]
    } else {
      checkplotGrep <- rownames(subgenes()) %>% grep(input$geneNameCheckplot, .) %>% rownames(subgenes())[.]
    }
    incProgress(2/3, detail = "creating UI")
    selectInput("geneNamescheckplot", "Gene name:", c("None", checkplotGrep), selected = "None")
  })
})
output$geneNamescheckplotUI <- renderUI({
  contentgeneNamescheckplotUI()
})
# Sample selector for the checkplot, from either all libraries or the
# current selection.
contentsampleNamescheckplotUI <- eventReactive(input$updatelistcheckplot, {
  withProgress(message = 'checkplot samples', {
    incProgress(1/2, detail = "data collection")
    if(input$dataCheckplot == "total"){
      selectInput("sampleNamescheckplot", "Sample name:",
        choices = c("None", paste(rownames(libs))), selected = "None"
      )
    } else {
      selectInput("sampleNamescheckplot", "Sample name:",
        choices = c("None", paste(rownames(sublibs()))), selected = "None"
      )
    }
  })
})
output$sampleNamescheckplotUI <- renderUI({
  contentsampleNamescheckplotUI()
})
# Plot container whose height follows the user-chosen value.
output$checkplotUI <- renderUI({
  plotOutput("checkplot", height = input$heightcheckplot)
})
# CA
# ==
# Per-gene contribution (in percent) to the selected PCA axis: the absolute
# loading on that axis divided by the sum of absolute loadings.
contentcontribDataFrame <- reactive({
  withProgress(message = 'calculating contribution', {
    incProgress(1/5, detail = "collecting PCA")
    genesPca <- genesPca()
    selectedAxis <- as.numeric(input$selectAxisCoA)
    incProgress(2/5, detail = "calculating")
    contribution <- abs(genesPca$co[,selectedAxis])/sum(abs(genesPca$co[,selectedAxis])) * 100
    incProgress(3/5, detail = "checking results")
    # Sanity check: contributions are percentages and must sum to 100
    # (all.equal tolerates floating-point error).
    stopifnot(all.equal(sum(contribution), 100))
    incProgress(4/5, detail = "creating data frame")
    data.frame("geneName" = rownames(genesPca$co), "contribution" = contribution)
  })
})
contribDataFrame <- reactive({
  contentcontribDataFrame()
})
# Boxplot of the contribution distribution for the selected axis.
contentcontributionBoxplot <- eventReactive(input$generateContributionBoxplot, {
  withProgress(message = 'contrib boxplot', {
    incProgress(1/3, detail = "collecting contrib")
    contribDataFrame <- contribDataFrame()
    incProgress(2/3, detail = "generating boxplot")
    boxplot(contribDataFrame$contribution, horizontal = T, main = paste0("Contribution on axis ", input$selectAxisCoA))
  })
})
output$contributionBoxplot <- renderPlot({
  contentcontributionBoxplot()
})
# Keep only the genes whose contribution reaches the chosen threshold.
# NOTE(review): input$nbGenesToKeep is compared against the contribution
# percentage, not used as a gene count — the input name looks misleading;
# confirm against the UI definition.
contentthresholded <- eventReactive(input$applyThreshold, {
  withProgress(message = 'applying threshold', {
    incProgress(1/4, detail = "collecting threshold")
    threshold <- input$nbGenesToKeep
    contribDataFrame <- contribDataFrame()
    incProgress(2/4, detail = "filtering contrib data frame")
    indexesThresholded <- which(contribDataFrame$contribution >= threshold)
    incProgress(3/4, detail = "creating list")
    list(
      names = contribDataFrame$geneName[indexesThresholded],
      values = contribDataFrame$contribution[indexesThresholded]
    )
  })
})
thresholded <- reactive({
  contentthresholded()
})
output$thresholdedPrint <- renderDataTable({
  as.data.frame(thresholded())
})
# One-line summary of how many genes pass the threshold.
contentnumberGenesThresholded <- eventReactive(input$applyThreshold, {
  thresholded <- thresholded()
  paste("Selection of", length(thresholded$names), "genes.")
})
output$numberGenesThresholded <- renderPrint({
  contentnumberGenesThresholded()
})
# Contribution boxplot with the current threshold drawn as a dashed red line.
contentthresholdBoxplot <- eventReactive(input$applyThreshold, {
  contribDataFrame <- contribDataFrame()
  boxplot(contribDataFrame$contribution, horizontal = T, main = paste0("Contribution on axis ", input$selectAxisCoA))
  abline(lty = 2, col = "red", v = input$nbGenesToKeep)
})
output$thresholdBoxplot <- renderPlot({
  contentthresholdBoxplot()
})
#
# Correspondence analysis (ade4::dudi.coa) on the thresholded gene subset,
# samples as rows, keeping 3 axes.
contentcoaGenes <- eventReactive(input$updateCoA, {
  withProgress(message = 'CoA summary', {
    incProgress(1/3, detail = "creating thresholded genes and libs")
    subgenes <- subgenes()
    thresholded <- thresholded()
    mainGenes <- subgenes[rownames(subgenes) %in% thresholded$names, ]
    incProgress(2/3, detail = "dudi.coa")
    dudi.coa(mainGenes %>% t %>% as.data.frame, scannf = F, nf = 3)
  })
})
coaGenes <- reactive({
  contentcoaGenes()
})
output$coasummary <- renderPrint({
  summary(coaGenes())
})
output$coaeigenvalues <- renderPlot({
  barplot(coaGenes() %$% eig, xlab = "Eigenvalues")
})
# Sample colours for the CoA plots, chosen by radio button:
# 2 = dendrogram branch colours, 3 = k-means clusters, otherwise one
# rainbow colour per group (alpha suffix stripped from the hex codes).
coaColor <- reactive({
  if(input$COAcolor == 2){
    paste(colorsPcaLi())
  }
  else if(input$COAcolor == 3){
    kmeansColor()
  }
  else{
    myColors <- sublibs()$group %>% levels %>% length %>% rainbow() %>% substr(., 1, nchar(.)-2)
    myColors[sublibs()$group]
  }
})
# Matching grouping factor for the same three colour modes.
coaGroup <- reactive({
  if(input$COAcolor == 2){
    as.factor(colorsPcaLi())
  }
  else if(input$COAcolor == 3){
    as.factor(kmeansColor())
  }
  else{
    sublibs()$group
  }
})
####
# Zoom state for the CoA plot, axes 1-2 (brush + double-click, as for PCA).
rangescoa12 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA12dblclick, {
  brush <- input$COA12brush
  if (!is.null(brush)) {
    rangescoa12$x <- c(brush$xmin, brush$xmax)
    rangescoa12$y <- c(brush$ymin, brush$ymax)
  } else {
    rangescoa12$x <- NULL
    rangescoa12$y <- NULL
  }
})
####
# Base ggplot of the samples (CoA rows) on axes 1 and 2.
coa12 <- eventReactive(input$updateCoAPlots, {
  coaGenesli <- coaGenes()$li
  ggplot(coaGenesli, aes(x = Axis1, y = Axis2))
})
# Biplot: coloured sample points plus labelled gene positions ($co overlay).
contentinteractCOA12 <- reactive({
  withProgress(message = 'coa axes 1-2', {
    incProgress(1/4, detail = "collecting COA")
    coa12 <- coa12()
    incProgress(2/4, detail = "collecting colors")
    coaColor <- coaColor()
    incProgress(3/4, detail = "creating plot")
    coa12 +
      geom_point(color = coaColor) +
      coord_cartesian(xlim = rangescoa12$x, ylim = rangescoa12$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light() +
      geom_point(data = coaGenes()$co, aes(x = Comp1, y = Comp2)) +
      geom_text(data = coaGenes()$co, aes(x = Comp1, y = Comp2, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
  })
})
output$interactCOA12 <- renderPlot({
  contentinteractCOA12()
})
####
# Table of the samples brushed on the CoA 1-2 plot, rows coloured to match.
contentdataCOA12 <- reactive({
  withProgress(message = 'data 1-2', {
    incProgress(1/4, detail = "collecting sulibs")
    sublibs <- sublibs()
    incProgress(2/4, detail = "filtering")
    res0 <- brushedPoints(coaGenes()$li, input$COA12brush, xvar = "Axis1", yvar = "Axis2")
    colour <- coaColor()
    resCol <- cbind(colour, sublibs[, -1])
    res <- resCol[rownames(resCol) %in% rownames(res0), ]
    colour2 <- res$colour
    incProgress(3/4, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
      "colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
    )
  })
})
output$dataCOA12 <- renderDataTable({
  contentdataCOA12()
})
####
# Zoom state for the CoA plot, axes 1-3.
rangescoa13 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA13dblclick, {
  brush <- input$COA13brush
  if (!is.null(brush)) {
    rangescoa13$x <- c(brush$xmin, brush$xmax)
    rangescoa13$y <- c(brush$ymin, brush$ymax)
  } else {
    rangescoa13$x <- NULL
    rangescoa13$y <- NULL
  }
})
####
# Base ggplot of the samples (CoA rows) on axes 1 and 3.
coa13 <- eventReactive(input$updateCoAPlots, {
  coaGenesli <- coaGenes()$li
  ggplot(coaGenesli, aes(x = Axis1, y = Axis3))
})
# Biplot for axes 1-3: coloured sample points plus labelled gene overlay.
contentinteractCOA13 <- reactive({
  # Fixed progress-message typo: was 'coa axes 1-2=3'.
  withProgress(message = 'coa axes 1-3', {
    incProgress(1/4, detail = "collecting COA")
    coa13 <- coa13()
    incProgress(2/4, detail = "collecting colors")
    coaColor <- coaColor()
    incProgress(3/4, detail = "creating plot")
    coa13 +
      geom_point(color = coaColor) +
      coord_cartesian(xlim = rangescoa13$x, ylim = rangescoa13$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light() +
      geom_point(data = coaGenes()$co, aes(x = Comp1, y = Comp3)) +
      geom_text(data = coaGenes()$co, aes(x = Comp1, y = Comp3, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
  })
})
output$interactCOA13 <- renderPlot({
  contentinteractCOA13()
})
####
# Table of the samples brushed on the CoA 1-3 plot, rows coloured to match.
contentdataCOA13 <- reactive({
  withProgress(message = 'data 1-3', {
    incProgress(1/4, detail = "collecting sulibs")
    sublibs <- sublibs()
    incProgress(2/4, detail = "filtering")
    res0 <- brushedPoints(coaGenes()$li, input$COA13brush, xvar = "Axis1", yvar = "Axis3")
    colour <- coaColor()
    resCol <- cbind(colour, sublibs[, -1])
    res <- resCol[rownames(resCol) %in% rownames(res0), ]
    colour2 <- res$colour
    incProgress(3/4, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
      "colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
    )
  })
})
output$dataCOA13 <- renderDataTable({
  contentdataCOA13()
})
####
# Zoom state for the CoA plot, axes 3-2 (axis 3 on x, axis 2 on y).
rangescoa32 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA32dblclick, {
  brush <- input$COA32brush
  if (!is.null(brush)) {
    rangescoa32$x <- c(brush$xmin, brush$xmax)
    rangescoa32$y <- c(brush$ymin, brush$ymax)
  } else {
    rangescoa32$x <- NULL
    rangescoa32$y <- NULL
  }
})
####
# Base ggplot of the samples (CoA rows) on axes 3 and 2.
coa32 <- eventReactive(input$updateCoAPlots, {
  coaGenesli <- coaGenes()$li
  ggplot(coaGenesli, aes(x = Axis3, y = Axis2))
})
# Biplot for axes 3-2: coloured sample points plus labelled gene overlay.
contentinteractCOA32 <- reactive({
  withProgress(message = 'coa axes 3-2', {
    incProgress(1/4, detail = "collecting COA")
    coa32 <- coa32()
    incProgress(2/4, detail = "collecting colors")
    coaColor <- coaColor()
    incProgress(3/4, detail = "creating plot")
    coa32 +
      geom_point(color = coaColor) +
      coord_cartesian(xlim = rangescoa32$x, ylim = rangescoa32$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light() +
      geom_point(data = coaGenes()$co, aes(x = Comp3, y = Comp2)) +
      geom_text(data = coaGenes()$co, aes(x = Comp3, y = Comp2, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
  })
})
output$interactCOA32 <- renderPlot({
  contentinteractCOA32()
})
####
# Table of the samples brushed on the CoA 3-2 plot, rows coloured to match.
contentdataCOA32 <- reactive({
  withProgress(message = 'data 3-2', {
    incProgress(1/4, detail = "collecting sulibs")
    sublibs <- sublibs()
    incProgress(2/4, detail = "filtering")
    res0 <- brushedPoints(coaGenes()$li, input$COA32brush, xvar = "Axis3", yvar = "Axis2")
    colour <- coaColor()
    resCol <- cbind(colour, sublibs[, -1])
    res <- resCol[rownames(resCol) %in% rownames(res0), ]
    colour2 <- res$colour
    incProgress(3/4, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
      "colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
    )
  })
})
output$dataCOA32 <- renderDataTable({
  contentdataCOA32()
})
# 3D scatter of the samples on CoA axes 1-3 (plotly), generated on demand,
# mirroring the PCA 3D plot above.
contentcoa3D <- eventReactive(input$generatecoa3d, {
  withProgress(message = 'coa 3D', {
    incProgress(1/4, detail = "collecting")
    coaGenesli <- coaGenes()$li
    incProgress(2/4, detail = "collecting colors")
    coaColor <- coaColor()
    coaGroup <- coaGroup()
    incProgress(3/4, detail = "creating 3D plot")
    plotly::plot_ly(data = coaGenesli, x = coaGenesli$Axis1, y = coaGenesli$Axis2, z = coaGenesli$Axis3,
    type = "scatter3d", mode = "markers", marker = list(size = input$coa3ddotsize),
    color = coaGroup, colors = coaColor,
    text = sublibs()$samplename) %>%
      layout(scene = list(
        xaxis = list(title = "Axis1"),
        yaxis = list(title = "Axis2"),
        zaxis = list(title = "Axis3")))
  })
})
output$coa3D <- plotly::renderPlotly({
  contentcoa3D()
})
# EXPORT
# ======
# Save the current gene / library selections as RDS files, named from the
# corresponding text inputs and written to the working directory.
observeEvent(input$exportGenes, {
  withProgress(message = "Exporting genes", {
    incProgress(1/2, detail = "processing")
    saveRDS(subgenes(), file = file.path(getwd(), paste(input$genesRDSName)))
  })
})
observeEvent(input$exportLibs, {
  withProgress(message = "Exporting libs", {
    incProgress(1/2, detail = "processing")
    # Consistency fix: build the destination path the same way as the genes
    # export above (a bare relative name resolves to the same location, but
    # the explicit form keeps the two exports uniform).
    saveRDS(sublibs(), file = file.path(getwd(), paste(input$libsRDSName)))
  })
})
})
|
/inst/shiny/vizection/server.R
|
no_license
|
shamansim/Vizection
|
R
| false
| false
| 43,513
|
r
|
# loading required library
library(shiny)
library(shinydashboard)
library(ade4)
library(adegraphics)
library(magrittr)
library(dplyr)
library(ggplot2)
library(data.table)
library(DT)
library(plotly)
# Load the input data according to the vizection.* options: either two
# global objects (expression matrix `genes` + metadata `libs`), or a
# SummarizedExperiment from which both are derived.
if (getOption("vizection.dataIs") == "genesAndLibs") {
  genes <- get(getOption("vizection.genes"), .GlobalEnv)
  libs <- get(getOption("vizection.libs"), .GlobalEnv)
} else if (getOption("vizection.dataIs") == "se") {
  se <- get(getOption("vizection.se"), .GlobalEnv)
  genes <- SummarizedExperiment::assay(se) %>% data.frame
  libs <- SummarizedExperiment::colData(se) %>% data.frame(stringsAsFactors = FALSE)
  # Derived columns used throughout the app: sample names and total counts.
  libs$samplename <- rownames(libs)
  libs$counts <- colSums(genes)
} else stop("Could not detect what data to load.")
# Guarantee a factor `group` column even when none was provided.
if (is.null(libs$group))
  libs$group <- "No groups"
libs$group %<>% factor
vizectionValidate(genes = genes, libs = libs)
# Collect the first edgePar attribute (the branch colour) of every leaf of
# a dendrogram, in leaf order, as a flat vector.
showDendrColors <- function(dendro) {
  leaf_edge_cols <- dendrapply(dendro, function(node) {
    if (is.leaf(node)) {
      attr(node, "edgePar")[1]
    }
  })
  unlist(leaf_edge_cols)
}
shinyServer(function(input, output, session) {
# FILTERS
# =======
# Boolean masks over `libs` rows, delegated to package-internal helpers:
# the pre-filter (group-level) and the final filter (sample-level).
filterSelectionBool <- reactive({
  withProgress(message = 'Updating pre-filter', {
    incProgress(1/2, detail = "updating")
    vizection:::filterSelectionBool(libs, input)
  })
})
filterSelectionBoolFinal <- reactive({
  withProgress(message = 'Updating filter', {
    incProgress(1/2, detail = "updating")
    vizection:::filterSelectionBoolFinal(libs, input)
  })
})
# SUBGENES SUBLIBS
# ================
# The filtered expression matrix (columns restricted to the selection,
# then all-useless genes dropped) and the matching library metadata.
subgenes <- reactive({
  withProgress(message = 'Updating subgenes', {
    incProgress(1/3, detail = "filtering")
    pre_subgenes <- vizection:::subgenes_1(libs, input, genes)
    incProgress(2/3, detail = "removing useless genes")
    vizection:::subgenes_2(pre_subgenes) #removing useless genes
  })
})
sublibs <- reactive({
  withProgress(message = 'Updating sublibs', {
    incProgress(1/2, detail = "filtering")
    vizection:::sublibs(libs, input)
  })
})
# -> libsGroup
# Group checkbox list, rebuilt from the groups present in the pre-filtered
# libraries (each label annotated with its sample count).
contentlibsGroup <- reactive({
  withProgress(message = 'updating groups', {
    incProgress(1/3, detail = "extracting from filter")
    filterExtractedBool <- vizection:::filterExtractedBool(libs, input)
    incProgress(2/3, detail = "creating checkbox")
    myGroups <- vizection:::addNumberOfSamples(libs, paste(unique(libs$group[filterExtractedBool])))
    checkboxGroupInput(inputId = "groupsCheck", label = "",
      choices = myGroups,
      selected = myGroups
    )
  })
})
output$libsGroup <- renderUI({
  contentlibsGroup()
})
# Keep the group checkboxes in sync; input$bar acts as a select-all toggle
# (when FALSE, `selected` is NULL and everything is deselected).
observe({
  filterExtractedBool <- vizection:::filterExtractedBool(libs, input)
  myGroups <- vizection:::addNumberOfSamples(libs, paste(unique(libs$group[filterExtractedBool])))
  updateCheckboxGroupInput(session,
    "groupsCheck",
    choices = myGroups,
    selected = if(input$bar) myGroups
  )
})
# -> libsSamplename
# Per-sample checkbox list (labels annotated with the group name), rebuilt
# on demand from the current pre-filter selection.
contentlibsSamplename <- eventReactive(input$updateSamples, {
  withProgress(message = 'updating samples', {
    incProgress(1/3, detail = "extracting selection")
    filterSelectionNames <- rownames(libs)[filterSelectionBool()]
    incProgress(2/3, detail = "creating checkbox")
    mySamples <- vizection:::addGroupName(libs, paste(filterSelectionNames))
    checkboxGroupInput(inputId = "samplesCheck", label = "",
      choices = mySamples,
      selected = mySamples
    )
  })
})
output$libsSamplename <- renderUI({
  contentlibsSamplename()
})
# SHARED
# ======
# Sample-sample correlation matrix of the selection, computed in three
# helper steps (TPM normalisation, log1p, correlation).
corMat <- eventReactive(input$updateCorMat, {
  withProgress(message = 'correlation matrix', {
    incProgress(1/4, detail = "TPM")
    a <- subgenes() %>% vizection:::corMat_1()
    incProgress(2/4, detail = "log1p")
    b <- a %>% vizection:::corMat_2()
    incProgress(3/4, detail = "cor")
    b %>% vizection:::corMat_3()
  })
})
# Distance matrix derived from the correlations, made quasi-Euclidean so
# it can feed the PCoA.
distCorMat <- reactive({
  withProgress(message = 'distance matrix', value = 0, {
    incProgress(1/3, detail = "as.dist")
    a <- corMat() %>% vizection:::distCorMat_1()
    incProgress(2/3, detail = "quasieuclid")
    a %>% vizection:::distCorMat_2()
  })
})
# Hierarchical clustering of the samples on that distance matrix.
genesDend <- reactive({
  withProgress(message = 'cluster', value = 0, {
    incProgress(1/2, detail = "hclust")
    distCorMat() %>% vizection:::genesDend()
  })
})
# Styled dendrogram: cut into the requested number of groups, branch
# colours applied, then ladderized for display.
genesDend2 <- reactive({
  withProgress(message = 'dendrogram', {
    incProgress(1/6, detail = "nbGroups")
    nbGroups <- vizection:::genesDend2_1(input)
    incProgress(2/6, detail = "colGroups")
    colsGrps <- vizection:::genesDend2_2(nbGroups)
    incProgress(3/6, detail = "colors")
    cols <- vizection:::genesDend2_3(input)
    incProgress(4/6, detail = "customization")
    a <- genesDend() %>%
      vizection:::genesDend2_4( input
      , nbGroups = nbGroups
      , colsGrps = colsGrps
      , cols = cols)
    incProgress(5/6, detail = "ladderize")
    a %>% vizection:::genesDend2_5()
  })
})
# Per-sample colours taken from the dendrogram branch colours, reordered
# back into the original sample order (via order.dendrogram) so they line
# up with the rows of genesPca()$li.
colorsPcaLi <- reactive({
  withProgress(message = 'colors PCA', {
    incProgress(1/3, detail = "collecting nb clusters")
    # Side effect only: reset the graphics palette. ifelse() is meant for
    # vectorised selection; a plain if/else is the idiomatic form for a
    # scalar condition used purely for its side effect.
    if (input$nbClusters != 1) {
      palette(rainbow_hcl(input$nbClusters, c = 50, l = 100))
    } else {
      palette(rainbow_hcl(2, c = 50, l = 100))
    }
    incProgress(2/3, detail = "generating colors")
    data.frame(colors = showDendrColors(genesDend2()), sampleIndex = order.dendrogram(genesDend2())) %>%
      setorder("sampleIndex") %$%
      return(colors)
  })
})
# HEADER
# ======
# Selection summary for the dashboard header: percentage, total and selected
# counts for samples, groups and genes.
contentgeneral <- eventReactive(input$updateSelection, {
  withProgress(message = 'Updating selection information', {
    incProgress(1/5, detail = "collecting sublibs")
    sublibs <- sublibs()
    incProgress(2/5, detail = "collecting subgenes")
    subgenes <- subgenes()
    incProgress(3/5, detail = "generating dataframe")
    # NOTE(review): nrow(x[-1, ]) counts rows minus one — presumably to
    # exclude a header-like first row; confirm this is intentional.
    data <- data.frame(
      group = c("Samples", "Groups", "Genes"),
      value = c(sum(filterSelectionBoolFinal()) / nrow(libs) * 100,
      length(unique(sublibs$group)) / length(unique(libs$group)) * 100,
      nrow(subgenes[-1,]) / nrow(genes[-1,]) * 100),
      total = c(nrow(libs), length(unique(libs$group)), nrow(genes[-1,])),
      selection = c(sum(filterSelectionBoolFinal()), length(unique(sublibs$group)), nrow(subgenes[-1,]))
    )
    incProgress(4/5, detail = "final process")
    # Flatten into named scalars for easy use in the tasks menu below.
    list(
      samples = data %>% filter(group == "Samples") %$% value %>% round(digits = 2),
      groups = data %>% filter(group == "Groups") %$% value %>% round(digits = 2),
      genes = data %>% filter(group == "Genes") %$% value %>% round(digits = 2),
      totalSamples = data %>% filter(group == "Samples") %$% total %>% round(digits = 2),
      totalGroups = data %>% filter(group == "Groups") %$% total %>% round(digits = 2),
      totalGenes = data %>% filter(group == "Genes") %$% total %>% round(digits = 2),
      selectionSamples = data %>% filter(group == "Samples") %$% selection %>% round(digits = 2),
      selectionGroups = data %>% filter(group == "Groups") %$% selection %>% round(digits = 2),
      selectionGenes = data %>% filter(group == "Genes") %$% selection %>% round(digits = 2)
    )#list
  })
})
# Dashboard tasks dropdown: one progress item per category.
contenttasksMenu <- reactive ({
  general <- contentgeneral()
  dropdownMenu(type = "tasks", badgeStatus = "success",
    taskItem(value = general$samples, color = "blue",
      paste("Samples: ", general$selectionSamples, "/", general$totalSamples)
    ),
    taskItem(value = general$groups, color = "green",
      paste("Groups: ", general$selectionGroups, "/", general$totalGroups)
    ),
    taskItem(value = general$genes, color = "red",
      paste("Genes: ", general$selectionGenes, "/", general$totalGenes)
    )
  )
})
output$tasksMenu <- renderMenu({
  contenttasksMenu()
})
# HOME
# ====
# Group selector for the selection-side boxplot, built from the groups
# present in the current selection.
output$UIboxplotGroupsSub <- renderUI({
  withProgress(message = 'Updating boxplot list', {
    incProgress(1/2, detail = "parsing sublibs")
    selectInput("boxplotGroupsSub", "Groups (selection)", c("none", paste(unique(sublibs() %>% select(group) %>% extract(,1)))), selected = "none")
  })
})
# -> boxplotTotal
# Boxplot of library counts for the chosen group over the FULL data set.
# Both boxplots share the same y-limit so they are visually comparable.
output$boxplotTotal <- renderPlot({
  if(input$boxplotGroupsTotal != "none"){
    withProgress(message = 'Updating total boxplot', {
      incProgress(1/3, detail = "collecting sublibs")
      sublibs <- sublibs()
      incProgress(2/3, detail = "generating")
      ggplot(data = libs[libs$group == input$boxplotGroupsTotal, ], aes(input$boxplotGroupsTotal, counts)) +
        geom_boxplot() +
        xlab("") + ylab("") +
        ylim(c(0, max(libs[libs$group == input$boxplotGroupsTotal, "counts"], sublibs[sublibs$group == input$boxplotGroupsSub, "counts"]))) +
        theme_minimal()
    })
  }
})
# Same boxplot restricted to the current selection.
output$boxplotSub <- renderPlot({
  if(input$boxplotGroupsSub != "none"){
    withProgress(message = 'Updating sub boxplot', {
      incProgress(1/3, detail = "collecting sublibs")
      sublibs <- sublibs()
      incProgress(2/3, detail = "generating")
      ggplot(data = sublibs[sublibs$group == input$boxplotGroupsSub, ], aes(input$boxplotGroupsSub, counts)) +
        geom_boxplot() +
        xlab("") + ylab("") +
        ylim(c(0, max(libs[libs$group == input$boxplotGroupsTotal, "counts"], sublibs[sublibs$group == input$boxplotGroupsSub, "counts"]))) +
        theme_minimal()
    })
  }
})
# DENDROGRAM
# ==========
# Draw the styled dendrogram with user-controlled label size and
# orientation; the wide right margin leaves room for sample labels.
contentdendrogram <- eventReactive(input$updateDendrogram, {
  withProgress(message = 'dendrogram plot', value = 0, {
    incProgress(1/3, detail = "modifying display parameters")
    par(mar = c(6,2,2,6))
    incProgress(2/3, detail = "generating plot")
    genesDend2() %>%
      dendextend::set("labels_cex", input$dendroSize) %>%
      plot(horiz = input$dendroHoriz)
  })
})
output$dendrogram <- renderPlot({
  contentdendrogram()
})
# Plot container with user-controlled height.
output$dendrogramPlot <- renderUI({
  plotOutput("dendrogram", height = paste0(input$heightDendro,"px"))
})
# Scree-like plot of merge heights (largest first) to help choose the
# number of clusters; the red line marks the current choice.
contentheight <- eventReactive(input$updateheight, {
  withProgress(message = 'dendrogram height', value = 0, {
    incProgress(1/3, detail = "collecting dendrogram")
    genesDend <- genesDend()
    genesDendRev <- rev(genesDend$height)
    incProgress(2/3, detail = "drawing plot")
    plot(genesDendRev[1:input$heightlength], pch = 20, ylab = "Clusters height")
    abline(v = input$nbClusters + 0.5, col = "red", lty = 2)
    for(i in genesDendRev){abline(h = i, lty= 2, col = "grey")}
  })
})
output$height <- renderPlot({
  contentheight()
})
# HEATMAP
# =======
# Correlation heatmap ordered by the styled dendrogram, delegated to a
# package-internal helper.
contentheatmapGenes <- eventReactive(input$updateHeatmap, {
  withProgress(message = 'heatmap', value = 0, {
    incProgress(1/2, detail = "construction")
    vizection:::contentheatmapGenes( cormat = corMat()
    , dendr = genesDend2()
    , sublibs = sublibs())
  })
})
output$heatmapGenes <- renderPlot({
  contentheatmapGenes()
})
# PCoA AND KMEANS
# ===============
# Principal coordinate analysis (ade4::dudi.pco) of the sample distance
# matrix, keeping 2 axes.
contentgenesPCoA <- eventReactive(input$updatePCoA,{
  withProgress(message = 'PCoA', {
    incProgress(1/3, detail = "collecting data")
    distCorMat <- distCorMat()
    incProgress(2/3, detail = "calculating")
    dudi.pco(distCorMat, scannf = F, nf = 2)
  })
})
genesPCoA <- reactive({
  contentgenesPCoA()
})
output$pcoasummary <- renderPrint({
  summary(genesPCoA())
})
#
# Zoom state for the PCoA plot (brush + double-click, as elsewhere).
rangespcoa12 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$pcoa12dblclick, {
  brush <- input$pcoa12brush
  if(!is.null(brush)) {
    rangespcoa12$x <- c(brush$xmin, brush$xmax)
    rangespcoa12$y <- c(brush$ymin, brush$ymax)
  } else {
    rangespcoa12$x <- NULL
    rangespcoa12$y <- NULL
  }
})
# Base ggplot of the samples on PCoA axes A1/A2.
pcoa12 <- eventReactive(input$updatePCoA, {
  genesPCoAli <- genesPCoA()$li
  ggplot(genesPCoAli, aes(x = A1, y = A2))
})
# PCoA plot coloured by the k-means clustering.
contentpcoagenes12 <- reactive({
  withProgress(message = 'plot PCoA', {
    incProgress(1/4, detail = "collecting PCoA")
    pcoa12 <- pcoa12()
    incProgress(2/4, detail = "collecting k-means colors")
    kmeansColor <- kmeansColor()
    incProgress(3/4, detail = "creating plot")
    pcoa12 +
      geom_point(color = kmeansColor) +
      coord_cartesian(xlim = rangespcoa12$x, ylim = rangespcoa12$y) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_hline(yintercept = 0, alpha = 0.2) +
      theme_light() +
      theme(legend.position = "none")
  })
})
output$pcoagenes12 <- renderPlot({
  contentpcoagenes12()
})
# Table of the samples brushed on the PCoA plot, rows coloured like their
# k-means cluster.
contentdataPCoA <- reactive({
  withProgress(message = 'data PCoA', {
    incProgress(1/4, detail = "collecting sulibs")
    sublibs <- sublibs()
    incProgress(2/4, detail = "filtering")
    res0 <- brushedPoints(genesPCoA()$li, input$pcoa12brush, xvar = "A1", yvar = "A2")
    colour <- kmeansColor()
    resCol <- cbind(colour, sublibs[, -1])
    res <- resCol[rownames(resCol) %in% rownames(res0), ]
    colour2 <- res$colour
    incProgress(3/4, detail = "creating datatable")
    datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
      "colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
    )
  })
})
output$dataPCoA <- renderDataTable({
  contentdataPCoA()
})
#
# Elbow plot: within-group sum of squares for k = 1..SSElength clusters on
# the first two PCoA axes, to help choose the k-means cluster count.
contentSSE <- eventReactive(input$updateSSE, {
  SSE <- function(mydata, title = ""){
    # wss[1] = total variance case (single cluster); the loop fills the
    # within-cluster sums for k >= 2.
    wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
    for (i in 2:input$SSElength) wss[i] <- sum(kmeans(mydata, centers=i)$withinss)
    plot(1:input$SSElength, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares", main = title)
    for(i in wss){abline(h = i, lty = 2, col = "grey")}
  }
  SSE(genesPCoA()$li[, c(1, 2)])
})
output$SSE <- renderPlot({
  contentSSE()
})
#
# K-means on the first two PCoA axes; rows of fitted() are named by the
# cluster each sample belongs to, which is reused as the cluster factor.
contentpcoakmeans <- eventReactive(input$updatekmeans, {
  withProgress(message = 'kmeans PCoA', {
    incProgress(1/4, detail = "performing kmeans")
    kmeanspco <- kmeans(genesPCoA()$li[, c(1, 2)], input$kmeansClusters)
    incProgress(2/4, detail = "extracting clusters")
    kmeanspcofitted <- fitted(kmeanspco)
    incProgress(3/4, detail = "grouping information")
    data.frame("sampleName" = rownames(genesPCoA()$li), "cluster" = kmeanspcofitted %>% rownames() %>% as.factor(), "centroidX" = kmeanspcofitted[, 1], "centroidY" = kmeanspcofitted[, 2])
  })
})
pcoakmeans <- reactive({
  contentpcoakmeans()
})
# One colour per k-means cluster (alpha suffix stripped from the hex codes),
# expanded to one entry per sample.
contentkmeansColor <- eventReactive(input$updatekmeans, {
  withProgress(message = 'colors PCoA', {
    incProgress(1/4, detail = "attribution")
    pcoakmeans <- pcoakmeans()
    colorvector <- rainbow(input$kmeansClusters) %>% substr(., 1, nchar(.)-2)
    colorvector[pcoakmeans$cluster]
  })
})
kmeansColor <- reactive({
  contentkmeansColor()
})
# PCA
# ===
# PCA (via package helpers: TPM normalisation then dudi.pca) on the current
# gene selection.
contentgenesPCA <- eventReactive(input$updatePCASummary, {
  withProgress(message = 'PCA summary', {
    incProgress(1/3, detail = "TPM")
    genesTpm <- subgenes() %>% vizection:::contentgenesPCA_1()
    incProgress(2/3, detail = "dudi.pca")
    genesTpm %>% vizection:::contentgenesPCA_2()
  })
})
genesPca <- reactive({
  contentgenesPCA()
})
output$pcasummary <- renderPrint({
  summary(genesPca())
})
output$eigenvalues <- renderPlot({
  genesPca() %>% vizection:::plotEigenValues()
})
#
# Top/bottom gene loadings for components 1-3, one plot per component,
# limited to input$nbDispGenes genes.
contentcomponents1 <- eventReactive(input$updatePCAComponents, {
  withProgress(message = 'components1', {
    incProgress(1/4, detail = "collecting PCA")
    genesPca <- genesPca()
    incProgress(2/4, detail = 'generating list')
    genesCoComp1 <- vizection:::pcaCompGenesList(genesPca$co, 1)
    incProgress(3/4, detail = 'generating plot')
    vizection::plotHTB(genesCoComp1, 1, input$nbDispGenes)
  })
})
output$components1 <- renderPlot({
  contentcomponents1()
})
contentcomponents2 <- eventReactive(input$updatePCAComponents, {
  withProgress(message = 'components2', {
    incProgress(1/4, detail = "collecting PCA")
    genesPca <- genesPca()
    incProgress(2/4, detail = 'generating list')
    genesCoComp2 <- vizection:::pcaCompGenesList(genesPca$co, 2)
    incProgress(3/4, detail = 'generating plot')
    vizection::plotHTB(genesCoComp2, 2, input$nbDispGenes)
  })
})
output$components2 <- renderPlot({
  contentcomponents2()
})
contentcomponents3 <- eventReactive(input$updatePCAComponents, {
  withProgress(message = 'components3', {
    incProgress(1/4, detail = "collecting PCA")
    genesPca <- genesPca()
    incProgress(2/4, detail = 'generating list')
    genesCoComp3 <- vizection:::pcaCompGenesList(genesPca$co, 3)
    incProgress(3/4, detail = 'generating plot')
    vizection::plotHTB(genesCoComp3, 3, input$nbDispGenes)
  })
})
output$components3 <- renderPlot({
  contentcomponents3()
})
# Per-sample colours for the PCA plots. Mode 2 uses colorsPcaLi() (defined
# elsewhere in this server — presumably a precomputed li colouring; confirm),
# mode 3 uses the k-means colours, otherwise one rainbow colour per group.
pcaColor <- reactive({
if(input$PCAcolor == 2){
paste(colorsPcaLi())
}
else if(input$PCAcolor == 3){
kmeansColor()
}
else{
# Strip the alpha suffix from rainbow()'s "#RRGGBBFF" values.
myColors <- sublibs()$group %>% levels %>% length %>% rainbow() %>% substr(., 1, nchar(.)-2)
myColors[sublibs()$group]
}
})
# Grouping factor matching pcaColor(); drives ellipses and the 3D legend.
pcaGroup <- reactive({
if(input$PCAcolor == 2){
as.factor(colorsPcaLi())
}
else if(input$PCAcolor == 3){
as.factor(kmeansColor())
}
else{
sublibs()$group
}
})
# ax 12
####
# Zoom state for the li (samples) plot on axes 1-2; a double-click applies the
# current brush as the zoom window, or resets it when no brush is active.
ranges12li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA12lidblclick, {
brush <- input$PCA12librush
if (!is.null(brush)) {
ranges12li$x <- c(brush$xmin, brush$xmax)
ranges12li$y <- c(brush$ymin, brush$ymax)
} else {
ranges12li$x <- NULL
ranges12li$y <- NULL
}
})
####
# Base ggplot for samples on axes 1-2, optionally with per-group ellipses.
g12li <- eventReactive(input$updatePCAPlots, {
genesPcali <- genesPca()$li
if(input$showEllipse){
ggplot(genesPcali, aes(x = Axis1, y = Axis2, group = pcaGroup(), color = pcaColor(), fill = pcaColor())) + stat_ellipse(aes(color = pcaColor(), fill = pcaColor()))}
else {
ggplot(genesPcali, aes(x = Axis1, y = Axis2, group = pcaGroup(), color = pcaColor()))
}
})
# Final interactive plot: points, zoom window, and origin reference lines.
contentinteractPCA12li <- reactive({
withProgress(message = 'li axes 1-2', {
incProgress(1/4, detail = "collecting PCA")
g12li <- g12li()
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
incProgress(3/4, detail = "creating plot")
g12li +
geom_point(color = pcaColor) +
coord_cartesian(xlim = ranges12li$x, ylim = ranges12li$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
theme(legend.position = "none")
})
})
output$interactPCA12li <- renderPlot({
contentinteractPCA12li()
})
####
# Table of the samples currently brushed on the 1-2 plot; each row is painted
# with the same colour as its point.
contentdataPCA12li <- reactive({
withProgress(message = 'data 1-2', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPca()$li, input$PCA12librush, xvar = "Axis1", yvar = "Axis2")
colour <- pcaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCA12li <- renderDataTable({
contentdataPCA12li()
})
#####
# Zoom state for the co (gene loadings) plot on axes 1-2.
ranges12co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA12codblclick, {
brush <- input$PCA12cobrush
if (!is.null(brush)) {
ranges12co$x <- c(brush$xmin, brush$xmax)
ranges12co$y <- c(brush$ymin, brush$ymax)
} else {
ranges12co$x <- NULL
ranges12co$y <- NULL
}
})
#####
g12co <- eventReactive(input$updatePCAPlots, {
genesPcaco <- genesPca()$co
ggplot(genesPcaco, aes(x = Comp1, y = Comp2))
})
# Gene loadings drawn as segments from the origin, with the zoom applied.
contentinteractPCA12co <- reactive({
withProgress(message = 'co axes 1-2', {
incProgress(1/3, detail = "collecting PCA")
g12co <- g12co()
incProgress(2/3, detail = "creating plot")
g12co +
geom_segment(aes(x=0, y=0, xend=Comp1, yend=Comp2)) +
coord_cartesian(xlim = ranges12co$x, ylim = ranges12co$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA12co <- renderPlot({
contentinteractPCA12co()
})
#####
# Table of the gene loadings currently brushed on the 1-2 co plot.
contentdataPCA12co <- reactive({
withProgress(message = 'data 1-2', {
incProgress(1/3, detail = "filtering")
res <- brushedPoints(genesPca()$co, input$PCA12cobrush, xvar = "Comp1", yvar = "Comp2")
incProgress(2/3, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE))
})
})
output$dataPCA12co <- renderDataTable({
contentdataPCA12co()
})
# ax 13
####
# Same structure as the axes 1-2 section, for axes 1-3.
ranges13li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA13lidblclick, {
brush <- input$PCA13librush
if (!is.null(brush)) {
ranges13li$x <- c(brush$xmin, brush$xmax)
ranges13li$y <- c(brush$ymin, brush$ymax)
} else {
ranges13li$x <- NULL
ranges13li$y <- NULL
}
})
####
g13li <- eventReactive(input$updatePCAPlots, {
genesPcali <- genesPca()$li
ggplot(genesPcali, aes(x = Axis1, y = Axis3))
})
# Interactive sample plot on axes 1-3 (no ellipse option here, unlike 1-2).
contentinteractPCA13li <- reactive({
withProgress(message = 'li axes 1-3', {
incProgress(1/4, detail = "collecting PCA")
g13li <- g13li()
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
incProgress(3/4, detail = "creating plot")
g13li +
geom_point(color = pcaColor) +
coord_cartesian(xlim = ranges13li$x, ylim = ranges13li$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA13li <- renderPlot({
contentinteractPCA13li()
})
####
# Brushed-sample table for axes 1-3, rows coloured like their points.
contentdataPCA13li <- reactive({
withProgress(message = 'data 1-3', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPca()$li, input$PCA13librush, xvar = "Axis1", yvar = "Axis3")
colour <- pcaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCA13li <- renderDataTable({
contentdataPCA13li()
})
#####
# Zoom state for the co (gene loadings) plot on axes 1-3.
ranges13co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA13codblclick, {
brush <- input$PCA13cobrush
if (!is.null(brush)) {
ranges13co$x <- c(brush$xmin, brush$xmax)
ranges13co$y <- c(brush$ymin, brush$ymax)
} else {
ranges13co$x <- NULL
ranges13co$y <- NULL
}
})
#####
g13co <- eventReactive(input$updatePCAPlots, {
genesPcaco <- genesPca()$co
ggplot(genesPcaco, aes(x = Comp1, y = Comp3))
})
# Gene loadings on axes 1-3 drawn as origin segments.
contentinteractPCA13co <- reactive({
withProgress(message = 'co axes 1-3', {
incProgress(1/3, detail = "collecting PCA")
g13co <- g13co()
incProgress(2/3, detail = "creating plot")
g13co +
geom_segment(aes(x=0, y=0, xend=Comp1, yend=Comp3)) +
coord_cartesian(xlim = ranges13co$x, ylim = ranges13co$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA13co <- renderPlot({
contentinteractPCA13co()
})
#####
# Brushed gene-loadings table for axes 1-3.
contentdataPCA13co <- reactive({
withProgress(message = 'data 1-3', {
incProgress(1/3, detail = "filtering")
res <- brushedPoints(genesPca()$co, input$PCA13cobrush, xvar = "Comp1", yvar = "Comp3")
incProgress(2/3, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE))
})
})
output$dataPCA13co <- renderDataTable({
contentdataPCA13co()
})
# ax 32
####
# Same structure again, for axes 3 (x) vs 2 (y).
ranges32li <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA32lidblclick, {
brush <- input$PCA32librush
if (!is.null(brush)) {
ranges32li$x <- c(brush$xmin, brush$xmax)
ranges32li$y <- c(brush$ymin, brush$ymax)
} else {
ranges32li$x <- NULL
ranges32li$y <- NULL
}
})
####
g32li <- eventReactive(input$updatePCAPlots, {
genesPcali <- genesPca()$li
ggplot(genesPcali, aes(x = Axis3, y = Axis2))
})
# Interactive sample plot on axes 3-2.
contentinteractPCA32li <- reactive({
withProgress(message = 'li axes 3-2', {
incProgress(1/4, detail = "collecting PCA")
g32li <- g32li()
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
incProgress(3/4, detail = "creating plot")
g32li +
geom_point(color = pcaColor) +
coord_cartesian(xlim = ranges32li$x, ylim = ranges32li$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA32li <- renderPlot({
contentinteractPCA32li()
})
####
# Brushed-sample table for axes 3-2, rows coloured like their points.
contentdataPCA32li <- reactive({
withProgress(message = 'data 3-2', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(genesPca()$li, input$PCA32librush, xvar = "Axis3", yvar = "Axis2")
colour <- pcaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataPCA32li <- renderDataTable({
contentdataPCA32li()
})
#####
# Zoom state for the co (gene loadings) plot on axes 3-2.
ranges32co <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$PCA32codblclick, {
brush <- input$PCA32cobrush
if (!is.null(brush)) {
ranges32co$x <- c(brush$xmin, brush$xmax)
ranges32co$y <- c(brush$ymin, brush$ymax)
} else {
ranges32co$x <- NULL
ranges32co$y <- NULL
}
})
#####
g32co <- eventReactive(input$updatePCAPlots, {
genesPcaco <- genesPca()$co
ggplot(genesPcaco, aes(x = Comp3, y = Comp2))
})
# Gene loadings on axes 3-2 drawn as origin segments.
contentinteractPCA32co <- reactive({
withProgress(message = 'co axes 3-2', {
incProgress(1/3, detail = "collecting PCA")
g32co <- g32co()
incProgress(2/3, detail = "creating plot")
g32co +
geom_segment(aes(x=0, y=0, xend=Comp3, yend=Comp2)) +
coord_cartesian(xlim = ranges32co$x, ylim = ranges32co$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light()
})
})
output$interactPCA32co <- renderPlot({
contentinteractPCA32co()
})
#####
# Brushed gene-loadings table for axes 3-2.
contentdataPCA32co <- reactive({
withProgress(message = 'data 3-2', {
incProgress(1/3, detail = "filtering")
res <- brushedPoints(genesPca()$co, input$PCA32cobrush, xvar = "Comp3", yvar = "Comp2")
incProgress(2/3, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE))
})
})
output$dataPCA32co <- renderDataTable({
contentdataPCA32co()
})
# 3D plotly scatter of the samples on PCA axes 1-3, coloured and grouped the
# same way as the 2D plots; regenerated on the "generate 3D" button.
contentpca3D <- eventReactive(input$generatepca3d, {
withProgress(message = 'pca 3D', {
incProgress(1/4, detail = "collecting PCA")
pcaGenesli <- genesPca()$li
incProgress(2/4, detail = "collecting colors")
pcaColor <- pcaColor()
pcaGroup <- pcaGroup()
incProgress(3/4, detail = "creating 3D plot")
plotly::plot_ly(data = pcaGenesli, x = pcaGenesli$Axis1, y = pcaGenesli$Axis2, z = pcaGenesli$Axis3,
type = "scatter3d", mode = "markers", marker = list(size = input$pca3ddotsize),
color = pcaGroup, colors = pcaColor,
text = sublibs()$samplename) %>%
layout(scene = list(
xaxis = list(title = "Axis1"),
yaxis = list(title = "Axis2"),
zaxis = list(title = "Axis3")))
})
})
output$pca3D <- plotly::renderPlotly({
contentpca3D()
})
#
# Histogram ("check plot") of a single gene's expression across samples, for
# either the full data set or the current subset, with optional per-group
# faceting and a dashed marker for one selected sample.
contentcheckplot <- eventReactive(input$updatecheckplot, {
withProgress(message = 'checkplot', {
incProgress(1/4, detail = "data collection")
if(input$dataCheckplot == "total"){
# NOTE(review): tbl_df() is deprecated (as_tibble() is its replacement) and
# %T>% setnames() renames the column by reference via data.table — confirm
# this combination is still intended.
d <- genes[paste(input$geneNamescheckplot), ] %>% t %>% tbl_df() %T>% setnames("geneName")
d$group <-as.factor(libs$group)
}
else {
subgenes <- subgenes()
sublibs <- sublibs()
d <- subgenes[paste(input$geneNamescheckplot), ] %>% t %>% tbl_df() %T>% setnames("geneName")
d$group <- as.factor(sublibs$group)
}
incProgress(2/4, detail = "generation")
g <- d %>% ggplot(aes(geneName), group = group) +
geom_histogram(binwidth = 1) +
theme_light() +
xlab(paste(input$geneNamescheckplot))
incProgress(3/4, detail = "(faceting) and annotating")
if(input$facetcheckplot){
g <- g + facet_grid(group ~ .)
}
# Mark the selected sample's expression value with a dashed red line and label.
if(input$sampleNamescheckplot != "None"){
if(input$dataCheckplot == "total"){
xvalue <- genes[paste(input$geneNamescheckplot), paste(input$sampleNamescheckplot)]
yvalue <- sum(genes[paste(input$geneNamescheckplot), ] == xvalue)
g <- g + geom_vline(xintercept = xvalue, colour = "red", linetype = "dashed") +
annotate("text", x = xvalue, y = yvalue + 1, label = paste(input$sampleNamescheckplot), colour = "red")
} else {
subgenes <- subgenes()
xvalue <- subgenes[paste(input$geneNamescheckplot), paste(input$sampleNamescheckplot)]
yvalue <- sum(subgenes[paste(input$geneNamescheckplot), ] == xvalue)
g <- g + geom_vline(xintercept = xvalue, colour = "red", linetype = "dashed") +
annotate("text", x = xvalue, y = yvalue + 1, label = paste(input$sampleNamescheckplot), colour = "red")
}
}
g
})
})
output$checkplot <- renderPlot({
contentcheckplot()
})
# Gene selector for the check plot: greps the typed pattern against row names
# of the chosen data set and offers the matches in a dropdown.
contentgeneNamescheckplotUI <- eventReactive(input$updatelistcheckplot, {
withProgress(message = 'checkplot genes', {
incProgress(1/3, detail = "searching...")
if(input$dataCheckplot == "total"){
checkplotGrep <- rownames(genes) %>% grep(input$geneNameCheckplot, .) %>% rownames(genes)[.]
} else {
checkplotGrep <- rownames(subgenes()) %>% grep(input$geneNameCheckplot, .) %>% rownames(subgenes())[.]
}
incProgress(2/3, detail = "creating UI")
selectInput("geneNamescheckplot", "Gene name:", c("None", checkplotGrep), selected = "None")
})
})
output$geneNamescheckplotUI <- renderUI({
contentgeneNamescheckplotUI()
})
# Sample selector for the check plot, from the full or subset library table.
contentsampleNamescheckplotUI <- eventReactive(input$updatelistcheckplot, {
withProgress(message = 'checkplot samples', {
incProgress(1/2, detail = "data collection")
if(input$dataCheckplot == "total"){
selectInput("sampleNamescheckplot", "Sample name:",
choices = c("None", paste(rownames(libs))), selected = "None"
)
} else {
selectInput("sampleNamescheckplot", "Sample name:",
choices = c("None", paste(rownames(sublibs()))), selected = "None"
)
}
})
})
output$sampleNamescheckplotUI <- renderUI({
contentsampleNamescheckplotUI()
})
# Plot container whose height follows the user-chosen value.
output$checkplotUI <- renderUI({
plotOutput("checkplot", height = input$heightcheckplot)
})
# CA
# ==
# Per-gene contribution (%) to the selected axis, computed from the absolute
# loadings of the PCA co table.
contentcontribDataFrame <- reactive({
withProgress(message = 'calculating contribution', {
incProgress(1/5, detail = "collecting PCA")
genesPca <- genesPca()
selectedAxis <- as.numeric(input$selectAxisCoA)
incProgress(2/5, detail = "calculating")
contribution <- abs(genesPca$co[,selectedAxis])/sum(abs(genesPca$co[,selectedAxis])) * 100
incProgress(3/5, detail = "checking results")
# Sanity check: the contributions are percentages and must sum to 100.
stopifnot(all.equal(sum(contribution), 100))
incProgress(4/5, detail = "creating data frame")
data.frame("geneName" = rownames(genesPca$co), "contribution" = contribution)
})
})
contribDataFrame <- reactive({
contentcontribDataFrame()
})
# Boxplot of the contribution distribution for the selected axis.
contentcontributionBoxplot <- eventReactive(input$generateContributionBoxplot, {
withProgress(message = 'contrib boxplot', {
incProgress(1/3, detail = "collecting contrib")
contribDataFrame <- contribDataFrame()
incProgress(2/3, detail = "generating boxplot")
boxplot(contribDataFrame$contribution, horizontal = T, main = paste0("Contribution on axis ", input$selectAxisCoA))
})
})
output$contributionBoxplot <- renderPlot({
contentcontributionBoxplot()
})
# Keep the genes whose contribution reaches the chosen cut-off.
# NOTE(review): input$nbGenesToKeep is used as a contribution threshold, not a
# gene count, despite its name — confirm the intended semantics.
contentthresholded <- eventReactive(input$applyThreshold, {
withProgress(message = 'applying threshold', {
incProgress(1/4, detail = "collecting threshold")
threshold <- input$nbGenesToKeep
contribDataFrame <- contribDataFrame()
incProgress(2/4, detail = "filtering contrib data frame")
indexesThresholded <- which(contribDataFrame$contribution >= threshold)
incProgress(3/4, detail = "creating list")
list(
names = contribDataFrame$geneName[indexesThresholded],
values = contribDataFrame$contribution[indexesThresholded]
)
})
})
thresholded <- reactive({
contentthresholded()
})
output$thresholdedPrint <- renderDataTable({
as.data.frame(thresholded())
})
contentnumberGenesThresholded <- eventReactive(input$applyThreshold, {
thresholded <- thresholded()
paste("Selection of", length(thresholded$names), "genes.")
})
output$numberGenesThresholded <- renderPrint({
contentnumberGenesThresholded()
})
# Same boxplot with the active threshold drawn as a dashed red line.
contentthresholdBoxplot <- eventReactive(input$applyThreshold, {
contribDataFrame <- contribDataFrame()
boxplot(contribDataFrame$contribution, horizontal = T, main = paste0("Contribution on axis ", input$selectAxisCoA))
abline(lty = 2, col = "red", v = input$nbGenesToKeep)
})
output$thresholdBoxplot <- renderPlot({
contentthresholdBoxplot()
})
#
# Correspondence analysis (ade4 dudi.coa, 3 axes kept) on the genes that
# survived the contribution threshold.
contentcoaGenes <- eventReactive(input$updateCoA, {
withProgress(message = 'CoA summary', {
incProgress(1/3, detail = "creating thresholded genes and libs")
subgenes <- subgenes()
thresholded <- thresholded()
mainGenes <- subgenes[rownames(subgenes) %in% thresholded$names, ]
incProgress(2/3, detail = "dudi.coa")
dudi.coa(mainGenes %>% t %>% as.data.frame, scannf = F, nf = 3)
})
})
coaGenes <- reactive({
contentcoaGenes()
})
output$coasummary <- renderPrint({
summary(coaGenes())
})
# Eigenvalue bar plot; %$% (magrittr) exposes the dudi object's eig component.
output$coaeigenvalues <- renderPlot({
barplot(coaGenes() %$% eig, xlab = "Eigenvalues")
})
# Colour / group selection for the CoA plots, mirroring pcaColor()/pcaGroup().
coaColor <- reactive({
if(input$COAcolor == 2){
paste(colorsPcaLi())
}
else if(input$COAcolor == 3){
kmeansColor()
}
else{
myColors <- sublibs()$group %>% levels %>% length %>% rainbow() %>% substr(., 1, nchar(.)-2)
myColors[sublibs()$group]
}
})
coaGroup <- reactive({
if(input$COAcolor == 2){
as.factor(colorsPcaLi())
}
else if(input$COAcolor == 3){
as.factor(kmeansColor())
}
else{
sublibs()$group
}
})
####
# Zoom state for the CoA plot on axes 1-2.
rangescoa12 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA12dblclick, {
brush <- input$COA12brush
if (!is.null(brush)) {
rangescoa12$x <- c(brush$xmin, brush$xmax)
rangescoa12$y <- c(brush$ymin, brush$ymax)
} else {
rangescoa12$x <- NULL
rangescoa12$y <- NULL
}
})
####
coa12 <- eventReactive(input$updateCoAPlots, {
coaGenesli <- coaGenes()$li
ggplot(coaGenesli, aes(x = Axis1, y = Axis2))
})
# Interactive CoA plot, axes 1-2: sample points plus labelled gene coordinates.
contentinteractCOA12 <- reactive({
withProgress(message = 'coa axes 1-2', {
incProgress(1/4, detail = "collecting COA")
coa12 <- coa12()
incProgress(2/4, detail = "collecting colors")
coaColor <- coaColor()
incProgress(3/4, detail = "creating plot")
coa12 +
geom_point(color = coaColor) +
coord_cartesian(xlim = rangescoa12$x, ylim = rangescoa12$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
geom_point(data = coaGenes()$co, aes(x = Comp1, y = Comp2)) +
geom_text(data = coaGenes()$co, aes(x = Comp1, y = Comp2, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
})
})
output$interactCOA12 <- renderPlot({
contentinteractCOA12()
})
####
# Brushed-sample table for CoA axes 1-2, rows coloured like their points.
contentdataCOA12 <- reactive({
withProgress(message = 'data 1-2', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(coaGenes()$li, input$COA12brush, xvar = "Axis1", yvar = "Axis2")
colour <- coaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataCOA12 <- renderDataTable({
contentdataCOA12()
})
####
# Zoom state and base plot for CoA axes 1-3.
rangescoa13 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA13dblclick, {
brush <- input$COA13brush
if (!is.null(brush)) {
rangescoa13$x <- c(brush$xmin, brush$xmax)
rangescoa13$y <- c(brush$ymin, brush$ymax)
} else {
rangescoa13$x <- NULL
rangescoa13$y <- NULL
}
})
####
coa13 <- eventReactive(input$updateCoAPlots, {
coaGenesli <- coaGenes()$li
ggplot(coaGenesli, aes(x = Axis1, y = Axis3))
})
# Interactive CoA plot, axes 1-3: sample points plus labelled gene coordinates.
contentinteractCOA13 <- reactive({
# Fix: the progress message read 'coa axes 1-2=3'; this view shows axes 1-3.
withProgress(message = 'coa axes 1-3', {
incProgress(1/4, detail = "collecting COA")
coa13 <- coa13()
incProgress(2/4, detail = "collecting colors")
coaColor <- coaColor()
incProgress(3/4, detail = "creating plot")
coa13 +
geom_point(color = coaColor) +
coord_cartesian(xlim = rangescoa13$x, ylim = rangescoa13$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
geom_point(data = coaGenes()$co, aes(x = Comp1, y = Comp3)) +
geom_text(data = coaGenes()$co, aes(x = Comp1, y = Comp3, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
})
})
output$interactCOA13 <- renderPlot({
contentinteractCOA13()
})
####
# Brushed-sample table for CoA axes 1-3, rows coloured like their points.
contentdataCOA13 <- reactive({
withProgress(message = 'data 1-3', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(coaGenes()$li, input$COA13brush, xvar = "Axis1", yvar = "Axis3")
colour <- coaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataCOA13 <- renderDataTable({
contentdataCOA13()
})
####
# Zoom state, base plot, interactive plot and brushed table for CoA axes 3-2.
rangescoa32 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$COA32dblclick, {
brush <- input$COA32brush
if (!is.null(brush)) {
rangescoa32$x <- c(brush$xmin, brush$xmax)
rangescoa32$y <- c(brush$ymin, brush$ymax)
} else {
rangescoa32$x <- NULL
rangescoa32$y <- NULL
}
})
####
coa32 <- eventReactive(input$updateCoAPlots, {
coaGenesli <- coaGenes()$li
ggplot(coaGenesli, aes(x = Axis3, y = Axis2))
})
contentinteractCOA32 <- reactive({
withProgress(message = 'coa axes 3-2', {
incProgress(1/4, detail = "collecting COA")
coa32 <- coa32()
incProgress(2/4, detail = "collecting colors")
coaColor <- coaColor()
incProgress(3/4, detail = "creating plot")
coa32 +
geom_point(color = coaColor) +
coord_cartesian(xlim = rangescoa32$x, ylim = rangescoa32$y) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_hline(yintercept = 0, alpha = 0.2) +
theme_light() +
geom_point(data = coaGenes()$co, aes(x = Comp3, y = Comp2)) +
geom_text(data = coaGenes()$co, aes(x = Comp3, y = Comp2, label = rownames(coaGenes()$co)), hjust = 0, nudge_x = 0.05)
})
})
output$interactCOA32 <- renderPlot({
contentinteractCOA32()
})
####
contentdataCOA32 <- reactive({
withProgress(message = 'data 3-2', {
incProgress(1/4, detail = "collecting sulibs")
sublibs <- sublibs()
incProgress(2/4, detail = "filtering")
res0 <- brushedPoints(coaGenes()$li, input$COA32brush, xvar = "Axis3", yvar = "Axis2")
colour <- coaColor()
resCol <- cbind(colour, sublibs[, -1])
res <- resCol[rownames(resCol) %in% rownames(res0), ]
colour2 <- res$colour
incProgress(3/4, detail = "creating datatable")
datatable(res, options = list(scrollX = TRUE)) %>% formatStyle(
"colour", target = 'row', backgroundColor = styleEqual(colour2, colour2)
)
})
})
output$dataCOA32 <- renderDataTable({
contentdataCOA32()
})
# 3D plotly scatter of the samples on CoA axes 1-3, mirroring the PCA 3D plot.
contentcoa3D <- eventReactive(input$generatecoa3d, {
withProgress(message = 'coa 3D', {
incProgress(1/4, detail = "collecting")
coaGenesli <- coaGenes()$li
incProgress(2/4, detail = "collecting colors")
coaColor <- coaColor()
coaGroup <- coaGroup()
incProgress(3/4, detail = "creating 3D plot")
plotly::plot_ly(data = coaGenesli, x = coaGenesli$Axis1, y = coaGenesli$Axis2, z = coaGenesli$Axis3,
type = "scatter3d", mode = "markers", marker = list(size = input$coa3ddotsize),
color = coaGroup, colors = coaColor,
text = sublibs()$samplename) %>%
layout(scene = list(
xaxis = list(title = "Axis1"),
yaxis = list(title = "Axis2"),
zaxis = list(title = "Axis3")))
})
})
output$coa3D <- plotly::renderPlotly({
contentcoa3D()
})
# EXPORT
# ======
# Persist the current gene / library subsets as RDS files in the working dir.
observeEvent(input$exportGenes, {
withProgress(message = "Exporting genes", {
incProgress(1/2, detail = "processing")
saveRDS(subgenes(), file = file.path(getwd(), paste(input$genesRDSName)))
})
})
observeEvent(input$exportLibs, {
withProgress(message = "Exporting libs", {
incProgress(1/2, detail = "processing")
# Consistency fix: build the destination path the same way as the genes
# export above instead of passing the bare file name.
saveRDS(sublibs(), file = file.path(getwd(), paste(input$libsRDSName)))
})
})
})
|
### Simulation software with individual cases
#
n = 8000 # only individual cases instead of 80 million
# stati = c("before", "infected new", "infected infectuous", "infected symptoms", "infected known", "recovered", "dead")
stati = factor(c("before", "new", "infectuous", "symptoms", "known", "recovered", "dead"))
shareDiscovered = 0.8 # how many of the contact persons are identified
anteilSymptomatisch = 0.5 # share of infectious cases that develop symptoms
anteilTest = 0.8 # how likely someone with symptoms takes a test
# Transition probabilities, indexed by days spent in the current state.
bn = .02 # infection rate (before -> new)
ni = c(0, 0.2, 0.5, 0.5, 1)
is = c(.1, .3, .5, .7, .8, .9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) * anteilSymptomatisch
sk = c(.1, .3, .5, .5, 1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1) * anteilTest
# NOTE: `sd` shadows stats::sd within this script.
sd = c(0,0,0, 0.001, 0.001, 0.001, 0.002, 0.002, 0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001)
sr = c(0,0,0,0,0,0,0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.1 ,0.1, .1, .1, .1, .1, .2, .2, .2, .2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
ir = sr # infectuous -> recovered uses the same schedule as symptoms -> recovered
# One row per person: current status, days spent in that status, whether the
# case is known (tested positive), and an age.
person = data.frame(p=1:n)
person$status = "before"
person$since = 1
person$known = FALSE
# Fix: sample(1:80) produced only 80 values that were silently recycled across
# all n rows; draw an independent age for every person instead.
person$age = sample(1:80, n, replace = TRUE)
# Transition probability for day `t` in the current state: when the schedule
# vector is shorter than t, keep using its last entry instead of returning NA
# (an NA previously crashed `if (runif(1) < NA)`).
pTrans = function(v, t) v[min(t, length(v))]
for (tage in 1:50) # tage = simulated day
{
print(tage)
for (i in 1:n)
{
oldStatus = person$status[i]
# Try each state transition with the probability scheduled for the number of
# days this person has spent in the current state.
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sd, person$since[i]))
person$status[i] = "dead"
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sr, person$since[i]))
person$status[i] = "recovered"
if (person$status[i] == "infectuous")
if (runif(1) < pTrans(is, person$since[i]))
person$status[i] = "symptoms"
if (person$status[i] == "infectuous")
if (runif(1) < pTrans(ir, person$since[i]))
person$status[i] = "recovered"
if (person$status[i] == "new")
if (runif(1) < pTrans(ni, person$since[i]))
person$status[i] = "infectuous"
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sk, person$since[i]))
person$known[i] = TRUE
# Contagious people meet one random other person and may infect them.
# Fixes: `in` is not an R operator (%in% is); the contact is drawn with
# sample.int() instead of a fractional runif() index; population size uses n
# instead of a hard-coded 8000.
if (person$status[i] %in% c("infectuous","symptoms"))
{
newPerson = sample.int(n, 1)
if (runif(1) < bn)
if (person$status[newPerson] == "before")
{
person$status[newPerson] = "new"
person$since[newPerson] = 1
}
}
# Reset the day counter on a state change, otherwise advance it.
if (person$status[i] != oldStatus)
person$since[i] = 1
else
person$since[i] = person$since[i]+1
}
print(table(person$status))
print(table(person$since))
}
# --- Interactive debugging leftovers, made syntactically valid so the file
# --- can be source()d; they inspect the simulation state after the run.
ni
i
person[i,]
print(table(person$status)) # fix: `print table(...)` is not valid R syntax
print(table(person$since)) # fix: as above
is[person$since[i]]
is[5] # fix: was `iS[5]` — R is case-sensitive and iS was never defined
person$since[i]
person$status[i]
i
runif(1) # fix: runif() without its required `n` argument errors
View(person)
# NOTE(review): bn is a scalar, so any index > 1 yields NA here — confirm
# which schedule vector was actually meant.
bn[person$since[1:10]]
# person is a data frame over n individuals with status (stati) and since (days in state)
bn
table(person$status)
# NOTE(review): `x` is not defined anywhere in this script; the factor() call
# below errors unless x is created first.
xf <- factor(x, levels = c("Male", "Man" , "Lady", "Female"), labels = c("Male", "Male", "Female", "Female"))
xf
str(xf)
data.frame(1, 1:10, sample(c("a","b","cf"), 10, replace = TRUE))
|
/simulation Einzelbürger.R
|
no_license
|
Data-Scientists-against-Disease/CaseNumbers
|
R
| false
| false
| 3,262
|
r
|
### Simulation software with individual cases
#
n = 8000 # only individual cases instead of 80 million
# stati = c("before", "infected new", "infected infectuous", "infected symptoms", "infected known", "recovered", "dead")
stati = factor(c("before", "new", "infectuous", "symptoms", "known", "recovered", "dead"))
shareDiscovered = 0.8 # how many of the contact persons are identified
anteilSymptomatisch = 0.5 # share of infectious cases that develop symptoms
anteilTest = 0.8 # how likely someone with symptoms takes a test
# Transition probabilities, indexed by days spent in the current state.
bn = .02 # infection rate (before -> new)
ni = c(0, 0.2, 0.5, 0.5, 1)
is = c(.1, .3, .5, .7, .8, .9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) * anteilSymptomatisch
sk = c(.1, .3, .5, .5, 1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1) * anteilTest
# NOTE: `sd` shadows stats::sd within this script.
sd = c(0,0,0, 0.001, 0.001, 0.001, 0.002, 0.002, 0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001)
sr = c(0,0,0,0,0,0,0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.1 ,0.1, .1, .1, .1, .1, .2, .2, .2, .2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
ir = sr # infectuous -> recovered uses the same schedule as symptoms -> recovered
# One row per person: current status, days spent in that status, whether the
# case is known (tested positive), and an age.
person = data.frame(p=1:n)
person$status = "before"
person$since = 1
person$known = FALSE
# Fix: sample(1:80) produced only 80 values that were silently recycled across
# all n rows; draw an independent age for every person instead.
person$age = sample(1:80, n, replace = TRUE)
# Transition probability for day `t` in the current state: when the schedule
# vector is shorter than t, keep using its last entry instead of returning NA
# (an NA previously crashed `if (runif(1) < NA)`).
pTrans = function(v, t) v[min(t, length(v))]
for (tage in 1:50) # tage = simulated day
{
print(tage)
for (i in 1:n)
{
oldStatus = person$status[i]
# Try each state transition with the probability scheduled for the number of
# days this person has spent in the current state.
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sd, person$since[i]))
person$status[i] = "dead"
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sr, person$since[i]))
person$status[i] = "recovered"
if (person$status[i] == "infectuous")
if (runif(1) < pTrans(is, person$since[i]))
person$status[i] = "symptoms"
if (person$status[i] == "infectuous")
if (runif(1) < pTrans(ir, person$since[i]))
person$status[i] = "recovered"
if (person$status[i] == "new")
if (runif(1) < pTrans(ni, person$since[i]))
person$status[i] = "infectuous"
if (person$status[i] == "symptoms")
if (runif(1) < pTrans(sk, person$since[i]))
person$known[i] = TRUE
# Contagious people meet one random other person and may infect them.
# Fixes: `in` is not an R operator (%in% is); the contact is drawn with
# sample.int() instead of a fractional runif() index; population size uses n
# instead of a hard-coded 8000.
if (person$status[i] %in% c("infectuous","symptoms"))
{
newPerson = sample.int(n, 1)
if (runif(1) < bn)
if (person$status[newPerson] == "before")
{
person$status[newPerson] = "new"
person$since[newPerson] = 1
}
}
# Reset the day counter on a state change, otherwise advance it.
if (person$status[i] != oldStatus)
person$since[i] = 1
else
person$since[i] = person$since[i]+1
}
print(table(person$status))
print(table(person$since))
}
# --- Interactive debugging leftovers, made syntactically valid so the file
# --- can be source()d; they inspect the simulation state after the run.
ni
i
person[i,]
print(table(person$status)) # fix: `print table(...)` is not valid R syntax
print(table(person$since)) # fix: as above
is[person$since[i]]
is[5] # fix: was `iS[5]` — R is case-sensitive and iS was never defined
person$since[i]
person$status[i]
i
runif(1) # fix: runif() without its required `n` argument errors
View(person)
# NOTE(review): bn is a scalar, so any index > 1 yields NA here — confirm
# which schedule vector was actually meant.
bn[person$since[1:10]]
# person is a data frame over n individuals with status (stati) and since (days in state)
bn
table(person$status)
# NOTE(review): `x` is not defined anywhere in this script; the factor() call
# below errors unless x is created first.
xf <- factor(x, levels = c("Male", "Man" , "Lady", "Female"), labels = c("Male", "Male", "Female", "Female"))
xf
str(xf)
data.frame(1, 1:10, sample(c("a","b","cf"), 10, replace = TRUE))
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Initial library/source files set-up
library(shiny)
source("analysis.R")
# Pages 1-7 instantiated and structured for use
# Page 1: introduction — frames the problem of mental health in tech workplaces
page_one <- tabPanel(
"Introduction",
titlePanel("Mental Health In The Technology Workplace"),
h3("The Problem Situation"),
p("Mental health has become an increasingly hot issue in the workplace. The mental health of employees
is being negatively impacted by numerous factors within the workplace. Due to this, there is an
increased strain on worker's and their performance, which is also negatively affecting company results.
Currently, mental health is a stigmatized topic in the workplace, and as such, is an area of high
confidentiality between employees and employers."),
h3("What is the Problem?"),
p("Approximately half of millennial workers and 75% of Gen-Zers have quit their jobs due to mental
health reasons according to a survey conducted by Mind Share Partners, SAP, and Qualtrics. About
46.6 million US adults are dealing with mental illnesses, including depression and anxiety disorders.
The main factors contributing to this is money, work load, and a negative working environment.")
)
# Page 2: background — motivation and the two research questions
page_two <- tabPanel(
"Background",
titlePanel("Why This Topic And What We Hope To Find"),
h3("Why we care about Mental Health"),
p("Our group cares about this topic because we have heard of first-hand accounts of employees
cracking under intense stress within tech companies like Amazon, and as young Informatics
majors looking to go into the tech field, we want to help improve the environment we go into
not just for ourselves, but for everyone."),
h3("What we Hope to Answer"),
tags$li("Which states in the US seem to have a higher proportion of their tech workers
affected by mental illnesses compared to other states?"),
tags$li("Within each state, what mental health illnesses/disorders are people in the tech field
dealing with?")
)
# Page 3: first visualization — `interactive_map` is built in analysis.R
page_three <- tabPanel(
"Interactive Map",
titlePanel("Prevalence Of Mental Disorders Throughout The United States"),
interactive_map,
p("With our interactive map, we actually get some surprising results. New Hampshire has the highest
proportion of people who took the survey living in New Hampshire that reported being affected by mental
illnesses/disorders at 86%. Kentucky and Alaska follow close behind at 80% and 77% respectively.
We expected tech company-heavy states such as California and Washington to have the highest proportion.
Of course, this could be due to the relatively small sample size of survey respondents, as there are
only a total of about 800 survey respondents within the United States. Still, over half of the survey
respondents in both Washington and California reported being affected by mental illnesses/disorders,
which is a very significant amount of people.")
)
# Page 4: second visualization — state selector ("summ_state") drives the
# "state_plot" output rendered by the server
page_four <- tabPanel(
"Bar Charts",
titlePanel("Mental Disorders By State"),
sidebarLayout(
sidebarPanel(
selectInput("summ_state", "State:",
choices = sort(unique(osmi_df$State)),
selected = "Washington")
),
mainPanel(
plotOutput("state_plot")
)
),
p("From these bar charts, we can see that for most states, anxiety and mood disorders are the most
common mental disorders that are plaguing tech workers in the United States. For example, in the
state of Washington, the top two most common mental disorders are mood disorders such as depression
and bipolar disorder with 25 affected survey respondents. Anxiety disorders such as social anxiety
and phobias are second with 17 affected survey respondents.")
)
# Page 5: conclusions drawn from the two visualizations
page_five <- tabPanel(
"Conclusion",
titlePanel("What We Found"),
h3("Results"),
p("With the data sets that we worked with and the visualizations we were able to create, we were able
to answer our research questions to an extent. The sample sizes weren't quite as large as we'd like,
and it's very important to realize that. But we were still able to gather some valuable insights
into how widespread mental health is in the United States within the tech industry. There are a
variety of mental disorders that are affecting workers, with some of the more common ones being
various forms of anxiety such as social anxiety, generalized anxiety, and panic disorders, or
various forms of mood disorders such as depression, bipolar disorder, and dysthymia. But there
are tech workers that are experiencing other types of disorders, like attention deficit
hyperactivity disorder and post traumatic stress disorder."),
p("Despite the somewhat low number of responses between the two data sets of the OSMI survey, we can
see that mental health is impacting tech employees in most states except Nevada, Missouri, Iowa,
Indiana, Tennessee, Georgia, Ohio, West Virginia, Maryland, and Virginia. That is most likely due
to the low number of survey responses, as I imagine there companies/offices in these states that
have some form of IT/engineering/software development departments. As mentioned earlier, we we're
expecting tech company-heavy states such as Washington, California and New York to be at the top of
our charts for the proportion of tech workers surveyed in those states that are dealing with a
mental illness/ disorder.")
)
# Page 6: technical details — data provenance and implementation notes.
# Fix: removed the trailing comma after the final tabPanel() argument; it
# passes an empty (missing) argument into `...`, which errors when shiny
# evaluates the argument list.
page_six <- tabPanel(
"Technical Info",
titlePanel("Technical Specifications"),
h3("The Data Sets"),
p("The two data sets we used were created by a non-profit organization called Open Sourcing Mental Illness.
Their mission is \"to raise awareness, educate, and provide resources to support mental wellness
in the tech and open source communities\". This data is gathered through a yearly
survey that OSMI conducts, and then compiles these survey results into data sets. These data sets
were created to help OSMI with their research in finding out what factors within tech and open source
communities have an effect on someone's mental health. We accessed this data through OSMI's official
website, where they host their data sets on Kaggle, free and available for anyone to use."),
h3("The Shiny App"),
p("With the help of a7, we were able to create a Shiny app similar to how we structured a7. To create
the Shiny app's site navigation, we decided to use a navbarPage() layout that allowed us to insert
different page layouts and have an easy way to navigate in between. For our second visualization,
we made sure to use a sidebarLayout() to have our charts render on the right and have the widgets
that allow the user to select states on the left."),
p("We also used a variety of packages to help wrangle our data and create visualizations. We primarily
used dplyr to wrangle that data and organize it into usable data frames containing relevant
information for the task at hand. We used ggplot2, reshape2, and scales to create the bar charts
used for our second visualization. We used knitr, leaflet, and tigris to create the map used for
our first visualization."),
h3("Tech Report"),
p(a(href = "https://github.com/jovecalimlim/AE_TeamWellness/wiki/Technical-Report",
"Here is the link to our Technical Report on GitHub."))
)
# Page 7: team member bios
page_seven <- tabPanel(
"About Us",
titlePanel("Meet The Team"),
h3("Ryan Bogatez"),
p("I am a fourth year student from Australia studying Information Systems and Marketing with
a focus on Computer Science. As I am graduating in 2019, I will look to enter the Information
Technology industry in the coming years to follow my passion of creating and distributing
software. The issue of mental health is one that will be of great importance to myself and
others around me as I enter the workforce due to the at times stressful nature of working.
I hope that as I go through my career, young individuals will have better resources and support
to help them in making their experiences at work increasingly positive."),
h3("Jove Calimlim"),
p("I'm a transfer student from Seattle Central College in my first quarter here at the University
of Washington in the Informatics major. I am pursuing a career in data science so taking INFO 201
has been a great experience for me. Learning the R language, Git, Markdown, and R's various packages
like dplyr and shiny has given me a strong basis for developing skills to become a better
data scientist. Doing this project was a good experience for my team and I as it allowed us to work
with data sets of our own choosing and finding insights to answer our own questions. As I finish this
class, I look forward to taking more data science, machine learning, algorithms, data structures, and
statistical classes in the future."),
h3("Samuel Christ"),
p("I’m an international student from Indonesia and just transferred from Edmonds Community College.
I am also a content creator on YouTube with more than 900K subscribers! I enjoy creating short
films and editing videos. I’m very glad I can get into the Informatics major
because there are a lot of designing classes in this major and that is what I want to do.
Regarding my skills, I think I am a creative person, can think in detail and can do design."),
h3("Eugene Lim"),
p("Eugene is a third year Informatics student. He is interested in software development and making
an impact in his local community. This class has taught Eugene many valuable skills like
troubleshooting and will carry over to future classes and future occupations. Mental health is
important to Eugene because there are many people in his life that are impacted. Hopefully in the
near future, Eugene wants to help people with mental illness through technology.")
)
# Top-level navigation: the seven pages assembled into a navbarPage, with the
# two visualization pages grouped under a dropdown menu
ui <- navbarPage(
"P3: Final Project",
page_one,
page_two,
navbarMenu("Visualizations",
page_three,
page_four
),
page_five,
page_six,
page_seven
)
# Server: renders the per-state mental-disorder bar chart for the state
# chosen in the "Bar Charts" tab. `get_state_disorders` comes from analysis.R.
# (The original comment said "draw a histogram" — leftover from the Shiny
# app template; no histogram is drawn here.)
server <- function(input, output) {
output$state_plot <- renderPlot({
get_state_disorders(input$summ_state)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/p3_shinyapp/app.R
|
no_license
|
jovecalimlim/AE_TeamWellness
|
R
| false
| false
| 10,511
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Initial library/source files set-up
library(shiny)
source("analysis.R")
# Pages 1-7 instantiated and structured for use
page_one <- tabPanel(
"Introduction",
titlePanel("Mental Health In The Technology Workplace"),
h3("The Problem Situation"),
p("Mental health has become an increasingly hot issue in the workplace. The mental health of employees
is being negatively impacted by numerous factors within the workplace. Due to this, there is an
increased strain on worker's and their performance, which is also negatively affecting company results.
Currently, mental health is a stigmatized topic in the workplace, and as such, is an area of high
confidentiality between employees and employers."),
h3("What is the Problem?"),
p("Approximately half of millennial workers and 75% of Gen-Zers have quit their jobs due to mental
health reasons according to a survey conducted by Mind Share Partners, SAP, and Qualtrics. About
46.6 million US adults are dealing with mental illnesses, including depression and anxiety disorders.
The main factors contributing to this is money, work load, and a negative working environment.")
)
page_two <- tabPanel(
"Background",
titlePanel("Why This Topic And What We Hope To Find"),
h3("Why we care about Mental Health"),
p("Our group cares about this topic because we have heard of first-hand accounts of employees
cracking under intense stress within tech companies like Amazon, and as young Informatics
majors looking to go into the tech field, we want to help improve the environment we go into
not just for ourselves, but for everyone."),
h3("What we Hope to Answer"),
tags$li("Which states in the US seem to have a higher proportion of their tech workers
affected by mental illnesses compared to other states?"),
tags$li("Within each state, what mental health illnesses/disorders are people in the tech field
dealing with?")
)
page_three <- tabPanel(
"Interactive Map",
titlePanel("Prevalence Of Mental Disorders Throughout The United States"),
interactive_map,
p("With our interactive map, we actually get some surprising results. New Hampshire has the highest
proportion of people who took the survey living in New Hampshire that reported being affected by mental
illnesses/disorders at 86%. Kentucky and Alaska follow close behind at 80% and 77% respectively.
We expected tech company-heavy states such as California and Washington to have the highest proportion.
Of course, this could be due to the relatively small sample size of survey respondents, as there are
only a total of about 800 survey respondents within the United States. Still, over half of the survey
respondents in both Washington and California reported being affected by mental illnesses/disorders,
which is a very significant amount of people.")
)
page_four <- tabPanel(
"Bar Charts",
titlePanel("Mental Disorders By State"),
sidebarLayout(
sidebarPanel(
selectInput("summ_state", "State:",
choices = sort(unique(osmi_df$State)),
selected = "Washington")
),
mainPanel(
plotOutput("state_plot")
)
),
p("From these bar charts, we can see that for most states, anxiety and mood disorders are the most
common mental disorders that are plaguing tech workers in the United States. For example, in the
state of Washington, the top two most common mental disorders are mood disorders such as depression
and bipolar disorder with 25 affected survey respondents. Anxiety disorders such as social anxiety
and phobias are second with 17 affected survey respondents.")
)
page_five <- tabPanel(
"Conclusion",
titlePanel("What We Found"),
h3("Results"),
p("With the data sets that we worked with and the visualizations we were able to create, we were able
to answer our research questions to an extent. The sample sizes weren't quite as large as we'd like,
and it's very important to realize that. But we were still able to gather some valuable insights
into how widespread mental health is in the United States within the tech industry. There are a
variety of mental disorders that are affecting workers, with some of the more common ones being
various forms of anxiety such as social anxiety, generalized anxiety, and panic disorders, or
various forms of mood disorders such as depression, bipolar disorder, and dysthymia. But there
are tech workers that are experiencing other types of disorders, like attention deficit
hyperactivity disorder and post traumatic stress disorder."),
p("Despite the somewhat low number of responses between the two data sets of the OSMI survey, we can
see that mental health is impacting tech employees in most states except Nevada, Missouri, Iowa,
Indiana, Tennessee, Georgia, Ohio, West Virginia, Maryland, and Virginia. That is most likely due
to the low number of survey responses, as I imagine there companies/offices in these states that
have some form of IT/engineering/software development departments. As mentioned earlier, we we're
expecting tech company-heavy states such as Washington, California and New York to be at the top of
our charts for the proportion of tech workers surveyed in those states that are dealing with a
mental illness/ disorder.")
)
# Page 6: technical details — data provenance and implementation notes.
# Fix: removed the trailing comma after the final tabPanel() argument; it
# passes an empty (missing) argument into `...`, which errors when shiny
# evaluates the argument list.
page_six <- tabPanel(
"Technical Info",
titlePanel("Technical Specifications"),
h3("The Data Sets"),
p("The two data sets we used were created by a non-profit organization called Open Sourcing Mental Illness.
Their mission is \"to raise awareness, educate, and provide resources to support mental wellness
in the tech and open source communities\". This data is gathered through a yearly
survey that OSMI conducts, and then compiles these survey results into data sets. These data sets
were created to help OSMI with their research in finding out what factors within tech and open source
communities have an effect on someone's mental health. We accessed this data through OSMI's official
website, where they host their data sets on Kaggle, free and available for anyone to use."),
h3("The Shiny App"),
p("With the help of a7, we were able to create a Shiny app similar to how we structured a7. To create
the Shiny app's site navigation, we decided to use a navbarPage() layout that allowed us to insert
different page layouts and have an easy way to navigate in between. For our second visualization,
we made sure to use a sidebarLayout() to have our charts render on the right and have the widgets
that allow the user to select states on the left."),
p("We also used a variety of packages to help wrangle our data and create visualizations. We primarily
used dplyr to wrangle that data and organize it into usable data frames containing relevant
information for the task at hand. We used ggplot2, reshape2, and scales to create the bar charts
used for our second visualization. We used knitr, leaflet, and tigris to create the map used for
our first visualization."),
h3("Tech Report"),
p(a(href = "https://github.com/jovecalimlim/AE_TeamWellness/wiki/Technical-Report",
"Here is the link to our Technical Report on GitHub."))
)
page_seven <- tabPanel(
"About Us",
titlePanel("Meet The Team"),
h3("Ryan Bogatez"),
p("I am a fourth year student from Australia studying Information Systems and Marketing with
a focus on Computer Science. As I am graduating in 2019, I will look to enter the Information
Technology industry in the coming years to follow my passion of creating and distributing
software. The issue of mental health is one that will be of great importance to myself and
others around me as I enter the workforce due to the at times stressful nature of working.
I hope that as I go through my career, young individuals will have better resources and support
to help them in making their experiences at work increasingly positive."),
h3("Jove Calimlim"),
p("I'm a transfer student from Seattle Central College in my first quarter here at the University
of Washington in the Informatics major. I am pursuing a career in data science so taking INFO 201
has been a great experience for me. Learning the R language, Git, Markdown, and R's various packages
like dplyr and shiny has given me a strong basis for developing skills to become a better
data scientist. Doing this project was a good experience for my team and I as it allowed us to work
with data sets of our own choosing and finding insights to answer our own questions. As I finish this
class, I look forward to taking more data science, machine learning, algorithms, data structures, and
statistical classes in the future."),
h3("Samuel Christ"),
p("I’m an international student from Indonesia and just transferred from Edmonds Community College.
I am also a content creator on YouTube with more than 900K subscribers! I enjoy creating short
films and editing videos. I’m very glad I can get into the Informatics major
because there are a lot of designing classes in this major and that is what I want to do.
Regarding my skills, I think I am a creative person, can think in detail and can do design."),
h3("Eugene Lim"),
p("Eugene is a third year Informatics student. He is interested in software development and making
an impact in his local community. This class has taught Eugene many valuable skills like
troubleshooting and will carry over to future classes and future occupations. Mental health is
important to Eugene because there are many people in his life that are impacted. Hopefully in the
near future, Eugene wants to help people with mental illness through technology.")
)
# UI pages set in navbarPage style
ui <- navbarPage(
"P3: Final Project",
page_one,
page_two,
navbarMenu("Visualizations",
page_three,
page_four
),
page_five,
page_six,
page_seven
)
# Server: renders the per-state mental-disorder bar chart for the state
# chosen in the "Bar Charts" tab. `get_state_disorders` comes from analysis.R.
# (The original comment said "draw a histogram" — leftover from the Shiny
# app template; no histogram is drawn here.)
server <- function(input, output) {
output$state_plot <- renderPlot({
get_state_disorders(input$summ_state)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renderer.R
\name{translator_html}
\alias{translator_html}
\alias{space_html}
\alias{newline_html}
\alias{renderer_html}
\title{html renderer using span tags and CSS}
\usage{
translator_html(x, size)
space_html()
newline_html()
renderer_html(
document = TRUE,
translator = translator_html,
formatter = formatter_html,
space = space_html,
newline = newline_html,
header = header_html(document, stylesheet),
footer = footer_html(document),
stylesheet = "default",
...
)
}
\arguments{
\item{x}{argument to the translator. Returned as is.}
\item{size}{font size. ignored}
\item{document}{logical. Indicates if the renderer should render a full document
or simply a \samp{<pre>} section containing the highlighted
tokens. This argument is used by the \code{\link{header_html}} and
\code{\link{footer_html}} to build appropriate header and footer.}
\item{translator}{Since the highlighted tokens are wrapped in a \samp{<pre>} tag,
no further translation is needed.}
\item{formatter}{html formatter. creates \samp{<span>} tags for all tokens.
See \code{\link{formatter_html}}}
\item{space}{returns a space character}
\item{newline}{returns a newline character}
\item{header}{html header. Depending on the \samp{document} argument, this will be a
function building the beginning of a
complete html document (starting with \samp{<html>}) including
css definitions or simply a function returning \samp{<pre>}
enabling the renderer to be used to just render the syntax
as part of a bigger document.}
\item{footer}{html footer. Depending on the \samp{document} argument, this will
either close the full document (close the \samp{</html>} tag)
or simply close the \samp{</pre>} tag.}
\item{stylesheet}{stylesheet to use. This is used by the header when document is TRUE.
The content of the stylesheet is copied verbatim into a \samp{<style>}
tag in that case. See \code{\link{getStyleFile}} for details
on where the stylesheet can be located}
\item{\dots}{Additional arguments. unused.}
}
\value{
A renderer suitable for the \samp{renderer} argument
of \code{\link{highlight}}
}
\description{
implementation of the \code{\link{renderer}} that renders
the information as a series of \samp{<span>} html tags
}
\seealso{
\code{\link{renderer}} for a description of the interface
this renderer is implementing.
\code{\link{highlight}} takes a renderer argument to which it
delegates rendering.
}
|
/man/renderer_html.Rd
|
no_license
|
cran/highlight
|
R
| false
| true
| 2,521
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renderer.R
\name{translator_html}
\alias{translator_html}
\alias{space_html}
\alias{newline_html}
\alias{renderer_html}
\title{html renderer using span tags and CSS}
\usage{
translator_html(x, size)
space_html()
newline_html()
renderer_html(
document = TRUE,
translator = translator_html,
formatter = formatter_html,
space = space_html,
newline = newline_html,
header = header_html(document, stylesheet),
footer = footer_html(document),
stylesheet = "default",
...
)
}
\arguments{
\item{x}{argument to the translator. Returned as is.}
\item{size}{font size. ignored}
\item{document}{logical. Indicates if the renderer should render a full document
or simply a \samp{<pre>} section containing the highlighted
tokens. This argument is used by the \code{\link{header_html}} and
\code{\link{footer_html}} to build appropriate header and footer.}
\item{translator}{Since the highlighted tokens are wrapped in a \samp{<pre>} tag,
no further translation is needed.}
\item{formatter}{html formatter. creates \samp{<span>} tags for all tokens.
See \code{\link{formatter_html}}}
\item{space}{returns a space character}
\item{newline}{returns a newline character}
\item{header}{html header. Depending on the \samp{document} argument, this will be a
function building the beginning of a
complete html document (starting with \samp{<html>}) including
css definitions or simply a function returning \samp{<pre>}
enabling the renderer to be used to just render the syntax
as part of a bigger document.}
\item{footer}{html footer. Depending on the \samp{document} argument, this will
either close the full document (close the \samp{</html>} tag)
or simply close the \samp{</pre>} tag.}
\item{stylesheet}{stylesheet to use. This is used by the header when document is TRUE.
The content of the stylesheet is copied verbatim into a \samp{<style>}
tag in that case. See \code{\link{getStyleFile}} for details
on where the stylesheet can be located}
\item{\dots}{Additional arguments. unused.}
}
\value{
A renderer suitable for the \samp{renderer} argument
of \code{\link{highlight}}
}
\description{
implementation of the \code{\link{renderer}} that renders
the information as a series of \samp{<span>} html tags
}
\seealso{
\code{\link{renderer}} for a description of the interface
this renderer is implementing.
\code{\link{highlight}} takes a renderer argument to which it
delegates rendering.
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# UI definition: a year-range slider ("year") filtering the Lahman teams data
# shown in the main plot. (The template comments below mentioned a histogram
# and "number of bins" — leftovers from the default Shiny skeleton; corrected.)
shinyUI(fluidPage(theme = shinytheme("sandstone"),
# Application title
titlePanel("Baseball Lahman Teams"),
# Sidebar with a slider selecting the range of seasons (2000-2019) to plot,
# plus a text output holding the app documentation
sidebarLayout(
sidebarPanel(
sliderInput("year", "Select Years", min=2000, max=2019, value=c(2000, 2019), step = 1),
h4("App Documentation:"),
textOutput("text")
),
# Main panel showing the plot rendered by the server as "distPlot"
mainPanel(
plotOutput("distPlot")
)
)
))
|
/Baseball Win Percentage App/ui.R
|
no_license
|
mabdih/Coursera-Week-4-Shiny-Assignment
|
R
| false
| false
| 866
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# Define UI for application that draws a histogram
shinyUI(fluidPage(theme = shinytheme("sandstone"),
# Application title
titlePanel("Baseball Lahman Teams"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("year", "Select Years", min=2000, max=2019, value=c(2000, 2019), step = 1),
h4("App Documentation:"),
textOutput("text")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
))
|
# ARD kernel: similarity that decays exponentially in the distance r, with
# the decay rate taken from the first element of the parameter list `theta`.
# Vectorized over r; returns exp(-theta[[1]] * r / 2).
ARDKernel <- function(r, theta) {
  decayRate <- theta[[1]]
  exp(-(decayRate * r) / 2)
}
|
/PGPLVM/PGPLVM-Trad/Modules/ARDKernel.R
|
no_license
|
AsgerMorville/GPLVM
|
R
| false
| false
| 82
|
r
|
# ARD kernel: similarity that decays exponentially in the distance r, with
# the decay rate taken from the first element of the parameter list `theta`.
# Vectorized over r; returns exp(-theta[[1]] * r / 2).
ARDKernel <- function(r, theta) {
  decayRate <- theta[[1]]
  exp(-(decayRate * r) / 2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_csv_spark.R
\name{read_csv_spark}
\alias{read_csv_spark}
\title{wczytuje dane zapisane za pomocą sparklyr::spark_write_csv()}
\usage{
read_csv_spark(sciezka, typyKolumn = NULL)
}
\arguments{
\item{sciezka}{ścieżka do pliku/katalogu przechowującego dane}
\item{typyKolumn}{opcjonalnie typy kolumn w formacie parametru
\code{col_types} funkcji \code{\link[readr]{read_csv}} (d - double, i -
integer, c - character)}
}
\value{
data.frame
}
\description{
sparklyr::spark_write_csv() zapisuje większe zbiory danych w częściach.
Ta funkcja umożliwia ich łatwe wczytywanie
}
|
/man/read_csv_spark.Rd
|
no_license
|
zozlak/MLAKdane
|
R
| false
| true
| 661
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_csv_spark.R
\name{read_csv_spark}
\alias{read_csv_spark}
\title{wczytuje dane zapisane za pomocą sparklyr::spark_write_csv()}
\usage{
read_csv_spark(sciezka, typyKolumn = NULL)
}
\arguments{
\item{sciezka}{ścieżka do pliku/katalogu przechowującego dane}
\item{typyKolumn}{opcjonalnie typy kolumn w formacie parametru
\code{col_types} funkcji \code{\link[readr]{read_csv}} (d - double, i -
integer, c - character)}
}
\value{
data.frame
}
\description{
sparklyr::spark_write_csv() zapisuje większe zbiory danych w częściach.
Ta funkcja umożliwia ich łatwe wczytywanie
}
|
library(nlme)
library(geiger)
# library() rather than require(): require() only warns and returns FALSE on
# a missing package, deferring the failure to a confusing error later.
library(phytools)
# Read in the overlap data files (made by overlap_vs_distance.R).
# `pattern` is a regular expression, so files ending in ".csv" are matched
# with "\\.csv$"; the glob-style "*.csv" was not a valid regex (a leading
# quantifier) and only matched by accident.
phfiles <- as.character(list.files(path="../data/Corrected_Matrices/Plant_herbivore_matrices", full.names=TRUE, pattern="\\.csv$"))
ppfiles <- as.character(list.files(path="../data/Corrected_Matrices/Stouffer_Ecology_Matrices", full.names=TRUE, pattern="\\.csv$"))
files = c(phfiles, ppfiles)
files1 <- as.character(list.files(path="../data/Overlap_dist/Plant_herbivore_matrices", full.names=TRUE, pattern="\\.csv$"))
files2 <- as.character(list.files(path="../data/Overlap_dist/Stouffer_Ecology_Matrices", full.names=TRUE, pattern="\\.csv$"))
altfiles = c(files1, files2)
# Read every overlap file into a list of data frames, tagging each with its
# source network name and network type (pp = Stouffer plant-pollinator set,
# ph = plant-herbivore set), then stack them into one data frame.
overlap_data <- list()
for (network in altfiles) {
  # Fix: the stripped prefix must match the directory these files actually
  # come from ("../data/Overlap_dist/", not "../data/Corrected/"). With the
  # old prefix nothing was stripped, the subfolder test below always failed,
  # and every network was labelled "ph". fixed = TRUE treats the prefix as a
  # literal string rather than a regex.
  netname = gsub('../data/Overlap_dist/', "", network, fixed = TRUE)
  subfolder = strsplit(strsplit(netname, '-')[[1]][1], '/')[[1]][1]
  if (subfolder == 'Stouffer_Ecology_Matrices') {
    nettype = 'pp'
  } else {
    nettype = 'ph'
  }
  message(paste0("Processing '", netname, "' "), appendLF = FALSE)
  overlap_data[[netname]] = read.csv(network, header=TRUE, row.names=1, check.names=FALSE, sep=',')
  overlap_data[[netname]]$network <- netname
  overlap_data[[netname]]$nettype <- nettype
  message("Done")
}
all_data <- do.call("rbind", overlap_data)
#### Build the tree
planttree=read.newick("../data/plant_phylogeny/dated_tree.new")
collapsed_tree=collapse.singles(planttree) #Collapses non-branching nodes (i.e., internal nodes on lines leading to only 1 species)
#### Directory of species names and networks
speciesnames=read.table('../data/plant_phylogeny/corrected_names.tsv',header=TRUE,sep=',')
# Lower-cased copies of both name columns for case-insensitive matching
speciesnames$new_lower=tolower(speciesnames$new_names)
speciesnames$original_lower=tolower(speciesnames$original_name)
# Remove all problematic characters
# NOTE(review): the first gsub appears to replace a space with a space — the
# pattern was likely a non-breaking space that got mangled in transit; confirm.
speciesnames$original_lower=gsub(' ', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub('_', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub('-', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub("'", ' ', speciesnames$original_lower)
# # Set up the chi-square table
# NOTE(review): levels() only works here if `family` is a factor; read.table
# returns character columns by default since R 4.0 (stringsAsFactors = FALSE),
# in which case levels() is NULL and chitable gets zero columns — confirm the
# intended R version / read options.
chitable=matrix(nrow=length(files),ncol=length(levels(speciesnames$family)),data=0)
colnames(chitable)=levels(speciesnames$family)
#### For each network file, count how many plant species belong to each family
# and store the counts in this network's row of chitable.
for (n in seq_along(files)) {  # was 1:length(files): yields c(1, 0) when empty
  file = files[n]
  print(file)
  netname = gsub('../data/Corrected_Matrices/', '', file)
  if (file %in% phfiles) {
    nettype = 'ph'
  } else {
    nettype = 'pp'
  }
  network_matrix = read.csv(file, header=TRUE, sep=',', row.names=1)
  if ('X' %in% colnames(network_matrix)) { # Trim any superfluous columns
    network_matrix$X = NULL
  }
  numeric_matrix = matrix(nrow=nrow(network_matrix), ncol=ncol(network_matrix))
  # Binarise the interaction matrix: any positive entry counts as a link
  for (i in seq_len(nrow(network_matrix))) {
    for (j in seq_len(ncol(network_matrix))) {
      if (as.numeric(as.character(network_matrix[i,j])) > 0) {
        numeric_matrix[i,j] = 1
      }
      if (as.numeric(as.character(network_matrix[i,j])) == 0) {
        numeric_matrix[i,j] = 0
      }
    }
  }
  rownames(numeric_matrix) = rownames(network_matrix)
  colnames(numeric_matrix) = colnames(network_matrix)
  # colnames are plants: map each plant column to its family
  fammat = matrix(nrow=ncol(numeric_matrix), ncol=2)
  colnames(fammat) = c("species", "family")
  r = 1
  for (sp in colnames(numeric_matrix)) {
    # Column names encode the species plus a role suffix: "_ph_" in the
    # plant-herbivore networks, "_m_pl_" in the plant-pollinator ones
    if (length(as.vector(strsplit(sp, '_ph_'))[[1]]) == 2) {
      species = strsplit(sp, '_ph_')[[1]][1]
    } else {
      species = strsplit(sp, '_m_pl_')[[1]][1]
    }
    spec = gsub('_', ' ', species)
    tax = subset(speciesnames, speciesnames$original_lower == spec)
    # If necessary, try again with underscores included
    if (dim(tax)[1] == 0) {
      tax = subset(speciesnames, speciesnames$original_lower == species)
    }
    fammat[r,1] = species
    fammat[r,2] = as.character(tax$family[1])
    r = r + 1
  }
  # Count species per family and write the counts into this network's row
  famresults = as.data.frame(t(tapply(fammat[,2], fammat[,2], length)))
  for (colname in colnames(famresults)) {
    chitable[n, colname] = famresults[, colname]
  }
}
chitable=chitable[,which(colSums(chitable)>0)]
chitable=chitable[,which(!colnames(chitable)=="unidentified")]
chisq.test(chitable)
# Restrict to families represented by more than 10 species
rest_chitable=chitable[,which(colSums(chitable)>10)]
chisq.test(rest_chitable)
# In both cases, families are really, really unevenly represented.
|
/code/chitest.R
|
no_license
|
cirtwill/Bipartite-essentials
|
R
| false
| false
| 4,204
|
r
|
# chitest.R -- chi-square test of plant-family representation across
# ecological interaction networks. Reads binary plant-herbivore ('ph') and
# plant-pollinator ('pp') interaction matrices, maps each plant column to a
# taxonomic family via a lookup table, tabulates family counts per network,
# and runs chisq.test() on the resulting network-by-family table.
library(nlme)
library(geiger)
require(phytools) # provides read.newick(); NOTE(review): prefer library() for hard dependencies
# Read in the overlap data files (made by overlap_vs_distance.R)
phfiles<-as.character(list.files(path="../data/Corrected_Matrices/Plant_herbivore_matrices", full.names=TRUE, pattern="*.csv"))
ppfiles<-as.character(list.files(path="../data/Corrected_Matrices/Stouffer_Ecology_Matrices", full.names=TRUE, pattern="*.csv"))
files=c(phfiles,ppfiles)
files1<-as.character(list.files(path="../data/Overlap_dist/Plant_herbivore_matrices", full.names=TRUE, pattern="*.csv"))
files2<-as.character(list.files(path="../data/Overlap_dist/Stouffer_Ecology_Matrices", full.names=TRUE, pattern="*.csv"))
altfiles=c(files1,files2)
# Read every overlap file into a named list, tagging each with its source
# network name and type ('pp' = plant-pollinator, 'ph' = plant-herbivore).
overlap_data <- list()
for(network in altfiles){
# NOTE(review): the pattern '../data/Corrected/' cannot match the
# '../data/Overlap_dist/...' paths built above, so netname keeps the full
# path here -- confirm this is intended.
netname=gsub('../data/Corrected/',"",network)
subfolder=strsplit(strsplit(netname,'-')[[1]][1],'/')[[1]][1]
if(subfolder=='Stouffer_Ecology_Matrices'){
nettype='pp' } else {
nettype='ph' }
message(paste0("Processing '",netname,"' "),appendLF=FALSE)
overlap_data[[netname]]=read.csv(network,header=TRUE,row.names=1,check.names=FALSE,sep=',')
overlap_data[[netname]]$network <- netname
overlap_data[[netname]]$nettype <- nettype
message("Done")
}
# Stack all per-network overlap tables into one data frame.
# NOTE(review): all_data is never used below -- possibly left over from
# overlap_vs_distance.R.
all_data <- do.call("rbind", overlap_data)
#### Build the tree
# NOTE(review): planttree/collapsed_tree are also unused in this script.
planttree=read.newick("../data/plant_phylogeny/dated_tree.new")
collapsed_tree=collapse.singles(planttree) #Collapses non-branching nodes (i.e., internal nodes on lines leading to only 1 species)
#### Directory of species names and networks
# NOTE(review): a .tsv file read with sep=',' -- verify the file's actual delimiter.
speciesnames=read.table('../data/plant_phylogeny/corrected_names.tsv',header=TRUE,sep=',')
speciesnames$new_lower=tolower(speciesnames$new_names)
speciesnames$original_lower=tolower(speciesnames$original_name)
# Remove all problematic characters
# NOTE(review): the first gsub() below replaces a space with a space (a no-op);
# presumably one of the two characters was a non-breaking space before this
# file was copied around -- verify against the original source.
speciesnames$original_lower=gsub(' ', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub('_', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub('-', ' ', speciesnames$original_lower)
speciesnames$original_lower=gsub("'", ' ', speciesnames$original_lower)
# # Set up the chi-square table
# One row per network, one column per plant family.
# NOTE(review): levels() is NULL unless 'family' is a factor, so this relies
# on read.table() converting strings to factors (pre-R-4.0 default) -- confirm.
chitable=matrix(nrow=length(files),ncol=length(levels(speciesnames$family)),data=0)
colnames(chitable)=levels(speciesnames$family)
#### Split the pairs 100x
# Fill chitable: for each network, count how many plant columns fall in each family.
for(n in 1:length(files)){
file=files[n]
print(file)
netname=gsub('../data/Corrected_Matrices/','',file)
if(file%in%phfiles){
nettype='ph'} else {
nettype='pp'}
network_matrix=read.csv(file,header=TRUE,sep=',',row.names=1)
if('X'%in%colnames(network_matrix)){ # Trim any superfluous columns
network_matrix$X=NULL }
# Binarise the matrix: any positive interaction strength becomes 1
# (presence/absence), zero stays 0.
numeric_matrix=matrix(nrow=nrow(network_matrix),ncol=ncol(network_matrix))
#Ensure all columns are numeric
for(i in 1:nrow(network_matrix)){
for(j in 1:ncol(network_matrix)){
if(as.numeric(as.character(network_matrix[i,j]))>0){
numeric_matrix[i,j]=1 }
if(as.numeric(as.character(network_matrix[i,j]))==0){
numeric_matrix[i,j]=0 }
}}
rownames(numeric_matrix)=rownames(network_matrix)
colnames(numeric_matrix)=colnames(network_matrix)
# colnames are plants
# Map each plant column name (stripped of its '_ph_'/'_m_pl_' suffix) to a family.
fammat=matrix(nrow=ncol(numeric_matrix),ncol=2)
colnames(fammat)=c("species","family")
r=1
for(sp in colnames(numeric_matrix)){
if(length(as.vector(strsplit(sp,'_ph_'))[[1]])==2){
species=strsplit(sp,'_ph_')[[1]][1]
} else {
species=strsplit(sp,'_m_pl_')[[1]][1]
}
# First try the space-separated spelling, then fall back to underscores.
spec=gsub('_',' ',species)
tax=subset(speciesnames,speciesnames$original_lower==spec)
# If necessary, try again with underscores included
if(dim(tax)[1]==0){
tax=subset(speciesnames,speciesnames$original_lower==species)
}
fammat[r,1]=species
fammat[r,2]=as.character(tax$family[1]) # NA if the species is missing from the lookup table
r=r+1
}
# Count species per family in this network and copy into row n of chitable.
famresults=as.data.frame(t(tapply(fammat[,2],fammat[,2],length)))
for(colname in colnames(famresults)){
chitable[n,colname]=famresults[,colname]
}
}
# Drop empty families and the 'unidentified' pseudo-family before testing.
chitable=chitable[,which(colSums(chitable)>0)]
chitable=chitable[,which(!colnames(chitable)=="unidentified")]
chisq.test(chitable)
# Restrict to families represented by more than 10 species
rest_chitable=chitable[,which(colSums(chitable)>10)]
chisq.test(rest_chitable)
# In both cases, families are really, really unevenly represented.
|
\name{linLogHist}
\alias{linLogHist}
\title{lin-log transition histogram}
\description{draw histograms that gradually transform from a linear to a logarithmic axis}
\usage{linLogHist(x, steps=100, breaks=20, col="blue", las=1,
xlab=deparse(substitute(x)), xlim=range(x, finite=TRUE), box=TRUE, parexpr, endexpr,
axisargs=NULL, axisargs2=NULL, firstplot=TRUE, lastplot=TRUE,
write_t=TRUE, values_t=NULL, ...)}
\arguments{
\item{x}{x values to be plotted in animation}
\item{steps}{Number of steps in transition. DEFAULT: 100}
\item{breaks}{\code{\link{hist}} breaks. DEFAULT: 20}
\item{col}{\code{\link{hist}} color. DEFAULT: "blue"}
\item{las}{\code{\link{par}} LabelAxisStyle (numbers upright). DEFAULT: 1}
\item{xlab}{Label for the x axis. DEFAULT: deparse(substitute(x))}
\item{xlim}{xlim range in non-log units. DEFAULT: range(x, finite=TRUE)}
\item{box}{Draw box at the end to overplot \code{\link{abline}s} crossing the box? DEFAULT: TRUE}
\item{parexpr}{Characterized Expression to set \code{\link{par}}, e.g. \code{parexpr='par(mar=c(2,0.5,1.5,0.5), mgp=c(1.8,1,0))'}}
\item{endexpr}{Characterized Expression executed at the end of the plot, eg. \code{endexpr='mtext("Probability Density", line=-1, adj=0.03, outer=T)'}}
\item{axisargs}{List of arguments passed to \code{\link{logVals}}, like base. DEFAULT: NULL}
\item{axisargs2}{List of arguments passed to \code{\link{logAxis}} in the final plot. DEFAULT: NULL}
\item{firstplot}{plot on linear scale first? DEFAULT: TRUE}
\item{lastplot}{plot on logarithmic scale at the end? DEFAULT: TRUE}
\item{write_t}{write transformation value in lower right corner? DEFAULT: TRUE}
\item{values_t}{Supply vector with values for transformation (1/t). Overrides steps. If you have a better algorithm than I do, please let me know! DEFAULT: NULL}
\item{\dots}{further arguments passed to \code{\link{hist}}, like freq, main, xlim, ylab. Excluded: x, xaxt, possibly add}
}
\value{Returned invisibly: transformation values used. Plotted: \code{steps} number of images.}
\note{It's best to save the plots into a pdf or wrap it within\cr \code{png("Transition\%03d"); linLogHist(x); dev.off()}}
\author{Berry Boessenkool, \email{berry-b@gmx.de}, April 2015}
\seealso{\code{\link{linLogTrans}} }
\examples{
x <- rlnorm(700, m=3)
hist(x, col=4)
hist(log10(x), xaxt="n"); logAxis(1); hist(log10(x), col=4, add=TRUE)
op <- par()
linLogHist(x, xlab="ddd", breaks=30, steps=3, write_t=FALSE, yaxt="n", freq=FALSE,
main="", parexpr='par(mar=c(2,0.5,1.5,0.5), mgp=c(1.8,1,0))',
endexpr='mtext("Probability Density", line=-1.2, adj=0.03, outer=T)')
par(op)
\dontrun{
## Rcmd check --as-cran doesn't like to open external devices such as pdf,
## so this example is excluded from running in the checks.
pdf("LinLogTransitionAnimation.pdf")
linLogHist(x, main="Example Transition", steps=20, freq=FALSE)
dev.off()
# if you have FFmpeg installed, you can use the animation package like this:
library2(animation)
saveVideo(linLogHist(x, steps=50), video.name="linlog_anim.mp4", interval=0.08,
ffmpeg="C:/ffmpeg-20150424-git-cd69c0e-win64-static/bin/ffmpeg.exe")
}
}
\keyword{dplot}
\keyword{hplot}
\keyword{dynamic}
|
/man/linLogHist.Rd
|
no_license
|
costagc/berryFunctions
|
R
| false
| false
| 3,185
|
rd
|
\name{linLogHist}
\alias{linLogHist}
\title{lin-log transition histogram}
\description{draw histograms that gradually transform from a linear to a logarithmic axis}
\usage{linLogHist(x, steps=100, breaks=20, col="blue", las=1,
xlab=deparse(substitute(x)), xlim=range(x, finite=TRUE), box=TRUE, parexpr, endexpr,
axisargs=NULL, axisargs2=NULL, firstplot=TRUE, lastplot=TRUE,
write_t=TRUE, values_t=NULL, ...)}
\arguments{
\item{x}{x values to be plotted in animation}
\item{steps}{Number of steps in transition. DEFAULT: 100}
\item{breaks}{\code{\link{hist}} breaks. DEFAULT: 20}
\item{col}{\code{\link{hist}} color. DEFAULT: "blue"}
\item{las}{\code{\link{par}} LabelAxisStyle (numbers upright). DEFAULT: 1}
\item{xlab}{Label for the x axis. DEFAULT: deparse(substitute(x))}
\item{xlim}{xlim range in non-log units. DEFAULT: range(x, finite=TRUE)}
\item{box}{Draw box at the end to overplot \code{\link{abline}s} crossing the box? DEFAULT: TRUE}
\item{parexpr}{Characterized Expression to set \code{\link{par}}, e.g. \code{parexpr='par(mar=c(2,0.5,1.5,0.5), mgp=c(1.8,1,0))'}}
\item{endexpr}{Characterized Expression executed at the end of the plot, eg. \code{endexpr='mtext("Probability Density", line=-1, adj=0.03, outer=T)'}}
\item{axisargs}{List of arguments passed to \code{\link{logVals}}, like base. DEFAULT: NULL}
\item{axisargs2}{List of arguments passed to \code{\link{logAxis}} in the final plot. DEFAULT: NULL}
\item{firstplot}{plot on linear scale first? DEFAULT: TRUE}
\item{lastplot}{plot on logarithmic scale at the end? DEFAULT: TRUE}
\item{write_t}{write transformation value in lower right corner? DEFAULT: TRUE}
\item{values_t}{Supply vector with values for transformation (1/t). Overrides steps. If you have a better algorithm than I do, please let me know! DEFAULT: NULL}
\item{\dots}{further arguments passed to \code{\link{hist}}, like freq, main, xlim, ylab. Excluded: x, xaxt, possibly add}
}
\value{Returned invisibly: transformation values used. Plotted: \code{steps} number of images.}
\note{It's best to save the plots into a pdf or wrap it within\cr \code{png("Transition\%03d"); linLogHist(x); dev.off()}}
\author{Berry Boessenkool, \email{berry-b@gmx.de}, April 2015}
\seealso{\code{\link{linLogTrans}} }
\examples{
x <- rlnorm(700, m=3)
hist(x, col=4)
hist(log10(x), xaxt="n"); logAxis(1); hist(log10(x), col=4, add=TRUE)
op <- par()
linLogHist(x, xlab="ddd", breaks=30, steps=3, write_t=FALSE, yaxt="n", freq=FALSE,
main="", parexpr='par(mar=c(2,0.5,1.5,0.5), mgp=c(1.8,1,0))',
endexpr='mtext("Probability Density", line=-1.2, adj=0.03, outer=T)')
par(op)
\dontrun{
## Rcmd check --as-cran doesn't like to open external devices such as pdf,
## so this example is excluded from running in the checks.
pdf("LinLogTransitionAnimation.pdf")
linLogHist(x, main="Example Transition", steps=20, freq=FALSE)
dev.off()
# if you have FFmpeg installed, you can use the animation package like this:
library2(animation)
saveVideo(linLogHist(x, steps=50), video.name="linlog_anim.mp4", interval=0.08,
ffmpeg="C:/ffmpeg-20150424-git-cd69c0e-win64-static/bin/ffmpeg.exe")
}
}
\keyword{dplot}
\keyword{hplot}
\keyword{dynamic}
|
# NOTE(review): this file is a pasted S-PLUS console transcript -- the
# "> find.b" prompt line below makes it invalid to source() as-is, and
# parameters<-() / ms() are S-PLUS modelling functions, not base R.
# Confirm the intended runtime before reusing.
> find.b
function(time, fail, beta0 = 1)
{
# Find beta for grouped Crow-AMSAA
#
# requires function "gbeta.like" (Grouped Beta Likelihood)
#
# Bundle the grouped observation times and failure counts.
ms.data <- data.frame(time, fail)
# Attach the starting value of beta as a model parameter (S-PLUS idiom).
parameters(ms.data) <- list(beta = beta0)
print(ms.data)
# Minimise the grouped-beta negative log-likelihood over beta.
# NOTE(review): the fit is assigned but never returned or summarised; the
# function's value is only the (invisible) value of this assignment -- TODO
# confirm whether ms.fit was meant to be returned.
ms.fit <- ms( ~ gbeta.like(time, fail, beta), data = ms.data) #
#summary(ms.fit) #
#ms.pro <- profile(ms.fit)
#print(ms.pro)
}
|
/find-b.R
|
no_license
|
robertandrewstevens/R
|
R
| false
| false
| 382
|
r
|
# NOTE(review): this file is a pasted S-PLUS console transcript -- the
# "> find.b" prompt line below makes it invalid to source() as-is, and
# parameters<-() / ms() are S-PLUS modelling functions, not base R.
# Confirm the intended runtime before reusing.
> find.b
function(time, fail, beta0 = 1)
{
# Find beta for grouped Crow-AMSAA
#
# requires function "gbeta.like" (Grouped Beta Likelihood)
#
# Bundle the grouped observation times and failure counts.
ms.data <- data.frame(time, fail)
# Attach the starting value of beta as a model parameter (S-PLUS idiom).
parameters(ms.data) <- list(beta = beta0)
print(ms.data)
# Minimise the grouped-beta negative log-likelihood over beta.
# NOTE(review): the fit is assigned but never returned or summarised; the
# function's value is only the (invisible) value of this assignment -- TODO
# confirm whether ms.fit was meant to be returned.
ms.fit <- ms( ~ gbeta.like(time, fail, beta), data = ms.data) #
#summary(ms.fit) #
#ms.pro <- profile(ms.fit)
#print(ms.pro)
}
|
# Demonstrates R's lexical scoping: assignment inside a function creates a
# local binding and never modifies the global variable of the same name.
txt <- "College"

my_function <- function() {
  txt <- "SSIM" # local; shadows the global `txt` only inside this call
  paste("saneeth is from", txt)
}

my_function() # "saneeth is from SSIM"
txt           # still "College" -- the global was untouched
|
/Saneeth/global_varIable.R
|
no_license
|
tactlabs/r-samples
|
R
| false
| false
| 114
|
r
|
# Demonstrates R's lexical scoping: assignment inside a function creates a
# local binding and never modifies the global variable of the same name.
txt <- "College"

my_function <- function() {
  txt <- "SSIM" # local; shadows the global `txt` only inside this call
  paste("saneeth is from", txt)
}

my_function() # "saneeth is from SSIM"
txt           # still "College" -- the global was untouched
|
##########################
# CSEA (cell-type specific expression analysis) for Maher tcf4 mice.
# Tests whether genes differentially expressed in Tcf4 mutant mice are
# enriched in pSI-defined cell-type marker sets, and writes the significant
# enrichments (one sheet per age group) to an Excel workbook.
library(pSI)
library(WriteXLS)
if(FALSE){
# One-time preprocessing, normally skipped: build and cache the pSI tables.
# The sourced helper script defines `mouse` and `human` pSI objects.
source("/users/ajaffe/Lieber/Projects/KeriM/csea_functions.R")
labs = read.delim('tables/CSEA_mouse_cell_types.csv',stringsAsFactors = F)
rownames(labs) = toupper(labs$Label); labs[,1] = NULL
# Upper-case gene symbols so mouse/human case differences don't break
# downstream symbol matching.
mouse = lapply(mouse,function(x) {
	names(x) = toupper(names(x));
	rownames(x) = toupper(rownames(x));
	x})
# BUG FIX: this previously iterated over `mouse`, silently overwriting the
# human pSI tables with the mouse ones before caching. It must transform
# `human` itself.
human = lapply(human,function(x) {
	names(x) = toupper(names(x));
	rownames(x) = toupper(rownames(x));
	x})
save(labs,mouse,human,file = 'rdas/csea_human_mouse_pSI.rda')
} else{
load('rdas/csea_human_mouse_pSI.rda')
}
load('rdas/mouse_tcf4_ages_DE_objects_DESeq2.rda')
# Genes for enrichment: significant (FDR < 0.05) symbols per age group,
# upper-cased to match the pSI table rownames.
gList = lapply(outGeneList, function(x) toupper(x$Symbol[x$padj< 0.05 & !is.na(x$padj)]))
sapply(gList,length)
# Fisher's exact enrichment of each gene list against the mouse pSI table.
enrList = lapply(gList, fisher.iteration, pSIs = mouse$psi.out,
	background="data.set")
# Keep only cell types significant at any pSI threshold, and attach
# human-readable descriptions from the labs lookup.
sigEnrList = lapply(enrList, function(x) x[rowSums(x < 0.05) > 0,])
sigEnrList = lapply(sigEnrList,function(x) cbind(Description = labs[rownames(x),1],x))
WriteXLS(sigEnrList,ExcelFileName ="tables/CSEA_enrichment_tcf4_mouse_all_ages.xlsx",
	row.names= TRUE)
|
/tcf4_mouse/csea_tcf4_mouse.R
|
no_license
|
LieberInstitute/PTHS_mouse
|
R
| false
| false
| 1,214
|
r
|
##########################
# CSEA (cell-type specific expression analysis) for Maher tcf4 mice.
# Tests whether genes differentially expressed in Tcf4 mutant mice are
# enriched in pSI-defined cell-type marker sets, and writes the significant
# enrichments (one sheet per age group) to an Excel workbook.
library(pSI)
library(WriteXLS)
if(FALSE){
# One-time preprocessing, normally skipped: build and cache the pSI tables.
# The sourced helper script defines `mouse` and `human` pSI objects.
source("/users/ajaffe/Lieber/Projects/KeriM/csea_functions.R")
labs = read.delim('tables/CSEA_mouse_cell_types.csv',stringsAsFactors = F)
rownames(labs) = toupper(labs$Label); labs[,1] = NULL
# Upper-case gene symbols so mouse/human case differences don't break
# downstream symbol matching.
mouse = lapply(mouse,function(x) {
	names(x) = toupper(names(x));
	rownames(x) = toupper(rownames(x));
	x})
# BUG FIX: this previously iterated over `mouse`, silently overwriting the
# human pSI tables with the mouse ones before caching. It must transform
# `human` itself.
human = lapply(human,function(x) {
	names(x) = toupper(names(x));
	rownames(x) = toupper(rownames(x));
	x})
save(labs,mouse,human,file = 'rdas/csea_human_mouse_pSI.rda')
} else{
load('rdas/csea_human_mouse_pSI.rda')
}
load('rdas/mouse_tcf4_ages_DE_objects_DESeq2.rda')
# Genes for enrichment: significant (FDR < 0.05) symbols per age group,
# upper-cased to match the pSI table rownames.
gList = lapply(outGeneList, function(x) toupper(x$Symbol[x$padj< 0.05 & !is.na(x$padj)]))
sapply(gList,length)
# Fisher's exact enrichment of each gene list against the mouse pSI table.
enrList = lapply(gList, fisher.iteration, pSIs = mouse$psi.out,
	background="data.set")
# Keep only cell types significant at any pSI threshold, and attach
# human-readable descriptions from the labs lookup.
sigEnrList = lapply(enrList, function(x) x[rowSums(x < 0.05) > 0,])
sigEnrList = lapply(sigEnrList,function(x) cbind(Description = labs[rownames(x),1],x))
WriteXLS(sigEnrList,ExcelFileName ="tables/CSEA_enrichment_tcf4_mouse_all_ages.xlsx",
	row.names= TRUE)
|
###########################################################################/**
# @RdocClass AromaUnitFracBCnBinaryFile
#
# @title "The AromaUnitFracBCnBinaryFile class"
#
# \description{
# @classhierarchy
#
# An AromaUnitFracBCnBinaryFile is a @see "AromaUnitTabularBinaryFile".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "AromaUnitTabularBinaryFile".}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author
#*/###########################################################################
# Constructor (R.oo / R.methodsS3 style).
# NOTE(review): the Rdoc above says the class "is a AromaUnitTabularBinaryFile",
# but the constructor actually extends AromaUnitSignalBinaryFile -- one of the
# two is stale; confirm against the intended class hierarchy.
setConstructorS3("AromaUnitFracBCnBinaryFile", function(...) {
extend(AromaUnitSignalBinaryFile(...), "AromaUnitFracBCnBinaryFile"
)
})
# extractRawAlleleBFractions(): extract this file's unit signals as a
# RawAlleleBFractions object (or any class the caller supplies via `clazz`)
# by delegating to extractRawGenomicSignals().
setMethodS3("extractRawAlleleBFractions", "AromaUnitFracBCnBinaryFile", function(this, ..., clazz=RawAlleleBFractions) {
extractRawGenomicSignals(this, ..., clazz=clazz)
})
|
/R/AromaUnitFracBCnBinaryFile.R
|
no_license
|
HenrikBengtsson/aroma.core
|
R
| false
| false
| 863
|
r
|
###########################################################################/**
# @RdocClass AromaUnitFracBCnBinaryFile
#
# @title "The AromaUnitFracBCnBinaryFile class"
#
# \description{
# @classhierarchy
#
# An AromaUnitFracBCnBinaryFile is a @see "AromaUnitTabularBinaryFile".
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "AromaUnitTabularBinaryFile".}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author
#*/###########################################################################
# Constructor (R.oo / R.methodsS3 style).
# NOTE(review): the Rdoc above says the class "is a AromaUnitTabularBinaryFile",
# but the constructor actually extends AromaUnitSignalBinaryFile -- one of the
# two is stale; confirm against the intended class hierarchy.
setConstructorS3("AromaUnitFracBCnBinaryFile", function(...) {
extend(AromaUnitSignalBinaryFile(...), "AromaUnitFracBCnBinaryFile"
)
})
# extractRawAlleleBFractions(): extract this file's unit signals as a
# RawAlleleBFractions object (or any class the caller supplies via `clazz`)
# by delegating to extractRawGenomicSignals().
setMethodS3("extractRawAlleleBFractions", "AromaUnitFracBCnBinaryFile", function(this, ..., clazz=RawAlleleBFractions) {
extractRawGenomicSignals(this, ..., clazz=clazz)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGenomeAndMask.R
\name{getGenomeAndMask}
\alias{getGenomeAndMask}
\title{getGenomeAndMask}
\usage{
getGenomeAndMask(genome, mask=NULL)
}
\arguments{
\item{genome}{the genome object or genome identifier.}
\item{mask}{the mask of the genome in a valid RS format (data.frame, GRanges, BED-like file...). If mask is \code{\link{NULL}}, it will try to get a mask from the genome. If mask is \code{\link{NA}} it will return an empty mask. (Default=NULL)}
}
\value{
A list with two elements: genome and mask. Genome and mask are GRanges objects.
}
\description{
Function to obtain a valid genome and mask pair given a valid genome identifier and optionally a mask.
If the genome is not a \code{\link{BSgenome}} object or a character string uniquely identifying a \code{\link{BSgenome}} package installed, it will return the genome "as is". If a mask is provided, it will simply return it. Otherwise it will return the mask returned by \code{\link{getMask}(genome)} or an empty mask if genome is not a valid \code{\link{BSgenome}} or \code{\link{BSgenome}} identifier.
}
\note{
This function is memoised (cached) using the \code{\link{memoise}} package. To empty the cache, use \code{\link{forget}(getGenomeAndMask)}
}
\examples{
getGenomeAndMask("hg19", mask=NA)
getGenomeAndMask(genome=data.frame(c("chrA", "chrB"), c(15000000, 10000000)), mask=NA)
}
\seealso{
\code{\link{getMask}}, \code{\link{getGenome}}, \code{\link{characterToBSGenome}}, \code{\link{maskFromBSGenome}}, \code{\link{emptyCacheRegioneR}}
}
|
/man/getGenomeAndMask.Rd
|
no_license
|
bernatgel/regioneR
|
R
| false
| true
| 1,590
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGenomeAndMask.R
\name{getGenomeAndMask}
\alias{getGenomeAndMask}
\title{getGenomeAndMask}
\usage{
getGenomeAndMask(genome, mask=NULL)
}
\arguments{
\item{genome}{the genome object or genome identifier.}
\item{mask}{the mask of the genome in a valid RS format (data.frame, GRanges, BED-like file...). If mask is \code{\link{NULL}}, it will try to get a mask from the genome. If mask is \code{\link{NA}} it will return an empty mask. (Default=NULL)}
}
\value{
A list with two elements: genome and mask. Genome and mask are GRanges objects.
}
\description{
Function to obtain a valid genome and mask pair given a valid genome identifier and optionally a mask.
If the genome is not a \code{\link{BSgenome}} object or a character string uniquely identifying a \code{\link{BSgenome}} package installed, it will return the genome "as is". If a mask is provided, it will simply return it. Otherwise it will return the mask returned by \code{\link{getMask}(genome)} or an empty mask if genome is not a valid \code{\link{BSgenome}} or \code{\link{BSgenome}} identifier.
}
\note{
This function is memoised (cached) using the \code{\link{memoise}} package. To empty the cache, use \code{\link{forget}(getGenomeAndMask)}
}
\examples{
getGenomeAndMask("hg19", mask=NA)
getGenomeAndMask(genome=data.frame(c("chrA", "chrB"), c(15000000, 10000000)), mask=NA)
}
\seealso{
\code{\link{getMask}}, \code{\link{getGenome}}, \code{\link{characterToBSGenome}}, \code{\link{maskFromBSGenome}}, \code{\link{emptyCacheRegioneR}}
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin wrapper around the compiled C++ routine `_DLMtool_bhnoneq_LL`.
# Unlike the wrappers below, it carries no roxygen block and is therefore
# not exported. Presumably a negative log-likelihood for a non-equilibrium
# Beverton-Holt mean-length model (inputs: mean lengths Lbar, sample sizes
# ss, growth parameters Linf/K, length at first capture Lc) -- TODO confirm
# in the C++ source; hand edits here are lost on regeneration.
bhnoneq_LL <- function(stpar, year, Lbar, ss, Linf, K, Lc, nbreaks) {
.Call('_DLMtool_bhnoneq_LL', PACKAGE = 'DLMtool', stpar, year, Lbar, ss, Linf, K, Lc, nbreaks)
}
#' Rcpp version of the Projection function for calculating Reference Yield
#'
#'
#' @param lnF internal
#' @param Mmat internal
#' @param Wac internal
#' @param Mac internal
#' @param Pc internal
#' @param N_c internal
#' @param SSN_c internal
#' @param Biomass_c internal
#' @param VBiomass_c internal
#' @param SSB_c internal
#' @param Vc internal
#' @param retAc internal
#' @param hc internal
#' @param R0ac internal
#' @param proyears internal
#' @param nareas internal
#' @param maxage internal
#' @param movc internal
#' @param SSBpRc internal
#' @param aRc internal
#' @param bRc internal
#' @param SRrelc internal
#' @param Spat_targc internal
#'
#' @export
#' @keywords internal
doprojPI_cpp <- function(lnF, Mmat, Wac, Mac, Pc, N_c, SSN_c, Biomass_c, VBiomass_c, SSB_c, Vc, retAc, hc, R0ac, proyears, nareas, maxage, movc, SSBpRc, aRc, bRc, SRrelc, Spat_targc) {
.Call('_DLMtool_doprojPI_cpp', PACKAGE = 'DLMtool', lnF, Mmat, Wac, Mac, Pc, N_c, SSN_c, Biomass_c, VBiomass_c, SSB_c, Vc, retAc, hc, R0ac, proyears, nareas, maxage, movc, SSBpRc, aRc, bRc, SRrelc, Spat_targc)
}
#' Generate length composition of catch
#'
#' Generate size composition of catch given sample of catch-at-age,
#' expected length-at-age, and standard deviation of length-at-age.
#' Model assumes length-at-age is normally distributed, and that
#' selectivity is size-dependant
#'
#' @param CAL_bins vector of catch-at-length size bins
#' @param CAL_binsmid vector (nbins = length(CAL_bins) - 1) of mid-points for catch-at-length size bins
#' @param SL matrix (nbins, nyears) of selectivity-at-length class for each year
#' @param CAL_ESS effective sample size of catch-at-length data
#' @param CAL_nsamp sample size of catch-at-length data
#' @param CN matrix (nyears, maxage) of catch-at-age for each year
#' @param LaA matrix (maxage, nyears) of expected length-at-age for each year
#' @param LaASD matrix (maxage, nyears) of standard deviation of length-at-age for each year
#' @param truncSD optional argument to truncate the length-at-age distribution at `truncSD` standard deviations
#' e.g., a value of 2 truncates the length-at-age distribution at two standard deviations (set to 0 to ignore (default))
#'
#' @export
genLenComp <- function(CAL_bins, CAL_binsmid, SL, CAL_ESS, CAL_nsamp, CN, LaA, LaASD, truncSD) {
.Call('_DLMtool_genLenComp', PACKAGE = 'DLMtool', CAL_bins, CAL_binsmid, SL, CAL_ESS, CAL_nsamp, CN, LaA, LaASD, truncSD)
}
#' Internal estimation function for LSRA and LSRA2 functions
#'
#' Rcpp version of R code
#' @param param a numeric value representing log(R0)
#' @param FF_a numeric value, recent fishign mortality rate (apical F)
#' @param Chist a vector of historical catch observations [nyears]
#' @param M_a numeric value, natural mortality rate
#' @param Mat_age_a a vector of maturity at age [nage]
#' @param Wt_age_a a vector of weight at age [nage]
#' @param sel_a a vector of selectivity at age [nage]
#' @param Recdevs_a a vector of recruitment deviations [nyears]
#' @param h_a a numeric value of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @param Umax maximum harvest rate per year
#' @author T. Carruthers with an amateur attempt at converting to Rcpp by A. Hordyk (but it works!)
#' @useDynLib DLMtool
#' @keywords internal
#' @export
LSRA_opt_cpp <- function(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a, Recdevs_a, h_a, Umax) {
.Call('_DLMtool_LSRA_opt_cpp', PACKAGE = 'DLMtool', param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a, Recdevs_a, h_a, Umax)
}
#' Internal SRA MCMC CPP code
#'
#' Rcpp version of R code
#' @param nits number of iterations
#' @param pars vector of parameters
#' @param JumpCV jump cv vector
#' @param adapt adapt vector
#' @param parLB lower bounds
#' @param parUB upper bounds
#' @param R0ind index for R0
#' @param inflind index for inflection
#' @param slpind index for slope
#' @param RDind index for recruitment deviations
#' @param nyears number of projection years
#' @param maxage maximum age
#' @param M Natural mortality
#' @param Mat_age A vector of maturity at age
#' @param Wt_age A vector of weight at age
#' @param Chist_a A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Umax A numeric value representing the maximum harvest rate for any age class (rejection of sims where this occurs)
#' @param h steepness of SRR
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param CAAadj internal parameter
#' @param sigmaR A numeric value representing the prior standard deviation of log space recruitment deviations
#'
#' @author A. Hordyk
#' @export
LSRA_MCMC_sim <- function(nits, pars, JumpCV, adapt, parLB, parUB, R0ind, inflind, slpind, RDind, nyears, maxage, M, Mat_age, Wt_age, Chist_a, Umax, h, CAA, CAAadj, sigmaR) {
.Call('_DLMtool_LSRA_MCMC_sim', PACKAGE = 'DLMtool', nits, pars, JumpCV, adapt, parLB, parUB, R0ind, inflind, slpind, RDind, nyears, maxage, M, Mat_age, Wt_age, Chist_a, Umax, h, CAA, CAAadj, sigmaR)
}
#' Rcpp version of the Optimization function that returns the squared difference between user
#' specified and calculated movement parameters.
#'
#' The user specifies the probability of staying in the same area and spatial
#' heterogeneity (both in the unfished state). This function returns the
#' squared difference between these values and those produced by the three
#' logit movement model.
#'
#' This is paired with getmov to find the correct movement model.
#'
#' @param par Three parameters in the logit space that control the four
#' probabilities of moving between 2 areas
#' @param prb User specified probability that individuals in area 1 remain in
#' that area (unfished conditions)
#' @param frac User specified fraction of individuals found in area 1 (unfished
#' conditions)
#'
#' @author T. Carruthers with an amateur attempt at converting to Rcpp by A. Hordyk (but it works!)
#' @useDynLib DLMtool
#' @export
movfit_Rcpp <- function(par, prb, frac) {
.Call('_DLMtool_movfit_Rcpp', PACKAGE = 'DLMtool', par, prb, frac)
}
#' Rcpp version of the q Optimizer
#'
#' Optimize for catchability coefficient
#'
#' NOTE(review): this file is generated by Rcpp::compileAttributes(); the
#' roxygen fixes below (added missing `depc`, removed the duplicated `movc`
#' and the `SSBpRc` entry that is not in the signature) must also be applied
#' to the C++ source, or they will be overwritten on regeneration.
#'
#' @param lnIn internal
#' @param depc internal
#' @param Fc internal
#' @param Perrc internal
#' @param Mc internal
#' @param hc internal
#' @param Mac internal
#' @param Wac internal
#' @param R0c internal
#' @param Vc internal
#' @param nyears internal
#' @param maxage internal
#' @param movc internal
#' @param Spat_targc internal
#' @param SRrelc internal
#' @param aRc internal
#' @param bRc internal
#'
#' @export
#' @keywords internal
optQ_cpp <- function(lnIn, depc, Fc, Perrc, Mc, hc, Mac, Wac, R0c, Vc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc) {
.Call('_DLMtool_optQ_cpp', PACKAGE = 'DLMtool', lnIn, depc, Fc, Perrc, Mc, hc, Mac, Wac, R0c, Vc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc)
}
#' Population dynamics model for one annual time-step
#'
#' Project population forward one time-step given current numbers-at-age and total mortality
#'
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param SSBcurr A numeric vector of length nareas with the current spawning biomass in each area
#' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
#' @param Zcurr A numeric matrix (maxage, nareas) with total mortality-at-age in each area
#' @param PerrYr A numeric value with recruitment deviation for current year
#' @param hs Steepness of SRR
#' @param R0a Numeric vector with unfished recruitment by area
#' @param SSBpR Numeric vector with unfished spawning stock per recruit by area
#' @param aR Numeric vector with Ricker SRR a parameter by area
#' @param bR Numeric vector with Ricker SRR b parameter by area
#' @param mov Numeric matrix (nareas by nareas) with the movement matrix
#' @param SRrel Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
#'
#' @author A. Hordyk
#'
#' @export
#' @keywords internal
popdynOneTScpp <- function(nareas, maxage, SSBcurr, Ncurr, Zcurr, PerrYr, hs, R0a, SSBpR, aR, bR, mov, SRrel) {
.Call('_DLMtool_popdynOneTScpp', PACKAGE = 'DLMtool', nareas, maxage, SSBcurr, Ncurr, Zcurr, PerrYr, hs, R0a, SSBpR, aR, bR, mov, SRrel)
}
#' Population dynamics model in CPP
#'
#' Project population forward pyears given current numbers-at-age and total mortality, etc
#' for the future years
#'
#' NOTE(review): this file is generated by Rcpp::compileAttributes(); the
#' roxygen fixes below (removed `SSBcurr`, which is not a parameter of this
#' function, and renamed `mov` to the actual argument `movc`) must also be
#' applied to the C++ source, or they will be overwritten on regeneration.
#'
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
#' @param pyears The number of years to project the population forward
#' @param M_age Numeric matrix (maxage, pyears) with natural mortality by age and year
#' @param Asize_c Numeric vector (length nareas) with size of each area
#' @param MatAge Numeric vector with proportion mature by age
#' @param WtAge Numeric matrix (maxage, pyears) with weight by age and year
#' @param Vuln Numeric matrix (maxage, pyears) with vulnerability by age and year
#' @param Retc Numeric matrix (maxage, pyears) with retention by age and year
#' @param Prec Numeric vector (pyears) with recruitment error
#' @param movc Numeric matrix (nareas by nareas) with the movement matrix
#' @param SRrelc Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
#' @param Effind Numeric vector (length pyears) with the fishing effort by year
#' @param Spat_targc Integer. Spatial targetting
#' @param hc Numeric. Steepness of stock-recruit relationship
#' @param R0c Numeric vector of length nareas with unfished recruitment by area
#' @param SSBpRc Numeric vector of length nareas with unfished spawning per recruit by area
#' @param aRc Numeric. Ricker SRR a value by area
#' @param bRc Numeric. Ricker SRR b value by area
#' @param Qc Numeric. Catchability coefficient
#' @param Fapic Numeric. Apical F value
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param control Integer. 1 to use q and effort to calculate F, 2 to use Fapic (apical F) and
#' vulnerablity to calculate F.
#'
#' @author A. Hordyk
#'
#' @export
#' @keywords internal
popdynCPP <- function(nareas, maxage, Ncurr, pyears, M_age, Asize_c, MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc, R0c, SSBpRc, aRc, bRc, Qc, Fapic, maxF, control) {
.Call('_DLMtool_popdynCPP', PACKAGE = 'DLMtool', nareas, maxage, Ncurr, pyears, M_age, Asize_c, MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc, R0c, SSBpRc, aRc, bRc, Qc, Fapic, maxF, control)
}
#' Rcpp version of the Projection Optimizer
#'
#' Optimize for MSY and calculate MSY reference points
#'
#' NOTE(review): this file is generated by Rcpp::compileAttributes(); the
#' roxygen fixes below (removed the duplicated `movc` and the `SSBpRc` entry
#' that is not in the signature) must also be applied to the C++ source, or
#' they will be overwritten on regeneration.
#'
#' @param lnIn internal
#' @param Mc internal
#' @param hc internal
#' @param Mac internal
#' @param Wac internal
#' @param R0c internal
#' @param Vc internal
#' @param retAc internal
#' @param nyears internal
#' @param maxage internal
#' @param movc internal
#' @param Spat_targc internal
#' @param SRrelc internal
#' @param aRc internal
#' @param bRc internal
#' @param proyears internal
#' @param Control internal
#'
#' @export
#' @keywords internal
projOpt_cpp <- function(lnIn, Mc, hc, Mac, Wac, R0c, Vc, retAc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc, proyears, Control) {
.Call('_DLMtool_projOpt_cpp', PACKAGE = 'DLMtool', lnIn, Mc, hc, Mac, Wac, R0c, Vc, retAc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc, proyears, Control)
}
|
/R/RcppExports.R
|
no_license
|
Lijiuqi/DLMtool
|
R
| false
| false
| 12,016
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Internal .Call wrapper for the compiled C++ routine '_DLMtool_bhnoneq_LL'.
# No roxygen documentation was generated for this wrapper.
# NOTE(review): from the argument names this appears to be a log-likelihood
# for a non-equilibrium mean-length mortality estimator (Lbar = mean lengths,
# ss = sample sizes, Linf/K = growth parameters, Lc = length at full
# selectivity, nbreaks = number of mortality breakpoints) -- confirm against
# the C++ implementation before relying on this description.
bhnoneq_LL <- function(stpar, year, Lbar, ss, Linf, K, Lc, nbreaks) {
    .Call('_DLMtool_bhnoneq_LL', PACKAGE = 'DLMtool', stpar, year, Lbar, ss, Linf, K, Lc, nbreaks)
}
#' Rcpp version of the Projection function for calculating Reference Yield
#'
#' Auto-generated .Call wrapper (Rcpp::compileAttributes) around the compiled
#' C++ projection routine; all arguments are passed straight through to the
#' C++ code.
#'
#' @param lnF internal
#' @param Mmat internal
#' @param Wac internal
#' @param Mac internal
#' @param Pc internal
#' @param N_c internal
#' @param SSN_c internal
#' @param Biomass_c internal
#' @param VBiomass_c internal
#' @param SSB_c internal
#' @param Vc internal
#' @param retAc internal
#' @param hc internal
#' @param R0ac internal
#' @param proyears internal
#' @param nareas internal
#' @param maxage internal
#' @param movc internal
#' @param SSBpRc internal
#' @param aRc internal
#' @param bRc internal
#' @param SRrelc internal
#' @param Spat_targc internal
#'
#' @export
#' @keywords internal
doprojPI_cpp <- function(lnF, Mmat, Wac, Mac, Pc, N_c, SSN_c, Biomass_c, VBiomass_c, SSB_c, Vc, retAc, hc, R0ac, proyears, nareas, maxage, movc, SSBpRc, aRc, bRc, SRrelc, Spat_targc) {
    .Call('_DLMtool_doprojPI_cpp', PACKAGE = 'DLMtool', lnF, Mmat, Wac, Mac, Pc, N_c, SSN_c, Biomass_c, VBiomass_c, SSB_c, Vc, retAc, hc, R0ac, proyears, nareas, maxage, movc, SSBpRc, aRc, bRc, SRrelc, Spat_targc)
}
#' Generate length composition of catch
#'
#' Generate size composition of catch given sample of catch-at-age,
#' expected length-at-age, and standard deviation of length-at-age.
#' Model assumes length-at-age is normally distributed, and that
#' selectivity is size-dependent
#'
#' @param CAL_bins vector of catch-at-length size bins
#' @param CAL_binsmid vector (nbins = length(CAL_bins) - 1) of mid-points for catch-at-length size bins
#' @param SL matrix (nbins, nyears) of selectivity-at-length class for each year
#' @param CAL_ESS effective sample size of catch-at-length data
#' @param CAL_nsamp sample size of catch-at-length data
#' @param CN matrix (nyears, maxage) of catch-at-age for each year
#' @param LaA matrix (maxage, nyears) of expected length-at-age for each year
#' @param LaASD matrix (maxage, nyears) of standard deviation of length-at-age for each year
#' @param truncSD optional argument to truncate the length-at-age distribution at `truncSD` standard deviations
#' e.g., a value of 2 truncates the length-at-age distribution at two standard deviations (set to 0 to ignore (default))
#'
#' @export
genLenComp <- function(CAL_bins, CAL_binsmid, SL, CAL_ESS, CAL_nsamp, CN, LaA, LaASD, truncSD) {
    .Call('_DLMtool_genLenComp', PACKAGE = 'DLMtool', CAL_bins, CAL_binsmid, SL, CAL_ESS, CAL_nsamp, CN, LaA, LaASD, truncSD)
}
#' Internal estimation function for LSRA and LSRA2 functions
#'
#' Rcpp version of R code
#' @param param a numeric value representing log(R0)
#' @param FF_a numeric value, recent fishing mortality rate (apical F)
#' @param Chist a vector of historical catch observations [nyears]
#' @param M_a numeric value, natural mortality rate
#' @param Mat_age_a a vector of maturity at age [nage]
#' @param Wt_age_a a vector of weight at age [nage]
#' @param sel_a a vector of selectivity at age [nage]
#' @param Recdevs_a a vector of recruitment deviations [nyears]
#' @param h_a a numeric value of steepness values of the Bev-Holt Stock-Recruitment relationship
#' @param Umax maximum harvest rate per year
#' @author T. Carruthers with an amateur attempt at converting to Rcpp by A. Hordyk (but it works!)
#' @useDynLib DLMtool
#' @keywords internal
#' @export
LSRA_opt_cpp <- function(param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a, Recdevs_a, h_a, Umax) {
    .Call('_DLMtool_LSRA_opt_cpp', PACKAGE = 'DLMtool', param, FF_a, Chist, M_a, Mat_age_a, Wt_age_a, sel_a, Recdevs_a, h_a, Umax)
}
#' Internal SRA MCMC CPP code
#'
#' Rcpp version of R code
#' @param nits number of iterations
#' @param pars vector of parameters
#' @param JumpCV jump CV vector
#' @param adapt adapt vector
#' @param parLB lower bounds
#' @param parUB upper bounds
#' @param R0ind index for R0
#' @param inflind index for inflection
#' @param slpind index for slope
#' @param RDind index for recruitment deviations
#' @param nyears number of projection years (NOTE(review): `Chist_a` is
#' documented as nyears of historical catch, so this may actually be the
#' number of historical years -- confirm against the C++ source)
#' @param maxage maximum age
#' @param M Natural mortality
#' @param Mat_age A vector of maturity at age
#' @param Wt_age A vector of weight at age
#' @param Chist_a A vector of historical catch observations (nyears long) going back to unfished conditions
#' @param Umax A numeric value representing the maximum harvest rate for any age class (rejection of sims where this occurs)
#' @param h steepness of SRR
#' @param CAA A matrix nyears (rows) by nages (columns) of catch at age (age 1 to maxage in length)
#' @param CAAadj internal parameter
#' @param sigmaR A numeric value representing the prior standard deviation of log space recruitment deviations
#'
#' @author A. Hordyk
#' @export
LSRA_MCMC_sim <- function(nits, pars, JumpCV, adapt, parLB, parUB, R0ind, inflind, slpind, RDind, nyears, maxage, M, Mat_age, Wt_age, Chist_a, Umax, h, CAA, CAAadj, sigmaR) {
    .Call('_DLMtool_LSRA_MCMC_sim', PACKAGE = 'DLMtool', nits, pars, JumpCV, adapt, parLB, parUB, R0ind, inflind, slpind, RDind, nyears, maxage, M, Mat_age, Wt_age, Chist_a, Umax, h, CAA, CAAadj, sigmaR)
}
#' Rcpp version of the Optimization function that returns the squared difference between user
#' specified and calculated movement parameters.
#'
#' The user specifies the probability of staying in the same area and spatial
#' heterogeneity (both in the unfished state). This function returns the
#' squared difference between these values and those produced by the three
#' logit movement model.
#'
#' This is paired with \code{getmov} to find the correct movement model.
#'
#' @param par Three parameters in the logit space that control the four
#' probabilities of moving between 2 areas
#' @param prb User specified probability that individuals in area 1 remain in
#' that area (unfished conditions)
#' @param frac User specified fraction of individuals found in area 1 (unfished
#' conditions)
#'
#' @author T. Carruthers with an amateur attempt at converting to Rcpp by A. Hordyk (but it works!)
#' @useDynLib DLMtool
#' @export
movfit_Rcpp <- function(par, prb, frac) {
    .Call('_DLMtool_movfit_Rcpp', PACKAGE = 'DLMtool', par, prb, frac)
}
#' Rcpp version of the q Optimizer
#'
#' Optimize for catchability coefficient
#'
#' @param lnIn internal
#' @param depc internal
#' @param Fc internal
#' @param Perrc internal
#' @param Mc internal
#' @param hc internal
#' @param Mac internal
#' @param Wac internal
#' @param R0c internal
#' @param Vc internal
#' @param nyears internal
#' @param maxage internal
#' @param movc internal
#' @param Spat_targc internal
#' @param SRrelc internal
#' @param aRc internal
#' @param bRc internal
#' @param SSBpRc internal
#'
#' @export
#' @keywords internal
optQ_cpp <- function(lnIn, depc, Fc, Perrc, Mc, hc, Mac, Wac, R0c, Vc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc) {
    .Call('_DLMtool_optQ_cpp', PACKAGE = 'DLMtool', lnIn, depc, Fc, Perrc, Mc, hc, Mac, Wac, R0c, Vc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc)
}
#' Population dynamics model for one annual time-step
#'
#' Project population forward one time-step given current numbers-at-age and total mortality
#'
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param SSBcurr A numeric vector of length nareas with the current spawning biomass in each area
#' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
#' @param Zcurr A numeric matrix (maxage, nareas) with total mortality-at-age in each area
#' @param PerrYr A numeric value with recruitment deviation for current year
#' @param hs Steepness of the stock-recruit relationship (SRR)
#' @param R0a Numeric vector with unfished recruitment by area
#' @param SSBpR Numeric vector with unfished spawning stock per recruit by area
#' @param aR Numeric vector with Ricker SRR a parameter by area
#' @param bR Numeric vector with Ricker SRR b parameter by area
#' @param mov Numeric matrix (nareas by nareas) with the movement matrix
#' @param SRrel Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
#'
#' @author A. Hordyk
#'
#' @export
#' @keywords internal
popdynOneTScpp <- function(nareas, maxage, SSBcurr, Ncurr, Zcurr, PerrYr, hs, R0a, SSBpR, aR, bR, mov, SRrel) {
    .Call('_DLMtool_popdynOneTScpp', PACKAGE = 'DLMtool', nareas, maxage, SSBcurr, Ncurr, Zcurr, PerrYr, hs, R0a, SSBpR, aR, bR, mov, SRrel)
}
#' Population dynamics model in CPP
#'
#' Project population forward pyears given current numbers-at-age and total mortality, etc
#' for the future years
#'
#' @param nareas The number of spatial areas
#' @param maxage The maximum age
#' @param Ncurr A numeric matrix (maxage, nareas) with current numbers-at-age in each area
#' @param pyears The number of years to project the population forward
#' @param M_age Numeric matrix (maxage, pyears) with natural mortality by age and year
#' @param Asize_c Numeric vector (length nareas) with size of each area
#' @param MatAge Numeric vector with proportion mature by age
#' @param WtAge Numeric matrix (maxage, pyears) with weight by age and year
#' @param Vuln Numeric matrix (maxage, pyears) with vulnerability by age and year
#' @param Retc Numeric matrix (maxage, pyears) with retention by age and year
#' @param Prec Numeric vector (pyears) with recruitment error
#' @param movc Numeric matrix (nareas by nareas) with the movement matrix
#' @param SRrelc Integer indicating the stock-recruitment relationship to use (1 for Beverton-Holt, 2 for Ricker)
#' @param Effind Numeric vector (length pyears) with the fishing effort by year
#' @param Spat_targc Integer. Spatial targeting
#' @param hc Numeric. Steepness of stock-recruit relationship
#' @param R0c Numeric vector of length nareas with unfished recruitment by area
#' @param SSBpRc Numeric vector of length nareas with unfished spawning per recruit by area
#' @param aRc Numeric. Ricker SRR a value by area
#' @param bRc Numeric. Ricker SRR b value by area
#' @param Qc Numeric. Catchability coefficient
#' @param Fapic Numeric. Apical F value
#' @param maxF A numeric value specifying the maximum fishing mortality for any single age class
#' @param control Integer. 1 to use q and effort to calculate F, 2 to use Fapic (apical F) and
#' vulnerability to calculate F.
#'
#' @author A. Hordyk
#'
#' @export
#' @keywords internal
popdynCPP <- function(nareas, maxage, Ncurr, pyears, M_age, Asize_c, MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc, R0c, SSBpRc, aRc, bRc, Qc, Fapic, maxF, control) {
    .Call('_DLMtool_popdynCPP', PACKAGE = 'DLMtool', nareas, maxage, Ncurr, pyears, M_age, Asize_c, MatAge, WtAge, Vuln, Retc, Prec, movc, SRrelc, Effind, Spat_targc, hc, R0c, SSBpRc, aRc, bRc, Qc, Fapic, maxF, control)
}
#' Rcpp version of the Projection Optimizer
#'
#' Optimize for MSY and calculate MSY reference points
#'
#' @param lnIn internal
#' @param Mc internal
#' @param hc internal
#' @param Mac internal
#' @param Wac internal
#' @param R0c internal
#' @param Vc internal
#' @param retAc internal
#' @param nyears internal
#' @param maxage internal
#' @param movc internal
#' @param Spat_targc internal
#' @param SRrelc internal
#' @param aRc internal
#' @param bRc internal
#' @param SSBpRc internal
#' @param proyears internal
#' @param Control internal
#'
#' @export
#' @keywords internal
projOpt_cpp <- function(lnIn, Mc, hc, Mac, Wac, R0c, Vc, retAc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc, proyears, Control) {
    .Call('_DLMtool_projOpt_cpp', PACKAGE = 'DLMtool', lnIn, Mc, hc, Mac, Wac, R0c, Vc, retAc, nyears, maxage, movc, Spat_targc, SRrelc, aRc, bRc, proyears, Control)
}
|
#' Return a vector of all subcatchments downstream of a specified catchment
#' @param hierarchy a dataframe whose first two columns are the catchment id
#'   and the next downstream (nextds) id.
#' @param catchname a catchment id for which a vector of downstream catchment
#'   ids will be returned.
#' @return a vector of downstream catchment ids (starting with
#'   \code{catchname} itself), or \code{NULL} (with a message) when
#'   \code{catchname} is not listed in the hierarchy table.
#' @note Function depends on the next downstream field in a stream network 'hierarchy' table (dataframe).
#' When used in conjunction with list_all_downstream it is possible to get a list of catchments downstream
#' of a vector of sites. This can then be used to support further aggregation of environmental variables
#' for sub-catchments downstream of a list of catchments of interest (e.g. for calculating barrier numbers).
#' A nextds value of -1 marks the network outlet and is dropped from the result.
#' @examples
#'
#'#find all sites downstream of the first site in the catchment list
#'data(mwcats)
#'
#'alldownstream(hierarchy = mwcats, catchname = mwcats$site[1])
#' @export
alldownstream <- function(hierarchy, catchname) {
  # Only the first two columns are used: site id and next-downstream id.
  names(hierarchy) <- c("site", "nextds")
  if (length(which(hierarchy$site == catchname)) > 0) {
    catchname <- as.vector(catchname)
    # Sites immediately downstream of catchname (NA = no downstream neighbour).
    allsc <- as.vector(hierarchy$nextds[hierarchy$site == catchname])
    allsc <- allsc[!is.na(allsc)]
    # number of branches immediately upstream of the frontier
    nbrnch <- end <- length(allsc)
    start <- 1
    # Walk downstream one generation at a time: allsc[start:end] holds the
    # sites added in the previous pass.  Stop when no new sites were added or
    # the outlet (coded -1) has been reached.
    # `&&` (not `&`): both operands are scalars, so short-circuit evaluation
    # is the correct idiom here.
    while (nbrnch > 0 && !(-1 %in% allsc)) {
      for (j in start:end) {
        allsc <- c(allsc, as.vector(hierarchy$nextds[hierarchy$site == allsc[j]]))
        allsc <- allsc[!is.na(allsc)]
      }
      start <- end + 1
      end <- length(allsc)
      nbrnch <- end - (start - 1)
    }
    allsc <- c(catchname, allsc)
    # Drop the outlet sentinel before returning.
    allsc <- allsc[allsc != -1]
    allsc
  } else {
    # Keep the original best-effort behaviour (message + NULL) rather than
    # stopping, so callers iterating over many sites are not interrupted.
    cat(paste(catchname, "is not a site listed in the hierarchy table", "\n"))
  }
}
|
/R/alldownstream.R
|
no_license
|
lhmet-forks/catchstats
|
R
| false
| false
| 1,928
|
r
|
#' List every subcatchment downstream of a given catchment
#'
#' Follows the next-downstream links of a stream network table and collects
#' all catchment ids reachable downstream of \code{catchname}.
#' @param hierarchy a dataframe whose first two columns hold the catchment id
#'   and the next downstream (nextds) id.
#' @param catchname the catchment id to start the downstream walk from.
#' @return a vector of catchment ids (starting with \code{catchname});
#'   prints a message and returns \code{NULL} when \code{catchname} is not
#'   listed in the hierarchy table.
#' @note The outlet of the network is coded as -1 in the nextds column and
#'   is excluded from the result. Pairs with list_all_downstream to process
#'   a vector of sites.
#' @examples
#'
#'#find all sites downstream of the first site in the catchment list
#'data(mwcats)
#'
#'alldownstream(hierarchy = mwcats, catchname = mwcats$site[1])
#' @export
alldownstream <- function(hierarchy, catchname) {
  names(hierarchy) <- c("site", "nextds")
  if (!any(hierarchy$site == catchname, na.rm = TRUE)) {
    # Unknown site: report it; cat() yields the NULL return value.
    return(cat(paste(catchname, "is not a site listed in the hierarchy table", "\n")))
  }
  catchname <- as.vector(catchname)
  # Seed the search with the sites immediately downstream of catchname.
  found <- as.vector(hierarchy$nextds[hierarchy$site == catchname])
  found <- found[!is.na(found)]
  first_new <- 1
  last_new <- length(found)
  # Breadth-first walk: found[first_new:last_new] are the sites discovered in
  # the previous sweep; stop once nothing new was added or the outlet (-1)
  # has been reached.
  while (last_new >= first_new && !(-1 %in% found)) {
    for (k in first_new:last_new) {
      nxt <- as.vector(hierarchy$nextds[hierarchy$site == found[k]])
      found <- c(found, nxt[!is.na(nxt)])
    }
    first_new <- last_new + 1
    last_new <- length(found)
  }
  # Prepend the starting site and drop the outlet sentinel.
  found <- c(catchname, found)
  found[found != -1]
}
|
# Appears to be an auto-generated fuzz/valgrind test case: a list of extreme
# double values (huge magnitudes, denormals, zeros) used to stress-test the
# compiled routine for memory errors -- TODO confirm against the generator.
testlist <- list(doy = 4.70274965399612e+162, latitude = c(-1.76507117752212e+170, -7.88781071482505e+93, 7.97122499511694e-290, 6.44409915094729e+257, -3.37449889583673e-234, 6.44409915093636e+257, 9.00974448185223e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549125e-158, -1.3258495253834e-113, 2.79620616433656e-119, 8.65820739823682e-304, 1.49235215549396e-315, 51539607754, 6.68889884134308e+51, -1.79376295339038e-307, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -9.25282796475835e+157, 8.92614281163287e-157, -1.10159691082425e-309, 3.0590349445539e-07, 4.00294354529823e-221, -1.15261897385911e+41, -8.10849672500667e+229, -3.94941359881217e-277))
# NOTE(review): calls an unexported function via ':::', so the 'meteor'
# package must be installed for this to run; the result is only inspected
# with str().
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831395-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 819
|
r
|
# Appears to be an auto-generated fuzz/valgrind test case: a list of extreme
# double values (huge magnitudes, denormals, zeros) used to stress-test the
# compiled routine for memory errors -- TODO confirm against the generator.
testlist <- list(doy = 4.70274965399612e+162, latitude = c(-1.76507117752212e+170, -7.88781071482505e+93, 7.97122499511694e-290, 6.44409915094729e+257, -3.37449889583673e-234, 6.44409915093636e+257, 9.00974448185223e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549125e-158, -1.3258495253834e-113, 2.79620616433656e-119, 8.65820739823682e-304, 1.49235215549396e-315, 51539607754, 6.68889884134308e+51, -1.79376295339038e-307, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -9.25282796475835e+157, 8.92614281163287e-157, -1.10159691082425e-309, 3.0590349445539e-07, 4.00294354529823e-221, -1.15261897385911e+41, -8.10849672500667e+229, -3.94941359881217e-277))
# NOTE(review): calls an unexported function via ':::', so the 'meteor'
# package must be installed for this to run; the result is only inspected
# with str().
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
# Assignment 2 solutions: summary statistics and month/day recoding on the
# forest-fires dataset.
# NOTE(review): the input path is machine-specific; adjust before running.
df1 <- read.csv("F:/Ivy-BigData/BigData-IVY-R/Packages/forestfires.csv")
summary(df1)
#Question1
# Print a copy of the data with a squared X column (the result is not stored).
transform(df1,X_square=X*X)
#Question2
# Mean, sum, median and sd for columns 5-7.
mean_FMMC=mean(df1[,5])
sum_FMMC=sum(df1[,5])
median_FMMC=median(df1[,5])
sd_FMMC=sd(df1[,5])
mean_DMC=mean(df1[,6])
sum_DMC=sum(df1[,6])
median_DMC=median(df1[,6])
sd_DMC=sd(df1[,6])
mean_DC=mean(df1[,7])
sum_DC=sum(df1[,7])
median_DC=median(df1[,7])
sd_DC=sd(df1[,7])
cbind(sd_DC,sd_DMC,sd_FMMC,mean_DC,mean_DMC,mean_FMMC,median_DC,median_DMC,median_FMMC,sum_DC,sum_DMC,sum_FMMC)
#Question 3
# Recode abbreviated month names to full month names.
# (Fixed: the original used sapply(df$month, ...), but `df` is never defined
# in this script -- the data frame is `df1`.)
df1$Month <-
  sapply(df1$month,function(x){
    if(x=="jan"){
      x <- as.factor("January")
    }
    if(x=="feb"){
      x <- as.factor("February")
    }
    if(x=="mar"){
      x <- as.factor("March")
    }
    if(x=="apr"){
      x <- as.factor("April")
    }
    if(x=="may"){
      x <- as.factor("May")
    }
    if(x=="jun"){
      x <- as.factor("June")
    }
    if(x=="jul"){
      x <- as.factor("July")
    }
    if(x=="aug"){
      x <- as.factor("August")
    }
    if(x=="sep"){
      x <- as.factor("September")
    }
    if(x=="oct"){
      x <- as.factor("October")
    }
    if(x=="nov"){
      x <- as.factor("November")
    }
    if(x=="dec"){
      x <- as.factor("December")
    }
    return( x )
  }
  )
print(df1)
# Question 4
# Recode day abbreviations to day numbers (Sunday = 1).
# (Fixed: `df$day` -> `df1$day`; `df` is never defined in this script.)
df1$Day_num <-
  sapply(df1$day,function(x){
    if(x=="sun"){
      x <- as.factor("1")
    }
    if(x=="mon"){
      x <- as.factor("2")
    }
    if(x=="tue"){
      x <- as.factor("3")
    }
    if(x=="wed"){
      x <- as.factor("4")
    }
    if(x=="thu"){
      x <- as.factor("5")
    }
    if(x=="fri"){
      x <- as.factor("6")
    }
    if(x=="sat"){
      x <- as.factor("7")
    }
    return( x )
  }
  )
print(df1)
#Question5
# Correlation test between the first two columns.
print(df1)
x=df1[,1]
y=df1[,2]
cor.test(x,y)
# Question 6
# (Fixed: install dplyr only when it is missing, instead of reinstalling on
# every run.)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# (Fixed: grouping used `df`, which does not exist; the data frame is `df1`.)
summarise(group_by(df1, month), sum_rain=sum(rain),sum_wind=sum(wind))
#Question 7
summarise(group_by(df1,month),mean_rain=mean(rain),mean_wind=mean(wind),count=n())
#Question 8
summarise(group_by(df1,month),count=n())
#Question 9
summarise(group_by(df1,month,day),count=n())
|
/Assignment_2_solutions_final.R
|
no_license
|
hiraltaunk/R-Assignments
|
R
| false
| false
| 2,298
|
r
|
# Assignment 2 solutions: summary statistics and month/day recoding on the
# forest-fires dataset.
# NOTE(review): the input path is machine-specific.
df1 <- read.csv("F:/Ivy-BigData/BigData-IVY-R/Packages/forestfires.csv")
summary(df1)
#Question1
# Prints a copy of the data with a squared X column; the result is not stored.
transform(df1,X_square=X*X)
#Question2
# Mean, sum, median and sd for columns 5-7.
mean_FMMC=mean(df1[,5])
sum_FMMC=sum(df1[,5])
median_FMMC=median(df1[,5])
sd_FMMC=sd(df1[,5])
mean_DMC=mean(df1[,6])
sum_DMC=sum(df1[,6])
median_DMC=median(df1[,6])
sd_DMC=sd(df1[,6])
mean_DC=mean(df1[,7])
sum_DC=sum(df1[,7])
median_DC=median(df1[,7])
sd_DC=sd(df1[,7])
cbind(sd_DC,sd_DMC,sd_FMMC,mean_DC,mean_DMC,mean_FMMC,median_DC,median_DMC,median_FMMC,sum_DC,sum_DMC,sum_FMMC)
#Question 3
# BUG(review): `df` is never defined in this script (only `df1`), so
# sapply(df$month, ...) will fail -- this should be df1$month.
df1$Month <-
  sapply(df$month,function(x){
  if(x=="jan"){
    x <- as.factor("January")
  }
  if(x=="feb"){
    x <- as.factor("February")
  }
  if(x=="mar"){
    x <- as.factor("March")
  }
  if(x=="apr"){
    x <- as.factor("April")
  }
  if(x=="may"){
    x <- as.factor("May")
  }
  if(x=="jun"){
    x <- as.factor("June")
  }
  if(x=="jul"){
    x <- as.factor("July")
  }
  if(x=="aug"){
    x <- as.factor("August")
  }
  if(x=="sep"){
    x <- as.factor("September")
  }
  if(x=="oct"){
    x <- as.factor("October")
  }
  if(x=="nov"){
    x <- as.factor("November")
  }
  if(x=="dec"){
    x <- as.factor("December")
  }
  return( x )
}
)
print(df1)
# Question 4
# BUG(review): same issue as Question 3 -- `df$day` should be `df1$day`.
df1$Day_num <-
  sapply(df$day,function(x){
  if(x=="sun"){
    x <- as.factor("1")
  }
  if(x=="mon"){
    x <- as.factor("2")
  }
  if(x=="tue"){
    x <- as.factor("3")
  }
  if(x=="wed"){
    x <- as.factor("4")
  }
  if(x=="thu"){
    x <- as.factor("5")
  }
  if(x=="fri"){
    x <- as.factor("6")
  }
  if(x=="sat"){
    x <- as.factor("7")
  }
  return( x )
}
)
print(df1)
#Question5
# Correlation test between the first two columns.
print(df1)
x=df1[,1]
y=df1[,2]
cor.test(x,y)
# Question 6
# NOTE(review): this reinstalls dplyr on every run; guard with
# requireNamespace() instead.
install.packages("dplyr")
library(dplyr)
# BUG(review): the group_by calls below use `df`, which is never defined in
# this script -- they should use `df1`.
summarise(group_by(df, month), sum_rain=sum(rain),sum_wind=sum(wind))
#Question 7
summarise(group_by(df,month),mean_rain=mean(rain),mean_wind=mean(wind),count=n())
#Question 8
summarise(group_by(df,month),count=n())
#Question 9
summarise(group_by(df,month,day),count=n())
|
# Merge per-seed model-fit lists (saved as .RData files, one per seed) into
# single combined lists and save the result.
# NOTE(review): `size`, `PATTERN` and `OFILE` are not defined in this script;
# they must exist in the environment before this file is sourced.
library(dplyr)
set.seed(10);
# Draw `size` distinct seeds; sprintf(PATTERN, seed) names each input file.
n_seed <- sample.int(n = 1000, size = size)
# load() is expected to create msm_mod, msm_mod_4, cox_mod, cox_mod_4,
# disc_mod and disc_mod_4 in this environment.
load(sprintf(PATTERN, n_seed[1]))
print(n_seed[1])
# Seed the accumulator lists from the first file.
msm_mod.def <- msm_mod
msm_mod_4.def <- msm_mod_4
cox_mod.def <- cox_mod
cox_mod_4.def <- cox_mod_4
disc_mod.def <- disc_mod
disc_mod_4.def <- disc_mod_4
for (.seed in n_seed[2:size]){
  print(.seed)
  load(sprintf(PATTERN, .seed))
  # Append this seed's fits after the entries accumulated so far.
  ln <- length(msm_mod.def)
  ln2 <- length(msm_mod)
  for (.i in 1:ln2){
    msm_mod.def[[ln + .i]] <- msm_mod[[.i]]
    msm_mod_4.def[[ln + .i]] <- msm_mod_4[[.i]]
    cox_mod.def[[ln + .i]] <- cox_mod[[.i]]
    cox_mod_4.def[[ln + .i]] <- cox_mod_4[[.i]]
    disc_mod.def[[ln + .i]] <- disc_mod[[.i]]
    disc_mod_4.def[[ln + .i]] <- disc_mod_4[[.i]]
  }
}
# NOTE(review): none of the *.def objects below (msm_50.def, cox_50.def, ...)
# are created in this script; presumably they come from the load()ed files or
# from code run beforehand -- confirm, otherwise these assignments fail.
msm_50 <- msm_50.def
msm_left <- msm_left.def
cox_50 <- cox_50.def
cox_left <- cox_left.def
msm_50_4 <- msm_50_4.def
msm_left_4 <- msm_left_4.def
cox_50_4 <- cox_50_4.def
cox_left_4 <- cox_left_4.def
disc_50 <- disc_50.def
disc_left <- disc_left.def
disc_50_4 <- disc_50_4.def
disc_left_4 <- disc_left_4.def
save(msm_50, msm_left, cox_50, cox_left,
     msm_50_4, msm_left_4, cox_50_4, cox_left_4,
     disc_50, disc_left, disc_50_4, disc_left_4, file = OFILE)
|
/aux_unio.R
|
no_license
|
Blanch-Font/PhD
|
R
| false
| false
| 1,182
|
r
|
# Merge per-seed model-fit lists (one .RData file per seed, named via
# sprintf(PATTERN, seed)) into single combined lists and save them to OFILE.
# `size`, `PATTERN` and `OFILE` must be defined before this script is sourced.
library(dplyr)
set.seed(10)
n_seed <- sample.int(n = 1000, size = size)
# The first file seeds the accumulators; load() creates msm_mod, msm_mod_4,
# cox_mod, cox_mod_4, disc_mod and disc_mod_4 in this environment.
load(sprintf(PATTERN, n_seed[1]))
print(n_seed[1])
msm_mod.def <- msm_mod
msm_mod_4.def <- msm_mod_4
cox_mod.def <- cox_mod
cox_mod_4.def <- cox_mod_4
disc_mod.def <- disc_mod
disc_mod_4.def <- disc_mod_4
for (.seed in n_seed[2:size]) {
  print(.seed)
  load(sprintf(PATTERN, .seed))
  # Append this seed's fits after the entries accumulated so far.
  ln <- length(msm_mod.def)
  ln2 <- length(msm_mod)
  # seq_len() instead of 1:ln2 so an empty fit list is skipped safely
  # (1:0 would iterate over c(1, 0) and index past the end).
  for (.i in seq_len(ln2)) {
    msm_mod.def[[ln + .i]] <- msm_mod[[.i]]
    msm_mod_4.def[[ln + .i]] <- msm_mod_4[[.i]]
    cox_mod.def[[ln + .i]] <- cox_mod[[.i]]
    cox_mod_4.def[[ln + .i]] <- cox_mod_4[[.i]]
    disc_mod.def[[ln + .i]] <- disc_mod[[.i]]
    disc_mod_4.def[[ln + .i]] <- disc_mod_4[[.i]]
  }
}
# NOTE(review): the *.def objects below are not created in this script;
# presumably they are loaded from the per-seed files -- confirm before use.
msm_50 <- msm_50.def
msm_left <- msm_left.def
cox_50 <- cox_50.def
cox_left <- cox_left.def
msm_50_4 <- msm_50_4.def
msm_left_4 <- msm_left_4.def
cox_50_4 <- cox_50_4.def
cox_left_4 <- cox_left_4.def
disc_50 <- disc_50.def
disc_left <- disc_left.def
disc_50_4 <- disc_50_4.def
disc_left_4 <- disc_left_4.def
save(msm_50, msm_left, cox_50, cox_left,
     msm_50_4, msm_left_4, cox_50_4, cox_left_4,
     disc_50, disc_left, disc_50_4, disc_left_4, file = OFILE)
|
#Andrew Dhawan
#Sept 8 2016
#make_heatmap_upload.R
#This code enables the generation of a heatmap from
#values within an excel file, as defined by the example
#input file heatmap_example.xlsx
library(readxl)
# (Fixed: heatmap.2() is provided by gplots, which was never loaded, so the
# call below failed with "could not find function".)
library(gplots)
data <- read_excel("heatmap_example.xlsx")
# Use the Drug column as row labels, then drop it from the value matrix.
rowNames <- data$`Drug`
data$`Drug` <- NULL
ec50_matrix <- data.matrix(data)
# Diverging blue -> white -> red palette, 128 steps.
my_palette <- colorRampPalette(c("#2c7bb6", "#f7f7f7", "#d7191c"))(n = 128)
ec50_heatmap <- heatmap.2(ec50_matrix, Rowv = NA, Colv = NA, col = my_palette,
                          na.color = "darkgrey", scale = "none",
                          margins = c(10, 10), labRow = rowNames,
                          trace = "none", dendrogram = "none",
                          density.info = "none")
|
/make_heatmap_upload.R
|
permissive
|
andrewdhawan/alk-collateral-sensitivity
|
R
| false
| false
| 608
|
r
|
#Andrew Dhawan
#Sept 8 2016
#make_heatmap_upload.R
#This code enables the generation of a heatmap from
#values within an excel file, as defined by the example
#input file heatmap_example.xlsx
library(readxl)
data <- read_excel("heatmap_example.xlsx")
# Use the Drug column as row labels, then drop it from the value matrix.
rowNames <- data$`Drug`
data$`Drug` <-NULL
ec50_matrix <- data.matrix(data)
# Diverging blue -> white -> red palette, 128 steps.
my_palette <- colorRampPalette(c("#2c7bb6","#f7f7f7", "#d7191c"))(n = 128) #299)
# NOTE(review): heatmap.2() comes from the gplots package, which is never
# loaded in this script -- add library(gplots) or this call fails.
ec50_heatmap <- heatmap.2(ec50_matrix, Rowv=NA, Colv=NA, col =my_palette, na.color = "darkgrey", scale="none", margins=c(10,10),labRow=rowNames, trace='none',dendrogram="none",density.info="none")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VisCms.R
\name{visGroup}
\alias{visGroup}
\title{visGroup}
\usage{
visGroup(sce, group, dim_red = "TSNE")
}
\arguments{
\item{sce}{A \code{SingleCellExperiment} object.}
\item{group}{Character. Name of group/batch variable.
Needs to be one of \code{names(colData(sce))}.}
\item{dim_red}{Character. Name of embeddings to use as subspace for plotting.
Default is "TSNE".}
}
\value{
a \code{ggplot} object.
}
\description{
Plot group label in a reduced dimensional plot.
}
\details{
Plots a reduced dimension plot colored by group parameter.
The dimension reduction embedding can be specified, but only tsne embeddings
will automatically be computed by \code{runTSNE}. Embeddings from data
integration methods (e.g. mnn.correct) can be used as long as they are
specified in \code{reducedDimNames(sce)}.
}
\examples{
library(SingleCellExperiment)
sim_list <- readRDS(system.file("extdata/sim50.rds", package = "CellMixS"))
sce <- sim_list[[1]][, c(1:50, 300:350)]
visGroup(sce, "batch")
}
\seealso{
\code{\link{visOverview}}, \code{\link{visMetric}}
Other visualize functions: \code{\link{visCluster}}
}
\concept{visualize functions}
|
/man/visGroup.Rd
|
no_license
|
zhoux85/CellMixS
|
R
| false
| true
| 1,213
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VisCms.R
\name{visGroup}
\alias{visGroup}
\title{visGroup}
\usage{
visGroup(sce, group, dim_red = "TSNE")
}
\arguments{
\item{sce}{A \code{SingleCellExperiment} object.}
\item{group}{Character. Name of group/batch variable.
Needs to be one of \code{names(colData(sce))}.}
\item{dim_red}{Character. Name of embeddings to use as subspace for plotting.
Default is "TSNE".}
}
\value{
a \code{ggplot} object.
}
\description{
Plot group label in a reduced dimensional plot.
}
\details{
Plots a reduced dimension plot colored by group parameter.
The dimension reduction embedding can be specified, but only tsne embeddings
will automatically be computed by \code{runTSNE}. Embeddings from data
integration methods (e.g. mnn.correct) can be used as long as they are
specified in \code{reducedDimNames(sce)}.
}
\examples{
library(SingleCellExperiment)
sim_list <- readRDS(system.file("extdata/sim50.rds", package = "CellMixS"))
sce <- sim_list[[1]][, c(1:50, 300:350)]
visGroup(sce, "batch")
}
\seealso{
\code{\link{visOverview}}, \code{\link{visMetric}}
Other visualize functions: \code{\link{visCluster}}
}
\concept{visualize functions}
|
# Plot a heatmap of the gap results on a log10-scaled x axis.
library(plotly)
data <- read.csv("Resultados_filt.csv", header = FALSE)
# First row holds the column labels, first column the row labels; the rest
# is the value matrix.
mat_data <- data.matrix(data[2:nrow(data), 2:ncol(data)])
rownames(mat_data) <- data[2:nrow(data), 1]
colnames(mat_data) <- data[1, 2:ncol(data)]
# Order the columns by their numeric label.
mat_data <- mat_data[, order(as.double(colnames(mat_data)))]
ax <- list(title = "Log10 axis")
# (Fixed: the heatmap was built twice and the first, untitled plot_ly()
# result was immediately overwritten -- only the titled version is kept.)
p <- plot_ly(x = log10(as.double(colnames(mat_data))), y = rownames(mat_data),
             z = mat_data, type = "heatmap") %>%
  layout(title = "Gap heatmap", xaxis = ax)
p
|
/Tarea8-TransicionFase/Grafica.R
|
no_license
|
Norberto89/AnalisisDise-oAlgoritmos
|
R
| false
| false
| 535
|
r
|
# Plot a heatmap of the gap results on a log10-scaled x axis.
library(plotly)
data=read.csv("Resultados_filt.csv",header=FALSE)
# First row holds the column labels, first column the row labels; the rest
# is the value matrix.
mat_data=data.matrix(data[2:nrow(data),2:ncol(data)])
rownames(mat_data)<-data[2:nrow(data),1]
colnames(mat_data)<-data[1,2:ncol(data)]
# Order the columns by their numeric label.
mat_data<-mat_data[,order(as.double(colnames(mat_data)))]
ax<-list(title = "Log10 axis")
# NOTE(review): this first heatmap is immediately overwritten by the next
# assignment and is never used -- it can be deleted.
p<-plot_ly(x=log10(as.double(colnames(mat_data))),y=rownames(mat_data),z=mat_data, type = "heatmap")
p<-plot_ly(x=log10(as.double(colnames(mat_data))),y=rownames(mat_data),z=mat_data, type = "heatmap")%>%layout(title= "Gap heatmap",xaxis=ax)
p
|
/run_analysis.R
|
no_license
|
vkati06/Getting-and-Cleaning-Data
|
R
| false
| false
| 2,563
|
r
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.